From b63264c8342e6a1b6971c79550d2af2024b6a4de Mon Sep 17 00:00:00 2001
From: Luca Boccassi
Date: Tue, 14 Aug 2018 18:52:30 +0100
Subject: New upstream version 18.08

Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi
---
 .gitignore | 1 +
 GNUmakefile | 2 +-
 MAINTAINERS | 286 +-
 app/Makefile | 2 +-
 app/meson.build | 56 +-
 app/pdump/main.c | 146 +-
 app/pdump/meson.build | 6 +
 app/proc-info/Makefile | 15 +
 app/proc-info/main.c | 672 +
 app/proc-info/meson.build | 6 +
 app/proc_info/Makefile | 15 -
 app/proc_info/main.c | 668 -
 app/test-bbdev/Makefile | 2 +
 app/test-bbdev/meson.build | 9 +
 app/test-bbdev/test-bbdev.py | 4 +-
 app/test-bbdev/test_bbdev.c | 29 +-
 app/test-bbdev/test_bbdev_perf.c | 407 +-
 app/test-bbdev/test_bbdev_vector.c | 8 +-
 app/test-bbdev/test_vectors/bbdev_null.data | 5 +
 app/test-bbdev/test_vectors/bbdev_vector_null.data | 5 -
 .../test_vectors/bbdev_vector_td_default.data | 54 -
 .../test_vectors/bbdev_vector_te_default.data | 33 -
 .../turbo_dec_c1_k40_r0_e17280_sbd_negllr.data | 57 +
 ...k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data | 643 +
 ..._k6144_r0_e10376_crc24b_sbd_negllr_low_snr.data | 643 +
 .../turbo_dec_c1_k6144_r0_e34560_negllr.data | 645 +
 .../turbo_dec_c1_k6144_r0_e34560_posllr.data | 645 +
 .../turbo_dec_c1_k6144_r0_e34560_sbd_negllr.data | 1224 +
 .../turbo_dec_c1_k6144_r0_e34560_sbd_posllr.data | 1225 +
 .../turbo_dec_c2_k3136_r0_e4920_sbd_negllr.data | 676 +
 ...bo_dec_c2_k3136_r0_e4920_sbd_negllr_crc24b.data | 677 +
 .../test_vectors/turbo_enc_c1_k40_r0_e1190_rm.data | 36 +
 .../test_vectors/turbo_enc_c1_k40_r0_e1194_rm.data | 36 +
 .../test_vectors/turbo_enc_c1_k40_r0_e1196_rm.data | 36 +
 .../test_vectors/turbo_enc_c1_k40_r0_e272_rm.data | 33 +
 .../turbo_enc_c1_k6144_r0_e120_rm_rvidx.data | 63 +
 .../test_vectors/turbo_enc_c1_k6144_r0_e18444.data | 156 +
 .../turbo_enc_c1_k6144_r0_e18448_crc24a.data | 159 +
 .../turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data | 180 +
 .../turbo_enc_c2_k5952_r0_e17868_crc24b.data | 300 +
 .../turbo_enc_c3_k4800_r2_e14412_crc24b.data | 153 +
 .../turbo_enc_c4_k4800_r2_e14412_crc24b.data | 252 +
 app/test-bbdev/turbo_dec_default.data | 1 +
 app/test-bbdev/turbo_enc_default.data | 1 +
 app/test-crypto-perf/Makefile | 2 +
 app/test-crypto-perf/cperf_ops.c | 3 +
 app/test-crypto-perf/cperf_options.h | 5 +
 app/test-crypto-perf/cperf_test_common.c | 41 +-
 app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 2 +-
 app/test-crypto-perf/cperf_test_vector_parsing.c | 7 +-
 app/test-crypto-perf/main.c | 70 +-
 app/test-crypto-perf/meson.build | 15 +
 app/test-eventdev/evt_main.c | 42 +-
 app/test-eventdev/evt_options.c | 132 +-
 app/test-eventdev/evt_options.h | 35 +
 app/test-eventdev/evt_test.h | 3 +-
 app/test-eventdev/meson.build | 18 +-
 app/test-eventdev/test_order_atq.c | 12 +-
 app/test-eventdev/test_order_queue.c | 12 +-
 app/test-eventdev/test_perf_atq.c | 12 +-
 app/test-eventdev/test_perf_common.c | 276 +-
 app/test-eventdev/test_perf_common.h | 14 +-
 app/test-eventdev/test_perf_queue.c | 9 +-
 app/test-eventdev/test_pipeline_atq.c | 4 +-
 app/test-eventdev/test_pipeline_common.c | 33 +-
 app/test-eventdev/test_pipeline_queue.c | 6 +-
 app/test-pmd/Makefile | 10 +-
 app/test-pmd/bpf_cmd.c | 198 +
 app/test-pmd/bpf_cmd.h | 16 +
 app/test-pmd/cmdline.c | 1791 +-
 app/test-pmd/cmdline_flow.c | 1388 +-
 app/test-pmd/cmdline_tm.c | 185 +-
 app/test-pmd/cmdline_tm.h | 2 +
 app/test-pmd/config.c | 480 +-
 app/test-pmd/csumonly.c | 108 +-
 app/test-pmd/macfwd.c | 3 +-
 app/test-pmd/macswap.c | 3 +-
 app/test-pmd/meson.build | 33 +-
 app/test-pmd/parameters.c | 86 +-
 app/test-pmd/softnicfwd.c | 674 +
 app/test-pmd/testpmd.c | 469 +-
 app/test-pmd/testpmd.h | 141 +-
 app/test-pmd/tm.c | 840 -
 buildtools/Makefile | 33 +-
 buildtools/auto-config-h.sh | 32 +-
 buildtools/pmdinfogen/Makefile | 35 +-
 config/arm/arm64_dpaa2_linuxapp_gcc | 15 +
 config/arm/arm64_dpaa_linuxapp_gcc | 15 +
 config/arm/arm64_thunderx_linuxapp_gcc | 1 +
 config/arm/meson.build | 24 +-
 config/common_armv8a_linuxapp | 58 -
 config/common_base | 171 +-
 config/common_linuxapp | 21 +
 config/defconfig_arm-armv7a-linuxapp-gcc | 31 +-
 config/defconfig_arm64-dpaa-linuxapp-gcc | 9 +-
 config/defconfig_arm64-dpaa2-linuxapp-gcc | 22 -
 config/defconfig_arm64-stingray-linuxapp-gcc | 16 +
 config/defconfig_i686-native-linuxapp-gcc | 3 +
 config/defconfig_i686-native-linuxapp-icc | 8 +-
 config/defconfig_x86_x32-native-linuxapp-gcc | 3 +
 config/meson.build | 21 +-
 config/rte_config.h | 33 +-
 devtools/check-dup-includes.sh | 23 +-
 devtools/check-git-log.sh | 38 +-
 devtools/check-includes.sh | 32 +-
 devtools/check-maintainers.sh | 30 +-
 devtools/check-symbol-change.sh | 159 +
 devtools/check-symbol-maps.sh | 30 +
 devtools/checkpatches.sh | 166 +-
 devtools/cocci/strlcpy.cocci | 8 +
 devtools/get-maintainer.sh | 5 +-
 devtools/git-log-fixes.sh | 30 +-
 devtools/test-build.sh | 51 +-
 devtools/test-meson-builds.sh | 60 +
 devtools/test-null.sh | 30 +-
 doc/api/doxy-api-index.md | 46 +-
 doc/api/doxy-api.conf | 37 +-
 doc/api/doxy-html-custom.sh | 30 +-
 doc/guides/bbdevs/null.rst | 8 +-
 doc/guides/bbdevs/turbo_sw.rst | 70 +-
 doc/guides/compressdevs/features/default.ini | 26 +
 doc/guides/compressdevs/features/isal.ini | 16 +
 doc/guides/compressdevs/features/octeontx.ini | 10 +
 doc/guides/compressdevs/features/qat.ini | 15 +
 doc/guides/compressdevs/features/zlib.ini | 10 +
 doc/guides/compressdevs/index.rst | 16 +
 doc/guides/compressdevs/isal.rst | 123 +
 doc/guides/compressdevs/octeontx.rst | 105 +
 doc/guides/compressdevs/overview.rst | 32 +
 doc/guides/compressdevs/qat_comp.rst | 47 +
 doc/guides/compressdevs/zlib.rst | 69 +
 doc/guides/conf.py | 24 +-
 doc/guides/contributing/cheatsheet.rst | 3 +
 doc/guides/contributing/coding_style.rst | 7 +-
 doc/guides/contributing/design.rst | 3 +
 doc/guides/contributing/documentation.rst | 3 +
 doc/guides/contributing/index.rst | 3 +
 doc/guides/contributing/patches.rst | 77 +
 doc/guides/contributing/stable.rst | 23 +-
 doc/guides/contributing/versioning.rst | 57 +-
 doc/guides/cryptodevs/aesni_gcm.rst | 13 +-
 doc/guides/cryptodevs/aesni_mb.rst | 15 +-
 doc/guides/cryptodevs/ccp.rst | 140 +
 doc/guides/cryptodevs/dpaa2_sec.rst | 38 +-
 doc/guides/cryptodevs/dpaa_sec.rst | 40 +-
 doc/guides/cryptodevs/features/aesni_gcm.ini | 3 +-
 doc/guides/cryptodevs/features/aesni_mb.ini | 2 +
 doc/guides/cryptodevs/features/ccp.ini | 59 +
 doc/guides/cryptodevs/features/default.ini | 20 +-
 doc/guides/cryptodevs/features/dpaa2_sec.ini | 6 +-
 doc/guides/cryptodevs/features/dpaa_sec.ini | 6 +-
 doc/guides/cryptodevs/features/mrvl.ini | 42 -
 doc/guides/cryptodevs/features/mvsam.ini | 42 +
 doc/guides/cryptodevs/features/null.ini | 2 +-
 doc/guides/cryptodevs/features/openssl.ini | 14 +-
 doc/guides/cryptodevs/features/qat.ini | 6 +-
 doc/guides/cryptodevs/features/virtio.ini | 26 +
 doc/guides/cryptodevs/index.rst | 4 +-
 doc/guides/cryptodevs/kasumi.rst | 10 +-
 doc/guides/cryptodevs/mrvl.rst | 193 -
 doc/guides/cryptodevs/mvsam.rst | 193 +
 doc/guides/cryptodevs/openssl.rst | 1 +
 doc/guides/cryptodevs/overview.rst | 27 +
 doc/guides/cryptodevs/qat.rst | 191 +-
 doc/guides/cryptodevs/scheduler.rst | 12 +-
 doc/guides/cryptodevs/snow3g.rst | 10 +-
 doc/guides/cryptodevs/virtio.rst | 117 +
 doc/guides/cryptodevs/zuc.rst | 10 +-
 doc/guides/eventdevs/dpaa2.rst | 14 +-
 doc/guides/eventdevs/octeontx.rst | 33 +-
 doc/guides/faq/faq.rst | 23 +-
 doc/guides/freebsd_gsg/build_sample_apps.rst | 7 +-
 doc/guides/howto/rte_flow.rst | 34 +-
 .../howto/virtio_user_for_container_networking.rst | 3 +-
 doc/guides/index.rst | 2 +
 doc/guides/linux_gsg/build_sample_apps.rst | 16 +-
 .../linux_gsg/cross_build_dpdk_for_arm64.rst | 132 +
 doc/guides/linux_gsg/index.rst | 1 +
 doc/guides/linux_gsg/linux_drivers.rst | 2 +-
 doc/guides/nics/axgbe.rst | 89 +
 doc/guides/nics/bnx2x.rst | 1 +
 doc/guides/nics/bnxt.rst | 36 +-
 doc/guides/nics/cxgbe.rst | 194 +-
 doc/guides/nics/dpaa.rst | 10 +
 doc/guides/nics/dpaa2.rst | 53 +-
 doc/guides/nics/enic.rst | 239 +-
 doc/guides/nics/fail_safe.rst | 28 +-
 doc/guides/nics/features.rst | 43 +-
 doc/guides/nics/features/avf.ini | 1 -
 doc/guides/nics/features/avf_vec.ini | 1 -
 doc/guides/nics/features/axgbe.ini | 19 +
 doc/guides/nics/features/cxgbe.ini | 3 +
 doc/guides/nics/features/cxgbevf.ini | 29 +
 doc/guides/nics/features/default.ini | 4 +
 doc/guides/nics/features/enic.ini | 10 +-
 doc/guides/nics/features/fm10k.ini | 4 +
 doc/guides/nics/features/fm10k_vf.ini | 2 +
 doc/guides/nics/features/i40e.ini | 3 +
 doc/guides/nics/features/i40e_vec.ini | 1 +
 doc/guides/nics/features/i40e_vf.ini | 1 +
 doc/guides/nics/features/i40e_vf_vec.ini | 1 +
 doc/guides/nics/features/ifcvf.ini | 8 +
 doc/guides/nics/features/igb.ini | 1 +
 doc/guides/nics/features/igb_vf.ini | 1 +
 doc/guides/nics/features/ixgbe.ini | 1 +
 doc/guides/nics/features/ixgbe_vec.ini | 1 +
 doc/guides/nics/features/mlx4.ini | 1 +
 doc/guides/nics/features/mlx5.ini | 5 +
 doc/guides/nics/features/mrvl.ini | 23 -
 doc/guides/nics/features/mvpp2.ini | 25 +
 doc/guides/nics/features/netvsc.ini | 23 +
 doc/guides/nics/features/qede.ini | 12 +-
 doc/guides/nics/features/qede_vf.ini | 2 +-
 doc/guides/nics/features/softnic.ini | 9 +
 doc/guides/nics/features/vhost.ini | 1 -
 doc/guides/nics/features/virtio.ini | 1 +
 doc/guides/nics/features/virtio_vec.ini | 1 +
 doc/guides/nics/fm10k.rst | 27 +-
 doc/guides/nics/i40e.rst | 129 +-
 doc/guides/nics/ifc.rst | 96 +
 .../nics/img/szedata2_nfb200g_architecture.svg | 214 +
 doc/guides/nics/index.rst | 6 +-
 doc/guides/nics/ixgbe.rst | 43 +-
 doc/guides/nics/liquidio.rst | 4 +-
 doc/guides/nics/mlx4.rst | 63 +-
 doc/guides/nics/mlx5.rst | 264 +-
 doc/guides/nics/mrvl.rst | 275 -
 doc/guides/nics/mvpp2.rst | 520 +
 doc/guides/nics/netvsc.rst | 105 +
 doc/guides/nics/nfp.rst | 65 +-
 doc/guides/nics/octeontx.rst | 3 +-
 doc/guides/nics/overview.rst | 28 +-
 doc/guides/nics/pcap_ring.rst | 25 +-
 doc/guides/nics/qede.rst | 34 +-
 doc/guides/nics/sfc_efx.rst | 153 +-
 doc/guides/nics/softnic.rst | 250 +
 doc/guides/nics/szedata2.rst | 104 +-
 doc/guides/nics/tap.rst | 30 +-
 doc/guides/nics/thunderx.rst | 24 +-
 doc/guides/nics/vdev_netvsc.rst | 14 +-
 doc/guides/nics/virtio.rst | 40 +-
 doc/guides/platform/octeontx.rst | 6 +-
 doc/guides/prog_guide/bbdev.rst | 259 +-
 doc/guides/prog_guide/bpf_lib.rst | 38 +
 doc/guides/prog_guide/compressdev.rst | 623 +
 doc/guides/prog_guide/cryptodev_lib.rst | 304 +-
 doc/guides/prog_guide/env_abstraction_layer.rst | 239 +-
 doc/guides/prog_guide/event_crypto_adapter.rst | 296 +
 .../prog_guide/event_ethernet_rx_adapter.rst | 47 +-
 doc/guides/prog_guide/event_timer_adapter.rst | 296 +
 doc/guides/prog_guide/eventdev.rst | 57 +-
 .../generic_segmentation_offload_lib.rst | 10 +
 doc/guides/prog_guide/glossary.rst | 3 -
 doc/guides/prog_guide/hash_lib.rst | 29 +-
 .../img/event_crypto_adapter_op_forward.svg | 1078 +
 .../prog_guide/img/event_crypto_adapter_op_new.svg | 1061 +
 doc/guides/prog_guide/img/eventdev_usage.svg | 1519 +-
 doc/guides/prog_guide/img/malloc_heap.svg | 1348 +-
 doc/guides/prog_guide/img/stateful-op.svg | 116 +
 doc/guides/prog_guide/img/stateless-op-shared.svg | 124 +
 doc/guides/prog_guide/img/stateless-op.svg | 140 +
 doc/guides/prog_guide/img/turbo_tb_decode.svg | 1471 +
 doc/guides/prog_guide/img/turbo_tb_encode.svg | 1948 +
 doc/guides/prog_guide/index.rst | 5 +
 .../prog_guide/link_bonding_poll_mode_drv_lib.rst | 43 +-
 doc/guides/prog_guide/mbuf_lib.rst | 11 +-
 doc/guides/prog_guide/multi_proc_support.rst | 172 +-
 doc/guides/prog_guide/overview.rst | 4 +-
 doc/guides/prog_guide/poll_mode_drv.rst | 60 +-
 doc/guides/prog_guide/rawdev.rst | 2 +-
 doc/guides/prog_guide/rte_flow.rst | 1226 +-
 doc/guides/prog_guide/source_org.rst | 4 +-
 doc/guides/prog_guide/switch_representation.rst | 837 +
 .../prog_guide/traffic_metering_and_policing.rst | 2 +-
 doc/guides/prog_guide/vhost_lib.rst | 128 +-
 doc/guides/rawdevs/dpaa2_cmdif.rst | 144 +
 doc/guides/rawdevs/dpaa2_qdma.rst | 140 +
 doc/guides/rawdevs/ifpga_rawdev.rst | 112 +
 doc/guides/rawdevs/index.rst | 16 +
 doc/guides/rel_notes/deprecation.rst | 188 +-
 doc/guides/rel_notes/index.rst | 2 +
 doc/guides/rel_notes/known_issues.rst | 45 +
 doc/guides/rel_notes/release_16_04.rst | 3 +
 doc/guides/rel_notes/release_16_07.rst | 3 +
 doc/guides/rel_notes/release_16_11.rst | 3 +
 doc/guides/rel_notes/release_17_02.rst | 3 +
 doc/guides/rel_notes/release_17_05.rst | 3 +
 doc/guides/rel_notes/release_17_08.rst | 3 +
 doc/guides/rel_notes/release_17_11.rst | 7 +-
 doc/guides/rel_notes/release_18_02.rst | 3 +
 doc/guides/rel_notes/release_18_05.rst | 983 +
 doc/guides/rel_notes/release_18_08.rst | 549 +
 doc/guides/rel_notes/release_2_2.rst | 3 +
 doc/guides/sample_app_ug/bbdev_app.rst | 11 +-
 doc/guides/sample_app_ug/ethtool.rst | 3 +
 doc/guides/sample_app_ug/flow_classify.rst | 14 +-
 doc/guides/sample_app_ug/flow_filtering.rst | 34 +-
 doc/guides/sample_app_ug/img/ip_pipelines_1.svg | 738 -
 doc/guides/sample_app_ug/img/ip_pipelines_2.svg | 997 -
 doc/guides/sample_app_ug/img/ip_pipelines_3.svg | 826 -
 doc/guides/sample_app_ug/img/master_slave_proc.png | Bin 195232 -> 0 bytes
 doc/guides/sample_app_ug/img/slave_proc_recov.png | Bin 85287 -> 0 bytes
 doc/guides/sample_app_ug/index.rst | 5 +-
 doc/guides/sample_app_ug/ip_pipeline.rst | 1448 +-
 doc/guides/sample_app_ug/kernel_nic_interface.rst | 363 +-
 doc/guides/sample_app_ug/l2_forward_job_stats.rst | 26 +-
 .../sample_app_ug/l2_forward_real_virtual.rst | 26 +-
 doc/guides/sample_app_ug/link_status_intr.rst | 11 +-
 doc/guides/sample_app_ug/multi_process.rst | 406 -
 doc/guides/sample_app_ug/quota_watermark.rst | 2 +-
 doc/guides/sample_app_ug/rxtx_callbacks.rst | 3 -
 doc/guides/sample_app_ug/skeleton.rst | 11 +-
 doc/guides/sample_app_ug/vhost.rst | 30 +-
 doc/guides/sample_app_ug/vhost_crypto.rst | 82 +
 doc/guides/sample_app_ug/vm_power_management.rst | 165 +-
 doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst | 4 -
 doc/guides/testpmd_app_ug/run_app.rst | 21 +-
 doc/guides/testpmd_app_ug/testpmd_funcs.rst | 463 +-
 doc/guides/tools/cryptoperf.rst | 2 +-
 doc/guides/tools/testbbdev.rst | 189 +-
 doc/guides/tools/testeventdev.rst | 60 +
 drivers/Makefile | 19 +-
 drivers/baseband/Makefile | 14 +
 drivers/baseband/null/Makefile | 25 +
 drivers/baseband/null/bbdev_null.c | 356 +
 .../baseband/null/rte_pmd_bbdev_null_version.map | 3 +
 drivers/baseband/turbo_sw/Makefile | 42 +
 drivers/baseband/turbo_sw/bbdev_turbo_software.c | 1307 +
 .../turbo_sw/rte_pmd_bbdev_turbo_sw_version.map | 3 +
 drivers/bbdev/Makefile | 14 -
 drivers/bbdev/null/Makefile | 25 -
 drivers/bbdev/null/bbdev_null.c | 356 -
 drivers/bbdev/null/rte_pmd_bbdev_null_version.map | 3 -
 drivers/bbdev/turbo_sw/Makefile | 42 -
 drivers/bbdev/turbo_sw/bbdev_turbo_software.c | 1217 -
 .../turbo_sw/rte_pmd_bbdev_turbo_sw_version.map | 3 -
 drivers/bus/Makefile | 4 +
 drivers/bus/dpaa/base/fman/fman.c | 6 +-
 drivers/bus/dpaa/base/fman/fman_hw.c | 62 +-
 drivers/bus/dpaa/base/fman/netcfg_layer.c | 5 -
 drivers/bus/dpaa/base/fman/of.c | 44 +
 drivers/bus/dpaa/base/qbman/bman_driver.c | 6 +-
 drivers/bus/dpaa/base/qbman/qman.c | 36 +-
 drivers/bus/dpaa/base/qbman/qman_driver.c | 14 +-
 drivers/bus/dpaa/base/qbman/qman_priv.h | 1 -
 drivers/bus/dpaa/dpaa_bus.c | 148 +-
 drivers/bus/dpaa/include/compat.h | 36 +-
 drivers/bus/dpaa/include/fsl_fman.h | 6 +
 drivers/bus/dpaa/include/fsl_qman.h | 27 +-
 drivers/bus/dpaa/include/of.h | 2 +
 drivers/bus/dpaa/meson.build | 29 +
 drivers/bus/dpaa/rte_bus_dpaa_version.map | 10 +
 drivers/bus/dpaa/rte_dpaa_bus.h | 40 +-
 drivers/bus/dpaa/rte_dpaa_logs.h | 11 +-
 drivers/bus/fslmc/Makefile | 19 +-
 drivers/bus/fslmc/fslmc_bus.c | 179 +-
 drivers/bus/fslmc/fslmc_logs.h | 69 +-
 drivers/bus/fslmc/fslmc_vfio.c | 384 +-
 drivers/bus/fslmc/fslmc_vfio.h | 2 -
 drivers/bus/fslmc/mc/dpdmai.c | 429 +
 drivers/bus/fslmc/mc/fsl_dpdmai.h | 189 +
 drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h | 107 +
 drivers/bus/fslmc/mc/fsl_mc_cmd.h | 2 +-
 drivers/bus/fslmc/meson.build | 27 +
 drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c | 12 +-
 drivers/bus/fslmc/portal/dpaa2_hw_dpci.c | 100 +-
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c | 123 +-
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.h | 8 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 94 +-
 drivers/bus/fslmc/qbman/include/fsl_qbman_base.h | 2 -
 drivers/bus/fslmc/qbman/qbman_portal.c | 17 +-
 drivers/bus/fslmc/qbman/qbman_portal.h | 1 -
 drivers/bus/fslmc/qbman/qbman_sys.h | 30 +-
 drivers/bus/fslmc/qbman/qbman_sys_decl.h | 23 +
 drivers/bus/fslmc/rte_bus_fslmc_version.map | 17 +-
 drivers/bus/fslmc/rte_fslmc.h | 12 +-
 drivers/bus/ifpga/Makefile | 32 +
 drivers/bus/ifpga/ifpga_bus.c | 467 +
 drivers/bus/ifpga/ifpga_common.c | 88 +
 drivers/bus/ifpga/ifpga_common.h | 18 +
 drivers/bus/ifpga/ifpga_logs.h | 31 +
 drivers/bus/ifpga/meson.build | 8 +
 drivers/bus/ifpga/rte_bus_ifpga.h | 149 +
 drivers/bus/ifpga/rte_bus_ifpga_version.map | 10 +
 drivers/bus/meson.build | 2 +-
 drivers/bus/pci/Makefile | 36 +-
 drivers/bus/pci/bsd/Makefile | 32 +-
 drivers/bus/pci/linux/Makefile | 32 +-
 drivers/bus/pci/linux/pci.c | 36 +-
 drivers/bus/pci/linux/pci_uio.c | 47 +-
 drivers/bus/pci/linux/pci_vfio.c | 118 +-
 drivers/bus/pci/meson.build | 3 +
 drivers/bus/pci/pci_common.c | 124 +-
 drivers/bus/pci/private.h | 50 -
 drivers/bus/pci/rte_bus_pci.h | 8 +-
 drivers/bus/vdev/Makefile | 1 +
 drivers/bus/vdev/meson.build | 2 +
 drivers/bus/vdev/rte_bus_vdev.h | 38 +-
 drivers/bus/vdev/vdev.c | 262 +-
 drivers/bus/vmbus/Makefile | 36 +
 drivers/bus/vmbus/linux/Makefile | 3 +
 drivers/bus/vmbus/linux/vmbus_bus.c | 355 +
 drivers/bus/vmbus/linux/vmbus_uio.c | 398 +
 drivers/bus/vmbus/meson.build | 18 +
 drivers/bus/vmbus/private.h | 132 +
 drivers/bus/vmbus/rte_bus_vmbus.h | 407 +
 drivers/bus/vmbus/rte_bus_vmbus_version.map | 29 +
 drivers/bus/vmbus/rte_vmbus_reg.h | 344 +
 drivers/bus/vmbus/vmbus_bufring.c | 244 +
 drivers/bus/vmbus/vmbus_channel.c | 405 +
 drivers/bus/vmbus/vmbus_common.c | 286 +
 drivers/bus/vmbus/vmbus_common_uio.c | 232 +
 drivers/common/Makefile | 11 +
 drivers/common/meson.build | 7 +
 drivers/common/octeontx/Makefile | 24 +
 drivers/common/octeontx/meson.build | 5 +
 drivers/common/octeontx/octeontx_mbox.c | 249 +
 drivers/common/octeontx/octeontx_mbox.h | 37 +
 .../octeontx/rte_common_octeontx_version.map | 7 +
 drivers/common/qat/Makefile | 66 +
 drivers/common/qat/meson.build | 14 +
 .../qat/qat_adf/adf_transport_access_macros.h | 136 +
 drivers/common/qat/qat_adf/icp_qat_fw.h | 318 +
 drivers/common/qat/qat_adf/icp_qat_fw_comp.h | 482 +
 drivers/common/qat/qat_adf/icp_qat_fw_la.h | 361 +
 drivers/common/qat/qat_adf/icp_qat_hw.h | 386 +
 drivers/common/qat/qat_common.c | 123 +
 drivers/common/qat/qat_common.h | 79 +
 drivers/common/qat/qat_device.c | 279 +
 drivers/common/qat/qat_device.h | 102 +
 drivers/common/qat/qat_logs.c | 38 +
 drivers/common/qat/qat_logs.h | 34 +
 drivers/common/qat/qat_qp.c | 642 +
 drivers/common/qat/qat_qp.h | 111 +
 drivers/compress/Makefile | 10 +
 drivers/compress/isal/Makefile | 31 +
 drivers/compress/isal/isal_compress_pmd.c | 694 +
 drivers/compress/isal/isal_compress_pmd_ops.c | 351 +
 drivers/compress/isal/isal_compress_pmd_private.h | 57 +
 drivers/compress/isal/meson.build | 14 +
 drivers/compress/isal/rte_pmd_isal_version.map | 3 +
 drivers/compress/meson.build | 8 +
 drivers/compress/octeontx/Makefile | 30 +
 drivers/compress/octeontx/include/zip_regs.h | 711 +
 drivers/compress/octeontx/meson.build | 9 +
 drivers/compress/octeontx/otx_zip.c | 180 +
 drivers/compress/octeontx/otx_zip.h | 277 +
 drivers/compress/octeontx/otx_zip_pmd.c | 658 +
 .../octeontx/rte_pmd_octeontx_compress_version.map | 3 +
 drivers/compress/qat/meson.build | 18 +
 drivers/compress/qat/qat_comp.c | 393 +
 drivers/compress/qat/qat_comp.h | 65 +
 drivers/compress/qat/qat_comp_pmd.c | 429 +
 drivers/compress/qat/qat_comp_pmd.h | 39 +
 drivers/compress/qat/rte_pmd_qat_version.map | 3 +
 drivers/compress/zlib/Makefile | 29 +
 drivers/compress/zlib/meson.build | 14 +
 drivers/compress/zlib/rte_pmd_zlib_version.map | 3 +
 drivers/compress/zlib/zlib_pmd.c | 436 +
 drivers/compress/zlib/zlib_pmd_ops.c | 307 +
 drivers/compress/zlib/zlib_pmd_private.h | 71 +
 drivers/crypto/Makefile | 9 +-
 drivers/crypto/aesni_gcm/Makefile | 10 +-
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h | 3 +-
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 53 +-
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c | 54 +-
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h | 36 +-
 drivers/crypto/aesni_mb/Makefile | 10 +-
 drivers/crypto/aesni_mb/aesni_mb_ops.h | 31 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 178 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 116 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 78 +-
 drivers/crypto/armv8/rte_armv8_pmd.c | 15 +-
 drivers/crypto/armv8/rte_armv8_pmd_ops.c | 47 +-
 drivers/crypto/armv8/rte_armv8_pmd_private.h | 2 -
 drivers/crypto/ccp/Makefile | 35 +
 drivers/crypto/ccp/ccp_crypto.c | 2951 ++
 drivers/crypto/ccp/ccp_crypto.h | 388 +
 drivers/crypto/ccp/ccp_dev.c | 810 +
 drivers/crypto/ccp/ccp_dev.h | 495 +
 drivers/crypto/ccp/ccp_pci.c | 236 +
 drivers/crypto/ccp/ccp_pci.h | 27 +
 drivers/crypto/ccp/ccp_pmd_ops.c | 833 +
 drivers/crypto/ccp/ccp_pmd_private.h | 107 +
 drivers/crypto/ccp/meson.build | 21 +
 drivers/crypto/ccp/rte_ccp_pmd.c | 397 +
 drivers/crypto/ccp/rte_pmd_ccp_version.map | 4 +
 drivers/crypto/dpaa2_sec/Makefile | 5 -
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 564 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h | 62 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 28 +-
 drivers/crypto/dpaa2_sec/meson.build | 14 +
 drivers/crypto/dpaa_sec/Makefile | 5 -
 drivers/crypto/dpaa_sec/dpaa_sec.c | 412 +-
 drivers/crypto/dpaa_sec/dpaa_sec.h | 36 +-
 drivers/crypto/dpaa_sec/dpaa_sec_log.h | 65 +-
 drivers/crypto/dpaa_sec/meson.build | 13 +
 drivers/crypto/kasumi/rte_kasumi_pmd.c | 50 +-
 drivers/crypto/kasumi/rte_kasumi_pmd_ops.c | 53 +-
 drivers/crypto/kasumi/rte_kasumi_pmd_private.h | 28 +-
 drivers/crypto/meson.build | 4 +-
 drivers/crypto/mrvl/Makefile | 67 -
 drivers/crypto/mrvl/rte_mrvl_compat.h | 51 -
 drivers/crypto/mrvl/rte_mrvl_pmd.c | 857 -
 drivers/crypto/mrvl/rte_mrvl_pmd_ops.c | 778 -
 drivers/crypto/mrvl/rte_mrvl_pmd_private.h | 123 -
 drivers/crypto/mrvl/rte_pmd_mrvl_version.map | 3 -
 drivers/crypto/mvsam/Makefile | 42 +
 drivers/crypto/mvsam/meson.build | 21 +
 drivers/crypto/mvsam/rte_mrvl_compat.h | 23 +
 drivers/crypto/mvsam/rte_mrvl_pmd.c | 937 +
 drivers/crypto/mvsam/rte_mrvl_pmd_ops.c | 722 +
 drivers/crypto/mvsam/rte_mrvl_pmd_private.h | 95 +
 drivers/crypto/mvsam/rte_pmd_mvsam_version.map | 3 +
 drivers/crypto/null/null_crypto_pmd.c | 28 +-
 drivers/crypto/null/null_crypto_pmd_ops.c | 68 +-
 drivers/crypto/null/null_crypto_pmd_private.h | 24 +-
 drivers/crypto/openssl/compat.h | 108 +
 drivers/crypto/openssl/rte_openssl_pmd.c | 527 +-
 drivers/crypto/openssl/rte_openssl_pmd_ops.c | 581 +-
 drivers/crypto/openssl/rte_openssl_pmd_private.h | 55 +-
 drivers/crypto/qat/Makefile | 35 -
 drivers/crypto/qat/README | 7 +
 drivers/crypto/qat/meson.build | 24 +-
 .../qat/qat_adf/adf_transport_access_macros.h | 176 -
 drivers/crypto/qat/qat_adf/icp_qat_fw.h | 316 -
 drivers/crypto/qat/qat_adf/icp_qat_fw_la.h | 404 -
 drivers/crypto/qat/qat_adf/icp_qat_hw.h | 329 -
 drivers/crypto/qat/qat_adf/qat_algs.h | 169 -
 drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 1059 -
 drivers/crypto/qat/qat_crypto.c | 1696 -
 drivers/crypto/qat/qat_crypto.h | 150 -
 drivers/crypto/qat/qat_crypto_capabilities.h | 557 -
 drivers/crypto/qat/qat_logs.h | 49 -
 drivers/crypto/qat/qat_qp.c | 470 -
 drivers/crypto/qat/qat_sym.c | 569 +
 drivers/crypto/qat/qat_sym.h | 174 +
 drivers/crypto/qat/qat_sym_capabilities.h | 557 +
 drivers/crypto/qat/qat_sym_pmd.c | 331 +
 drivers/crypto/qat/qat_sym_pmd.h | 41 +
 drivers/crypto/qat/qat_sym_session.c | 1725 +
 drivers/crypto/qat/qat_sym_session.h | 145 +
 drivers/crypto/qat/rte_pmd_qat_version.map | 3 -
 drivers/crypto/qat/rte_qat_cryptodev.c | 180 -
 drivers/crypto/scheduler/rte_cryptodev_scheduler.c | 101 +-
 drivers/crypto/scheduler/rte_cryptodev_scheduler.h | 3 +-
 drivers/crypto/scheduler/scheduler_failover.c | 4 +-
 drivers/crypto/scheduler/scheduler_multicore.c | 60 +-
 .../crypto/scheduler/scheduler_pkt_size_distr.c | 18 +-
 drivers/crypto/scheduler/scheduler_pmd.c | 182 +-
 drivers/crypto/scheduler/scheduler_pmd_ops.c | 101 +-
 drivers/crypto/scheduler/scheduler_pmd_private.h | 26 +-
 drivers/crypto/scheduler/scheduler_roundrobin.c | 2 +-
 drivers/crypto/snow3g/rte_snow3g_pmd.c | 42 +-
 drivers/crypto/snow3g/rte_snow3g_pmd_ops.c | 51 +-
 drivers/crypto/snow3g/rte_snow3g_pmd_private.h | 30 +-
 drivers/crypto/virtio/Makefile | 35 +
 drivers/crypto/virtio/meson.build | 8 +
 .../virtio/rte_pmd_virtio_crypto_version.map | 3 +
 drivers/crypto/virtio/virtio_crypto_algs.h | 28 +
 drivers/crypto/virtio/virtio_crypto_capabilities.h | 51 +
 drivers/crypto/virtio/virtio_cryptodev.c | 1505 +
 drivers/crypto/virtio/virtio_cryptodev.h | 64 +
 drivers/crypto/virtio/virtio_logs.h | 91 +
 drivers/crypto/virtio/virtio_pci.c | 462 +
 drivers/crypto/virtio/virtio_pci.h | 253 +
 drivers/crypto/virtio/virtio_ring.h | 137 +
 drivers/crypto/virtio/virtio_rxtx.c | 527 +
 drivers/crypto/virtio/virtqueue.c | 43 +
 drivers/crypto/virtio/virtqueue.h | 171 +
 drivers/crypto/zuc/rte_zuc_pmd.c | 152 +-
 drivers/crypto/zuc/rte_zuc_pmd_ops.c | 52 +-
 drivers/crypto/zuc/rte_zuc_pmd_private.h | 29 +-
 drivers/event/Makefile | 4 +
 drivers/event/dpaa/dpaa_eventdev.c | 4 +-
 drivers/event/dpaa/dpaa_eventdev.h | 2 +-
 drivers/event/dpaa/meson.build | 10 +
 drivers/event/dpaa2/Makefile | 6 +-
 drivers/event/dpaa2/dpaa2_eventdev.c | 84 +-
 drivers/event/dpaa2/dpaa2_eventdev.h | 1 -
 drivers/event/dpaa2/dpaa2_eventdev_logs.h | 12 +-
 drivers/event/dpaa2/dpaa2_hw_dpcon.c | 15 +-
 drivers/event/dpaa2/meson.build | 11 +
 drivers/event/meson.build | 2 +-
 drivers/event/octeontx/Makefile | 12 +-
 drivers/event/octeontx/meson.build | 9 +-
 drivers/event/octeontx/ssovf_evdev.c | 77 +-
 drivers/event/octeontx/ssovf_evdev.h | 17 +-
 drivers/event/octeontx/ssovf_evdev_selftest.c | 36 +
 drivers/event/octeontx/ssovf_probe.c | 290 +
 drivers/event/octeontx/ssovf_worker.c | 30 +-
 drivers/event/octeontx/ssovf_worker.h | 2 +-
 drivers/event/octeontx/timvf_evdev.c | 405 +
 drivers/event/octeontx/timvf_evdev.h | 225 +
 drivers/event/octeontx/timvf_probe.c | 148 +
 drivers/event/octeontx/timvf_worker.c | 199 +
 drivers/event/octeontx/timvf_worker.h | 443 +
 drivers/event/opdl/opdl_evdev.c | 7 +-
 drivers/event/opdl/opdl_evdev_init.c | 3 +
 drivers/event/opdl/opdl_ring.c | 97 +-
 drivers/event/opdl/opdl_ring.h | 16 +-
 drivers/event/skeleton/skeleton_eventdev.c | 2 +-
 drivers/event/sw/sw_evdev.c | 151 +-
 drivers/event/sw/sw_evdev_scheduler.c | 17 +-
 drivers/event/sw/sw_evdev_selftest.c | 81 +-
 drivers/event/sw/sw_evdev_worker.c | 6 +-
 drivers/mempool/Makefile | 5 +
 drivers/mempool/bucket/Makefile | 27 +
 drivers/mempool/bucket/meson.build | 9 +
 drivers/mempool/bucket/rte_mempool_bucket.c | 628 +
 .../mempool/bucket/rte_mempool_bucket_version.map | 4 +
 drivers/mempool/dpaa/Makefile | 3 +
 drivers/mempool/dpaa/dpaa_mempool.c | 54 +-
 drivers/mempool/dpaa/dpaa_mempool.h | 2 +-
 drivers/mempool/dpaa/meson.build | 12 +
 drivers/mempool/dpaa/rte_mempool_dpaa_version.map | 1 +
 drivers/mempool/dpaa2/Makefile | 11 +-
 drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 139 +-
 drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h | 38 +
 drivers/mempool/dpaa2/meson.build | 12 +
 drivers/mempool/dpaa2/rte_dpaa2_mempool.h | 53 +
 .../mempool/dpaa2/rte_mempool_dpaa2_version.map | 9 +
 drivers/mempool/meson.build | 2 +-
 drivers/mempool/octeontx/Makefile | 5 +-
 drivers/mempool/octeontx/meson.build | 6 +-
 drivers/mempool/octeontx/octeontx_fpavf.c | 63 +-
 drivers/mempool/octeontx/octeontx_fpavf.h | 9 +
 drivers/mempool/octeontx/octeontx_mbox.c | 214 -
 drivers/mempool/octeontx/octeontx_mbox.h | 36 -
 drivers/mempool/octeontx/octeontx_pool_logs.h | 9 -
 drivers/mempool/octeontx/octeontx_ssovf.c | 271 -
 drivers/mempool/octeontx/rte_mempool_octeontx.c | 64 +-
 .../octeontx/rte_mempool_octeontx_version.map | 6 -
 drivers/meson.build | 25 +-
 drivers/net/Makefile | 13 +-
 drivers/net/af_packet/Makefile | 37 +-
 drivers/net/af_packet/rte_eth_af_packet.c | 175 +-
 drivers/net/ark/Makefile | 32 +-
 drivers/net/ark/ark_ddm.c | 33 +-
 drivers/net/ark/ark_ddm.h | 33 +-
 drivers/net/ark/ark_ethdev.c | 46 +-
 drivers/net/ark/ark_ethdev_rx.c | 33 +-
 drivers/net/ark/ark_ethdev_rx.h | 33 +-
 drivers/net/ark/ark_ethdev_tx.c | 33 +-
 drivers/net/ark/ark_ethdev_tx.h | 33 +-
 drivers/net/ark/ark_ext.h | 33 +-
 drivers/net/ark/ark_global.h | 33 +-
 drivers/net/ark/ark_logs.h | 33 +-
 drivers/net/ark/ark_mpu.c | 33 +-
 drivers/net/ark/ark_mpu.h | 33 +-
 drivers/net/ark/ark_pktchkr.c | 33 +-
 drivers/net/ark/ark_pktchkr.h | 33 +-
 drivers/net/ark/ark_pktdir.c | 33 +-
 drivers/net/ark/ark_pktdir.h | 33 +-
 drivers/net/ark/ark_pktgen.c | 33 +-
 drivers/net/ark/ark_pktgen.h | 33 +-
 drivers/net/ark/ark_rqp.c | 33 +-
 drivers/net/ark/ark_rqp.h | 33 +-
 drivers/net/ark/ark_udm.c | 33 +-
 drivers/net/ark/ark_udm.h | 33 +-
 drivers/net/ark/meson.build | 13 +
 drivers/net/avf/avf_ethdev.c | 56 +-
 drivers/net/avf/avf_rxtx.c | 9 +-
 drivers/net/avf/avf_rxtx.h | 10 +-
 drivers/net/avp/Makefile | 33 +-
 drivers/net/avp/avp_ethdev.c | 69 +-
 drivers/net/avp/avp_logs.h | 32 +-
 drivers/net/avp/meson.build | 5 +
 drivers/net/avp/rte_avp_common.h | 57 +-
 drivers/net/avp/rte_avp_fifo.h | 57 +-
 drivers/net/axgbe/Makefile | 35 +
 drivers/net/axgbe/axgbe_common.h | 1710 +
 drivers/net/axgbe/axgbe_dev.c | 1103 +
 drivers/net/axgbe/axgbe_ethdev.c | 770 +
 drivers/net/axgbe/axgbe_ethdev.h | 586 +
 drivers/net/axgbe/axgbe_i2c.c | 331 +
 drivers/net/axgbe/axgbe_logs.h | 26 +
 drivers/net/axgbe/axgbe_mdio.c | 1066 +
 drivers/net/axgbe/axgbe_phy.h | 192 +
 drivers/net/axgbe/axgbe_phy_impl.c | 2191 ++
 drivers/net/axgbe/axgbe_rxtx.c | 674 +
 drivers/net/axgbe/axgbe_rxtx.h | 186 +
 drivers/net/axgbe/axgbe_rxtx_vec_sse.c | 93 +
 drivers/net/axgbe/meson.build | 19 +
 drivers/net/axgbe/rte_pmd_axgbe_version.map | 4 +
 drivers/net/bnx2x/LICENSE.bnx2x_pmd | 28 -
 drivers/net/bnx2x/Makefile | 8 +-
 drivers/net/bnx2x/bnx2x.c | 64 +-
 drivers/net/bnx2x/bnx2x.h | 9 +-
 drivers/net/bnx2x/bnx2x_ethdev.c | 148 +-
 drivers/net/bnx2x/bnx2x_ethdev.h | 12 +-
 drivers/net/bnx2x/bnx2x_logs.h | 9 +-
 drivers/net/bnx2x/bnx2x_rxtx.c | 20 +-
 drivers/net/bnx2x/bnx2x_rxtx.h | 9 +-
 drivers/net/bnx2x/bnx2x_stats.c | 10 +-
 drivers/net/bnx2x/bnx2x_stats.h | 10 +-
 drivers/net/bnx2x/bnx2x_vfpf.c | 9 +-
 drivers/net/bnx2x/bnx2x_vfpf.h | 9 +-
 drivers/net/bnx2x/ecore_fw_defs.h | 10 +-
 drivers/net/bnx2x/ecore_hsi.h | 10 +-
 drivers/net/bnx2x/ecore_init.h | 10 +-
 drivers/net/bnx2x/ecore_init_ops.h | 10 +-
 drivers/net/bnx2x/ecore_mfw_req.h | 10 +-
 drivers/net/bnx2x/ecore_reg.h | 10 +-
 drivers/net/bnx2x/ecore_sp.c | 10 +-
 drivers/net/bnx2x/ecore_sp.h | 10 +-
 drivers/net/bnx2x/elink.c | 354 +-
 drivers/net/bnx2x/elink.h | 10 +-
 drivers/net/bnx2x/meson.build | 14 +
 drivers/net/bnxt/Makefile | 39 +-
 drivers/net/bnxt/bnxt.h | 74 +-
 drivers/net/bnxt/bnxt_cpr.c | 119 +-
 drivers/net/bnxt/bnxt_cpr.h | 51 +-
 drivers/net/bnxt/bnxt_ethdev.c | 418 +-
 drivers/net/bnxt/bnxt_filter.c | 1113 +-
 drivers/net/bnxt/bnxt_filter.h | 35 +-
 drivers/net/bnxt/bnxt_flow.c | 1171 +
 drivers/net/bnxt/bnxt_hwrm.c | 501 +-
 drivers/net/bnxt/bnxt_hwrm.h | 47 +-
 drivers/net/bnxt/bnxt_irq.c | 62 +-
 drivers/net/bnxt/bnxt_irq.h | 34 +-
 drivers/net/bnxt/bnxt_nvm_defs.h | 11 +-
 drivers/net/bnxt/bnxt_ring.c | 237 +-
 drivers/net/bnxt/bnxt_ring.h | 40 +-
 drivers/net/bnxt/bnxt_rxq.c | 113 +-
 drivers/net/bnxt/bnxt_rxq.h | 40 +-
 drivers/net/bnxt/bnxt_rxr.c | 99 +-
 drivers/net/bnxt/bnxt_rxr.h | 58 +-
 drivers/net/bnxt/bnxt_stats.c | 62 +-
 drivers/net/bnxt/bnxt_stats.h | 34 +-
 drivers/net/bnxt/bnxt_txq.c | 49 +-
 drivers/net/bnxt/bnxt_txq.h | 37 +-
 drivers/net/bnxt/bnxt_txr.c | 190 +-
 drivers/net/bnxt/bnxt_txr.h | 44 +-
 drivers/net/bnxt/bnxt_util.c | 18 +
 drivers/net/bnxt/bnxt_util.h | 11 +
 drivers/net/bnxt/bnxt_vnic.c | 49 +-
 drivers/net/bnxt/bnxt_vnic.h | 41 +-
 drivers/net/bnxt/hsi_struct_def_dpdk.h | 35565 +++++++++++++------
 drivers/net/bnxt/meson.build | 20 +
 drivers/net/bnxt/rte_pmd_bnxt.c | 34 +-
 drivers/net/bnxt/rte_pmd_bnxt.h | 34 +-
 drivers/net/bonding/Makefile | 1 +
 drivers/net/bonding/meson.build | 3 +-
 drivers/net/bonding/rte_eth_bond_8023ad.c | 126 +-
 drivers/net/bonding/rte_eth_bond_alb.c | 4 +-
 drivers/net/bonding/rte_eth_bond_api.c | 118 +-
 drivers/net/bonding/rte_eth_bond_args.c | 11 +-
 drivers/net/bonding/rte_eth_bond_flow.c | 228 +
 drivers/net/bonding/rte_eth_bond_pmd.c | 561 +-
 drivers/net/bonding/rte_eth_bond_private.h | 40 +-
 drivers/net/bonding/rte_pmd_bond_version.map | 1 +
 drivers/net/cxgbe/Makefile | 47 +-
 drivers/net/cxgbe/base/adapter.h | 169 +-
 drivers/net/cxgbe/base/common.h | 205 +-
 drivers/net/cxgbe/base/t4_chip_type.h | 34 +-
 drivers/net/cxgbe/base/t4_hw.c | 1159 +-
 drivers/net/cxgbe/base/t4_hw.h | 38 +-
 drivers/net/cxgbe/base/t4_msg.h | 244 +-
 drivers/net/cxgbe/base/t4_pci_id_tbl.h | 34 +-
 drivers/net/cxgbe/base/t4_regs.h | 172 +-
 drivers/net/cxgbe/base/t4_regs_values.h | 34 +-
 drivers/net/cxgbe/base/t4_tcb.h | 26 +
 drivers/net/cxgbe/base/t4fw_interface.h | 667 +-
 drivers/net/cxgbe/base/t4vf_hw.c | 880 +
 drivers/net/cxgbe/base/t4vf_hw.h | 15 +
 drivers/net/cxgbe/clip_tbl.c | 193 +
 drivers/net/cxgbe/clip_tbl.h | 31 +
 drivers/net/cxgbe/cxgbe.h | 65 +-
 drivers/net/cxgbe/cxgbe_compat.h | 55 +-
 drivers/net/cxgbe/cxgbe_ethdev.c | 436 +-
 drivers/net/cxgbe/cxgbe_filter.c | 1252 +
 drivers/net/cxgbe/cxgbe_filter.h | 235 +
 drivers/net/cxgbe/cxgbe_flow.c | 845 +
 drivers/net/cxgbe/cxgbe_flow.h | 42 +
 drivers/net/cxgbe/cxgbe_main.c | 768 +-
 drivers/net/cxgbe/cxgbe_ofld.h | 89 +
 drivers/net/cxgbe/cxgbe_pfvf.h | 45 +
 drivers/net/cxgbe/cxgbevf_ethdev.c | 201 +
 drivers/net/cxgbe/cxgbevf_main.c | 295 +
 drivers/net/cxgbe/meson.build | 14 +
 drivers/net/cxgbe/sge.c | 627 +-
 drivers/net/dpaa/Makefile | 3 +
 drivers/net/dpaa/dpaa_ethdev.c | 213 +-
 drivers/net/dpaa/dpaa_ethdev.h | 24 +-
 drivers/net/dpaa/dpaa_rxtx.c | 41 +-
 drivers/net/dpaa/meson.build | 14 +
 drivers/net/dpaa/rte_pmd_dpaa.h | 5 +-
 drivers/net/dpaa/rte_pmd_dpaa_version.map | 4 +-
 drivers/net/dpaa2/Makefile | 10 +-
 drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 30 +-
 drivers/net/dpaa2/dpaa2_ethdev.c | 502 +-
 drivers/net/dpaa2/dpaa2_ethdev.h | 6 +
 drivers/net/dpaa2/dpaa2_pmd_logs.h | 41 +
 drivers/net/dpaa2/dpaa2_rxtx.c | 179 +-
 drivers/net/dpaa2/mc/dpni.c | 2 +-
 drivers/net/dpaa2/meson.build | 18 +
 drivers/net/e1000/Makefile | 4 +-
 drivers/net/e1000/base/e1000_82575.c | 5 +
 drivers/net/e1000/base/e1000_defines.h | 1 +
 drivers/net/e1000/base/e1000_phy.h | 8 +
 drivers/net/e1000/e1000_ethdev.h | 27 +-
 drivers/net/e1000/e1000_logs.c | 26 +
 drivers/net/e1000/e1000_logs.h | 6 +
 drivers/net/e1000/em_ethdev.c | 146 +-
 drivers/net/e1000/em_rxtx.c | 113 +-
 drivers/net/e1000/igb_ethdev.c | 246 +-
 drivers/net/e1000/igb_flow.c | 110 +-
 drivers/net/e1000/igb_rxtx.c | 188 +-
 drivers/net/e1000/meson.build | 1 +
 drivers/net/ena/Makefile | 4 +
 drivers/net/ena/base/ena_com.c | 711 +-
 drivers/net/ena/base/ena_com.h | 112 +-
 drivers/net/ena/base/ena_defs/ena_admin_defs.h | 1164 +-
 drivers/net/ena/base/ena_defs/ena_common_defs.h | 8 +-
 drivers/net/ena/base/ena_defs/ena_eth_io_defs.h | 758 +-
 drivers/net/ena/base/ena_defs/ena_gen_info.h | 4 +-
 drivers/net/ena/base/ena_defs/ena_includes.h | 2 -
 drivers/net/ena/base/ena_defs/ena_regs_defs.h | 36 +
 drivers/net/ena/base/ena_eth_com.c | 78 +-
 drivers/net/ena/base/ena_eth_com.h | 10 +-
 drivers/net/ena/base/ena_plat.h | 4 +
 drivers/net/ena/base/ena_plat_dpdk.h | 79 +-
 drivers/net/ena/ena_ethdev.c | 800 +-
 drivers/net/ena/ena_ethdev.h | 32 +-
 drivers/net/ena/meson.build | 11 +
 drivers/net/enic/base/cq_desc.h | 1 +
 drivers/net/enic/base/vnic_dev.c | 88 +-
 drivers/net/enic/base/vnic_dev.h | 12 +-
 drivers/net/enic/base/vnic_devcmd.h | 38 +-
 drivers/net/enic/base/vnic_enet.h | 3 +
 drivers/net/enic/base/vnic_nic.h | 6 +-
 drivers/net/enic/base/vnic_rq.h | 4 +
 drivers/net/enic/base/vnic_wq.c | 9 +-
 drivers/net/enic/base/vnic_wq.h | 13 +-
 drivers/net/enic/enic.h | 142 +-
 drivers/net/enic/enic_clsf.c | 21 +-
 drivers/net/enic/enic_compat.h | 5 +
 drivers/net/enic/enic_ethdev.c | 483 +-
 drivers/net/enic/enic_flow.c | 87 +-
 drivers/net/enic/enic_main.c | 641 +-
 drivers/net/enic/enic_res.c | 94 +-
 drivers/net/enic/enic_res.h | 22 +
 drivers/net/enic/enic_rxtx.c | 397 +-
 drivers/net/enic/meson.build | 19 +
 drivers/net/failsafe/Makefile | 33 +-
 drivers/net/failsafe/failsafe.c | 52 +-
 drivers/net/failsafe/failsafe_args.c | 9 +-
 drivers/net/failsafe/failsafe_eal.c | 61 +-
 drivers/net/failsafe/failsafe_ether.c | 55 +-
 drivers/net/failsafe/failsafe_flow.c | 6 +-
 drivers/net/failsafe/failsafe_intr.c | 2 +-
 drivers/net/failsafe/failsafe_ops.c | 151 +-
 drivers/net/failsafe/failsafe_private.h | 18 +-
 drivers/net/failsafe/failsafe_rxtx.c | 2 +-
 drivers/net/failsafe/meson.build | 23 +
 drivers/net/fm10k/Makefile | 3 +-
 drivers/net/fm10k/fm10k.h | 13 +-
 drivers/net/fm10k/fm10k_ethdev.c | 228 +-
 drivers/net/fm10k/fm10k_rxtx.c | 78 +
 drivers/net/fm10k/fm10k_rxtx_vec.c | 6 +-
 drivers/net/i40e/Makefile | 5 +-
 drivers/net/i40e/base/i40e_register.h | 24 +-
 drivers/net/i40e/i40e_ethdev.c | 1016 +-
 drivers/net/i40e/i40e_ethdev.h | 120 +-
 drivers/net/i40e/i40e_ethdev_vf.c | 325 +-
 drivers/net/i40e/i40e_fdir.c | 3 +-
 drivers/net/i40e/i40e_flow.c | 218 +-
 drivers/net/i40e/i40e_rxtx.c | 383 +-
 drivers/net/i40e/i40e_rxtx.h | 5 +-
 drivers/net/i40e/i40e_rxtx_vec_avx2.c | 2 +-
 drivers/net/i40e/i40e_rxtx_vec_common.h | 4 +-
 drivers/net/i40e/i40e_rxtx_vec_neon.c | 35 +-
 drivers/net/i40e/i40e_vf_representor.c | 531 +
 drivers/net/i40e/meson.build | 12 +-
 drivers/net/i40e/rte_pmd_i40e.c | 86 +-
 drivers/net/i40e/rte_pmd_i40e.h | 18 +
 drivers/net/ifc/Makefile | 35 +
 drivers/net/ifc/base/ifcvf.c | 298 +
 drivers/net/ifc/base/ifcvf.h | 154 +
 drivers/net/ifc/base/ifcvf_osdep.h | 52 +
 drivers/net/ifc/ifcvf_vdpa.c | 793 +
 drivers/net/ifc/meson.build | 8 +
 drivers/net/ifc/rte_pmd_ifc_version.map | 4 +
 drivers/net/ixgbe/Makefile | 6 +-
 drivers/net/ixgbe/ixgbe_ethdev.c | 764 +-
 drivers/net/ixgbe/ixgbe_ethdev.h | 37 +-
 drivers/net/ixgbe/ixgbe_fdir.c | 30 +-
 drivers/net/ixgbe/ixgbe_flow.c | 209 +-
 drivers/net/ixgbe/ixgbe_ipsec.c | 13 +-
 drivers/net/ixgbe/ixgbe_pf.c | 18 +-
 drivers/net/ixgbe/ixgbe_rxtx.c | 434 +-
 drivers/net/ixgbe/ixgbe_rxtx.h | 24 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 5 -
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 2 +-
 drivers/net/ixgbe/ixgbe_vf_representor.c | 231 +
 drivers/net/ixgbe/meson.build | 3 +
 drivers/net/ixgbe/rte_pmd_ixgbe.c | 230 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h | 84 +
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map | 10 +
 drivers/net/kni/Makefile | 1 +
 drivers/net/kni/meson.build | 8 +
 drivers/net/kni/rte_eth_kni.c | 66 +-
 drivers/net/liquidio/Makefile | 2 +-
 drivers/net/liquidio/lio_ethdev.c | 76 +-
 drivers/net/liquidio/meson.build | 8 +
 drivers/net/liquidio/rte_pmd_lio_version.map | 4 -
 drivers/net/liquidio/rte_pmd_liquidio_version.map | 4 +
 drivers/net/meson.build | 29 +-
 drivers/net/mlx4/Makefile | 50 +-
 drivers/net/mlx4/mlx4.c | 255 +-
 drivers/net/mlx4/mlx4.h | 57 +-
 drivers/net/mlx4/mlx4_ethdev.c | 208 +-
 drivers/net/mlx4/mlx4_flow.c | 240 +-
 drivers/net/mlx4/mlx4_flow.h | 6 +-
 drivers/net/mlx4/mlx4_glue.c | 2 +-
 drivers/net/mlx4/mlx4_glue.h | 2 +-
 drivers/net/mlx4/mlx4_intr.c | 2 +-
 drivers/net/mlx4/mlx4_mr.c | 1247 +-
 drivers/net/mlx4/mlx4_mr.h | 122 +
 drivers/net/mlx4/mlx4_prm.h | 18 +-
 drivers/net/mlx4/mlx4_rxq.c | 109 +-
 drivers/net/mlx4/mlx4_rxtx.c | 566 +-
 drivers/net/mlx4/mlx4_rxtx.h | 94 +-
 drivers/net/mlx4/mlx4_txq.c | 130 +-
 drivers/net/mlx4/mlx4_utils.c | 2 +-
 drivers/net/mlx4/mlx4_utils.h | 2 +-
 drivers/net/mlx5/Makefile | 303 +-
 drivers/net/mlx5/mlx5.c | 1528 +-
 drivers/net/mlx5/mlx5.h | 381 +-
 drivers/net/mlx5/mlx5_defs.h | 76 +-
 drivers/net/mlx5/mlx5_ethdev.c | 1216 +-
 drivers/net/mlx5/mlx5_flow.c | 5075 +--
 drivers/net/mlx5/mlx5_glue.c | 44 +-
 drivers/net/mlx5/mlx5_glue.h | 18 +-
 drivers/net/mlx5/mlx5_mac.c | 159 +-
 drivers/net/mlx5/mlx5_mr.c | 1316 +-
 drivers/net/mlx5/mlx5_mr.h | 120 +
 drivers/net/mlx5/mlx5_nl.c | 916 +
 drivers/net/mlx5/mlx5_nl_flow.c | 1248 +
 drivers/net/mlx5/mlx5_prm.h | 53 +-
 drivers/net/mlx5/mlx5_rss.c | 179 +-
 drivers/net/mlx5/mlx5_rxmode.c | 56 +-
 drivers/net/mlx5/mlx5_rxq.c | 1558 +-
 drivers/net/mlx5/mlx5_rxtx.c | 748 +-
 drivers/net/mlx5/mlx5_rxtx.h | 538 +-
 drivers/net/mlx5/mlx5_rxtx_vec.c | 40 +-
 drivers/net/mlx5/mlx5_rxtx_vec.h | 15 +-
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 53 +-
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h | 37 +-
 drivers/net/mlx5/mlx5_socket.c | 179 +-
 drivers/net/mlx5/mlx5_stats.c | 246 +-
 drivers/net/mlx5/mlx5_trigger.c | 299 +-
 drivers/net/mlx5/mlx5_txq.c | 434 +-
 drivers/net/mlx5/mlx5_utils.h | 31 +-
 drivers/net/mlx5/mlx5_vlan.c | 114 +-
 drivers/net/mrvl/Makefile | 68 -
 drivers/net/mrvl/mrvl_ethdev.c | 2511 --
 drivers/net/mrvl/mrvl_ethdev.h | 118 -
 drivers/net/mrvl/mrvl_qos.c | 636 -
 drivers/net/mrvl/mrvl_qos.h | 113 -
 drivers/net/mrvl/rte_pmd_mrvl_version.map | 3 -
 drivers/net/mvpp2/Makefile | 42 +
 drivers/net/mvpp2/meson.build | 25 +
 drivers/net/mvpp2/mrvl_ethdev.c | 2761 ++
 drivers/net/mvpp2/mrvl_ethdev.h | 109 +
 drivers/net/mvpp2/mrvl_flow.c | 2779 ++
 drivers/net/mvpp2/mrvl_qos.c | 894 +
 drivers/net/mvpp2/mrvl_qos.h | 107 +
 drivers/net/mvpp2/rte_pmd_mvpp2_version.map | 3 +
 drivers/net/netvsc/Makefile | 23 +
 drivers/net/netvsc/hn_ethdev.c | 761 +
 drivers/net/netvsc/hn_logs.h | 36 +
 drivers/net/netvsc/hn_nvs.c | 546 +
 drivers/net/netvsc/hn_nvs.h | 229 +
 drivers/net/netvsc/hn_rndis.c | 1099 +
 drivers/net/netvsc/hn_rndis.h | 32 +
 drivers/net/netvsc/hn_rxtx.c | 1334 +
 drivers/net/netvsc/hn_var.h | 158 +
 drivers/net/netvsc/meson.build | 10 +
 drivers/net/netvsc/ndis.h | 378 +
 drivers/net/netvsc/rndis.h | 414 +
 drivers/net/netvsc/rte_pmd_netvsc_version.map | 5 +
 drivers/net/nfp/Makefile | 17 +-
 drivers/net/nfp/meson.build | 16 +
 drivers/net/nfp/nfp_net.c | 873 +-
 drivers/net/nfp/nfp_net_ctrl.h | 6 +
 drivers/net/nfp/nfp_net_eth.h | 82 -
 drivers/net/nfp/nfp_net_logs.h | 9 +-
 drivers/net/nfp/nfp_net_pmd.h | 46 +-
 drivers/net/nfp/nfp_nfpu.c | 108 -
 drivers/net/nfp/nfp_nfpu.h | 55 -
 drivers/net/nfp/nfp_nspu.c | 642 -
 drivers/net/nfp/nfp_nspu.h | 83 -
 drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h | 722 +
 drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h | 35 +
 drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h | 592 +
 drivers/net/nfp/nfpcore/nfp6000/nfp6000.h | 40 +
 drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h | 26 +
 drivers/net/nfp/nfpcore/nfp_cpp.h | 781 +
 drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c | 845 +
 drivers/net/nfp/nfpcore/nfp_cppcore.c | 858 +
 drivers/net/nfp/nfpcore/nfp_crc.c | 49 +
 drivers/net/nfp/nfpcore/nfp_crc.h | 19 +
 drivers/net/nfp/nfpcore/nfp_hwinfo.c | 199 +
 drivers/net/nfp/nfpcore/nfp_hwinfo.h | 85 +
 drivers/net/nfp/nfpcore/nfp_mip.c | 154 +
 drivers/net/nfp/nfpcore/nfp_mip.h | 21 +
 drivers/net/nfp/nfpcore/nfp_mutex.c | 424 +
 drivers/net/nfp/nfpcore/nfp_nffw.c | 235 +
 drivers/net/nfp/nfpcore/nfp_nffw.h | 86 +
 drivers/net/nfp/nfpcore/nfp_nsp.c | 427 +
 drivers/net/nfp/nfpcore/nfp_nsp.h | 304 +
 drivers/net/nfp/nfpcore/nfp_nsp_cmds.c | 109 +
 drivers/net/nfp/nfpcore/nfp_nsp_eth.c | 665 +
 drivers/net/nfp/nfpcore/nfp_resource.c | 266 +
 drivers/net/nfp/nfpcore/nfp_resource.h | 52 +
 drivers/net/nfp/nfpcore/nfp_rtsym.c | 327 +
 drivers/net/nfp/nfpcore/nfp_rtsym.h | 61 +
 drivers/net/nfp/nfpcore/nfp_target.h | 579 +
 drivers/net/null/meson.build | 1 +
 drivers/net/null/rte_eth_null.c | 82 +-
 drivers/net/octeontx/Makefile | 3 +-
 drivers/net/octeontx/base/octeontx_bgx.c | 22 +-
 drivers/net/octeontx/base/octeontx_pkivf.c | 10 +-
 drivers/net/octeontx/base/octeontx_pkivf.h | 10 +-
 drivers/net/octeontx/octeontx_ethdev.c | 124 +-
 drivers/net/octeontx/octeontx_ethdev.h | 4 +
 drivers/net/octeontx/octeontx_rxtx.c | 2 +-
 drivers/net/pcap/meson.build | 20 +-
 drivers/net/pcap/rte_eth_pcap.c | 274 +-
 drivers/net/qede/LICENSE.qede_pmd | 28 -
 drivers/net/qede/Makefile | 10 +-
 drivers/net/qede/base/bcm_osal.c | 24 +-
 drivers/net/qede/base/bcm_osal.h | 8 +-
 drivers/net/qede/base/common_hsi.h | 16 +-
 drivers/net/qede/base/ecore.h | 36 +-
 drivers/net/qede/base/ecore_attn_values.h | 8 +-
 drivers/net/qede/base/ecore_chain.h | 57 +-
 drivers/net/qede/base/ecore_cxt.c | 38 +-
 drivers/net/qede/base/ecore_cxt.h | 12 +-
 drivers/net/qede/base/ecore_cxt_api.h | 8 +-
 drivers/net/qede/base/ecore_dcbx.c | 15 +-
 drivers/net/qede/base/ecore_dcbx.h | 8 +-
 drivers/net/qede/base/ecore_dcbx_api.h | 9 +-
 drivers/net/qede/base/ecore_dev.c | 344 +-
 drivers/net/qede/base/ecore_dev_api.h | 28 +-
 drivers/net/qede/base/ecore_gtt_reg_addr.h | 8 +-
 drivers/net/qede/base/ecore_gtt_values.h | 8 +-
 drivers/net/qede/base/ecore_hsi_common.h | 114 +-
 drivers/net/qede/base/ecore_hsi_debug_tools.h | 116 +-
 drivers/net/qede/base/ecore_hsi_eth.h | 37 +-
 drivers/net/qede/base/ecore_hsi_init_func.h | 44 +-
 drivers/net/qede/base/ecore_hsi_init_tool.h | 115 +-
 drivers/net/qede/base/ecore_hw.c | 130 +-
 drivers/net/qede/base/ecore_hw.h | 12 +-
 drivers/net/qede/base/ecore_hw_defs.h | 8 +-
 drivers/net/qede/base/ecore_init_fw_funcs.c | 218 +-
 drivers/net/qede/base/ecore_init_fw_funcs.h | 41 +-
 drivers/net/qede/base/ecore_init_ops.c | 40 +-
 drivers/net/qede/base/ecore_init_ops.h | 8 +-
 drivers/net/qede/base/ecore_int.c | 56 +-
 drivers/net/qede/base/ecore_int.h | 11 +-
 drivers/net/qede/base/ecore_int_api.h | 8 +-
 drivers/net/qede/base/ecore_iov_api.h | 19 +-
 drivers/net/qede/base/ecore_iro.h | 8 +-
 drivers/net/qede/base/ecore_iro_values.h | 72 +-
 drivers/net/qede/base/ecore_l2.c | 73 +-
 drivers/net/qede/base/ecore_l2.h | 8 +-
 drivers/net/qede/base/ecore_l2_api.h | 10 +-
 drivers/net/qede/base/ecore_mcp.c | 228 +-
 drivers/net/qede/base/ecore_mcp.h | 8 +-
 drivers/net/qede/base/ecore_mcp_api.h | 54 +-
 drivers/net/qede/base/ecore_mng_tlv.c | 6 +
 drivers/net/qede/base/ecore_proto_if.h | 11 +-
 drivers/net/qede/base/ecore_rt_defs.h | 599 +-
 drivers/net/qede/base/ecore_sp_api.h | 8 +-
 drivers/net/qede/base/ecore_sp_commands.c | 37 +-
 drivers/net/qede/base/ecore_sp_commands.h | 8 +-
 drivers/net/qede/base/ecore_spq.c | 44 +-
 drivers/net/qede/base/ecore_spq.h | 21 +-
 drivers/net/qede/base/ecore_sriov.c | 96 +-
 drivers/net/qede/base/ecore_sriov.h | 8 +-
 drivers/net/qede/base/ecore_status.h | 8 +-
 drivers/net/qede/base/ecore_utils.h | 8 +-
 drivers/net/qede/base/ecore_vf.c | 46 +-
 drivers/net/qede/base/ecore_vf.h | 17 +-
 drivers/net/qede/base/ecore_vf_api.h | 8 +-
 drivers/net/qede/base/ecore_vfpf_if.h | 32 +-
 drivers/net/qede/base/eth_common.h | 11 +-
 drivers/net/qede/base/mcp_public.h | 11 +-
 drivers/net/qede/base/nvm_cfg.h | 8 +-
 drivers/net/qede/base/reg_addr.h | 18 +-
 drivers/net/qede/qede_ethdev.c | 618 +-
 drivers/net/qede/qede_ethdev.h | 18 +-
 drivers/net/qede/qede_fdir.c | 15 +-
 drivers/net/qede/qede_if.h | 8 +-
 drivers/net/qede/qede_logs.h | 8 +-
 drivers/net/qede/qede_main.c | 24 +-
 drivers/net/qede/qede_rxtx.c | 51 +-
 drivers/net/qede/qede_rxtx.h | 14 +-
 drivers/net/ring/meson.build | 1 +
 drivers/net/ring/rte_eth_ring.c | 65 +-
 drivers/net/sfc/Makefile | 7 +-
 drivers/net/sfc/base/ef10_ev.c | 112 +-
 drivers/net/sfc/base/ef10_filter.c | 163 +-
 drivers/net/sfc/base/ef10_image.c | 885 +
 drivers/net/sfc/base/ef10_impl.h | 74 +-
 drivers/net/sfc/base/ef10_intr.c | 13 +-
 drivers/net/sfc/base/ef10_mac.c | 193 +-
 drivers/net/sfc/base/ef10_mcdi.c | 25 +-
 drivers/net/sfc/base/ef10_nic.c | 923 +-
 drivers/net/sfc/base/ef10_nvram.c | 33 +-
 drivers/net/sfc/base/ef10_phy.c | 126 +-
 drivers/net/sfc/base/ef10_rx.c | 184 +-
 drivers/net/sfc/base/ef10_signed_image_layout.h | 62 +
 drivers/net/sfc/base/ef10_tlv_layout.h | 115 +-
 drivers/net/sfc/base/ef10_tx.c | 67 +-
 drivers/net/sfc/base/ef10_vpd.c | 37 +-
 drivers/net/sfc/base/efx.h | 425 +-
 drivers/net/sfc/base/efx_bootcfg.c | 89 +-
 drivers/net/sfc/base/efx_check.h | 122 +-
 drivers/net/sfc/base/efx_ev.c | 10 +-
 drivers/net/sfc/base/efx_filter.c | 71 +-
 drivers/net/sfc/base/efx_impl.h | 134 +-
 drivers/net/sfc/base/efx_intr.c | 21 +-
 drivers/net/sfc/base/efx_lic.c | 26 +-
 drivers/net/sfc/base/efx_mac.c | 38 +-
 drivers/net/sfc/base/efx_mcdi.c | 95 +-
 drivers/net/sfc/base/efx_mcdi.h | 4 +-
 drivers/net/sfc/base/efx_mon.c | 6 +-
 drivers/net/sfc/base/efx_nic.c | 304 +-
 drivers/net/sfc/base/efx_nvram.c | 10 +-
 drivers/net/sfc/base/efx_phy.c | 14 +-
 drivers/net/sfc/base/efx_port.c | 5 +-
 drivers/net/sfc/base/efx_regs_ef10.h | 230 +-
 drivers/net/sfc/base/efx_regs_mcdi.h | 9493 +++--
 drivers/net/sfc/base/efx_regs_mcdi_aoe.h | 2914 ++
 drivers/net/sfc/base/efx_rx.c | 245 +-
 drivers/net/sfc/base/efx_sram.c | 14 +-
 drivers/net/sfc/base/efx_tunnel.c | 32 +-
 drivers/net/sfc/base/efx_tx.c | 56 +-
 drivers/net/sfc/base/efx_types.h | 38 +-
 drivers/net/sfc/base/efx_vpd.c | 10 +-
 drivers/net/sfc/base/hunt_nic.c | 172 +-
 drivers/net/sfc/base/mcdi_mon.c | 9 +
 drivers/net/sfc/base/medford2_impl.h | 35 +
 drivers/net/sfc/base/medford2_nic.c | 162 +
 drivers/net/sfc/base/medford_nic.c | 240 +-
 drivers/net/sfc/base/meson.build | 4 +-
 drivers/net/sfc/base/siena_flash.h | 9 +-
 drivers/net/sfc/base/siena_mac.c | 31 +-
 drivers/net/sfc/base/siena_mcdi.c | 12 +-
 drivers/net/sfc/base/siena_nic.c | 24 +
 drivers/net/sfc/base/siena_nvram.c | 17 +-
 drivers/net/sfc/base/siena_phy.c | 9 +-
 drivers/net/sfc/base/siena_vpd.c | 25 +-
 drivers/net/sfc/efsys.h | 17 +-
 drivers/net/sfc/meson.build | 7 +-
 drivers/net/sfc/sfc.c | 350 +-
 drivers/net/sfc/sfc.h | 41 +-
 drivers/net/sfc/sfc_dp.c | 5 +-
 drivers/net/sfc/sfc_dp.h | 9 +-
 drivers/net/sfc/sfc_dp_rx.h | 30 +-
 drivers/net/sfc/sfc_dp_tx.h | 4 +-
 drivers/net/sfc/sfc_ef10.h | 34 +
 drivers/net/sfc/sfc_ef10_essb_rx.c | 723 +
 drivers/net/sfc/sfc_ef10_rx.c | 184 +-
 drivers/net/sfc/sfc_ef10_rx_ev.h | 175 +
 drivers/net/sfc/sfc_ef10_tx.c | 2 +-
 drivers/net/sfc/sfc_ethdev.c | 232 +-
 drivers/net/sfc/sfc_ev.c | 56 +-
 drivers/net/sfc/sfc_filter.c | 14 +
 drivers/net/sfc/sfc_filter.h | 10 +
 drivers/net/sfc/sfc_flow.c | 1340 +-
 drivers/net/sfc/sfc_flow.h | 23 +-
 drivers/net/sfc/sfc_intr.c | 6 +-
 drivers/net/sfc/sfc_kvargs.c | 4 +-
 drivers/net/sfc/sfc_kvargs.h | 24 +-
 drivers/net/sfc/sfc_log.h | 77 +-
 drivers/net/sfc/sfc_mcdi.c | 25 +-
 drivers/net/sfc/sfc_port.c | 68 +-
 drivers/net/sfc/sfc_rx.c | 437 +-
 drivers/net/sfc/sfc_rx.h | 11 +-
 drivers/net/sfc/sfc_tso.c | 3 +-
 drivers/net/sfc/sfc_tweak.h | 8 +
 drivers/net/sfc/sfc_tx.c | 98 +-
 drivers/net/sfc/sfc_tx.h | 1 -
 drivers/net/softnic/Makefile | 23 +-
 drivers/net/softnic/conn.c | 332 +
 drivers/net/softnic/conn.h | 49 +
 drivers/net/softnic/firmware.cli | 21 +
 drivers/net/softnic/hash_func.h | 359 +
 drivers/net/softnic/hash_func_arm64.h | 261 +
 drivers/net/softnic/meson.build | 18 +
 drivers/net/softnic/parser.c | 703 +
 drivers/net/softnic/parser.h | 68 +
 drivers/net/softnic/rte_eth_softnic.c | 758 +-
 drivers/net/softnic/rte_eth_softnic.h | 49 +-
 drivers/net/softnic/rte_eth_softnic_action.c | 389 +
 drivers/net/softnic/rte_eth_softnic_cli.c | 5259 +++
 drivers/net/softnic/rte_eth_softnic_internals.h | 812 +-
 drivers/net/softnic/rte_eth_softnic_link.c | 98 +
 drivers/net/softnic/rte_eth_softnic_mempool.c | 103 +
 drivers/net/softnic/rte_eth_softnic_pipeline.c | 966 +
 drivers/net/softnic/rte_eth_softnic_swq.c | 114 +
 drivers/net/softnic/rte_eth_softnic_tap.c | 118 +
 drivers/net/softnic/rte_eth_softnic_thread.c | 2929 ++
 drivers/net/softnic/rte_eth_softnic_tm.c | 345 +-
 .../net/softnic/rte_pmd_eth_softnic_version.map | 7 -
 drivers/net/softnic/rte_pmd_softnic_version.map | 13 +
 drivers/net/szedata2/Makefile | 33 +-
 drivers/net/szedata2/meson.build | 7 +
 drivers/net/szedata2/rte_eth_szedata2.c | 929 +-
 drivers/net/szedata2/rte_eth_szedata2.h | 37 +-
 drivers/net/szedata2/szedata2_iobuf.c | 203 -
 drivers/net/szedata2/szedata2_iobuf.h | 356 -
 drivers/net/szedata2/szedata2_logs.h | 22 +
 drivers/net/tap/Makefile | 2 +-
 drivers/net/tap/rte_eth_tap.c | 1009 +-
 drivers/net/tap/rte_eth_tap.h | 16 +-
 drivers/net/tap/tap_bpf.h | 2 +-
 drivers/net/tap/tap_bpf_api.c | 2 +-
 drivers/net/tap/tap_bpf_insns.h | 2 +-
 drivers/net/tap/tap_bpf_program.c | 4 +-
 drivers/net/tap/tap_flow.c | 144 +-
 drivers/net/tap/tap_flow.h | 2 +-
 drivers/net/tap/tap_intr.c | 4 +-
 drivers/net/tap/tap_log.h | 10 +
 drivers/net/tap/tap_netlink.c | 20 +-
 drivers/net/tap/tap_netlink.h | 2 +-
 drivers/net/tap/tap_rss.h | 8 +-
 drivers/net/tap/tap_tcmsgs.c | 11 +-
 drivers/net/tap/tap_tcmsgs.h | 2 +-
 drivers/net/thunderx/base/nicvf_hw.c | 13 +
 drivers/net/thunderx/base/nicvf_hw.h | 1 +
 drivers/net/thunderx/base/nicvf_hw_defs.h | 5 +-
 drivers/net/thunderx/nicvf_ethdev.c | 302 +-
 drivers/net/thunderx/nicvf_ethdev.h | 2 +
 drivers/net/thunderx/nicvf_rxtx.c | 144 +-
 drivers/net/thunderx/nicvf_rxtx.h | 24 +-
 drivers/net/thunderx/nicvf_struct.h | 29 +-
 drivers/net/vdev_netvsc/Makefile | 2 +-
 drivers/net/vdev_netvsc/vdev_netvsc.c | 275 +-
 drivers/net/vhost/meson.build | 8 +
 drivers/net/vhost/rte_eth_vhost.c | 396 +-
 drivers/net/vhost/rte_eth_vhost.h | 35 +-
 drivers/net/virtio/meson.build | 27 +
 drivers/net/virtio/virtio_ethdev.c | 240 +-
 drivers/net/virtio/virtio_ethdev.h | 11 +-
 drivers/net/virtio/virtio_pci.h | 12 +-
 drivers/net/virtio/virtio_rxtx.c | 698 +-
 drivers/net/virtio/virtio_rxtx_simple.c | 67 -
 drivers/net/virtio/virtio_rxtx_simple.h | 49 -
 drivers/net/virtio/virtio_user/vhost_kernel.c | 86 +-
 drivers/net/virtio/virtio_user/vhost_kernel_tap.c | 14 +-
 drivers/net/virtio/virtio_user/vhost_kernel_tap.h | 3 +-
 drivers/net/virtio/virtio_user/vhost_user.c | 76 +-
 drivers/net/virtio/virtio_user/virtio_user_dev.c | 201 +-
 drivers/net/virtio/virtio_user/virtio_user_dev.h | 12 +-
 drivers/net/virtio/virtio_user_ethdev.c | 158 +-
 drivers/net/virtio/virtqueue.c | 8 +
 drivers/net/virtio/virtqueue.h | 2 +
 drivers/net/vmxnet3/Makefile | 4 +-
 drivers/net/vmxnet3/base/upt1_defs.h | 7 +-
 drivers/net/vmxnet3/base/vmxnet3_defs.h | 34 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.c | 156 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.h | 1 +
 drivers/net/vmxnet3/vmxnet3_ring.h | 2 +-
 drivers/net/vmxnet3/vmxnet3_rxtx.c | 212 +-
 drivers/raw/Makefile | 5 +
 drivers/raw/dpaa2_cmdif/Makefile | 36 +
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c | 297 +
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h | 46 +
 drivers/raw/dpaa2_cmdif/meson.build | 10 +
 drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h | 35 +
 .../dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map | 4 +
 drivers/raw/dpaa2_qdma/Makefile | 37 +
 drivers/raw/dpaa2_qdma/dpaa2_qdma.c | 1001 +
 drivers/raw/dpaa2_qdma/dpaa2_qdma.h | 150 +
 drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h | 46 +
 drivers/raw/dpaa2_qdma/meson.build | 10 +
 drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h | 286 +
 .../raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map | 20 +
 drivers/raw/ifpga_rawdev/Makefile | 36 +
 drivers/raw/ifpga_rawdev/base/Makefile | 26 +
 drivers/raw/ifpga_rawdev/base/README | 31 +
 drivers/raw/ifpga_rawdev/base/ifpga_api.c | 294 +
 drivers/raw/ifpga_rawdev/base/ifpga_api.h | 28 +
 drivers/raw/ifpga_rawdev/base/ifpga_compat.h | 58 +
 drivers/raw/ifpga_rawdev/base/ifpga_defines.h | 1663 +
 drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c | 821 +
 drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h | 11 +
 drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c | 253 +
 drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h | 168 +
 drivers/raw/ifpga_rawdev/base/ifpga_fme.c | 734 +
 drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c | 301 +
 drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c | 381 +
 drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c | 715 +
 drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c | 352 +
 drivers/raw/ifpga_rawdev/base/ifpga_hw.h | 127 +
 drivers/raw/ifpga_rawdev/base/ifpga_port.c | 388 +
 drivers/raw/ifpga_rawdev/base/ifpga_port_error.c | 144 +
 drivers/raw/ifpga_rawdev/base/meson.build | 34 +
 drivers/raw/ifpga_rawdev/base/opae_debug.c | 99 +
 drivers/raw/ifpga_rawdev/base/opae_debug.h | 19 +
 drivers/raw/ifpga_rawdev/base/opae_hw_api.c | 381 +
 drivers/raw/ifpga_rawdev/base/opae_hw_api.h | 253 +
 drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c | 145 +
 drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h | 279 +
 drivers/raw/ifpga_rawdev/base/opae_osdep.h | 79 +
 .../ifpga_rawdev/base/osdep_raw/osdep_generic.h | 75 +
 .../ifpga_rawdev/base/osdep_rte/osdep_generic.h | 45 +
 drivers/raw/ifpga_rawdev/ifpga_rawdev.c | 617 +
 drivers/raw/ifpga_rawdev/ifpga_rawdev.h | 37 +
 drivers/raw/ifpga_rawdev/meson.build | 15 +
 .../ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map | 4 +
 drivers/raw/meson.build | 7 +
 drivers/raw/skeleton_rawdev/Makefile | 1 -
 drivers/raw/skeleton_rawdev/meson.build | 6 +
 drivers/raw/skeleton_rawdev/skeleton_rawdev.c | 18 +-
 drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c | 16 +-
 examples/Makefile | 34 +-
 examples/bbdev_app/main.c | 16 +-
 examples/bond/Makefile | 3 +
 examples/bond/main.c | 25 +-
 examples/bond/meson.build | 1 +
 examples/distributor/main.c | 42 +-
 examples/ethtool/Makefile | 2 +-
 examples/ethtool/ethtool-app/ethapp.c | 74 +-
 examples/ethtool/ethtool-app/main.c | 4 +-
 examples/ethtool/lib/Makefile | 3 +-
 examples/ethtool/lib/rte_ethtool.c | 46 +-
 examples/ethtool/lib/rte_ethtool.h | 34 +
 examples/ethtool/meson.build | 10 +
 examples/eventdev_pipeline/main.c | 38 +-
 .../eventdev_pipeline/pipeline_worker_generic.c | 7 +-
 examples/eventdev_pipeline/pipeline_worker_tx.c | 6 +-
 examples/exception_path/main.c | 33 +-
 examples/flow_classify/flow_classify.c | 19 +-
 examples/flow_filtering/Makefile | 33 +-
 examples/flow_filtering/flow_blocks.c | 32 +-
 examples/flow_filtering/main.c | 53 +-
 examples/ip_fragmentation/main.c | 16 +-
 examples/ip_pipeline/Makefile | 55 +-
 examples/ip_pipeline/action.c | 358 +
 examples/ip_pipeline/action.h | 77 +
 examples/ip_pipeline/app.h | 1401 -
 examples/ip_pipeline/cli.c | 5240 +++
 examples/ip_pipeline/cli.h | 18 +
 examples/ip_pipeline/common.h | 12 +
 examples/ip_pipeline/config/action.cfg | 68 -
 examples/ip_pipeline/config/action.sh | 119 -
 examples/ip_pipeline/config/action.txt | 8 -
 examples/ip_pipeline/config/diagram-generator.py | 317 -
 .../ip_pipeline/config/edge_router_downstream.cfg | 97 -
 .../ip_pipeline/config/edge_router_downstream.sh | 13 -
 .../ip_pipeline/config/edge_router_upstream.cfg | 124 -
 .../ip_pipeline/config/edge_router_upstream.sh | 33 -
 examples/ip_pipeline/config/firewall.cfg | 68 -
 examples/ip_pipeline/config/firewall.sh | 13 -
 examples/ip_pipeline/config/firewall.txt | 9 -
 examples/ip_pipeline/config/flow.cfg | 72 -
 examples/ip_pipeline/config/flow.sh | 25 -
 examples/ip_pipeline/config/flow.txt | 17 -
 examples/ip_pipeline/config/ip_pipeline.cfg | 9 -
 examples/ip_pipeline/config/ip_pipeline.sh | 5 -
 examples/ip_pipeline/config/kni.cfg | 67 -
 examples/ip_pipeline/config/l2fwd.cfg | 58 -
 examples/ip_pipeline/config/l3fwd.cfg | 68 -
 examples/ip_pipeline/config/l3fwd.sh | 33 -
 examples/ip_pipeline/config/l3fwd_arp.cfg | 70 -
 examples/ip_pipeline/config/l3fwd_arp.sh | 43 -
 examples/ip_pipeline/config/network_layers.cfg | 227 -
 examples/ip_pipeline/config/network_layers.sh | 79 -
 .../ip_pipeline/config/pipeline-to-core-mapping.py | 906 -
 examples/ip_pipeline/config/tap.cfg | 64 -
 examples/ip_pipeline/config/tm_profile.cfg | 105 -
 examples/ip_pipeline/config_check.c | 488 -
 examples/ip_pipeline/config_parse.c | 3395 -
 examples/ip_pipeline/config_parse_tm.c | 419 -
 examples/ip_pipeline/conn.c | 329 +
 examples/ip_pipeline/conn.h | 47 +
 examples/ip_pipeline/cpu_core_map.c | 471 -
 examples/ip_pipeline/cpu_core_map.h | 40 -
 examples/ip_pipeline/examples/firewall.cli | 59 +
 examples/ip_pipeline/examples/flow.cli | 60 +
 examples/ip_pipeline/examples/kni.cli | 69 +
 examples/ip_pipeline/examples/l2fwd.cli | 51 +
 examples/ip_pipeline/examples/route.cli | 60 +
 examples/ip_pipeline/examples/route_ecmp.cli | 57 +
 examples/ip_pipeline/examples/rss.cli | 112 +
 examples/ip_pipeline/examples/tap.cli | 66 +
 examples/ip_pipeline/hash_func.h | 357 +
 examples/ip_pipeline/hash_func_arm64.h | 232 +
 examples/ip_pipeline/init.c | 1927 -
 examples/ip_pipeline/kni.c | 175 +
 examples/ip_pipeline/kni.h | 46 +
 examples/ip_pipeline/link.c | 267 +
 examples/ip_pipeline/link.h | 66 +
 examples/ip_pipeline/main.c | 259 +-
 examples/ip_pipeline/mempool.c | 82 +
 examples/ip_pipeline/mempool.h | 40 +
 examples/ip_pipeline/meson.build | 36 +-
 examples/ip_pipeline/parser.c | 17 +-
 examples/ip_pipeline/parser.h | 8 +
 examples/ip_pipeline/pipeline.c | 988 +
 examples/ip_pipeline/pipeline.h | 389 +-
 examples/ip_pipeline/pipeline/hash_func.h | 356 -
 examples/ip_pipeline/pipeline/hash_func_arm64.h | 261 -
 .../ip_pipeline/pipeline/pipeline_actions_common.h | 202 -
 examples/ip_pipeline/pipeline/pipeline_common_be.c | 176 -
 examples/ip_pipeline/pipeline/pipeline_common_be.h | 134 -
 examples/ip_pipeline/pipeline/pipeline_common_fe.c | 1455 -
 examples/ip_pipeline/pipeline/pipeline_common_fe.h | 231 -
 examples/ip_pipeline/pipeline/pipeline_firewall.c | 1421 -
 examples/ip_pipeline/pipeline/pipeline_firewall.h | 60 -
 .../ip_pipeline/pipeline/pipeline_firewall_be.c | 856 -
 .../ip_pipeline/pipeline/pipeline_firewall_be.h | 147 -
 .../ip_pipeline/pipeline/pipeline_flow_actions.c | 1286 -
 .../ip_pipeline/pipeline/pipeline_flow_actions.h | 60 -
 .../pipeline/pipeline_flow_actions_be.c | 960 -
 .../pipeline/pipeline_flow_actions_be.h | 139 -
 .../pipeline/pipeline_flow_classification.c | 1878 -
 .../pipeline/pipeline_flow_classification.h | 106 -
 .../pipeline/pipeline_flow_classification_be.c | 723 -
 .../pipeline/pipeline_flow_classification_be.h | 113 -
 examples/ip_pipeline/pipeline/pipeline_master.c | 20 -
 examples/ip_pipeline/pipeline/pipeline_master.h | 12 -
 examples/ip_pipeline/pipeline/pipeline_master_be.c | 141 -
 examples/ip_pipeline/pipeline/pipeline_master_be.h | 12 -
 .../ip_pipeline/pipeline/pipeline_passthrough.c | 45 -
 .../ip_pipeline/pipeline/pipeline_passthrough.h | 12 -
 .../ip_pipeline/pipeline/pipeline_passthrough_be.c | 929 -
 .../ip_pipeline/pipeline/pipeline_passthrough_be.h | 44 -
 examples/ip_pipeline/pipeline/pipeline_routing.c | 1613 -
 examples/ip_pipeline/pipeline/pipeline_routing.h | 71 -
 .../ip_pipeline/pipeline/pipeline_routing_be.c | 1966 -
 .../ip_pipeline/pipeline/pipeline_routing_be.h | 283 -
 examples/ip_pipeline/pipeline_be.h | 322 -
 examples/ip_pipeline/swq.c | 76 +
 examples/ip_pipeline/swq.h | 37 +
 examples/ip_pipeline/tap.c | 102 +
 examples/ip_pipeline/tap.h | 29 +
 examples/ip_pipeline/thread.c | 3075 +-
 examples/ip_pipeline/thread.h | 75 +-
 examples/ip_pipeline/thread_fe.c | 457 -
 examples/ip_pipeline/thread_fe.h | 72 -
 examples/ip_pipeline/tmgr.c | 229 +
 examples/ip_pipeline/tmgr.h | 70 +
 examples/ip_reassembly/main.c | 26 +-
 examples/ipsec-secgw/ipsec-secgw.c | 248 +-
 examples/ipsec-secgw/ipsec.c | 75 +-
 examples/ipsec-secgw/ipsec.h | 5 +-
 examples/ipsec-secgw/parser.c | 23 +-
 examples/ipsec-secgw/sa.c | 2 +-
 examples/ipv4_multicast/main.c | 12 +-
 examples/kni/Makefile | 2 +-
 examples/kni/main.c | 37 +-
 examples/kni/meson.build | 2 +
 examples/l2fwd-cat/l2fwd-cat.c | 11 +-
 examples/l2fwd-cat/meson.build | 4 +-
 examples/l2fwd-crypto/Makefile | 6 +
 examples/l2fwd-crypto/main.c | 593 +-
 examples/l2fwd-jobstats/main.c | 23 +-
 examples/l2fwd-keepalive/main.c | 23 +-
 examples/l2fwd/main.c | 25 +-
 examples/l3fwd-acl/main.c | 32 +-
 examples/l3fwd-power/Makefile | 6 +-
 examples/l3fwd-power/main.c | 111 +-
 examples/l3fwd-power/main.h | 20 +
 examples/l3fwd-power/meson.build | 7 +-
 examples/l3fwd-power/perf_core.c | 230 +
 examples/l3fwd-power/perf_core.h | 12 +
 examples/l3fwd-vf/main.c | 29 +-
 examples/l3fwd/l3fwd_common.h | 35 +-
 examples/l3fwd/l3fwd_em.c | 1 -
 examples/l3fwd/l3fwd_em_hlm.h | 35 +-
 examples/l3fwd/l3fwd_em_hlm_neon.h | 35 +-
 examples/l3fwd/l3fwd_lpm.c | 1 -
 examples/l3fwd/l3fwd_lpm_neon.h | 35 +-
 examples/l3fwd/l3fwd_neon.h | 36 +-
 examples/l3fwd/main.c | 34 +-
 examples/link_status_interrupt/main.c | 4 +-
 examples/load_balancer/config.c | 2 +-
 examples/load_balancer/init.c | 14 +-
 examples/meson.build | 55 +-
 .../client_server_mp/mp_client/client.c | 2 +-
 .../client_server_mp/mp_server/Makefile | 3 +-
 .../client_server_mp/mp_server/init.c | 2 +-
 examples/multi_process/l2fwd_fork/Makefile | 22 -
 examples/multi_process/l2fwd_fork/flib.c | 280 -
 examples/multi_process/l2fwd_fork/flib.h | 120 -
 examples/multi_process/l2fwd_fork/main.c | 1261 -
 examples/multi_process/meson.build | 10 +
 examples/multi_process/symmetric_mp/main.c | 22 +-
 examples/netmap_compat/bridge/Makefile | 2 +-
 examples/netmap_compat/bridge/bridge.c | 3 +-
 examples/netmap_compat/lib/compat_netmap.c | 1 -
 examples/netmap_compat/meson.build | 10 +
 examples/packet_ordering/main.c | 34 +-
 examples/performance-thread/common/arch/x86/ctx.c | 62 +-
 examples/performance-thread/common/lthread.c | 64 +-
 examples/performance-thread/common/lthread.h | 62 +-
 examples/performance-thread/common/lthread_api.h | 64 +-
 examples/performance-thread/common/lthread_cond.c | 61 +-
 examples/performance-thread/common/lthread_cond.h | 61 +-
 examples/performance-thread/common/lthread_int.h | 61 +-
 examples/performance-thread/common/lthread_pool.h | 68 +-
 examples/performance-thread/common/lthread_queue.h | 68 +-
 examples/performance-thread/common/lthread_sched.c | 62 +-
 examples/performance-thread/common/lthread_sched.h | 61 +-
 examples/performance-thread/l3fwd-thread/main.c | 52 +-
 examples/performance-thread/meson.build | 10 +
 examples/performance-thread/pthread_shim/main.c | 6 +-
 .../performance-thread/pthread_shim/pthread_shim.c | 4 +-
 examples/ptpclient/ptpclient.c | 9 +-
 examples/qos_meter/Makefile | 2 +
 examples/qos_meter/main.c | 64 +-
 examples/qos_meter/main.h | 32 +-
 examples/qos_sched/Makefile | 2 +-
 examples/qos_sched/init.c | 4 +-
 examples/quota_watermark/meson.build | 10 +
 examples/quota_watermark/qw/init.c | 4 +-
 examples/quota_watermark/qw/main.c | 14 +-
 examples/rxtx_callbacks/main.c | 15 +-
 examples/server_node_efd/meson.build | 10 +
 examples/server_node_efd/node/node.c | 2 +-
 examples/server_node_efd/server/Makefile | 2 +-
 examples/server_node_efd/server/init.c | 4 +-
 examples/service_cores/Makefile | 3 -
 examples/service_cores/meson.build | 1 -
 examples/skeleton/basicfwd.c | 13 +-
 examples/tep_termination/Makefile | 5 +-
 examples/tep_termination/main.c | 27 +-
 examples/tep_termination/meson.build | 4 +
 examples/tep_termination/vxlan_setup.c | 4 +-
 examples/vhost/Makefile | 5 +-
 examples/vhost/main.c | 35 +-
 examples/vhost/meson.build | 4 +
 examples/vhost/virtio_net.c | 94 +-
 examples/vhost_crypto/Makefile | 32 +
 examples/vhost_crypto/main.c | 536 +
 examples/vhost_crypto/meson.build | 14 +
 examples/vhost_scsi/Makefile | 2 +-
 examples/vhost_scsi/meson.build | 3 +
 examples/vhost_scsi/scsi.c | 14 +-
 examples/vhost_scsi/scsi_spec.h | 2 +-
 examples/vhost_scsi/vhost_scsi.c | 56 +-
 examples/vm_power_manager/Makefile | 7 +-
 examples/vm_power_manager/channel_monitor.c | 28 +-
 examples/vm_power_manager/guest_cli/Makefile | 2 +-
 examples/vm_power_manager/guest_cli/main.c | 151 +-
 examples/vm_power_manager/guest_cli/parse.c | 82 +
 examples/vm_power_manager/guest_cli/parse.h | 19 +
 .../guest_cli/vm_power_cli_guest.c | 113 +-
 .../guest_cli/vm_power_cli_guest.h | 6 +
 examples/vm_power_manager/main.c | 177 +-
 examples/vm_power_manager/meson.build | 10 +
 examples/vm_power_manager/oob_monitor.h | 68 +
 examples/vm_power_manager/oob_monitor_nop.c | 38 +
 examples/vm_power_manager/oob_monitor_x86.c | 258 +
 examples/vm_power_manager/parse.c | 81 +
 examples/vm_power_manager/parse.h | 20 +
 examples/vm_power_manager/power_manager.c | 139 +-
 examples/vm_power_manager/power_manager.h | 23 +
 examples/vmdq/main.c | 14 +-
 examples/vmdq_dcb/main.c | 27 +-
 kernel/Makefile | 9 +
 kernel/freebsd/BSDmakefile.meson | 18 +
 kernel/freebsd/Makefile | 9 +
 kernel/freebsd/contigmem/BSDmakefile | 8 +
 kernel/freebsd/contigmem/Makefile | 24 +
 kernel/freebsd/contigmem/contigmem.c | 353 +
 kernel/freebsd/contigmem/meson.build | 4 +
 kernel/freebsd/meson.build | 32 +
 kernel/freebsd/nic_uio/BSDmakefile | 8 +
 kernel/freebsd/nic_uio/Makefile | 24 +
 kernel/freebsd/nic_uio/meson.build | 4 +
 kernel/freebsd/nic_uio/nic_uio.c | 350 +
 kernel/linux/Makefile | 9 +
 kernel/linux/igb_uio/Kbuild | 2 +
 kernel/linux/igb_uio/Makefile | 25 +
 kernel/linux/igb_uio/compat.h | 154 +
 kernel/linux/igb_uio/igb_uio.c | 669 +
 kernel/linux/igb_uio/meson.build | 18 +
 kernel/linux/kni/Makefile | 58 +
 kernel/linux/kni/compat.h | 112 +
 kernel/linux/kni/ethtool/README | 71 +
 kernel/linux/kni/ethtool/igb/e1000_82575.c | 3650 ++
 kernel/linux/kni/ethtool/igb/e1000_82575.h | 494 +
 kernel/linux/kni/ethtool/igb/e1000_api.c | 1144 +
 kernel/linux/kni/ethtool/igb/e1000_api.h | 142 +
 kernel/linux/kni/ethtool/igb/e1000_defines.h | 1365 +
 kernel/linux/kni/ethtool/igb/e1000_hw.h | 778 +
 kernel/linux/kni/ethtool/igb/e1000_i210.c | 894 +
 kernel/linux/kni/ethtool/igb/e1000_i210.h | 76 +
 kernel/linux/kni/ethtool/igb/e1000_mac.c | 2081 ++
 kernel/linux/kni/ethtool/igb/e1000_mac.h | 65 +
 kernel/linux/kni/ethtool/igb/e1000_manage.c | 539 +
 kernel/linux/kni/ethtool/igb/e1000_manage.h | 74 +
 kernel/linux/kni/ethtool/igb/e1000_mbx.c | 510 +
 kernel/linux/kni/ethtool/igb/e1000_mbx.h | 72 +
 kernel/linux/kni/ethtool/igb/e1000_nvm.c | 950 +
 kernel/linux/kni/ethtool/igb/e1000_nvm.h | 60 +
 kernel/linux/kni/ethtool/igb/e1000_osdep.h | 121 +
 kernel/linux/kni/ethtool/igb/e1000_phy.c | 3392 ++
 kernel/linux/kni/ethtool/igb/e1000_phy.h | 241 +
 kernel/linux/kni/ethtool/igb/e1000_regs.h | 631 +
 kernel/linux/kni/ethtool/igb/igb.h | 844 +
 kernel/linux/kni/ethtool/igb/igb_ethtool.c | 2843 ++
 kernel/linux/kni/ethtool/igb/igb_main.c | 10344 ++++++
 kernel/linux/kni/ethtool/igb/igb_param.c | 832 +
 kernel/linux/kni/ethtool/igb/igb_regtest.h | 234 +
 kernel/linux/kni/ethtool/igb/igb_vmdq.c | 421 +
 kernel/linux/kni/ethtool/igb/igb_vmdq.h | 31 +
 kernel/linux/kni/ethtool/igb/kcompat.h | 3945 ++
 kernel/linux/kni/ethtool/ixgbe/ixgbe.h | 910 +
 kernel/linux/kni/ethtool/ixgbe/ixgbe_82598.c | 1281 +
 kernel/linux/kni/ethtool/ixgbe/ixgbe_82598.h | 29 +
 kernel/linux/kni/ethtool/ixgbe/ixgbe_82599.c | 2299 ++
 kernel/linux/kni/ethtool/ixgbe/ixgbe_82599.h | 43 +
 kernel/linux/kni/ethtool/ixgbe/ixgbe_api.c | 1142 +
 kernel/linux/kni/ethtool/ixgbe/ixgbe_api.h | 153 +
 kernel/linux/kni/ethtool/ixgbe/ixgbe_common.c | 4067 +++
 kernel/linux/kni/ethtool/ixgbe/ixgbe_common.h | 125 +
 kernel/linux/kni/ethtool/ixgbe/ixgbe_dcb.h | 153 +
 kernel/linux/kni/ethtool/ixgbe/ixgbe_ethtool.c | 2886 ++
 kernel/linux/kni/ethtool/ixgbe/ixgbe_fcoe.h | 76 +
 kernel/linux/kni/ethtool/ixgbe/ixgbe_main.c | 2951 ++
 kernel/linux/kni/ethtool/ixgbe/ixgbe_mbx.h | 90 +
 kernel/linux/kni/ethtool/ixgbe/ixgbe_osdep.h | 117 +
 kernel/linux/kni/ethtool/ixgbe/ixgbe_phy.c | 1832 +
 kernel/linux/kni/ethtool/ixgbe/ixgbe_phy.h | 122 +
 kernel/linux/kni/ethtool/ixgbe/ixgbe_type.h | 3239 ++
 kernel/linux/kni/ethtool/ixgbe/ixgbe_x540.c | 922 +
 kernel/linux/kni/ethtool/ixgbe/ixgbe_x540.h | 43 +
 kernel/linux/kni/ethtool/ixgbe/kcompat.c | 1231 +
 kernel/linux/kni/ethtool/ixgbe/kcompat.h | 3140 ++
 kernel/linux/kni/kni_dev.h | 107 +
 kernel/linux/kni/kni_ethtool.c | 219 +
 kernel/linux/kni/kni_fifo.h | 75 +
 kernel/linux/kni/kni_misc.c | 665 +
 kernel/linux/kni/kni_net.c | 797 +
 kernel/linux/meson.build | 42 +
 kernel/meson.build | 4 +
 lib/Makefile | 44 +-
 lib/librte_bbdev/rte_bbdev.c | 17 +-
 lib/librte_bbdev/rte_bbdev.h | 8 +-
 lib/librte_bbdev/rte_bbdev_op.h | 28 +-
 lib/librte_bitratestats/meson.build | 1 +
 lib/librte_bitratestats/rte_bitrate.c | 6 +
 lib/librte_bpf/Makefile | 41 +
 lib/librte_bpf/bpf.c | 61 +
 lib/librte_bpf/bpf_def.h | 143 +
 lib/librte_bpf/bpf_exec.c | 453 +
 lib/librte_bpf/bpf_impl.h | 55 +
 lib/librte_bpf/bpf_jit_x86.c | 1356 +
 lib/librte_bpf/bpf_load.c | 148 +
 lib/librte_bpf/bpf_load_elf.c | 322 +
 lib/librte_bpf/bpf_pkt.c | 605 +
 lib/librte_bpf/bpf_validate.c | 2248 ++
 lib/librte_bpf/meson.build | 25 +
 lib/librte_bpf/rte_bpf.h | 203 +
 lib/librte_bpf/rte_bpf_ethdev.h | 117 +
 lib/librte_bpf/rte_bpf_version.map | 16 +
 lib/librte_cmdline/cmdline_parse.c | 13 +-
 lib/librte_cmdline/cmdline_parse_etheraddr.c | 2 +-
 lib/librte_cmdline/cmdline_parse_ipaddr.c | 223 +-
 lib/librte_cmdline/cmdline_parse_portlist.c | 2 +-
 lib/librte_cmdline/cmdline_parse_string.c | 4 +-
 lib/librte_compat/Makefile | 33 +-
 lib/librte_compressdev/Makefile | 31 +
 lib/librte_compressdev/meson.build | 12 +
 lib/librte_compressdev/rte_comp.c | 215 +
 lib/librte_compressdev/rte_comp.h | 485 +
 lib/librte_compressdev/rte_compressdev.c | 772 +
 lib/librte_compressdev/rte_compressdev.h | 540 +
 lib/librte_compressdev/rte_compressdev_internal.h | 114 +
 lib/librte_compressdev/rte_compressdev_pmd.c | 160 +
 lib/librte_compressdev/rte_compressdev_pmd.h | 390 +
 lib/librte_compressdev/rte_compressdev_version.map | 39 +
 lib/librte_cryptodev/Makefile | 1 +
 lib/librte_cryptodev/meson.build | 5 +-
 lib/librte_cryptodev/rte_crypto.h | 82 +-
 lib/librte_cryptodev/rte_crypto_asym.h | 496 +
 lib/librte_cryptodev/rte_crypto_sym.h | 17 +
 lib/librte_cryptodev/rte_cryptodev.c | 430 +-
 lib/librte_cryptodev/rte_cryptodev.h | 407 +-
 lib/librte_cryptodev/rte_cryptodev_pmd.c | 12 +-
 lib/librte_cryptodev/rte_cryptodev_pmd.h | 157 +-
 lib/librte_cryptodev/rte_cryptodev_version.map | 33 +-
 lib/librte_eal/bsdapp/Makefile | 2 -
 lib/librte_eal/bsdapp/contigmem/BSDmakefile | 8 -
 lib/librte_eal/bsdapp/contigmem/Makefile | 24 -
 lib/librte_eal/bsdapp/contigmem/contigmem.c | 353 -
 lib/librte_eal/bsdapp/contigmem/meson.build | 4 -
 lib/librte_eal/bsdapp/eal/Makefile | 11 +-
 lib/librte_eal/bsdapp/eal/eal.c | 290 +-
 lib/librte_eal/bsdapp/eal/eal_alarm.c | 299 +-
 lib/librte_eal/bsdapp/eal/eal_alarm_private.h | 19 +
 lib/librte_eal/bsdapp/eal/eal_cpuflags.c | 21 +
 lib/librte_eal/bsdapp/eal/eal_dev.c | 21 +
 lib/librte_eal/bsdapp/eal/eal_hugepage_info.c | 69 +-
 lib/librte_eal/bsdapp/eal/eal_interrupts.c | 464 +-
 lib/librte_eal/bsdapp/eal/eal_memalloc.c | 54 +
 lib/librte_eal/bsdapp/eal/eal_memory.c | 471 +-
 lib/librte_eal/bsdapp/eal/eal_thread.c | 2 +-
 lib/librte_eal/bsdapp/eal/meson.build | 5 +
 lib/librte_eal/bsdapp/nic_uio/BSDmakefile | 8 -
 lib/librte_eal/bsdapp/nic_uio/Makefile | 24 -
 lib/librte_eal/bsdapp/nic_uio/meson.build | 4 -
 lib/librte_eal/bsdapp/nic_uio/nic_uio.c | 350 -
 lib/librte_eal/common/Makefile | 4 +-
 lib/librte_eal/common/arch/arm/rte_cpuflags.c | 54 +-
 lib/librte_eal/common/arch/arm/rte_hypervisor.c | 2 +-
 lib/librte_eal/common/arch/ppc_64/rte_cpuflags.c | 15 +-
 lib/librte_eal/common/arch/ppc_64/rte_hypervisor.c | 2 +-
 lib/librte_eal/common/arch/x86/rte_hypervisor.c | 2 +-
 lib/librte_eal/common/eal_common_bus.c | 3 +-
 lib/librte_eal/common/eal_common_class.c | 64 +
 lib/librte_eal/common/eal_common_dev.c | 443 +-
 lib/librte_eal/common/eal_common_devargs.c | 228 +-
 lib/librte_eal/common/eal_common_fbarray.c | 1239 +
 lib/librte_eal/common/eal_common_hypervisor.c | 2 +-
 lib/librte_eal/common/eal_common_lcore.c | 75 +-
 lib/librte_eal/common/eal_common_log.c | 121 +-
 lib/librte_eal/common/eal_common_memalloc.c | 364 +
 lib/librte_eal/common/eal_common_memory.c | 528 +-
 lib/librte_eal/common/eal_common_memzone.c | 290 +-
 lib/librte_eal/common/eal_common_options.c | 192 +-
 lib/librte_eal/common/eal_common_proc.c | 713 +-
 lib/librte_eal/common/eal_common_thread.c | 98 +-
 lib/librte_eal/common/eal_common_uuid.c | 193 +
 lib/librte_eal/common/eal_filesystem.h | 70 +-
 lib/librte_eal/common/eal_hugepages.h | 11 +-
 lib/librte_eal/common/eal_internal_cfg.h | 20 +-
 lib/librte_eal/common/eal_memalloc.h | 82 +
 lib/librte_eal/common/eal_options.h | 8 +
 lib/librte_eal/common/eal_private.h | 99 +
 .../common/include/arch/arm/rte_atomic.h | 32 +-
 .../common/include/arch/arm/rte_atomic_32.h | 32 +-
 .../common/include/arch/arm/rte_byteorder.h | 32 +-
 .../common/include/arch/arm/rte_cpuflags.h | 32 +-
 .../common/include/arch/arm/rte_cpuflags_32.h | 32 +-
 .../common/include/arch/arm/rte_cycles.h | 32 +-
 .../common/include/arch/arm/rte_cycles_32.h | 32 +-
 .../common/include/arch/arm/rte_memcpy.h | 32 +-
 .../common/include/arch/arm/rte_memcpy_32.h | 32 +-
 .../common/include/arch/arm/rte_prefetch.h | 32 +-
 .../common/include/arch/arm/rte_prefetch_32.h | 32 +-
 .../common/include/arch/arm/rte_rwlock.h | 2 +
 .../common/include/arch/arm/rte_spinlock.h | 32 +-
 .../common/include/arch/ppc_64/rte_atomic.h | 23 +-
 .../common/include/arch/ppc_64/rte_rwlock.h | 2 +
 .../common/include/arch/x86/rte_atomic.h | 24 +
 .../common/include/arch/x86/rte_atomic_32.h | 12 +
 .../common/include/arch/x86/rte_atomic_64.h | 12 +
 .../common/include/arch/x86/rte_memcpy.h | 24 +-
 .../common/include/arch/x86/rte_spinlock.h | 4 +-
 lib/librte_eal/common/include/generic/rte_atomic.h | 90 +
 .../common/include/generic/rte_byteorder.h | 6 +-
 .../common/include/generic/rte_cpuflags.h | 21 +
 lib/librte_eal/common/include/generic/rte_rwlock.h | 4 +-
 lib/librte_eal/common/include/rte_bitmap.h | 8 +-
 lib/librte_eal/common/include/rte_bus.h | 4 +-
 lib/librte_eal/common/include/rte_class.h | 134 +
 lib/librte_eal/common/include/rte_common.h | 160 +-
 lib/librte_eal/common/include/rte_dev.h | 211 +-
 lib/librte_eal/common/include/rte_devargs.h | 173 +-
 lib/librte_eal/common/include/rte_eal.h | 54 +-
 lib/librte_eal/common/include/rte_eal_interrupts.h | 1 +
 lib/librte_eal/common/include/rte_eal_memconfig.h | 28 +-
 lib/librte_eal/common/include/rte_fbarray.h | 470 +
 lib/librte_eal/common/include/rte_hypervisor.h | 2 +-
 lib/librte_eal/common/include/rte_lcore.h | 60 +-
 lib/librte_eal/common/include/rte_log.h | 40 +-
 lib/librte_eal/common/include/rte_malloc.h | 10 +
 lib/librte_eal/common/include/rte_malloc_heap.h | 6 +
 lib/librte_eal/common/include/rte_memory.h | 330 +-
 lib/librte_eal/common/include/rte_memzone.h | 45 +-
 .../common/include/rte_pci_dev_feature_defs.h | 58 +-
 .../common/include/rte_pci_dev_features.h | 58 +-
 lib/librte_eal/common/include/rte_random.h | 6 +-
 lib/librte_eal/common/include/rte_service.h | 167 +-
 .../common/include/rte_service_component.h | 38 +-
 lib/librte_eal/common/include/rte_string_fns.h | 31 +
 lib/librte_eal/common/include/rte_tailq.h | 3 +-
 lib/librte_eal/common/include/rte_uuid.h | 129 +
 lib/librte_eal/common/include/rte_version.h | 2 +-
 lib/librte_eal/common/include/rte_vfio.h | 243 +-
 lib/librte_eal/common/malloc_elem.c | 479 +-
 lib/librte_eal/common/malloc_elem.h | 51 +-
 lib/librte_eal/common/malloc_heap.c | 868 +-
 lib/librte_eal/common/malloc_heap.h | 19 +-
 lib/librte_eal/common/malloc_mp.c | 743 +
 lib/librte_eal/common/malloc_mp.h | 86 +
 lib/librte_eal/common/meson.build | 8 +
 lib/librte_eal/common/rte_malloc.c | 85 +-
 lib/librte_eal/common/rte_service.c | 130 +-
 lib/librte_eal/linuxapp/Makefile | 2 -
 lib/librte_eal/linuxapp/eal/Makefile | 12 +-
 lib/librte_eal/linuxapp/eal/eal.c | 269 +-
 lib/librte_eal/linuxapp/eal/eal_alarm.c | 9 +-
 lib/librte_eal/linuxapp/eal/eal_cpuflags.c | 84 +
 lib/librte_eal/linuxapp/eal/eal_dev.c | 224 +
 lib/librte_eal/linuxapp/eal/eal_hugepage_info.c | 253 +-
 lib/librte_eal/linuxapp/eal/eal_interrupts.c | 44 +-
 lib/librte_eal/linuxapp/eal/eal_log.c | 13 +-
 lib/librte_eal/linuxapp/eal/eal_memalloc.c | 1363 +
 lib/librte_eal/linuxapp/eal/eal_memory.c | 1528 +-
 lib/librte_eal/linuxapp/eal/eal_thread.c | 6 +-
 lib/librte_eal/linuxapp/eal/eal_timer.c | 12 +-
 lib/librte_eal/linuxapp/eal/eal_vfio.c | 1586 +-
 lib/librte_eal/linuxapp/eal/eal_vfio.h | 60 +-
 lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c | 402 +-
 lib/librte_eal/linuxapp/eal/meson.build | 4 +
 lib/librte_eal/linuxapp/igb_uio/Kbuild | 1 -
 lib/librte_eal/linuxapp/igb_uio/Makefile | 25 -
 lib/librte_eal/linuxapp/igb_uio/compat.h | 134 -
 lib/librte_eal/linuxapp/igb_uio/igb_uio.c | 643 -
 lib/librte_eal/linuxapp/igb_uio/meson.build | 24 -
 lib/librte_eal/linuxapp/kni/Makefile | 58 -
 lib/librte_eal/linuxapp/kni/compat.h | 106 -
 lib/librte_eal/linuxapp/kni/ethtool/README | 71 -
 .../linuxapp/kni/ethtool/igb/e1000_82575.c | 3650 --
 .../linuxapp/kni/ethtool/igb/e1000_82575.h | 494 -
 .../linuxapp/kni/ethtool/igb/e1000_api.c | 1144 -
 .../linuxapp/kni/ethtool/igb/e1000_api.h | 142 -
 .../linuxapp/kni/ethtool/igb/e1000_defines.h | 1365 -
 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_hw.h | 778 -
 .../linuxapp/kni/ethtool/igb/e1000_i210.c | 894 -
 .../linuxapp/kni/ethtool/igb/e1000_i210.h | 76 -
 .../linuxapp/kni/ethtool/igb/e1000_mac.c | 2081 --
 .../linuxapp/kni/ethtool/igb/e1000_mac.h | 65 -
 .../linuxapp/kni/ethtool/igb/e1000_manage.c | 539 -
 .../linuxapp/kni/ethtool/igb/e1000_manage.h | 74 -
 .../linuxapp/kni/ethtool/igb/e1000_mbx.c | 510 -
 .../linuxapp/kni/ethtool/igb/e1000_mbx.h | 72 -
 .../linuxapp/kni/ethtool/igb/e1000_nvm.c | 950 -
 .../linuxapp/kni/ethtool/igb/e1000_nvm.h | 60 -
 .../linuxapp/kni/ethtool/igb/e1000_osdep.h | 121 -
 .../linuxapp/kni/ethtool/igb/e1000_phy.c | 3392 --
 .../linuxapp/kni/ethtool/igb/e1000_phy.h | 241 -
 .../linuxapp/kni/ethtool/igb/e1000_regs.h | 631 -
 lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h | 844 -
 .../linuxapp/kni/ethtool/igb/igb_ethtool.c | 2842 --
 lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c | 10344 ------
 .../linuxapp/kni/ethtool/igb/igb_param.c | 832 -
 .../linuxapp/kni/ethtool/igb/igb_regtest.h | 234 -
 lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c | 421 -
 lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.h | 31 -
 lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h | 3933 --
 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h | 910 -
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c | 1281 -
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_82598.h | 29 -
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c | 2299 --
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_82599.h | 43 -
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_api.c | 1142 -
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_api.h | 153 -
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_common.c | 4067 ---
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_common.h | 125 -
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_dcb.h | 153 -
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c | 2886 --
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_fcoe.h | 76 -
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_main.c | 2951 --
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h | 90 -
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h | 117 -
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c | 1832 -
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h | 122 -
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_type.h | 3239 --
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c | 922 -
 .../linuxapp/kni/ethtool/ixgbe/ixgbe_x540.h | 43 -
 .../linuxapp/kni/ethtool/ixgbe/kcompat.c | 1231 -
 .../linuxapp/kni/ethtool/ixgbe/kcompat.h | 3140 --
 lib/librte_eal/linuxapp/kni/kni_dev.h | 106 -
 lib/librte_eal/linuxapp/kni/kni_ethtool.c | 219 -
 lib/librte_eal/linuxapp/kni/kni_fifo.h | 75 -
 lib/librte_eal/linuxapp/kni/kni_misc.c | 663 -
 lib/librte_eal/linuxapp/kni/kni_net.c | 757 -
 lib/librte_eal/meson.build | 27 +-
 lib/librte_eal/rte_eal_version.map | 120 +-
 lib/librte_ethdev/Makefile | 44 +
 lib/librte_ethdev/ethdev_profile.c | 135 +
 lib/librte_ethdev/ethdev_profile.h | 27 +
 lib/librte_ethdev/meson.build | 27 +
 lib/librte_ethdev/rte_dev_info.h | 49 +
 lib/librte_ethdev/rte_eth_ctrl.h | 828 +
 lib/librte_ethdev/rte_ethdev.c | 4425 +++
 lib/librte_ethdev/rte_ethdev.h | 4298 +++
 lib/librte_ethdev/rte_ethdev_core.h | 625 +
 lib/librte_ethdev/rte_ethdev_driver.h | 357 +
 lib/librte_ethdev/rte_ethdev_pci.h | 210 +
 lib/librte_ethdev/rte_ethdev_vdev.h | 84 +
 lib/librte_ethdev/rte_ethdev_version.map | 255 +
 lib/librte_ethdev/rte_flow.c | 635 +
 lib/librte_ethdev/rte_flow.h | 2208 ++
 lib/librte_ethdev/rte_flow_driver.h | 184 +
 lib/librte_ethdev/rte_mtr.c | 201 +
 lib/librte_ethdev/rte_mtr.h | 730 +
 lib/librte_ethdev/rte_mtr_driver.h | 192 +
 lib/librte_ethdev/rte_tm.c | 409 +
 lib/librte_ethdev/rte_tm.h | 1965 +
 lib/librte_ethdev/rte_tm_driver.h | 337 +
 lib/librte_ether/Makefile | 44 -
 lib/librte_ether/ethdev_profile.c | 135 -
 lib/librte_ether/ethdev_profile.h | 27 -
 lib/librte_ether/meson.build | 27 -
 lib/librte_ether/rte_dev_info.h | 31 -
 lib/librte_ether/rte_eth_ctrl.h | 827 -
 lib/librte_ether/rte_ethdev.c | 4170 ---
 lib/librte_ether/rte_ethdev.h | 4112 ---
 lib/librte_ether/rte_ethdev_core.h | 613 -
 lib/librte_ether/rte_ethdev_driver.h | 132 -
 lib/librte_ether/rte_ethdev_pci.h | 196 -
 lib/librte_ether/rte_ethdev_vdev.h | 84 -
 lib/librte_ether/rte_ethdev_version.map | 232 -
 lib/librte_ether/rte_flow.c | 420 -
 lib/librte_ether/rte_flow.h | 1483 -
 lib/librte_ether/rte_flow_driver.h | 121 -
 lib/librte_ether/rte_mtr.c | 201 -
 lib/librte_ether/rte_mtr.h | 730 -
 lib/librte_ether/rte_mtr_driver.h | 192 -
 lib/librte_ether/rte_tm.c | 409 -
 lib/librte_ether/rte_tm.h | 1912 -
 lib/librte_ether/rte_tm_driver.h | 337 -
 lib/librte_eventdev/Makefile | 15 +-
 lib/librte_eventdev/meson.build | 19 +-
 lib/librte_eventdev/rte_event_crypto_adapter.c | 1128 +
 lib/librte_eventdev/rte_event_crypto_adapter.h | 575 +
 lib/librte_eventdev/rte_event_eth_rx_adapter.c | 1784 +-
 lib/librte_eventdev/rte_event_eth_rx_adapter.h | 129 +-
 lib/librte_eventdev/rte_event_ring.c | 15 +-
 lib/librte_eventdev/rte_event_ring.h | 4 +-
 lib/librte_eventdev/rte_event_timer_adapter.c | 1299 +
 lib/librte_eventdev/rte_event_timer_adapter.h | 766 +
 lib/librte_eventdev/rte_event_timer_adapter_pmd.h | 114 +
 lib/librte_eventdev/rte_eventdev.c | 79 +-
 lib/librte_eventdev/rte_eventdev.h | 167 +-
 lib/librte_eventdev/rte_eventdev_pmd.h | 225 +
 lib/librte_eventdev/rte_eventdev_version.map | 39 +-
 lib/librte_flow_classify/rte_flow_classify.c | 9 +-
 lib/librte_flow_classify/rte_flow_classify_parse.c | 24 +-
 lib/librte_gso/Makefile | 1 +
 lib/librte_gso/gso_common.h | 3 +
 lib/librte_gso/gso_udp4.c | 81 +
 lib/librte_gso/gso_udp4.h | 42 +
 lib/librte_gso/meson.build | 2 +-
 lib/librte_gso/rte_gso.c | 24 +-
 lib/librte_gso/rte_gso.h | 6 +-
 lib/librte_hash/meson.build | 1 -
 lib/librte_hash/rte_cuckoo_hash.c | 700 +-
 lib/librte_hash/rte_cuckoo_hash.h | 22 +-
 lib/librte_hash/rte_cuckoo_hash_x86.h | 164 -
 lib/librte_hash/rte_hash.h | 88 +-
 lib/librte_hash/rte_hash_crc.h | 11 +-
 lib/librte_hash/rte_hash_version.map | 8 +
 lib/librte_ip_frag/ip_frag_internal.c | 8 +-
 lib/librte_ip_frag/rte_ipv4_reassembly.c | 2 +
 lib/librte_ip_frag/rte_ipv6_reassembly.c | 2 +
 lib/librte_kni/meson.build | 2 +-
 lib/librte_kni/rte_kni.c | 7 +-
 lib/librte_kvargs/Makefile | 34 +-
 lib/librte_kvargs/meson.build | 5 +
 lib/librte_kvargs/rte_kvargs.c | 61 +-
 lib/librte_kvargs/rte_kvargs.h | 58 +
 lib/librte_kvargs/rte_kvargs_version.map | 8 +
 lib/librte_latencystats/rte_latencystats.c | 16 +-
 lib/librte_mbuf/Makefile | 3 +-
 lib/librte_mbuf/meson.build | 1 -
 lib/librte_mbuf/rte_mbuf.c | 26 +-
 lib/librte_mbuf/rte_mbuf.h | 540 +-
 lib/librte_mbuf/rte_mbuf_pool_ops.c | 14 +-
 lib/librte_mbuf/rte_mbuf_pool_ops.h | 13 +-
 lib/librte_mbuf/rte_mbuf_ptype.c | 3 +
 lib/librte_mbuf/rte_mbuf_ptype.h | 53 +-
 lib/librte_mbuf/rte_mbuf_version.map | 4 +-
 lib/librte_member/rte_member.c | 5 +-
 lib/librte_mempool/Makefile | 7 +-
 lib/librte_mempool/meson.build | 18 +-
 lib/librte_mempool/rte_mempool.c | 546 +-
 lib/librte_mempool/rte_mempool.h | 588 +-
 lib/librte_mempool/rte_mempool_ops.c | 51 +-
 lib/librte_mempool/rte_mempool_ops_default.c | 70 +
 lib/librte_mempool/rte_mempool_version.map | 23 +-
 lib/librte_meter/Makefile | 2 +-
 lib/librte_meter/meson.build | 1 +
 lib/librte_meter/rte_meter.c | 93 +-
 lib/librte_meter/rte_meter.h | 197 +-
 lib/librte_meter/rte_meter_version.map | 7 +
 lib/librte_metrics/rte_metrics.c | 23 +-
 lib/librte_net/Makefile | 1 +
 lib/librte_net/meson.build | 1 +
 lib/librte_net/rte_esp.h | 2 +-
 lib/librte_net/rte_ether.h | 31 +-
 lib/librte_net/rte_ip.h | 42 +-
 lib/librte_net/rte_net.c | 21 +-
 lib/librte_net/rte_net.h | 27 +
 lib/librte_net/rte_net_version.map | 3 +-
 lib/librte_pci/Makefile | 32 +-
 lib/librte_pci/rte_pci.c | 11 +-
 lib/librte_pci/rte_pci_version.map | 5 +-
 lib/librte_pdump/Makefile | 3 +-
 lib/librte_pdump/meson.build | 2 +
 lib/librte_pdump/rte_pdump.c | 428 +-
 lib/librte_pdump/rte_pdump.h | 9 +-
 lib/librte_pipeline/Makefile | 7 +-
 lib/librte_pipeline/meson.build | 7 +-
 lib/librte_pipeline/rte_pipeline_version.map | 28 +
 lib/librte_pipeline/rte_port_in_action.c | 531 +
 lib/librte_pipeline/rte_port_in_action.h | 301 +
 lib/librte_pipeline/rte_table_action.c | 2386 ++
 lib/librte_pipeline/rte_table_action.h | 905 +
 lib/librte_port/meson.build | 12 +-
 lib/librte_power/channel_commands.h | 3 +-
 lib/librte_power/power_acpi_cpufreq.c | 21 +
 lib/librte_power/power_acpi_cpufreq.h | 16 +
 lib/librte_power/power_kvm_vm.c | 8 +
 lib/librte_power/power_kvm_vm.h | 17 +
 lib/librte_power/rte_power.c | 3 +
 lib/librte_power/rte_power.h | 32 +
 lib/librte_power/rte_power_version.map | 9 +-
 lib/librte_rawdev/Makefile | 1 -
 lib/librte_rawdev/meson.build | 5 +
 lib/librte_rawdev/rte_rawdev.c | 76 +-
 lib/librte_rawdev/rte_rawdev.h | 55 +-
 lib/librte_rawdev/rte_rawdev_pmd.h | 28 +-
 lib/librte_rawdev/rte_rawdev_version.map | 3 +-
 lib/librte_ring/Makefile | 2 +-
 lib/librte_ring/rte_ring.h | 27 +-
 lib/librte_ring/rte_ring_c11_mem.h | 10 +-
 lib/librte_ring/rte_ring_generic.h | 10 +-
 lib/librte_sched/rte_sched.c | 239 +-
 lib/librte_sched/rte_sched.h | 21 +
 lib/librte_sched/rte_sched_version.map | 6 +
 lib/librte_security/rte_security.c | 37 +-
 lib/librte_security/rte_security.h | 50 +-
 lib/librte_security/rte_security_driver.h | 40 +-
 lib/librte_table/Makefile | 1 +
 lib/librte_table/meson.build | 37 +-
 lib/librte_table/rte_table_acl.c | 6 -
 lib/librte_table/rte_table_hash.h | 3 -
 lib/librte_table/rte_table_hash_cuckoo.c | 11 +-
 lib/librte_table/rte_table_hash_cuckoo.h | 57 +
 lib/librte_timer/rte_timer.c | 2 +-
 lib/librte_vhost/Makefile | 13 +-
 lib/librte_vhost/fd_man.c | 98 +-
 lib/librte_vhost/fd_man.h | 17 +
 lib/librte_vhost/iotlb.c | 10 +-
 lib/librte_vhost/iotlb.h | 2 +-
 lib/librte_vhost/meson.build | 11 +-
 lib/librte_vhost/rte_vdpa.h | 87 +
 lib/librte_vhost/rte_vhost.h | 212 +
 lib/librte_vhost/rte_vhost_crypto.h | 109 +
 lib/librte_vhost/rte_vhost_version.map | 24 +
 lib/librte_vhost/socket.c | 288 +-
 lib/librte_vhost/vdpa.c | 115 +
 lib/librte_vhost/vhost.c | 312 +-
 lib/librte_vhost/vhost.h | 358 +-
 lib/librte_vhost/vhost_crypto.c | 1372 +
 lib/librte_vhost/vhost_user.c | 581 +-
 lib/librte_vhost/vhost_user.h | 56 +-
 lib/librte_vhost/virtio_crypto.h | 422 +
 lib/librte_vhost/virtio_net.c | 1542 +-
 lib/meson.build | 30 +-
 meson.build | 7 +-
 meson_options.txt | 2 +
 mk/arch/arm/rte.vars.mk | 32 +-
 mk/machine/armv7a/rte.vars.mk | 31 +-
 mk/machine/dpaa/rte.vars.mk | 3 +
 mk/machine/dpaa2/rte.vars.mk | 3 +
 mk/rte.app.mk | 135 +-
 mk/rte.extshared.mk | 29 +-
 mk/rte.extsubdir.mk | 31 +-
 mk/rte.sdkbuild.mk | 1 +
 mk/rte.sdkconfig.mk | 19 +-
 mk/rte.sdkdoc.mk | 34 +-
 mk/rte.sdkexamples.mk | 31 +-
 mk/rte.sdkinstall.mk | 70 +-
 mk/rte.sdkroot.mk | 4 +-
 mk/rte.sdktest.mk | 32 +-
 mk/rte.shared.mk | 29 +-
 mk/toolchain/gcc/rte.toolchain-compat.mk | 13 +
 mk/toolchain/gcc/rte.vars.mk | 9 +
 pkg/dpdk.spec | 32 +-
 test/bpf/dummy.c | 20 +
 test/bpf/mbuf.h | 578 +
 test/bpf/t1.c | 52 +
 test/bpf/t2.c | 31 +
 test/bpf/t3.c | 36 +
 test/test-pipeline/init.c | 6 +-
 test/test-pipeline/main.h | 4 +
 test/test-pipeline/pipeline_hash.c | 26 +-
 test/test/Makefile | 19 +-
 test/test/autotest.py | 13 +-
 test/test/autotest_data.py | 1087 +-
 test/test/autotest_runner.py | 519 +-
 test/test/commands.c | 5 +-
 test/test/meson.build | 22 +-
 test/test/process.h | 29 +
 test/test/resource.c | 33 +-
 test/test/resource.h | 33 +-
 test/test/test_bpf.c | 1926 +
 test/test/test_cmdline_cirbuf.c | 2 +-
 test/test/test_cmdline_ipaddr.c | 2 -
 test/test/test_common.c | 38 +
 test/test/test_compressdev.c | 1486 +
 test/test/test_compressdev_test_buffer.h | 295 +
 test/test/test_cryptodev.c | 472 +-
 test/test/test_cryptodev.h | 5 +-
 test/test/test_cryptodev_aes_test_vectors.h | 109 +-
 test/test/test_cryptodev_asym.c | 1369 +
 test/test/test_cryptodev_asym_util.h | 42 +
 test/test/test_cryptodev_blockcipher.c | 103 +-
 test/test/test_cryptodev_blockcipher.h | 4 +-
 test/test/test_cryptodev_des_test_vectors.h | 90 +-
 test/test/test_cryptodev_dh_test_vectors.h | 80 +
 test/test/test_cryptodev_dsa_test_vectors.h | 117 +
 test/test/test_cryptodev_hash_test_vectors.h | 158 +-
 test/test/test_cryptodev_mod_test_vectors.h | 103 +
 test/test/test_cryptodev_rsa_test_vectors.h | 88 +
 test/test/test_devargs.c | 103 -
 test/test/test_distributor_perf.c | 3 +-
 test/test/test_eal_flags.c | 92 +-
 test/test/test_event_crypto_adapter.c | 928 +
 test/test/test_event_eth_rx_adapter.c | 344 +-
 test/test/test_event_timer_adapter.c | 1830 +
 test/test/test_fbarray.c | 576 +
 test/test/test_flow_classify.c | 20 +-
 test/test/test_hash.c | 12 +
 test/test/test_hash_multiwriter.c | 58 +-
 test/test/test_hash_perf.c | 36 +-
 test/test/test_hash_readwrite.c | 637 +
 test/test/test_interrupts.c | 39 +-
 test/test/test_kni.c | 43 +-
 test/test/test_link_bonding.c | 31 +-
 test/test/test_link_bonding_mode4.c | 8 +-
 test/test/test_link_bonding_rssconf.c | 12 +-
 test/test/test_malloc.c | 32 +-
 test/test/test_memory.c | 27 +-
 test/test/test_mempool.c | 46 +-
 test/test/test_memzone.c | 227 +-
 test/test/test_meter.c | 194 +-
 test/test/test_mp_secondary.c | 26 -
 test/test/test_pmd_perf.c | 35 +-
 test/test/test_pmd_ring.c | 6 +-
 test/test/test_power_acpi_cpufreq.c | 46 +-
 test/test/test_power_kvm_vm.c | 21 +-
 test/test/test_reorder.c | 47 +-
 test/test/test_resource.c | 33 +-
 test/test/test_service_cores.c | 116 +
 test/test/test_table.c | 11 +
 test/test/test_table.h | 6 +
 test/test/test_table_combined.c | 4 +-
 test/test/test_table_pipeline.c | 14 +-
 test/test/test_table_tables.c | 6 +-
 test/test/virtual_pmd.c | 6 +-
 usertools/cpu_layout.py | 36 +-
 usertools/dpdk-devbind.py | 19 +-
 usertools/dpdk-pmdinfo.py | 2 +
 2160 files changed, 368426 insertions(+), 189040 deletions(-)
 create mode 100644 app/pdump/meson.build
 create mode 100644 app/proc-info/Makefile
 create mode 100644 app/proc-info/main.c
 create mode 100644 app/proc-info/meson.build
 delete mode 100644 app/proc_info/Makefile
 delete mode 100644 app/proc_info/main.c
 create mode 100644 app/test-bbdev/meson.build
 create mode 100644 app/test-bbdev/test_vectors/bbdev_null.data
 delete mode 100644 app/test-bbdev/test_vectors/bbdev_vector_null.data
 delete mode 100644 app/test-bbdev/test_vectors/bbdev_vector_td_default.data
 delete mode 100644 app/test-bbdev/test_vectors/bbdev_vector_te_default.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_dec_c1_k40_r0_e17280_sbd_negllr.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_low_snr.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_negllr.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_posllr.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_negllr.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_posllr.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr_crc24b.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1190_rm.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1194_rm.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1196_rm.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e272_rm.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e120_rm_rvidx.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18444.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18448_crc24a.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_enc_c2_k5952_r0_e17868_crc24b.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_enc_c3_k4800_r2_e14412_crc24b.data
 create mode 100644 app/test-bbdev/test_vectors/turbo_enc_c4_k4800_r2_e14412_crc24b.data
 create mode 120000 app/test-bbdev/turbo_dec_default.data
 create mode 120000 app/test-bbdev/turbo_enc_default.data
 create mode 100644 app/test-crypto-perf/meson.build
 create mode 100644 app/test-pmd/bpf_cmd.c
 create mode 100644 app/test-pmd/bpf_cmd.h
 create mode 100644 app/test-pmd/softnicfwd.c
 delete mode 100644 app/test-pmd/tm.c
 create mode 100644 config/arm/arm64_dpaa2_linuxapp_gcc
 create mode 100644 config/arm/arm64_dpaa_linuxapp_gcc
 create mode 100644 config/defconfig_arm64-stingray-linuxapp-gcc
 create mode 100755 devtools/check-symbol-change.sh
 create mode 100755 devtools/check-symbol-maps.sh
 create mode 100644 devtools/cocci/strlcpy.cocci
 create mode 100755 devtools/test-meson-builds.sh
 create mode 100644 doc/guides/compressdevs/features/default.ini
 create mode 100644 doc/guides/compressdevs/features/isal.ini
 create mode 100644 doc/guides/compressdevs/features/octeontx.ini
 create mode 100644 doc/guides/compressdevs/features/qat.ini
 create mode 100644 doc/guides/compressdevs/features/zlib.ini
 create mode 100644 doc/guides/compressdevs/index.rst
 create mode 100644 doc/guides/compressdevs/isal.rst
 create mode 100644 doc/guides/compressdevs/octeontx.rst
 create mode 100644 doc/guides/compressdevs/overview.rst
 create mode 100644 doc/guides/compressdevs/qat_comp.rst
 create mode 100644 doc/guides/compressdevs/zlib.rst
 create mode 100644 doc/guides/cryptodevs/ccp.rst
 create mode 100644 doc/guides/cryptodevs/features/ccp.ini
 delete mode 100644 doc/guides/cryptodevs/features/mrvl.ini
 create mode 100644 doc/guides/cryptodevs/features/mvsam.ini
 create mode 100644 doc/guides/cryptodevs/features/virtio.ini
 delete mode 100644 doc/guides/cryptodevs/mrvl.rst
 create mode 100644 doc/guides/cryptodevs/mvsam.rst
 create mode 100644 doc/guides/cryptodevs/virtio.rst
 create mode 100644 doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst
 create mode 100644 doc/guides/nics/axgbe.rst
 create mode 100644 doc/guides/nics/features/axgbe.ini
 create mode 100644 doc/guides/nics/features/cxgbevf.ini
 create mode 100644 doc/guides/nics/features/ifcvf.ini
 delete mode 100644 doc/guides/nics/features/mrvl.ini
 create mode 100644 doc/guides/nics/features/mvpp2.ini
 create mode 100644 doc/guides/nics/features/netvsc.ini
 create mode 100644 doc/guides/nics/features/softnic.ini
 create mode 100644 doc/guides/nics/ifc.rst
 create mode 100644 doc/guides/nics/img/szedata2_nfb200g_architecture.svg
 delete mode 100644 doc/guides/nics/mrvl.rst
 create mode 100644 doc/guides/nics/mvpp2.rst
 create mode 100644 doc/guides/nics/netvsc.rst
 create mode 100644 doc/guides/nics/softnic.rst
 create mode 100644 doc/guides/prog_guide/bpf_lib.rst
 create mode 100644 doc/guides/prog_guide/compressdev.rst
 create mode 100644 doc/guides/prog_guide/event_crypto_adapter.rst
 create mode 100644 doc/guides/prog_guide/event_timer_adapter.rst
 create mode 100644 doc/guides/prog_guide/img/event_crypto_adapter_op_forward.svg
 create mode 100644 doc/guides/prog_guide/img/event_crypto_adapter_op_new.svg
 create mode 100644 doc/guides/prog_guide/img/stateful-op.svg
 create mode 100644 doc/guides/prog_guide/img/stateless-op-shared.svg
 create mode 100644 doc/guides/prog_guide/img/stateless-op.svg
 create mode 100644 doc/guides/prog_guide/img/turbo_tb_decode.svg
 create mode 100644 doc/guides/prog_guide/img/turbo_tb_encode.svg
 create mode 100644 doc/guides/prog_guide/switch_representation.rst
 create mode 100644 doc/guides/rawdevs/dpaa2_cmdif.rst
 create mode 100644 doc/guides/rawdevs/dpaa2_qdma.rst
 create mode 100644 doc/guides/rawdevs/ifpga_rawdev.rst
 create mode 100644 doc/guides/rawdevs/index.rst
 create mode 100644 doc/guides/rel_notes/release_18_05.rst
 create mode 100644 doc/guides/rel_notes/release_18_08.rst
 delete mode 100644 doc/guides/sample_app_ug/img/ip_pipelines_1.svg
 delete mode 100644 doc/guides/sample_app_ug/img/ip_pipelines_2.svg
 delete mode 100644 doc/guides/sample_app_ug/img/ip_pipelines_3.svg
 delete mode 100644 doc/guides/sample_app_ug/img/master_slave_proc.png
 delete mode 100644 doc/guides/sample_app_ug/img/slave_proc_recov.png
 create mode 100644 doc/guides/sample_app_ug/vhost_crypto.rst
 create mode 100644 drivers/baseband/Makefile
 create mode 100644 drivers/baseband/null/Makefile
 create mode 100644 drivers/baseband/null/bbdev_null.c
 create mode 100644 drivers/baseband/null/rte_pmd_bbdev_null_version.map
 create mode 100644 drivers/baseband/turbo_sw/Makefile
 create mode 100644 drivers/baseband/turbo_sw/bbdev_turbo_software.c
 create mode 100644 drivers/baseband/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map
 delete mode 100644 drivers/bbdev/Makefile
 delete mode 100644 drivers/bbdev/null/Makefile
 delete mode 100644 drivers/bbdev/null/bbdev_null.c
 delete mode 100644 drivers/bbdev/null/rte_pmd_bbdev_null_version.map
 delete mode 100644 drivers/bbdev/turbo_sw/Makefile
 delete mode 100644 drivers/bbdev/turbo_sw/bbdev_turbo_software.c
 delete mode 100644 drivers/bbdev/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map
 create mode 100644 drivers/bus/dpaa/meson.build
 create mode 100644 drivers/bus/fslmc/mc/dpdmai.c
 create mode 100644 drivers/bus/fslmc/mc/fsl_dpdmai.h
 create mode 100644 drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
 create mode 100644 drivers/bus/fslmc/meson.build
 create mode 100644 drivers/bus/ifpga/Makefile
 create mode 100644 drivers/bus/ifpga/ifpga_bus.c
 create mode 100644 drivers/bus/ifpga/ifpga_common.c
 create mode 100644 drivers/bus/ifpga/ifpga_common.h
 create mode 100644 drivers/bus/ifpga/ifpga_logs.h
 create mode 100644 drivers/bus/ifpga/meson.build
 create mode 100644 drivers/bus/ifpga/rte_bus_ifpga.h
 create mode 100644 drivers/bus/ifpga/rte_bus_ifpga_version.map
 create mode 100644 drivers/bus/vmbus/Makefile
 create mode 100644 drivers/bus/vmbus/linux/Makefile
 create mode 100644 drivers/bus/vmbus/linux/vmbus_bus.c
 create mode 100644 drivers/bus/vmbus/linux/vmbus_uio.c
 create mode 100644 drivers/bus/vmbus/meson.build
 create mode 100644 drivers/bus/vmbus/private.h
 create mode 100644 drivers/bus/vmbus/rte_bus_vmbus.h
 create mode 100644 drivers/bus/vmbus/rte_bus_vmbus_version.map
 create mode 100644 drivers/bus/vmbus/rte_vmbus_reg.h
 create mode 100644 drivers/bus/vmbus/vmbus_bufring.c
 create mode 100644 drivers/bus/vmbus/vmbus_channel.c
 create mode 100644 drivers/bus/vmbus/vmbus_common.c
 create mode 100644 drivers/bus/vmbus/vmbus_common_uio.c
 create mode 100644 drivers/common/Makefile
 create mode 100644 drivers/common/meson.build
 create mode 100644 drivers/common/octeontx/Makefile
 create mode 100644 drivers/common/octeontx/meson.build
 create mode 100644 drivers/common/octeontx/octeontx_mbox.c
 create mode 100644 drivers/common/octeontx/octeontx_mbox.h
 create mode 100644 drivers/common/octeontx/rte_common_octeontx_version.map
 create mode 100644 drivers/common/qat/Makefile
 create mode 100644 drivers/common/qat/meson.build
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros.h
 create mode 100644 drivers/common/qat/qat_adf/icp_qat_fw.h
 create mode 100644 drivers/common/qat/qat_adf/icp_qat_fw_comp.h
 create mode 100644 drivers/common/qat/qat_adf/icp_qat_fw_la.h
 create mode 100644 drivers/common/qat/qat_adf/icp_qat_hw.h
 create mode 100644 drivers/common/qat/qat_common.c
 create mode 100644 drivers/common/qat/qat_common.h
 create mode 100644 drivers/common/qat/qat_device.c
 create mode 100644 drivers/common/qat/qat_device.h
 create mode 100644 drivers/common/qat/qat_logs.c
 create mode 100644 drivers/common/qat/qat_logs.h
 create mode 100644 drivers/common/qat/qat_qp.c
 create mode 100644 drivers/common/qat/qat_qp.h
 create mode 100644 drivers/compress/Makefile
 create mode 100644 drivers/compress/isal/Makefile
 create mode 100644 drivers/compress/isal/isal_compress_pmd.c
 create mode 100644 drivers/compress/isal/isal_compress_pmd_ops.c
 create mode 100644 drivers/compress/isal/isal_compress_pmd_private.h
 create mode 100644 drivers/compress/isal/meson.build
 create mode 100644 drivers/compress/isal/rte_pmd_isal_version.map
 create mode 100644 drivers/compress/meson.build
 create mode 100644 drivers/compress/octeontx/Makefile
 create mode 100644 drivers/compress/octeontx/include/zip_regs.h
 create mode 100644 drivers/compress/octeontx/meson.build
 create mode 100644 drivers/compress/octeontx/otx_zip.c
 create mode 100644 drivers/compress/octeontx/otx_zip.h
 create mode 100644 drivers/compress/octeontx/otx_zip_pmd.c
 create mode 100644 drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map
 create mode 100644 drivers/compress/qat/meson.build
 create mode 100644 drivers/compress/qat/qat_comp.c
 create mode 100644 drivers/compress/qat/qat_comp.h
 create mode 100644 drivers/compress/qat/qat_comp_pmd.c
 create mode 100644 drivers/compress/qat/qat_comp_pmd.h
 create mode 100644 drivers/compress/qat/rte_pmd_qat_version.map
 create mode 100644 drivers/compress/zlib/Makefile
 create mode 100644 drivers/compress/zlib/meson.build
 create mode 100644 drivers/compress/zlib/rte_pmd_zlib_version.map
 create mode 100644 drivers/compress/zlib/zlib_pmd.c
 create mode 100644 drivers/compress/zlib/zlib_pmd_ops.c
 create mode 100644 drivers/compress/zlib/zlib_pmd_private.h
 create mode 100644 drivers/crypto/ccp/Makefile
 create mode 100644 drivers/crypto/ccp/ccp_crypto.c
 create mode 100644 drivers/crypto/ccp/ccp_crypto.h
 create mode 100644 drivers/crypto/ccp/ccp_dev.c
 create mode 100644 drivers/crypto/ccp/ccp_dev.h
 create mode 100644 drivers/crypto/ccp/ccp_pci.c
 create mode 100644 drivers/crypto/ccp/ccp_pci.h
 create mode 100644 drivers/crypto/ccp/ccp_pmd_ops.c
 create mode 100644 drivers/crypto/ccp/ccp_pmd_private.h
 create mode 100644 drivers/crypto/ccp/meson.build
 create mode 100644 drivers/crypto/ccp/rte_ccp_pmd.c
 create mode 100644 drivers/crypto/ccp/rte_pmd_ccp_version.map
 create mode 100644 drivers/crypto/dpaa2_sec/meson.build
 create mode 100644 drivers/crypto/dpaa_sec/meson.build
 delete mode 100644 drivers/crypto/mrvl/Makefile
 delete mode 100644 drivers/crypto/mrvl/rte_mrvl_compat.h
 delete mode 100644 drivers/crypto/mrvl/rte_mrvl_pmd.c
 delete mode 100644 drivers/crypto/mrvl/rte_mrvl_pmd_ops.c
 delete mode 100644 drivers/crypto/mrvl/rte_mrvl_pmd_private.h
 delete mode 100644 drivers/crypto/mrvl/rte_pmd_mrvl_version.map
 create mode 100644 drivers/crypto/mvsam/Makefile
 create mode 100644 drivers/crypto/mvsam/meson.build
 create mode 100644 drivers/crypto/mvsam/rte_mrvl_compat.h
 create mode 100644 drivers/crypto/mvsam/rte_mrvl_pmd.c
 create mode 100644 drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
 create mode 100644 drivers/crypto/mvsam/rte_mrvl_pmd_private.h
 create mode 100644 drivers/crypto/mvsam/rte_pmd_mvsam_version.map
 create mode 100644 drivers/crypto/openssl/compat.h
 delete mode 100644 drivers/crypto/qat/Makefile
 create mode 100644 drivers/crypto/qat/README
 delete mode 100644 drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
 delete mode 100644 drivers/crypto/qat/qat_adf/icp_qat_fw.h
 delete mode 100644 drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
 delete mode 100644 drivers/crypto/qat/qat_adf/icp_qat_hw.h
 delete mode 100644 drivers/crypto/qat/qat_adf/qat_algs.h
 delete mode 100644 drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
 delete mode 100644 drivers/crypto/qat/qat_crypto.c
 delete mode 100644 drivers/crypto/qat/qat_crypto.h
 delete mode 100644 drivers/crypto/qat/qat_crypto_capabilities.h
 delete mode 100644 drivers/crypto/qat/qat_logs.h
 delete mode 100644 drivers/crypto/qat/qat_qp.c
 create mode 100644 drivers/crypto/qat/qat_sym.c
 create mode 100644 drivers/crypto/qat/qat_sym.h
 create mode 100644 drivers/crypto/qat/qat_sym_capabilities.h
 create mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 create mode 100644 drivers/crypto/qat/qat_sym_pmd.h
 create mode 100644 drivers/crypto/qat/qat_sym_session.c
 create mode 100644 drivers/crypto/qat/qat_sym_session.h
 delete mode 100644 drivers/crypto/qat/rte_pmd_qat_version.map
 delete mode 100644 drivers/crypto/qat/rte_qat_cryptodev.c
 create mode 100644 drivers/crypto/virtio/Makefile
 create mode 100644 drivers/crypto/virtio/meson.build
 create mode 100644 drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map
 create mode 100644 drivers/crypto/virtio/virtio_crypto_algs.h
 create mode 100644 drivers/crypto/virtio/virtio_crypto_capabilities.h
 create mode 100644 drivers/crypto/virtio/virtio_cryptodev.c
 create mode 100644 drivers/crypto/virtio/virtio_cryptodev.h
 create mode 100644 drivers/crypto/virtio/virtio_logs.h
 create mode 100644 drivers/crypto/virtio/virtio_pci.c
 create mode 100644 drivers/crypto/virtio/virtio_pci.h
 create mode 100644 drivers/crypto/virtio/virtio_ring.h
 create mode 100644 drivers/crypto/virtio/virtio_rxtx.c
 create mode 100644 drivers/crypto/virtio/virtqueue.c
 create mode 100644 drivers/crypto/virtio/virtqueue.h
 create mode 100644 drivers/event/dpaa/meson.build
 create mode 100644 drivers/event/dpaa2/meson.build
 create mode 100644 drivers/event/octeontx/ssovf_probe.c
 create mode 100644 drivers/event/octeontx/timvf_evdev.c
 create mode 100644 drivers/event/octeontx/timvf_evdev.h
 create mode 100644 drivers/event/octeontx/timvf_probe.c
 create mode 100644 drivers/event/octeontx/timvf_worker.c
 create mode 100644 drivers/event/octeontx/timvf_worker.h
 create mode 100644 drivers/mempool/bucket/Makefile
 create mode 100644 drivers/mempool/bucket/meson.build
 create mode 100644 drivers/mempool/bucket/rte_mempool_bucket.c
 create mode 100644 drivers/mempool/bucket/rte_mempool_bucket_version.map
 create mode 100644 drivers/mempool/dpaa/meson.build
 create mode 100644 drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h
 create mode 100644 drivers/mempool/dpaa2/meson.build
 create mode 100644 drivers/mempool/dpaa2/rte_dpaa2_mempool.h
 delete mode 100644 drivers/mempool/octeontx/octeontx_mbox.c
 delete mode 100644 drivers/mempool/octeontx/octeontx_mbox.h
 delete mode 100644 drivers/mempool/octeontx/octeontx_ssovf.c
 create mode 100644 drivers/net/ark/meson.build
 create mode 100644 drivers/net/avp/meson.build
 create mode 100644 drivers/net/axgbe/Makefile
 create mode 100644 drivers/net/axgbe/axgbe_common.h
 create mode 100644 drivers/net/axgbe/axgbe_dev.c
 create mode 100644 drivers/net/axgbe/axgbe_ethdev.c
 create mode 100644 drivers/net/axgbe/axgbe_ethdev.h
 create mode 100644 drivers/net/axgbe/axgbe_i2c.c
 create mode 100644 drivers/net/axgbe/axgbe_logs.h
 create mode 100644 drivers/net/axgbe/axgbe_mdio.c
 create mode 100644 drivers/net/axgbe/axgbe_phy.h
 create mode 100644 drivers/net/axgbe/axgbe_phy_impl.c
 create mode 100644 drivers/net/axgbe/axgbe_rxtx.c
 create mode 100644 drivers/net/axgbe/axgbe_rxtx.h
 create mode 100644 drivers/net/axgbe/axgbe_rxtx_vec_sse.c
 create mode 100644 drivers/net/axgbe/meson.build
 create mode 100644 drivers/net/axgbe/rte_pmd_axgbe_version.map
 delete mode 100644 drivers/net/bnx2x/LICENSE.bnx2x_pmd
 create mode 100644 drivers/net/bnx2x/meson.build
 create mode 100644 drivers/net/bnxt/bnxt_flow.c
 create mode 100644 drivers/net/bnxt/bnxt_util.c
 create mode 100644 drivers/net/bnxt/bnxt_util.h
 create mode 100644 drivers/net/bnxt/meson.build
 create mode 100644 drivers/net/bonding/rte_eth_bond_flow.c
 create mode 100644 drivers/net/cxgbe/base/t4_tcb.h
 create mode 100644 drivers/net/cxgbe/base/t4vf_hw.c
 create mode 100644 drivers/net/cxgbe/base/t4vf_hw.h
 create mode 100644 drivers/net/cxgbe/clip_tbl.c
 create mode 100644 drivers/net/cxgbe/clip_tbl.h
 create mode 100644 drivers/net/cxgbe/cxgbe_filter.c
 create mode 100644 drivers/net/cxgbe/cxgbe_filter.h
 create mode 100644 drivers/net/cxgbe/cxgbe_flow.c
 create mode 100644 drivers/net/cxgbe/cxgbe_flow.h
 create mode 100644 drivers/net/cxgbe/cxgbe_ofld.h
 create mode 100644 drivers/net/cxgbe/cxgbe_pfvf.h
 create mode 100644 drivers/net/cxgbe/cxgbevf_ethdev.c
 create mode 100644 drivers/net/cxgbe/cxgbevf_main.c
 create mode 100644 drivers/net/cxgbe/meson.build
 create mode 100644 drivers/net/dpaa/meson.build
 create mode 100644 drivers/net/dpaa2/dpaa2_pmd_logs.h
 create mode 100644 drivers/net/dpaa2/meson.build
 create mode 100644 drivers/net/e1000/e1000_logs.c
 create mode 100644 drivers/net/ena/meson.build
 create mode 100644 drivers/net/enic/meson.build
 create mode 100644 drivers/net/failsafe/meson.build
 create mode 100644 drivers/net/i40e/i40e_vf_representor.c
 create mode 100644 drivers/net/ifc/Makefile
 create mode 100644 drivers/net/ifc/base/ifcvf.c
 create mode 100644 drivers/net/ifc/base/ifcvf.h
 create mode 100644 drivers/net/ifc/base/ifcvf_osdep.h
 create mode 100644 drivers/net/ifc/ifcvf_vdpa.c
 create mode 100644 drivers/net/ifc/meson.build
 create mode 100644 drivers/net/ifc/rte_pmd_ifc_version.map
 create mode 100644 drivers/net/ixgbe/ixgbe_vf_representor.c
 create mode 100644 drivers/net/kni/meson.build
 create mode 100644 drivers/net/liquidio/meson.build
 delete mode 100644 drivers/net/liquidio/rte_pmd_lio_version.map
 create mode 100644 drivers/net/liquidio/rte_pmd_liquidio_version.map
 create mode 100644 drivers/net/mlx4/mlx4_mr.h
 create mode 100644 drivers/net/mlx5/mlx5_mr.h
 create mode 100644 drivers/net/mlx5/mlx5_nl.c
 create mode 100644 drivers/net/mlx5/mlx5_nl_flow.c
 delete mode 100644 drivers/net/mrvl/Makefile
 delete mode 100644 drivers/net/mrvl/mrvl_ethdev.c
 delete mode 100644 drivers/net/mrvl/mrvl_ethdev.h
 delete mode 100644 drivers/net/mrvl/mrvl_qos.c
 delete mode 100644 drivers/net/mrvl/mrvl_qos.h
 delete mode 100644 drivers/net/mrvl/rte_pmd_mrvl_version.map
 create mode 100644 drivers/net/mvpp2/Makefile
 create mode 100644 drivers/net/mvpp2/meson.build
 create mode 100644 drivers/net/mvpp2/mrvl_ethdev.c
 create mode 100644 drivers/net/mvpp2/mrvl_ethdev.h
 create mode 100644 drivers/net/mvpp2/mrvl_flow.c
 create mode 100644 drivers/net/mvpp2/mrvl_qos.c
 create mode 100644 drivers/net/mvpp2/mrvl_qos.h
 create mode 100644 drivers/net/mvpp2/rte_pmd_mvpp2_version.map
 create mode 100644 drivers/net/netvsc/Makefile
 create mode 100644 drivers/net/netvsc/hn_ethdev.c
 create mode 100644 drivers/net/netvsc/hn_logs.h
 create mode 100644 drivers/net/netvsc/hn_nvs.c
 create mode 100644 drivers/net/netvsc/hn_nvs.h
 create mode 100644 drivers/net/netvsc/hn_rndis.c
 create mode 100644 drivers/net/netvsc/hn_rndis.h
 create mode 100644 drivers/net/netvsc/hn_rxtx.c
 create mode 100644 drivers/net/netvsc/hn_var.h
 create mode 100644 drivers/net/netvsc/meson.build
 create mode 100644 drivers/net/netvsc/ndis.h
 create mode 100644 drivers/net/netvsc/rndis.h
 create mode 100644 drivers/net/netvsc/rte_pmd_netvsc_version.map
 create mode 100644 drivers/net/nfp/meson.build
 delete mode 100644 drivers/net/nfp/nfp_net_eth.h
 delete mode 100644 drivers/net/nfp/nfp_nfpu.c
 delete mode 100644 drivers/net/nfp/nfp_nfpu.h
 delete mode 100644 drivers/net/nfp/nfp_nspu.c
 delete mode 100644 drivers/net/nfp/nfp_nspu.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp6000/nfp6000.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_cpp.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_cppcore.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_crc.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_crc.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_hwinfo.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_hwinfo.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_mip.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_mip.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_mutex.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_nffw.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_nffw.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_nsp.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_nsp.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_nsp_cmds.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_nsp_eth.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_resource.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_resource.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_rtsym.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_rtsym.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_target.h
 delete mode 100644 drivers/net/qede/LICENSE.qede_pmd
 create mode 100644 drivers/net/sfc/base/ef10_image.c
 create mode 100644 drivers/net/sfc/base/ef10_signed_image_layout.h
 create mode 100644 drivers/net/sfc/base/efx_regs_mcdi_aoe.h
 create mode 100644 drivers/net/sfc/base/medford2_impl.h
 create mode 100644 drivers/net/sfc/base/medford2_nic.c
 create mode 100644 drivers/net/sfc/sfc_ef10_essb_rx.c
 create mode 100644 drivers/net/sfc/sfc_ef10_rx_ev.h
 create mode 100644 drivers/net/softnic/conn.c
 create mode 100644 drivers/net/softnic/conn.h
 create mode 100644 drivers/net/softnic/firmware.cli
 create mode 100644 drivers/net/softnic/hash_func.h
 create mode 100644 drivers/net/softnic/hash_func_arm64.h
 create mode 100644 drivers/net/softnic/meson.build
 create mode 100644 drivers/net/softnic/parser.c
 create mode 100644 drivers/net/softnic/parser.h
 create mode 100644 drivers/net/softnic/rte_eth_softnic_action.c
 create mode 100644 drivers/net/softnic/rte_eth_softnic_cli.c
 create mode 100644 drivers/net/softnic/rte_eth_softnic_link.c
 create mode 100644 drivers/net/softnic/rte_eth_softnic_mempool.c
 create mode 100644 drivers/net/softnic/rte_eth_softnic_pipeline.c
 create mode 100644 drivers/net/softnic/rte_eth_softnic_swq.c
 create mode 100644 drivers/net/softnic/rte_eth_softnic_tap.c
 create mode 100644 drivers/net/softnic/rte_eth_softnic_thread.c
 delete mode 100644 drivers/net/softnic/rte_pmd_eth_softnic_version.map
 create mode 100644 drivers/net/softnic/rte_pmd_softnic_version.map
 create mode 100644 drivers/net/szedata2/meson.build
 delete mode 100644 drivers/net/szedata2/szedata2_iobuf.c
 delete mode 100644 drivers/net/szedata2/szedata2_iobuf.h
 create mode 100644 drivers/net/szedata2/szedata2_logs.h
 create mode 100644 drivers/net/tap/tap_log.h
 create mode 100644 drivers/net/vhost/meson.build
 create mode 100644 drivers/net/virtio/meson.build
 create mode 100644 drivers/raw/dpaa2_cmdif/Makefile
 create mode 100644 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
 create mode 100644 drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h
 create mode 100644 drivers/raw/dpaa2_cmdif/meson.build
 create mode 100644 drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h
 create mode 100644 drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map
 create mode 100644 drivers/raw/dpaa2_qdma/Makefile
 create mode 100644 drivers/raw/dpaa2_qdma/dpaa2_qdma.c
 create mode 100644 drivers/raw/dpaa2_qdma/dpaa2_qdma.h
 create mode 100644 drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h
 create mode 100644 drivers/raw/dpaa2_qdma/meson.build
 create mode 100644 drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
 create mode 100644 drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map
 create mode 100644 drivers/raw/ifpga_rawdev/Makefile
 create mode 100644 drivers/raw/ifpga_rawdev/base/Makefile
 create mode 100644 drivers/raw/ifpga_rawdev/base/README
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_api.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_api.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_compat.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_defines.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_hw.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_port.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/meson.build
 create mode 100644 drivers/raw/ifpga_rawdev/base/opae_debug.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/opae_debug.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/opae_hw_api.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/opae_hw_api.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/opae_osdep.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h
 create mode 100644 drivers/raw/ifpga_rawdev/ifpga_rawdev.c
 create mode 100644 drivers/raw/ifpga_rawdev/ifpga_rawdev.h
 create mode 100644 drivers/raw/ifpga_rawdev/meson.build
 create mode 100644 drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map
 create mode 100644 drivers/raw/meson.build
 create mode 100644 drivers/raw/skeleton_rawdev/meson.build
 create mode 100644 examples/ethtool/meson.build
 create mode 100644 examples/ip_pipeline/action.c
 create mode 100644 examples/ip_pipeline/action.h
 delete mode 100644 examples/ip_pipeline/app.h
 create mode 100644 examples/ip_pipeline/cli.c
 create mode 100644 examples/ip_pipeline/cli.h
 create mode 100644 examples/ip_pipeline/common.h
 delete mode 100644 examples/ip_pipeline/config/action.cfg
 delete mode 100644 examples/ip_pipeline/config/action.sh
 delete mode 100644 examples/ip_pipeline/config/action.txt
 delete mode 100755 examples/ip_pipeline/config/diagram-generator.py
 delete mode 100644 examples/ip_pipeline/config/edge_router_downstream.cfg
 delete mode 100644 examples/ip_pipeline/config/edge_router_downstream.sh
 delete mode 100644 examples/ip_pipeline/config/edge_router_upstream.cfg
 delete mode 100644 examples/ip_pipeline/config/edge_router_upstream.sh
 delete mode 100644 examples/ip_pipeline/config/firewall.cfg
 delete mode 100644 examples/ip_pipeline/config/firewall.sh
 delete mode 100644 examples/ip_pipeline/config/firewall.txt
 delete mode 100644 examples/ip_pipeline/config/flow.cfg
 delete mode 100644 examples/ip_pipeline/config/flow.sh
 delete mode 100644 examples/ip_pipeline/config/flow.txt
 delete mode 100644 examples/ip_pipeline/config/ip_pipeline.cfg
 delete mode 100644 examples/ip_pipeline/config/ip_pipeline.sh
 delete mode 100644 examples/ip_pipeline/config/kni.cfg
 delete mode 100644 examples/ip_pipeline/config/l2fwd.cfg
 delete mode 100644 examples/ip_pipeline/config/l3fwd.cfg
 delete mode 100644 examples/ip_pipeline/config/l3fwd.sh
 delete mode 100644 examples/ip_pipeline/config/l3fwd_arp.cfg
 delete mode 100644 examples/ip_pipeline/config/l3fwd_arp.sh
 delete mode 100644 examples/ip_pipeline/config/network_layers.cfg
 delete mode 100644 examples/ip_pipeline/config/network_layers.sh
 delete mode 100755 examples/ip_pipeline/config/pipeline-to-core-mapping.py
 delete mode 100644 examples/ip_pipeline/config/tap.cfg
 delete mode 100644 examples/ip_pipeline/config/tm_profile.cfg
 delete mode 100644 examples/ip_pipeline/config_check.c
 delete mode 100644 examples/ip_pipeline/config_parse.c
 delete mode 100644 examples/ip_pipeline/config_parse_tm.c
 create mode 100644 examples/ip_pipeline/conn.c
 create mode 100644 examples/ip_pipeline/conn.h
 delete mode 100644 examples/ip_pipeline/cpu_core_map.c
 delete mode 100644 examples/ip_pipeline/cpu_core_map.h
 create mode 100644 examples/ip_pipeline/examples/firewall.cli
 create mode 100644 examples/ip_pipeline/examples/flow.cli
 create mode 100644 examples/ip_pipeline/examples/kni.cli
 create mode 100644 examples/ip_pipeline/examples/l2fwd.cli
 create mode 100644 examples/ip_pipeline/examples/route.cli
 create mode 100644 examples/ip_pipeline/examples/route_ecmp.cli
 create mode 100644 examples/ip_pipeline/examples/rss.cli
 create mode 100644 examples/ip_pipeline/examples/tap.cli
 create mode 100644 examples/ip_pipeline/hash_func.h
 create mode 100644 examples/ip_pipeline/hash_func_arm64.h
 delete mode 100644 examples/ip_pipeline/init.c
 create mode 100644 examples/ip_pipeline/kni.c
 create mode 100644 examples/ip_pipeline/kni.h
 create mode 100644 examples/ip_pipeline/link.c
 create mode 100644 examples/ip_pipeline/link.h
 create mode 100644 examples/ip_pipeline/mempool.c
 create mode 100644 examples/ip_pipeline/mempool.h
 create mode 100644 examples/ip_pipeline/pipeline.c
 delete mode 100644 examples/ip_pipeline/pipeline/hash_func.h
 delete mode 100644 examples/ip_pipeline/pipeline/hash_func_arm64.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_actions_common.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_common_be.c
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_common_be.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_common_fe.c
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_common_fe.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_firewall.c
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_firewall.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_firewall_be.c
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_firewall_be.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_flow_actions.c
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_flow_actions.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_flow_actions_be.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_flow_classification.c
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_flow_classification.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_master.c
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_master.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_master_be.c
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_master_be.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_passthrough.c
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_passthrough.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_passthrough_be.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_routing.c
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_routing.h
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_routing_be.c
 delete mode 100644 examples/ip_pipeline/pipeline/pipeline_routing_be.h
 delete mode 100644 examples/ip_pipeline/pipeline_be.h
 create mode 100644 examples/ip_pipeline/swq.c
 create mode 100644 examples/ip_pipeline/swq.h
 create mode 100644 examples/ip_pipeline/tap.c
 create mode 100644 examples/ip_pipeline/tap.h
 delete mode 100644 examples/ip_pipeline/thread_fe.c
 delete mode 100644 examples/ip_pipeline/thread_fe.h
 create mode 100644 examples/ip_pipeline/tmgr.c
 create mode 100644 examples/ip_pipeline/tmgr.h
 create mode 100644 examples/l3fwd-power/main.h
 create mode 100644 examples/l3fwd-power/perf_core.c
 create mode 100644 examples/l3fwd-power/perf_core.h
 delete mode 100644 examples/multi_process/l2fwd_fork/Makefile
 delete mode 100644 examples/multi_process/l2fwd_fork/flib.c
 delete mode 100644 examples/multi_process/l2fwd_fork/flib.h
 delete mode 100644 examples/multi_process/l2fwd_fork/main.c
 create mode 100644 examples/multi_process/meson.build
 create mode 100644 examples/netmap_compat/meson.build
 create mode 100644 examples/performance-thread/meson.build
 create mode 100644 examples/quota_watermark/meson.build
 create mode 100644 examples/server_node_efd/meson.build
 create mode 100644 examples/vhost_crypto/Makefile
 create mode 100644 examples/vhost_crypto/main.c
 create mode 100644 examples/vhost_crypto/meson.build
 create mode 100644 examples/vm_power_manager/guest_cli/parse.c
 create mode 100644 examples/vm_power_manager/guest_cli/parse.h
 create mode 100644 examples/vm_power_manager/meson.build
 create mode 100644 examples/vm_power_manager/oob_monitor.h
 create mode 100644 examples/vm_power_manager/oob_monitor_nop.c
 create mode 100644 examples/vm_power_manager/oob_monitor_x86.c
 create mode 100644 examples/vm_power_manager/parse.c
 create mode 100644 examples/vm_power_manager/parse.h
 create mode 100644 kernel/Makefile
 create mode 100644 kernel/freebsd/BSDmakefile.meson
create mode 100644 kernel/freebsd/Makefile create mode 100644 kernel/freebsd/contigmem/BSDmakefile create mode 100644 kernel/freebsd/contigmem/Makefile create mode 100644 kernel/freebsd/contigmem/contigmem.c create mode 100644 kernel/freebsd/contigmem/meson.build create mode 100644 kernel/freebsd/meson.build create mode 100644 kernel/freebsd/nic_uio/BSDmakefile create mode 100644 kernel/freebsd/nic_uio/Makefile create mode 100644 kernel/freebsd/nic_uio/meson.build create mode 100644 kernel/freebsd/nic_uio/nic_uio.c create mode 100644 kernel/linux/Makefile create mode 100644 kernel/linux/igb_uio/Kbuild create mode 100644 kernel/linux/igb_uio/Makefile create mode 100644 kernel/linux/igb_uio/compat.h create mode 100644 kernel/linux/igb_uio/igb_uio.c create mode 100644 kernel/linux/igb_uio/meson.build create mode 100644 kernel/linux/kni/Makefile create mode 100644 kernel/linux/kni/compat.h create mode 100644 kernel/linux/kni/ethtool/README create mode 100644 kernel/linux/kni/ethtool/igb/e1000_82575.c create mode 100644 kernel/linux/kni/ethtool/igb/e1000_82575.h create mode 100644 kernel/linux/kni/ethtool/igb/e1000_api.c create mode 100644 kernel/linux/kni/ethtool/igb/e1000_api.h create mode 100644 kernel/linux/kni/ethtool/igb/e1000_defines.h create mode 100644 kernel/linux/kni/ethtool/igb/e1000_hw.h create mode 100644 kernel/linux/kni/ethtool/igb/e1000_i210.c create mode 100644 kernel/linux/kni/ethtool/igb/e1000_i210.h create mode 100644 kernel/linux/kni/ethtool/igb/e1000_mac.c create mode 100644 kernel/linux/kni/ethtool/igb/e1000_mac.h create mode 100644 kernel/linux/kni/ethtool/igb/e1000_manage.c create mode 100644 kernel/linux/kni/ethtool/igb/e1000_manage.h create mode 100644 kernel/linux/kni/ethtool/igb/e1000_mbx.c create mode 100644 kernel/linux/kni/ethtool/igb/e1000_mbx.h create mode 100644 kernel/linux/kni/ethtool/igb/e1000_nvm.c create mode 100644 kernel/linux/kni/ethtool/igb/e1000_nvm.h create mode 100644 kernel/linux/kni/ethtool/igb/e1000_osdep.h create mode 100644 kernel/linux/kni/ethtool/igb/e1000_phy.c create mode 100644 kernel/linux/kni/ethtool/igb/e1000_phy.h create mode 100644 kernel/linux/kni/ethtool/igb/e1000_regs.h create mode 100644 kernel/linux/kni/ethtool/igb/igb.h create mode 100644 kernel/linux/kni/ethtool/igb/igb_ethtool.c create mode 100644 kernel/linux/kni/ethtool/igb/igb_main.c create mode 100644 kernel/linux/kni/ethtool/igb/igb_param.c create mode 100644 kernel/linux/kni/ethtool/igb/igb_regtest.h create mode 100644 kernel/linux/kni/ethtool/igb/igb_vmdq.c create mode 100644 kernel/linux/kni/ethtool/igb/igb_vmdq.h create mode 100644 kernel/linux/kni/ethtool/igb/kcompat.h create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe.h create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_82598.c create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_82598.h create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_82599.c create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_82599.h create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_api.c create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_api.h create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_common.c create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_common.h create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_dcb.h create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_ethtool.c create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_fcoe.h create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_main.c create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_mbx.h create mode 100644 
kernel/linux/kni/ethtool/ixgbe/ixgbe_osdep.h create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_phy.c create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_phy.h create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_type.h create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_x540.c create mode 100644 kernel/linux/kni/ethtool/ixgbe/ixgbe_x540.h create mode 100644 kernel/linux/kni/ethtool/ixgbe/kcompat.c create mode 100644 kernel/linux/kni/ethtool/ixgbe/kcompat.h create mode 100644 kernel/linux/kni/kni_dev.h create mode 100644 kernel/linux/kni/kni_ethtool.c create mode 100644 kernel/linux/kni/kni_fifo.h create mode 100644 kernel/linux/kni/kni_misc.c create mode 100644 kernel/linux/kni/kni_net.c create mode 100644 kernel/linux/meson.build create mode 100644 kernel/meson.build create mode 100644 lib/librte_bpf/Makefile create mode 100644 lib/librte_bpf/bpf.c create mode 100644 lib/librte_bpf/bpf_def.h create mode 100644 lib/librte_bpf/bpf_exec.c create mode 100644 lib/librte_bpf/bpf_impl.h create mode 100644 lib/librte_bpf/bpf_jit_x86.c create mode 100644 lib/librte_bpf/bpf_load.c create mode 100644 lib/librte_bpf/bpf_load_elf.c create mode 100644 lib/librte_bpf/bpf_pkt.c create mode 100644 lib/librte_bpf/bpf_validate.c create mode 100644 lib/librte_bpf/meson.build create mode 100644 lib/librte_bpf/rte_bpf.h create mode 100644 lib/librte_bpf/rte_bpf_ethdev.h create mode 100644 lib/librte_bpf/rte_bpf_version.map create mode 100644 lib/librte_compressdev/Makefile create mode 100644 lib/librte_compressdev/meson.build create mode 100644 lib/librte_compressdev/rte_comp.c create mode 100644 lib/librte_compressdev/rte_comp.h create mode 100644 lib/librte_compressdev/rte_compressdev.c create mode 100644 lib/librte_compressdev/rte_compressdev.h create mode 100644 lib/librte_compressdev/rte_compressdev_internal.h create mode 100644 lib/librte_compressdev/rte_compressdev_pmd.c create mode 100644 lib/librte_compressdev/rte_compressdev_pmd.h create mode 100644 lib/librte_compressdev/rte_compressdev_version.map create mode 100644 lib/librte_cryptodev/rte_crypto_asym.h delete mode 100644 lib/librte_eal/bsdapp/contigmem/BSDmakefile delete mode 100644 lib/librte_eal/bsdapp/contigmem/Makefile delete mode 100644 lib/librte_eal/bsdapp/contigmem/contigmem.c delete mode 100644 lib/librte_eal/bsdapp/contigmem/meson.build create mode 100644 lib/librte_eal/bsdapp/eal/eal_alarm_private.h create mode 100644 lib/librte_eal/bsdapp/eal/eal_cpuflags.c create mode 100644 lib/librte_eal/bsdapp/eal/eal_dev.c create mode 100644 lib/librte_eal/bsdapp/eal/eal_memalloc.c delete mode 100644 lib/librte_eal/bsdapp/nic_uio/BSDmakefile delete mode 100644 lib/librte_eal/bsdapp/nic_uio/Makefile delete mode 100644 lib/librte_eal/bsdapp/nic_uio/meson.build delete mode 100644 lib/librte_eal/bsdapp/nic_uio/nic_uio.c create mode 100644 lib/librte_eal/common/eal_common_class.c create mode 100644 lib/librte_eal/common/eal_common_fbarray.c create mode 100644 lib/librte_eal/common/eal_common_memalloc.c create mode 100644 lib/librte_eal/common/eal_common_uuid.c create mode 100644 lib/librte_eal/common/eal_memalloc.h create mode 100644 lib/librte_eal/common/include/rte_class.h create mode 100644 lib/librte_eal/common/include/rte_fbarray.h create mode 100644 lib/librte_eal/common/include/rte_uuid.h create mode 100644 lib/librte_eal/common/malloc_mp.c create mode 100644 lib/librte_eal/common/malloc_mp.h create mode 100644 lib/librte_eal/linuxapp/eal/eal_cpuflags.c create mode 100644 lib/librte_eal/linuxapp/eal/eal_dev.c create mode 100644 
lib/librte_eal/linuxapp/eal/eal_memalloc.c delete mode 100644 lib/librte_eal/linuxapp/igb_uio/Kbuild delete mode 100644 lib/librte_eal/linuxapp/igb_uio/Makefile delete mode 100644 lib/librte_eal/linuxapp/igb_uio/compat.h delete mode 100644 lib/librte_eal/linuxapp/igb_uio/igb_uio.c delete mode 100644 lib/librte_eal/linuxapp/igb_uio/meson.build delete mode 100644 lib/librte_eal/linuxapp/kni/Makefile delete mode 100644 lib/librte_eal/linuxapp/kni/compat.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/README delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_defines.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_hw.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_osdep.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_regs.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/igb_param.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/igb_regtest.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_dcb.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_fcoe.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c delete mode 100644 
lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_type.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.h delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c delete mode 100644 lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h delete mode 100644 lib/librte_eal/linuxapp/kni/kni_dev.h delete mode 100644 lib/librte_eal/linuxapp/kni/kni_ethtool.c delete mode 100644 lib/librte_eal/linuxapp/kni/kni_fifo.h delete mode 100644 lib/librte_eal/linuxapp/kni/kni_misc.c delete mode 100644 lib/librte_eal/linuxapp/kni/kni_net.c create mode 100644 lib/librte_ethdev/Makefile create mode 100644 lib/librte_ethdev/ethdev_profile.c create mode 100644 lib/librte_ethdev/ethdev_profile.h create mode 100644 lib/librte_ethdev/meson.build create mode 100644 lib/librte_ethdev/rte_dev_info.h create mode 100644 lib/librte_ethdev/rte_eth_ctrl.h create mode 100644 lib/librte_ethdev/rte_ethdev.c create mode 100644 lib/librte_ethdev/rte_ethdev.h create mode 100644 lib/librte_ethdev/rte_ethdev_core.h create mode 100644 lib/librte_ethdev/rte_ethdev_driver.h create mode 100644 lib/librte_ethdev/rte_ethdev_pci.h create mode 100644 lib/librte_ethdev/rte_ethdev_vdev.h create mode 100644 lib/librte_ethdev/rte_ethdev_version.map create mode 100644 lib/librte_ethdev/rte_flow.c create mode 100644 lib/librte_ethdev/rte_flow.h create mode 100644 lib/librte_ethdev/rte_flow_driver.h create mode 100644 lib/librte_ethdev/rte_mtr.c create mode 100644 lib/librte_ethdev/rte_mtr.h create mode 100644 lib/librte_ethdev/rte_mtr_driver.h create mode 100644 lib/librte_ethdev/rte_tm.c create mode 100644 lib/librte_ethdev/rte_tm.h create mode 100644 lib/librte_ethdev/rte_tm_driver.h delete mode 100644 lib/librte_ether/Makefile delete mode 100644 lib/librte_ether/ethdev_profile.c delete mode 100644 lib/librte_ether/ethdev_profile.h delete mode 100644 lib/librte_ether/meson.build delete mode 100644 lib/librte_ether/rte_dev_info.h delete mode 100644 lib/librte_ether/rte_eth_ctrl.h delete mode 100644 lib/librte_ether/rte_ethdev.c delete mode 100644 lib/librte_ether/rte_ethdev.h delete mode 100644 lib/librte_ether/rte_ethdev_core.h delete mode 100644 lib/librte_ether/rte_ethdev_driver.h delete mode 100644 lib/librte_ether/rte_ethdev_pci.h delete mode 100644 lib/librte_ether/rte_ethdev_vdev.h delete mode 100644 lib/librte_ether/rte_ethdev_version.map delete mode 100644 lib/librte_ether/rte_flow.c delete mode 100644 lib/librte_ether/rte_flow.h delete mode 100644 lib/librte_ether/rte_flow_driver.h delete mode 100644 lib/librte_ether/rte_mtr.c delete mode 100644 lib/librte_ether/rte_mtr.h delete mode 100644 lib/librte_ether/rte_mtr_driver.h delete mode 100644 lib/librte_ether/rte_tm.c delete mode 100644 lib/librte_ether/rte_tm.h delete mode 100644 lib/librte_ether/rte_tm_driver.h create mode 100644 lib/librte_eventdev/rte_event_crypto_adapter.c create mode 100644 lib/librte_eventdev/rte_event_crypto_adapter.h create mode 100644 lib/librte_eventdev/rte_event_timer_adapter.c create mode 100644 lib/librte_eventdev/rte_event_timer_adapter.h create mode 100644 lib/librte_eventdev/rte_event_timer_adapter_pmd.h create mode 100644 lib/librte_gso/gso_udp4.c create 
mode 100644 lib/librte_gso/gso_udp4.h delete mode 100644 lib/librte_hash/rte_cuckoo_hash_x86.h create mode 100644 lib/librte_mempool/rte_mempool_ops_default.c create mode 100644 lib/librte_pipeline/rte_port_in_action.c create mode 100644 lib/librte_pipeline/rte_port_in_action.h create mode 100644 lib/librte_pipeline/rte_table_action.c create mode 100644 lib/librte_pipeline/rte_table_action.h create mode 100644 lib/librte_rawdev/meson.build create mode 100644 lib/librte_table/rte_table_hash_cuckoo.h create mode 100644 lib/librte_vhost/rte_vdpa.h create mode 100644 lib/librte_vhost/rte_vhost_crypto.h create mode 100644 lib/librte_vhost/vdpa.c create mode 100644 lib/librte_vhost/vhost_crypto.c create mode 100644 lib/librte_vhost/virtio_crypto.h create mode 100644 test/bpf/dummy.c create mode 100644 test/bpf/mbuf.h create mode 100644 test/bpf/t1.c create mode 100644 test/bpf/t2.c create mode 100644 test/bpf/t3.c create mode 100644 test/test/test_bpf.c create mode 100644 test/test/test_compressdev.c create mode 100644 test/test/test_compressdev_test_buffer.h create mode 100644 test/test/test_cryptodev_asym.c create mode 100644 test/test/test_cryptodev_asym_util.h create mode 100644 test/test/test_cryptodev_dh_test_vectors.h create mode 100644 test/test/test_cryptodev_dsa_test_vectors.h create mode 100644 test/test/test_cryptodev_mod_test_vectors.h create mode 100644 test/test/test_cryptodev_rsa_test_vectors.h delete mode 100644 test/test/test_devargs.c create mode 100644 test/test/test_event_crypto_adapter.c create mode 100644 test/test/test_event_timer_adapter.c create mode 100644 test/test/test_fbarray.c create mode 100644 test/test/test_hash_readwrite.c diff --git a/.gitignore b/.gitignore index 6df5ba06..9105e26c 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ doc/guides/cryptodevs/overview_feature_table.txt doc/guides/cryptodevs/overview_cipher_table.txt doc/guides/cryptodevs/overview_auth_table.txt doc/guides/cryptodevs/overview_aead_table.txt +doc/guides/compressdevs/overview_feature_table.txt cscope.out.po cscope.out.in cscope.out diff --git a/GNUmakefile b/GNUmakefile index d07fef0d..ae80720e 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -12,7 +12,7 @@ export RTE_SDK # directory list # -ROOTDIRS-y := buildtools lib drivers app +ROOTDIRS-y := buildtools lib kernel drivers app ROOTDIRS- := test include $(RTE_SDK)/mk/rte.sdkroot.mk diff --git a/MAINTAINERS b/MAINTAINERS index a646ca3e..9fd258fa 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -27,6 +27,41 @@ M: Thomas Monjalon M: Ferruh Yigit T: git://dpdk.org/dpdk +Next-net Tree +M: Ferruh Yigit +T: git://dpdk.org/next/dpdk-next-net + +Next-net-intel Tree +M: Qi Zhang +M: Beilei Xing +T: git://dpdk.org/next/dpdk-next-net-intel + +Next-net-mlx Tree +M: Shahaf Shuler +T: git://dpdk.org/next/dpdk-next-net-mlx + +Next-virtio Tree +M: Maxime Coquelin +M: Tiwei Bie +T: git://dpdk.org/next/dpdk-next-virtio + +Next-crypto Tree +M: Pablo de Lara +M: Akhil Goyal +T: git://dpdk.org/next/dpdk-next-crypto + +Next-eventdev Tree +M: Jerin Jacob +T: git://dpdk.org/next/dpdk-next-eventdev + +Next-qos Tree +M: Cristian Dumitrescu +T: git://dpdk.org/next/dpdk-next-qos + +Next-pipeline Tree +M: Cristian Dumitrescu +T: git://dpdk.org/next/dpdk-next-pipeline + Stable Branches M: Yuanhan Liu M: Luca Boccassi @@ -48,11 +83,13 @@ F: devtools/check-dup-includes.sh F: devtools/check-maintainers.sh F: devtools/check-git-log.sh F: devtools/check-includes.sh +F: devtools/check-symbol-maps.sh F: devtools/checkpatches.sh F: devtools/get-maintainer.sh F: 
devtools/git-log-fixes.sh F: devtools/load-devel-config F: devtools/test-build.sh +F: devtools/test-meson-builds.sh F: license/ @@ -87,6 +124,7 @@ M: Neil Horman F: lib/librte_compat/ F: doc/guides/rel_notes/deprecation.rst F: devtools/validate-abi.sh +F: devtools/check-symbol-change.sh F: buildtools/check-experimental-syms.sh Driver information @@ -113,7 +151,6 @@ F: test/test/test_common.c F: test/test/test_cpuflags.c F: test/test/test_cycles.c F: test/test/test_debug.c -F: test/test/test_devargs.c F: test/test/test_eal* F: test/test/test_errno.c F: test/test/test_interrupts.c @@ -130,13 +167,18 @@ F: test/test/test_version.c Memory Allocation M: Anatoly Burakov +F: lib/librte_eal/common/include/rte_fbarray.h F: lib/librte_eal/common/include/rte_mem* F: lib/librte_eal/common/include/rte_malloc.h F: lib/librte_eal/common/*malloc* +F: lib/librte_eal/common/eal_common_fbarray.c F: lib/librte_eal/common/eal_common_mem* F: lib/librte_eal/common/eal_hugepages.h +F: lib/librte_eal/linuxapp/eal/eal_mem* +F: lib/librte_eal/bsdapp/eal/eal_mem* F: doc/guides/prog_guide/env_abstraction_layer.rst F: test/test/test_func_reentrancy.c +F: test/test/test_fbarray.c F: test/test/test_malloc.c F: test/test/test_memory.c F: test/test/test_memzone.c @@ -156,7 +198,7 @@ F: test/test/test_mp_secondary.c F: examples/multi_process/ F: doc/guides/sample_app_ug/multi_process.rst -Service Cores - EXPERIMENTAL +Service Cores M: Harry van Haaren F: lib/librte_eal/common/include/rte_service.h F: lib/librte_eal/common/include/rte_service_component.h @@ -171,13 +213,13 @@ F: test/test/test_bitmap.c ARM v7 M: Jan Viktorin -M: Jianbo Liu +M: Gavin Hu F: lib/librte_eal/common/arch/arm/ F: lib/librte_eal/common/include/arch/arm/ ARM v8 M: Jerin Jacob -M: Jianbo Liu +M: Gavin Hu F: lib/librte_eal/common/include/arch/arm/*_64.h F: lib/librte_net/net_crc_neon.h F: lib/librte_acl/acl_run_neon.* @@ -209,7 +251,7 @@ F: doc/guides/linux_gsg/ Linux UIO M: Ferruh Yigit -F: lib/librte_eal/linuxapp/igb_uio/ +F: kernel/linux/igb_uio/ F: drivers/bus/pci/linux/*uio* Linux VFIO @@ -225,11 +267,11 @@ F: doc/guides/freebsd_gsg/ FreeBSD contigmem M: Bruce Richardson -F: lib/librte_eal/bsdapp/contigmem/ +F: kernel/freebsd/contigmem/ FreeBSD UIO M: Bruce Richardson -F: lib/librte_eal/bsdapp/nic_uio/ +F: kernel/freebsd/nic_uio/ Core Libraries @@ -237,6 +279,7 @@ Core Libraries Memory pool M: Olivier Matz +M: Andrew Rybchenko F: lib/librte_mempool/ F: drivers/mempool/Makefile F: drivers/mempool/ring/ @@ -260,29 +303,34 @@ F: test/test/test_mbuf.c Ethernet API M: Thomas Monjalon +M: Ferruh Yigit +M: Andrew Rybchenko T: git://dpdk.org/next/dpdk-next-net -F: lib/librte_ether/ +F: lib/librte_ethdev/ F: devtools/test-null.sh Flow API M: Adrien Mazarguil T: git://dpdk.org/next/dpdk-next-net -F: lib/librte_ether/rte_flow* +F: app/test-pmd/cmdline_flow.c +F: doc/guides/prog_guide/rte_flow.rst +F: lib/librte_ethdev/rte_flow* Traffic Management API - EXPERIMENTAL M: Cristian Dumitrescu T: git://dpdk.org/next/dpdk-next-tm -F: lib/librte_ether/rte_tm* +F: lib/librte_ethdev/rte_tm* Traffic Metering and Policing API - EXPERIMENTAL M: Cristian Dumitrescu -F: lib/librte_ether/rte_mtr* +F: lib/librte_ethdev/rte_mtr* Baseband API - EXPERIMENTAL M: Amr Mokhtar +T: git://dpdk.org/next/dpdk-next-crypto F: lib/librte_bbdev/ F: doc/guides/prog_guide/bbdev.rst -F: drivers/bbdev/ +F: drivers/baseband/ F: doc/guides/bbdevs/ F: app/test-bbdev/ F: doc/guides/tools/testbbdev.rst @@ -300,9 +348,21 @@ F: examples/l2fwd-crypto/ Security API - EXPERIMENTAL M: Akhil Goyal M: 
Declan Doherty +T: git://dpdk.org/next/dpdk-next-crypto F: lib/librte_security/ F: doc/guides/prog_guide/rte_security.rst +Compression API - EXPERIMENTAL +M: Fiona Trahe +M: Pablo de Lara +M: Ashish Gupta +T: git://dpdk.org/next/dpdk-next-crypto +F: lib/librte_compressdev/ +F: drivers/compress/ +F: test/test/test_compressdev* +F: doc/guides/prog_guide/compressdev.rst +F: doc/guides/compressdevs/features/default.ini + Eventdev API M: Jerin Jacob T: git://dpdk.org/next/dpdk-next-eventdev @@ -317,6 +377,20 @@ F: lib/librte_eventdev/*eth_rx_adapter* F: test/test/test_event_eth_rx_adapter.c F: doc/guides/prog_guide/event_ethernet_rx_adapter.rst +Eventdev Timer Adapter API - EXPERIMENTAL +M: Erik Gabriel Carrillo +T: git://dpdk.org/next/dpdk-next-eventdev +F: lib/librte_eventdev/*timer_adapter* +F: test/test/test_event_timer_adapter.c +F: doc/guides/prog_guide/event_timer_adapter.rst + +Eventdev Crypto Adapter API - EXPERIMENTAL +M: Abhinandan Gujjar +T: git://dpdk.org/next/dpdk-next-eventdev +F: lib/librte_eventdev/*crypto_adapter* +F: test/test/test_event_crypto_adapter.c +F: doc/guides/prog_guide/event_crypto_adapter.rst + Raw device API - EXPERIMENTAL M: Shreyansh Jain M: Hemant Agrawal @@ -326,9 +400,22 @@ F: test/test/test_rawdev.c F: doc/guides/prog_guide/rawdev.rst +Memory Pool Drivers +------------------- + +Bucket memory pool +M: Artem V. Andreev +M: Andrew Rybchenko +F: drivers/mempool/bucket/ + + Bus Drivers ----------- +Intel FPGA bus +M: Rosen Xu +F: drivers/bus/ifpga/ + NXP buses M: Hemant Agrawal M: Shreyansh Jain @@ -339,9 +426,12 @@ PCI bus driver F: drivers/bus/pci/ VDEV bus driver -M: Jianfeng Tan F: drivers/bus/vdev/ +VMBUS bus driver +M: Stephen Hemminger +F: drivers/bus/vmbus/ + Networking Drivers ------------------ @@ -351,6 +441,7 @@ F: doc/guides/nics/features/default.ini Link bonding M: Declan Doherty +M: Chas Williams F: drivers/net/bonding/ F: doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst F: test/test/test_link_bonding* @@ -359,7 +450,7 @@ F: doc/guides/nics/features/bonding.ini Linux KNI M: Ferruh Yigit -F: lib/librte_eal/linuxapp/kni/ +F: kernel/linux/kni/ F: lib/librte_kni/ F: doc/guides/prog_guide/kernel_nic_interface.rst F: test/test/test_kni.c @@ -380,6 +471,12 @@ F: drivers/net/ena/ F: doc/guides/nics/ena.rst F: doc/guides/nics/features/ena.ini +AMD axgbe +M: Ravi Kumar +F: drivers/net/axgbe/ +F: doc/guides/nics/axgbe.rst +F: doc/guides/nics/features/axgbe.ini + Atomic Rules ARK M: Shepard Siegel M: Ed Czeck @@ -412,6 +509,7 @@ F: doc/guides/nics/features/liquidio.ini Cavium OcteonTX M: Santosh Shukla M: Jerin Jacob +F: drivers/common/octeontx/ F: drivers/mempool/octeontx/ F: drivers/net/octeontx/ F: doc/guides/nics/octeontx.rst @@ -462,48 +560,63 @@ M: Qi Zhang M: Xiao Wang T: git://dpdk.org/next/dpdk-next-net-intel F: drivers/net/fm10k/ +F: doc/guides/nics/fm10k.rst F: doc/guides/nics/features/fm10k*.ini Intel avf M: Jingjing Wu M: Wenzhuo Lu +T: git://dpdk.org/next/dpdk-next-net-intel F: drivers/net/avf/ F: doc/guides/nics/features/avf*.ini +Intel ifc +M: Xiao Wang +T: git://dpdk.org/next/dpdk-next-net-intel +F: drivers/net/ifc/ +F: doc/guides/nics/ifc.rst +F: doc/guides/nics/features/ifc*.ini + +Marvell mvpp2 +M: Tomasz Duszynski +M: Dmitri Epshtein +M: Natalie Samsonov +F: drivers/net/mvpp2/ +F: doc/guides/nics/mvpp2.rst +F: doc/guides/nics/features/mvpp2.ini + Mellanox mlx4 -M: Adrien Mazarguil +M: Matan Azrad +M: Shahaf Shuler T: git://dpdk.org/next/dpdk-next-net-mlx F: drivers/net/mlx4/ F: doc/guides/nics/mlx4.rst F: 
doc/guides/nics/features/mlx4.ini Mellanox mlx5 -M: Adrien Mazarguil -M: Nelio Laranjeiro +M: Shahaf Shuler M: Yongseok Koh T: git://dpdk.org/next/dpdk-next-net-mlx F: drivers/net/mlx5/ F: doc/guides/nics/mlx5.rst F: doc/guides/nics/features/mlx5.ini -Marvell mrvl -M: Jacek Siuda -M: Tomasz Duszynski -M: Dmitri Epshtein -M: Natalie Samsonov -M: Jianbo Liu -F: drivers/net/mrvl/ -F: doc/guides/nics/mrvl.rst -F: doc/guides/nics/features/mrvl.ini - Microsoft vdev_netvsc - EXPERIMENTAL M: Matan Azrad F: drivers/net/vdev_netvsc/ F: doc/guides/nics/vdev_netvsc.rst F: doc/guides/nics/features/vdev_netvsc.ini +Microsoft Hyper-V netvsc - EXPERIMENTAL +M: Stephen Hemminger +M: K. Y. Srinivasan +M: Haiyang Zhang +F: drivers/net/netvsc/ +F: doc/guides/nics/netvsc.rst +F: doc/guides/nics/features/netvsc.ini + Netcope szedata2 -M: Matej Vido +M: Jan Remes F: drivers/net/szedata2/ F: doc/guides/nics/szedata2.rst F: doc/guides/nics/features/szedata2.ini @@ -552,14 +665,15 @@ F: doc/guides/nics/sfc_efx.rst F: doc/guides/nics/features/sfc_efx.ini VMware vmxnet3 -M: Shrikrishna Khare +M: Yong Wang F: drivers/net/vmxnet3/ F: doc/guides/nics/vmxnet3.rst F: doc/guides/nics/features/vmxnet3.ini Vhost-user -M: Yuanhan Liu M: Maxime Coquelin +M: Tiwei Bie +M: Zhihong Wang T: git://dpdk.org/next/dpdk-next-virtio F: lib/librte_vhost/ F: doc/guides/prog_guide/vhost_lib.rst @@ -567,19 +681,21 @@ F: examples/vhost/ F: doc/guides/sample_app_ug/vhost.rst F: examples/vhost_scsi/ F: doc/guides/sample_app_ug/vhost_scsi.rst +F: examples/vhost_crypto/ Vhost PMD -M: Tetsuya Mukawa -M: Yuanhan Liu M: Maxime Coquelin +M: Tiwei Bie +M: Zhihong Wang T: git://dpdk.org/next/dpdk-next-virtio F: drivers/net/vhost/ +F: doc/guides/nics/vhost.rst F: doc/guides/nics/features/vhost.ini Virtio PMD -M: Yuanhan Liu M: Maxime Coquelin M: Tiwei Bie +M: Zhihong Wang T: git://dpdk.org/next/dpdk-next-virtio F: drivers/net/virtio/ F: doc/guides/nics/virtio.rst @@ -599,7 +715,7 @@ F: doc/guides/nics/pcap_ring.rst F: doc/guides/nics/features/pcap.ini Tap PMD -M: Pascal Mazon +M: Keith Wiles F: drivers/net/tap/ F: doc/guides/nics/tap.rst F: doc/guides/nics/features/tap.ini @@ -627,11 +743,14 @@ Fail-safe PMD M: Gaetan Rivet F: drivers/net/failsafe/ F: doc/guides/nics/fail_safe.rst +F: doc/guides/nics/features/failsafe.ini Softnic PMD M: Jasvinder Singh M: Cristian Dumitrescu F: drivers/net/softnic/ +F: doc/guides/nics/features/softnic.ini +F: doc/guides/nics/softnic.rst Crypto Drivers @@ -640,6 +759,12 @@ M: Pablo de Lara T: git://dpdk.org/next/dpdk-next-crypto F: doc/guides/cryptodevs/features/default.ini +AMD CCP Crypto +M: Ravi Kumar +F: drivers/crypto/ccp/ +F: doc/guides/cryptodevs/ccp.rst +F: doc/guides/cryptodevs/features/ccp.ini + ARMv8 Crypto M: Jerin Jacob F: drivers/crypto/armv8/ @@ -668,6 +793,7 @@ M: John Griffin M: Fiona Trahe M: Deepak Kumar Jain F: drivers/crypto/qat/ +F: drivers/common/qat/ F: doc/guides/cryptodevs/qat.rst F: doc/guides/cryptodevs/features/qat.ini @@ -678,14 +804,12 @@ F: doc/guides/cryptodevs/kasumi.rst F: doc/guides/cryptodevs/features/kasumi.ini Marvell Mrvl -M: Jacek Siuda M: Tomasz Duszynski M: Dmitri Epshtein M: Natalie Samsonov -M: Jianbo Liu -F: drivers/crypto/mrvl/ -F: doc/guides/cryptodevs/mrvl.rst -F: doc/guides/cryptodevs/features/mrvl.ini +F: drivers/crypto/mvsam/ +F: doc/guides/cryptodevs/mvsam.rst +F: doc/guides/cryptodevs/features/mvsam.ini Null Crypto M: Declan Doherty @@ -719,6 +843,12 @@ F: drivers/crypto/snow3g/ F: doc/guides/cryptodevs/snow3g.rst F: doc/guides/cryptodevs/features/snow3g.ini +Virtio 
+M: Jay Zhou +F: drivers/crypto/virtio/ +F: doc/guides/cryptodevs/virtio.rst +F: doc/guides/cryptodevs/features/virtio.ini + ZUC M: Pablo de Lara F: drivers/crypto/zuc/ @@ -726,6 +856,35 @@ F: doc/guides/cryptodevs/zuc.rst F: doc/guides/cryptodevs/features/zuc.ini +Compression Drivers +------------------- +M: Pablo de Lara +T: git://dpdk.org/next/dpdk-next-crypto + +Cavium OCTEONTX zipvf +M: Ashish Gupta +F: drivers/compress/octeontx/ +F: doc/guides/compressdevs/octeontx.rst +F: doc/guides/compressdevs/features/octeontx.ini + +Intel QuickAssist +M: Fiona Trahe +F: drivers/compress/qat/ +F: drivers/common/qat/ + +ISA-L +M: Lee Daly +F: drivers/compress/isal/ +F: doc/guides/compressdevs/isal.rst +F: doc/guides/compressdevs/features/isal.ini + +ZLIB +M: Sunila Sahu +F: drivers/compress/zlib/ +F: doc/guides/compressdevs/zlib.rst +F: doc/guides/compressdevs/features/zlib.ini + + Eventdev Drivers ---------------- M: Jerin Jacob @@ -737,11 +896,9 @@ M: Santosh Shukla F: drivers/event/octeontx/ F: doc/guides/eventdevs/octeontx.rst -NXP DPAA2 eventdev -M: Hemant Agrawal -M: Nipun Gupta -F: drivers/event/dpaa2/ -F: doc/guides/eventdevs/dpaa2.rst +Cavium OCTEONTX timvf +M: Pavan Nikhilesh +F: drivers/event/octeontx/timvf_* NXP DPAA eventdev M: Hemant Agrawal @@ -749,6 +906,12 @@ M: Sunil Kumar Kori F: drivers/event/dpaa/ F: doc/guides/eventdevs/dpaa.rst +NXP DPAA2 eventdev +M: Hemant Agrawal +M: Nipun Gupta +F: drivers/event/dpaa2/ +F: doc/guides/eventdevs/dpaa2.rst + Software Eventdev PMD M: Harry van Haaren F: drivers/event/sw/ @@ -763,6 +926,26 @@ F: drivers/event/opdl/ F: doc/guides/eventdevs/opdl.rst +Rawdev Drivers +-------------- + +Intel FPGA +M: Rosen Xu +M: Tianfei zhang +F: drivers/raw/ifpga_rawdev/ +F: doc/guides/rawdevs/ifpga_rawdev.rst + +NXP DPAA2 QDMA +M: Nipun Gupta +F: drivers/raw/dpaa2_qdma/ +F: doc/guides/rawdevs/dpaa2_qdma.rst + +DPAA2 CMDIF +M: Nipun Gupta +F: drivers/raw/dpaa2_cmdif/ +F: doc/guides/rawdevs/dpaa2_cmdif.rst + + Packet processing ----------------- @@ -792,7 +975,6 @@ F: doc/guides/prog_guide/generic_receive_offload_lib.rst Generic Segmentation Offload M: Jiayu Hu -M: Mark Kavanagh F: lib/librte_gso/ F: doc/guides/prog_guide/generic_segmentation_offload_lib.rst @@ -837,6 +1019,7 @@ F: doc/guides/prog_guide/pdump_lib.rst F: app/pdump/ F: doc/guides/tools/pdump.rst + Packet Framework ---------------- M: Cristian Dumitrescu @@ -903,6 +1086,7 @@ F: test/test/test_meter.c F: examples/qos_meter/ F: doc/guides/sample_app_ug/qos_metering.rst + Other libraries --------------- @@ -965,6 +1149,13 @@ Latency statistics M: Reshma Pattan F: lib/librte_latencystats/ +BPF - EXPERIMENTAL +M: Konstantin Ananyev +F: lib/librte_bpf/ +F: test/bpf/ +F: test/test/test_bpf.c +F: doc/guides/prog_guide/bpf_lib.rst + Test Applications ----------------- @@ -988,6 +1179,7 @@ F: test/test/virtual_pmd.h Driver testing tool M: Wenzhuo Lu M: Jingjing Wu +M: Bernard Iremonger F: app/test-pmd/ F: doc/guides/testpmd_app_ug/ @@ -1006,7 +1198,7 @@ F: test/test/test_event_ring.c Procinfo tool M: Maryam Tahhan M: Reshma Pattan -F: app/proc_info/ +F: app/proc-info/ F: doc/guides/tools/proc_info.rst @@ -1086,7 +1278,7 @@ M: John McNamara F: examples/skeleton/ F: doc/guides/sample_app_ug/skeleton.rst -M: Jijiang Liu +M: Xiaoyun Li F: examples/tep_termination/ F: examples/vmdq/ diff --git a/app/Makefile b/app/Makefile index 0eaed538..069fa984 100644 --- a/app/Makefile +++ b/app/Makefile @@ -4,7 +4,7 @@ include $(RTE_SDK)/mk/rte.vars.mk DIRS-$(CONFIG_RTE_TEST_PMD) += test-pmd -DIRS-$(CONFIG_RTE_PROC_INFO) += 
proc_info +DIRS-$(CONFIG_RTE_PROC_INFO) += proc-info DIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += pdump ifeq ($(CONFIG_RTE_LIBRTE_BBDEV),y) diff --git a/app/meson.build b/app/meson.build index 0088de46..99e0b93e 100644 --- a/app/meson.build +++ b/app/meson.build @@ -1,5 +1,57 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation -subdir('test-pmd') -subdir('test-eventdev') +apps = ['pdump', + 'proc-info', + 'test-bbdev', + 'test-crypto-perf', + 'test-eventdev', + 'test-pmd'] + +# for BSD only +lib_execinfo = cc.find_library('execinfo', required: false) + +foreach app:apps + build = true + name = app + allow_experimental_apis = false + sources = [] + includes = [] + cflags = machine_args + objs = [] # other object files to link against, used e.g. for + # instruction-set optimized versions of code + + # use "deps" for internal DPDK dependencies, and "ext_deps" for + # external package/library requirements + ext_deps = [] + deps = [] + + subdir(name) + + if build + dep_objs = [] + foreach d:deps + dep_objs += get_variable(get_option('default_library') + + '_rte_' + d) + endforeach + dep_objs += lib_execinfo + + link_libs = [] + if get_option('default_library') == 'static' + link_libs = dpdk_drivers + endif + + if allow_experimental_apis + cflags += '-DALLOW_EXPERIMENTAL_API' + endif + + executable('dpdk-' + name, + sources, + c_args: cflags, + link_whole: link_libs, + dependencies: dep_objs, + install_rpath: join_paths(get_option('prefix'), + driver_install_path), + install: true) + endif +endforeach diff --git a/app/pdump/main.c b/app/pdump/main.c index f6865bdb..ac228712 100644 --- a/app/pdump/main.c +++ b/app/pdump/main.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #define CMD_LINE_OPT_PDUMP "pdump" @@ -36,11 +37,10 @@ #define PDUMP_RING_SIZE_ARG "ring-size" #define PDUMP_MSIZE_ARG "mbuf-size" #define PDUMP_NUM_MBUFS_ARG "total-num-mbufs" -#define CMD_LINE_OPT_SER_SOCK_PATH "server-socket-path" -#define CMD_LINE_OPT_CLI_SOCK_PATH "client-socket-path" -#define VDEV_PCAP "net_pcap_%s_%d,tx_pcap=%s" -#define VDEV_IFACE "net_pcap_%s_%d,tx_iface=%s" +#define VDEV_NAME_FMT "net_pcap_%s_%d" +#define VDEV_PCAP_ARGS_FMT "tx_pcap=%s" +#define VDEV_IFACE_ARGS_FMT "tx_iface=%s" #define TX_STREAM_SIZE 64 #define MP_NAME "pdump_pool_%d" @@ -139,8 +139,6 @@ struct parse_val { int num_tuples; static struct rte_eth_conf port_conf_default; volatile uint8_t quit_signal; -static char server_socket_path[PATH_MAX]; -static char client_socket_path[PATH_MAX]; /**< display usage */ static void @@ -153,11 +151,7 @@ pdump_usage(const char *prgname) " tx-dev=," "[ring-size=default:16384]," "[mbuf-size=default:2176]," - "[total-num-mbufs=default:65535]'\n" - "[--server-socket-path=" - "default:/var/run/.dpdk/ (or) ~/.dpdk/]\n" - "[--client-socket-path=" - "default:/var/run/.dpdk/ (or) ~/.dpdk/]\n", + "[total-num-mbufs=default:65535]'\n", prgname); } @@ -382,8 +376,6 @@ launch_args_parse(int argc, char **argv, char *prgname) int option_index; static struct option long_option[] = { {"pdump", 1, 0, 0}, - {"server-socket-path", 1, 0, 0}, - {"client-socket-path", 1, 0, 0}, {NULL, 0, 0, 0} }; @@ -404,23 +396,6 @@ launch_args_parse(int argc, char **argv, char *prgname) return -1; } } - - if (!strncmp(long_option[option_index].name, - CMD_LINE_OPT_SER_SOCK_PATH, - sizeof(CMD_LINE_OPT_SER_SOCK_PATH))) { - snprintf(server_socket_path, - sizeof(server_socket_path), "%s", - optarg); - } - - if (!strncmp(long_option[option_index].name, - CMD_LINE_OPT_CLI_SOCK_PATH, - 
sizeof(CMD_LINE_OPT_CLI_SOCK_PATH))) { - snprintf(client_socket_path, - sizeof(client_socket_path), "%s", - optarg); - } - break; default: pdump_usage(prgname); @@ -554,11 +529,10 @@ configure_vdev(uint16_t port_id) { struct ether_addr addr; const uint16_t rxRings = 0, txRings = 1; - const uint8_t nb_ports = rte_eth_dev_count(); int ret; uint16_t q; - if (port_id > nb_ports) + if (!rte_eth_dev_is_valid_port(port_id)) return -1; ret = rte_eth_dev_configure(port_id, rxRings, txRings, @@ -597,6 +571,7 @@ create_mp_ring_vdev(void) uint16_t portid; struct pdump_tuples *pt = NULL; struct rte_mempool *mbuf_pool = NULL; + char vdev_name[SIZE]; char vdev_args[SIZE]; char ring_name[SIZE]; char mempool_name[SIZE]; @@ -646,17 +621,28 @@ create_mp_ring_vdev(void) } /* create vdevs */ + snprintf(vdev_name, sizeof(vdev_name), + VDEV_NAME_FMT, RX_STR, i); (pt->rx_vdev_stream_type == IFACE) ? - snprintf(vdev_args, SIZE, VDEV_IFACE, RX_STR, i, - pt->rx_dev) : - snprintf(vdev_args, SIZE, VDEV_PCAP, RX_STR, i, - pt->rx_dev); - if (rte_eth_dev_attach(vdev_args, &portid) < 0) { + snprintf(vdev_args, sizeof(vdev_args), + VDEV_IFACE_ARGS_FMT, pt->rx_dev) : + snprintf(vdev_args, sizeof(vdev_args), + VDEV_PCAP_ARGS_FMT, pt->rx_dev); + if (rte_eal_hotplug_add("vdev", vdev_name, + vdev_args) < 0) { cleanup_rings(); rte_exit(EXIT_FAILURE, "vdev creation failed:%s:%d\n", __func__, __LINE__); } + if (rte_eth_dev_get_port_by_name(vdev_name, + &portid) != 0) { + rte_eal_hotplug_remove("vdev", vdev_name); + cleanup_rings(); + rte_exit(EXIT_FAILURE, + "cannot find added vdev %s:%s:%d\n", + vdev_name, __func__, __LINE__); + } pt->rx_vdev_id = portid; /* configure vdev */ @@ -665,18 +651,29 @@ create_mp_ring_vdev(void) if (pt->single_pdump_dev) pt->tx_vdev_id = portid; else { - (pt->tx_vdev_stream_type == IFACE) ? - snprintf(vdev_args, SIZE, VDEV_IFACE, TX_STR, i, - pt->tx_dev) : - snprintf(vdev_args, SIZE, VDEV_PCAP, TX_STR, i, - pt->tx_dev); - if (rte_eth_dev_attach(vdev_args, - &portid) < 0) { + snprintf(vdev_name, sizeof(vdev_name), + VDEV_NAME_FMT, TX_STR, i); + (pt->rx_vdev_stream_type == IFACE) ? + snprintf(vdev_args, sizeof(vdev_args), + VDEV_IFACE_ARGS_FMT, pt->tx_dev) : + snprintf(vdev_args, sizeof(vdev_args), + VDEV_PCAP_ARGS_FMT, pt->tx_dev); + if (rte_eal_hotplug_add("vdev", vdev_name, + vdev_args) < 0) { cleanup_rings(); rte_exit(EXIT_FAILURE, "vdev creation failed:" "%s:%d\n", __func__, __LINE__); } + if (rte_eth_dev_get_port_by_name(vdev_name, + &portid) != 0) { + rte_eal_hotplug_remove("vdev", + vdev_name); + cleanup_rings(); + rte_exit(EXIT_FAILURE, + "cannot find added vdev %s:%s:%d\n", + vdev_name, __func__, __LINE__); + } pt->tx_vdev_id = portid; /* configure vdev */ @@ -694,17 +691,28 @@ create_mp_ring_vdev(void) rte_strerror(rte_errno)); } + snprintf(vdev_name, sizeof(vdev_name), + VDEV_NAME_FMT, RX_STR, i); (pt->rx_vdev_stream_type == IFACE) ? 
- snprintf(vdev_args, SIZE, VDEV_IFACE, RX_STR, i, - pt->rx_dev) : - snprintf(vdev_args, SIZE, VDEV_PCAP, RX_STR, i, - pt->rx_dev); - if (rte_eth_dev_attach(vdev_args, &portid) < 0) { + snprintf(vdev_args, sizeof(vdev_args), + VDEV_IFACE_ARGS_FMT, pt->rx_dev) : + snprintf(vdev_args, sizeof(vdev_args), + VDEV_PCAP_ARGS_FMT, pt->rx_dev); + if (rte_eal_hotplug_add("vdev", vdev_name, + vdev_args) < 0) { cleanup_rings(); rte_exit(EXIT_FAILURE, "vdev creation failed:%s:%d\n", __func__, __LINE__); } + if (rte_eth_dev_get_port_by_name(vdev_name, + &portid) != 0) { + rte_eal_hotplug_remove("vdev", vdev_name); + cleanup_rings(); + rte_exit(EXIT_FAILURE, + "cannot find added vdev %s:%s:%d\n", + vdev_name, __func__, __LINE__); + } pt->rx_vdev_id = portid; /* configure vdev */ configure_vdev(pt->rx_vdev_id); @@ -720,16 +728,27 @@ create_mp_ring_vdev(void) rte_strerror(rte_errno)); } + snprintf(vdev_name, sizeof(vdev_name), + VDEV_NAME_FMT, TX_STR, i); (pt->tx_vdev_stream_type == IFACE) ? - snprintf(vdev_args, SIZE, VDEV_IFACE, TX_STR, i, - pt->tx_dev) : - snprintf(vdev_args, SIZE, VDEV_PCAP, TX_STR, i, - pt->tx_dev); - if (rte_eth_dev_attach(vdev_args, &portid) < 0) { + snprintf(vdev_args, sizeof(vdev_args), + VDEV_IFACE_ARGS_FMT, pt->tx_dev) : + snprintf(vdev_args, sizeof(vdev_args), + VDEV_PCAP_ARGS_FMT, pt->tx_dev); + if (rte_eal_hotplug_add("vdev", vdev_name, + vdev_args) < 0) { cleanup_rings(); rte_exit(EXIT_FAILURE, "vdev creation failed\n"); } + if (rte_eth_dev_get_port_by_name(vdev_name, + &portid) != 0) { + rte_eal_hotplug_remove("vdev", vdev_name); + cleanup_rings(); + rte_exit(EXIT_FAILURE, + "cannot find added vdev %s:%s:%d\n", + vdev_name, __func__, __LINE__); + } pt->tx_vdev_id = portid; /* configure vdev */ @@ -745,22 +764,6 @@ enable_pdump(void) struct pdump_tuples *pt; int ret = 0, ret1 = 0; - if (server_socket_path[0] != 0) - ret = rte_pdump_set_socket_dir(server_socket_path, - RTE_PDUMP_SOCKET_SERVER); - if (ret == 0 && client_socket_path[0] != 0) { - ret = rte_pdump_set_socket_dir(client_socket_path, - RTE_PDUMP_SOCKET_CLIENT); - } - if (ret < 0) { - cleanup_pdump_resources(); - rte_exit(EXIT_FAILURE, - "failed to set socket paths of server:%s, " - "client:%s\n", - server_socket_path, - client_socket_path); - } - for (i = 0; i < num_tuples; i++) { pt = &pdump_t[i]; if (pt->dir == RTE_PDUMP_FLAG_RXTX) { @@ -863,6 +866,9 @@ main(int argc, char **argv) if (diag < 0) rte_panic("Cannot init EAL\n"); + if (rte_eth_dev_count_avail() == 0) + rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n"); + argc -= diag; argv += (diag - 3); diff --git a/app/pdump/meson.build b/app/pdump/meson.build new file mode 100644 index 00000000..988cb4eb --- /dev/null +++ b/app/pdump/meson.build @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +sources = files('main.c') +allow_experimental_apis = true +deps = ['ethdev', 'kvargs', 'pdump'] diff --git a/app/proc-info/Makefile b/app/proc-info/Makefile new file mode 100644 index 00000000..9e87f524 --- /dev/null +++ b/app/proc-info/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2015 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +APP = dpdk-procinfo + +CFLAGS += -DALLOW_EXPERIMENTAL_API +CFLAGS += $(WERROR_FLAGS) + +# all source are stored in SRCS-y + +SRCS-y := main.c + +include $(RTE_SDK)/mk/rte.app.mk diff --git a/app/proc-info/main.c b/app/proc-info/main.c new file mode 100644 index 00000000..c20effa4 --- /dev/null +++ b/app/proc-info/main.c @@ -0,0 +1,672 @@ +/* 
SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Maximum long option length for option parsing. */ +#define MAX_LONG_OPT_SZ 64 +#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1 + +#define MAX_STRING_LEN 256 + +/**< mask of enabled ports */ +static uint32_t enabled_port_mask; +/**< Enable stats. */ +static uint32_t enable_stats; +/**< Enable xstats. */ +static uint32_t enable_xstats; +/**< Enable collectd format*/ +static uint32_t enable_collectd_format; +/**< FD to send collectd format messages to STDOUT*/ +static int stdout_fd; +/**< Host id the process is running on */ +static char host_id[MAX_LONG_OPT_SZ]; +/**< Enable metrics. */ +static uint32_t enable_metrics; +/**< Enable stats reset. */ +static uint32_t reset_stats; +/**< Enable xstats reset. */ +static uint32_t reset_xstats; +/**< Enable memory info. */ +static uint32_t mem_info; +/**< Enable displaying xstat name. */ +static uint32_t enable_xstats_name; +static char *xstats_name; + +/**< Enable xstats by ids. */ +#define MAX_NB_XSTATS_IDS 1024 +static uint32_t nb_xstats_ids; +static uint64_t xstats_ids[MAX_NB_XSTATS_IDS]; + +/**< display usage */ +static void +proc_info_usage(const char *prgname) +{ + printf("%s [EAL options] -- -p PORTMASK\n" + " -m to display DPDK memory zones, segments and TAILQ information\n" + " -p PORTMASK: hexadecimal bitmask of ports to retrieve stats for\n" + " --stats: to display port statistics, enabled by default\n" + " --xstats: to display extended port statistics, disabled by " + "default\n" + " --metrics: to display derived metrics of the ports, disabled by " + "default\n" + " --xstats-name NAME: to display single xstat id by NAME\n" + " --xstats-ids IDLIST: to display xstat values by id. " + "The argument is a comma-separated list of xstat ids to print out.\n" + " --stats-reset: to reset port statistics\n" + " --xstats-reset: to reset port extended statistics\n" + " --collectd-format: to print statistics to STDOUT in the format expected by collectd\n" + " --host-id STRING: host id used to identify the system the process is running on\n", + prgname); +} + +/* + * Parse the portmask provided at run time.
+ */ +static int +parse_portmask(const char *portmask) +{ + char *end = NULL; + unsigned long pm; + + errno = 0; + + /* parse hexadecimal string */ + pm = strtoul(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || + (errno != 0)) { + printf("%s ERROR parsing the port mask\n", __func__); + return -1; + } + + if (pm == 0) + return -1; + + return pm; + +} + +/* + * Parse ids value list into array + */ +static int +parse_xstats_ids(char *list, uint64_t *ids, int limit) { + int length; + char *token; + char *ctx = NULL; + char *endptr; + + length = 0; + token = strtok_r(list, ",", &ctx); + while (token != NULL) { + ids[length] = strtoull(token, &endptr, 10); + if (*endptr != '\0') + return -EINVAL; + + length++; + if (length >= limit) + return -E2BIG; + + token = strtok_r(NULL, ",", &ctx); + } + + return length; +} + +static int +proc_info_preparse_args(int argc, char **argv) +{ + char *prgname = argv[0]; + int i; + + for (i = 0; i < argc; i++) { + /* Print stats or xstats to STDOUT in collectd format */ + if (!strncmp(argv[i], "--collectd-format", MAX_LONG_OPT_SZ)) { + enable_collectd_format = 1; + stdout_fd = dup(STDOUT_FILENO); + close(STDOUT_FILENO); + } + if (!strncmp(argv[i], "--host-id", MAX_LONG_OPT_SZ)) { + if ((i + 1) == argc) { + printf("Invalid host id or not specified\n"); + proc_info_usage(prgname); + return -1; + } + snprintf(host_id, sizeof(host_id), "%s", argv[i+1]); + } + } + + if (!strlen(host_id)) { + int err = gethostname(host_id, MAX_LONG_OPT_SZ-1); + + if (err) + strcpy(host_id, "unknown"); + } + + return 0; +} + +/* Parse the argument given in the command line of the application */ +static int +proc_info_parse_args(int argc, char **argv) +{ + int opt; + int option_index; + char *prgname = argv[0]; + static struct option long_option[] = { + {"stats", 0, NULL, 0}, + {"stats-reset", 0, NULL, 0}, + {"xstats", 0, NULL, 0}, + {"metrics", 0, NULL, 0}, + {"xstats-reset", 0, NULL, 0}, + {"xstats-name", required_argument, NULL, 1}, + {"collectd-format", 0, NULL, 0}, + {"xstats-ids", 1, NULL, 1}, + {"host-id", 0, NULL, 0}, + {NULL, 0, 0, 0} + }; + + if (argc == 1) + proc_info_usage(prgname); + + /* Parse command line */ + while ((opt = getopt_long(argc, argv, "p:m", + long_option, &option_index)) != EOF) { + switch (opt) { + /* portmask */ + case 'p': + enabled_port_mask = parse_portmask(optarg); + if (enabled_port_mask == 0) { + printf("invalid portmask\n"); + proc_info_usage(prgname); + return -1; + } + break; + case 'm': + mem_info = 1; + break; + case 0: + /* Print stats */ + if (!strncmp(long_option[option_index].name, "stats", + MAX_LONG_OPT_SZ)) + enable_stats = 1; + /* Print xstats */ + else if (!strncmp(long_option[option_index].name, "xstats", + MAX_LONG_OPT_SZ)) + enable_xstats = 1; + else if (!strncmp(long_option[option_index].name, + "metrics", + MAX_LONG_OPT_SZ)) + enable_metrics = 1; + /* Reset stats */ + if (!strncmp(long_option[option_index].name, "stats-reset", + MAX_LONG_OPT_SZ)) + reset_stats = 1; + /* Reset xstats */ + else if (!strncmp(long_option[option_index].name, "xstats-reset", + MAX_LONG_OPT_SZ)) + reset_xstats = 1; + break; + case 1: + /* Print xstat single value given by name*/ + if (!strncmp(long_option[option_index].name, + "xstats-name", MAX_LONG_OPT_SZ)) { + enable_xstats_name = 1; + xstats_name = optarg; + printf("name:%s:%s\n", + long_option[option_index].name, + optarg); + } else if (!strncmp(long_option[option_index].name, + "xstats-ids", + MAX_LONG_OPT_SZ)) { + nb_xstats_ids = parse_xstats_ids(optarg, + 
xstats_ids, MAX_NB_XSTATS_IDS); + + if (nb_xstats_ids <= 0) { + printf("xstats-id list parse error.\n"); + return -1; + } + + } + break; + default: + proc_info_usage(prgname); + return -1; + } + } + return 0; +} + +static void +meminfo_display(void) +{ + printf("----------- MEMORY_SEGMENTS -----------\n"); + rte_dump_physmem_layout(stdout); + printf("--------- END_MEMORY_SEGMENTS ---------\n"); + + printf("------------ MEMORY_ZONES -------------\n"); + rte_memzone_dump(stdout); + printf("---------- END_MEMORY_ZONES -----------\n"); + + printf("------------- TAIL_QUEUES -------------\n"); + rte_dump_tailq(stdout); + printf("---------- END_TAIL_QUEUES ------------\n"); +} + +static void +nic_stats_display(uint16_t port_id) +{ + struct rte_eth_stats stats; + uint8_t i; + + static const char *nic_stats_border = "########################"; + + rte_eth_stats_get(port_id, &stats); + printf("\n %s NIC statistics for port %-2d %s\n", + nic_stats_border, port_id, nic_stats_border); + + printf(" RX-packets: %-10"PRIu64" RX-errors: %-10"PRIu64 + " RX-bytes: %-10"PRIu64"\n", stats.ipackets, stats.ierrors, + stats.ibytes); + printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); + printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64 + " TX-bytes: %-10"PRIu64"\n", stats.opackets, stats.oerrors, + stats.obytes); + + printf("\n"); + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { + printf(" Stats reg %2d RX-packets: %-10"PRIu64 + " RX-errors: %-10"PRIu64 + " RX-bytes: %-10"PRIu64"\n", + i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); + } + + printf("\n"); + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { + printf(" Stats reg %2d TX-packets: %-10"PRIu64 + " TX-bytes: %-10"PRIu64"\n", + i, stats.q_opackets[i], stats.q_obytes[i]); + } + + printf(" %s############################%s\n", + nic_stats_border, nic_stats_border); +} + +static void +nic_stats_clear(uint16_t port_id) +{ + printf("\n Clearing NIC stats for port %d\n", port_id); + rte_eth_stats_reset(port_id); + printf("\n NIC statistics for port %d cleared\n", port_id); +} + +static void collectd_resolve_cnt_type(char *cnt_type, size_t cnt_type_len, + const char *cnt_name) { + char *type_end = strrchr(cnt_name, '_'); + + if ((type_end != NULL) && + (strncmp(cnt_name, "rx_", strlen("rx_")) == 0)) { + if (strncmp(type_end, "_errors", strlen("_errors")) == 0) + strncpy(cnt_type, "if_rx_errors", cnt_type_len); + else if (strncmp(type_end, "_dropped", strlen("_dropped")) == 0) + strncpy(cnt_type, "if_rx_dropped", cnt_type_len); + else if (strncmp(type_end, "_bytes", strlen("_bytes")) == 0) + strncpy(cnt_type, "if_rx_octets", cnt_type_len); + else if (strncmp(type_end, "_packets", strlen("_packets")) == 0) + strncpy(cnt_type, "if_rx_packets", cnt_type_len); + else if (strncmp(type_end, "_placement", + strlen("_placement")) == 0) + strncpy(cnt_type, "if_rx_errors", cnt_type_len); + else if (strncmp(type_end, "_buff", strlen("_buff")) == 0) + strncpy(cnt_type, "if_rx_errors", cnt_type_len); + else + /* Does not fit obvious type: use a more generic one */ + strncpy(cnt_type, "derive", cnt_type_len); + } else if ((type_end != NULL) && + (strncmp(cnt_name, "tx_", strlen("tx_"))) == 0) { + if (strncmp(type_end, "_errors", strlen("_errors")) == 0) + strncpy(cnt_type, "if_tx_errors", cnt_type_len); + else if (strncmp(type_end, "_dropped", strlen("_dropped")) == 0) + strncpy(cnt_type, "if_tx_dropped", cnt_type_len); + else if (strncmp(type_end, "_bytes", strlen("_bytes")) == 0) + strncpy(cnt_type, "if_tx_octets", cnt_type_len); + else if 
(strncmp(type_end, "_packets", strlen("_packets")) == 0) + strncpy(cnt_type, "if_tx_packets", cnt_type_len); + else + /* Does not fit obvious type: use a more generic one */ + strncpy(cnt_type, "derive", cnt_type_len); + } else if ((type_end != NULL) && + (strncmp(cnt_name, "flow_", strlen("flow_"))) == 0) { + if (strncmp(type_end, "_filters", strlen("_filters")) == 0) + strncpy(cnt_type, "operations", cnt_type_len); + else if (strncmp(type_end, "_errors", strlen("_errors")) == 0) + strncpy(cnt_type, "errors", cnt_type_len); + else if (strncmp(type_end, "_filters", strlen("_filters")) == 0) + strncpy(cnt_type, "filter_result", cnt_type_len); + } else if ((type_end != NULL) && + (strncmp(cnt_name, "mac_", strlen("mac_"))) == 0) { + if (strncmp(type_end, "_errors", strlen("_errors")) == 0) + strncpy(cnt_type, "errors", cnt_type_len); + } else { + /* Does not fit obvious type, or strrchr error: */ + /* use a more generic type */ + strncpy(cnt_type, "derive", cnt_type_len); + } +} + +static void +nic_xstats_by_name_display(uint16_t port_id, char *name) +{ + uint64_t id; + + printf("###### NIC statistics for port %-2d, statistic name '%s':\n", + port_id, name); + + if (rte_eth_xstats_get_id_by_name(port_id, name, &id) == 0) + printf("%s: %"PRIu64"\n", name, id); + else + printf("Statistic not found...\n"); + +} + +static void +nic_xstats_by_ids_display(uint16_t port_id, uint64_t *ids, int len) +{ + struct rte_eth_xstat_name *xstats_names; + uint64_t *values; + int ret, i; + static const char *nic_stats_border = "########################"; + + values = malloc(sizeof(*values) * len); + if (values == NULL) { + printf("Cannot allocate memory for xstats\n"); + return; + } + + xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * len); + if (xstats_names == NULL) { + printf("Cannot allocate memory for xstat names\n"); + free(values); + return; + } + + if (len != rte_eth_xstats_get_names_by_id( + port_id, xstats_names, len, ids)) { + printf("Cannot get xstat names\n"); + goto err; + } + + printf("###### NIC extended statistics for port %-2d #########\n", + port_id); + printf("%s############################\n", nic_stats_border); + ret = rte_eth_xstats_get_by_id(port_id, ids, values, len); + if (ret < 0 || ret > len) { + printf("Cannot get xstats\n"); + goto err; + } + + for (i = 0; i < len; i++) + printf("%s: %"PRIu64"\n", + xstats_names[i].name, + values[i]); + + printf("%s############################\n", nic_stats_border); +err: + free(values); + free(xstats_names); +} + +static void +nic_xstats_display(uint16_t port_id) +{ + struct rte_eth_xstat_name *xstats_names; + uint64_t *values; + int len, ret, i; + static const char *nic_stats_border = "########################"; + + len = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); + if (len < 0) { + printf("Cannot get xstats count\n"); + return; + } + values = malloc(sizeof(*values) * len); + if (values == NULL) { + printf("Cannot allocate memory for xstats\n"); + return; + } + + xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * len); + if (xstats_names == NULL) { + printf("Cannot allocate memory for xstat names\n"); + free(values); + return; + } + if (len != rte_eth_xstats_get_names_by_id( + port_id, xstats_names, len, NULL)) { + printf("Cannot get xstat names\n"); + goto err; + } + + printf("###### NIC extended statistics for port %-2d #########\n", + port_id); + printf("%s############################\n", + nic_stats_border); + ret = rte_eth_xstats_get_by_id(port_id, NULL, values, len); + if (ret < 0 || ret > len) { + 
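
The counter-type mapping above keys on two things: the rx_/tx_/flow_/mac_ prefix of the counter name, and its last '_'-delimited suffix (found with strrchr()). A reduced model of the same idea, illustrative only:

    #include <stdio.h>
    #include <string.h>

    /* Map an xstat name to a collectd type from prefix + last suffix. */
    static const char *resolve(const char *name)
    {
        const char *end = strrchr(name, '_');

        if (end == NULL)
            return "derive";                 /* no suffix: generic type */
        if (strncmp(name, "rx_", 3) == 0) {
            if (strcmp(end, "_packets") == 0) return "if_rx_packets";
            if (strcmp(end, "_bytes") == 0)   return "if_rx_octets";
            if (strcmp(end, "_errors") == 0)  return "if_rx_errors";
        } else if (strncmp(name, "tx_", 3) == 0) {
            if (strcmp(end, "_packets") == 0) return "if_tx_packets";
            if (strcmp(end, "_bytes") == 0)   return "if_tx_octets";
            if (strcmp(end, "_errors") == 0)  return "if_tx_errors";
        }
        return "derive";                     /* fallback, as in the code above */
    }

    int main(void)
    {
        printf("%s\n", resolve("rx_good_packets"));    /* if_rx_packets */
        printf("%s\n", resolve("tx_size_64_packets")); /* if_tx_packets */
        printf("%s\n", resolve("lro_bytes"));          /* derive */
        return 0;
    }

The full function additionally maps the _dropped, _placement and _buff suffixes and the flow_/mac_ prefixes, and writes the result into a caller-supplied buffer with strncpy().
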
printf("Cannot get xstats\n"); + goto err; + } + + for (i = 0; i < len; i++) { + if (enable_collectd_format) { + char counter_type[MAX_STRING_LEN]; + char buf[MAX_STRING_LEN]; + size_t n; + + collectd_resolve_cnt_type(counter_type, + sizeof(counter_type), + xstats_names[i].name); + n = snprintf(buf, MAX_STRING_LEN, + "PUTVAL %s/dpdkstat-port.%u/%s-%s N:%" + PRIu64"\n", host_id, port_id, counter_type, + xstats_names[i].name, values[i]); + if (n > sizeof(buf) - 1) + n = sizeof(buf) - 1; + ret = write(stdout_fd, buf, n); + if (ret < 0) + goto err; + } else { + printf("%s: %"PRIu64"\n", xstats_names[i].name, + values[i]); + } + } + + printf("%s############################\n", + nic_stats_border); +err: + free(values); + free(xstats_names); +} + +static void +nic_xstats_clear(uint16_t port_id) +{ + printf("\n Clearing NIC xstats for port %d\n", port_id); + rte_eth_xstats_reset(port_id); + printf("\n NIC extended statistics for port %d cleared\n", port_id); +} + +static void +metrics_display(int port_id) +{ + struct rte_metric_value *metrics; + struct rte_metric_name *names; + int len, ret; + static const char *nic_stats_border = "########################"; + + len = rte_metrics_get_names(NULL, 0); + if (len < 0) { + printf("Cannot get metrics count\n"); + return; + } + if (len == 0) { + printf("No metrics to display (none have been registered)\n"); + return; + } + + metrics = rte_malloc("proc_info_metrics", + sizeof(struct rte_metric_value) * len, 0); + if (metrics == NULL) { + printf("Cannot allocate memory for metrics\n"); + return; + } + + names = rte_malloc(NULL, sizeof(struct rte_metric_name) * len, 0); + if (names == NULL) { + printf("Cannot allocate memory for metrcis names\n"); + rte_free(metrics); + return; + } + + if (len != rte_metrics_get_names(names, len)) { + printf("Cannot get metrics names\n"); + rte_free(metrics); + rte_free(names); + return; + } + + if (port_id == RTE_METRICS_GLOBAL) + printf("###### Non port specific metrics #########\n"); + else + printf("###### metrics for port %-2d #########\n", port_id); + printf("%s############################\n", nic_stats_border); + ret = rte_metrics_get_values(port_id, metrics, len); + if (ret < 0 || ret > len) { + printf("Cannot get metrics values\n"); + rte_free(metrics); + rte_free(names); + return; + } + + int i; + for (i = 0; i < len; i++) + printf("%s: %"PRIu64"\n", names[i].name, metrics[i].value); + + printf("%s############################\n", nic_stats_border); + rte_free(metrics); + rte_free(names); +} + +int +main(int argc, char **argv) +{ + int ret; + int i; + char c_flag[] = "-c1"; + char n_flag[] = "-n4"; + char mp_flag[] = "--proc-type=secondary"; + char *argp[argc + 3]; + uint16_t nb_ports; + + /* preparse app arguments */ + ret = proc_info_preparse_args(argc, argv); + if (ret < 0) { + printf("Failed to parse arguments\n"); + return -1; + } + + argp[0] = argv[0]; + argp[1] = c_flag; + argp[2] = n_flag; + argp[3] = mp_flag; + + for (i = 1; i < argc; i++) + argp[i + 3] = argv[i]; + + argc += 3; + + ret = rte_eal_init(argc, argp); + if (ret < 0) + rte_panic("Cannot init EAL\n"); + + argc -= ret; + argv += (ret - 3); + + if (!rte_eal_primary_proc_alive(NULL)) + rte_exit(EXIT_FAILURE, "No primary DPDK process is running.\n"); + + /* parse app arguments */ + ret = proc_info_parse_args(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid argument\n"); + + if (mem_info) { + meminfo_display(); + return 0; + } + + nb_ports = rte_eth_dev_count_avail(); + if (nb_ports == 0) + rte_exit(EXIT_FAILURE, "No Ethernet ports - 
bye\n"); + + /* If no port mask was specified*/ + if (enabled_port_mask == 0) + enabled_port_mask = 0xffff; + + RTE_ETH_FOREACH_DEV(i) { + if (enabled_port_mask & (1 << i)) { + if (enable_stats) + nic_stats_display(i); + else if (enable_xstats) + nic_xstats_display(i); + else if (reset_stats) + nic_stats_clear(i); + else if (reset_xstats) + nic_xstats_clear(i); + else if (enable_xstats_name) + nic_xstats_by_name_display(i, xstats_name); + else if (nb_xstats_ids > 0) + nic_xstats_by_ids_display(i, xstats_ids, + nb_xstats_ids); + else if (enable_metrics) + metrics_display(i); + } + } + + /* print port independent stats */ + if (enable_metrics) + metrics_display(RTE_METRICS_GLOBAL); + + ret = rte_eal_cleanup(); + if (ret) + printf("Error from rte_eal_cleanup(), %d\n", ret); + + return 0; +} diff --git a/app/proc-info/meson.build b/app/proc-info/meson.build new file mode 100644 index 00000000..9c148e36 --- /dev/null +++ b/app/proc-info/meson.build @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +sources = files('main.c') +allow_experimental_apis = true +deps = ['ethdev', 'metrics'] diff --git a/app/proc_info/Makefile b/app/proc_info/Makefile deleted file mode 100644 index 9e87f524..00000000 --- a/app/proc_info/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2010-2015 Intel Corporation - -include $(RTE_SDK)/mk/rte.vars.mk - -APP = dpdk-procinfo - -CFLAGS += -DALLOW_EXPERIMENTAL_API -CFLAGS += $(WERROR_FLAGS) - -# all source are stored in SRCS-y - -SRCS-y := main.c - -include $(RTE_SDK)/mk/rte.app.mk diff --git a/app/proc_info/main.c b/app/proc_info/main.c deleted file mode 100644 index 2f53e3ca..00000000 --- a/app/proc_info/main.c +++ /dev/null @@ -1,668 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2017 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Maximum long option length for option parsing. */ -#define MAX_LONG_OPT_SZ 64 -#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1 - -#define MAX_STRING_LEN 256 - -/**< mask of enabled ports */ -static uint32_t enabled_port_mask; -/**< Enable stats. */ -static uint32_t enable_stats; -/**< Enable xstats. */ -static uint32_t enable_xstats; -/**< Enable collectd format*/ -static uint32_t enable_collectd_format; -/**< FD to send collectd format messages to STDOUT*/ -static int stdout_fd; -/**< Host id process is running on */ -static char host_id[MAX_LONG_OPT_SZ]; -/**< Enable metrics. */ -static uint32_t enable_metrics; -/**< Enable stats reset. */ -static uint32_t reset_stats; -/**< Enable xstats reset. */ -static uint32_t reset_xstats; -/**< Enable memory info. */ -static uint32_t mem_info; -/**< Enable displaying xstat name. */ -static uint32_t enable_xstats_name; -static char *xstats_name; - -/**< Enable xstats by ids. 
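
The launch sequence in main() above splices three fixed EAL arguments ("-c1 -n4 --proc-type=secondary") in front of the user's command line, so the tool always attaches as a secondary process, on one core, to a running primary DPDK process. The same pattern in isolation (illustrative sketch):

    #include <rte_eal.h>
    #include <rte_debug.h>

    int main(int argc, char **argv)
    {
        char c_flag[] = "-c1";
        char n_flag[] = "-n4";
        char mp_flag[] = "--proc-type=secondary";
        char *argp[argc + 3];
        int i, ret;

        argp[0] = argv[0];              /* program name stays first */
        argp[1] = c_flag;
        argp[2] = n_flag;
        argp[3] = mp_flag;
        for (i = 1; i < argc; i++)      /* user args follow the splice */
            argp[i + 3] = argv[i];

        ret = rte_eal_init(argc + 3, argp);
        if (ret < 0)
            rte_panic("Cannot init EAL\n");

        /* rte_eal_init() consumed `ret` entries, three of which were
         * spliced in, so application options resume at argv + ret - 3. */
        return 0;
    }
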
*/ -#define MAX_NB_XSTATS_IDS 1024 -static uint32_t nb_xstats_ids; -static uint64_t xstats_ids[MAX_NB_XSTATS_IDS]; - -/**< display usage */ -static void -proc_info_usage(const char *prgname) -{ - printf("%s [EAL options] -- -p PORTMASK\n" - " -m to display DPDK memory zones, segments and TAILQ information\n" - " -p PORTMASK: hexadecimal bitmask of ports to retrieve stats for\n" - " --stats: to display port statistics, enabled by default\n" - " --xstats: to display extended port statistics, disabled by " - "default\n" - " --metrics: to display derived metrics of the ports, disabled by " - "default\n" - " --xstats-name NAME: to display single xstat id by NAME\n" - " --xstats-ids IDLIST: to display xstat values by id. " - "The argument is comma-separated list of xstat ids to print out.\n" - " --stats-reset: to reset port statistics\n" - " --xstats-reset: to reset port extended statistics\n" - " --collectd-format: to print statistics to STDOUT in expected by collectd format\n" - " --host-id STRING: host id used to identify the system process is running on\n", - prgname); -} - -/* - * Parse the portmask provided at run time. - */ -static int -parse_portmask(const char *portmask) -{ - char *end = NULL; - unsigned long pm; - - errno = 0; - - /* parse hexadecimal string */ - pm = strtoul(portmask, &end, 16); - if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || - (errno != 0)) { - printf("%s ERROR parsing the port mask\n", __func__); - return -1; - } - - if (pm == 0) - return -1; - - return pm; - -} - -/* - * Parse ids value list into array - */ -static int -parse_xstats_ids(char *list, uint64_t *ids, int limit) { - int length; - char *token; - char *ctx = NULL; - char *endptr; - - length = 0; - token = strtok_r(list, ",", &ctx); - while (token != NULL) { - ids[length] = strtoull(token, &endptr, 10); - if (*endptr != '\0') - return -EINVAL; - - length++; - if (length >= limit) - return -E2BIG; - - token = strtok_r(NULL, ",", &ctx); - } - - return length; -} - -static int -proc_info_preparse_args(int argc, char **argv) -{ - char *prgname = argv[0]; - int i; - - for (i = 0; i < argc; i++) { - /* Print stats or xstats to STDOUT in collectd format */ - if (!strncmp(argv[i], "--collectd-format", MAX_LONG_OPT_SZ)) { - enable_collectd_format = 1; - stdout_fd = dup(STDOUT_FILENO); - close(STDOUT_FILENO); - } - if (!strncmp(argv[i], "--host-id", MAX_LONG_OPT_SZ)) { - if ((i + 1) == argc) { - printf("Invalid host id or not specified\n"); - proc_info_usage(prgname); - return -1; - } - strncpy(host_id, argv[i+1], sizeof(host_id)); - } - } - - if (!strlen(host_id)) { - int err = gethostname(host_id, MAX_LONG_OPT_SZ-1); - - if (err) - strcpy(host_id, "unknown"); - } - - return 0; -} - -/* Parse the argument given in the command line of the application */ -static int -proc_info_parse_args(int argc, char **argv) -{ - int opt; - int option_index; - char *prgname = argv[0]; - static struct option long_option[] = { - {"stats", 0, NULL, 0}, - {"stats-reset", 0, NULL, 0}, - {"xstats", 0, NULL, 0}, - {"metrics", 0, NULL, 0}, - {"xstats-reset", 0, NULL, 0}, - {"xstats-name", required_argument, NULL, 1}, - {"collectd-format", 0, NULL, 0}, - {"xstats-ids", 1, NULL, 1}, - {"host-id", 0, NULL, 0}, - {NULL, 0, 0, 0} - }; - - if (argc == 1) - proc_info_usage(prgname); - - /* Parse command line */ - while ((opt = getopt_long(argc, argv, "p:m", - long_option, &option_index)) != EOF) { - switch (opt) { - /* portmask */ - case 'p': - enabled_port_mask = parse_portmask(optarg); - if (enabled_port_mask == 0) { - 
printf("invalid portmask\n"); - proc_info_usage(prgname); - return -1; - } - break; - case 'm': - mem_info = 1; - break; - case 0: - /* Print stats */ - if (!strncmp(long_option[option_index].name, "stats", - MAX_LONG_OPT_SZ)) - enable_stats = 1; - /* Print xstats */ - else if (!strncmp(long_option[option_index].name, "xstats", - MAX_LONG_OPT_SZ)) - enable_xstats = 1; - else if (!strncmp(long_option[option_index].name, - "metrics", - MAX_LONG_OPT_SZ)) - enable_metrics = 1; - /* Reset stats */ - if (!strncmp(long_option[option_index].name, "stats-reset", - MAX_LONG_OPT_SZ)) - reset_stats = 1; - /* Reset xstats */ - else if (!strncmp(long_option[option_index].name, "xstats-reset", - MAX_LONG_OPT_SZ)) - reset_xstats = 1; - break; - case 1: - /* Print xstat single value given by name*/ - if (!strncmp(long_option[option_index].name, - "xstats-name", MAX_LONG_OPT_SZ)) { - enable_xstats_name = 1; - xstats_name = optarg; - printf("name:%s:%s\n", - long_option[option_index].name, - optarg); - } else if (!strncmp(long_option[option_index].name, - "xstats-ids", - MAX_LONG_OPT_SZ)) { - nb_xstats_ids = parse_xstats_ids(optarg, - xstats_ids, MAX_NB_XSTATS_IDS); - - if (nb_xstats_ids <= 0) { - printf("xstats-id list parse error.\n"); - return -1; - } - - } - break; - default: - proc_info_usage(prgname); - return -1; - } - } - return 0; -} - -static void -meminfo_display(void) -{ - printf("----------- MEMORY_SEGMENTS -----------\n"); - rte_dump_physmem_layout(stdout); - printf("--------- END_MEMORY_SEGMENTS ---------\n"); - - printf("------------ MEMORY_ZONES -------------\n"); - rte_memzone_dump(stdout); - printf("---------- END_MEMORY_ZONES -----------\n"); - - printf("------------- TAIL_QUEUES -------------\n"); - rte_dump_tailq(stdout); - printf("---------- END_TAIL_QUEUES ------------\n"); -} - -static void -nic_stats_display(uint16_t port_id) -{ - struct rte_eth_stats stats; - uint8_t i; - - static const char *nic_stats_border = "########################"; - - rte_eth_stats_get(port_id, &stats); - printf("\n %s NIC statistics for port %-2d %s\n", - nic_stats_border, port_id, nic_stats_border); - - printf(" RX-packets: %-10"PRIu64" RX-errors: %-10"PRIu64 - " RX-bytes: %-10"PRIu64"\n", stats.ipackets, stats.ierrors, - stats.ibytes); - printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); - printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64 - " TX-bytes: %-10"PRIu64"\n", stats.opackets, stats.oerrors, - stats.obytes); - - printf("\n"); - for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { - printf(" Stats reg %2d RX-packets: %-10"PRIu64 - " RX-errors: %-10"PRIu64 - " RX-bytes: %-10"PRIu64"\n", - i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); - } - - printf("\n"); - for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { - printf(" Stats reg %2d TX-packets: %-10"PRIu64 - " TX-bytes: %-10"PRIu64"\n", - i, stats.q_opackets[i], stats.q_obytes[i]); - } - - printf(" %s############################%s\n", - nic_stats_border, nic_stats_border); -} - -static void -nic_stats_clear(uint16_t port_id) -{ - printf("\n Clearing NIC stats for port %d\n", port_id); - rte_eth_stats_reset(port_id); - printf("\n NIC statistics for port %d cleared\n", port_id); -} - -static void collectd_resolve_cnt_type(char *cnt_type, size_t cnt_type_len, - const char *cnt_name) { - char *type_end = strrchr(cnt_name, '_'); - - if ((type_end != NULL) && - (strncmp(cnt_name, "rx_", strlen("rx_")) == 0)) { - if (strncmp(type_end, "_errors", strlen("_errors")) == 0) - strncpy(cnt_type, "if_rx_errors", cnt_type_len); - else 
if (strncmp(type_end, "_dropped", strlen("_dropped")) == 0) - strncpy(cnt_type, "if_rx_dropped", cnt_type_len); - else if (strncmp(type_end, "_bytes", strlen("_bytes")) == 0) - strncpy(cnt_type, "if_rx_octets", cnt_type_len); - else if (strncmp(type_end, "_packets", strlen("_packets")) == 0) - strncpy(cnt_type, "if_rx_packets", cnt_type_len); - else if (strncmp(type_end, "_placement", - strlen("_placement")) == 0) - strncpy(cnt_type, "if_rx_errors", cnt_type_len); - else if (strncmp(type_end, "_buff", strlen("_buff")) == 0) - strncpy(cnt_type, "if_rx_errors", cnt_type_len); - else - /* Does not fit obvious type: use a more generic one */ - strncpy(cnt_type, "derive", cnt_type_len); - } else if ((type_end != NULL) && - (strncmp(cnt_name, "tx_", strlen("tx_"))) == 0) { - if (strncmp(type_end, "_errors", strlen("_errors")) == 0) - strncpy(cnt_type, "if_tx_errors", cnt_type_len); - else if (strncmp(type_end, "_dropped", strlen("_dropped")) == 0) - strncpy(cnt_type, "if_tx_dropped", cnt_type_len); - else if (strncmp(type_end, "_bytes", strlen("_bytes")) == 0) - strncpy(cnt_type, "if_tx_octets", cnt_type_len); - else if (strncmp(type_end, "_packets", strlen("_packets")) == 0) - strncpy(cnt_type, "if_tx_packets", cnt_type_len); - else - /* Does not fit obvious type: use a more generic one */ - strncpy(cnt_type, "derive", cnt_type_len); - } else if ((type_end != NULL) && - (strncmp(cnt_name, "flow_", strlen("flow_"))) == 0) { - if (strncmp(type_end, "_filters", strlen("_filters")) == 0) - strncpy(cnt_type, "operations", cnt_type_len); - else if (strncmp(type_end, "_errors", strlen("_errors")) == 0) - strncpy(cnt_type, "errors", cnt_type_len); - else if (strncmp(type_end, "_filters", strlen("_filters")) == 0) - strncpy(cnt_type, "filter_result", cnt_type_len); - } else if ((type_end != NULL) && - (strncmp(cnt_name, "mac_", strlen("mac_"))) == 0) { - if (strncmp(type_end, "_errors", strlen("_errors")) == 0) - strncpy(cnt_type, "errors", cnt_type_len); - } else { - /* Does not fit obvious type, or strrchr error: */ - /* use a more generic type */ - strncpy(cnt_type, "derive", cnt_type_len); - } -} - -static void -nic_xstats_by_name_display(uint16_t port_id, char *name) -{ - uint64_t id; - - printf("###### NIC statistics for port %-2d, statistic name '%s':\n", - port_id, name); - - if (rte_eth_xstats_get_id_by_name(port_id, name, &id) == 0) - printf("%s: %"PRIu64"\n", name, id); - else - printf("Statistic not found...\n"); - -} - -static void -nic_xstats_by_ids_display(uint16_t port_id, uint64_t *ids, int len) -{ - struct rte_eth_xstat_name *xstats_names; - uint64_t *values; - int ret, i; - static const char *nic_stats_border = "########################"; - - values = malloc(sizeof(*values) * len); - if (values == NULL) { - printf("Cannot allocate memory for xstats\n"); - return; - } - - xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * len); - if (xstats_names == NULL) { - printf("Cannot allocate memory for xstat names\n"); - free(values); - return; - } - - if (len != rte_eth_xstats_get_names_by_id( - port_id, xstats_names, len, ids)) { - printf("Cannot get xstat names\n"); - goto err; - } - - printf("###### NIC extended statistics for port %-2d #########\n", - port_id); - printf("%s############################\n", nic_stats_border); - ret = rte_eth_xstats_get_by_id(port_id, ids, values, len); - if (ret < 0 || ret > len) { - printf("Cannot get xstats\n"); - goto err; - } - - for (i = 0; i < len; i++) - printf("%s: %"PRIu64"\n", - xstats_names[i].name, - values[i]); - - 
printf("%s############################\n", nic_stats_border); -err: - free(values); - free(xstats_names); -} - -static void -nic_xstats_display(uint16_t port_id) -{ - struct rte_eth_xstat_name *xstats_names; - uint64_t *values; - int len, ret, i; - static const char *nic_stats_border = "########################"; - - len = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); - if (len < 0) { - printf("Cannot get xstats count\n"); - return; - } - values = malloc(sizeof(*values) * len); - if (values == NULL) { - printf("Cannot allocate memory for xstats\n"); - return; - } - - xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * len); - if (xstats_names == NULL) { - printf("Cannot allocate memory for xstat names\n"); - free(values); - return; - } - if (len != rte_eth_xstats_get_names_by_id( - port_id, xstats_names, len, NULL)) { - printf("Cannot get xstat names\n"); - goto err; - } - - printf("###### NIC extended statistics for port %-2d #########\n", - port_id); - printf("%s############################\n", - nic_stats_border); - ret = rte_eth_xstats_get_by_id(port_id, NULL, values, len); - if (ret < 0 || ret > len) { - printf("Cannot get xstats\n"); - goto err; - } - - for (i = 0; i < len; i++) { - if (enable_collectd_format) { - char counter_type[MAX_STRING_LEN]; - char buf[MAX_STRING_LEN]; - - collectd_resolve_cnt_type(counter_type, - sizeof(counter_type), - xstats_names[i].name); - sprintf(buf, "PUTVAL %s/dpdkstat-port.%u/%s-%s N:%" - PRIu64"\n", host_id, port_id, counter_type, - xstats_names[i].name, values[i]); - ret = write(stdout_fd, buf, strlen(buf)); - if (ret < 0) - goto err; - } else { - printf("%s: %"PRIu64"\n", xstats_names[i].name, - values[i]); - } - } - - printf("%s############################\n", - nic_stats_border); -err: - free(values); - free(xstats_names); -} - -static void -nic_xstats_clear(uint16_t port_id) -{ - printf("\n Clearing NIC xstats for port %d\n", port_id); - rte_eth_xstats_reset(port_id); - printf("\n NIC extended statistics for port %d cleared\n", port_id); -} - -static void -metrics_display(int port_id) -{ - struct rte_metric_value *metrics; - struct rte_metric_name *names; - int len, ret; - static const char *nic_stats_border = "########################"; - - len = rte_metrics_get_names(NULL, 0); - if (len < 0) { - printf("Cannot get metrics count\n"); - return; - } - if (len == 0) { - printf("No metrics to display (none have been registered)\n"); - return; - } - - metrics = rte_malloc("proc_info_metrics", - sizeof(struct rte_metric_value) * len, 0); - if (metrics == NULL) { - printf("Cannot allocate memory for metrics\n"); - return; - } - - names = rte_malloc(NULL, sizeof(struct rte_metric_name) * len, 0); - if (names == NULL) { - printf("Cannot allocate memory for metrcis names\n"); - rte_free(metrics); - return; - } - - if (len != rte_metrics_get_names(names, len)) { - printf("Cannot get metrics names\n"); - rte_free(metrics); - rte_free(names); - return; - } - - if (port_id == RTE_METRICS_GLOBAL) - printf("###### Non port specific metrics #########\n"); - else - printf("###### metrics for port %-2d #########\n", port_id); - printf("%s############################\n", nic_stats_border); - ret = rte_metrics_get_values(port_id, metrics, len); - if (ret < 0 || ret > len) { - printf("Cannot get metrics values\n"); - rte_free(metrics); - rte_free(names); - return; - } - - int i; - for (i = 0; i < len; i++) - printf("%s: %"PRIu64"\n", names[i].name, metrics[i].value); - - printf("%s############################\n", nic_stats_border); - 
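
One behavioural fix hiding in this rewrite: this old file composed the collectd PUTVAL line with an unbounded sprintf() and wrote strlen(buf) bytes, while the replacement above uses snprintf() and clamps the length before write(). The bounded pattern as a standalone sketch (put_val() and the sample values are illustrative):

    #include <inttypes.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MAX_STRING_LEN 256

    /* Compose one collectd "PUTVAL" line with a hard length cap. */
    static int put_val(int fd, const char *host, unsigned int port,
            const char *type, const char *name, uint64_t value)
    {
        char buf[MAX_STRING_LEN];
        size_t n = snprintf(buf, sizeof(buf),
                "PUTVAL %s/dpdkstat-port.%u/%s-%s N:%" PRIu64 "\n",
                host, port, type, name, value);

        if (n > sizeof(buf) - 1)   /* snprintf() reports the untruncated size */
            n = sizeof(buf) - 1;   /* clamp to what is actually in buf */
        return write(fd, buf, n) < 0 ? -1 : 0;
    }

    int main(void)
    {
        return put_val(STDOUT_FILENO, "host0", 0,
                "if_rx_packets", "rx_good_packets", 12345);
    }
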
rte_free(metrics); - rte_free(names); -} - -int -main(int argc, char **argv) -{ - int ret; - int i; - char c_flag[] = "-c1"; - char n_flag[] = "-n4"; - char mp_flag[] = "--proc-type=secondary"; - char *argp[argc + 3]; - uint16_t nb_ports; - - /* preparse app arguments */ - ret = proc_info_preparse_args(argc, argv); - if (ret < 0) { - printf("Failed to parse arguments\n"); - return -1; - } - - argp[0] = argv[0]; - argp[1] = c_flag; - argp[2] = n_flag; - argp[3] = mp_flag; - - for (i = 1; i < argc; i++) - argp[i + 3] = argv[i]; - - argc += 3; - - ret = rte_eal_init(argc, argp); - if (ret < 0) - rte_panic("Cannot init EAL\n"); - - argc -= ret; - argv += (ret - 3); - - if (!rte_eal_primary_proc_alive(NULL)) - rte_exit(EXIT_FAILURE, "No primary DPDK process is running.\n"); - - /* parse app arguments */ - ret = proc_info_parse_args(argc, argv); - if (ret < 0) - rte_exit(EXIT_FAILURE, "Invalid argument\n"); - - if (mem_info) { - meminfo_display(); - return 0; - } - - nb_ports = rte_eth_dev_count(); - if (nb_ports == 0) - rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n"); - - /* If no port mask was specified*/ - if (enabled_port_mask == 0) - enabled_port_mask = 0xffff; - - for (i = 0; i < nb_ports; i++) { - if (enabled_port_mask & (1 << i)) { - if (enable_stats) - nic_stats_display(i); - else if (enable_xstats) - nic_xstats_display(i); - else if (reset_stats) - nic_stats_clear(i); - else if (reset_xstats) - nic_xstats_clear(i); - else if (enable_xstats_name) - nic_xstats_by_name_display(i, xstats_name); - else if (nb_xstats_ids > 0) - nic_xstats_by_ids_display(i, xstats_ids, - nb_xstats_ids); - else if (enable_metrics) - metrics_display(i); - } - } - - /* print port independent stats */ - if (enable_metrics) - metrics_display(RTE_METRICS_GLOBAL); - - ret = rte_eal_cleanup(); - if (ret) - printf("Error from rte_eal_cleanup(), %d\n", ret); - - return 0; -} diff --git a/app/test-bbdev/Makefile b/app/test-bbdev/Makefile index 9aedd776..6da0c8e0 100644 --- a/app/test-bbdev/Makefile +++ b/app/test-bbdev/Makefile @@ -20,4 +20,6 @@ SRCS-$(CONFIG_RTE_TEST_BBDEV) += test_bbdev.c SRCS-$(CONFIG_RTE_TEST_BBDEV) += test_bbdev_perf.c SRCS-$(CONFIG_RTE_TEST_BBDEV) += test_bbdev_vector.c +LDLIBS += -lm + include $(RTE_SDK)/mk/rte.app.mk diff --git a/app/test-bbdev/meson.build b/app/test-bbdev/meson.build new file mode 100644 index 00000000..653907de --- /dev/null +++ b/app/test-bbdev/meson.build @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +sources = files('main.c', + 'test_bbdev.c', + 'test_bbdev_perf.c', + 'test_bbdev_vector.c') +allow_experimental_apis = true +deps = ['bbdev', 'bus_vdev'] diff --git a/app/test-bbdev/test-bbdev.py b/app/test-bbdev/test-bbdev.py index ce781497..acab9eb1 100755 --- a/app/test-bbdev/test-bbdev.py +++ b/app/test-bbdev/test-bbdev.py @@ -33,7 +33,7 @@ parser.add_argument("-p", "--testapp-path", default=dpdk_path + "/" + dpdk_target + "/app/testbbdev") parser.add_argument("-e", "--eal-params", help="EAL arguments which are passed to the test app", - default="--vdev=bbdev_null0") + default="--vdev=baseband_null0") parser.add_argument("-t", "--timeout", type=int, help="Timeout in seconds", @@ -45,7 +45,7 @@ parser.add_argument("-v", "--test-vector", nargs="+", help="Specifies paths to the test vector files.", default=[dpdk_path + - "/app/test-bbdev/test_vectors/bbdev_vector_null.data"]) + "/app/test-bbdev/test_vectors/bbdev_null.data"]) parser.add_argument("-n", "--num-ops", type=int, help="Number of operations to process on 
device.", diff --git a/app/test-bbdev/test_bbdev.c b/app/test-bbdev/test_bbdev.c index 10579ea0..a914817b 100644 --- a/app/test-bbdev/test_bbdev.c +++ b/app/test-bbdev/test_bbdev.c @@ -273,7 +273,7 @@ test_bbdev_configure_stop_queue(void) /* Valid queue configuration */ ts_params->qconf.queue_size = info.drv.queue_size_lim; - ts_params->qconf.priority = info.drv.max_queue_priority; + ts_params->qconf.priority = info.drv.max_ul_queue_priority; /* Device - started; queue - started */ rte_bbdev_start(dev_id); @@ -413,14 +413,7 @@ test_bbdev_configure_invalid_queue_configure(void) ts_params->qconf.queue_size); ts_params->qconf.queue_size = info.drv.queue_size_lim; - ts_params->qconf.priority = info.drv.max_queue_priority + 1; - TEST_ASSERT_FAIL(rte_bbdev_queue_configure(dev_id, queue_id, - &ts_params->qconf), - "Failed test for rte_bbdev_queue_configure: " - "invalid value qconf.queue_size: %u", - ts_params->qconf.queue_size); - - ts_params->qconf.priority = info.drv.max_queue_priority; + ts_params->qconf.priority = info.drv.max_ul_queue_priority; queue_id = info.num_queues; TEST_ASSERT_FAIL(rte_bbdev_queue_configure(dev_id, queue_id, &ts_params->qconf), @@ -902,12 +895,12 @@ test_bbdev_callback(void) "Failed to callback rgstr for RTE_BBDEV_EVENT_UNKNOWN"); rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_UNKNOWN, NULL); - TEST_ASSERT(event_status == 0, + TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_UNKNOWN, "Failed test for rte_bbdev_pmd_callback_process " "for RTE_BBDEV_EVENT_UNKNOWN "); rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_ERROR, NULL); - TEST_ASSERT(event_status == 0, + TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_UNKNOWN, "Failed test for rte_bbdev_pmd_callback_process: " "event RTE_BBDEV_EVENT_ERROR was not registered "); @@ -926,12 +919,12 @@ test_bbdev_callback(void) event_status = -1; rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_UNKNOWN, NULL); - TEST_ASSERT(event_status == 0, + TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_UNKNOWN, "Failed test for rte_bbdev_pmd_callback_process " "for RTE_BBDEV_EVENT_UNKNOWN "); rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_ERROR, NULL); - TEST_ASSERT(event_status == 1, + TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_ERROR, "Failed test for rte_bbdev_pmd_callback_process " "for RTE_BBDEV_EVENT_ERROR "); @@ -945,12 +938,12 @@ test_bbdev_callback(void) event_status = -1; rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_UNKNOWN, NULL); - TEST_ASSERT(event_status == 0, + TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_UNKNOWN, "Failed test for rte_bbdev_pmd_callback_process " "for RTE_BBDEV_EVENT_UNKNOWN "); rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_ERROR, NULL); - TEST_ASSERT(event_status == 0, + TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_UNKNOWN, "Failed test for rte_bbdev_pmd_callback_process: " "event RTE_BBDEV_EVENT_ERROR was unregistered "); @@ -999,7 +992,7 @@ test_bbdev_callback(void) "for RTE_BBDEV_EVENT_ERROR "); rte_bbdev_pmd_callback_process(dev2, RTE_BBDEV_EVENT_ERROR, NULL); - TEST_ASSERT(event_status == 1, + TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_ERROR, "Failed test for rte_bbdev_pmd_callback_process in dev2 " "for RTE_BBDEV_EVENT_ERROR "); @@ -1013,7 +1006,7 @@ test_bbdev_callback(void) "in dev 2 "); rte_bbdev_pmd_callback_process(dev2, RTE_BBDEV_EVENT_UNKNOWN, NULL); - TEST_ASSERT(event_status == 0, + TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_UNKNOWN, "Failed test for rte_bbdev_pmd_callback_process in dev2" " for RTE_BBDEV_EVENT_UNKNOWN "); 
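
The reworked assertions above pin down the callback contract: the handler receives the event type that actually fired, and the test now compares against the enum value itself instead of collapsing it to 0/1. A sketch of a conforming handler, assuming the rte_bbdev callback signature used elsewhere in this test:

    #include <rte_bbdev.h>
    #include <rte_common.h>

    static int event_status = -1;

    /* Record which event fired (RTE_BBDEV_EVENT_UNKNOWN, _ERROR, ...). */
    static void
    event_cb(uint16_t dev_id, enum rte_bbdev_event_type event,
            void *cb_arg, void *ret_param)
    {
        RTE_SET_USED(dev_id);
        RTE_SET_USED(cb_arg);
        RTE_SET_USED(ret_param);
        event_status = (int)event;
    }

    static int
    watch_errors(uint16_t dev_id)
    {
        return rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
                event_cb, NULL);
    }
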
@@ -1033,7 +1026,7 @@ test_bbdev_callback(void) " for RTE_BBDEV_EVENT_UNKNOWN "); rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_UNKNOWN, NULL); - TEST_ASSERT(event_status == 0, + TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_UNKNOWN, "Failed test for rte_bbdev_pmd_callback_process in dev2 " "for RTE_BBDEV_EVENT_UNKNOWN "); diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c index 00f3b085..6861edc4 100644 --- a/app/test-bbdev/test_bbdev_perf.c +++ b/app/test-bbdev/test_bbdev_perf.c @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -83,6 +84,30 @@ struct thread_params { struct test_op_params *op_params; }; +#ifdef RTE_BBDEV_OFFLOAD_COST +/* Stores time statistics */ +struct test_time_stats { + /* Stores software enqueue total working time */ + uint64_t enq_sw_tot_time; + /* Stores minimum value of software enqueue working time */ + uint64_t enq_sw_min_time; + /* Stores maximum value of software enqueue working time */ + uint64_t enq_sw_max_time; + /* Stores turbo enqueue total working time */ + uint64_t enq_tur_tot_time; + /* Stores minimum value of turbo enqueue working time */ + uint64_t enq_tur_min_time; + /* Stores maximum value of turbo enqueue working time */ + uint64_t enq_tur_max_time; + /* Stores dequeue total working time */ + uint64_t deq_tot_time; + /* Stores minimum value of dequeue working time */ + uint64_t deq_min_time; + /* Stores maximum value of dequeue working time */ + uint64_t deq_max_time; +}; +#endif + typedef int (test_case_function)(struct active_device *ad, struct test_op_params *op_params); @@ -609,10 +634,32 @@ allocate_buffers_on_socket(struct rte_bbdev_op_data **buffers, const int len, return (*buffers == NULL) ? TEST_FAILED : TEST_SUCCESS; } +static void +limit_input_llr_val_range(struct rte_bbdev_op_data *input_ops, + uint16_t n, int8_t max_llr_modulus) +{ + uint16_t i, byte_idx; + + for (i = 0; i < n; ++i) { + struct rte_mbuf *m = input_ops[i].data; + while (m != NULL) { + int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *, + input_ops[i].offset); + for (byte_idx = 0; byte_idx < input_ops[i].length; + ++byte_idx) + llr[byte_idx] = round((double)max_llr_modulus * + llr[byte_idx] / INT8_MAX); + + m = m->next; + } + } +} + static int fill_queue_buffers(struct test_op_params *op_params, struct rte_mempool *in_mp, struct rte_mempool *hard_out_mp, struct rte_mempool *soft_out_mp, uint16_t queue_id, + const struct rte_bbdev_op_cap *capabilities, uint16_t min_alignment, const int socket_id) { int ret; @@ -649,6 +696,10 @@ fill_queue_buffers(struct test_op_params *op_params, "Couldn't init rte_bbdev_op_data structs"); } + if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) + limit_input_llr_val_range(*queue_ops[DATA_INPUT], n, + capabilities->cap.turbo_dec.max_llr_modulus); + return 0; } @@ -995,6 +1046,7 @@ run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id, struct active_device *ad; unsigned int burst_sz = get_burst_sz(); enum rte_bbdev_op_type op_type = test_vector.op_type; + const struct rte_bbdev_op_cap *capabilities = NULL; ad = &active_devs[dev_id]; @@ -1027,9 +1079,20 @@ run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id, goto fail; } - if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) + if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) { + /* Find Decoder capabilities */ + const struct rte_bbdev_op_cap *cap = info.drv.capabilities; + while (cap->type != RTE_BBDEV_OP_NONE) { + if (cap->type == RTE_BBDEV_OP_TURBO_DEC) { + capabilities = cap; + break; + } + } 
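
The capability lookup just added walks the driver's capability array until the RTE_BBDEV_OP_NONE terminator. Written out in full (including the advance to the next entry; without it, a non-matching first capability would spin forever), the pattern is, as an illustrative sketch:

    #include <stddef.h>
    #include <rte_bbdev.h>

    /* Find the driver's turbo-decoder capability entry, if any. */
    static const struct rte_bbdev_op_cap *
    find_dec_cap(const struct rte_bbdev_info *info)
    {
        const struct rte_bbdev_op_cap *cap = info->drv.capabilities;

        while (cap->type != RTE_BBDEV_OP_NONE) {
            if (cap->type == RTE_BBDEV_OP_TURBO_DEC)
                return cap;
            cap++;                  /* advance to the next capability */
        }
        return NULL;                /* caller asserts non-NULL, as above */
    }
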
+ TEST_ASSERT_NOT_NULL(capabilities, + "Couldn't find Decoder capabilities"); + create_reference_dec_op(op_params->ref_dec_op); - else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC) + } else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC) create_reference_enc_op(op_params->ref_enc_op); for (i = 0; i < ad->nb_queues; ++i) { @@ -1038,6 +1101,7 @@ run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id, ad->hard_out_mbuf_pool, ad->soft_out_mbuf_pool, ad->queue_ids[i], + capabilities, info.drv.min_alignment, socket_id); if (f_ret != TEST_SUCCESS) { @@ -1104,7 +1168,6 @@ dequeue_event_callback(uint16_t dev_id, double in_len; struct thread_params *tp = cb_arg; - RTE_SET_USED(ret_param); queue_id = tp->queue_id; @@ -1649,25 +1712,28 @@ throughput_test(struct active_device *ad, } static int -operation_latency_test_dec(struct rte_mempool *mempool, +latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op, int vector_mask, uint16_t dev_id, uint16_t queue_id, const uint16_t num_to_process, uint16_t burst_sz, - uint64_t *total_time) + uint64_t *total_time, uint64_t *min_time, uint64_t *max_time) { int ret = TEST_SUCCESS; uint16_t i, j, dequeued; struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST]; - uint64_t start_time = 0; + uint64_t start_time = 0, last_time = 0; for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) { uint16_t enq = 0, deq = 0; bool first_time = true; + last_time = 0; if (unlikely(num_to_process - dequeued < burst_sz)) burst_sz = num_to_process - dequeued; - rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz); + ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz); + TEST_ASSERT_SUCCESS(ret, + "rte_bbdev_dec_op_alloc_bulk() failed"); if (test_vector.op_type != RTE_BBDEV_OP_NONE) copy_reference_dec_op(ops_enq, burst_sz, dequeued, bufs->inputs, @@ -1692,11 +1758,15 @@ operation_latency_test_dec(struct rte_mempool *mempool, deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id, &ops_deq[deq], burst_sz - deq); if (likely(first_time && (deq > 0))) { - *total_time += rte_rdtsc_precise() - start_time; + last_time = rte_rdtsc_precise() - start_time; first_time = false; } } while (unlikely(burst_sz != deq)); + *max_time = RTE_MAX(*max_time, last_time); + *min_time = RTE_MIN(*min_time, last_time); + *total_time += last_time; + if (test_vector.op_type != RTE_BBDEV_OP_NONE) { ret = validate_dec_op(ops_deq, burst_sz, ref_op, vector_mask); @@ -1711,25 +1781,28 @@ operation_latency_test_dec(struct rte_mempool *mempool, } static int -operation_latency_test_enc(struct rte_mempool *mempool, +latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op, uint16_t dev_id, uint16_t queue_id, const uint16_t num_to_process, uint16_t burst_sz, - uint64_t *total_time) + uint64_t *total_time, uint64_t *min_time, uint64_t *max_time) { int ret = TEST_SUCCESS; uint16_t i, j, dequeued; struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST]; - uint64_t start_time = 0; + uint64_t start_time = 0, last_time = 0; for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) { uint16_t enq = 0, deq = 0; bool first_time = true; + last_time = 0; if (unlikely(num_to_process - dequeued < burst_sz)) burst_sz = num_to_process - dequeued; - rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz); + ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz); + TEST_ASSERT_SUCCESS(ret, + "rte_bbdev_enc_op_alloc_bulk() failed"); if (test_vector.op_type != RTE_BBDEV_OP_NONE) 
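
limit_input_llr_val_range() above rescales full-range int8 LLRs (±127 in the vectors) down to the modulus a fixed-point decoder advertises in its capabilities; this is also why the Makefile change earlier adds LDLIBS += -lm, for round(). The arithmetic in isolation, with 16 as an example modulus:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same formula as above: scale ±INT8_MAX LLRs to ±max_llr_modulus. */
    static void scale_llrs(int8_t *llr, unsigned int n, int8_t max_llr_modulus)
    {
        for (unsigned int i = 0; i < n; i++)
            llr[i] = (int8_t)round((double)max_llr_modulus *
                    llr[i] / INT8_MAX);
    }

    int main(void)    /* link with -lm */
    {
        int8_t v[] = { 127, -127, 64, -32, 0 };

        scale_llrs(v, 5, 16);
        for (int i = 0; i < 5; i++)
            printf("%d ", v[i]);    /* 16 -16 8 -4 0 */
        printf("\n");
        return 0;
    }
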
copy_reference_enc_op(ops_enq, burst_sz, dequeued, bufs->inputs, @@ -1753,11 +1826,15 @@ operation_latency_test_enc(struct rte_mempool *mempool, deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id, &ops_deq[deq], burst_sz - deq); if (likely(first_time && (deq > 0))) { - *total_time += rte_rdtsc_precise() - start_time; + last_time += rte_rdtsc_precise() - start_time; first_time = false; } } while (unlikely(burst_sz != deq)); + *max_time = RTE_MAX(*max_time, last_time); + *min_time = RTE_MIN(*min_time, last_time); + *total_time += last_time; + if (test_vector.op_type != RTE_BBDEV_OP_NONE) { ret = validate_enc_op(ops_deq, burst_sz, ref_op); TEST_ASSERT_SUCCESS(ret, "Validation failed!"); @@ -1771,7 +1848,7 @@ operation_latency_test_enc(struct rte_mempool *mempool, } static int -operation_latency_test(struct active_device *ad, +latency_test(struct active_device *ad, struct test_op_params *op_params) { int iter; @@ -1781,9 +1858,12 @@ operation_latency_test(struct active_device *ad, const uint16_t queue_id = ad->queue_ids[0]; struct test_buffers *bufs = NULL; struct rte_bbdev_info info; - uint64_t total_time = 0; + uint64_t total_time, min_time, max_time; const char *op_type_str; + total_time = max_time = 0; + min_time = UINT64_MAX; + TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST), "BURST_SIZE should be <= %u", MAX_BURST); @@ -1798,36 +1878,66 @@ operation_latency_test(struct active_device *ad, info.dev_name, burst_sz, num_to_process, op_type_str); if (op_type == RTE_BBDEV_OP_TURBO_DEC) - iter = operation_latency_test_dec(op_params->mp, bufs, + iter = latency_test_dec(op_params->mp, bufs, op_params->ref_dec_op, op_params->vector_mask, ad->dev_id, queue_id, num_to_process, - burst_sz, &total_time); + burst_sz, &total_time, &min_time, &max_time); else - iter = operation_latency_test_enc(op_params->mp, bufs, + iter = latency_test_enc(op_params->mp, bufs, op_params->ref_enc_op, ad->dev_id, queue_id, - num_to_process, burst_sz, &total_time); + num_to_process, burst_sz, &total_time, + &min_time, &max_time); if (iter <= 0) return TEST_FAILED; - printf("\toperation avg. 
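
All of the latency reports in this rework use the same two ingredients: per-burst min/max/total aggregation with RTE_MIN/RTE_MAX, and TSC-cycle-to-microsecond conversion via rte_get_tsc_hz(). Factored out as a sketch (the struct and helper names are illustrative):

    #include <stdint.h>
    #include <rte_common.h>   /* RTE_MIN / RTE_MAX */
    #include <rte_cycles.h>   /* rte_get_tsc_hz() */

    struct lat_stats { uint64_t tot, min, max; };  /* init min = UINT64_MAX */

    /* Fold one per-burst cycle count into the running statistics. */
    static void lat_add(struct lat_stats *l, uint64_t cycles)
    {
        l->min = RTE_MIN(l->min, cycles);
        l->max = RTE_MAX(l->max, cycles);
        l->tot += cycles;
    }

    /* Cycles to microseconds: the same arithmetic the printfs use. */
    static double cycles_to_us(uint64_t cycles)
    {
        return (double)cycles * 1000000.0 / (double)rte_get_tsc_hz();
    }
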
latency: %lg cycles, %lg us\n", + printf("\toperation latency:\n" + "\t\tavg latency: %lg cycles, %lg us\n" + "\t\tmin latency: %lg cycles, %lg us\n" + "\t\tmax latency: %lg cycles, %lg us\n", (double)total_time / (double)iter, (double)(total_time * 1000000) / (double)iter / + (double)rte_get_tsc_hz(), (double)min_time, + (double)(min_time * 1000000) / (double)rte_get_tsc_hz(), + (double)max_time, (double)(max_time * 1000000) / (double)rte_get_tsc_hz()); return TEST_SUCCESS; } +#ifdef RTE_BBDEV_OFFLOAD_COST +static int +get_bbdev_queue_stats(uint16_t dev_id, uint16_t queue_id, + struct rte_bbdev_stats *stats) +{ + struct rte_bbdev *dev = &rte_bbdev_devices[dev_id]; + struct rte_bbdev_stats *q_stats; + + if (queue_id >= dev->data->num_queues) + return -1; + + q_stats = &dev->data->queues[queue_id].queue_stats; + + stats->enqueued_count = q_stats->enqueued_count; + stats->dequeued_count = q_stats->dequeued_count; + stats->enqueue_err_count = q_stats->enqueue_err_count; + stats->dequeue_err_count = q_stats->dequeue_err_count; + stats->offload_time = q_stats->offload_time; + + return 0; +} + static int offload_latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op, uint16_t dev_id, uint16_t queue_id, const uint16_t num_to_process, - uint16_t burst_sz, uint64_t *enq_total_time, - uint64_t *deq_total_time) + uint16_t burst_sz, struct test_time_stats *time_st) { - int i, dequeued; + int i, dequeued, ret; struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST]; uint64_t enq_start_time, deq_start_time; + uint64_t enq_sw_last_time, deq_last_time; + struct rte_bbdev_stats stats; for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) { uint16_t enq = 0, deq = 0; @@ -1843,24 +1953,54 @@ offload_latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs, bufs->soft_outputs, ref_op); - /* Start time measurment for enqueue function offload latency */ - enq_start_time = rte_rdtsc(); + /* Start time meas for enqueue function offload latency */ + enq_start_time = rte_rdtsc_precise(); do { enq += rte_bbdev_enqueue_dec_ops(dev_id, queue_id, &ops_enq[enq], burst_sz - enq); } while (unlikely(burst_sz != enq)); - *enq_total_time += rte_rdtsc() - enq_start_time; + + ret = get_bbdev_queue_stats(dev_id, queue_id, &stats); + TEST_ASSERT_SUCCESS(ret, + "Failed to get stats for queue (%u) of device (%u)", + queue_id, dev_id); + + enq_sw_last_time = rte_rdtsc_precise() - enq_start_time - + stats.offload_time; + time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time, + enq_sw_last_time); + time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time, + enq_sw_last_time); + time_st->enq_sw_tot_time += enq_sw_last_time; + + time_st->enq_tur_max_time = RTE_MAX(time_st->enq_tur_max_time, + stats.offload_time); + time_st->enq_tur_min_time = RTE_MIN(time_st->enq_tur_min_time, + stats.offload_time); + time_st->enq_tur_tot_time += stats.offload_time; /* ensure enqueue has been completed */ rte_delay_ms(10); - /* Start time measurment for dequeue function offload latency */ - deq_start_time = rte_rdtsc(); + /* Start time meas for dequeue function offload latency */ + deq_start_time = rte_rdtsc_precise(); + /* Dequeue one operation */ do { + deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id, + &ops_deq[deq], 1); + } while (unlikely(deq != 1)); + + deq_last_time = rte_rdtsc_precise() - deq_start_time; + time_st->deq_max_time = RTE_MAX(time_st->deq_max_time, + deq_last_time); + time_st->deq_min_time = RTE_MIN(time_st->deq_min_time, + deq_last_time); + 
time_st->deq_tot_time += deq_last_time; + + /* Dequeue remaining operations if needed*/ + while (burst_sz != deq) deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id, &ops_deq[deq], burst_sz - deq); - } while (unlikely(burst_sz != deq)); - *deq_total_time += rte_rdtsc() - deq_start_time; rte_bbdev_dec_op_free_bulk(ops_enq, deq); dequeued += deq; @@ -1873,12 +2013,13 @@ static int offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op, uint16_t dev_id, uint16_t queue_id, const uint16_t num_to_process, - uint16_t burst_sz, uint64_t *enq_total_time, - uint64_t *deq_total_time) + uint16_t burst_sz, struct test_time_stats *time_st) { - int i, dequeued; + int i, dequeued, ret; struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST]; uint64_t enq_start_time, deq_start_time; + uint64_t enq_sw_last_time, deq_last_time; + struct rte_bbdev_stats stats; for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) { uint16_t enq = 0, deq = 0; @@ -1893,24 +2034,53 @@ offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs, bufs->hard_outputs, ref_op); - /* Start time measurment for enqueue function offload latency */ - enq_start_time = rte_rdtsc(); + /* Start time meas for enqueue function offload latency */ + enq_start_time = rte_rdtsc_precise(); do { enq += rte_bbdev_enqueue_enc_ops(dev_id, queue_id, &ops_enq[enq], burst_sz - enq); } while (unlikely(burst_sz != enq)); - *enq_total_time += rte_rdtsc() - enq_start_time; + + ret = get_bbdev_queue_stats(dev_id, queue_id, &stats); + TEST_ASSERT_SUCCESS(ret, + "Failed to get stats for queue (%u) of device (%u)", + queue_id, dev_id); + + enq_sw_last_time = rte_rdtsc_precise() - enq_start_time - + stats.offload_time; + time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time, + enq_sw_last_time); + time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time, + enq_sw_last_time); + time_st->enq_sw_tot_time += enq_sw_last_time; + + time_st->enq_tur_max_time = RTE_MAX(time_st->enq_tur_max_time, + stats.offload_time); + time_st->enq_tur_min_time = RTE_MIN(time_st->enq_tur_min_time, + stats.offload_time); + time_st->enq_tur_tot_time += stats.offload_time; /* ensure enqueue has been completed */ rte_delay_ms(10); - /* Start time measurment for dequeue function offload latency */ - deq_start_time = rte_rdtsc(); + /* Start time meas for dequeue function offload latency */ + deq_start_time = rte_rdtsc_precise(); + /* Dequeue one operation */ do { + deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id, + &ops_deq[deq], 1); + } while (unlikely(deq != 1)); + + deq_last_time = rte_rdtsc_precise() - deq_start_time; + time_st->deq_max_time = RTE_MAX(time_st->deq_max_time, + deq_last_time); + time_st->deq_min_time = RTE_MIN(time_st->deq_min_time, + deq_last_time); + time_st->deq_tot_time += deq_last_time; + + while (burst_sz != deq) deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id, &ops_deq[deq], burst_sz - deq); - } while (unlikely(burst_sz != deq)); - *deq_total_time += rte_rdtsc() - deq_start_time; rte_bbdev_enc_op_free_bulk(ops_enq, deq); dequeued += deq; @@ -1918,13 +2088,20 @@ offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs, return i; } +#endif static int -offload_latency_test(struct active_device *ad, +offload_cost_test(struct active_device *ad, struct test_op_params *op_params) { +#ifndef RTE_BBDEV_OFFLOAD_COST + RTE_SET_USED(ad); + RTE_SET_USED(op_params); + printf("Offload latency test is disabled.\n"); + printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' 
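
The decoder and encoder offload tests above both split one enqueue burst's wall-clock cost into a "software" part (driver call overhead) and a "turbo" part (the device time the driver accumulates in its per-queue stats as offload_time). The split in isolation, assuming the caller samples the queue stats right after the burst:

    #include <stdint.h>
    #include <rte_cycles.h>

    /* Wall time minus driver-reported device time = software overhead. */
    static void
    split_enq_cost(uint64_t enq_start, uint64_t offload_cycles,
            uint64_t *sw_cycles, uint64_t *dev_cycles)
    {
        uint64_t wall = rte_rdtsc_precise() - enq_start;

        *dev_cycles = offload_cycles;          /* time spent in the device */
        *sw_cycles = wall - offload_cycles;    /* everything else */
    }
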
to turn the test on.\n"); + return TEST_SKIPPED; +#else int iter; - uint64_t enq_total_time = 0, deq_total_time = 0; uint16_t burst_sz = op_params->burst_sz; const uint16_t num_to_process = op_params->num_to_process; const enum rte_bbdev_op_type op_type = test_vector.op_type; @@ -1932,6 +2109,12 @@ offload_latency_test(struct active_device *ad, struct test_buffers *bufs = NULL; struct rte_bbdev_info info; const char *op_type_str; + struct test_time_stats time_st; + + memset(&time_st, 0, sizeof(struct test_time_stats)); + time_st.enq_sw_min_time = UINT64_MAX; + time_st.enq_tur_min_time = UINT64_MAX; + time_st.deq_min_time = UINT64_MAX; TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST), "BURST_SIZE should be <= %u", MAX_BURST); @@ -1949,48 +2132,82 @@ offload_latency_test(struct active_device *ad, if (op_type == RTE_BBDEV_OP_TURBO_DEC) iter = offload_latency_test_dec(op_params->mp, bufs, op_params->ref_dec_op, ad->dev_id, queue_id, - num_to_process, burst_sz, &enq_total_time, - &deq_total_time); + num_to_process, burst_sz, &time_st); else iter = offload_latency_test_enc(op_params->mp, bufs, op_params->ref_enc_op, ad->dev_id, queue_id, - num_to_process, burst_sz, &enq_total_time, - &deq_total_time); + num_to_process, burst_sz, &time_st); if (iter <= 0) return TEST_FAILED; - printf("\tenq offload avg. latency: %lg cycles, %lg us\n", - (double)enq_total_time / (double)iter, - (double)(enq_total_time * 1000000) / (double)iter / - (double)rte_get_tsc_hz()); - - printf("\tdeq offload avg. latency: %lg cycles, %lg us\n", - (double)deq_total_time / (double)iter, - (double)(deq_total_time * 1000000) / (double)iter / - (double)rte_get_tsc_hz()); + printf("\tenq offload cost latency:\n" + "\t\tsoftware avg %lg cycles, %lg us\n" + "\t\tsoftware min %lg cycles, %lg us\n" + "\t\tsoftware max %lg cycles, %lg us\n" + "\t\tturbo avg %lg cycles, %lg us\n" + "\t\tturbo min %lg cycles, %lg us\n" + "\t\tturbo max %lg cycles, %lg us\n", + (double)time_st.enq_sw_tot_time / (double)iter, + (double)(time_st.enq_sw_tot_time * 1000000) / + (double)iter / (double)rte_get_tsc_hz(), + (double)time_st.enq_sw_min_time, + (double)(time_st.enq_sw_min_time * 1000000) / + rte_get_tsc_hz(), (double)time_st.enq_sw_max_time, + (double)(time_st.enq_sw_max_time * 1000000) / + rte_get_tsc_hz(), (double)time_st.enq_tur_tot_time / + (double)iter, + (double)(time_st.enq_tur_tot_time * 1000000) / + (double)iter / (double)rte_get_tsc_hz(), + (double)time_st.enq_tur_min_time, + (double)(time_st.enq_tur_min_time * 1000000) / + rte_get_tsc_hz(), (double)time_st.enq_tur_max_time, + (double)(time_st.enq_tur_max_time * 1000000) / + rte_get_tsc_hz()); + + printf("\tdeq offload cost latency - one op:\n" + "\t\tavg %lg cycles, %lg us\n" + "\t\tmin %lg cycles, %lg us\n" + "\t\tmax %lg cycles, %lg us\n", + (double)time_st.deq_tot_time / (double)iter, + (double)(time_st.deq_tot_time * 1000000) / + (double)iter / (double)rte_get_tsc_hz(), + (double)time_st.deq_min_time, + (double)(time_st.deq_min_time * 1000000) / + rte_get_tsc_hz(), (double)time_st.deq_max_time, + (double)(time_st.deq_max_time * 1000000) / + rte_get_tsc_hz()); return TEST_SUCCESS; +#endif } +#ifdef RTE_BBDEV_OFFLOAD_COST static int offload_latency_empty_q_test_dec(uint16_t dev_id, uint16_t queue_id, const uint16_t num_to_process, uint16_t burst_sz, - uint64_t *deq_total_time) + uint64_t *deq_tot_time, uint64_t *deq_min_time, + uint64_t *deq_max_time) { int i, deq_total; struct rte_bbdev_dec_op *ops[MAX_BURST]; - uint64_t deq_start_time; + uint64_t deq_start_time, deq_last_time; /* Test deq 
offload latency from an empty queue */ - deq_start_time = rte_rdtsc_precise(); + for (i = 0, deq_total = 0; deq_total < num_to_process; ++i, deq_total += burst_sz) { + deq_start_time = rte_rdtsc_precise(); + if (unlikely(num_to_process - deq_total < burst_sz)) burst_sz = num_to_process - deq_total; rte_bbdev_dequeue_dec_ops(dev_id, queue_id, ops, burst_sz); + + deq_last_time = rte_rdtsc_precise() - deq_start_time; + *deq_max_time = RTE_MAX(*deq_max_time, deq_last_time); + *deq_min_time = RTE_MIN(*deq_min_time, deq_last_time); + *deq_tot_time += deq_last_time; } - *deq_total_time = rte_rdtsc_precise() - deq_start_time; return i; } @@ -1998,31 +2215,45 @@ offload_latency_empty_q_test_dec(uint16_t dev_id, uint16_t queue_id, static int offload_latency_empty_q_test_enc(uint16_t dev_id, uint16_t queue_id, const uint16_t num_to_process, uint16_t burst_sz, - uint64_t *deq_total_time) + uint64_t *deq_tot_time, uint64_t *deq_min_time, + uint64_t *deq_max_time) { int i, deq_total; struct rte_bbdev_enc_op *ops[MAX_BURST]; - uint64_t deq_start_time; + uint64_t deq_start_time, deq_last_time; /* Test deq offload latency from an empty queue */ - deq_start_time = rte_rdtsc_precise(); for (i = 0, deq_total = 0; deq_total < num_to_process; ++i, deq_total += burst_sz) { + deq_start_time = rte_rdtsc_precise(); + if (unlikely(num_to_process - deq_total < burst_sz)) burst_sz = num_to_process - deq_total; rte_bbdev_dequeue_enc_ops(dev_id, queue_id, ops, burst_sz); + + deq_last_time = rte_rdtsc_precise() - deq_start_time; + *deq_max_time = RTE_MAX(*deq_max_time, deq_last_time); + *deq_min_time = RTE_MIN(*deq_min_time, deq_last_time); + *deq_tot_time += deq_last_time; } - *deq_total_time = rte_rdtsc_precise() - deq_start_time; return i; } +#endif static int offload_latency_empty_q_test(struct active_device *ad, struct test_op_params *op_params) { +#ifndef RTE_BBDEV_OFFLOAD_COST + RTE_SET_USED(ad); + RTE_SET_USED(op_params); + printf("Offload latency empty dequeue test is disabled.\n"); + printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n"); + return TEST_SKIPPED; +#else int iter; - uint64_t deq_total_time = 0; + uint64_t deq_tot_time, deq_min_time, deq_max_time; uint16_t burst_sz = op_params->burst_sz; const uint16_t num_to_process = op_params->num_to_process; const enum rte_bbdev_op_type op_type = test_vector.op_type; @@ -2030,6 +2261,9 @@ offload_latency_empty_q_test(struct active_device *ad, struct rte_bbdev_info info; const char *op_type_str; + deq_tot_time = deq_max_time = 0; + deq_min_time = UINT64_MAX; + TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST), "BURST_SIZE should be <= %u", MAX_BURST); @@ -2044,20 +2278,29 @@ offload_latency_empty_q_test(struct active_device *ad, if (op_type == RTE_BBDEV_OP_TURBO_DEC) iter = offload_latency_empty_q_test_dec(ad->dev_id, queue_id, - num_to_process, burst_sz, &deq_total_time); + num_to_process, burst_sz, &deq_tot_time, + &deq_min_time, &deq_max_time); else iter = offload_latency_empty_q_test_enc(ad->dev_id, queue_id, - num_to_process, burst_sz, &deq_total_time); + num_to_process, burst_sz, &deq_tot_time, + &deq_min_time, &deq_max_time); if (iter <= 0) return TEST_FAILED; - printf("\tempty deq offload avg. latency: %lg cycles, %lg us\n", - (double)deq_total_time / (double)iter, - (double)(deq_total_time * 1000000) / (double)iter / - (double)rte_get_tsc_hz()); + printf("\tempty deq offload\n" + "\t\tavg. latency: %lg cycles, %lg us\n" + "\t\tmin. latency: %lg cycles, %lg us\n" + "\t\tmax. 
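
Both offload tests are now compiled out unless RTE_BBDEV_OFFLOAD_COST is set, and report "skipped" rather than passing without measuring anything. The shape of that gate, reduced to a sketch (the TEST_* values are illustrative stand-ins for the harness macros):

    #include <stdio.h>

    #define TEST_SUCCESS 0   /* illustrative values */
    #define TEST_SKIPPED 1

    static int
    offload_style_test(void)
    {
    #ifndef RTE_BBDEV_OFFLOAD_COST
        printf("Offload latency test is disabled.\n");
        printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
        return TEST_SKIPPED;
    #else
        /* timed enqueue/dequeue loops would run here */
        return TEST_SUCCESS;
    #endif
    }

    int main(void)
    {
        return offload_style_test();
    }
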
latency: %lg cycles, %lg us\n", + (double)deq_tot_time / (double)iter, + (double)(deq_tot_time * 1000000) / (double)iter / + (double)rte_get_tsc_hz(), (double)deq_min_time, + (double)(deq_min_time * 1000000) / rte_get_tsc_hz(), + (double)deq_max_time, (double)(deq_max_time * 1000000) / + rte_get_tsc_hz()); return TEST_SUCCESS; +#endif } static int @@ -2067,9 +2310,9 @@ throughput_tc(void) } static int -offload_latency_tc(void) +offload_cost_tc(void) { - return run_test_case(offload_latency_test); + return run_test_case(offload_cost_test); } static int @@ -2079,9 +2322,9 @@ offload_latency_empty_q_tc(void) } static int -operation_latency_tc(void) +latency_tc(void) { - return run_test_case(operation_latency_test); + return run_test_case(latency_test); } static int @@ -2105,7 +2348,7 @@ static struct unit_test_suite bbdev_validation_testsuite = { .setup = testsuite_setup, .teardown = testsuite_teardown, .unit_test_cases = { - TEST_CASE_ST(ut_setup, ut_teardown, operation_latency_tc), + TEST_CASE_ST(ut_setup, ut_teardown, latency_tc), TEST_CASES_END() /**< NULL terminate unit test array */ } }; @@ -2115,9 +2358,18 @@ static struct unit_test_suite bbdev_latency_testsuite = { .setup = testsuite_setup, .teardown = testsuite_teardown, .unit_test_cases = { - TEST_CASE_ST(ut_setup, ut_teardown, offload_latency_tc), + TEST_CASE_ST(ut_setup, ut_teardown, latency_tc), + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +static struct unit_test_suite bbdev_offload_cost_testsuite = { + .suite_name = "BBdev Offload Cost Tests", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + TEST_CASE_ST(ut_setup, ut_teardown, offload_cost_tc), TEST_CASE_ST(ut_setup, ut_teardown, offload_latency_empty_q_tc), - TEST_CASE_ST(ut_setup, ut_teardown, operation_latency_tc), TEST_CASES_END() /**< NULL terminate unit test array */ } }; @@ -2135,4 +2387,5 @@ static struct unit_test_suite bbdev_interrupt_testsuite = { REGISTER_TEST_COMMAND(throughput, bbdev_throughput_testsuite); REGISTER_TEST_COMMAND(validation, bbdev_validation_testsuite); REGISTER_TEST_COMMAND(latency, bbdev_latency_testsuite); +REGISTER_TEST_COMMAND(offload, bbdev_offload_cost_testsuite); REGISTER_TEST_COMMAND(interrupt, bbdev_interrupt_testsuite); diff --git a/app/test-bbdev/test_bbdev_vector.c b/app/test-bbdev/test_bbdev_vector.c index addef057..81b8ee78 100644 --- a/app/test-bbdev/test_bbdev_vector.c +++ b/app/test-bbdev/test_bbdev_vector.c @@ -144,6 +144,8 @@ op_decoder_flag_strtoul(char *token, uint32_t *op_flag_value) *op_flag_value = RTE_BBDEV_TURBO_MAP_DEC; else if (!strcmp(token, "RTE_BBDEV_TURBO_DEC_SCATTER_GATHER")) *op_flag_value = RTE_BBDEV_TURBO_DEC_SCATTER_GATHER; + else if (!strcmp(token, "RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP")) + *op_flag_value = RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP; else { printf("The given value is not a turbo decoder flag\n"); return -1; @@ -889,8 +891,7 @@ test_bbdev_vector_read(const char *filename, goto exit; } - memset(entry, 0, strlen(line) + 1); - strncpy(entry, line, strlen(line)); + strcpy(entry, line); /* check if entry ends with , or = */ if (entry[strlen(entry) - 1] == ',' @@ -912,7 +913,8 @@ test_bbdev_vector_read(const char *filename, } entry = entry_extended; - strncat(entry, line, strlen(line)); + /* entry has been allocated accordingly */ + strcpy(&entry[strlen(entry)], line); if (entry[strlen(entry) - 1] != ',') break; diff --git a/app/test-bbdev/test_vectors/bbdev_null.data b/app/test-bbdev/test_vectors/bbdev_null.data new file mode 100644 index 
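
The vector-reader change above drops a strncat() whose bound was the source length (no real bound at all) in favour of a strcpy() into the tail of a buffer that was just realloc'd to fit. The grow-and-append pattern on its own (append_line() is an illustrative name):

    #include <stdlib.h>
    #include <string.h>

    /* Append `line` to heap string `entry`, growing it exactly to fit. */
    static char *append_line(char *entry, const char *line)
    {
        size_t cur = entry ? strlen(entry) : 0;
        char *grown = realloc(entry, cur + strlen(line) + 1);

        if (grown == NULL)
            return NULL;              /* caller still owns `entry` */
        strcpy(grown + cur, line);    /* space was reserved just above */
        return grown;
    }
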
diff --git a/app/test-bbdev/test_vectors/bbdev_null.data b/app/test-bbdev/test_vectors/bbdev_null.data
new file mode 100644
index 00000000..c9a9abe9
--- /dev/null
+++ b/app/test-bbdev/test_vectors/bbdev_null.data
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_NONE
\ No newline at end of file
diff --git a/app/test-bbdev/test_vectors/bbdev_vector_null.data b/app/test-bbdev/test_vectors/bbdev_vector_null.data
deleted file mode 100644
index c9a9abe9..00000000
--- a/app/test-bbdev/test_vectors/bbdev_vector_null.data
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
-
-op_type =
-RTE_BBDEV_OP_NONE
\ No newline at end of file
diff --git a/app/test-bbdev/test_vectors/bbdev_vector_td_default.data b/app/test-bbdev/test_vectors/bbdev_vector_td_default.data
deleted file mode 100644
index b5c30278..00000000
--- a/app/test-bbdev/test_vectors/bbdev_vector_td_default.data
+++ /dev/null
@@ -1,54 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
-
-op_type =
-RTE_BBDEV_OP_TURBO_DEC
-
-input0 =
-0x7f007f00, 0x7f817f00, 0x767f8100, 0x817f8100, 0x81008100, 0x7f818100, 0x81817f00, 0x7f818100,
-0x86007f00, 0x7f818100, 0x887f8100, 0x81815200, 0x81008100, 0x817f7f00, 0x7f7f8100, 0x9e817f00,
-0x7f7f0000, 0xb97f0000, 0xa7810000, 0x7f7f4a7f, 0x7f810000, 0x7f7f7f7f, 0x81720000, 0x40658181,
-0x84810000, 0x817f0000, 0x81810000, 0x7f818181, 0x7f810000, 0x81815a81, 0x817f0000, 0x7a867f7b,
-0x817f0000, 0x6b7f0000, 0x7f810000, 0x81818181, 0x817f0000, 0x7f7f817f, 0x7f7f0000, 0xab7f4f7f,
-0x817f0000, 0x817f6c00, 0x81810000, 0x817f8181, 0x7f810000, 0x81816981, 0x7f7f0000, 0x007f8181
-
-hard_output0 =
-0xa7d6732e, 0x61
-
-soft_output0 =
-0x7f7f7f7f, 0x81817f7f, 0x7f817f81, 0x817f7f81, 0x81817f81, 0x81817f81, 0x8181817f, 0x7f81817f,
-0x7f81817f, 0x7f817f7f, 0x81817f7f, 0x817f8181, 0x81818181, 0x817f7f7f, 0x7f818181, 0x817f817f,
-0x81818181, 0x81817f7f, 0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x817f7f7f, 0x7f81817f, 0x817f817f,
-0x81817f7f, 0x81817f7f, 0x81817f7f, 0x7f817f7f, 0x817f7f81, 0x7f7f8181, 0x81817f81, 0x817f7f7f,
-0x7f7f8181
-
-e =
-17280
-
-k =
-40
-
-rv_index =
-1
-
-iter_max =
-8
-
-iter_min =
-4
-
-expected_iter_count =
-8
-
-ext_scale =
-15
-
-num_maps =
-0
-
-op_flags =
-RTE_BBDEV_TURBO_SOFT_OUTPUT, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN,
-RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT
-
-expected_status =
-OK
diff --git a/app/test-bbdev/test_vectors/bbdev_vector_te_default.data b/app/test-bbdev/test_vectors/bbdev_vector_te_default.data
deleted file mode 100644
index 883a76cf..00000000
--- a/app/test-bbdev/test_vectors/bbdev_vector_te_default.data
+++ /dev/null
@@ -1,33 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
-
-op_type =
-RTE_BBDEV_OP_TURBO_ENC
-
-input0 =
-0x11d2bcac, 0x4d
-
-output0 =
-0xd2399179, 0x640eb999, 0x2cbaf577, 0xaf224ae2, 0x9d139927, 0xe6909b29, 0xa25b7f47, 0x2aa224ce,
-0x79f2
-
-e =
-272
-
-k =
-40
-
-ncb =
-192
-
-rv_index =
-0
-
-code_block_mode =
-1
-
-op_flags =
-RTE_BBDEV_TURBO_RATE_MATCH
-
-expected_status =
-OK
diff --git a/app/test-bbdev/test_vectors/turbo_dec_c1_k40_r0_e17280_sbd_negllr.data b/app/test-bbdev/test_vectors/turbo_dec_c1_k40_r0_e17280_sbd_negllr.data
new file mode 100644
index 00000000..d98210ff
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_dec_c1_k40_r0_e17280_sbd_negllr.data
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_DEC
+
+input0 =
+0x7f007f00, 0x7f817f00, 0x767f8100,
0x817f8100, 0x81008100, 0x7f818100, 0x81817f00, 0x7f818100, +0x86007f00, 0x7f818100, 0x887f8100, 0x81815200, 0x81008100, 0x817f7f00, 0x7f7f8100, 0x9e817f00, +0x7f7f0000, 0xb97f0000, 0xa7810000, 0x7f7f4a7f, 0x7f810000, 0x7f7f7f7f, 0x81720000, 0x40658181, +0x84810000, 0x817f0000, 0x81810000, 0x7f818181, 0x7f810000, 0x81815a81, 0x817f0000, 0x7a867f7b, +0x817f0000, 0x6b7f0000, 0x7f810000, 0x81818181, 0x817f0000, 0x7f7f817f, 0x7f7f0000, 0xab7f4f7f, +0x817f0000, 0x817f6c00, 0x81810000, 0x817f8181, 0x7f810000, 0x81816981, 0x7f7f0000, 0x007f8181 + +hard_output0 = +0xa7d6732e, 0x61 + +soft_output0 = +0x7f7f7f7f, 0x81817f7f, 0x7f817f81, 0x817f7f81, 0x81817f81, 0x81817f81, 0x8181817f, 0x7f81817f, +0x7f81817f, 0x7f817f7f, 0x81817f7f, 0x817f8181, 0x81818181, 0x817f7f7f, 0x7f818181, 0x817f817f, +0x81818181, 0x81817f7f, 0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x817f7f7f, 0x7f81817f, 0x817f817f, +0x81817f7f, 0x81817f7f, 0x81817f7f, 0x7f817f7f, 0x817f7f81, 0x7f7f8181, 0x81817f81, 0x817f7f7f, +0x7f7f8181 + +e = +17280 + +k = +40 + +rv_index = +1 + +iter_max = +8 + +iter_min = +4 + +expected_iter_count = +8 + +ext_scale = +15 + +num_maps = +0 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_SOFT_OUTPUT, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, +RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN, RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT + +expected_status = +OK diff --git a/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data new file mode 100644 index 00000000..3472c992 --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data @@ -0,0 +1,643 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_DEC + +input0 = +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0xCE000000, 0x2F00F6D9, 0xCC1AF942, 0x22F8F4E1, 0xBED8FAFF, 0x43DC19B5, 0x35E91CC0, 0x5B070D30, +0xF9DACDFD, 0x170121DA, 0x012AEF53, 0x39E7D90E, 0xD5FBEFDD, 0xCCF0FDC8, 0xD62B0D54, 0xEAE601BE, +0xE5001328, 0x0CF00DC7, 0x3EE0E4B8, 0xC2D8EB00, 0xBE1EEBF7, 0xCDF5E7E4, 0x4FDFF4FA, 0xF402DA2A, +0xFEFEE32B, 0xD62CDA55, 0x17DE0106, 0xF92411B5, 0x301B2142, 0xE50C09E3, 0x51030EDB, 0x48DB2A4C, +0x03E420BC, 0x1AE1DCF8, 0xB6EE0D3A, 0xC2FADF22, 0xBF211508, 0x3228E8AF, 0xE0170BEE, 0x26F8091F, +0xDBFA02D2, 0xC810FE19, 0xE4DE1005, 0x18FF0D29, 0xE723F04B, 
0xFDF2F1E7, 0xCF05DBD3, 0x0412F616, +0x00DC234C, 0x1501EDD7, 0xD9EB4FED, 0x1C03BC2B, 0xE2FDF6D4, 0xE9EC10C4, 0x2426034D, 0xEDE33B45, +0x05DC2304, 0xEDE81411, 0x11E416BD, 0x03D1DAA8, 0x04F72431, 0x30F15718, 0xF3F33534, 0xED043AD3, +0x1CE8BC11, 0xEDE1EBF7, 0x242C0454, 0x0118D740, 0x07F3D01B, 0x0205D623, 0x15233E05, 0xF2EBE5ED, +0xED2D3BAC, 0xEBF83CD1, 0xFCF9D421, 0x0B0934E0, 0x0A09E1E0, 0x1AEB0EC4, 0xF8FEDF26, 0xFCE8D53F, +0x20E10846, 0xE327F5B1, 0x18E340F5, 0xF71ED0BB, 0x0EF5CBE3, 0xF4F11C36, 0x141BC343, 0xF4E7E5F1, +0x30205809, 0x10EAC713, 0x22DBB603, 0x250AFC1E, 0xE4F3BB1A, 0x2316FA12, 0xDEE105F7, 0x12FDC7D4, +0xFC212507, 0xDB290300, 0x1500ED00, 0x1EABBA27, 0xF14ECA2D, 0xF2D93625, 0x23F20502, 0xF0B9C8E6, +0xEAD83EE1, 0x0C331CFF, 0x23D205F5, 0x0EC61906, 0xF128C9EF, 0xEF3CC800, 0x0B9DCDED, 0xF3C2CC3C, +0x23EB04E9, 0x0B51CD13, 0xEADEC3D7, 0x222D06FB, 0xF0331904, 0x16C93F0C, 0xDAE5FE0F, 0x1F14B80C, +0x001B28ED, 0x3F3B67F3, 0xEEEDE9EC, 0x1CE94514, 0x1D4145EF, 0xD8BA5019, 0xFC06D41E, 0x17BDC0DF, +0x16D712E4, 0xE8041001, 0xEC2AEDDB, 0x11BE3803, 0xEBD2ECE5, 0xEF5A17F9, 0x2BFF52CD, 0xE958C126, +0xEE1A1531, 0x0EBF1BF2, 0x18BC401A, 0x2AC2521C, 0xEE4817EA, 0x07482E20, 0x141314E0, 0xF41DCD15, +0xDB10B40C, 0xC23700E9, 0x4221EB10, 0x4BE7E607, 0xEEF9DC0E, 0x62AC16DF, 0xE2E1C5D3, 0x27E4F70A, +0x3213020D, 0x4ABE0A14, 0xACC1DE1B, 0xD636D416, 0xDFAF020E, 0xCE230828, 0x2615F6FB, 0xEA1002EE, +0xD1351117, 0xB71C07F3, 0x03C9DF0C, 0x2B52DB0F, 0xD9AB02D7, 0xD20702D3, 0x48EEFB21, 0x3AE5E116, +0xE728EE0E, 0x33C50F00, 0x363EF5EC, 0xD4FBF217, 0xB00FFDDE, 0x2333281A, 0x3624040C, 0x02DB0E04, +0x3FDE27FD, 0x173716FB, 0xF20BEFF1, 0x3EF0191E, 0xB6B01719, 0x1BC7DD27, 0xE80AF3EE, 0x20BE101E, +0xF42EF8E6, 0x40531BF9, 0x4646172B, 0x44DA1E1D, 0xC7F5E503, 0xF3E8F0E4, 0xA4E2E5EF, 0x42B935F6, +0xEB1D1A1E, 0xE500ED0B, 0x2909F2CF, 0xCE0E0037, 0xD1320AA6, 0xEB01F9D9, 0x45E7ECC0, 0x4CFEE3D7, +0x0EE024B8, 0xD3F31B1A, 0xE846056E, 0xF522EFFB, 0x5F1AE443, 0xCAF93620, 0x2CE9F23E, 0xDDE604F2, +0x3BDDFC04, 0xC8EBEE13, 0xC11FF0F6, 0x18EBE9EC, 0xFAEEF1C7, 0xB6EFDDEA, 0xF8E123F7, 0x4EF4E1CC, +0x430C271C, 0x3BE01CB8, 0xABE813EF, 0xCAEED439, 0xC1FAF3D1, 0x4E2F1656, 0x11E4DB0C, 0xDAF816DF, +0x44E3FE0C, 0x2B1D1C0B, 0x3300FD28, 0xC320F4F9, 0x2A1DEB0A, 0xEA2502B4, 0x41061322, 0xC81518C2, +0x1F2510FD, 0xD1E1F7F7, 0xBD1906F1, 0xF3C9E55F, 0xB3D91A4F, 0x0D0D26CC, 0xBD071B20, 0x41141BC4, +0x2A1B19F2, 0x4902FDD5, 0xD6F9DE2E, 0x19190341, 0x09E80F11, 0x31FC1E2C, 0xEC1DF8F4, 0xCC0B14E2, +0x51DFF3B7, 0x1AF028C8, 0x0F0E0E37, 0x2B041A25, 0x1A03FD25, 0xC5030FD5, 0xBCFAED22, 0x9A1F1CB9, +0xCCE8C2F0, 0xEF1BF4BD, 0x21EC163C, 0xBAED07C6, 0x03F7E3D0, 0xC503DB2A, 0xC20C13E4, 0xD4F8EA30, +0x391B0544, 0x59F3EECB, 0xAD0A311D, 0xFE0C2C33, 0x250CDBE4, 0xC5E80341, 0x3B0914CE, 0xC0F2ED36, +0xC204182B, 0x3B0C16E5, 0xFFFF1429, 0xC316D8C3, 0xE408EB1F, 0x3713F4C4, 0xE4F2F01A, 0x0A280C01, +0xEC0FE318, 0xCB16153E, 0x5C04F2D4, 0x281F35F8, 0xE4D80051, 0x4E0A0CE3, 0xB107DB21, 0xD1DED9FB, +0x380AFACE, 0x50CC105C, 0xD9D80100, 0xF40ECCE6, 0xDADC03B4, 0x0AE5E243, 0x0C22E4FA, 0x08CD1F5A, +0xE2D1F7A9, 0x12CE39A5, 0xFFDDD7FB, 0x032C2BAC, 0xF1D9E700, 0x14003B28, 0xFC08D420, 0x08093131, +0xFEFFD7D7, 0xBB219348, 0x07DC2E4C, 0xDF0DB8CB, 0x22EB06EC, 0x0BE034F8, 0xF10FC819, 0x1623EEFA, +0x0D1DE6BA, 0xDA1AB142, 0x1C16F4EE, 0x27F500E2, 0x1137C85E, 0x1C0BF434, 0xF004C924, 0x02EBD9ED, +0x1BF943D2, 0xE7F80F30, 0xEFFE39D9, 0x2CFA5323, 0x04242DFB, 0xF420E347, 0x15F43C33, 0xE9F41134, +0x20EBB8C2, 0x0205D523, 0xECE03DB8, 0x10F3E935, 0x180BC11E, 0x12E2C7BA, 0xE70B4233, 0xEED8EAB0, +0x110E3936, 0xFEE327F5, 
0x12E60017, 0xFE4026F3, 0xED26C518, 0xE5E9F2FF, 0x200C08EE, 0xEF1439E4, +0xE92010ED, 0xE1B80908, 0x18584021, 0x24FBFC31, 0x00AE2823, 0x040CDC2A, 0xDE34FBE5, 0x1439C30D, +0x0DB7E6EF, 0xE6090F21, 0xECAF3C20, 0x10DFE8D8, 0x0CC0CCF9, 0x15061318, 0x043723DD, 0xEA3A3DF0, +0xEC18C413, 0xFE2BD611, 0xE3BC45FE, 0x16FCC11D, 0x120E39DC, 0xEC00141B, 0x13F03B27, 0x24D2FC18, +0x0917E1FB, 0x113BEAEE, 0xE9FE11EE, 0x2C505427, 0x0D0B3529, 0x15E73E1E, 0x0FD91AF1, 0xDFF8B6FF, +0xF413CCE1, 0x1EEFB9EA, 0xDCD04C17, 0xCEC25AF8, 0x1AB2BE17, 0xD8BEAFDA, 0xE6CEBD19, 0xFCC42CF6, +0xDE19FA15, 0x250203F2, 0xA90042DB, 0x242A2F1A, 0x004D0401, 0x0B4C2825, 0xACB61DDD, 0x07422CDE, +0xCDFEDFE7, 0xAFF8F527, 0x35052820, 0x0B34F2DD, 0xD648E2F3, 0x3DDF02E0, 0xFE42EBF8, 0xC1F6251A, +0xBA0DE9E2, 0x442FE2E5, 0xD014E308, 0x2DB409EC, 0x50F8FADC, 0x264BD8E0, 0x2E0FFEDC, 0x1E2D051A, +0x0FD20AFB, 0xCE1CE7FA, 0x35CDF6F4, 0xFC49F3F4, 0x642FDCDF, 0x3625C507, 0xDF140FFC, 0xDB21F913, +0xC7D5FDF9, 0xCEE112FE, 0xD1D4F7F8, 0x5D410704, 0xD914CC19, 0xCCF800EC, 0xE3E00C1F, 0xCD05F408, +0xC952F4DD, 0x444EF02A, 0x1E15E526, 0xD73909ED, 0xFA1900EF, 0xFFB6210F, 0x06EB27DE, 0x0B5BDF13, +0x1B4F1D33, 0xCDF5F4D9, 0x00F40C1D, 0xC6031CDC, 0xF91C13F4, 0x061321C6, 0xBFDD2206, 0xEE13193C, +0xFF1C17BB, 0xA91E27F5, 0x25ECD13C, 0x0CF90321, 0xBA1FE5B8, 0x50151F3D, 0xE935285D, 0x330A10CE, +0x3EE6F542, 0xE80AEA1E, 0x45251103, 0xC014E3EC, 0x2D2C1854, 0x9F0E0536, 0x2E14C7C3, 0xECE50642, +0xE6E214F7, 0x1021F208, 0xBCDC19FB, 0x4B281BFF, 0x0CDFDEB7, 0x21E91CEE, 0xC22FF856, 0xA2F3EAE4, +0x5810CB18, 0xE416D03E, 0x25E3F40C, 0xF4180340, 0xFE0EE4C9, 0x433C2763, 0x38F11BE7, 0xE5FC11D4, +0x1E11F4C6, 0x491D0ABB, 0xC0E72140, 0xB226E84F, 0x122E26AB, 0xEAF116E7, 0xD5E4EE0B, 0xF4FB0323, +0xDCE61D42, 0x05F1FCE7, 0x0DDCDDB4, 0xF9E42000, 0x181610EE, 0xE1F246CA, 0xEDEDC53B, 0x14DEC5B6, +0x1E02F629, 0xDEE749F0, 0x1C02BDD7, 0x1EF3F5CB, 0x11E8C640, 0xFA272301, 0xF3FC352C, 0xFDD52553, +0x09191FF1, 0xDB11FEE9, 0xF7EB1FEE, 0xFFF9292E, 0x0A2032B8, 0x0C1BCDBC, 0xDDFFFBD9, 0x1BEABDEF, +0x19F84130, 0xECE5ECF3, 0x271302EB, 0xE7EB423D, 0x1E2146B7, 0x14E13CF7, 0x16FE112B, 0xEB211308, +0xDAD802B0, 0x0017D811, 0xFE26D901, 0x0D0ACBE2, 0xF2ED1A3B, 0xFBDED206, 0x18F0F1C8, 0xD1EFA9C6, +0xE5E1BDF7, 0x1514C314, 0xF9FCD1D3, 0x02EB2BED, 0x0EEB1BC2, 0x2427B5FF, 0x0B2233FA, 0xFE0FDBE8, +0xF7DDCE04, 0x03F42BE3, 0x0525D202, 0xF9B20021, 0xEF0FE926, 0xF505E318, 0xF7DF1F23, 0xEADF12F8, +0xF12C3807, 0xDF01FA04, 0x1D23F426, 0xF1231A05, 0x20F549FB, 0xE0F0F9E3, 0xDB67B418, 0xC6E5623F, +0x212AF90D, 0xE6C7BD02, 0x24CA4C11, 0xE249F6F1, 0xE8383F21, 0xFC212310, 0xF7561F07, 0x25C4032E, +0x070EDE14, 0x1437151A, 0xE93C4010, 0xF10CE615, 0xDB0D4D1C, 0xFAF6221B, 0xF5F3E21E, 0xE2470B1B, +0xE71BC01F, 0x07B0220C, 0xF311CAD8, 0xFF052816, 0x1DC20BDC, 0xE7BB40EB, 0xF5DEE21E, 0x013527FB, +0x0FF519F2, 0x1AE9BDE4, 0x2B0F53F0, 0x13C6C5E8, 0xF12636ED, 0x10BD38FE, 0xDD0606E4, 0xDA5C4FDE, +0x1329C5CC, 0x0DC8CA00, 0xD33956F0, 0x0700D711, 0x31B1DE02, 0x210C09D9, 0x0C2EF81B, 0xF5FFE406, +0x27C1E2DA, 0xCB1801E8, 0x38C70EF0, 0x0D1EF0EF, 0x142E1C0B, 0x0B00EB06, 0x5D441DD8, 0x1E4035E5, +0xFD3C0AE8, 0x3732DCEC, 0xC8B5F00A, 0xB9BF1023, 0x0AF22018, 0xC6DDE21A, 0x18EF1306, 0x1AC8F1E9, +0x1F020EF0, 0xEA0209DB, 0x41CFEEDA, 0x21E31AF8, 0x4BBD070A, 0xEECFDD1A, 0xD4DB15F8, 0x430FFC02, +0x15361A18, 0xCF17ECF1, 0xE42009EF, 0xC21CF4F8, 0x3DDFEAF3, 0x3E0916F9, 0xF028151F, 0x03CA1700, +0x983B26F1, 0xDE2AC112, 0x209FF9FD, 0x1935F7C6, 0xED18F1F3, 0xBFD0EA0F, 0xC5071908, 0x1F4E13DE, +0x17F00926, 0xF80DEE19, 0x4DF01FE6, 0xB7D2DB18, 0x0AF5E0F9, 0x4043E21D, 0x4AD517E5, 
0xB0CFDE03, +0xFB22D90A, 0xDF5023F9, 0xDABDF828, 0x1911011A, 0xBDF30FE8, 0xCEAA1A1A, 0xB0D2F62D, 0x3AAC28F9, +0x42D6ED2D, 0xA83D1B02, 0x1831D1EA, 0xF11D10F7, 0x1D201AF5, 0x394F0C07, 0xB327F0D9, 0x3254DA01, +0xEBF40AD5, 0x26A413E4, 0x5113FECC, 0xD4F32915, 0xDDB3FD1A, 0xC0D2FB25, 0x02DEE7FB, 0xC33DDB05, +0x01021615, 0x0B1D27DA, 0xCBC2E2F6, 0x193CF3E9, 0x27E2F1EC, 0x5A1BFF0A, 0xE8CF320D, 0xBD4DF0F8, +0x0F291A26, 0x1817E7FF, 0xEFCB10F0, 0xD40CE9F3, 0x0606041B, 0xFABC2322, 0xE0E9DEE5, 0x1BD80710, +0xC015F301, 0x4718E8EE, 0x69D81E11, 0x19F6BF00, 0xF60F1E00, 0x3002E225, 0x29EA08C1, 0xF4EA00EF, +0x3910E5E7, 0xDB0B12E4, 0x2A3E0265, 0x3BE8FE11, 0xBBEFEEE9, 0x151BE2BE, 0x1E2314FB, 0x4C2DF754, +0xD5F823E0, 0x10200247, 0x01DB194D, 0x1B17D93F, 0x5806F42E, 0xFA19D040, 0x040BDF1D, 0xFCCB25A4, +0x39D9DC00, 0x2C0EEFE7, 0x20F3FDCB, 0xC2E70841, 0x3EEFEAE9, 0x32EDEAC5, 0xAC1BF70D, 0x0C15D4C3, +0x4CE7E5BF, 0x26F024E9, 0x061AFE0D, 0xDE13DEC5, 0x09DE0706, 0xFC0C1FE5, 0xD41CDC0B, 0xCC1DFBBB, +0xC6020CDB, 0xCD16133E, 0x3FF30CCC, 0x0B2D16AB, 0x3800E3D9, 0x3AFCF0DC, 0xE4FFED27, 0xFCF9F3E0, +0x05FF24D9, 0xE1F9DE2F, 0x391908F2, 0xC8F211CB, 0xEAEF003D, 0xDC1C040D, 0x25F74ECF, 0xE41F0CBA, +0xDF1708EE, 0xE016F83F, 0x1D124515, 0xF325344D, 0xE114F63C, 0x261FFEB9, 0x02F02739, 0xF12018F8, +0xD9124F3A, 0x1E1A45F3, 0x1504142C, 0xDDF506E3, 0xEAEAC113, 0xDE1AFB42, 0xF113C93B, 0x1721C207, +0xE3D4BB54, 0x070A2FCE, 0x0C0AE41D, 0xF90D3035, 0xE6DCF2B4, 0xFB15D4C3, 0xFE2425FC, 0x03002B28, +0xE50AF433, 0x07EC2FC4, 0x07E921EF, 0xFE09DAE1, 0xE21EF6B9, 0xF3E6360D, 0x17EE3F39, 0xE7FA0E22, +0xF5151DED, 0x1EF945DE, 0x0D1236EB, 0x0AFD32D6, 0x0928CFFF, 0xDCDAFD02, 0xFB272D00, 0xCE025B25, +0xEA0D3ECB, 0xE3E2BBF5, 0x06D32354, 0xF4DDCCB6, 0xE4002245, 0x213AB706, 0xECCCC3ED, 0x3CED9C0B, +0x15D23C14, 0x0BE7E3F9, 0x1FE70A0E, 0xF30D350F, 0x1928F01B, 0xD50AAE01, 0x120AC6E1, 0x0ED4C91E, +0xFFF7D804, 0x0D0CCA1F, 0x0433DBE4, 0xEA0DEFF5, 0x36FB5E1C, 0xEB11ECDC, 0x23C0FB17, 0xEBC63C17, +0x272800EF, 0xFC34D4FF, 0x0AB8E2F5, 0x01FB2AE0, 0x090630DD, 0xFE0AD522, 0x1B10BD1D, 0xDF520719, +0xF12436D5, 0x1D20BAFC, 0xE442BCF8, 0x05E0DDE6, 0x183FF1F8, 0xF816E118, 0xE3C9F4EE, 0xF2F51AF0, +0xFE5126E4, 0x1E31F7D7, 0xF9362F09, 0x15E43CF2, 0x0D3EE50C, 0xF74931EA, 0x23EEB621, 0x02B9DAE9, +0xDFE5B71F, 0xFD46D50D, 0x103E181F, 0xDF580717, 0x00C3EC31, 0x0CF714EC, 0x4ACCE41E, 0xF2E0DEF4, +0xB0431A08, 0xBD38D8E6, 0xEED6E510, 0x41B8EA02, 0xEC36E61F, 0xBEE9EBF3, 0xBE28E712, 0xF6051BFF, +0x2FE5E222, 0x2ADCFAF2, 0x3834FF04, 0x2AD1100D, 0xF4C502F8, 0x4DE7E414, 0x07A5DA10, 0xD1A92133, +0xB6ACFAD2, 0xCF51222C, 0xCD1BF8D7, 0x3F16F50E, 0xEEF4EA12, 0xA52F161D, 0xC4B3CE07, 0xB622EDDC, +0xDE2ADE06, 0x4FC8FB02, 0x6EDED9F1, 0xFC19B9F9, 0xD5B4DDF1, 0xDFC30423, 0xDFFBF9EA, 0x012C07DD, +0x12AF2805, 0x1A2EEAD6, 0xF8A1F207, 0xC0BA21CA, 0xFFDCE9E3, 0x36BFDA03, 0x05DD0E19, 0xF905DDFB, +0x32AEDF23, 0xFDFB09D5, 0x45D5DA23, 0x4B3E1CFD, 0x0905DDEA, 0xDFE2DE00, 0xB1FB07DD, 0x33EA2813, +0x02ECF5C3, 0xED0CD91B, 0x0D1BEABD, 0x4A17E5C0, 0xF707DFDF, 0xEE191F41, 0x19EC15C5, 0x4112F1EB, +0xD4F01AE9, 0xBF21FC06, 0x19DCE605, 0x2F0C10E4, 0x181FF947, 0xC00AEF1E, 0xF72EE8AB, 0x42F8E2E1, +0xA3EFE7C7, 0x31133514, 0x1EF608E2, 0x0DECF6EC, 0x0CECE5C5, 0x3DD31D54, 0xECE21545, 0xAE0CECE5, +0x49202B08, 0x0EFE2026, 0x13F41B35, 0x1B0BEB32, 0x1719F30F, 0xECDAF0FF, 0xECE9ECC0, 0xF212EC3B, +0x3DE8E6EF, 0x13F2EB1B, 0xB008EBE0, 0xBAE22845, 0x1A111DEA, 0xDBFC0EDD, 0x3F29FE50, 0xCDF817E0, +0x4B030CD5, 0xEADCDE05, 0xC31312C6, 0x272EEAA9, 0x340201DA, 0x17F400EF, 0xED1F1608, 0x24D24B55, +0xF4091DCE, 0xF51633C2, 0x03D32B56, 0x2325FA4D, 
0xDEDA05FD, 0x01F72831, 0x04F12C19, 0x2716B1C3, +0x1B19F3C0, 0x10E2C909, 0x050C2233, 0x03F22519, 0x1C06F422, 0xE7D70EAF, 0x03D725AF, 0x13EC14EB, +0xF6F631E3, 0xF900DE28, 0x1FE2F7F6, 0xFBE9D2C1, 0xFBEFDDE9, 0xE110F7C8, 0x1C290C50, 0x27D9FFFF, +0x07E421BB, 0x3BE89CF0, 0xF50733D0, 0x2514B2EC, 0xF5EB1DED, 0x0619D241, 0x1FE8B840, 0xEEEA3B3E, +0xED1F1648, 0xFF15D73E, 0x0123D805, 0x0310D5E8, 0x010ED819, 0xECE53B43, 0x03E1D547, 0x01D9D6FF, +0xDFE3B745, 0x1C14443C, 0xDF0908E1, 0x03FE2ADA, 0x1306C5D2, 0xDD001004, 0x1F3AF818, 0x1A3BF3ED, +0x1EBC47EE, 0xF83E1F1B, 0xE21445EA, 0x2101B713, 0xE81AF126, 0x1930BFF3, 0x1533C408, 0xEDBFC60C, +0x060F2EE8, 0x06BDD11A, 0x04F524E4, 0xD94C001E, 0x0334DCDD, 0x06F1D30C, 0xFBDBD219, 0x0EBE1A03, +0x08D230E5, 0x250B4DFA, 0x0903311C, 0x14233C25, 0x1ABAF205, 0x30C5581D, 0x02FAD712, 0xDF2108DE, +0x02DDDB07, 0x1D47F606, 0x0EEF191F, 0x1B3AF2E9, 0xF9E52E12, 0xF0B1C8F4, 0x290CAF28, 0x0013D8E5, +0xE1B50AEC, 0x1A224322, 0xFDE8DB06, 0xF8A531EF, 0x0DD1CCCD, 0xE81AC007, 0x201048F3, 0x1800F1E8, +0x000BD8D9, 0xEC2A13E2, 0xF3EE3502, 0x282F0117, 0x08F8E006, 0xFE542620, 0x19E9C0D3, 0xDD140412, +0x1119C6EB, 0xF6C2E10E, 0x1EC50BEB, 0x15BD13ED, 0x143DC51B, 0xE5F2BE14, 0xF552E41A, 0xE10FF62A, +0x2A0B5218, 0xE33A0CE2, 0x02D7D612, 0xE1A7B800, 0xD8C10031, 0x0810E0E9, 0xD93AB0E8, 0xF8113011, +0x15B73C17, 0xFE46D520, 0xF6C51EE3, 0xD51A52ED, 0x1A0ABD0E, 0xFBF8DEE2, 0xE245BA20, 0x2225FB1E, +0xEC3A3CFD, 0x19340EEE, 0x08FE20F4, 0x1F154727, 0x05F8D213, 0x16F2EEE1, 0x22B6FA1A, 0xF404CCDF, +0xE22E4624, 0xFB3FDDFB, 0xE6F7BF17, 0xF0B6E71F, 0xF013E723, 0x17081114, 0x24EBFCE0, 0xFBE52DEC, +0xE9C310F3, 0x26F04E15, 0xF145E8E8, 0x122B17E2, 0x0D40CCFD, 0x43CA96E8, 0x421D0F00, 0x27DCE70B, +0x233B00FC, 0xCC44FAEE, 0xE0BC0BE3, 0x1BE7071C, 0x01C7F20F, 0xB29B28F0, 0xFB02DBC4, 0x32092326, +0xE648F5E2, 0x4707F2DF, 0xDF36E2E0, 0x1CC308F2, 0xBBA50DEB, 0x1DE71D34, 0xCE36F5F1, 0xCDF90AF2, +0x0FDE0CE0, 0xCF0C1906, 0xE0E509E4, 0xBC37F9F3, 0xDA06E510, 0x1FD3FDDF, 0xDB09F705, 0xF445031E, +0x24311CE2, 0x55EEFDF8, 0x3CEDD216, 0xAFF01516, 0xCA3D29E8, 0xCCAEF1EA, 0xE0F60CD5, 0x4CECF7E2, +0xDCD5DBEB, 0xAEC2FC02, 0x4825D7EA, 0x252D2004, 0x24F604FB, 0xD9D7051F, 0x1AF201FF, 0x3DE70EE6, +0x43BCEC0F, 0x43D8E5E4, 0x20EB1BFF, 0x2AAFF814, 0x021A02D8, 0xD80C26F2, 0xDB00001C, 0x2BF2FCE6, +0x181F0347, 0xAFE8F0F1, 0xE31D2A0B, 0xD015F4C2, 0xEF02072B, 0x361717EF, 0xFA25F2B3, 0xEEFADE22, +0xEE21EBF8, 0xD6F5EA1D, 0xBF160212, 0x03E6190F, 0x452126B7, 0x32D91DFF, 0x2D000A28, 0x25F205CA, +0xDEED043B, 0x36DCFA04, 0x13EAF112, 0x141915F1, 0xF2F0EC38, 0xBC20E6F8, 0x0BF2E519, 0xD0F11D19, +0xC7F6F932, 0xBFE2EFB9, 0xA4F919DF, 0xFFEACCEE, 0x3BDBDAFC, 0x2B1E1245, 0x5CEBFC14, 0xDB10CD18, +0xB718FE40, 0x0BE92012, 0x2414E2C5, 0x3B09FCE0, 0x17DF1306, 0x212F12A9, 0xC71407C4, 0x0901EFD7, +0x22EAE23F, 0xE7E805C1, 0xC6F21037, 0x4A111218, 0x1AEC22EC, 0x27E30EF6, 0x0C000134, 0x1BF10D37, +0xFD1F2A09, 0x1AEFF3E9, 0x160F3EC8, 0xEC173B3F, 0xE8F711E1, 0x050A22CD, 0x15263DB1, 0x07012127, +0x1D22BBB7, 0x34E3A5BB, 0x19210FB8, 0x17D41254, 0xD822B1FA, 0xE6DE0E06, 0x1519ED40, 0xDDE54CBE, +0x1EFC0BD4, 0xFE0D2A35, 0xE8EF40C7, 0x09E5E1F3, 0xF60631DE, 0xD6EDADC5, 0xE61A0FBE, 0x091C30BC, +0x0514DE3C, 0x1D1A45BF, 0x03E0DC08, 0xF4FE1C25, 0x1D260BFF, 0xF6FD3126, 0x1013C8EA, 0xFE18DB3F, +0xDF114916, 0x1806BF2D, 0x0E0535DC, 0xE5ED0DEB, 0x1EE445F5, 0x16DC12B4, 0x23FA4B2F, 0xFFD6DAAF, +0xECF714E2, 0x273D0066, 0x1B15F313, 0x1D0CF533, 0x17003E28, 0x1FD3F654, 0x00EFB117, 0x1C13BBD9, +0xF5C21D15, 0xF1CF3715, 0x244A0309, 0x09321FDD, 0xEFF81609, 0xE1ED091F, 0xFDFA2A16, 0xF742D022, +0xDCECB51B, 
0x16FAEE14, 0x140C3C21, 0xEA0AED1B, 0x24E94CE2, 0x17E03F12, 0x1E01BAF9, 0xE82E4128, +0xEE311506, 0xF7FEE1F6, 0x0661DFD9, 0xDFB1FAC8, 0xFF282928, 0xEA3412FF, 0x104FC80C, 0x00ABD927, +0xEE32EA2D, 0x153B3D09, 0xFCB82DEC, 0xE9E8C021, 0x09BECEF0, 0xFCCA2D19, 0xDF09F90F, 0x013F2AE1, +0xFC44D4E9, 0xFD24D6E5, 0x1C0CF3FB, 0x2601021C, 0x1AB6BED9, 0x04B7DCDD, 0x102BC8E0, 0x09331EFC, +0xE8ACC1F5, 0x222E4BD4, 0xF715E1FA, 0x23484BED, 0xEB3913E0, 0xDFE4B711, 0x0C411B0D, 0x18FEE800, +0x42231127, 0x493B19FC, 0x39BCDE12, 0xFB46121B, 0x452DDDE2, 0xEA0DE3FB, 0x2109EF1A, 0x1B3CF81E, +0xE228F2EC, 0x48ADF6FF, 0xC2C0202C, 0x9F3FEB19, 0x192BC8E9, 0x2B0F0F04, 0x4F2DFD19, 0x1045D9FB, +0x0B3FE8E2, 0xF2FBE316, 0xBC0EE623, 0xB301E419, 0x1F2226D9, 0xBE0009FA, 0x27121A27, 0xF95002EA, +0x4714DED8, 0xBAE6E1ED, 0xC5191EF2, 0x3B2E14F1, 0xFB0FEC06, 0xDAB123E7, 0x3DDBFFD9, 0xC318ECFD, +0xDFD014EF, 0xB23D0608, 0x11E326EB, 0xC4CAE90B, 0xD847EC0D, 0xDFC3FFE2, 0x55BD0815, 0x17F9D3E5, +0x3EE91121, 0xF9C41712, 0x22AFE0ED, 0xCDE7FBD7, 0x1BC9F5F0, 0x0422F4F0, 0x200EDC06, 0xC3F700E6, +0x3B20EBF7, 0xD115EDEE, 0xC3ED08EA, 0x5AFEEC29, 0xC21F3247, 0x3315163E, 0xE330F659, 0xEAF5F5CC, +0x1716113E, 0x4CE611BF, 0xD517DBC2, 0xB2EF043A, 0x45DD25B5, 0xC0151E13, 0x131CE843, 0xFFE4EB44, +0x5FFB27DC, 0x381336EB, 0xB8EFF017, 0xECF720CF, 0x200B131D, 0x0622F805, 0xBA07DD30, 0xD9061FDE, +0x30EA013D, 0x40300858, 0x9AD917B2, 0x270FC1C9, 0x1209FFCF, 0x201716EE, 0x2A050723, 0x1BEF0239, +0xBF18F310, 0xA83719A0, 0xA9DDD105, 0xE70F3037, 0xDE0D0E1B, 0xE1D4F955, 0x0CF00AC8, 0xE80FE4C9, +0x1BE2100A, 0x40FB0EDC, 0xDD1A17BD, 0x4E1604C2, 0x5CE7270F, 0x2BF635E3, 0x00D80350, 0xFD0027DB, +0xDD22B54A, 0xEAE11247, 0xF705CE22, 0x01F2D736, 0x1D17443E, 0x1D0145D8, 0x18181111, 0x04FEDCDA, +0x01EAD7C3, 0x05DC23B5, 0x1FCFB959, 0x1A0A431E, 0xFCDC2CFC, 0xD61452EC, 0xECF23DCB, 0x31D6A652, +0x261BFE0D, 0x0CD73552, 0xEAFB11D3, 0x10291952, 0xF7F41F34, 0xEC1E3CF6, 0x180A41E2, 0x03FBDB23, +0x2026F9B3, 0x0C02E42A, 0x08F030C9, 0x0E19E70E, 0x0F1519C4, 0xDCE703F2, 0x100CC833, 0x0105272C, +0x01252903, 0x0DCCCB5C, 0xD21AAA0E, 0x2917AFC0, 0x1BEA0E3F, 0x2A1B52BE, 0xF2E8CAC0, 0x1DDDBB4C, +0x0215D73D, 0x0DF7E5E1, 0x08E230F5, 0x0EDFCA4A, 0x19FE0ED9, 0xE6EC42C4, 0xFB1D23F4, 0x22090632, +0xEA073F21, 0x06ED2115, 0x180BBFCC, 0xE6E20EF6, 0xE512F23A, 0xE21BF7F4, 0xF9F22E37, 0xF82B2F53, +0x1CF94421, 0x16D8C2B1, 0xD9F4FF1C, 0x23EC4B14, 0xF5F03339, 0xFE172B40, 0x1C2F0D57, 0x230A0632, +0xECDA3CFD, 0x1306C52D, 0x01D3D856, 0x1D2846FF, 0xF9D82150, 0x1B0CF31D, 0xEE03C626, 0x1AFFBED8, +0xD605AEDD, 0x2403FD25, 0x01EC28C3, 0xF1E1C8F7, 0x06E7D140, 0x09CF325A, 0xF2151AC3, 0xE7F8F120, +0x0A2BE353, 0x23E5FBBD, 0x1C1645C1, 0xF716D011, 0xF6ED1D15, 0xFB2123B7, 0xE2070A2E, 0x16F7C2E0, +0xE6100D18, 0xFB19D440, 0x2FF15619, 0x1F0FB938, 0x0101272A, 0xF8F42F35, 0xF3DFE649, 0x21E549F4, +0x0D430000, 0x400EE51B, 0x452E181A, 0xF1411DFB, 0xF4DC1919, 0xF247E3FC, 0x33141AE1, 0xF81CF515, +0x3115200D, 0x3FCFF713, 0xE9DA17F6, 0x0BF31102, 0x17D1E3E5, 0x0E9712FA, 0x2EAFE6C0, 0x5900F928, +0x50D8CE27, 0xCDACD9FF, 0x41BB0B2B, 0x2AC9181E, 0x13E1020F, 0x0708EC08, 0x09D220E1, 0x1EC0E206, +0x1F2BF6E9, 0xD23809FD, 0x46D3FBF0, 0xF617E2FB, 0xF4F61EEF, 0x31D71BE2, 0x1AD4F7FE, 0xB534F103, +0xDE15DD0C, 0xC7FBFA12, 0x1931F0DC, 0x4AD8F1F7, 0x4937DEFF, 0xF908210F, 0x35D121E0, 0xECCC0EFA, +0x34CBEDF5, 0x062A0CF3, 0xE4B12103, 0xD73DF427, 0xDCE8FE15, 0xB4FC04F0, 0x0B06DC23, 0x16391D22, +0x23051310, 0xE6FCFADC, 0x1C95F2DB, 0xE3DE0DBD, 0xDB270A05, 0xB4EEFC00, 0x1845DD16, 0x0149F11D, +0xC6D52621, 0x1E34EE03, 0x17D20AF4, 0xA10611FA, 0x20F937DE, 0x5D17F8DF, 
0xA00435EF, 0x3ECEC7DC, +0xF9D8EB0A, 0xFC332200, 0x1DE9DBF5, 0x2E580CEE, 0x3E1AF9D1, 0xC64715F2, 0xD7CB121F, 0xEFF400F2, +0x084CEAE4, 0x064DE0DD, 0x2B10DE25, 0xC1FB0218, 0xD4F8EADE, 0x32D6FBE1, 0x0F65F602, 0xEC32E83C, +0xD2F613F5, 0xC526F9E1, 0xF0F51302, 0xF5B017E3, 0xFA0FE4D7, 0xFACBDFE8, 0xC0F222F2, 0xD630E81A, +0xD6DEFEF8, 0x33A602F9, 0xC826F633, 0x10E51002, 0x0308E9F3, 0xE011251F, 0xBAEAF8EA, 0x26F21E12, +0x0000021A, 0xE00E08CB, 0xE0ED0715, 0xE71CC0F4, 0x0A1ECE0A, 0xDB260303, 0xE7E2BEF5, 0xDDE705F0, +0xDA0BFEE3, 0xF6D83250, 0xF4FA1CD3, 0x2805FFDC, 0x1DDBBAB4, 0x1611C218, 0x18E13F09, 0x1C1D0DF5, +0xDC25B403, 0x2BF6AE32, 0x180F4136, 0x39FC61D3, 0xF3DE1BFA, 0x1AD94100, 0xE8F8BFDF, 0x0826E0FE, +0x1AE142B8, 0xF80F20C8, 0x0116D9EE, 0x22E84BF1, 0xEB22C3B6, 0xEE01EBD7, 0x0EE8CA10, 0xD8FA00D1, +0xEB073DD1, 0x1EE3BBBB, 0x12EFC639, 0x3708A121, 0xDFDB49B3, 0x28D85000, 0x0C003429, 0x000D28CA, +0x12D1C6A9, 0x0D061B2D, 0xEBDCC304, 0xEF05C8D4, 0xDACD02A5, 0xF3F7CAE2, 0xE5144313, 0xFC222B4A, +0x23F4B51C, 0x18270FFE, 0xEDDC3CB4, 0xE9213F49, 0x1EF309CB, 0xE3E5BB44, 0x0AF6E333, 0xE1EF473A, +0x080DD036, 0x0FEB38EE, 0xFE0629DE, 0xDAFCB224, 0x0DF9CB21, 0x07D3DFAC, 0x06F822D0, 0x1E1ABAF1, +0x00F32835, 0xE00AF71E, 0x08E2D009, 0xE003F9D5, 0xD5F2AECA, 0xF4EFCCEA, 0x0B0E33E5, 0x0D271A00, +0x1232EB59, 0x07DBDFB4, 0xD2ECAAED, 0x17FC40D4, 0xDAEB0313, 0x07E8E010, 0x07F5201E, 0x13173CC1, +0xDBDCFDFD, 0x24ECFBC3, 0xF2D9E6FE, 0x420695DE, 0xFC172C11, 0xDF0F06C9, 0xF413CC3C, 0x1B11F217, +0xE01148C7, 0x1EEEBA3B, 0xDDDCFCB3, 0x24E1B4F6, 0xF4DA3303, 0xF1FC192C, 0xEAFC11D4, 0xEAD41354, +0x22ED06EC, 0x23090000, 0x0BE7FBE1, 0xE0BFE4F0, 0xF2C4F818, 0xE5BEE6EC, 0xDD3E0DE7, 0xEB360516, +0xFBC6ECF2, 0xFCEC23EE, 0x0A20DD13, 0x12211FF8, 0xF23A1706, 0xCC36E6EF, 0x1C26F5F2, 0x2F05F303, +0xF540FA23, 0xFE4FE3E9, 0x23CED927, 0x18C2FA0A, 0xD1A411EA, 0x1A4306CB, 0x474E0EE6, 0x21331FDB, +0x2C33070B, 0xC60804F6, 0x515A12DF, 0xE8FDD7CE, 0xD7171126, 0x1043FF11, 0x48F8E81C, 0xCF48E021, +0xF9CA09E0, 0x2B26210F, 0x1F10FD02, 0xC9ECF8E7, 0xF9D30FED, 0x33222205, 0xC44E0A05, 0x41381425, +0xCEC21811, 0xCFFFF5EA, 0xC0E7F727, 0xF5481810, 0xE247E3E1, 0x11150AE0, 0x40C116EE, 0xFDEC18E9, +0xCF0F2415, 0x46E5F6E6, 0x430D1DF3, 0x11EEE61A, 0xA45017EA, 0xF1BBCB28, 0x35DC19E3, 0xECBC0CFB, +0x3631ED1D, 0x202C0EF8, 0xE7D2F8FC, 0x441910FA, 0xBED4E4F2, 0xCE3DE6FD, 0xD8CB0B15, 0xDE3C01F2, +0xD7F1F914, 0xFDCE00E7, 0xBDEF240A, 0x2CC5E616, 0x1C16FCED, 0xE805F412, 0xE4F11023, 0x14110DE8, +0xE2ED1418, 0xD7F7F6EC, 0xF8E9001E, 0x18C3E111, 0xAA4BF1EA, 0x2BB02DDE, 0xEB18FDD9, 0xEECFED11, +0xAEF8E9F7, 0xB0D129E0, 0x1952D9FA, 0x07D40ED6, 0xB13EDF03, 0xC74C28E9, 0xD7DB12DC, 0x0B11FFFD, +0xCEFC1D17, 0xC34CF623, 0x4A011523, 0x0410DDDA, 0xF2072317, 0x33101B21, 0x35F7F4E7, 0xF2CBF3E0, +0xAACDE6F4, 0x00002F0B, 0xE51344C5, 0x1211E938, 0x091EE1F6, 0x26CD02A4, 0xE028F84F, 0x2522B206, +0x1BFDF3D5, 0x0C23CC06, 0xE809C031, 0x19D30F54, 0x1B0DBD1C, 0x2EE256F5, 0xE11A080E, 0x17DF11B8, +0x16E4C20C, 0xEECA3B5F, 0x05EA2D11, 0x140AC432, 0xE0F6481D, 0xE024094B, 0x16F6121D, 0x14083C20, +0x200A481F, 0xE1F308E5, 0xFB0623D1, 0xDE1EF946, 0xEE211606, 0x15EC3D13, 0xF402E4DA, 0xF1D8E700, +0xE6D0F358, 0xEEF9C621, 0xE3F3BB36, 0x0A171E3F, 0x0D0ACBE1, 0x09071F21, 0x04DDDDB5, 0x051422C4, +0x07F2211B, 0xFD1BD5F3, 0xEFFE3926, 0x261602C1, 0xF40F3438, 0xEE07EB21, 0xF4DA1CB2, 0x241EFCBA, +0x2600B2D8, 0x11DC174C, 0x1A0542D4, 0xDF1349C6, 0x19E10F47, 0x09131F16, 0x010A27E2, 0x0A12E2C5, +0x01E229F5, 0xE3090AE1, 0xDEFEB629, 0x150E1319, 0x221D050B, 0xE3F4F6CD, 0x21F6071D, 0x23FC4C24, +0x1312C6EA, 0x12F01738, 0xEFFF3AD7, 
0x1FFD48D5, 0x1C0B0CE4, 0xF5F71C30, 0x03F725E0, 0x0D38349F, +0xEB0614DE, 0xFB03DCD5, 0xEAC9EDA2, 0xDEF8B6E1, 0xDD0B0532, 0x17ECC2C3, 0x27E1FEB9, 0xEF14EA14, +0xF9FC2F2C, 0x160AC232, 0x1C0445DD, 0xE92311FB, 0x06162F3D, 0x03F12538, 0x0AFE1E26, 0x06E72141, +0xE1E6F841, 0x3221A5B7, 0x00E6D70D, 0xF8F8E030, 0x24FF4CD8, 0xFB02DCDB, 0x162313FB, 0xEAF1C2CA, +0x190CBFCC, 0xEFF4E834, 0x60370000, 0x25C139F2, 0xF731FCEA, 0xF734E009, 0xF10AE00B, 0xD2C81AE2, +0x26590710, 0x1ACE0231, 0xF41D0EF5, 0xD62BE4F6, 0xEF340303, 0x0AB9EA0C, 0xBA251EE1, 0xCE6BE204, +0xC1530A42, 0xC411E82A, 0xD4CD15E9, 0x35D4040C, 0xBEBE0C04, 0x01D4E7E5, 0xE9CAD9FD, 0x58F1EFF2, +0xAC46D1E6, 0x21DED4E2, 0x2C240706, 0xE0330304, 0x4931F80B, 0xF44C2109, 0xB74A1DDB, 0x2AC320DE, +0xBC2D02EC, 0x07EEE4FB, 0x2DCB20E9, 0xC910FBF4, 0xAF52F217, 0xD720D72B, 0xDBCC02F8, 0x1342FDF3, +0x26B315E7, 0x3D450325, 0xC7C0151E, 0xE9471219, 0x42E4111F, 0x4AA3E60B, 0x5537DF36, 0xB4E92E0F, +0xDCFEDC10, 0x18F1FCDA, 0xBFFA10E7, 0xD706E721, 0x2BD20022, 0x16500305, 0x4CC2EF29, 0xD71F2417, +0x20120009, 0xE7EC08EA, 0xE520F1EC, 0x42C8F3F8, 0xF23A19F0, 0x34111BEE, 0xCBF00DE8, 0x02F50DE8, +0xC3F0DA1C, 0x67CC1518, 0x09463FF4, 0xFCF7E11D, 0x01F6DC20, 0x2F1D27E1, 0x2816F9F4, 0xAA1800EE, +0x2E592DF0, 0xF4DC07CE, 0xCF49E504, 0xD33BF6E0, 0x17040514, 0xC445EE25, 0x4C33EB1D, 0xBB39250C, +0x4BAFE310, 0x58F8222A, 0xDD34CF1F, 0x4CCBFBF4, 0x32FDDC0D, 0xCF230ADC, 0x2D13F7FC, 0x49D6FA15, +0x09E2E002, 0x4425E20A, 0x170D1C02, 0x47DDF0E4, 0x2A441F05, 0xC0B502E4, 0x40B71823, 0xCFEE19DF, +0xD34A0917, 0xE7370521, 0x0000F1F1, 0x1CFDBD25, 0x1B080D30, 0xC61F61F7, 0x05122C3A, 0xDEF606CF, +0xF805D023, 0xD91D50F6, 0x1E0D0AE6, 0xE111F7E9, 0x1B1F42F7, 0xE9221106, 0xF7101EC8, 0x061C2FBD, +0xE8E711F2, 0xFB0E2335, 0x0DFDE6D5, 0x10E7E742, 0xF1FFE62A, 0x04F424CC, 0xF82B1F54, 0x10F8E821, +0xF40B1C33, 0xE2F3BA1B, 0xFBFBD32C, 0xF6E531BD, 0x12F7171E, 0x13073BDF, 0xDDEFFAC8, 0xDC1DFB0B, +0x0D00E4D9, 0x120AC7E2, 0xFEE1D546, 0xD9EFFEEA, 0x1EF0F737, 0x021225E9, 0xD4DE54FA, 0xF3FC1B2D, +0xF80DD1CC, 0x1EE1470A, 0x01E6D7F2, 0xE1E6F70D, 0xF9F0DEE9, 0xFB14D2C5, 0xE603F2D5, 0xD82750FF, +0x011AD941, 0xE2040A23, 0xE425BD4C, 0xFC28DC01, 0x22D94BFF, 0x0AE6E242, 0x16EDC2C5, 0xD90B011C, +0x050AD232, 0xDB114E18, 0xF2EB3512, 0x1AE642F3, 0xCFF1A7E7, 0xF220C9F8, 0x171FC148, 0x0735225D, +0xF2FF1926, 0x14D914FF, 0x0DFC36DC, 0x191BF00E, 0xE50CF3CB, 0x07F5221C, 0x2305FAD4, 0x1FD9094F, +0x25FFB4D6, 0x170FC1CA, 0x0FF3C91A, 0x02E82BF0, 0xDFEA07ED, 0xECE9143F, 0xE2E44645, 0xF2F6E71E, +0x26FAFE21, 0xFD17DB3F, 0xF0F6181E, 0xF818D0F0, 0xDD24B5B5, 0x00E8D8BF, 0xFE08DA20, 0x1104C6DC, +0x22FCB5D4, 0xE5FFF3D7, 0xE4CEF5A6, 0xF1EBE6EC, 0xEC06EBD3, 0xF5E91DC1, 0xE70540D4, 0xFC1A2D0E, +0x08E7E10F, 0xFAFE22D9, 0x06F82130, 0x09E80000, 0xF4F51F10, 0x49EE1CE2, 0xEEFADF17, 0x44ECE9DE, +0x0ECEE415, 0x354C1AF6, 0xCC020DDC, 0x50100C25, 0x1F362818, 0xCCB6090D, 0xE5C30C21, 0xFDDA0DEC, +0x350DDA01, 0x131A0C1A, 0x1AAB15F2, 0x2B590DD2, 0xC4A80331, 0xC517EC30, 0xCED3EEEF, 0xCC44F505, +0xF40DF5E3, 0xE2341BE5, 0x20C50AF3, 0xF9D308ED, 0xE345DF06, 0x3DF10CE4, 0x203D15E6, 0x1F4BF8EB, +0x1D37F8DD, 0xFEDC0BF1, 0xC14726FC, 0x0EDD1820, 0xCC221A05, 0xC1CB0D06, 0xC1CAE90C, 0x45EC180F, +0x9A3AE3EB, 0x15D23EED, 0xABCDEE06, 0x5EB52DF4, 0x19D33523, 0x0C41F1FB, 0xE8F8E4E8, 0xCD24F01F, +0x1239F604, 0xE6EA1611, 0xF23E0E11, 0x14F01A16, 0x450F14E8, 0x44BD1DE8, 0x3DE3E41C, 0x13E316F5, +0xEABEECF6, 0xEE00121B, 0x3AEEEB28, 0xE33AED15, 0x229AF5ED, 0xBF26FAC1, 0xE7351A02, 0xA6D80FF3, +0x5303CEFF, 0xCF082ADB, 0xB7BFF6E1, 0x3521E0E6, 0x07120DFA, 0x25E9E0EA, 0xB423FDEE, 0x0E012305, 
+0x1DF21B28, 0xCE1AF6E6, 0x2AF50AF2, 0xF80602E4, 0x0AB71FDF, 0x1F2A1E21, 0x0316F8FE, 0x0F4F2512, +0xEBCE1A27, 0xC0B3EC0A, 0x23E818DA, 0xEB0AFBEF, 0x164F13E3, 0xCDE61326, 0xC4DF0BF3, 0xBE3C14FA, +0x51E8E714, 0x10F2D710, 0xD3DB191B, 0xEA1F06FD, 0x344311F7, 0x21D60C1A, 0x301207FE, 0x2FF3F9EA, +0xEED1F9E4, 0xD335EAF9, 0xFA0505F3, 0x4B62DDDD, 0xD130DC3A, 0xD1D60708, 0x21D508FD, 0x4112FAFE, +0xEE431816, 0xAE0BEAE4, 0x03E0D51D, 0xD0AEDC07, 0x092BF72A, 0x07C92003, 0x20BDDEF1, 0xDC45F9E5, +0xCC1905E4, 0x0AE50CF2, 0x53D41E0D, 0xC928D505, 0x48B90F00, 0xF9CCDF1F, 0x3D4D21F4, 0xCB0B15DB, +0xF0050D1D, 0xE1F2E8DE, 0x36E5091A, 0x3EF20E0C, 0xD8E6161A, 0x241D00F3, 0xC1E7FD0C, 0xCFB8EA0F, +0xE33CF6DF, 0xF1AA0CEB, 0x45F11A2E, 0x3BFDE3E7, 0x28971425, 0x2DDC00BF, 0xE9DFFC03, 0x05B6EE07, +0xF1DE23DD, 0xB6E01807, 0x1F31DDF7, 0xEB390A0A, 0xD44B13F0, 0x11E2FC22, 0x5021E80A, 0x072E28FA, +0xEEBBDF07, 0xEC30E9E3, 0xD2F2EDF8, 0x3AE8061B, 0x4CB6EF10, 0xCBD2DC23, 0x302F0D07, 0xB73308FA, +0xAFE821F6, 0xD63FD6F0, 0x4CF20216, 0xBFEC23E6, 0x17DB19ED, 0xC9E8EF03, 0x3A34F10F, 0x404711F3, +0x1814181E, 0xEF17F1EB, 0x551F17EE, 0xD7D62E09, 0xF519FFFD, 0xFC16E4F1, 0x28E62412, 0xDD2600F2, +0xFD220501, 0x5DE52407, 0xB9B6CC0E, 0x180220DE, 0x27351026, 0xFA13010D, 0x050222EC, 0x2BE0DDDA, +0xCEB2FC08, 0x0CD00A27, 0xC3FBE509, 0xF6F51422, 0xEEAC1EE3, 0x10CFEAD5, 0x1FDBE9F7, 0x47C20AFE, +0x3AC8E2E9, 0xC80DEEEF, 0xECD9F1E5, 0x0FCEEC00, 0x1F2CE70B, 0xE5BCF8FC, 0x37B10DE4, 0x4200F1DA, +0xFB081A27, 0x385C231F, 0xC7210FCB, 0xFEF610F9, 0x26E30000, 0xEBF8132F, 0x051ADEBF, 0xEDE13CBA, +0x1819C0BF, 0xFE0F2AE7, 0xFC1D24F5, 0xDBE90311, 0x02F02918, 0x190FF037, 0x1CE10B46, 0x0AD0CF58, +0x170410DD, 0xF72231FA, 0x1D15BBED, 0x16053EDD, 0x0A1D1D45, 0xF412E316, 0x191510EE, 0xE3DE0BF9, +0x10113838, 0x02D92AB2, 0xFF07D7E0, 0x21ECF83C, 0xEDEBC4C2, 0xE8EDC13B, 0xF9042025, 0xDB35B4A4, +0x1AE3BF45, 0xFEEB2514, 0xEDFAC522, 0x0B08E3E0, 0x13F2151A, 0xF3D7E551, 0x1D110BE8, 0x25170312, +0x19C9F1A2, 0xEA0C1234, 0xE3FBF52D, 0xF3DC1BFD, 0x11F518CC, 0xFDF92521, 0xF61E31F6, 0xE0EA47EF, +0x09E6E142, 0x09F8E031, 0xDDF9B52F, 0xDAE34EBA, 0x160111D8, 0x1524C34C, 0xF01C170C, 0xDDE14BB9, +0xE7EEC0EB, 0x34005CD8, 0x17E1C2F7, 0x0F1B1943, 0xFD11DCEA, 0xD5EF5417, 0x08D73052, 0x1CDAF4B2, +0x162AED52, 0xFBE9D3EF, 0xF31C1CBC, 0xF225CA03, 0xEB231405, 0x13FFEC2A, 0xDAF4B135, 0xDC21FDB8, +0xDBF90420, 0x0F23184B, 0xE40945E1, 0xFE03DBD4, 0xDF1B48BE, 0x0CE01C49, 0x1200C728, 0xEFF016C7, +0xFC12D43A, 0x0AE9CDC1, 0x15101318, 0xDFE949C1, 0x2314B6C4, 0xFADF2349, 0xFCE4D5F4, 0x1EFA462E, +0x03DEDBFA, 0x19230F05, 0x210D061B, 0xFF2327FB, 0xDA21B207, 0x1302ECD6, 0xEF14E93D, 0xE4010D27, +0xEEE7E942, 0x1B1DF30B, 0x1A180D3F, 0xFF17DAC1, 0x0000F2FA, 0xB1351ADD, 0x4FE626F3, 0x2A12D80E, +0x1434FEEA, 0x36F2EC0D, 0x42C6F2E7, 0xB8C1E512, 0xE23821E9, 0x0E3B0AF0, 0xFC3E1BEE, 0xB631DB15, +0x0623DDF8, 0xD81B2306, 0x28B5FFF3, 0xEF270023, 0x1718E9FF, 0x33EB110F, 0x30F0F5ED, 0x1426F8E7, +0xC727ECFD, 0xB34010FF, 0x3CFFDBE9, 0x44141326, 0x5EA2E4EC, 0x3325CBC9, 0xD8130BFC, 0x0C1E0016, +0xF92EE4F7, 0x1109DEFA, 0x2632E9E0, 0x520EFEF6, 0x342ED619, 0x3229F4F9, 0x0D0DF700, 0x1D111BE5, +0x15BC0C18, 0xA728ED1B, 0x54283000, 0x47C32B00, 0xCAEE1FEB, 0x11EEF2E9, 0xC00C1717, 0xB215181C, +0x030CDA13, 0x480325E4, 0xEDCAE126, 0xD8D915F2, 0x23DAFF01, 0x16240503, 0x374AED04, 0x2904F1DE, +0x3D3DFF25, 0xB3CF1516, 0xDE0A25F8, 0x3306F9E2, 0xCDB9F622, 0xBCD0F5E0, 0xEC291DF8, 0xA8DE14FE, +0x07FC2F06, 0xCB3CE0DC, 0xFBC4F213, 0x2ED0DE15, 0x35DEFB08, 0x38C8F305, 0xCF36F010, 0x3ACE0A0E, +0xEC3A1209, 0x2152EDEF, 0x40440729, 0xCC04E91C, 0x0C56F424, 
0xB04A1C2E, 0x28CAD721, 0xCDD8FF0F, +0xCC1CF400, 0xE6BDF3F3, 0x0F020E1C, 0xFBFD19D9, 0x3DC12326, 0x00D8EA17, 0xCB2ED900, 0x5008F3FA, +0xEF2B28DF, 0xA726E9FE, 0xEAF5CFFD, 0x3FE4EEE3, 0x3F27180B, 0x3C47EA01, 0xC8B5EBE0, 0xBEC1F023, +0xD7231A16, 0xE15501FA, 0x40AAF82E, 0x4511E72D, 0xF8DAE317, 0x20030000, 0x0BF7E232, 0x0BF1CDE6, +0xCFF25835, 0xFAE12DF7, 0xCFEAA613, 0x22FFF928, 0xE8E93FC1, 0xE327BAFE, 0x1E030A24, 0xDFDFFAF8, +0x06F2D3E6, 0x00FED8D6, 0xE7DFF2F8, 0x10FB39DD, 0xDBD74CB0, 0x072BDEAE, 0x1EE90A11, 0x26DB4DFD, +0x16E8EEC0, 0x17FEC126, 0xEB03C4DB, 0xFCF924D0, 0xDBD8FC00, 0xE510F3E9, 0xEBF6C4E2, 0xFF2127F8, +0x0DE335F6, 0xE1E7B840, 0x1724C203, 0x0D23E5FB, 0xFD1ADB0F, 0x2C0C5434, 0x22EEFBC6, 0xDB130316, +0x1EE60A0F, 0x27E94F3F, 0xF7E21F46, 0xF1D837FF, 0xECFB3B22, 0x330FA638, 0x0CE21CF7, 0x22FFB728, +0x281AB042, 0xF714CE13, 0x11E0C707, 0x11D9E9FE, 0xD9EBFF3E, 0xF8043023, 0x1C15BCC3, 0xCAC95DA0, +0x13ECEB15, 0xDFEDB73B, 0x201CF8BD, 0xF033185B, 0x160B12CD, 0x01092631, 0x0E06192D, 0x0E001BD8, +0xEC0FEC37, 0xE4E143B9, 0xDD2405B4, 0x1EDAF501, 0xEFFF16D6, 0xD8F70130, 0xF1EF3738, 0xFBEB2E3D, +0x1F0147D6, 0xFAF52EE4, 0x00D92800, 0xE31B440D, 0x02112617, 0xF1DCE6B4, 0x17E3C144, 0xE117B9C1, +0x10F1E719, 0x0C15E3EE, 0xE315F53C, 0xE5260D4E, 0x02F8DB31, 0xEA14EF3C, 0x4B178DC1, 0xDC070320, +0x0F1CC8F4, 0xEAFB122D, 0xE71042C8, 0xEBE8EC40, 0x0EEDE6C5, 0xEDEF1618, 0xFFDE2AFA, 0x260B4DCE, +0xF3D9E600, 0xF816DFC2, 0xEA26EE4E, 0x14F23CCA, 0xD51AAD43, 0x0000133C, 0xFF5116EB, 0xCEF7D9D7, +0x3EFCF7E2, 0xE13E16DC, 0x39D5F7EA, 0x583312FD, 0x3FEB2FF6, 0x35B3E9EE, 0xDD01F424, 0x26230528, +0x5EB4FFFB, 0xB91E3623, 0x0AE11E09, 0xF6201F09, 0x4CC6E208, 0x1CBA2413, 0xC4370D1E, 0xD1001310, +0x35150627, 0x122AF4ED, 0x0BF71502, 0xC618E3E1, 0xF202ED10, 0xF2C31A25, 0xB3F71BEB, 0x2312DB1F, +0xC20804EA, 0x11FAEBDF, 0xA8DE18DE, 0x28113006, 0xE9CC0016, 0x4021120C, 0xEFC41908, 0xEB51E9EB, +0x331113D7, 0x5ACBF5E8, 0x10C431F4, 0x0927E914, 0xB403E0FE, 0xFBE925DA, 0x1B24DD10, 0xB43B0CFB, +0xE7C52413, 0xB94A0F13, 0x1DD7E1DE, 0xE5D10A01, 0xAE560C07, 0x290E292E, 0xF3F3001B, 0x5A131B1B, +0xB4E0CE15, 0xBAE6DDF7, 0xC801E20D, 0xEEC61028, 0x16F1EB11, 0x2FC5EEE6, 0x4D2FF814, 0x2DB526FA, +0x15B105DD, 0x2932EDD9, 0x0FD1FEF6, 0x2DD719F9, 0x29F30601, 0x3AFFFFE5, 0x44BFEFDA, 0xA437E5E8, +0x0E38350F, 0xDF07E510, 0x06AA0821, 0x0052222E, 0xEFEB27D5, 0x1ED11814, 0x0B0E0908, 0x2DA11D1A, +0xEB3405C9, 0x161C140C, 0xC2FBEF0D, 0x02E8E923, 0xD5BADAF1, 0x24C303E3, 0x374F04EA, 0xE8F10FD8, +0x21B01118, 0xB536F8D9, 0x3BF1DDF2, 0xFD0012E6, 0x2F4FDB28, 0x0D2CF826, 0xBEBC1C04, 0xC64C1A1B, +0xC62911DB, 0xF3AAEEFF, 0x40D5E52E, 0xA21C18FC, 0x18ED36F3, 0x00000F15, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 + +hard_output0 = +0x7FE75E62, 0xB98D686A, 0x16FBEDEE, 0xB22861F2, 0x0A1180B7, 0x666C4407, 0x1D4C462A, 0xB81A3416, +0xC6948E41, 0x3CCAE97E, 0x55352D5A, 0x85474DAC, 0xB8DBF013, 0xB9AF7B16, 0xEABEE865, 0x1EB02EF7, +0xBDE86A3E, 0x24EDAAC9, 0x88A3D4B9, 0x8CA15C44, 0x42487190, 0xF4BF8BFF, 0x05EA0214, 0x4708DC42, +0x222993B3, 0x24AAA18B, 0x4D49B2EA, 0xBBA8E2B3, 0x40426630, 0x5676818F, 0x1B79CC4B, 0x1595A8CD, +0x9088385F, 0x351D2A23, 0x0EDC4446, 0x8F17D03E, 0xEA8B3464, 0x8799E718, 0x79E11F5A, 0xE771403F, +0x38C65263, 0xA969930A, 0x59942FBB, 0x74138F31, 0x80518035, 0xB42D7BE0, 0x0C4A7E00, 0x4ACA8CB5, +0x76C89CCB, 0xB56F2806, 0x1C8CF4E5, 0xA192BBF1, 0x1139DA94, 0x46B1C7C9, 0x8B630748, 0xA882704B, +0x69DA8BE2, 0xF2508EA5, 0x05E4AEAF, 0x5AA0A9C8, 0x6177DDCE, 0x8D06A0E1, 0xECD023CA, 0x34FC0800, +0x5AE69AAD, 0xFE8E2F72, 0x5ED3DC49, 0x8B46EB93, 0xC0C3D55F, 0xAC8C8B89, 0x54A20A93, 0x81F6659B, +0x412B7086, 0x09D572AB, 0x85891AB0, 0x96DC6C57, 0xD7117C38, 0xFBA06F16, 0x9EAAFCE5, 0x44AD2185, +0x618D398E, 0xD9AC89E7, 0x031F1D0B, 0xE7D05975, 0x9E500DF3, 0x06BFEF20, 0x42888C91, 0xD6AF7FA6, +0xC839E4C7, 0x0E7B7724, 0x48693BD9, 0xD17B6519, 0x48004F1A, 0xF5A147D5, 0x521C8AD6, 0x26E19AE7, +0xAA17116F, 0x7196631D, 0x89643E51, 0x8CE6B02E, 0x128B9CF6, 0x2F45C206, 0x4EB6C731, 0xCFB17835, +0xCFE85026, 0xA4B9B9B0, 0xD2C152AC, 0xA4F89035, 0x79ED0C5F, 0xBC030479, 0x51D91130, 0x570C019C, +0x4E0306B7, 0x27978CCD, 0x5F8DA612, 0x33C121FE, 0x4407BB22, 0x6FF06E2F, 0x5D7F378B, 0x491B9A93, +0xA0BDE688, 0x0EA89618, 0xE91C3661, 0x27C69F6D, 0xC46463FD, 0x67649FE9, 0x8F6E077F, 0x4D456A5F, +0x396B7218, 0x0C26C824, 0xA4E8FD71, 0xDCA1C865, 0x6F1CC581, 0x97C974D3, 0x38D0F6FC, 0x04190419, +0xED8C7FC5, 0x794BA619, 0x0090798D, 0x3044F72D, 0xBB77EEEF, 0xF9BCFDE1, 0x2C1580F7, 0xC0292305, +0x86827E71, 0x356C7D12, 0xA84DBECC, 0xCA53E3D9, 0x236E73F0, 0x9FD2AFBA, 0x652E7A66, 0x744A1147, +0xBFE9AF5B, 0x59F2552F, 0xBD77D3C0, 0xCD726524, 0x1D56A0FB, 0x85207831, 0xB6F1861B, 0x369CF946, +0x71CA18BD, 0x5EE3F7CF, 0x2C3FC902, 0x4301214D, 0x53A64F16, 0x2956B34F, 0x3DF40618, 0x41A36C68, +0xF7BB2B5F, 0x0C5CD46D, 0x3E4354AC, 0xC29AFEF7, 0xC1A310AF, 0xFFEF128F, 0xBE1C8D14, 0x8B123664, +0x854B7E00, 0xAE175861, 0xE4067FE2, 0xA264F00A, 0x988634EF, 0xEC9E38E7, 0xFE2968A6, 0x2B30719D, +0xFE3DC88D, 0x09E222C8, 0x6B4F1B50, 0x436C7137, 0x161DFE10, 0xA68C403E, 0x12A2856F, 0xAAB8DFF0 + +k = +6144 + +e = +10376 + +iter_max = +8 + 
+iter_min = +4 + +expected_iter_count = +8 + +ext_scale = +15 + +num_maps = +0 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_CRC_TYPE_24B, +RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN + +expected_status = +OK \ No newline at end of file diff --git a/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_low_snr.data b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_low_snr.data new file mode 100644 index 00000000..8ff71aac --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_low_snr.data @@ -0,0 +1,643 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_DEC + +input0 = +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x16000000, 0x081112e9, 0xcd2d1fac, 0xf9fdf4db, 0x110edf1a, 0x0dd9e84f, 0xdbe9e611, 0x0fde0206, +0x43df19f9, 0x071ee4f7, 0x1f1322c5, 0xd4e5f6f3, 0xe3fafcde, 0xd002f52a, 0x4427f801, 0xbadce403, +0xce101dc9, 0x09fff729, 0xd5f91f2e, 0xf3f0fee8, 0x4f061cde, 0x4a2b2753, 0x1d24dffb, 0xd3040bdb, +0xcaf2fbe6, 0x1cd9f350, 0x00fff42a, 0x49d02759, 0x4005de2d, 0xd3fb1923, 0x36cbfc5c, 0x020a0de1, +0x39072621, 0x3b0defe5, 0xb4ebec13, 0xddee2416, 0x24f206e5, 0xd1e1fcb9, 0x0e150713, 0x21f6191f, +0x0d0bf9e3, 0xf0061bd2, 0xae2717ff, 0x0de9d740, 0x13031a2b, 0x4c0215d6, 0x4534dc5c, 0xa1f41c1c, +0x001b3842, 0xeaf83ecf, 0x2519b4be, 0x1d220afb, 0x05162312, 0xee00ea29, 0x25ef4d39, 0x0cc03398, +0x08f5e0ce, 0xb7e68f43, 0x14f7ec31, 0xe7f5f11e, 0xfd29d6ae, 0x0af133c9, 0x03fdd5db, 0x0bca1d5f, +0x00172840, 0x061add0e, 0x0516ddee, 0xe10cf735, 0x1709101f, 0xdf0908d0, 0x06fe2dd6, 0x18ed11c6, +0xd315ac14, 0x06f0d3c8, 0x1ed9474f, 0xef223806, 0x28f3ffe5, 0x0eeb1aee, 0x080de0cb, 0x29f452e4, +0xdbd702b0, 0x041b2cbd, 0x1df30b34, 0x1927f102, 0x2010b719, 0xf0fde8d5, 0xe9eaef3e, 0xf10b36ce, +0xdff4fa1b, 0xfd0cd5e4, 0x04f923d0, 0x110f3936, 0x06fe2e26, 0x00e42844, 0x11e839c0, 0x13ecea13, +0xf621e349, 0xff332600, 0xe604f1f5, 0x0533d425, 0xf240c9f5, 0x0b5933e8, 0xdf1bf931, 0x02c929f2, +0xeee6eb0f, 0x12a2160f, 0xfdde26ca, 0xf016c8f9, 0xdd03b5ee, 0xe2ee09da, 0xfdf4db17, 0xdb0603e4, +0xffda2821, 0xf6f9cefd, 0xd5ca5421, 0x12f03af2, 0x182f0fe9, 0x10423808, 0x0c19e41a, 0x27af02f0, +0xfc14d42a, 0x08c9d114, 0x274700f2, 0xe8c110e1, 0x0e0f19e9, 0xeb58c3e7, 0xe20fbbd1, 0xe52e0e1a, +0xf145e607, 0xe316bc1d, 0xe3e04511, 0xd93c0009, 0x27db01ed, 0x0a30cd03, 
0x23434af8, 0xe13f09e6, +0xe4d00de9, 0x07202208, 0xdb3ffd07, 0x0955cfea, 0xeae8112c, 0x041324f1, 0x1011e815, 0xf6d01f17, +0xe5eb0cf9, 0x17be00ee, 0xa2dbeee6, 0xb2e73603, 0x08ebda0f, 0x250fe0ed, 0xe61a0218, 0xf1dbf2f3, +0x420b18fd, 0xa1e9e5e4, 0xd9e1caf0, 0xe7cffe08, 0xeba8f009, 0xc45213d0, 0x20a1ed29, 0x28fa0737, +0x1ad900dd, 0x1bc10dfe, 0xab360c18, 0x023bd3f1, 0xacfd2514, 0x4d382c25, 0xf25adbf1, 0x1eda1a32, +0xf11e0902, 0x23a5e70a, 0x1f32fcce, 0x0ef0f8f5, 0x2029e6e9, 0xdfda0701, 0x1860fafe, 0xd8e5f139, +0x5612010d, 0xf2572d15, 0xba23192f, 0x2ac1e2fb, 0xe91e02e8, 0x44d2100b, 0x08f9e307, 0x5edddfe0, +0x240fc906, 0xc1c3fbe7, 0x0f231614, 0xaa111806, 0x1e2d2eea, 0xc6ba0a06, 0xedc9121e, 0x1ffc150e, +0xd11b0a23, 0xe800f80d, 0x2f27f101, 0x06110638, 0xd01fdef8, 0x15ee0717, 0xba0e131a, 0xcde51f43, +0xd21df6bc, 0x051006c8, 0x2616ddc3, 0x3ef8fee0, 0xf3cde9a4, 0x3bd81b4f, 0x25001229, 0xf6eb023e, +0xd81ee3b9, 0x1cecff14, 0x29fef4d9, 0x9f2d0255, 0x121e38f5, 0x18eb16c2, 0x271a1041, 0xf7f700cf, +0x28f3e1e4, 0x49e90011, 0x4b0fde1a, 0xd9e723f1, 0x0de402bd, 0x261f1b0a, 0xbe0afe1e, 0x0823e606, +0x3d041f2c, 0xf1f21436, 0xec1ae7f1, 0x02ebebc3, 0x52f1da1a, 0xc9e5d60d, 0xa9dc0e04, 0xc504d22d, +0x09fd1325, 0x3706e02e, 0x26dc0f4b, 0x2012fe3b, 0xeef109e7, 0xb509161f, 0xf01dde45, 0x13d51853, +0xc823ebfb, 0x2d23f0fc, 0xcbf6fbcd, 0xd505f22d, 0xee0afd1e, 0xa6e615bd, 0xa020ce07, 0x411937c0, +0xbcd318ab, 0x2a1d1cf5, 0xc400ff28, 0xe32014f7, 0x2bf30b1b, 0x0e03fc25, 0xe1e41bf3, 0x2f1b0943, +0x06060721, 0xc601de26, 0xd3faee2e, 0xe51c05bc, 0x0411f4c8, 0xcfe7dcbf, 0x2d12083a, 0x0bedfb3a, +0x0efc1d2c, 0xca1f1909, 0x13f80e30, 0x3a21eab7, 0x3c2b1252, 0xf2e4140b, 0x2527e5b1, 0x2ffe03d6, +0x44e4f90b, 0xcf21e449, 0xd8e9f6c2, 0xeef8001f, 0xfb151513, 0x48f9de22, 0x2ad6e0ad, 0xe63602a3, +0xdae50e0d, 0xe3e5ff0c, 0xc721f449, 0xa0d5efac, 0xe70039d8, 0xc7f71031, 0x2b22eefa, 0xe5fb04d3, +0x0021f2f9, 0x2bce28a5, 0xdffefa00, 0x1af642e2, 0x1d190bf1, 0xecf515cd, 0x0af4cf34, 0xf406cbd1, +0x08ebd013, 0x14173b40, 0xe40e0bca, 0x29ff51d9, 0xd303abda, 0xe303f5d5, 0xf6facddf, 0xecef3cc8, +0x1324ecb4, 0xd213563c, 0xe00ff91a, 0xef121639, 0xf5d2cdaa, 0x1914f013, 0xe80f3fe8, 0xe31c0bbc, +0xe3d5bcad, 0xe610be38, 0xe72df155, 0x1cef44c7, 0x13ccc5a3, 0xe1e20909, 0xecf2c337, 0xf2fd35da, +0xe5cd435b, 0xdc1304c5, 0x050722d2, 0x121fea46, 0x36f8a1e1, 0x1223c6fb, 0x18040fd4, 0xef1517ee, +0xf8e32ff5, 0xf22cc953, 0xfe04d7dc, 0x10e7390e, 0x1ef4091b, 0xdbf2fde6, 0x1afa432e, 0x2211b5e8, +0x211eb8ba, 0xff1529c4, 0xdaea004e, 0x11e539ed, 0x13b4eaf2, 0xe10af7db, 0x11e9c81e, 0x1eeef6f0, +0x17b73fea, 0x09c61fde, 0xeefdebef, 0xd92fffdc, 0xd7e05007, 0xfe42d9f8, 0xd2feaae6, 0xf3001cd9, +0xee12c727, 0xe5d90cea, 0x2c21ac01, 0xe3de0b08, 0x265703fa, 0x1a410d30, 0x2f1f5719, 0xe742bff6, +0xfc2e23e5, 0x2746b105, 0x262efe1d, 0xe43ff4fb, 0xf01018e9, 0xd9014e18, 0xef043927, 0x23fb0524, +0x27094e23, 0x19ce41e1, 0xf70a200b, 0x1daff5e3, 0x274eb22a, 0xf612cd26, 0xf903df17, 0xe3f1ba25, +0xe45bbc18, 0x062a2132, 0x11bfea01, 0xf4fe34e7, 0x0637ddda, 0xede0c510, 0x02022509, 0xfbbf2c26, +0xeda8c619, 0xecb0edd0, 0xeb00d0d8, 0xf054edf7, 0x0356182c, 0x5fd3252e, 0xd74e3805, 0xc2100026, +0xe4161517, 0xf106f412, 0x26f219df, 0x440bfde5, 0x1b031d1d, 0xe6c4f325, 0xd349f213, 0x2b0605df, +0x4bdbfcdf, 0xe32e23fd, 0x49460a07, 0xcff022e3, 0xf9c2f617, 0xdfbc2116, 0xed2f071b, 0xd6a91406, +0xc33bfe2f, 0xc623eb12, 0xc2d0ee06, 0x243aeb07, 0x1fe8fb13, 0xffc409f0, 0x1ed6d814, 0xde4bf602, +0x31dcfb22, 0xf5da0afd, 0x0afd1e03, 0x1cc01fdc, 0xf82bf4e8, 0xcccce003, 0xfb0f0df4, 0xd2ff23e8, +0x0845fbd9, 0x16e821e2, 0xe5c5eef0, 
0x14bbf4ec, 0x1cefeb1d, 0xffcd0de9, 0x5bbad90b, 0x0630331d, +0xf826dd09, 0xc62c2001, 0x00ed12fc, 0x16e8eb40, 0xf3e4120c, 0xc105e52d, 0xca181610, 0xb9e0f149, +0xfc011f28, 0xfc17233f, 0xc5f12318, 0xea1dedbb, 0x37e91240, 0x47d9f0b2, 0x19271e01, 0x09240ffb, +0x1edd1f4b, 0x02240ab3, 0xf3e6d90e, 0xfd03e425, 0xb12325b5, 0xb92cd9ac, 0xcee71e41, 0x47fa0bdf, +0x0fcb1fa3, 0xd406e7de, 0x31defcfa, 0x300af71e, 0xfa0608dd, 0x10e922c1, 0xb2f3e936, 0xf336265e, +0x0610e517, 0x16e72242, 0xab23134b, 0xbaddd34b, 0x1afee225, 0xfc100d38, 0x44e6dcbe, 0x02ede4c4, +0xbe19da41, 0x2ff1e6c9, 0xea14f8c3, 0xc21bed0d, 0xac1017e8, 0x35202df8, 0xd9f10dca, 0x03fc02dc, +0xf2e425bb, 0x29efe5e9, 0x3ce10046, 0x09153000, 0x0b061d2e, 0x1f1a0abe, 0xfb18d2c0, 0x0008d730, +0xf3071b30, 0x17dfeff9, 0xc5d09da8, 0x23d7fbaf, 0x07e921ee, 0x16e4ee45, 0x1624c304, 0x01f828d0, +0x20ddb8fb, 0x030325d5, 0x0424d5fc, 0x0216dac3, 0xe5040c23, 0xd71552ec, 0x28ef00e9, 0x16de13fb, +0x1520edb8, 0x0416db12, 0xdeee07c7, 0x07fc20d4, 0x08fadf2e, 0xf0f639ce, 0x30f0a7e8, 0xef0d39e6, +0x210d071c, 0xf2f235cb, 0x05192242, 0xd6095130, 0x2605ff2d, 0x19f8bf30, 0x1320eb48, 0xfbd72e51, +0x01d92701, 0xe127f601, 0xdc0a051f, 0xfdf1dc37, 0x0b0bce1e, 0x09ff1f29, 0x1507c3df, 0xf7e3e1bb, +0xdb204df8, 0x11e517bd, 0x3004a7dc, 0x0d1300cc, 0xf42134ea, 0xe9b9c1f8, 0x1fbef720, 0x07bcdfe5, +0xeccf3ce5, 0x17301109, 0xf2b4e6f7, 0x1258c723, 0xdaf84dd0, 0xd507ae1f, 0x011d2a22, 0xfd3f25f5, +0xfd4edb17, 0xf9cadf26, 0x144dc3f1, 0x09003124, 0xec503b27, 0xfed1d628, 0x0ffbc9f8, 0xf2e619de, +0xf832dff1, 0xf3d034f5, 0x1dea0b09, 0xe80e0fef, 0xf1dd38e6, 0xf8fce104, 0xde1f07db, 0x06ccdef7, +0x24554b0c, 0xcde0a6d2, 0xe2e0baf8, 0xeae6ef09, 0xe31b440f, 0xe5c6f3f2, 0xf447ccee, 0x1944f2e1, +0xe33fbb1b, 0xebad3eea, 0x0b1de32b, 0x0d24e50c, 0x18e711fc, 0x2830b110, 0x14413cf8, 0xe7e7f219, +0x1c49bc0e, 0x0734e0df, 0xe255f50c, 0x540045d2, 0xe3c0d3e3, 0xef0ff518, 0x34c017e6, 0x052af517, +0xff4e2402, 0xc4eadada, 0x00d3ed12, 0x4bc727fa, 0xaaf62211, 0x28e1d21e, 0x530a00f7, 0xfd40d5e1, +0x0dafdbe8, 0x2b0de5d6, 0xc722fce5, 0xca241007, 0xe1eb0efc, 0xfa63f8ee, 0x28dfdec5, 0x07420107, +0xccf6dfe7, 0xcb1df4e1, 0xd9def40b, 0x47ef0105, 0x1041e0e9, 0xe849e919, 0x27f10fdf, 0xef2c02e7, +0xe74fe805, 0x4cb7f2d9, 0xe7ee2521, 0xee430f17, 0x5b09161b, 0xdce2cd1f, 0x2343050a, 0xc22c05e5, +0x5ae7eafc, 0x9749cef1, 0x4dea4221, 0x0e2726ee, 0x24a8e6ff, 0xcc5afcd0, 0xbcd2f432, 0xf514e306, +0xbd0ee414, 0xc4dbe41b, 0x310a1302, 0x090cf8e2, 0xd5dce11d, 0x1c13fdfb, 0xeacbf515, 0xc5eeeef3, +0x3027eee9, 0x29420701, 0xc01700e6, 0xc4b7e910, 0xaaebec21, 0x129fd2ee, 0x2ec5eac7, 0x0eecf9ec, +0xe10b1aec, 0x57b3f7e3, 0x3939d1db, 0xcf42f012, 0x163ef719, 0xd5111217, 0xe92203e9, 0xf13eef05, +0xece5e6ea, 0x3301ecf4, 0xfff6f527, 0x39ded9e2, 0x110deffa, 0x262fe91a, 0xb7bd0207, 0x39d8dfe4, +0x0a2e1101, 0x2adf1ef9, 0xe52ffef9, 0xb70ff307, 0x1cdd22e7, 0x2b3bf4fb, 0xe4eb02ec, 0xd80bf4ed, +0xfebaff1d, 0xcd12271e, 0xfeee0aea, 0xc70726e9, 0x0f4fefdf, 0xd9b6e6da, 0xf5d300df, 0xccbc1dfa, +0x34270de4, 0xf0d3f400, 0xc649e7fb, 0x18deeede, 0x4d0f0600, 0x2bdf2549, 0x1f07fe21, 0xf0e6f60f, +0x0c1b1743, 0x0f0a1b1d, 0x46efe7e9, 0x0ae6e242, 0x0ff61f33, 0xae21e84a, 0x500ad6e2, 0xfcddd74b, +0x4a17ddc2, 0xe9d9dd4f, 0xc7241205, 0xb9e8110f, 0xda111ec7, 0x37e1fe46, 0x39f3f1e5, 0x000512d2, +0x0e05d92d, 0xfeec193c, 0xf519daf0, 0x1e291d52, 0x362bf652, 0xbce9f311, 0x30fae5d2, 0xe6fcf7db, +0x36ec0fc4, 0xddfa0f23, 0xf1f605e3, 0x1ed1e857, 0xb9def6b6, 0xc0091ee0, 0xcff1e8ca, 0x3fe5090d, +0xf3edea15, 0x23111b16, 0x0207fbd2, 0x1d2cdbac, 0xbdf6f6cd, 0x1a311ba8, 0x420af2cd, 0x1bde1ab7, 
+0xc512f217, 0x0fe3edbc, 0x1afe19d6, 0x200bf1e3, 0x100900c8, 0xfe1a29f2, 0x12c51663, 0xdfe606f2, +0xf7ea1f12, 0xf841df96, 0xe2f50a33, 0xe9f5efcd, 0x13fdeb25, 0x2524044d, 0x0d1a35be, 0xfaee23c7, +0xf0043824, 0xda200108, 0x04f225c9, 0xe101f7d6, 0xf30dcb36, 0x07fb22dd, 0xd1fba92c, 0xdf03f824, +0xd0efa8e9, 0xf5dd1d05, 0xfe332a5c, 0xf4221bb6, 0xe5ddbdb5, 0x1401c3d9, 0xfc12d33a, 0x1b14bd3b, +0xddf2fb36, 0xf0e13709, 0x12d9c601, 0x0cf7cce1, 0x1b06bc22, 0x22d6b7ae, 0xf7291eae, 0x15f81321, +0x1a04f2db, 0xe425f404, 0x05f32435, 0x060f2218, 0x0bd93250, 0x26f9fd20, 0xfe2fd5aa, 0x121deb45, +0xf6e731bf, 0xf90c2e1d, 0xe9dbc2b3, 0xfb0edd36, 0x1500b93e, 0x281db01e, 0x061ddff5, 0x2712fe0a, +0x0f461ae9, 0xe32f0b1f, 0xed691507, 0xf61d32bf, 0x0d24e5f5, 0xfae422fd, 0x03d126f4, 0xee0ac6f9, +0xd8a4b11e, 0xebf51335, 0x15f7c31e, 0x07fd22e1, 0xe40a0cdb, 0xdd04b5e3, 0xd1f25825, 0xec033de6, +0x12fee925, 0x292051da, 0xdff54af7, 0x34d0a4e3, 0xf53de209, 0xedeceb14, 0x0f4cca13, 0xe3340b25, +0xf023e90c, 0xf6be32fa, 0x2254fae7, 0x07c1212b, 0x31015816, 0x1bd24227, 0x1438edf9, 0x151f3df0, +0x02d1270a, 0x0fffc9f9, 0x0d4235da, 0xf638e2e6, 0x241ffc0f, 0x1d380b08, 0x1a93bdf0, 0x1b4f44bb, +0xfe57da26, 0x24c8fbd1, 0x1112c711, 0x1f3ef6eb, 0x00173116, 0x100311f8, 0xf2dfe7db, 0x292ce5f8, +0x46e1fffb, 0x45c71ef7, 0x2bc31c11, 0x4abcfceb, 0x00e0dd1c, 0x202628f9, 0x42f2f801, 0x0bb41b1a, +0x0d16e225, 0xd2f11a12, 0xf6ec06e6, 0x13ef1deb, 0x48c51617, 0x09ff1f13, 0x314b1f28, 0x1ecf0823, +0xf9adf6f7, 0xd4da222a, 0xef0f0501, 0xe146e8e8, 0xfe4308e1, 0x4803dbe5, 0x21bc2025, 0x0fc5f9e4, +0xd341e813, 0xa9d9fae6, 0xaaf5d1ff, 0xb546d2e3, 0x12e2221f, 0xbee715f6, 0xd325e60f, 0x1adc05fd, +0xf533f3fb, 0xf3d0e3f6, 0xcb11e5f7, 0x33fb0ce9, 0xa3be0b23, 0x29b8351a, 0x312f0120, 0xebbb0afa, +0xc31a12e3, 0x0c47150d, 0x21c21be2, 0xe1570715, 0x19e0082e, 0xc8f2f800, 0x2a0b11cd, 0xcd3001a8, +0x2f1e0bf7, 0x1909071f, 0x1c15f2ed, 0x4612f53b, 0x40021d27, 0x3108e821, 0x0c1108c7, 0xcd0d1ce4, +0xe51c0b44, 0xb4ebf412, 0xf1dddc04, 0xd6f7e6e1, 0xc32efe56, 0xfd10ea18, 0xdb2eda55, 0x48ff0329, +0xe2fce12c, 0xd5f9f5df, 0x2cda04fe, 0xdf0dfdcb, 0xc5e70741, 0xdf16eec2, 0x3ce1faf7, 0xcc0eec35, +0xfa28f450, 0xf1e72210, 0x451de7f5, 0x4dfde324, 0xfdf3db35, 0x31eadc3d, 0x391c09bc, 0xcc09ee1f, +0xdaf90dd1, 0xf6e1ff09, 0x22f8e1d0, 0xe70efae5, 0x13faf02e, 0x271e15ba, 0x03df0007, 0x11ff2627, +0xb8e9173f, 0x38ecdfec, 0xc4ed0f3b, 0xe4d91402, 0xd5010c29, 0x050400d3, 0x1e01ba26, 0xe3390b60, +0xe11d0a0c, 0x2adaaefe, 0x1222e906, 0x0ff6e7ce, 0xebeb3cc4, 0xfd082b31, 0xf52a1e52, 0xf1dde7b4, +0x0ee8e6c1, 0x0c2ae352, 0x0df3cbca, 0x131bebf2, 0x23214bf9, 0xd6f5ad1c, 0x21fff8d6, 0xe3e00a48, +0xf00119d8, 0x03d02ca7, 0xf1fcc92c, 0x0c0b1b1c, 0xf4daccb1, 0x131ac50f, 0xd6295250, 0xe4f6f3e2, +0xe9f91221, 0xe6e0be49, 0xdcebb5c3, 0x1f1bf8f3, 0x27f6b2e3, 0x1a1c0e45, 0x0504d224, 0x0918e20f, +0xe4dabb4f, 0x0a001d27, 0xec193b42, 0x08e9d1c2, 0x0c09e51e, 0xff28d700, 0x0d0ae5cf, 0x0704202c, +0xfce0d408, 0x0c0b341e, 0x27ee00ea, 0x1206c7d1, 0x1000e9d8, 0xf8000830, 0xe334f5e1, 0x0bc81ef4, +0xeed33b10, 0x280050fb, 0x02d1d928, 0x0e3be608, 0xdc130412, 0xf6ffceea, 0x03142a28, 0xfeea2aec, +0xfdc4d613, 0xf1151aed, 0x1425c4ee, 0xfdd0db04, 0x10df1807, 0x0af7cd07, 0x20c108e2, 0xf61bce17, +0xe6e6f10e, 0x03d0da0e, 0x20edf808, 0x37dc60eb, 0x1bfd0e04, 0x18b73f24, 0xe8f91021, 0x0ed31ade, +0x1fe50afc, 0x07e020f3, 0x0c2ccb07, 0xedd91405, 0xf700e202, 0xdce903d9, 0xf4b31c12, 0xfcbbd425, +0x08d431e2, 0x1f00b905, 0xd04158d9, 0xd2acabe8, 0x18e911d3, 0xedca3c12, 0xe467bb0f, 0xd9c350c2, +0x00c12816, 0x082220e9, 0xfe692bfa, 0xfc322cc0, 0x0d1ae5f5, 
0x1de446f2, 0xe136b80d, 0xf2001af2, +0x1be3bdd8, 0x013e29f4, 0x272301ea, 0x0a021f04, 0x22d1b5da, 0xedafecfa, 0xf958d0d8, 0xb1f48ad1, +0xf816d01b, 0xe7caf2ee, 0xf7e11ef1, 0x0ae31df7, 0x0feb180b, 0xece61513, 0x3f10670e, 0x0baa1c17, +0xf233e6d2, 0xce46a7f5, 0xe6c642e2, 0x033225ee, 0x0b3de20a, 0x00f7d814, 0xf8cccf20, 0xe2ee0bf4, +0xd93601ea, 0xefdf3a0e, 0x2202fb07, 0xe5c70c25, 0x01da29ee, 0xf8182101, 0xdef7b610, 0xeb0aed1e, +0x0017d8e2, 0x2fa4a9ef, 0x0c25cd35, 0x05f62304, 0x08e7e01f, 0xe11409f2, 0xf5e81ded, 0xeefeeb10, +0xfe122b25, 0xe9153f16, 0xe34f44ed, 0x1cca0c27, 0xf3dbcb0e, 0x08c231fc, 0xe2cfea00, 0xf031f5f7, +0xc7e91809, 0xdfe5ef10, 0x1234fa0e, 0xee28ea0c, 0xc6061700, 0xccb8eede, 0xed190b20, 0x21441510, +0xb1e007e5, 0xe1c7d9f9, 0xcacbf611, 0x3acb0f0c, 0xd7e4110d, 0x41c2010c, 0xef0418ea, 0xffc917dc, +0xc7e4270f, 0x40e3eff5, 0xaf4c19f6, 0x413f28db, 0x2e12e616, 0xd1e6fbeb, 0x1008f80f, 0x070719e0, +0xbdebdfde, 0x16f31b12, 0x2e1c12e6, 0x41eafaf4, 0xccc7e6ef, 0x26ff0bef, 0xc201fed9, 0xe733ead8, +0x373cf10a, 0xfdd5f213, 0xe1dcdcfc, 0xa9400904, 0xe432d018, 0x15340c0a, 0xcd3aedf4, 0x24210cef, +0x02d90407, 0x43c827ff, 0x27341c11, 0x1226fef4, 0x192cebfe, 0x280cf204, 0x96ff00e4, 0xb5dd4204, +0x1a2adc52, 0x1bf00ec8, 0x1d07f320, 0x09ed0a15, 0xb5e3200a, 0x12f7de1f, 0xe915eaee, 0x3bf11219, +0x3df1ec19, 0xe3ecec13, 0x1ce6f543, 0x0d18f4ef, 0x21ea1a13, 0xc8f507ce, 0x22e511be, 0x0b1cfaf3, +0xcfe8e3f0, 0xc104f6dc, 0x301817f0, 0xb825084d, 0x3fe920c2, 0xeff3171a, 0xc7e01809, 0x2efc10d3, +0xb602f92b, 0xdecb215c, 0x0d13053c, 0x3908e5d1, 0xb5efefc7, 0x44e0dcb9, 0xe8161def, 0x1de8f040, +0x2f140b3c, 0xc5ec07ec, 0xd403ed25, 0x0aeefce9, 0xd412e23b, 0xf4f804e0, 0xf218e5c1, 0xca17e612, +0xc5050ed3, 0x1afc142b, 0x2e2c0dab, 0x39ecfb13, 0x1de6100e, 0xf81ef4ba, 0xf600211d, 0x12c03968, +0xe8f90f2e, 0xfbe42c44, 0x260bfee3, 0x051b22be, 0xe50842df, 0xfef6d632, 0x130514d3, 0xe6def24a, +0x09213107, 0xdfddfa05, 0x0418d310, 0xf00739d1, 0x0afa1ed3, 0x1bfff4d7, 0xfddc2b4c, 0xf1e0c9b8, +0x04e1d4f6, 0xd423acb5, 0x320c5b1b, 0x160eefcb, 0xe11347ec, 0xedfc16d4, 0x10f8c7e0, 0x220e4ae6, +0xf0f5c834, 0x272002f8, 0xdf0df9cb, 0xe02ff8a9, 0xd7df5106, 0x152512b2, 0x02fe2bd6, 0x1707112f, +0x2519b2bf, 0x051e2eba, 0x0bdbcefd, 0xf1eae8c2, 0x08dee007, 0x02142b3c, 0x001628ef, 0xe21fb9f6, +0xe0e9f7c1, 0x31ed593c, 0xe4ff442a, 0x16e1c247, 0xe6e3be0b, 0xdbfc0324, 0x001110c7, 0xda2affe8, +0x05b6d302, 0xf94a2023, 0xe5350dde, 0x1ee0f60d, 0x282a00f8, 0x28ba50ff, 0x08e5211f, 0xf1f8190d, +0xd5abad20, 0x16fbc22c, 0xf8cc1fdd, 0xda1efe0d, 0xeb0ec3f5, 0x014c26e7, 0xf0e118dc, 0x1d11bb08, +0xe4bc44e8, 0xd4d0541d, 0xe43a44f8, 0x28445112, 0xdae0fde5, 0x13e3c6f8, 0xe11b09f4, 0x2ad2520d, +0xef59eafb, 0x1dcaf530, 0xde4d06f2, 0xf617e325, 0xf3c5e512, 0xf4b1cc13, 0xf91d2128, 0xd8affff4, +0xe54d43d8, 0x1a060fdc, 0xe7c3be22, 0x2441b415, 0xf3171a19, 0x180ff011, 0x30e4a919, 0xfaed2ff4, +0xd54253ec, 0xe7df0e1a, 0x232afa08, 0xe9f7effe, 0x1cb5f31f, 0xf4083422, 0xdfd4b620, 0x2223fc00, +0xddcf06fb, 0x6cc004f8, 0x041c4418, 0x4b04230d, 0xc6f92225, 0xf61aee22, 0xba001d0d, 0x06c3e2d9, +0xd5c2dd15, 0x240ffde9, 0x405a0319, 0xe3a01731, 0xde3c0b37, 0x2206faeb, 0x27fcfb22, 0x33af00dc, +0x3ba60a29, 0x2cc913cd, 0x4ef505f1, 0xccbbda1c, 0x203df4e2, 0xe407f815, 0x1a15f321, 0x18d30eed, +0x2d1f10fa, 0x3c47fb09, 0x3cf9eb1e, 0x4cf114e0, 0x0caadde6, 0x594ee5d2, 0x1ed6d026, 0x2229f602, +0x18f4faff, 0xddf5f0e4, 0xaf07fae3, 0xdc0fd8df, 0x14c3fce7, 0xf2d2edea, 0xb106e6fb, 0xdb0ada22, +0xc6bdfce1, 0xcdf111e4, 0x1dcf0ae7, 0x27c4f6f7, 0xe5e5ffeb, 0x20380d0c, 0x4b3e070f, 0x322300e9, +0xede80a0f, 0x65f7ebe0, 
0xcbddc306, 0x572b0d53, 0xb0082f2f, 0x5d0729e0, 0xbf0f3537, 0x18c2199a, +0x57e3110c, 0x4512d23b, 0x18f01e17, 0xf9f3f11a, 0xc6f1df37, 0xeae1ef47, 0x0df111c9, 0x19fce6d4, +0xd3f90fd2, 0x08f70520, 0x15f4e0cc, 0xefff12d8, 0xaee6ea0f, 0xc1db2afd, 0x1cdaeab3, 0xbe0bf433, +0xe6e01b47, 0x5503f125, 0x30fbd422, 0x3723f7b4, 0xcb1510c3, 0x09fdf4d5, 0xeeee1e39, 0x0ae7ebc0, +0x02d81d00, 0xbc33db5a, 0xc501e429, 0x12e0edb8, 0x03f1eac8, 0xf0eb25c4, 0x09e4e7bd, 0xd71de0f6, +0xc8ee0017, 0xdf00f0d9, 0x4209f8e2, 0x3100e6d8, 0xe2190942, 0xd5070921, 0x0b0c02e3, 0xee00e4c7, +0xf5eaceef, 0x2011f7e9, 0xdf1106c7, 0xfede29fa, 0x1c1f0d0a, 0x1f0d08e6, 0x0d2ccc54, 0xf6e61df3, +0x121f1647, 0x1610c3c8, 0xded7b6af, 0x1ef8f61f, 0xf9172ef0, 0x391f60f7, 0xf3ebcced, 0x273050a9, +0x1305ebd3, 0x16221206, 0x12e5c6f4, 0x0006d822, 0xecfa152f, 0xf9f92f2e, 0x1fe2470b, 0x2425fc4e, +0x33285cb1, 0x070430d3, 0xe421bd48, 0xede1c5b9, 0xf4ea1c12, 0xe1c809a1, 0xfa05d223, 0xedf83a2f, +0xc4259d03, 0x1f180810, 0xdb10fee8, 0x11daeab2, 0xec2614fe, 0xb31e74ba, 0x14143b14, 0x07ff2fd7, +0xfb23d305, 0x25fffcda, 0xe7dbbefe, 0x13e43abc, 0x070c301c, 0xff0726d1, 0x16dc3eb3, 0x04ee2dc6, +0xdeff05d7, 0xf8dfd0fa, 0xde13fb15, 0x0b1acebe, 0xe2f1f6e6, 0xee1d3a0b, 0x3003572b, 0xf9d32fab, +0x0420ddb8, 0xcef05919, 0xd126a9b2, 0xdac9fda1, 0xe0ee083a, 0x30e259ba, 0xecf1ed19, 0xd0da58fe, +0x071f2e46, 0xe2da4602, 0xf3cecba5, 0xd8e20045, 0x18d6ef53, 0x11dbe9b3, 0x1819c00e, 0x09f930e0, +0xf2183640, 0x250bfd1e, 0xcae3a2bb, 0x17d13fa8, 0xefe6c8bf, 0x1ff8f620, 0xfaee22c7, 0x0afdce25, +0xd9010029, 0x1727c2ff, 0xf2221bf9, 0xd8e10147, 0x0726224d, 0xe0ea4811, 0x1a2e0ea9, 0x0d3635a3, +0xf0f1e7e8, 0x0c0234d9, 0x0ef8c920, 0xecf4151d, 0xe817c112, 0x0e0637dd, 0xddf305cb, 0x16e2ef0b, +0xf1c10000, 0xfd2ce717, 0x092ada04, 0xb929e202, 0x2902e1ff, 0xcecaff25, 0xd23c09f2, 0xee2107eb, +0xd9c0e908, 0xd3bc0018, 0xb3bc051d, 0x29ab261b, 0x0b0bff2c, 0xdf5ae41c, 0xe6ef07cf, 0x2ea10fea, +0x1db5fac8, 0x083a0cde, 0x4ff5e1ee, 0x4643281c, 0x08dee1e6, 0x1a0e1ffa, 0x2a30f11b, 0x0b4a02f8, +0xe1f5e321, 0xf5eaf71e, 0x21e3e312, 0xccfffaf6, 0x3932f5d8, 0xe3efeff5, 0x314df518, 0xbdc709db, +0x1336e5ef, 0xeb2bec0d, 0xc4a6ec02, 0xe3b2eccf, 0x33210b27, 0x1a2b0a08, 0xb536f304, 0x51f7def2, +0xa946d6e1, 0x0f1fd2e2, 0xd5cee6f8, 0xe60303f5, 0xdc25f225, 0xab360303, 0xd9aa2c0f, 0x30f9ffd2, +0x611ff8e0, 0xc6fa3909, 0x33241323, 0x023ff505, 0xc3f827e8, 0x1601ebdf, 0x35d91228, 0x0cf70d01, +0xbc271ce1, 0xbae8e400, 0xed09e310, 0x13ec16e2, 0xbffb1513, 0x3452e724, 0xff010cd7, 0x53ddd9d8, +0x18da2cfc, 0x3b28f102, 0x1ae1ed00, 0x051c0ff7, 0xe16b23f4, 0xfb4f0844, 0x091fdcd8, 0xbea1e109, +0xbcdc1a36, 0xd908e5fc, 0xd7f301e1, 0xbd1d02e5, 0x2319e5f4, 0xed0cfbf0, 0xed46eae5, 0x3442ebe2, +0x0323f4e6, 0xd62a25fb, 0xbec7fd03, 0x3841e6ee, 0xd3cc1018, 0x4d06faf4, 0x31422521, 0x15b1f8e7, +0x354b12d9, 0xe3ca0dde, 0x1fd30cf1, 0x25ddf705, 0xf5e8fdfb, 0x0a121df0, 0x5bc81e17, 0xd8e8cdef, +0x000000f1, 0xdfec4a13, 0x0d1e1cba, 0x0ff918d1, 0x00de274a, 0x07242105, 0xe5030cd4, 0xe0f40734, +0xee02ead7, 0x15dc3d4d, 0x0de3cc46, 0xe817f1ee, 0xfbe3d40b, 0x0d09cbe2, 0xd22256b6, 0x1904bf2d, +0xea17ee10, 0xe019f842, 0x15ebc33d, 0x39ef6217, 0xe6f8f1df, 0xf1001ad8, 0x1cf90dde, 0x19e40fbc, +0xe9e6ef0e, 0x0103d72a, 0xeffe3929, 0xf7d83151, 0x11273901, 0x28fdb0dc, 0x07f22f36, 0xddddb505, +0xe0e3f8ba, 0xf3dc354d, 0xe613f1eb, 0x09f4e2e3, 0xe70df1cc, 0x28d950b1, 0x001128e9, 0xfe312658, +0xf624cdb5, 0xee1217eb, 0x1ac04268, 0xd414ab15, 0x0d191b0f, 0x0e021b26, 0xe10cba1b, 0xfc09d4cf, +0x1b2b0c53, 0x0be8343f, 0x04d92c01, 0x01f4d733, 0x0d01cad8, 0x16f2121b, 0xddebfbc4, 
0xdff2fa37, +0x1dea0bee, 0x1b0cf4cd, 0x2ff7a9cf, 0x13da15fd, 0x35fb5c2d, 0x0b0fcd36, 0x0e19ca40, 0xef1e380b, +0x0303d5db, 0x02fdda24, 0xe30845e0, 0x14fc1424, 0xeef115ca, 0x04e5d40e, 0xde2bfaad, 0x2ddaaaff, +0xf2e91a3e, 0xd51e530b, 0xdde405f3, 0xd908b11f, 0x0fe0c807, 0xecf0c318, 0x20064723, 0xf814d03c, +0xe9081120, 0xe0f808cf, 0xdbf4b235, 0xfdf225e6, 0x10da19fd, 0x170fef19, 0xedc53a64, 0xea133e16, +0x1725effd, 0xe4fbbcdc, 0x0208dadf, 0x1ff50933, 0xf706d0d2, 0x0901cfd6, 0x130515d2, 0x1e1e0909, +0xe01247e9, 0xbe3f0000, 0x35dbe517, 0xdfcd0d03, 0xff29f9f5, 0xb0c0d900, 0x30eed8e7, 0xdabff816, +0xaa4203e7, 0xd9f62f19, 0x5453ff1d, 0xdbd32bd6, 0xec07fd06, 0xeedd14df, 0x462debfc, 0x173e1d04, +0xe423efea, 0x4f6cf505, 0xd701da45, 0xb13602d9, 0xa2fd28f3, 0xeec7c9dc, 0xfae21610, 0xd4c9de0a, +0xd5db04f0, 0x1503fd03, 0x0360eddb, 0x2abf2639, 0x34fdfde7, 0x462bf5dc, 0x3c1f1e04, 0x1d04ec09, +0xbdd20cdd, 0x1c561a06, 0xfb1d0c2e, 0x19b824f6, 0xd1f4f120, 0xbe23061b, 0x1dcb1bfb, 0x22f10cf4, +0xc5f8fa18, 0x5227ec20, 0x33d1d501, 0x03caf6f8, 0xd3fc260d, 0xb9150523, 0x5811e114, 0x211bd0e8, +0xcd1d08f2, 0x44c2f50b, 0xf3d6e417, 0x0535e5ff, 0xfbccddf2, 0xc102def4, 0xf5d01825, 0xfb27e207, +0xeff62300, 0x3a1417e1, 0x17b6efec, 0xddd9ef23, 0x2ddefb02, 0x0a2bfbfb, 0xfb561e03, 0x2ceedd2e, +0x17f50416, 0xc4f3efe3, 0xb9e2ece5, 0xe3e71f0b, 0x47e5f4f1, 0xebdbe1f3, 0xe4e514fc, 0xfc280c0d, +0xfdbb2501, 0x0508251c, 0x0d0add1f, 0xcb201b1e, 0x2e480df9, 0xf3f2061f, 0xc7181b19, 0xe42310f0, +0x32b3f4fc, 0xe5c70b26, 0x1ceef3ee, 0x3def0d16, 0x2309ec18, 0x3544fb1e, 0x5050f3e5, 0x000d29d9, +0xef5b271a, 0x57b0ea33, 0xecc6d0d7, 0xd619ecee, 0xc60101f2, 0x3954efd9, 0xf2db11d4, 0xf7b31afc, +0x433d20da, 0x0000e515, 0x06f5d234, 0xdef0b6c8, 0xed1dc4f4, 0xe8fef126, 0xe215bb3d, 0x14fcedd5, +0x0deb1aed, 0xe528f2b0, 0xe0e84840, 0xdd1ab542, 0xe91ec10b, 0xe11ff648, 0xbf0968e0, 0x0ddccab5, +0x0b1ae30e, 0xe6020ed7, 0x07f42f33, 0xf4e91cef, 0xfbe9d3c0, 0x24e4fb44, 0x12fa162f, 0xe9063f21, +0x01f6d931, 0x22f806df, 0x1d04f423, 0xdffc4923, 0xe4eff3c8, 0x190af11e, 0x18f4111c, 0x170311da, +0x16f73e20, 0xdbcb04a2, 0x0dfc1bdc, 0xedf43a1d, 0x10cfc8a6, 0xfb402d68, 0xf2273500, 0x21fc4a24, +0xfae2d345, 0xe7001028, 0xeaf1ed36, 0x00f329e6, 0x0d1234c6, 0xdf27f8ff, 0xecf8eb20, 0x09f432e5, +0x0bfacdde, 0xeffbe9de, 0x14e73c40, 0x2007f821, 0xe005b9dc, 0xeafe3eda, 0x0701d0d7, 0x29015027, +0xe9fdee24, 0xe31ff408, 0x24244db4, 0x03f5dcce, 0xf3151c3c, 0xf5da1e01, 0xe01b0843, 0x02db2b4c, +0xdcf34c35, 0x07f620e1, 0xf40e1b19, 0x0cffcc27, 0xf9112ec8, 0x3c0e9bca, 0x1615123e, 0xf7ddcf04, +0xfe0326da, 0x2a26524d, 0x09031fd4, 0xe6dc0dfd, 0xfbf32ecb, 0x18121115, 0x231d050a, 0x011a2abe, +0xfc0bdc34, 0x0a0632d1, 0xe3dabbb2, 0xeee2c60a, 0x11eae9ee, 0x031ad6bd, 0xf6131e3a, 0x0d22cbb6, +0x13103c18, 0xf5161c3e, 0xf01ae8f2, 0xf306e42d, 0xed04c623, 0xf1211a08, 0xdbf34d34, 0xe9e6eff2, +0xd90db1cb, 0x0aedcf3b, 0x3f110000, 0x2dffe818, 0x20fbfbda, 0x08fc07dd, 0xd6182025, 0x9d27fff0, +0xb5e63c01, 0x295323f2, 0x06d401d5, 0xa0ee22fd, 0x15ec3816, 0xba24ed14, 0xe13be1fb, 0x39360aed, +0xb60f10f3, 0x15e2dee7, 0x174aedf6, 0x1146ee22, 0xee16171e, 0xed1d1512, 0xd7fdec0b, 0x110200db, +0xfb5116db, 0xf50923d8, 0xd7bae3e1, 0xc960021e, 0x14400e38, 0x33d01419, 0xc737f407, 0xdc6011f0, +0xebfd05c8, 0x120ced25, 0x0845e9e4, 0xfaf620e3, 0x394923e1, 0x5217efde, 0xc1c8d6ef, 0x030fe810, +0xf418db19, 0x0ff3e510, 0x0ec7e71c, 0xfbaae5ef, 0x27c823d2, 0xc3c6ff0f, 0x0ed0ecef, 0xbbd11b07, +0xb8411cf8, 0x42e6e1e7, 0xafcee50e, 0x29bdd8f7, 0x4cf6ff1b, 0xefe2dc1e, 0xc518e9f5, 0xc313ec10, +0x0ccfea14, 0xba27e309, 0x0c9ae201, 0x19d31bc3, 
0x4aec0e06, 0xfa0e23ec, 0xeac5211b, 0xe9281313, +0xdd711101, 0xed41fab7, 0xbb2f14e7, 0xd6a1e4fa, 0xb6b3fec9, 0xd0b32324, 0xcbdbf8db, 0x4fb60cfc, +0x3503d9df, 0x06c3f225, 0xd2c7de15, 0xbefff9ef, 0x0aec1ada, 0xbe401eec, 0xc92fe519, 0xb6bf0ff9, +0xddbcde1a, 0x2047fb1b, 0xcbbaf91f, 0xa7fdf3e2, 0x30eed025, 0x435b07e9, 0x2efc1b34, 0x0ac70723, +0xbefce311, 0x4ca719dd, 0x1d04db31, 0xead20a24, 0x2a5b12fa, 0xc613ffce, 0xb52c12ea, 0x03bd23fc, +0xc9b9dc1b, 0xd0e40e1f, 0x000007f4, 0x0cde354a, 0xd2dcaafc, 0x21d908fe, 0x1bf5f3ce, 0xe40d0d1a, +0xcd06a42f, 0x0fe7180f, 0xda0c4e35, 0xfe28db00, 0x13efc5e9, 0x08032024, 0xff35275d, 0x22eeb639, +0xeb2bc3ad, 0xe9ec3e14, 0x2523fd06, 0x06f3d1cc, 0xdb2903b0, 0x0fdfc8f9, 0x18e50ff4, 0x0efbcb2e, +0x11fae8d3, 0x33e4a545, 0xe22af6ae, 0x1f10ba38, 0xe30bf633, 0x0c06e322, 0x0eeb1b3e, 0x09f1cf18, +0x10ef1939, 0xe9f21036, 0xee17e9c2, 0x1aec0f13, 0xeff4ea1c, 0x16dbedb4, 0x03eb253d, 0x1400c5d9, +0xfd00db28, 0xdcda03b2, 0xcdff5a28, 0x0bd6ceae, 0x24f8fbd0, 0xfdecd5c4, 0xdfe4faf3, 0x0cedccec, +0x0807e030, 0xf6213208, 0xebdfc208, 0xf312cbea, 0x1d21ba49, 0x03f02517, 0xda0e4ee6, 0x14ed3dea, +0x090ae132, 0xfdefd5c7, 0xe019f9f2, 0xf4121dc7, 0xe7febfda, 0x27ee0116, 0x18fbbf23, 0xff04d625, +0x252d4eab, 0x271d000c, 0xf8ed3116, 0x2009f831, 0x1b17bd3f, 0x201209c6, 0x2af95220, 0xee0816d0, +0xd6e352bc, 0x240f04e6, 0xf21d19f6, 0xf41b1cf3, 0x171b3ff4, 0x0c26cd4e, 0xfe05d6d4, 0x021dd7f5, +0x22cf4a59, 0x14efec39, 0x1c0abbce, 0x12d3c655, 0xe8fb4023, 0x07e82fc1, 0xdae6b30e, 0x1d0af5e1, +0x27f0ffc8, 0x01ed29eb, 0xe625be02, 0xdfe2490b, 0xf11c1945, 0xca21a1f9, 0xe91810c0, 0x20190842, +0xf1fec929, 0xe2e9b9ef, 0xe814c0c4, 0x6d0c0000, 0x14d0451b, 0x11aeebf8, 0x393d16d6, 0xa9dbeeec, +0xdbf7d103, 0xa45e0320, 0x1ab1cbc9, 0x0b25f2d9, 0xbf111e02, 0x2b2e1818, 0xb2000305, 0x1608dbd8, +0x210e1220, 0xfdf1fa1b, 0x2d9e2519, 0x01d0fcc7, 0xd5b8d9f8, 0x08d603df, 0xce25e0fe, 0xea000a03, +0x234b12d9, 0xe72ffa23, 0xedf00efa, 0x361614e9, 0x2c3a0e13, 0xe114fd11, 0x2a3a0915, 0xbfe3fdef, +0xd95a180b, 0xbd20fece, 0xbd391c07, 0x610de4ee, 0xbb0d38e4, 0xee08e4e5, 0x10fe1720, 0x2f0fe8d9, +0xc92af919, 0xdf24f001, 0x64f0f9fc, 0x1551c317, 0xccf61229, 0xbd39f41e, 0xe9ece511, 0xf9f21214, +0x1dc5e01a, 0x3ac00aec, 0xbf1012e8, 0x2f00e8e8, 0xdffc07d9, 0x1477f9dc, 0x0c301450, 0xfb0d1cf9, +0xd92ddd1b, 0x43df0006, 0xcc541cf9, 0x0de40d2d, 0x043c1b0b, 0x48ec24eb, 0xcc27e0eb, 0x2ea1f401, +0x02e00536, 0xec2c25f7, 0x380dec04, 0xe9faf01c, 0xb01b10de, 0x41f527f3, 0x31a6191c, 0xdfeef6ce, +0x4632f916, 0x2a2c1ef6, 0xff17fefb, 0x3fc2d9ef, 0xc3e717eb, 0xe70915f2, 0xc7110fe1, 0xe9caf0e9, +0x3a26eff2, 0x44361301, 0x40f11b0e, 0xedc81818, 0x09d8ebf1, 0x2013e2ff, 0x35ccf8eb, 0xaa2c0ef4, +0xb033d2fd, 0x03dd280b, 0xd0d926fa, 0x4203f7ff, 0xcff31a25, 0xe8cef7e5, 0x403ef00a, 0x0bd8e817, +0x30251d00, 0xaa10f8fd, 0x3ea7d218, 0xa708e9ce, 0xdde231e0, 0xc3e5040a, 0xeda7140d, 0xb51715cf, +0xd2dc22f0, 0xedb70603, 0xfc19ecdf, 0xb2e2ddf1, 0x1e06d90b, 0x28ee0a21, 0xb3c90117, 0x21212510, +0x31b8f8f8, 0xeae0f8df, 0x48bbeef7, 0xd20e20e2, 0xd029f9e6, 0xdd01f9ff, 0x0726fa27, 0x3a0edf01, +0x160ced1a, 0x1cb412e3, 0xd24c0b23, 0x01b50624, 0xe1e526dd, 0x321ef7f3, 0x200e0af5, 0x362bf819, +0x04130efd, 0x1af72315, 0xc7f3f31f, 0x2ad8f0e4, 0x47f1fe00, 0xa9411fe6, 0x0be92ee7, 0x1f2be4ef, +0x14c30804, 0x5c1214ea, 0xf1cd34ea, 0x5535e7f5, 0x3410d30e, 0xfa2e0c18, 0x4218defa, 0xbcbd19f0, +0xe6fd1c1c, 0xf5fe0eda, 0xf5e0e426, 0xb1041d08, 0xbf06da24, 0xc0d41823, 0xcfaae8fc, 0x25b0f82e, +0xd9d5fc29, 0xc8ccff02, 0xf04cf00b, 0x32d0e725, 0xda090af9, 0x5604ff1f, 0x2fef2edd, 0xdce2f818, +0x1fd30409, 
0x4c300afb, 0xe018dcf8, 0xb54308f1, 0x281fdde5, 0xc202fff8, 0x3ca11525, 0xc241ecc9, +0xc06d15e7, 0xc11ce8bb, 0xf51a180c, 0xb43b1cf2, 0xcd2b2413, 0xefd3f4fc, 0x3e30e8fb, 0x43df1708, +0xd44ae4fa, 0xe912fd22, 0xeb16eee9, 0xe1aa12ee, 0xe2be0a2e, 0x24f90a19, 0x02b1fbdf, 0x4f33d9d9, +0x1fe4280c, 0x0336f60c, 0x09bc250e, 0x2b36e11d, 0xf2fffd0e, 0xfd33e727, 0xd2ecdb0c, 0xebdc0614, +0x16ec13fb, 0x2427ee15, 0x0a5d03ff, 0x2f0a1e36, 0x08e20000, 0xe0eaf813, 0xfe1c2bf5, 0x300c57cc, +0x13e8c510, 0xe2e50a43, 0x0cdd35fa, 0x040f2c37, 0x0001d828, 0x4404932d, 0xdaddfe4c, 0x012d29ab, +0xee1ec6ba, 0x1b0ef236, 0x16f6121e, 0xd4faab2d, 0xe9e5eef3, 0xe80d4035, 0xe5f642ce, 0xd8010028, +0x141d3c0c, 0xe7110fe9, 0x14eac5ee, 0xed1c3af3, 0xd616523f, 0xdef7facf, 0xf2e01b47, 0x2211fa16, +0x290cb033, 0x29e05007, 0x300d58e5, 0xf82b30ad, 0xf01c370d, 0xd4ebadc4, 0x080c1fe5, 0x1324eb4c, +0xec13c5eb, 0x08fbcfdc, 0xe8ff4029, 0x08e920ee, 0x02fddb2b, 0xf20ce6cd, 0xe6184140, 0xfd1624c2, +0x27f0b0e8, 0x1b15423d, 0x151c3d0b, 0x21e0f9f7, 0x16e1c148, 0x11161812, 0x0b0e3435, 0x07fb20dd, +0xe30045d7, 0xebecc33c, 0xe10b471d, 0x1ceb0cc3, 0xf020e809, 0xf412e53a, 0xe905c124, 0x2eef56c7, +0x10dbc804, 0x13181541, 0x0913cfc5, 0x0be3cef4, 0x34ed5dc5, 0xff10da37, 0xd827ff4f, 0x11d81701, +0x13e81540, 0xcddd5a04, 0x090ce233, 0x22db49fd, 0x3d09651f, 0xf5edcc3b, 0x25224c06, 0xf7c8e15f, +0x190dc0e4, 0xf81fe009, 0x08272fb1, 0xec11c417, 0xefe7e9f2, 0x13e1eab9, 0xe6fdf325, 0xf1f01918, +0x0c27cb01, 0x0e0d36e6, 0x1a0bbf1c, 0x1122c649, 0x0020d9b8, 0x2120f9f9, 0xd50f531a, 0xdbeeb416, +0xf0fdc9d5, 0x1ee20bf6, 0x24ebfcec, 0xebf1c31a, 0x00003030, 0xd1570709, 0x5a3707d1, 0x482033f1, +0xfb262008, 0xb8fddcfd, 0x33301fdb, 0x39fff4f8, 0xf9ac11d8, 0x0115e0d4, 0x0401d814, 0xda3323d8, +0xc72803f5, 0x0bba1100, 0x48e11de2, 0xe81f1f09, 0xfe3af00a, 0xd105da12, 0xacc1fa23, 0x1bc92d16, +0x05f90cf1, 0x49f52421, 0x35e6211d, 0xe52f0df3, 0xdf50f3f9, 0xf6c3f9d8, 0xf3df1e15, 0xc45e1bf8, +0xf9c6ebcb, 0x07ec2111, 0xace8dfed, 0xc606d311, 0x3f161223, 0x3e071712, 0x5e3ae9de, 0xdef5c912, +0x34fcfae2, 0xd0e9f525, 0x0431f9ee, 0xc2bdddf6, 0x0aee161b, 0x0827e217, 0xdf322001, 0xcfbd06f5, +0xe0daf71c, 0xde3bf802, 0xe11c0613, 0x013ff7f3, 0x1df1d917, 0x46ecf419, 0x40d41e15, 0xd22618fb, +0xa1360702, 0x1d25c8f3, 0xe0310a04, 0x0939f8f7, 0xe8a51e11, 0x1fbe10cd, 0x4126091b, 0x3c2ee6fe, +0xdae21406, 0x2624fff6, 0x37380205, 0xb4db0ff1, 0xfddd2504, 0xe51eda05, 0x09c1f30a, 0x15f420e9, +0xb138ed1b, 0x26142811, 0xc41e0315, 0xba1eedf7, 0xe3ce1e0a, 0x9cef0b0a, 0xfbe2c4e9, 0x370f23f6, +0xc4f9f019, 0xdb5fece0, 0x140f03c8, 0x1ec115e6, 0xbecd0be8, 0xd1cce60b, 0xc3b007f4, 0xe9ca1628, +0x1adbf00e, 0xbdbc0efc, 0xc930e51d, 0xb42cf2f9, 0xc2a723fc, 0x35fa17cf, 0x539b0edd, 0xfaffd63d, +0xe4b62228, 0xe3fc0d22, 0x4ab40a24, 0x3a0cdd24, 0x13bc121c, 0x15e30000, 0xfcee2c3a, 0xf7f2e1e7, +0x1528c2b0, 0xe51c0df4, 0xdc03fbdc, 0xffe12af8, 0x1c294451, 0xfc18db10, 0x1f0048d8, 0xdc06032f, +0x0bdee2fa, 0x04162cef, 0x09e7d0c0, 0x0c24e34b, 0xbddc6bb4, 0x09f72032, 0xe11d4746, 0x0bf2e4e6, +0x2122fab5, 0xd60653d3, 0x30dda84a, 0xfded2b3a, 0x1de80b10, 0x08ead13f, 0xd3055522, 0x2610fe37, +0x34fc5cd4, 0x1fecf614, 0x0cf41ce4, 0x16f7ed1f, 0xe6150e12, 0x0c00e427, 0x11ee3917, 0x0eee363a, +0xe3de0cfa, 0x24054c23, 0x2efb5623, 0x27d00058, 0xe71441eb, 0x231b4bbc, 0xf3141c3d, 0xb9f66f32, +0xd3ecab3c, 0xeb04122d, 0x1a1ebef7, 0x0c0fcce6, 0xeae81240, 0x14ec3c3c, 0x41ea9711, 0xfeded64a, +0x00ccd95c, 0x22fef926, 0xec063b2d, 0xeae93f3f, 0x1ffff827, 0x25264cb3, 0x180a101e, 0xdcfc0523, +0xf1dbe7b2, 0xfbe424f4, 0xe9053ed3, 0xdafc4ddc, 0xea23c2b4, 0xf2fe1a26, 
0x252403fd, 0xfd0e2537, +0xd01b59f3, 0x101be80d, 0xd9dd4fb5, 0x1edf0a08, 0x27e702bf, 0xd9f400cc, 0xf914d214, 0x1debf53e, +0x1524ee05, 0x121f3a0a, 0x01dcd604, 0x3901a029, 0x1dfef629, 0x11dc1804, 0x23f00538, 0xfd1124c8, +0x32215a07, 0xf7173112, 0xdc0e4dc9, 0xe00008d8, 0x1c1dbc0b, 0x200c09cc, 0x0805e0dc, 0x14e03bb8, +0x2bf3ac36, 0x2f0aaacf, 0xeadc124c, 0xff16d8ee, 0x01f3da1a, 0x0000163b, 0xfaf213ed, 0x0a08221a, +0xcebf1f20, 0x46dd0a1a, 0x3fcd1e05, 0xe7cee8f5, 0xf23410f6, 0xedf0e50b, 0xef42ec17, 0x08fdeae6, +0x975ce0db, 0xca19bf34, 0x241b0df0, 0x27bd04f2, 0xf8f8001b, 0xccbfdfe0, 0xf607f4e6, 0xec4ce3e0, +0x17d614db, 0xdfd0effd, 0xf751fa07, 0xc7081fd8, 0x225011e0, 0x16e7fbd8, 0x2af71210, 0xef32fd1e, +0xf5e9ea0a, 0x11f2e411, 0x26cceae5, 0x4e00fff3, 0x724f2728, 0x29ea4a26, 0x1aca00ee, 0x460df30e, +0x201c1ee4, 0x46dff7f3, 0xd3d41ffa, 0x3beb04fc, 0x4401ed14, 0xd3b3e4da, 0xc03cfb25, 0x1c1de7eb, +0xcccc0bf4, 0x2ae10df5, 0x402802f7, 0x0117e901, 0xc201d9f0, 0xf7fc15d9, 0xfedc2024, 0x24c82503, +0xc1fafdef, 0xfef2e9de, 0xd4c1da1a, 0xaff104e8, 0xc12a29e6, 0xb625e8fd, 0xd7c72204, 0xb5c4ffef, +0xcd45dd15, 0x25330a1d, 0x3ff0030b, 0xa9b2e917, 0x25c12fd9, 0xc23a0218, 0xd2bfea11, 0xe3170518, +0x282a0aee, 0xda0300ff, 0xfeb6fe26, 0xebd5db22, 0x1ce6edfd, 0xe4d4f5f2, 0x16f2f404, 0x26c8ee1a, +0xe2e9feef, 0xa8fbf5ee, 0x01073123, 0xe843d920, 0x1416f0e6, 0x0b24ebed, 0x5123e204, 0x0000d7fa, +0xbd290000, 0xbef21bfe, 0x19eee7e7, 0x0000f1e9, 0x00000000, 0xfa490000, 0x17e0df22, 0x000012f7, +0x00000000, 0xbadc0000, 0x0f3fe1fd, 0x00001a17, 0x00000000, 0x31c60000, 0x09ee0000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 + +hard_output0 = +0xcddb5769, 0x627f0a91, 0xbcf446fb, 0xc930d133, 0xdd7b82f0, 0xbd591856, 0xb73094b3, 0x32436132, +0xac268fdc, 0xeeb03845, 0x0087cc79, 0xd372eb62, 0xb595a1bd, 0x511605f7, 0xa428b83f, 0xfb9380a8, +0xe43c2efb, 0x0a04017a, 0x674608a3, 0xc2464812, 0x208bc693, 0xb03d0580, 0xdc62b803, 0xc2808805, +0xccaf7f16, 0x848dacef, 0xedf69b49, 0x222d2bd9, 0x78ffa7a5, 0x749cb755, 0x9f4abc69, 0xa27f3a72, +0x9652d070, 0x8a83be54, 0xa9820b7f, 0x8b00e52a, 0xb02159e4, 0xeddabc6a, 0x47b69064, 0xf161d47f, +0xc243b0df, 0xebb2ac39, 0x4fd6cddb, 0xc0d38ba3, 0xd9ddef17, 0x8c638875, 0xa132cfef, 0x5893a4f6, +0x1a29ce5f, 0x9072f3eb, 0x486bda03, 0xf5250cda, 0xdc10edbc, 0x392e6b7f, 0x4664de06, 0xfde2f3f2, +0x37c7d442, 0x32b60d29, 0xbcb7c8ff, 0xa1542cd0, 0x91b36e18, 0x231f9e19, 0x97cbc4a6, 0x51ceb1df, +0xc02dedf8, 0x4bfd9728, 0x569deda7, 0xc0790501, 0x132e47b0, 0xb8229185, 0x81cbe103, 0xa30dbf5f, +0x72c54db6, 0x4abdbdf7, 0x557d21d3, 0x3b75be17, 0xf540476b, 0xc48d78e1, 0x767d44ec, 0x87dc6193, +0x7a4c52fc, 0x306c3548, 0xa66e51b3, 0xfc627273, 0x5f349268, 0xf5e74974, 0x232388b6, 0x01d8e49e, +0xbf29f75f, 0x378a9d02, 0x59a45656, 0x46fb25b1, 0x75219b62, 0x4f0d6f71, 0x220c6763, 0xdde47b70, +0x8c0dce8f, 0x3e3db100, 0xabad4262, 0x800eddcb, 0x32416450, 0x61d260ce, 0x6e2a0fac, 0x73cdb847, +0xeee01947, 0xe5a3a37f, 0xa710e211, 0x98ecaa36, 0x69197474, 0x6e73e13f, 0xb2100f2e, 0x7714b2fd, +0xec3d81cb, 0xc50e5194, 0x5c69a3d8, 0xce0bdc13, 0x08e7df1c, 0x5fb014a5, 0x75b5801e, 0xf57d3b75, +0xac5dd149, 0xa5096b77, 0x4b5da0af, 0x656506d3, 0x3823d675, 0x3840530c, 0x1c0980ba, 0x3bd4150f, +0x3df8c0f8, 0xa2834aa4, 0x479ea855, 0x4f4557b5, 0x15d730db, 0xe6809738, 0x0aec8f7d, 0xf5e2188e, +0xa9a0fac7, 0x785c3166, 0x049dd244, 0x72cada2a, 0x5655bab3, 0xff73ca65, 0xb3dc51f1, 0x259f555b, +0xdaa1933d, 0x0f716b8f, 0x1e29bde6, 0xbe8bf3a9, 0x908b5a60, 0x667636c0, 0xfce80b0b, 0xcf43d154, +0xfffc60f5, 0x324b1632, 0x1609e45b, 0xf8e09aed, 0x1a01f6ee, 0x013cea78, 0xdd89ba42, 0xfe5bca8f, +0x894e3aef, 0xda292f76, 0x457a93bd, 0x73a9c254, 0x7b5ca9d1, 0x947355ba, 0xb8a797c8, 0x3b5e97f4, +0x946889ec, 0xd49316a3, 0xa5383830, 0x55f63bbf, 0x47b3a4ca, 0x415bd007, 0x1227c60a, 0x3610fab0, +0xa68c75fc, 0xa1441c62, 0x68a02596, 0x6d083d07, 0xceeb9057, 0x915054bc, 0x6f2c7fa8, 0x62be6166, +0x963703a3, 0x5af6e614, 0x5bc3e4f0, 0xf517a95e, 0x408a2bfd, 0xab7fe7a8, 0x37aac030, 0x9956f2f5 + +k = +6144 + +e = +10376 + +iter_max = +8 + +iter_min = +4 + +expected_iter_count = +8 + +ext_scale = +15 + +num_maps = +0 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_CRC_TYPE_24B, +RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN + +expected_status = +OK \ No newline at end of file diff --git a/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_negllr.data b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_negllr.data new file mode 100644 index 00000000..e946963a --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_negllr.data @@ -0,0 +1,645 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_DEC + +input0 = +0x7f7fb2e1, 0x467f05ef, 0x3cabba6a, 0x1f826cac, 0xbe817fe7, 0x584dab0f, 0xdcb55f81, 0xe47f915f, +0x37b1a534, 0x4b478145, 0x9db5727f, 0x7fa6943f, 0x819550ea, 0xd18eb72a, 0xa0ba9c10, 0xcf7f817f, +0x915a957f, 0x2c699d0a, 0x89815c0f, 0x7f569358, 0x1002587f, 0x54314130, 0x817f1b7e, 0x81a85e2a, +0x5981bdf2, 0x0c81b00f, 0x6781be0e, 0x297fb17f, 0xfd811fb3, 0x7f077fdb, 
0xd37fc2ab, 0x817f1024, +0x81817f7f, 0x3d848ec6, 0x4486cd9c, 0x7f6d7f81, 0x1d7ffa93, 0x813648ca, 0xdc7f125e, 0xaa81a798, +0x60838ef4, 0x815fcdbd, 0x81cb9d93, 0x817f8188, 0xc8c3d697, 0x817af2d6, 0x36888781, 0x818117ee, +0x37a67fbf, 0x89f464b2, 0x7f1e7f81, 0x7fe5c17f, 0x8ca1effc, 0xed7f82cf, 0x4a7f5ac6, 0x7fffb8d6, +0x427f7f73, 0x747f7f06, 0xdfcd22a9, 0x7fb99981, 0x76f77381, 0x2cd70f12, 0xd8b138e1, 0x81a4ea29, +0x222db281, 0xc3a07fd2, 0x217f8177, 0xd1cd87c4, 0xd181679f, 0x7f7fdee2, 0x7c81a72f, 0x6e7f5a7f, +0x77b0c96d, 0xff7f8110, 0xe8c6ad46, 0x376ade0a, 0x447f687f, 0x5916d238, 0x88816b81, 0xda7f6d88, +0xe2b2b55f, 0x750f7fc8, 0xc624d47f, 0xc6837f79, 0x81de5272, 0xd0847f29, 0x81ad587f, 0x3e7f7f61, +0xc803e070, 0x237ff600, 0x8c01815e, 0x814e0e81, 0x81657eef, 0xab2a62bc, 0x73d42cb4, 0xaaf03a8a, +0x817fa57f, 0x5e7fe2c3, 0x6deda37f, 0x6e87818d, 0x767f6e4d, 0x5c76819a, 0x8182b6f0, 0xdcd3037f, +0x81184c7f, 0x7ff1501c, 0x22c7f159, 0x45a6817f, 0x9b53c5e7, 0x57966a5e, 0x0a7f7f81, 0x4a5c155e, +0xf37fff81, 0xad97f8f2, 0x052f397f, 0x79638181, 0x7f7f7f7f, 0x81e3fae4, 0x7659cf03, 0xbd7a5a81, +0x46bcab7f, 0xa57f69a1, 0xba818186, 0x38a347a2, 0x91a88ec0, 0xd76087fc, 0x81818b8c, 0x7fa3424e, +0x7f89ef7f, 0x81ac183d, 0x3aa8912a, 0xc4937da6, 0x7a4c136b, 0x81568e05, 0x8182812b, 0xebb17f1d, +0x84b87f4c, 0xf4eab6c9, 0x81034197, 0x8183bd0a, 0xa1b92676, 0xceca2ef1, 0x9b7f7f7f, 0x5d813181, +0x817fea35, 0x0a6c81b0, 0x9a559b4f, 0x93987691, 0xad819510, 0xa47ab324, 0xfa577f7f, 0xbba8ac3e, +0x6694b753, 0xee63c4d7, 0x17b4ce81, 0x83c0dc81, 0x6a7f4481, 0x81d87fe0, 0x7f5cc5c0, 0xf828ea77, +0xbb817fa3, 0x3e816619, 0x817f6aab, 0x26417f7f, 0x0c40817f, 0x9cf01834, 0x32b37f7f, 0xe74fd0a7, +0x7f7f4167, 0x0e7f6ae0, 0x813d81b2, 0x8172027f, 0x4ea6c275, 0x814525de, 0x46817f7f, 0x7fcb2cd1, +0x7f8198ca, 0x3d7f60aa, 0x8d7fac81, 0xe2c081ed, 0x656f8181, 0xb2ae9365, 0x6ddd8d81, 0x86e0be65, +0x5495ec58, 0xcb981829, 0x429ee669, 0x81445fc5, 0xd3435724, 0xce51b323, 0x92b16972, 0xf0137f8e, +0x7f199a7f, 0x81be813c, 0x8155817f, 0xd481037f, 0x167f8181, 0x5ad131d4, 0x7fbc4149, 0x81046d60, +0x87174b9c, 0x94bc63ba, 0x68817c7f, 0x12ef7f8d, 0x7a81f4f6, 0x7f7f6db9, 0x4823417e, 0xa4989a48, +0x7e4c787f, 0xa47f8123, 0x819afb20, 0xbf7f447f, 0x8d95af5c, 0x33a6812e, 0x07c75cd5, 0x337f8151, +0xa2e5e15a, 0x847f7209, 0xf9f4b934, 0x72a77f2f, 0x1a9f2f7e, 0xac224bb2, 0x81467f0c, 0x3f8fd28d, +0x4a78b07a, 0x5269a71e, 0x4fd57fb2, 0x0b5262bf, 0xea39ca0a, 0x577f3d1e, 0x509481d7, 0xb7816b48, +0x35c31d7f, 0x818bdc1d, 0x24087fb9, 0x7fe27f07, 0x55866481, 0x7f9edd28, 0x07811ed1, 0x1b317fb2, +0x8cb19dee, 0x396920a6, 0xd965dd85, 0x9ed5ff47, 0xc97f7f7f, 0x7f8c1d55, 0x06a69701, 0x9e348881, +0x7f6cb281, 0x388181d1, 0xd07d61ac, 0x7fdef7f1, 0x08177f7f, 0xd87fa1c0, 0xfe9ebeb5, 0x813d3430, +0x2181811e, 0xf4dab306, 0xa36bbb7f, 0x05e87f7f, 0x957ff3cd, 0xe4814219, 0x7fbf3f81, 0x818a7fab, +0x43e081d9, 0x87c08729, 0x0db52785, 0x8181b0da, 0x4c94697e, 0x817fa9b2, 0x7f7f81d9, 0x8bda8193, +0xb6811781, 0x81796220, 0xdb082ac1, 0x2c7f5d3b, 0x8c8103f3, 0x4dcbd01e, 0x7fc1a87f, 0x457f7f23, +0x5e26bad6, 0x7f8174f3, 0x30109f08, 0xef8526cc, 0xe8690849, 0xd2b981b0, 0x7f7f7fb5, 0x8156ef7f, +0x921a7f98, 0x7fe6681e, 0x244e497c, 0x5ef68149, 0xbfbcaa7f, 0xf5011204, 0xfe7f8163, 0x8cd08f31, +0xc88a6b43, 0x7c8105b6, 0xf6d881aa, 0x074c2c2d, 0xd6815c7f, 0x1d15d265, 0xce816f0c, 0x14c77f2e, +0x9a7f08c4, 0x1a8b8ed2, 0xbe9637ed, 0xb59fac5a, 0xa4d07f91, 0x7f847fab, 0xd981597f, 0x7328575d, +0xb83af760, 0xb536b941, 0x81c1937f, 0x16b7814d, 0x3f7fb081, 0x7f41e102, 0x727f77ea, 0x9558c381, +0x48ac819e, 0xa87f99f2, 0x819e7f05, 
0x92816eb0, 0x59447f81, 0x4f6cb056, 0x818163a8, 0xe00a64e2, +0x81bb7fa3, 0x7f816de7, 0x86ac6a50, 0x48a277b0, 0x8e37cd81, 0x7f88e646, 0x023351d8, 0x1ee7a64b, +0x8145f239, 0x81810859, 0x557f7f7f, 0x6971d292, 0x813d7fdf, 0xe86cade7, 0xdab67f7f, 0x6d4f247f, +0x9df2297f, 0x1e8172ec, 0x3f81a56d, 0x907f02d3, 0x45663970, 0x817f46a4, 0x817fd1ce, 0x9843b181, +0x7fdf817f, 0x784881da, 0xd8a281c2, 0xd77f88d8, 0x8c81817f, 0xeb625633, 0x73fe4eae, 0x57787f19, +0x7f81ea7f, 0xca8143c8, 0x767cbf1d, 0x7fbc81e1, 0x7f9bbeb5, 0x7fea8a12, 0x7f5c7fde, 0xb5fe1685, +0xa4da81c8, 0x7f6b542e, 0x81814b7f, 0x6252f7d3, 0x55eebb81, 0x6a0824cf, 0x586128f0, 0xded58181, +0xe32a819b, 0x79c67f20, 0xf2812781, 0x7f58c369, 0x7ffa2d7f, 0x817f81f4, 0x7ffc2c07, 0x457f9981, +0xff4281c7, 0xd78e6955, 0x3e10e57f, 0x632b817f, 0x7f0197d1, 0x813e7feb, 0xf981ac81, 0x8f81317f, +0xc37f137d, 0x01918101, 0x81b007a4, 0xaab059ad, 0x7f7f81f7, 0x6ac590a9, 0x817f81a0, 0x68816067, +0x0ae08182, 0xb47fb268, 0x33d94a7f, 0xbf43d243, 0xccff8e4e, 0x44ebe23f, 0x0d4cfd2e, 0x7f81947f, +0x81d1cee8, 0xda0e16a3, 0x34a74376, 0x117b81ae, 0x1d853f6b, 0x81d6d2ed, 0x8d7f5be6, 0xcf1fb4a0, +0x3e1ec13a, 0x7fed0cf4, 0x65d3817f, 0x0a1fec83, 0x7fc4811f, 0x666381c1, 0x8147c754, 0xa9cf7f53, +0xa076cf81, 0x4c7a4712, 0x1a814ff5, 0x9e7f8175, 0x8181698b, 0x7f1a7f63, 0x8191f19f, 0xb1037fa8, +0x7f52aa2e, 0x42b65c09, 0x94afab7f, 0xbda37dda, 0x81f2d66b, 0x3e737f1f, 0xcb816031, 0xd56181ef, +0x2c6ffb81, 0x44852329, 0x1b4b81cc, 0x055a7bb7, 0x4c6f2e1d, 0xb14ae1b7, 0x7f43c2c4, 0x2368b80d, +0x7f7ffb9f, 0x990b0523, 0x74b33ea5, 0xd64c8199, 0x98ed81a2, 0x311bc8e9, 0x599963cf, 0xb0eeed4c, +0x8181ed70, 0x42023428, 0xd00270bc, 0x812a7fd3, 0x7fc4101a, 0x3448d8db, 0x068814a8, 0xfeb5701c, +0x46db1481, 0x3cc5b001, 0x8181c2b3, 0x9c3d7f7f, 0x7f13e346, 0x2b75819c, 0xf57fcfe1, 0xfdaac581, +0x7f814c6f, 0x7f186b39, 0x7f818150, 0xea7f5b7f, 0x816776b0, 0xb8657f57, 0x7f447f5e, 0xc3628122, +0x84849d7f, 0x7f8cb914, 0x92813dd5, 0x81557a3a, 0x6b4cbf7f, 0xc6818b94, 0x7fbb7f98, 0x3c88812d, +0x8de4267f, 0x8dee81fd, 0x37f98131, 0x3d8164d2, 0xd97f8181, 0x3ccaebf9, 0x8165cb7f, 0x63cb81d3, +0x817fea94, 0x48817fa1, 0x4ea0b16b, 0x3cb5ab6f, 0x3148f975, 0x8c7f1ab5, 0x7fae5a87, 0xad06c881, +0x0681c187, 0x81a5a11b, 0xe29ea383, 0xf07f536d, 0x81b0e702, 0xc37fbb59, 0x9e6167e8, 0x3f55c9be, +0x819a40a9, 0x6450b823, 0x15927b65, 0x72a149d4, 0x6c280b5c, 0x5d7fd8f3, 0x998d831a, 0xbb7f817f, +0x10ab3f81, 0x96f65101, 0x257f743a, 0x7f04a024, 0xe0c981b4, 0x81163a22, 0x8e197481, 0x21addfdc, +0x7f2dcdaa, 0x787f4407, 0x6b7f81ce, 0x9fd252c5, 0x85e8979b, 0xb049819d, 0x8f7f7f4d, 0xbe284c7f, +0x78135181, 0x5d33db0a, 0x8123817f, 0x81496a7f, 0x81889281, 0x8145bf37, 0x2e9f7fdf, 0xe4543a2d, +0x811e6024, 0x7f62e6ba, 0xaad07f1f, 0x4f73894f, 0x4c7f8181, 0x57817f06, 0xd27feb81, 0xcf1a815c, +0xa57fbe7f, 0x684f7fdc, 0x81f6cbc0, 0xb72bf82f, 0xda6d448f, 0xc6157f36, 0x7f81407f, 0x9a8c4719, +0x5fdf81dd, 0xaf817f0f, 0x813c11da, 0xd7ec1181, 0x570186e7, 0x8181f41e, 0xe5b8a2bb, 0x819051f3, +0x2a7fd68f, 0x37a13ac3, 0x5b81fd38, 0x6342817f, 0x84697f6e, 0x7f5eddfa, 0x6abc81d1, 0x8151d9d8, +0x3e5c6c53, 0x2b397fc6, 0x60d58548, 0xc540b213, 0x67815781, 0x89983030, 0x767c7f81, 0x71effe3a, +0x817feb42, 0x05818897, 0x810ea318, 0x99bb0e8a, 0xf1dd4b7b, 0xae6dcbae, 0x8155adc0, 0x15b4c0c1, +0x81ac9b14, 0x895057be, 0x9599d781, 0x38968fcd, 0x8171e981, 0x7a62d80c, 0x8681509a, 0x7f5d489b, +0xa381467f, 0x7fa12952, 0xa07f8109, 0x5acd5d7f, 0xbb7fc77f, 0x531f3429, 0x7fb77f5a, 0x566d89f6, +0x817d4c23, 0x406a81e6, 0x1381c99c, 0xad89d258, 0xa67f0cad, 0x510b916e, 0x7ffa81a1, 0x0b6a11e1, 
+0x2be02284, 0x7f4e4338, 0x817fd023, 0x058174bf, 0x677f950a, 0x97a6a01d, 0x81817f81, 0x136881da, +0x717f7f9a, 0xbf811404, 0x6714c965, 0x51828181, 0xcaf329a1, 0x8881bc0a, 0x7f7d8305, 0x2c7f5a7f, +0x7f7f63e9, 0x797fced4, 0x818154f5, 0xf13681c4, 0xc04b1d81, 0x0e81d244, 0xbf3f66bf, 0x5a3f2d7f, +0xfa931714, 0xce485844, 0x263ae55b, 0x7fd37f81, 0x79477ee4, 0x7f8187e0, 0x81ab8144, 0x7f8d7f7f, +0x6f715068, 0x7f5577f6, 0x7f487f41, 0x81813b84, 0x33431981, 0x81a181c6, 0xca1b667f, 0xe75a7f71, +0x168128b0, 0xe0a092d5, 0x6e8164de, 0x2e817f61, 0xc4b1818d, 0xea7f8a32, 0x1098817f, 0x14a18156, +0xdd568862, 0xd3176a4b, 0x81308181, 0x697b3faa, 0xe256cecb, 0x697f1691, 0x4e228c05, 0x7fa6682a, +0x7449816e, 0x4558a15f, 0x9f513638, 0x7f7f819c, 0x7fb77aa1, 0x5eaf4912, 0x14817f7f, 0x0853817c, +0xa47fbe41, 0xdfd77f0e, 0x81371d7f, 0xc9cd7299, 0x81d0673a, 0xb133492f, 0xfd7f4003, 0x897f7f1c, +0x1990d405, 0x9f3ee2bd, 0xc168817f, 0xc6f57fa4, 0x83ee483e, 0x90f781f6, 0x81dcb39d, 0xd37fe881, +0x28819181, 0x83da81d4, 0x7251dc7a, 0x7928ffc3, 0x7bca8181, 0x7f7f817b, 0xf6fa907f, 0x37916bd4, +0x0d3fbc7b, 0xde81bdd0, 0xa0147fdc, 0xbf0f7f0b, 0x25958132, 0x7f6c9931, 0x7f44812c, 0xd4868181, +0x6d9622ad, 0x817f30b6, 0x3c7f817f, 0x81238823, 0x7f2d5c81, 0x6f81531b, 0x947faff2, 0xa5ea038c, +0x4268817f, 0x814f8cfc, 0x81481809, 0x814d7f1a, 0x3c87507f, 0xa47f1219, 0x4f65f27f, 0x5d7f2c21, +0x7f4631a5, 0x5a81475a, 0x7f81ef60, 0x73aaa890, 0x327f7f7f, 0x81b614c1, 0x7fc6477f, 0xe5b7a6be, +0x55816e73, 0x7ffaffb1, 0xb93d1b81, 0x3ce80fe2, 0x37ec6194, 0xe681e2db, 0x81c1d37f, 0x33a2bad3, +0x582b8181, 0x7fdc7f25, 0x81978169, 0x9733486f, 0x3de13ef3, 0x247fd5cf, 0xa91d817f, 0x606a7f56, +0x817f8178, 0xe61d7b08, 0x7fa08181, 0x058bd29b, 0x28237fbf, 0x4f81b057, 0x488d8103, 0xe1817fa5, +0x81666297, 0xfabc75c8, 0x7ff75edb, 0x4a62fab5, 0x687fad84, 0xa7221c28, 0x773a5a6e, 0xf27f8581, +0x7f7f9569, 0xa31d812f, 0x81d48457, 0x7f88bcfe, 0xd743b981, 0x7f608102, 0x18de2185, 0x81d77fc4, +0x937f337f, 0xe67fe52e, 0x95d481da, 0x887f6071, 0x81301872, 0xd9088132, 0x7f7f5a81, 0x8181aba8, +0x7f9a7fcd, 0x7f7f4136, 0x3d87d3a8, 0xd581d381, 0x5d306ed0, 0x615b98dc, 0x812b3281, 0xdbe5c443, +0x817f7fe9, 0x815462da, 0x8122da0a, 0xb5817d81, 0xe12bd56c, 0xfa819a68, 0x907f8183, 0x236aa83f, +0x1b576c9b, 0x5b6424ab, 0x3f3e3953, 0xec81507f, 0xddb43aa9, 0x818134f1, 0x81c37fb0, 0xbf4a7fcf, +0x810b7f7c, 0x705b81d5, 0x23636581, 0x3caf7f9d, 0xbe765d81, 0x189a11ae, 0x8194cabc, 0x6063c5b0, +0x0e3c5353, 0x388235bb, 0x81904632, 0x2b9781d3, 0xd78db758, 0x953fe437, 0x7423f22d, 0x81ad9c95, +0x7fd74748, 0x329b582d, 0xe0e07f7f, 0x7f81ae8e, 0x962fe50c, 0x3b7faf1d, 0xcaeb8e37, 0x7f54276a, +0x927f6c9a, 0x828143c1, 0x90456105, 0x7f757ff6, 0x796681eb, 0x7478b225, 0x70d852bb, 0x7f280bdb, +0x56597f47, 0x51e6fff8, 0x1e7f818d, 0xdc4a8175, 0x10712985, 0x2ee6daa1, 0x81c7d046, 0xa1a36e79, +0x9adf7f7f, 0x812ee70d, 0x91bc1062, 0x443b7fa9, 0x818f47b3, 0x7f817f3b, 0x7981a09c, 0x7f9c817f, +0x1b46d2cc, 0x786179de, 0xd97f61f2, 0x81b4f213, 0xd381874a, 0x7f4f7fc4, 0x9c81c3d5, 0x17d59842, +0x7fc9a8fc, 0x996840b5, 0xd5a79281, 0x88378181, 0x7fd4ca81, 0x81ba7fee, 0x5dae297b, 0xc8fa0e5c, +0x987a5757, 0x5debc016, 0xa64eb973, 0x61a51b81, 0x66509c7f, 0xeb7ad965, 0x7ada817f, 0x4d5bf7bc, +0x8876b8a4, 0x7d73b4d0, 0x762cb5ad, 0x6ec14d66, 0x6bfdad7f, 0x7f7f810e, 0x81bc2b15, 0x0f817f9c, +0xc72fe940, 0xc9168b0c, 0x7f6e8d79, 0x7f81fadd, 0x706181f6, 0x678181e7, 0x3f68c5e8, 0xa06d8195, +0x78d73d7f, 0x7295109a, 0x05897f55, 0xbdc07281, 0x81817fe3, 0xcd157f30, 0x489ca757, 0x53468119, +0x3f6a81b8, 0x03fe7c43, 0xb511cda3, 0xe581a17f, 0x5e3e3ec9, 
0x7f507f96, 0xbe12a481, 0x81006ddd, +0x49322cb4, 0x27f7a81e, 0xc6085618, 0x8148d270, 0x68668181, 0xec5ed498, 0xa011833a, 0x81475300, +0x071d3a63, 0xa2c55142, 0x7fe9767f, 0x6e3c21fe, 0xeb63814a, 0x7fd57ffd, 0x9f91630b, 0x475f219f, +0xbbb5c7bc, 0x686de142, 0xb652909f, 0x6b8e5a7f, 0x7f885858, 0x7f027ee1, 0x7f818191, 0x818881d4, +0x1b7c017e, 0x8581aac9, 0x627fa790, 0xaf837f7f, 0x69064681, 0x311d32e9, 0x6f7f0ba1, 0x7b81a1c5, +0x7f4f8144, 0x81e38144, 0x528daa3d, 0x1c098175, 0xba6b0ac7, 0x732614ab, 0x5b2c5d7f, 0x81815ce8, +0xb37f7fbb, 0xc87f8ec5, 0x81b08181, 0x5bcb8136, 0x7f2524c5, 0xf5e881d8, 0x50ceaf7f, 0xf72387a1, +0xd4c74e74, 0x663f7fe8, 0x26ae4a7f, 0x5d7f6358, 0x8178bbdb, 0x46f38102, 0xb0327f53, 0xa019f297, +0xeff57f12, 0x7f7f79e2, 0x8120817f, 0x73e4b322, 0x999c6349, 0x92ba973e, 0x7f7fbc5c, 0x81946449, +0xaf4b817f, 0xca523d00, 0x81817fbf, 0x3b145a81, 0xec54886e, 0x81cb173e, 0x5f02667f, 0x967a8181, +0x8a817f81, 0x3cb93c41, 0xfa811336, 0x74feb5a9, 0x821b9081, 0xaea865af, 0xf17f4e71, 0x52810059, +0xaebc3b36, 0x6a8f7fd2, 0x109de3eb, 0x4c555f48, 0x8166cc78, 0x7f4c7b48, 0xc7d1d381, 0x1381537f, +0xa0ab5c1f, 0xa00d0039, 0x81157f81, 0x2f206f81, 0xacb45b81, 0x2ec382f3, 0x7ff0383e, 0x44dc5aeb, +0xc37f81cd, 0x43553306, 0xa63d7f81, 0x7f7f8140, 0x7f7f818a, 0x7d82bd27, 0x817f8b7f, 0xa3579535, +0xc2e47871, 0x81815a0b, 0x49977081, 0x724cc372, 0xc681c27f, 0x2b7cabf0, 0x3081a6ac, 0xec7f964e, +0x7fda3ad3, 0x422671a4, 0x60008158, 0x7f738e7f, 0x538e6f81, 0xb73e0ab0, 0x796a6551, 0x81ad178b, +0x19731c7f, 0x1bf8c26a, 0x4acb7fd4, 0x5121b547, 0xda547fff, 0x7e505ff7, 0x817fff48, 0xa0813934, +0x3d4f77d1, 0x83f327ef, 0xc7937f6b, 0x357c4a84, 0xc183ca7f, 0xbd6081db, 0x30e0d783, 0x492aa57f, +0xbba6671e, 0xb062baef, 0x7f817f20, 0x7f267f58, 0x7fb94b3f, 0x447f5b21, 0xb864997f, 0x7fb057f8, +0xb1401a7f, 0x7fb57f9d, 0xefbb8175, 0x66892581, 0x7f06fa7f, 0x817a90f0, 0x7bd5793b, 0xc37ead24, +0x6feaa4be, 0x3a548cde, 0x8e81f98c, 0x81b78199, 0xe6c11940, 0x4d647f2f, 0xbc5c8181, 0x8175fa91, +0x8188864b, 0x85591890, 0x47ce5d57, 0x38a4efb7, 0xce5e739f, 0x81737fd4, 0x6d7f635e, 0x9c861774, +0x89818262, 0x45bc8114, 0xd1552a89, 0xc68174f8, 0xaf9081d8, 0xcb08e35b, 0xf5c404b2, 0x2ce96576, +0x11895a36, 0xcf20e0ea, 0x2548e7dc, 0x8f774c59, 0xf85a8f56, 0x7f81ddae, 0x7f7f627f, 0x1481491c, +0x7f97661b, 0x787f81e0, 0xa2192f7f, 0x6cb37fb0, 0x8181ae27, 0x3f7fafec, 0xce7f5a86, 0xae067f7f, +0x3c1c8181, 0x817eb443, 0x8e741041, 0x5a4ef2ba, 0x61029de4, 0x9d637e34, 0x819d7fca, 0x8181cd7f, +0x8175b96c, 0xbc4f6ecf, 0xf826aead, 0x567fa36d, 0x81968197, 0x9f9dcf3e, 0x265c8131, 0x81d9deab, +0xc21cf772, 0x7f6bd907, 0x313f795b, 0x817f197f, 0x09ecfe43, 0x6114402f, 0xe0995a81, 0x5f263281, +0xbe7f8158, 0x7f7f26dd, 0xfd7f7f7f, 0x3da2c609, 0x6ce3810d, 0x7f81cd1b, 0x81d68809, 0x42be9c6c, +0xf3d37f5c, 0x7f81e031, 0x774e0c30, 0x7f8f468f, 0x9ebb42b2, 0x7f817fc0, 0x1376817f, 0x25f78181, +0x7f8eb3be, 0x20228505, 0x229fe181, 0x81ac277f, 0x4f308129, 0xb8818111, 0x81d47fc7, 0x818181f0, +0xc27fd34a, 0x9dbc810a, 0x8481d981, 0x61817581, 0x8a2c817f, 0xe7815625, 0x96a7946f, 0x3a7f5d5d, +0x835f2fc1, 0xb6082dd7, 0x567f687f, 0x838181b6, 0xc663e9ad, 0x517f7fbb, 0xa570adb0, 0x12cfc2b4, +0xc1587f39, 0xbd8140ba, 0x7f814366, 0xd00981ec, 0xc27f217f, 0x5cd1b2ad, 0x81992c7f, 0xb1b57f62, +0x3bb9b1de, 0x9213cd8a, 0x814f136e, 0xa5c27fc3, 0x947f7f59, 0x8ef30ddd, 0xd3f290e9, 0x39bc7f8c, +0x7f814e81, 0x5381ff22, 0x7f81f04f, 0x0adde113, 0x5b93867f, 0x687f8111, 0x7f7f1c60, 0xb856814f, +0x7f8e8196, 0x5cfb66fc, 0x20310ab4, 0xa2599964, 0x973f0065, 0x817f95d6, 0xa43b7f7f, 0x816bb1c3, +0x7c7581e9, 0x521770cf, 
0x81456be1, 0x5e26818d, 0x3b54057f, 0xd4e231d6, 0x977f0346, 0xe777814e, +0x81b6095a, 0x7b337fd7, 0x18bb202b, 0x219b7cf9, 0x2bdeaa81, 0x55dcd1d2, 0x8123905c, 0xe463fd74, +0xd16ec17b, 0xd90f4e3b, 0x817fd97f, 0x817f7f7f, 0x816afd35, 0x3dd87f51, 0xa62f0e5f, 0x9f4766ce, +0xa933c505, 0x81049129, 0x627f3e7f, 0x025ea181, 0x7b5e4119, 0x0d11aff9, 0x817faaa2, 0xe3bc817f, +0x7fb27f81, 0xc07bc500, 0x9451c181, 0x1fc6811e, 0x7f9f9e3b, 0x7f4c1a91, 0x7f1f7148, 0x8358819a, +0x81119a81, 0x16d55f0c, 0xc07f7f2e, 0x467f9991, 0x237f8c7f, 0x81638101, 0x2aacb9ae, 0x4f439f32, +0xc65e96aa, 0x7f826508, 0x7fa86ab6, 0x602ddbc4, 0xd07fd00a, 0xb0e381dd, 0xca81cc2e, 0x7f7b7f15, +0x7f81b9bd, 0x21627f38, 0xa481ed5c, 0xeee8d79b, 0x64369481, 0xb97f81b1, 0xf443d7e8, 0x50a410b8, +0xd87ffbbe, 0x18589760, 0x814998d5, 0xc1814b2f, 0x7fc33b7f, 0xd25b7335, 0xb6b2987f, 0x988a7f48, +0x9aa4fb81, 0x597fd920, 0x7f4e7f9a, 0x7f819892, 0x81daeac9, 0x7f538fcf, 0xac7f8198, 0xd2529c90, +0x0ff00ba8, 0x765cb53e, 0x7f0fd3ab, 0x0d81cb33, 0x81989961, 0xc13c7f25, 0x20d6a6bf, 0x7f817fd0, +0x6f6594db, 0xec8c68eb, 0x638194a0, 0x3b89815c, 0x81734c65, 0xaeb69ec3, 0xd20b6440, 0x2f71b6dc, +0xc990816e, 0xb6077fda, 0x7f4cc48a, 0x816aa8e8, 0x817f397a, 0xfb8181b7, 0xe64d11e2, 0xe97f1576, +0x10706f7f, 0x9b935d4c, 0xaf7f3547, 0x7f175ec9, 0x7632a981, 0x2f61d4f4, 0x7a597f81, 0x499e7f00, +0x7f81315e, 0x81c4230c, 0x8cb66a2e, 0xe76a8105, 0x81817fc6, 0x7f0e50bd, 0x83ba337f, 0x81009281, +0xcac68465, 0x617fd326, 0x893981e4, 0x7f8ed9b3, 0x59467f7f, 0x0181e258, 0x7f81b281, 0xc9816629, +0x7fbc8181, 0x08e1cadd, 0xa8328181, 0x76b6e071, 0xb36d7feb, 0x505490d7, 0x09d5dd31, 0xba81632d, +0x7f7f6fd0, 0x9e45813c, 0x7c4c4f70, 0xd97f5090, 0x247f335c, 0x357fbb4d, 0x267fd5c8, 0x26d08181, +0x51447fa8, 0x81358efb, 0xb9197f52, 0x40aadca1, 0x7f6781aa, 0xaf272a09, 0x8177a4b2, 0x497fb515, +0x819b7f7f, 0x8b817f41, 0x257f7f92, 0xa8d2244e, 0x477af9ef, 0xa8817f36, 0x5ca681f5, 0x7f1a76cb, +0x8181c5f1, 0xc4ddc7aa, 0x81a1c2bc, 0x7f466fa2, 0x625c237f, 0x3ddd1f44, 0x4f54fdde, 0xa281f2d5, +0x95d14e95, 0xf26e7fd4, 0x9a813a90, 0x8154361c, 0x327f7f42, 0x7fa2983b, 0xd9c28197, 0xf00d1898, +0xb95848d4, 0x387f81f0, 0x9f8e7f4a, 0x1c5cb9ec, 0x1f63b5fc, 0x187f7dcd, 0x7f1c7f05, 0xa855e2cc, +0x5a747f98, 0x7fa1b4ee, 0xe8e51aaa, 0x7f81ec81, 0x38818175, 0x81815f07, 0x84888181, 0x81810852, +0x316e0060, 0x819b1c6e, 0x3d814577, 0x5ecc5d81, 0x5e81814c, 0x7f76914d, 0xfc7f814b, 0xd2819d96, +0x70a34ad0, 0x7fd544f3, 0x71e51581, 0x817d45a4, 0x7f479a68, 0x42447fae, 0x9a974f7f, 0x817fbbd2, +0x989f367f, 0x78fa30c0, 0xbb902e75, 0x7fda7f02, 0x7f82c5b8, 0x33c325c4, 0xdf89815c, 0x81c37f33, +0x52f37ffc, 0x7fcc4702, 0x81488124, 0x7f548196, 0x4214a4a3, 0xa034b302, 0x6a817e04, 0xc67f795e, +0x17a98157, 0x7f7f7f5b, 0x7f7f4c56, 0x7fe0b181, 0x7f907fba, 0x42f82fd9, 0x223ea236, 0x3d2025d1, +0x097f81b1, 0x81a35969, 0x373cb170, 0x44a57c3b, 0x26bc5a7f, 0x34657ff6, 0xc83b3d3f, 0x99be6c2c, +0x1c811f7f, 0x5c7f81ac, 0x33fa49f6, 0xa5c38181, 0x81ed8144, 0xc88a652a, 0x7f817fe3, 0x953cfc91, +0x466681d3, 0x366f16d6, 0x187f2ff5, 0x077ff351, 0xbe1b817f, 0x3884ec81, 0xb17f8181, 0x9a39437f, +0x476fac61, 0x7f911514, 0x24b0038f, 0x5647567d, 0x8143c7ae, 0x81b03a31, 0xec7f085e, 0xc10d7f55, +0x0a50b9f6, 0x7f1fd244, 0x7f81a1c7, 0xc48148ca, 0x877fc97f, 0x81115e37, 0x827ffdf3, 0x48b741c2, +0x7f9fc5c0, 0xf87f81ce, 0x43a1029c, 0x2a78427f, 0xff9a164d, 0x7ff981f6, 0x1b7f81aa, 0x6f48ca6a, +0x547f7f2a, 0xa9ce5cf0, 0xc07f7f7f, 0x7f6fa54d, 0x58817fbb, 0x7f96fa32, 0xfdc18381, 0x7abd7e7f, +0xc604b981, 0x64217ffe, 0x8893bd7d, 0x7f140281, 0x7759b9e3, 0xcc648129, 0x81c981c6, 
0xaf2ba351, +0x9ecd819f, 0xac816ff2, 0xb516672e, 0x4157ab81, 0x81d21796, 0x35643433, 0xf13d5a7f, 0x9881d003, +0x5081bbd0, 0x7d7fbdcc, 0x6ba50d3a, 0x8170677f, 0x85b29f81, 0x8aa72210, 0xfbaaa381, 0x7f0e8f34, +0xcf367b7f, 0xe60399ab, 0x7f7f7f01, 0x817f7f11, 0x81818932, 0xe39a6e31, 0x8d3b8181, 0x107d2bc1, +0x908164f7, 0xe41a7fc5, 0xe944503a, 0xeb4b6559, 0x22817f81, 0x816ea8ff, 0x1557e37f, 0xaa7f701c, +0x817f828e, 0x738106c4, 0x7e0e7f05, 0xac768181, 0x17898158, 0xc081ea33, 0x827f9958, 0x3b7fd4fc, +0x16181f7f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x0c7f3a86, 0x7750813d, 0xd2b385bc, 0x6f7f7f72, 0x7d487fa8, 0x7f7f8d65, 0xb6578196, 0x4d93815a, +0xf9a54748, 0x317d6469, 0xe0466f8a, 0xef869693, 0x3da5a9e2, 0x7f7f05a1, 0x14c68157, 0xc2148135, +0xad9ed181, 0x7a7f6081, 0xf10d659b, 0x4a38dce0, 0x114d7f2a, 0x2bc7f79f, 0x0f7881f1, 0x423e5553, +0x9c2b817f, 0xde7ffb85, 0xe18117ca, 0x67fd7f7f, 0x3fbbf57f, 0x9b34ad7a, 0x487a543f, 0xf06322f8, +0x9ce93023, 0x3581818a, 0x6a7f4d69, 0xad3d627f, 0xbd812c92, 0x8be4cc7f, 0x23b47fc4, 0xc87f819b, +0x104d6e7f, 0xcfcb8134, 0x0d8152cf, 0x467f7f19, 0x113f1ec5, 0x589bff9e, 0x457fa8c8, 0x2a1246d0, +0x731e257f, 0x40e460e8, 0x13d3057f, 0x817c8de1, 0xc09dc798, 0xab81d4f1, 0xa1a49d8c, 0xbf50df7f, +0xbc8192f7, 0x73aa4d5a, 0x1c817f43, 0x428fd6fc, 0xc282884e, 0xb9f67fd8, 0xd7777f4d, 0x1aa2d89a, +0xa5537f81, 0x8878b631, 0x4bdfd881, 0x7f21b57f, 0xcd7f7f7c, 0x4d2aa1d0, 0x3e138144, 0x3c7fe433, +0x146e5fe3, 0xa53b7f85, 0xa9537ffd, 0x819eda1c, 0xf93e2881, 0xc3814f7f, 0x018d69b4, 0xf87fa19f, +0xb09f9144, 0x6ec23f94, 0xdf7f39c6, 0x48087f7f, 0xfc818184, 0x18815bc3, 0xc764598b, 0x11ed68c7, +0x81df81bd, 0x7feceb38, 0xc90a473a, 0x81817f81, 0xef3a537f, 0x7f4d3873, 0xf45c846f, 0xfeb68afc, +0xd326d181, 0x7fe4ae7f, 0x3e817fb5, 0x81c95de9, 0xbebdcd7f, 0x23053726, 0xdd8d8181, 0x1d545233, +0xe37c933d, 0x81e52c1d, 0xc0ab2d81, 0x81e2667f, 0xcf2e70db, 0x9370319c, 0x21add116, 0xe6c34081, +0x076912e7, 0xa781aca0, 0xd6f5a5d0, 0xa9af5d7f, 0x2e817c77, 0x81e1661c, 0xd5308181, 0xae7f6ec4, +0x45815295, 0x2dbc2581, 0x50bde53d, 0xcd8cb1bc, 0x2e67977f, 0x32fc789f, 0x2b81ca3f, 0xef7f7f61, +0x19a7d611, 0x81427f81, 0xe98181fc, 0xd4397d7f, 0xd9ab7fb7, 0x819c8c92, 0xd5478339, 0xf0555aae, +0xfe09f481, 0x8128527f, 0x337f1081, 0x6dae3aa2, 0xccb67f81, 0xb1d8d87f, 0xc2d3dd02, 0x97af814d, +0x812f1c5a, 0xc50c8181, 0xcd70610a, 0x7f555df4, 0x567f7f23, 0x0a0d917f, 0x4b81815c, 0x184a5b8e, +0x5054c67f, 0x7f7f8ced, 0xf0b31f7f, 0x11698131, 0xe0ac5442, 0x57ffc57f, 0xb681cc02, 0xb78c8193, +0xa629b48c, 0xf2a8d43b, 0x59167d37, 0x81818bf2, 0xc87c817f, 0xbf7f8e7f, 0xcdbc2243, 0x2ed78e74, +0xfe817d61, 0xa3613c01, 0x25e281af, 0xbd6a7f36, 0xe57f4b7f, 0x81898161, 0x2a415081, 0x50b07fb6, +0xe77f9c72, 0xd43681aa, 0xd07fb648, 0x3c7fbd5b, 0xa555c475, 0x8169dc64, 0x09106247, 0xf9b660ab, +0x389542ac, 0x7fa22c4e, 0x81304e71, 0x29208c90, 0x317fa43a, 0x8281819d, 0x7f212065, 0xc0682681, +0x3f2b5dc7, 0x87d86d81, 0x5c7f4192, 0x7f54332b, 0xc564a449, 0x7f817f58, 0x3c6576aa, 0x147f816d, +0x4957d091, 0xbdaba3a7, 0xa17fa381, 0x49628138, 0x372f815b, 0x81676748, 0x8dba817f, 0xc89e8129, +0x866fc855, 0x43ba4a81, 0x8f4b7ec2, 0xa47f054d, 0xc3b07185, 0x7f8d3ad0, 0x2c7f9f8e, 0xc17f27d3, +0x33956381, 0x8e12a081, 0x7be52e32, 0x44b28122, 0xe4947f6d, 0x6eae4f7f, 0x34df816a, 0xd7938168, +0xa7a9528b, 0x7f81ec45, 0x41857f6f, 0xbc28817f, 0xcbd67c06, 0xaeb678d2, 0x529a547f, 0x44813b6e, +0xd44f247f, 0xbe1b9879, 0x877f4996, 0x5b9a8dcd, 0x2e558281, 0x1d817f84, 0xda7f8186, 0xdda9f4a6, +0xe698ae70, 0x7fb54c8a, 0x0aae8550, 0x857ff8fb, 
0x1ac42e8e, 0x3f27814b, 0xe27fdc2d, 0x2db1eb81, +0xca817e5a, 0xec7f62e2, 0xc7dea079, 0x7f7a7f26, 0x7fdfbf81, 0x7fe89da8, 0xa9187264, 0xf98192c9, +0x6af88181, 0x667e3981, 0xf97f3c48, 0x7fdf817f, 0x3b9bbc81, 0x55bfbc81, 0x096fa6e1, 0x1b75a67f, +0x357f83ed, 0x7f7f6f7f, 0xf3b43812, 0x9d4c0d92, 0xa8817f4d, 0xb96c877f, 0x34818178, 0x5d7f997f, +0xdb7f5c7f, 0x39b056a4, 0x33bcffaf, 0xb398b57f, 0x285a4bf1, 0xb181574b, 0xc1e56d31, 0x4b717fd2, +0x56337f81, 0xed938157, 0x7f4b7f47, 0x49a3812b, 0x2e667fcd, 0xcd817240, 0x8b5470e6, 0xf693814e, +0xd3e07f7a, 0x7f7a4781, 0xf44cd60c, 0xfd546281, 0xef995981, 0x81e70628, 0xd3b97f47, 0x07819aa8, +0x707f447f, 0x9181c079, 0x72ae8173, 0x988d2a78, 0x7f814478, 0xb57fa076, 0xde7fb260, 0x17817f3b, +0xf045c31b, 0x7f7feb13, 0xbee5d07f, 0x5f8192ae, 0x0c9381f7, 0x28817f7b, 0x985964d5, 0x81817d81, +0xb381b799, 0x38b0817f, 0x3a887f0f, 0xf353fe81, 0x53817fdb, 0x5c814d56, 0x39b415ff, 0xc9c78c59, +0xb2817fad, 0x5884bec5, 0xd629b07f, 0xb67f81cb, 0xcb81abe9, 0xab7f9b7f, 0x35815781, 0x85878181, +0xb0b2177f, 0x86223495, 0x32cd7f69, 0x3c664743, 0x1d7981c2, 0x817f817f, 0x16cadc06, 0x03786a7f, +0x03818107, 0x5a8dd1d1, 0x58e57f20, 0x7f818152, 0x444c50f0, 0x3d81810c, 0x66ae7f62, 0x39d01614, +0xbd817f81, 0x789b7fa6, 0xadcd819c, 0xd37f8171, 0x291ebf81, 0xd5923ac9, 0xc37f63bf, 0x1c7981ae, +0xda5e969d, 0x248c5899, 0x8fbdc4b3, 0x6c50d2cb, 0x2a5fab0b, 0xc42d818d, 0xa4776781, 0xaa6281fd, +0x20534bc6, 0xb729727f, 0x6481de5c, 0xffc8a5e9, 0x707f25f2, 0xc674ba57, 0xe2ff9cb8, 0x02433e79, +0x108e223b, 0x617f9cd7, 0xb5507f81, 0xa6f63ee6, 0x09b45856, 0x7e7fe581, 0xe4f8417f, 0xe11d98be, +0x44817f7f, 0xc9c581df, 0xf7817fb0, 0xac6da566, 0xc681ed7f, 0x3249810f, 0xab55b281, 0x35e64c81, +0x61e9b982, 0x8d8c4781, 0x4eafd663, 0x7fb8302e, 0x40a9bcb1, 0x17b17f9e, 0x817f92c0, 0xe35d2fb3, +0x4ca815dd, 0x817086b9, 0x389b817f, 0x3c0d955f, 0x447f7f1a, 0x7f11307f, 0xada0815a, 0xafca7f7f, +0xce48b124, 0x7fd94e81, 0x2edc81a4, 0xaa098a97, 0x14030263, 0x97813a7f, 0x8181d97f, 0x2581c9e6, +0xe57f7f05, 0x16880f81, 0xa6e88184, 0x818490a2, 0xd5a87f94, 0x8b81a081, 0x38813781, 0x14a59a5a, +0x486b8c40, 0x893449c7, 0x1f03d659, 0x81bb2831, 0xe0d8c66b, 0xa1bb7066, 0x1b7f8c71, 0x07815581, +0x3820cb84, 0x8d3926b5, 0x81f27fd2, 0x46ff3a84, 0xfddc057f, 0x16d21ad1, 0xdbc27852, 0xf9817f34, +0x7f81397f, 0x6c0f8101, 0xf22a100c, 0x7c998171, 0x4b68bad6, 0x7f588114, 0xc3465c81, 0x439e037f, +0xd9817f52, 0xf1bd5594, 0xf93f9f81, 0x39447f7f, 0x46dca006, 0x93f30f7f, 0x6e215e81, 0x092ffdc3, +0xad49e8a0, 0x7f4e7fba, 0x9b9c4fc6, 0x8131f77f, 0x0700b27f, 0xf0de61ab, 0x258181fb, 0xd8e8bb56, +0x36932155, 0x9c7f2981, 0xb2bfe055, 0x7f0095b9, 0xaf73327f, 0x70817fbb, 0xb3a281d6, 0xfac08a53, +0x24e4727f, 0xcbb5dbbb, 0x38be99fa, 0xbcb94e7f, 0xdf2653d0, 0x93df5efe, 0xfcdc697f, 0xe754909d, +0xc6810060, 0x7f0ce4d1, 0xaec4a5ca, 0x4a7f81bd, 0xc17f817a, 0x7feecc3f, 0x2981285d, 0x3e0e6e81, +0xab81814f, 0xbf27187f, 0x12409322, 0x1c9dc451, 0xc48e03e7, 0x64013b81, 0xb5847aa7, 0x350a1b84, +0x27458181, 0xa1237f61, 0x9714287f, 0x817f8170, 0x1a7f7f09, 0xf07fecc7, 0xd686d4c9, 0x9abd7fa9, +0x449dc0bc, 0xc1177f9d, 0x7f547f9a, 0x7fbd7fe3, 0xcab19d2b, 0xc981b27f, 0xdc8ba548, 0xe95304a6, +0x22225820, 0xd29262ec, 0xf27f9f78, 0x1b94427f, 0x4738a436, 0x7f814fe0, 0x52817f7f, 0x1d818181, +0xf8d92d89, 0x24817fa3, 0x66d6aadb, 0x4e55b9ca, 0x7f81de8c, 0x7f814e64, 0x24141e31, 0xa5232778, +0x34e27f81, 0xdc87c78f, 0xc016dfb1, 0xce91829c, 0xe662c230, 0xda74e681, 0xcfb57bb8, 0xf18179ff, +0xcc818481, 0x4736ed45, 0xcc6ee881, 0x3a815a81, 0x81b77fb2, 0x56816f2f, 0xe82bcb89, 0x3875ad5f, +0xe05197e1, 
0x1a7bc17f, 0x31a3f841, 0x7f90a77f, 0x1df3b910, 0x583f815b, 0x257f814c, 0xee7f7f8e, +0x7dbdc37f, 0xb6810a1c, 0xce816d85, 0x459bf72d, 0xd4fe0481, 0x0d49024c, 0xbfa3a055, 0xed9baf30, +0xf27f7f7f, 0x7dd24f9b, 0xc481817e, 0x7f899052, 0x074d5bdb, 0x2335c3fe, 0x00759f7f, 0x251bacbb, +0x147fc935, 0x0b180994, 0x9f7fa637, 0x387ff150, 0xcbac8967, 0xf37f60cb, 0xe4c216be, 0x7829816a, +0xd7b89981, 0xf8ae2592, 0xbeca818c, 0xf3817a81, 0xc681017f, 0x2f2f2e8f, 0xfe577f1b, 0xd503d778, +0x2d7f7f67, 0xb281c0f0, 0xf7d7812c, 0x817f9598, 0x1af6fb36, 0x157f3781, 0x5e31a27f, 0x038181ae, +0xf9fb81ad, 0x7a8eec81, 0xc8652e7f, 0xc67ba487, 0xbfce7f7f, 0xdf02a27f, 0xf9c1813d, 0xcaac0f90, +0xdc448181, 0xb17f921f, 0x99be8181, 0x7fc09993, 0x11ad1bc3, 0x5c7532bf, 0x25a8ce28, 0x0ab6a823, +0xc3c29055, 0xad55b6ea, 0xed81e0dd, 0x8caa6553, 0xff817fc8, 0x7f70d513, 0x433671dc, 0x5db37f6d, +0x7f318170, 0x55bd4dd5, 0x00c181b7, 0x41c3bb90, 0x441a818e, 0x84e78140, 0xad8a2c81, 0x2bc38158, +0x82654c81, 0x29d6600e, 0x227f7fc8, 0xa43d577f, 0x2b8364a3, 0x144e2a57, 0x07817f1c, 0x4a8171cd, +0x47d5a981, 0x3c7d0781, 0xba636c7f, 0x2c7a7f7f, 0x90cf187a, 0x6b4c3671, 0x3a2c436d, 0xef81de81, +0x4a828117, 0x814dabc0, 0xf930e17f, 0x4c3265c0, 0xe5ef7a7f, 0x819b8504, 0xd5427f81, 0xd881e992, +0x946a45fe, 0x310162a7, 0x14819c81, 0x7f817f4a, 0x552fcf42, 0x7f707f18, 0xb9dc4881, 0xa17f81c2, +0x9e476d98, 0x7f7f7ff0, 0x37662d4e, 0xdb7f99cc, 0xf9fe8cc1, 0x1f4c7fd1, 0x3588817f, 0x547abea9, +0x356e7f7f, 0xab1e857f, 0xda0e7f5c, 0x9d777f7f, 0x451f1bb6, 0xa82e087f, 0xb71d347f, 0x0916d881, +0x177e817e, 0x7f5a66a4, 0x237f2e6e, 0x816dafde, 0x2f8124c1, 0xcee67158, 0x067f667f, 0xe1258187, +0x3a3165f7, 0x1f7fac81, 0xf636364d, 0xf2818e36, 0x0b9e207f, 0xed2d7e81, 0xaa6e2281, 0xcb50b6a3, +0x237f72c6, 0x95819fa0, 0xd7d0c888, 0x39988141, 0xc8b07f8d, 0x5a43c942, 0xf3cd687f, 0xa2e77aec, +0x9909bb1b, 0x7ff8e681, 0x5d39837c, 0x5625817f, 0xa9738110, 0x8b604bb2, 0xf3ffa881, 0x5f359eb5, +0x07bf7f49, 0x7f63c67f, 0xc9db7fcb, 0xa65881c5, 0xf2818130, 0xa5817f81, 0x26b4174f, 0xc06f817f, +0x3b8181c2, 0xbfeb4d6f, 0x9e857f91, 0x58a9a1b7, 0x67147fdc, 0xcd257081, 0xb78d4081, 0x3749ebb8, +0x22ac7f89, 0x4af84134, 0xe3f97943, 0x2d8187c6, 0x027c8181, 0x81c1bfef, 0xc32b2267, 0x42257f81, +0xb49b3eb3, 0xa3d47fc2, 0x27ff4d81, 0x69b4a46e, 0x81818181, 0x7fd5817f, 0x00a97d91, 0x10897f7f, +0xd87f0d3b, 0xcbec8b7f, 0x7f814de6, 0x8286209d, 0x06e2bf7c, 0x81e28881, 0xae64afdd, 0xabc5667f, +0xee22bee0, 0xe157d171, 0xb39a5bd6, 0xabb6d1c0, 0x53c19d81, 0x447fdb81, 0xfc086996, 0xd65f0481, +0xc87f4d35, 0x313a12db, 0x5d81a1a0, 0x01231072, 0x327f81d6, 0x7eb1ba7b, 0xe9485dad, 0x5781767f, +0x047f2481, 0x817f5b50, 0xda064bd7, 0x7ff07b3c, 0xfde4a68a, 0x7f83816b, 0xe67f7f34, 0x1a56f181, +0xe2bbb681, 0xe4687f74, 0xd5a20bd8, 0x81d581a6, 0xb2395342, 0x977f0fa3, 0x81e3b5dd, 0xf1cd8147, +0xe34f48a1, 0x258142c3, 0xe24d510b, 0x46d6816a, 0x2a608ae6, 0x5781ef81, 0xcce97f0d, 0x677f7f81, +0xaa2381bc, 0xbde77f4d, 0xefa38185, 0x81a67f75, 0x6cceb856, 0x4976bb25, 0x3efc977f, 0x86cdf691, +0x13497f81, 0x7fe7730a, 0xd71d8163, 0x8c7f7fbd, 0x1bec817a, 0x8154b897, 0xc71e45b6, 0xbed568c3, +0x256c6514, 0xd481af7f, 0x19ad697f, 0x817f745f, 0xd27fa187, 0xd5e769d9, 0xd816cf7f, 0x1aa4815b, +0x035e7ff6, 0x85de810b, 0x3ea73657, 0x216281b2, 0xe48144b7, 0x3277dfe8, 0xf8c57f20, 0x907f5091, +0xf481b081, 0x9d8152cf, 0x0e6a81c0, 0x7fff7f06, 0xf8884d81, 0x687f81b3, 0x2657ba35, 0x527fc24c, +0x9945694c, 0x1faa4e5b, 0x37819334, 0x57f52a7f, 0xec7f817f, 0x817fa0a8, 0xe3a8818a, 0x16beacdb, +0x2ad2b97f, 0xb981245b, 0x359a8181, 0x7f97265e, 0x9de4387f, 0xed7f058a, 
0x0e817f81, 0x4e69bdb5, +0xc5e38552, 0x77482e81, 0x6c817542, 0x447f2990, 0x07b1f9dc, 0xc4247f82, 0x077f7f81, 0xe58bdca6, +0x55e07f74, 0x0e7fe47f, 0x04883e81, 0x4981f87f, 0x7d7fce29, 0xe07f7d7f, 0xee817fa6, 0x7262da81, +0xe158813b, 0xe35e41b9, 0xf17f6d65, 0xef7560ea, 0xae408493, 0x97812300, 0xf29f816e, 0x557fd5aa, +0x328bb778, 0xa27f7f25, 0xde7fb5cb, 0x03577f0b, 0x577f18a5, 0xb9816d20, 0xe28d2a7f, 0x248c7fb9, +0xff5fb965, 0x93b27f9c, 0x2d817581, 0x41958195, 0x3a7ff044, 0x7f7f6faa, 0xdb81b784, 0x5081603d, +0x363ff38d, 0xc747d8b3, 0x537f812b, 0x8176a97f, 0x1d8181d3, 0x81811929, 0xe893d4c7, 0x3c7fcf7f, +0xf6385c81, 0xaa63b42e, 0x81c3a781, 0x488142a7, 0xcbef89be, 0x3f748159, 0x50c52932, 0x298198d7, +0x14c68e7f, 0x7fd8a798, 0x085a632f, 0x7f1d7f7f, 0xb2e34289, 0x88c3b262, 0xb044817b, 0xbf7f238a, +0x5ae6b554, 0x9e833081, 0x607f985d, 0x0203226c, 0x333f63fb, 0xc77f8181, 0xe8297f7f, 0x9dea7f33, +0x6f2e1f54, 0x2e41327f, 0xff0f7f81, 0x7ffa7f30, 0x343f74a6, 0x81f59f81, 0xaac67dbb, 0x6f81da1f, +0x79ec8153, 0x78eae674, 0xec7f6e71, 0xa46a48bf, 0xe5810efa, 0x0f41b9a7, 0x3a1124a5, 0x5ff97f7f, +0x0bd5be75, 0x7f8181f0, 0x242f409e, 0xf47ac606, 0x1e96d94f, 0x43a66a81, 0x18fc815c, 0xa51a7f7f, +0x1185d27f, 0xa00c810f, 0x078ae481, 0x71d7866c, 0xd47ff796, 0x813cf240, 0x45814d48, 0xbc6c3618, +0xa8b937ea, 0x838e7f7d, 0x19b3817f, 0x7f73be95, 0x00fa46b3, 0x4c43655a, 0x20f67fc2, 0x98fdda7f, +0xe879817f, 0x4c9e7f2d, 0xcd7fefc2, 0x9681a909, 0x1e59e7b3, 0x35818136, 0x9e3a977f, 0xaf7f207e, +0x9633227f, 0x44450e81, 0xc2c25f7f, 0x7f1a3f81, 0x08acf783, 0xd083a838, 0xd2627f59, 0x6d327f32, +0x362c2b5e, 0xee8e507f, 0x48b431c5, 0x00816381, 0x1e6c1150, 0x7df48149, 0x36f94f81, 0xc5667844, +0xf7cbf52c, 0x520081b8, 0xc6967f7f, 0xc88981b8, 0x328181ee, 0x697fa9f2, 0xc99a7f81, 0xb5a08181, +0x1d74dc89, 0x643ab5cb, 0xeb81f9be, 0x987f277f, 0xad38747f, 0x61778c52, 0x59817de7, 0x05ae8144, +0x217aa1de, 0xc0817fe9, 0xa1a6b4dc, 0xece08189, 0x4118207f, 0x9a8181a8, 0xcf0f81c9, 0xba81d081, +0x138181b8, 0xd537957f, 0xfc7f9b8e, 0x7fc9607f, 0x16c085fe, 0x81fa7fb6, 0xf7847f66, 0xc20ee0fd, +0xf983ec97, 0xce209f7f, 0xe2815922, 0x39b07aa8, 0xd3a82353, 0x6ee38287, 0xc60d7f29, 0xcf478f57, +0x594d731b, 0x0e8c42fc, 0x6a702ce3, 0x7b81b301, 0x3181a450, 0x7fe94481, 0xe581b66f, 0xd87f7f2a, +0xc1b385ee, 0x81816964, 0xbb7f5093, 0x8151817f, 0x51e3547f, 0xb3fa7549, 0xca81b044, 0x258143bf, +0xb18181c8, 0x7f54eb08, 0xd4c9db57, 0x68a981b2, 0x4ef17f69, 0x917f977f, 0xda38865e, 0xe97f3e5f, +0x0bbc63dc, 0xec34daaf, 0x1dc38186, 0xb381810e, 0xcbacafd2, 0xdb3ec56a, 0x22817381, 0xbc44cc4d, +0xd681d0b0, 0xe614c8a9, 0x53577f7f, 0xdabc3012, 0x353ecdc2, 0x81728179, 0xb660cf7f, 0xef49ca49, +0xc645dd7f, 0xd16a7f81, 0x1fb99d7f, 0x7d36d57b, 0x04ce7f46, 0x602adede, 0x1142cbb1, 0x60de427f, +0x29814ccb, 0xe9ad7d99, 0x127b2881, 0x5c5aa08f, 0x38c0db9f, 0x7f76d981, 0x63b32381, 0xdd41ce81, +0xce14e57f, 0x74ed7398, 0xdd81e6eb, 0xb6f2d581, 0xa581b77f, 0x468c81d7, 0x5f2d88b8, 0xf481fd21, +0x6ec1e381, 0x71568485, 0xcbfa7f6c, 0xa02e0c9b, 0xd9a09a7f, 0x813d8176, 0xcf43c52b, 0x31a8bca1, +0x2432dce5, 0x78277f94, 0x474e592e, 0xdefb7f81, 0xff147f51, 0xbb7f3858, 0x25f1e591, 0x36fad833, +0x0ac87f39, 0x815d9781, 0xaebaaa81, 0x4981815f, 0xd7817781, 0xb44838b5, 0x32626981, 0x417fe2f6, +0xdf638c92, 0x8624067f, 0x4432e69f, 0xd8688a7f, 0x06815215, 0x25bbbb7f, 0x5b5b6f63, 0x417b5b81, +0xcc4a7b7f, 0x6ea47fd3, 0xe866b481, 0xc630c9aa, 0x083781ed, 0x69de817f, 0x0381f9f2, 0x84812ce2, +0x027f4dd9, 0x7f5a9e81, 0x00ea319c, 0x222b9a9b, 0x34a143f0, 0x7fefd1d0, 0xb4947981, 0x11474bc1, +0xcd44df7f, 0xa2878439, 0xd6f040b9, 
0x6f86817f, 0xc67f7f81, 0x7f693443, 0x8d21b1ce, 0xdab1aed5, +0x3f17ae71, 0x45b7ba7f, 0x05229255, 0xb181d8f5, 0x688681e3, 0xea81e181, 0xf57f817f, 0x7fdc5881, +0x6e817f7f, 0x817f537a, 0x3987247f, 0x300a7f05, 0x3128057f, 0xe731b77f, 0x4881b50b, 0xb86dae65, +0x00c015a0, 0x81397552, 0xb9bcab24, 0xf5818181, 0x57d18190, 0x81c3648d, 0xed813a67, 0x4c5d3a06, +0x3581a4e3, 0x64db6467, 0x21af7d27, 0x5adf7f57, 0x66d9c99f, 0x5b183375, 0xbf4c41e5, 0x577f7ff0, +0x4e818181, 0xf295493c, 0xd2c28170, 0x5c3a7f8e, 0x09c18181, 0x7f7f7f9c, 0xae675fa9, 0xf6d781a4, +0x66ba10dc, 0x7f882981, 0xfe7f7f7f, 0x7f7ffea4, 0x2798d845, 0x899cb6fb, 0x4982bcd9, 0xc0815ab0, +0x558181aa, 0x52110702, 0xd179e3c5, 0x482923bd, 0x187f8181, 0x81b3f6d9, 0xfd96a581, 0x2c5d1a81, +0x6fde23dd, 0x9e815063, 0xfba49bcc, 0xfa775f4d, 0xb1b8b681, 0x81894f1b, 0x3c8163fa, 0x2d7f3d81, +0xc57f6681, 0xbd7f7e81, 0x137f7f81, 0x6b3d6f81, 0xe5e2610d, 0xb4683186, 0x7f6e817f, 0x90c12abc, +0x13a3502a, 0xdddeaa8c, 0x07a67f79, 0x1a5481ce, 0xb87fb01b, 0x0a4b1e7f, 0xd77ffcb7, 0x03535781, +0x5a5f697f, 0x02ca7966, 0x379638bb, 0xc507c941, 0x5e7fdf8e, 0x7ce0c99b, 0xa162ccfd, 0xff8196cc, +0xd43aa081, 0x67798185, 0xd6277f2e, 0x7f6dcb7f, 0x217fcd76, 0x2c88fb91, 0x06815b8f, 0xe37f7dae, +0xe937257f, 0x648181ae, 0x21652c4f, 0xbf81685b, 0x36abe921, 0x3cb5de30, 0xa88e8162, 0xd87f812b, +0xb981d14d, 0x75c181a7, 0xf7c08aa1, 0x57904e7f, 0xf6f688d8, 0x7fcf6281, 0xd33dbf7f, 0xc07f9a73, +0x197f8135, 0xcabf8bc6, 0x3f81c37f, 0x487fac4f, 0x8ddc924b, 0x50257f7a, 0xf75e1496, 0x28d2d48f, +0xcd91817f, 0x2a7f81b2, 0xbd7fb67f, 0x3b877fe1, 0x407fd8fd, 0xbc815f20, 0x7f50e581, 0x8bdfae4d, +0xb1eb7f81, 0x07819f40, 0x3765274c, 0xde7fd081, 0xa0817f7f, 0x8dbe5081, 0xfa436f7f, 0x4848463b, +0xc5c45cdb, 0x7f8b1865, 0x5bf381b6, 0x45aa497f, 0x7f813694, 0x54f15918, 0x72886c49, 0xe3fb31b0, +0x0db1a582, 0x4aadce1b, 0x39926d8b, 0x9a30a85f, 0x1b606181, 0x1c6d165b, 0xfb057f4b, 0xcd7f81ec, +0x12bf7ffb, 0x7f5f91d4, 0xca8158b7, 0x86134234, 0xf0f17f3c, 0x7f4e05c4, 0x34814f7f, 0xe5c1106a, +0x2d50c67f, 0xb57fa18b, 0xb7816481, 0x7fffaadb, 0x607f817f, 0x5048ea81, 0x82495d02, 0x17819b7b, +0xaba67981, 0xb5817f7f, 0xee57a2b6, 0x3591a602, 0x2885a153, 0x429ba160, 0x757fd0f5, 0x344e9d7f, +0xc77a7f8d, 0x2a7f8160, 0x3198b27f, 0x81c552bd, 0x0374e058, 0x31605de1, 0x40d0817f, 0xb281a37c, +0x156ce27f, 0x91893090, 0x06f58181, 0xc4487f6d, 0x0a7f7f9d, 0xc3a17141, 0xc03c39c5, 0xb581b2b5, +0x526a7f7f, 0x7f7f7f4e, 0x7fec7f7f, 0x95cead7f, 0x161ba8d2, 0x8ad0d081, 0xe4f19557, 0x5bb6fca9, +0x2d813a6d, 0xddbb7f81, 0x1cb1e281, 0xc2b43734, 0xd07f4641, 0x7f81bb39, 0xae9b1b81, 0xbe817f36, +0x1caf7f81, 0x7f704922, 0x108f777f, 0x81662d7f, 0x479ca35b, 0x818513c2, 0xa41d8607, 0xd081b235, +0xf6817fcb, 0x7f015960, 0xadb15f5b, 0x81be2abf, 0xf2b0e29f, 0x81819224, 0x38a7407f, 0xc9817b09, +0xf307556c, 0x7f818181, 0x0281817f, 0x3fce810c, 0x18b47cd4, 0x81e2a53c, 0xe359840f, 0xe27f81ad, +0xa1665081, 0x7ae062b6, 0xe0b27f35, 0xcd376e73, 0xd9a6d373, 0x817ffe7f, 0x02810946, 0x06498116, +0x9b7fbb1b, 0x8d2b812a, 0xff14f781, 0x937de081, 0xb77ff44b, 0x4b2732c7, 0x0720bd96, 0x9e647f13, +0xb831d474, 0x811d8183, 0xa5db837f, 0x8aa1b04a, 0xd4377cfd, 0x7f814241, 0xee8fec39, 0xcc81d881, +0x2e29d770, 0x7ff76366, 0x7fbd363b, 0x74bbd13e, 0x2e817192, 0x9f6b81db, 0x44913bb3, 0x327f0de1, +0x2d7f8881, 0x54f2afaa, 0x107f513c, 0x4c8191d1, 0x117fe67f, 0xffa21764, 0x2c931727, 0xd09ba693, +0xf74e4882, 0x51ed8bc6, 0x3a23d683, 0x47437f70, 0x7fad8181, 0x9a610813, 0xa57ff47f, 0xbe1b5b9a, +0xf4bf8c33, 0x43074944, 0xe4ef729b, 0x1e7165d6, 0x74e081c6, 0xe281810c, 0x2a4b81e4, 0x10ede221, 
+0xc8375d7f, 0x81c07f81, 0x49cb7f83, 0x5f589c22, 0x276441a9, 0x7fe67f0b, 0x4d7f1d7f, 0x1872814a, +0x05b6a577, 0x8140fa5d, 0xe47f9bc3, 0x44166064, 0x73896729, 0xb5c0867f, 0x247f445f, 0xccc92994, +0x29611b37, 0x27531258, 0xeac27f9c, 0xd43f4e7f, 0xc27f170f, 0x6d9bfc37, 0xfc819181, 0x077f598a, +0xe0c5d398, 0x7f23182f, 0xed171743, 0x20bff85d, 0xb0d0be7f, 0xb8938149, 0x45017f32, 0xcf2cc65d, +0xdf7fc481, 0x7f380777, 0x81b8f0dc, 0xf6696596, 0xff007f29, 0x896e1abf, 0xfaac2d81, 0x5bc357ba, +0x6a397f2e, 0xbfaeb45a, 0xd64e2b7f, 0xe49b4587, 0x565c6c51, 0x7f206a7f, 0xc381540b, 0xfc3d7f73, +0xd4ec9f39, 0x887b7f27, 0xe3c1e07f, 0xb721523e, 0x0310819f, 0xc0c793ce, 0x387da141, 0x814f61ac, +0xd1347fbe, 0x19244c1f, 0x8d3d58f9, 0x813a7fc9, 0xfae27f54, 0x7f39f481, 0xe37f818d, 0x034632d9, +0x5b06c07f, 0x65ab8b36, 0x127f34b5, 0x18a681ad, 0x73510e20, 0x2f2ffa85, 0x6814ca9e, 0x5b7f7ead, +0x08da93da, 0xc826da54, 0xc1686fd2, 0xb6f00fbc, 0x28bb5dbb, 0x7fe5297a, 0xb5ed8181, 0xccd9067f, +0x177f1561, 0x71b41a19, 0xd9817f7f, 0x505b50c5, 0x309dc978, 0xb97f4f81, 0x4e81c219, 0x2d81d04a, +0xdf3d5fa1, 0xfc6e5c30, 0xf7869481, 0xfc81e47f, 0x67b5817f, 0x7f72577f, 0x2c938136, 0xd576ad7f, +0x63beb36f, 0x7fb3817f, 0xce56ca81, 0xd801e58d, 0x425f2522, 0x8a25817a, 0xdc7673c9, 0xd7b57fca, +0x927fbe2a, 0x149b9881, 0x9ad781a9, 0x214b9d4d, 0x2dae1287, 0xfa772ca4, 0x3b7fdd35, 0xf59e4174, +0xad3a28f5, 0x8144698d, 0x1ca792df, 0xdb7c7fd1, 0xe96d497f, 0x567fa7d9, 0x489b70cb, 0x3442de7f, +0xa2554c37, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x4dc76c95, 0x29b78ec8, 0x3d927f7f, 0x307f93c2, 0x8181da7f, 0x99f741df, 0x6e1d417f, 0x1bbda834, +0x954b5a9f, 0xc6817f07, 0x7fc01005, 0x3b5a81e4, 0x73cd7f5b, 0x7f817f3c, 0x8e7f812b, 0x59d48a03, +0x982475ef, 0x65a1c068, 0x400cc79b, 0x81f231fe, 0x840329e0, 0x1f6054eb, 0xf8813d3a, 0x7f4a9296, +0xbff677c5, 0x7f3c6f0c, 0x7f2890cd, 0xe0aaaad6, 0x81ae9428, 0x7f8117f1, 0x7f097f88, 0x7f27cfcc, +0xca67205e, 0x815f7f37, 0x7f0ac07f, 0x717f95d2, 0x7f9232a7, 0x2bb803d4, 0x3c12c7e7, 0x937c81ea, +0x7f1cdd3b, 0x0622b6c0, 0x62d71247, 0x811cb00b, 0xaef9e981, 0x0f818ff5, 0x740e6f53, 0x30616410, +0x81fed3e0, 0x49167f3a, 0x4577315b, 0xe49130fa, 0x817c7f23, 0x7ffef7ec, 0x4fb47f17, 0xb365b1f8, +0x81a09c38, 0x7f818134, 0x9568e717, 0x4ead1921, 0x8ebd9806, 0x7f911cf8, 0x8c7f5efc, 0xf29114a8, +0x90bc6ae9, 0x0b5f26c1, 0x81bc9281, 0xf0fb5532, 0xc89ab77f, 0x7ec0b9d4, 0xf00bdcf7, 0x7f7f7a24, +0x097ff51e, 0x64d62dfc, 0x7f7f819e, 0x34c05cc3, 0xa2957f19, 0x81101e05, 0x7fd68181, 0x1dff81af, +0x7f81dc24, 0x7b3c6769, 0x7ac490dc, 0x814032c5, 0x9d81747f, 0x7fb46342, 0x72607faa, 0xd40c8161, +0xa5ab574f, 0xf0838bba, 0x33fd7f47, 0x289f81be, 0x817f480f, 0x4bfa552d, 0x8ca1d5d5, 0x817f492b, +0x418125e0, 0xe45d5af0, 0x811b1751, 0xe04d81f8, 0x7ff2917f, 0x65811a53, 0xe65da37f, 0xbafdd439, +0x53818189, 0x752c7ebb, 0xc301567f, 0xb0816f27, 0x708166db, 0xc0b57f49, 0x3a818169, 0x4f16189c, +0x13d6e83b, 0xd258a20d, 0x7fadbdb2, 0x7f93560d, 0x81fd5399, 0x6014dab2, 0x9cf45f1b, 0x107f71d5, +0x352e8109, 0x5a5d6efa, 0xb458ef47, 0xb4a96098, 0x7ab91c37, 0x81bd762e, 0x816cb0d7, 0x3f9819d4, +0x8113b97f, 0x7f09fa33, 0x9e7ffa99, 0xe98181ce, 0x4ffb3064, 0x7f0681ea, 0x81ef06ad, 0x385abdf5, +0x1414457f, 0x18dabc5a, 0xc27e0d7f, 0xd87f8115, 0x703f7fcb, 0x300b7ff1, 0xb081f55d, 0x024d78f8, +0x72949a16, 0x90603c38, 0x7fba6fd4, 0x4820e44f, 0xd57f81af, 0x9c7f7ef9, 0x61c6b27f, 0x81b6141f, +0x7c52514a, 0x5f817f78, 0x668119a7, 0xe441aa7f, 0x53819553, 0x7f721521, 0xbd7f7f17, 0x593081de, +0x8b252de8, 0x7f7f81c1, 0xaca9436e, 0xb5e0b104, 0x81e49d81, 
0x75b48102, 0x5c278118, 0x9f6c36c9, +0x509481fa, 0x53d1e640, 0x52108181, 0xeb7f7fff, 0xde417381, 0xa781cb33, 0x4fab55f4, 0x3e7d75f1, +0xf17f0c1a, 0xe8e1817f, 0xcf1f7f81, 0x96d8ba40, 0x1318fc72, 0xd7ae3d1d, 0xf7e88126, 0x32bfe432, +0x453f694b, 0x7f7fbab6, 0xeae7de30, 0x3d81d7d1, 0x438d9cc7, 0x387f4a05, 0x7f46ab81, 0x635bdf10, +0xc281ae24, 0xc5c67fc4, 0x81acb53d, 0xba4f4d04, 0x424f72cc, 0xbf7faa1f, 0x55de1de8, 0x81429a1d, +0xdec94147, 0xff007f42, 0x3fa74821, 0x0cb74e9c, 0xff81627e, 0x6343810f, 0xf7393529, 0x7f44ad81, +0x7f811f39, 0x81e981a0, 0x8c8c4421, 0xc4819190, 0x81816881, 0x7f7fd234, 0x3688c16c, 0x9c34b357, +0x6046e3f6, 0x86123bdc, 0xbd45957f, 0xe6cd777f, 0xe0951ce7, 0x9b7f63a6, 0x817f29c1, 0x7f5a81ca, +0xde7ffcbe, 0x812c3207, 0xa099537b, 0x2a2f81d0, 0x16817f9a, 0xbb81817c, 0x7f81e538, 0x957a4200, +0x81753bab, 0xaf63a506, 0x3081691c, 0xb0a47f2c, 0x0f157feb, 0x8e4330fe, 0x635b8189, 0x7f81ee34, +0x7f40caae, 0xbce88e05, 0xabb52199, 0xd2ba102f, 0x7f7f8173, 0x810b32ab, 0x2e222781, 0x7f819951, +0xfbc73dca, 0x5d81d181, 0x7fb09d81, 0x7faa3098, 0x7e81435b, 0x61e995d6, 0x7f7974f3, 0x7fef81f9, +0x402faba7, 0x7a647f3c, 0x815c6826, 0xba2b2ffe, 0x7f095b7f, 0x7f9236b9, 0x9953636b, 0x344c64d2, +0xd784acc0, 0x7f818426, 0x61ab43f8, 0xd5813c70, 0x7f817f81, 0xc9867fa8, 0x6f4b4181, 0x3881b1df, +0x69813f27, 0x81d0780c, 0x17c84a36, 0xbf908ccc, 0x7f5e812b, 0x317f7fea, 0x7f817881, 0xf731c537, +0x7fb221e7, 0xbae09bf7, 0xcc5d947f, 0xcfc13926, 0x6e0db67f, 0xdf17be28, 0xcaa4ea2f, 0x9870810f, +0x357f701d, 0xa281815d, 0x91497468, 0x5fbcbeb0, 0x7fd54d81, 0x7d5e91f3, 0x8d6555f9, 0x35d416e3, +0x9a7f8c11, 0xd875779b, 0x6b7c6681, 0x24f77f50, 0x288bd781, 0x7fad2b50, 0x637f1a23, 0x527f2ea7, +0x277f7fc3, 0x819c13cb, 0xe1bb65f1, 0x48151c1b, 0x8e7fd27f, 0x7f7065ac, 0xfe0bb14d, 0x588e7f0c, +0x819d7f34, 0x81c455a5, 0x4f6b7fae, 0x7fb56506, 0x7fcfd5a2, 0x3e6981dd, 0xcc814569, 0xc5fb811a, +0xc0e07fbc, 0x817f8153, 0xac356315, 0xc94db6d8, 0x7f88e57f, 0x81e57fd0, 0x7f1f6c0f, 0x78d581e1, +0x7f2137f6, 0x1c57815a, 0x627f55c0, 0x69e4dbcb, 0xa68181c3, 0xb9907fea, 0x55897fa0, 0x7f54a91d, +0x3e7bb314, 0x7f167f5f, 0x75cc8181, 0x177f2fd0, 0x6d39f7e9, 0x7f817fd3, 0x7f6eb0ea, 0x817fc7ad, +0xae5b5b04, 0x86cd281f, 0x3528fe7f, 0x87217fff, 0x87860479, 0x8b7f4510, 0x7faa816c, 0xb9d62c33, +0xdd1c6ccc, 0xb48104fd, 0xbf508f81, 0x34994944, 0x8181b843, 0x74e47f47, 0xc6baf38e, 0x624b06cc, +0xfdadfaeb, 0x948189ac, 0x7f7f992d, 0x813edf00, 0x81c3587f, 0x5eb2819f, 0xcee54cef, 0x277f38d9, +0x8a187fb6, 0x9736ed05, 0xed70487f, 0x4f307fa9, 0x81ba975e, 0x81dd81ed, 0xd0da7f88, 0xf24881cc, +0x12e656d0, 0x7f9b6a1f, 0xcf7fcf7f, 0x78787ebd, 0x7f8319f2, 0x7c7fe9ed, 0x29b381ae, 0xfa4f81cb, +0x13cc81e2, 0x9a2aa0a8, 0x817f8112, 0xfda22318, 0x7f65e85a, 0x727f6812, 0x7f814b0b, 0xcfe2bcd5, +0x6b816ce3, 0xb2965ab7, 0x819981bf, 0x73799635, 0x82854b71, 0xbc3e8132, 0x91c714a2, 0x81817f2e, +0x2cc55af1, 0xb87fb038, 0x7f7f1718, 0x02ba14fe, 0x5e348128, 0x817ff97f, 0x81d03daa, 0xde37813b, +0x443781c0, 0x0e4e32e2, 0x85a7ad81, 0xba276c16, 0xb61ed9c6, 0x81ca27d3, 0x7f762e7f, 0x4181d581, +0x7f7f5508, 0x6eb1b250, 0x5ad47f4a, 0x7fd56756, 0xbc817f81, 0xd481a9ba, 0xf8a74db3, 0x7d1aae25, +0x81397f36, 0x7f62a402, 0xab3b3081, 0x4c45c301, 0x3eb57f88, 0x6d2e7fdd, 0xeb1ac681, 0x813881c0, +0x7fd127b4, 0x67e57f90, 0x4fcb6d3a, 0xc566afe7, 0xc58bdb7f, 0xe0486a7a, 0x363f8181, 0x7fdd8191, +0x67149cc2, 0xfe7fcf00, 0x7feeb9dc, 0xccabbd95, 0x7fab347f, 0x98eda0d8, 0x7faf81a3, 0x3b8c7f0a, +0x1c387fa7, 0x7f055ed9, 0x567fbd7f, 0x4d7f7244, 0xbb8175af, 0xef7f7f5f, 0xa9a58119, 0xada9eec7, +0x592da3bd, 0xb5a98177, 
0xe17f813e, 0x94816af7, 0x69ec7fc5, 0x7f29dd4a, 0x9481c981, 0x5d3198da, +0xf84e81bb, 0x41817f7c, 0x7fb31b60, 0x7f7f5e03, 0xea81c490, 0x7681571c, 0x9b54b581, 0x812d993b, +0x7fefd381, 0x7f907fe3, 0x7f81a862, 0x758181bc, 0x46d6507f, 0xae81a50a, 0xd1562a1d, 0x3dcb58d2, +0x489e40fd, 0x7f8fa98a, 0xb07f7f81, 0x238b7b40, 0x49686384, 0xca817f4f, 0x607f7fd7, 0x3a0d81ab, +0xb0fa42e3, 0x7f81563d, 0x829a817f, 0x51d48167, 0xcc817781, 0xd54f5638, 0x7fee1218, 0x81a49e22, +0x0081ec2f, 0x5f5eecf5, 0x6bbc9ce7, 0xa28138d6, 0x86811081, 0x7f0283cf, 0xfff67fb7, 0x81c2814a, +0xc7817f52, 0x85818146, 0x8781a77f, 0x8181e9f6, 0x4731f881, 0xf51a35d8, 0x236931b2, 0x6eaefadb, +0x7f81c61c, 0x947f3d7f, 0xbcf43b52, 0xd5814849, 0x17b6e07f, 0x7f6a81b6, 0x817f8163, 0x817f595f, +0x7fa5ae33, 0x2b63a90a, 0x77627e81, 0x81816660, 0xae7f6281, 0x26c6dedd, 0x757ff9c4, 0x7f7fce30, +0xab9d7800, 0x68fc444b, 0x3d7ff681, 0x93264a1d, 0x81361db5, 0x817fb6d3, 0x1681136f, 0xc5655027, +0x792781e2, 0xeaada9c7, 0x34d1817f, 0x81687fc6, 0xa88d451a, 0xdce97f0d, 0x443935cd, 0x810a68b9, +0x73ef4023, 0x66372979, 0x9a31076e, 0x81813dee, 0x56b11a7f, 0xc76a81da, 0xe0e48181, 0x6681b8b7, +0xa99858ec, 0x7fb38de5, 0x7f6f817f, 0x92168381, 0x53819b02, 0xc3c27f2d, 0x5299d77f, 0x81e47f60, +0x687ffbc6, 0x7f5f9df9, 0x0ab4c09d, 0x44856927, 0x5e63405f, 0x7f9fe864, 0x8c884d25, 0x794dd01f, +0x7f81fb68, 0x31bed5f5, 0x3e658638, 0x3e7f8151, 0x7f7ba8e4, 0x7f304a8b, 0x37c18157, 0x7fb2c6fe, +0x7fa39bef, 0xa37ff9a1, 0x29983fc2, 0x777facc9, 0x547ff37b, 0xe6815b17, 0xec5a457f, 0xacade303, +0x7f2603dc, 0x697fcd23, 0x7f7fd042, 0x4a81a54d, 0xd481bf87, 0x7f4e7f0c, 0x0d5c9d05, 0x26ca3c2c, +0x4f817fe3, 0xc9368b0f, 0xaddb8181, 0xc210e075, 0x248f7f81, 0x697f7f11, 0x817f2f81, 0xef697ff1, +0xd98181b9, 0x810a4f50, 0xbb9e045a, 0x8ea3771d, 0x787f6931, 0xfbb9812d, 0xb778f981, 0x7f354b27, +0xfc813658, 0x25818137, 0x7f3402a1, 0x27eabd01, 0x81c18181, 0xfd7fa22c, 0x9784817f, 0x7fa57fb3, +0xd881f3ce, 0x887f60bf, 0xdc816b0c, 0x3890b818, 0xc37fa281, 0x8f4681f5, 0xb07fb08b, 0x82ad24d0, +0xc189ab02, 0xe3fddb8f, 0x7f744610, 0x435781b0, 0x8104f653, 0x41bb81be, 0x9a4c717f, 0xf9d4c1e4, +0xbdc36404, 0x8ff9818a, 0x54d3ab9c, 0x24aec8ef, 0x81ac5bc8, 0x81908100, 0x74f05a0f, 0x0c5b7f2d, +0x5279a3e3, 0x2d8169b9, 0x7fdc4897, 0x7f5f89bb, 0x9f457581, 0xde817fd7, 0x59813d7f, 0x67817fe7, +0x7f817fd5, 0x7f5978ca, 0x23ab813a, 0x70819ce8, 0x41575433, 0xb17fa99e, 0x697f7f7f, 0x60aeb15a, +0x7fa5a9df, 0x39818181, 0x818ea205, 0xbd819fb2, 0x7f0fa491, 0x814981d8, 0xb9c186e4, 0x817f29e5, +0x81afcd17, 0x842f81ce, 0x058b8e1d, 0xea3da29e, 0x379f5ab7, 0x223c84c9, 0x93bcd8bc, 0xc895baea, +0x6b7d3cc9, 0x7f2b8152, 0x7feccb81, 0x7f71634c, 0x810f7f29, 0x89c14aff, 0xc0ce05d7, 0x7aed49e1, +0x817f4d0c, 0xeeaa0afb, 0xd7817f81, 0x6681d76a, 0xa37f4881, 0x38708103, 0x7f8181cb, 0x0a8f6639, +0x8581aec3, 0xe82836d2, 0xda7f4781, 0x40d67ff3, 0xcc335f7f, 0xce207f44, 0xff9181f8, 0xbae52614, +0xfccdf700, 0x9dc19f54, 0x5d7faefc, 0x5bcc2357, 0x817f7fb1, 0x6f5a9dd7, 0x81af5633, 0x810a7f59, +0x551c8a18, 0x4b816452, 0xe981d940, 0x81d175f6, 0xee7f5746, 0x276f51b7, 0x16ae817e, 0x652063ba, +0xd84e5e44, 0x7f385f2a, 0xa2e1b28f, 0x7f817fd2, 0xd8477f34, 0x508f31fc, 0x7e357fc7, 0x7fa07f7f, +0x7f6d7f52, 0x7f717038, 0x7f819c3a, 0x2f973937, 0x71ac7d7f, 0x790950c2, 0x9aa97f81, 0x7f427ffe, +0xf3f974a1, 0x8107812c, 0x817fb77f, 0x4d458151, 0xc93d2395, 0xb497d049, 0x4b955c87, 0x047eaef1, +0xcc6c07cc, 0x5eff94ad, 0x3bed1246, 0x74d8c531, 0x815afc81, 0xaccf6eca, 0xbe4e3f64, 0x079b37b9, +0xa0898136, 0xfe8a81b9, 0xb7df45fc, 0x7f7f7157, 0x725581a3, 0x7f0c56ec, 0xe6430781, 
0x4356def3, +0x54ca7fc7, 0x234f3eee, 0x7f270367, 0x704981c1, 0xfe623836, 0x816d2725, 0x7b9d9e81, 0x3b49d3ae, +0xd2925440, 0x819a7fa5, 0xb025c9a5, 0xbbd77fc1, 0x81737f81, 0xbcb891ae, 0x442ab466, 0x812e41cd, +0xbdb081a8, 0x8e7f5c2d, 0x8458dfb7, 0xd77f8130, 0x9bcbbc47, 0x783a7f5f, 0x27c4d502, 0xadf8cb31, +0xa51e9baa, 0x814d812f, 0x7f1a7f8c, 0x1874f4be, 0x4ac57f81, 0xc7b67f37, 0xc0817ff9, 0x8c81e1ae, +0x6f7f9331, 0xa17f7f9f, 0x7bec5242, 0x81b79554, 0xa27f7f7f, 0x181da812, 0x52e681f5, 0x5c3781fb, +0x895981ae, 0x7f7fa003, 0x9555817f, 0x32391fdf, 0x0c7fff6a, 0xb681981d, 0x1b93a683, 0x7fc3d495, +0x83f5810b, 0x854399a0, 0x997fd57f, 0xe87f6929, 0x7fbafb81, 0x7fe201e3, 0x7f7234b3, 0xb6889ebb, +0x8ea7cdcf, 0xd27fab96, 0x9a562c7f, 0x6c7f7fd8, 0x2277467f, 0xb3108ed9, 0x818d0171, 0x81bc477f, +0x650719f9, 0xba7fe501, 0x7b5796ca, 0x3fc581eb, 0x9a90469b, 0x7f041112, 0x527f7947, 0x8d3d4657, +0x7f627fdd, 0x64053a56, 0x817739c6, 0x812268ee, 0x69ed9b7f, 0x3d7f7f1a, 0x7f2264f4, 0xb76c55f2, +0xd06981fa, 0xa72359d6, 0x3708812f, 0x7f7a4c29, 0x467a227f, 0x685eb4c5, 0xeeeb81c1, 0x2999b5e3, +0x7f7fa0be, 0x73586e92, 0xc7817f0a, 0xc32e7f4f, 0x819b9046, 0x8179acee, 0x29b1a517, 0x557f0ef9, +0x8116589d, 0x557f26f3, 0x861970dd, 0xcb817fef, 0x9bb6ba72, 0xaee4ddfa, 0xd17f81c9, 0x257f76bf, +0xb04b4df4, 0x8f7f8124, 0x818181fc, 0x4fcc8f06, 0x2f7cc181, 0x4900141a, 0xf0709c16, 0x6865b5fd, +0xd9ab9128, 0xa2321900, 0xa1a6de7f, 0x58e47f23, 0x54ac4a45, 0x1af1e618, 0xed633c24, 0xde7f7ff7, +0xd48139b1, 0x5a640dcf, 0x63d9de7f, 0x81669358, 0x4f4571bb, 0x7f7fa692, 0x7debaf85, 0x9d7f7f10, +0xd9c34e9a, 0x708981e8, 0xfc838ac6, 0x430bf6b2, 0x81a1a4ee, 0x7f8181ad, 0x35e98d7b, 0x4e3681de, +0x7f7256e9, 0x7fa3b47f, 0x5a813e7f, 0x8128aa15, 0xff42357c, 0x581e98a3, 0x81534281, 0xe58196fb, +0x11b90c6b, 0x7f81e5e5, 0x7473f57f, 0xa9475b2c, 0x24d77fa7, 0xa27f591e, 0x7fba81a5, 0xc9f19310, +0x3c64d05f, 0x86aca9f0, 0x69b57f01, 0xbb747fb6, 0xdd81e923, 0xbe814a26, 0xe6811b22, 0x7f817fdf, +0x7f81e46a, 0x647f8170, 0xf2812d9b, 0x7f765fa5, 0x812dfe0e, 0x2584ed72, 0x811a7f7f, 0x417f7fee, +0x81d28138, 0x7f8e7fff, 0x9c7e28d0, 0xa71eb5f5, 0x65993e7c, 0x927ff0e8, 0x7f5cd4ce, 0x2886b35e, +0x7f81d3bf, 0xc2d1441e, 0x2f2cbadd, 0x8c9c4cb4, 0x34dba16b, 0xaa37c5c8, 0x5881ec6f, 0xfc7f64a4, +0x81bb8128, 0x7395489a, 0x7f647f7f, 0x44ef7f9a, 0xc3b4d6a1, 0x8693af02, 0x87fab099, 0x7f7f2f05, +0x0191d5f8, 0x0b4281cb, 0x615b990b, 0x554b7f4c, 0x7f7f66a2, 0x817f793b, 0x5681aa97, 0x21aea0bc, +0x81925b0b, 0x81c69839, 0xb115814b, 0x9e81d8b0, 0xc70c6f7f, 0x8f14de16, 0x3f81b4bc, 0xaa0c25dd, +0x81f8346b, 0x6981226b, 0x81b1817f, 0x7f2081e4, 0xa2848117, 0xe4cbcc32, 0x817081d5, 0x817dbf75, +0x157f2614, 0x5a0477d6, 0x8b818105, 0x9e7f34d4, 0x48da3235, 0x397ff6d2, 0xa02f7092, 0x817f8b13, +0x307b35ca, 0x783c7fe6, 0xa2c29081, 0x555b8101, 0xf764ee86, 0x463a8149, 0x577f837e, 0xb1948151, +0xbd2f86f5, 0x6e5fc338, 0x7f67a373, 0x55817003, 0xa1e17f77, 0x8a817fff, 0x49365e44, 0x817f8933, +0x209ba8a9, 0x7f311515, 0x7f519140, 0xa9549352, 0xbd817f8a, 0xafc4a005, 0x620d407f, 0x88302fd2, +0x5cf481f2, 0xbd7fff40, 0x7f7f9cfb, 0x91ae81fc, 0x73f15c81, 0x93814581, 0xc7e8818c, 0xc481a1fa, +0x99818124, 0x7fecdbf3, 0x207d8d39, 0x64818328, 0x7f9dbf5d, 0x2181f9c2, 0x817f7f8c, 0x367c1801, +0x2a019011, 0x815f5d39, 0x81817fb8, 0x7f22815e, 0xb3ceca8f, 0xa6560501, 0xfb548181, 0x2b7ffe18, +0x5394bb1c, 0x7f9981d9, 0x7f7fce32, 0x815ba6e4, 0xabd0d732, 0x7f5c81fa, 0x7f369f77, 0xa42b7f51, +0x900db9cd, 0x9993bde1, 0x7fc8819a, 0x116f9310, 0x8ef767b0, 0x7a817f02, 0x56819b81, 0x29aee511, +0x7f2b8d3a, 0x818a442a, 0x555c3ff5, 0x1d6c4929, 
0x7ea9697f, 0x780b8fa6, 0x097fa426, 0x9b7f89d9, +0x165081de, 0x45a184c0, 0x405c7fc7, 0x6bbcd6d3, 0x7b81837f, 0x33867f12, 0x878f7f62, 0xc6cf1002, +0xad819f7f, 0x3df9422a, 0x77ca9e81, 0x684f7fca, 0x30847f81, 0x81817fda, 0xedc6a081, 0x682c00eb, +0xca767fdd, 0x1f7fb0e3, 0x377f8a7f, 0x5e85d200, 0xb9237f1f, 0xf2817f2d, 0x0dd88132, 0x81cc0037, +0x739b39bb, 0xb47fb83c, 0xdf7f8181, 0x7f54f610, 0xbc9e3f06, 0x5762b272, 0x81ebc628, 0x7f9845b9, +0x7d901f63, 0x81ce7f36, 0x0ebd8e7d, 0x9a81d963, 0x644c3f81, 0x227fbdeb, 0x8e2d5f81, 0x54efc59a, +0x94ed5511, 0x817f944b, 0x38393d81, 0x592f7ad2, 0x75814881, 0x63275d7c, 0x4a65de10, 0xeb818ff6, +0x70f93fc1, 0xbbe83610, 0x7d94beba, 0xfd077f2f, 0x7f818181, 0xbb7f47d1, 0x4c8f7f6d, 0x587fa2fb, +0xbd817f02, 0xae7f8123, 0x4fb981ac, 0x3fd2c446, 0x817f7fa1, 0xb28d81ce, 0x7f813fbf, 0x5a8181a8, +0x2f6355c9, 0x75624fb5, 0x811842d0, 0x70a38146, 0xea52aef5, 0x7fffa3f0, 0x8f7fdcb0, 0xe31d81e5, +0x7fe15a51, 0x21894817, 0x9c810f41, 0xe9428145, 0x7f819edc, 0x7feb1761, 0x81368181, 0x2c71f15e, +0x26813c54, 0xf09c69aa, 0xd17751da, 0x25a1c23c, 0x7f812281, 0x4317b30f, 0x66908149, 0x81869b95, +0x062874c1, 0x843481a4, 0x81743c89, 0x813e9cae, 0xd8397fd1, 0x7b495f55, 0x7f7f7f53, 0xc0b419e7, +0xacf11d1b, 0x90a51003, 0x9efa5bb6, 0x77b8989f, 0x8b1ea17f, 0xc46537c3, 0xcca93d93, 0xa955d1db, +0xc42743ed, 0x7f8a6442, 0x79ab8152, 0xfea48ff7, 0xfbbb819c, 0xa1be81ed, 0xa481da42, 0x477f7fd0, +0xcc1acdfb, 0x767f1533, 0x99cdda46, 0xfb9c8109, 0x7f757f7f, 0x747f88d6, 0xb5404df1, 0x8b69b739, +0xc762670a, 0x8125bded, 0x6481a452, 0x943e5aef, 0x7fe3557f, 0x8181bfc5, 0xb14d907f, 0x7f747f9c, +0x54b4ff28, 0xe19d7fc2, 0x7f25887f, 0x7f6cc0b5, 0x34653ba1, 0xacd70015, 0x7901cc3b, 0xb081815e, +0x812781fd, 0x871cd81d, 0xd1818494, 0xa97fc1ed, 0x81817f86, 0x9f3cf7da, 0x9857bb81, 0x9d1661d1, +0x167f5814, 0x037fd0e4, 0x7f337f7f, 0x7fd40904, 0x376a4a69, 0x7f77a308, 0x73b61d36, 0xb4a318cc, +0x7f9cc3e7, 0xe0d55532, 0xab58707f, 0x9c467f07, 0x8c7f2581, 0x60155a41, 0x7f3fd21d, 0x7f7f59a3, +0x38176d64, 0xf77fd7a6, 0x0ed2ae81, 0x7f81cdec, 0x3c0f81a2, 0x747d9be6, 0x818c5f83, 0x432e7cfb, +0x4a44b96b, 0xc8748423, 0x6b818181, 0xe34914d7, 0x980b64bb, 0x7fbe165f, 0x7f104bbd, 0x5c7f9d02, +0x7f307fcc, 0x8399c1a3, 0x7233316c, 0x92aae7a6, 0x81deda79, 0x812805f0, 0x7f566985, 0xdeffbea8, +0x2bbf427f, 0x7f819e05, 0x90811687, 0x817f81a0, 0xb581c481, 0xa05cfdd4, 0xe4983e52, 0x813e81e7, +0x2b5ba42c, 0xf75aaf0c, 0x4bbc8181, 0x817fa502, 0x1a7f2d7f, 0xe97f0700, 0x724a5029, 0xe3700308, +0xae7fb93c, 0x7f9581ba, 0x819d7f3d, 0x2b2ef875, 0x341c52ae, 0x21b23dfc, 0x6a82a2e1, 0xaa81ecc7, +0xcf817ff6, 0x7fa17fb5, 0x1fc21a81, 0x817f895d, 0x54973cd6, 0x81a8c919, 0x9f4c7f38, 0x89817fd1, +0x325d7fea, 0x959581c9, 0x711638c8, 0x07bb81c4, 0x1bf0818c, 0x346b81aa, 0x5d55817f, 0x7f7f8192, +0x3b7f81d5, 0xe783f367, 0x863038b6, 0x7f53589b, 0x7f4de47f, 0x128165ac, 0x7fa6ccfa, 0xc19f8132, +0xc3b3483e, 0xa63d628e, 0x2d707fd1, 0xd93a1319, 0x9197277f, 0xd77f0fbf, 0x8156da7f, 0x88819a98, +0x3eb240f2, 0x81007fbe, 0x067fe653, 0xd3aa8150, 0xa625819b, 0x767ab13b, 0x937f817b, 0xd08181e1, +0xa47f81b9, 0x3481bd4a, 0x65507fa7, 0xb79181d1, 0xbf1d7d3e, 0xb2818178, 0x8f329d1c, 0x2081b481, +0x7b84a853, 0x7b679ce3, 0x9ba49881, 0x695ab122, 0x23be9081, 0xd7322a4a, 0x21db3fad, 0x81fc8145, +0x84a1bad2, 0x9f307bb8, 0x81815581, 0xb84f1450, 0x6d3acdb7, 0xfd64811d, 0x7f8132e6, 0xd7458fee, +0x81c54610, 0x7f7f73d1, 0x81a5c3bf, 0xe15f7fa0, 0x3c7f517f, 0x047f7fd0, 0x261e438d, 0xb3c47f34, +0x5c81a1f6, 0xa2c62f5a, 0xe67f9255, 0x4df92530, 0x58877578, 0x817fdbd6, 0x9381d867, 0xb3815eec, +0x3d8b8ad4, 
0x814aab55, 0xa5477f7b, 0xed7f98d4, 0xae7f3681, 0x3b938138, 0x58e37f90, 0x7356b00c, +0x754181fd, 0x3181503e, 0xc58dc451, 0x818b9cbe, 0x747fcbc4, 0x0918d1f1, 0x5b5ae00e, 0x761d6116, +0x535ba62c, 0x8c0f22b4, 0xc98fb9ad, 0xad7f6d1d, 0x4f7fec7f, 0x1b437f35, 0x9004b258, 0x787c8d15, +0xd04b81e6, 0x703d10f0, 0x7f7f8b89, 0x1674b61c, 0x9789ac81, 0xac6c7f58, 0x0e81ac48, 0x0881812d, +0x117f7cc2, 0xa6bd8145, 0x59817f7f, 0x89cbb4fb, 0x697f9a81, 0x813e5281, 0xf57f4898, 0xee994b1d, +0x60228cc5, 0x4c341db8, 0x7f6e7f63, 0x107f8151, 0x14397f55, 0x81a951d6, 0xb762ed40, 0x81c39dd6, +0x546a0438, 0x7f8e88d7, 0x81810e7f, 0x7f7f654c, 0x3b412da8, 0x8aabcfe0, 0x4b83444b, 0x65bea3ee, +0x507fb43f, 0x16c3c7c5, 0xe322a2d9, 0xfd5a0fd9, 0xc757577f, 0x6cd0b9af, 0xc6078100, 0x3256b728, +0xc47fdc2f, 0x81dc4bb9, 0xd8d02073, 0xc38671f2, 0xbb9104b4, 0x7fb281b6, 0xcb817f81, 0x819f44af, +0x29f781bc, 0xdfc231b9, 0x7fae7f43, 0x575b7f79, 0xf4c4414e, 0xe807d15c, 0xd08181ae, 0x8e81cd9f, +0x942472c9, 0x816362dc, 0x9cba9281, 0xdc6fce74, 0x2b7f7f35, 0x37810c4f, 0xe8709e85, 0x5c7f984d, +0x8153bc24, 0xbfe581bd, 0x9a7f9c84, 0x3be9e533, 0xb6938127, 0x82ca63d5, 0x08eeb19f, 0x59ebeb0f, +0x4d7f14f7, 0x3c558f40, 0x62d26981, 0x8181f3a8, 0xe42585ea, 0x7f08817f, 0xaf8181a0, 0x0c5d16cd, +0xa4dfee3d, 0x3a8182fa, 0x6381883a, 0x7fe9d01c, 0x5e659f90, 0x7f7f812f, 0x4a9d8179, 0xb1e4acf3, +0x1b8189f9, 0xd983a4d8, 0x817f81df, 0xa2cf1f55, 0x817f337f, 0x7fa3564e, 0x14ae7c44, 0x4ca3859d, +0x81b37fa2, 0xf8a0b7b1, 0x7f79c0cc, 0x7f3010f6, 0xa5be8c16, 0x2420b129, 0x7f7898f1, 0x7e818139, +0x7fc919eb, 0x61819a67, 0x88928b15, 0x7eade0d3, 0x4e7fc323, 0x8deb5fe6, 0x7f3f7f9c, 0x81817fbf, +0x989026db, 0xef81c203, 0x81e4a1e1, 0x35939d41, 0xbbaae870, 0x3d217fca, 0xddd9a1ce, 0x7f7f81a3, +0x5bf18122, 0x81c55a52, 0xc95c450c, 0x7f5a03e6, 0x65654a14, 0x75d401f6, 0xe0cb297f, 0x5f3c8132, +0xf7a312d3, 0x81c6d3af, 0x978e8181, 0x1476539f, 0x502534be, 0xd2909112, 0x6ad663ad, 0x7f7fbbac, +0x8b70da23, 0x4d78af31, 0x7f8159f5, 0x81bad147, 0x348151dd, 0x5e68eace, 0xc2737fa0, 0xf97fa32e, +0x7e7f81de, 0xa68baddb, 0xbb39817f, 0x7f9bc454, 0x6d09c327, 0xc37f3c33, 0x0a865f7f, 0xba8281e1, +0x1a3ff406, 0x652c3fd1, 0x94af367b, 0xb381d8cb, 0x4045cce5, 0x7f7fd613, 0x795f7f42, 0xb1818193, +0x7f371d3c, 0xef816b41, 0x2ca91acc, 0x48b5aa83, 0xaae807de, 0x26813f13, 0x81817f2a, 0x6083604f, +0xd29e81f6, 0x7fb218ca, 0x4aa50170, 0xc2d74a22, 0x4f4971a3, 0xe9dc5f10, 0x7fb8bedf, 0x1f93814a, +0xa97f7f0e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 + +hard_output0= +0xb5bc6ac8, 0xf5373664, 0x1310345c, 0xd5bae4e7, 0x1fc9e83e, 0xebfdfded, 0x84bd86ab, 0xb7aabe00, +0x60b44fea, 0xb9067464, 0x30325378, 0xa9195955, 0xf70c6e5c, 0x90922632, 0xc90b1cdb, 0xf2f5fb69, +0x73056b63, 0x1a33bf3f, 0x17755b5c, 0xc58bff6d, 0x2f4390b2, 0x2869d508, 0xe7c7dfe8, 0x38552963, +0x21da5367, 0x07282b9b, 0xa4767105, 0x1e294251, 0xe350a940, 0xb8a6aa27, 0xed12d778, 0xf10d9ece, +0xab93527f, 0xcf2da7e7, 0x68f6d0b1, 0x811f4bca, 0x577b06b2, 0x3234f13e, 0x30bab7df, 0x8dc47655, +0xbb843bed, 0x86da3aba, 0x30950c97, 0xdd096d7a, 0xa871fd6c, 0x8bee4e6b, 0x8fea30d0, 0x6c05b4d2, +0xf3e144d3, 0xd24ebb1f, 0x065635e5, 0x8d3f2cf9, 0x536c6c6a, 0xfbb0a5d0, 0x3d707b42, 0xc44d5982, +0xa5f4ad8f, 0xf32c0970, 0x1bccf1a6, 0x05916020, 0xa64fb176, 0x5ede6a35, 0xaf4966da, 0x9df5e0e7, +0x75042abc, 0x9ef10481, 0x11ddcbc8, 0xa0f5518c, 0xd5c23418, 0x2393d558, 0xfbe7dfeb, 0xed1c64c2, +0x86a36508, 0xde2dfb1e, 0xb8d0fef9, 0x24505232, 0xc894e71c, 0xbcc752a0, 0x40b74e83, 0x90d23c8c, +0x728e4a61, 0x108f0b08, 0x66f522ee, 0xc258d851, 
0x35a31c44, 0x11311b5b, 0xfd3d5be9, 0x5ae448ff, +0x4f64994b, 0x5b8247a9, 0x4021114d, 0x2f0b6e82, 0x5eaa9828, 0x50ac71c0, 0xfb86ee52, 0x0dc1ac9b, +0xbbd47645, 0x8f357115, 0x978ceea0, 0xd557db99, 0x99b30388, 0xfc9a8a1c, 0x0f75be1a, 0x50143e22, +0x8840989b, 0x738ec50e, 0xe6b2783d, 0xf67899c8, 0x27ebed69, 0x6c415a16, 0x3a6cc2dc, 0xcd4e4e5d, +0x6cb12b2e, 0xdb88d7c0, 0x79cd1582, 0xbc422413, 0xe72ad2f4, 0x8eaac30f, 0x0bd86747, 0x6d87f69d, +0x15d62038, 0x4b375630, 0x0d51b859, 0x16db2cb2, 0xf210603a, 0x0abeb833, 0x55c694d0, 0xe57ca43b, +0x0ba94428, 0x1398a406, 0xe47d3889, 0x5a20203d, 0x250d7a1a, 0xd930ffec, 0x03992e79, 0xf2759376, +0x024ec121, 0x91fc3a2c, 0xb7e11cc5, 0x4ff7d459, 0xb8700134, 0xd6e61758, 0x4eba0a32, 0xb747e3ec, +0x7073fad7, 0xded80f99, 0x331e2f1b, 0xfa1f1bed, 0x056424a2, 0x1d1d95e0, 0x550b9ec8, 0x51ee2a38, +0x19525153, 0xd70c4cd5, 0x0d6cd7ad, 0xe44d1cf2, 0x30dfecda, 0xdacd7fe8, 0x7321d795, 0xddf48ef4, +0xe271e6a4, 0x9c1feecb, 0x951fcd7b, 0x8acc5a03, 0x3fb83527, 0xe306de74, 0x7b9cd6ee, 0x8e140885, +0xd4c91e8d, 0xe8c39733, 0x0f02f87f, 0xfb06b1b9, 0x0dc9349c, 0xf76bae8e, 0x4f642a07, 0x3d48a9aa, +0xe3ea323a, 0xa1cd5c8a, 0x40aa0e70, 0x132042d3, 0xa9732f6c, 0xd15a00c4, 0x43d3b046, 0x9a51ebd4, +0xc46ee0ed, 0xe2a2148b, 0xf5c478f0, 0x1fb01cf3, 0xf4f321ec, 0xd973811f, 0x11ad11b9, 0x5c67adda + +e = +34560 + +k = +6144 + +rv_index = +0 + +iter_max = +8 + +iter_min = +4 + +expected_iter_count = +8 + +ext_scale = +15 + +num_maps = +0 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN + +expected_status = +OK diff --git a/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_posllr.data b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_posllr.data new file mode 100644 index 00000000..fe4f5eef --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_posllr.data @@ -0,0 +1,645 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_DEC + +input0 = +0x81814e1f, 0xba81fb11, 0xc4554696, 0xe17e9454, 0x427f8119, 0xa8b355f1, 0x244ba17f, 0x1c816fa1, +0xc94f5bcc, 0xb5b97fbb, 0x634b8e81, 0x815a6cc1, 0x7f6bb016, 0x2f7249d6, 0x604664f0, 0x31817f81, +0x6fa66b81, 0xd49763f6, 0x777fa4f1, 0x81aa6da8, 0xf0fea881, 0xaccfbfd0, 0x7f81e582, 0x7f58a2d6, +0xa77f430e, 0xf47f50f1, 0x997f42f2, 0xd7814f81, 0x037fe14d, 0x81f98125, 0x2d813e55, 0x7f81f0dc, +0x7f7f8181, 0xc37c723a, 0xbc7a3364, 0x8193817f, 0xe381066d, 0x7fcab836, 0x2481eea2, 0x567f5968, +0xa07d720c, 0x7fa13343, 0x7f35636d, 0x7f817f78, 0x383d2a69, 0x7f860e2a, 0xca78797f, 0x7f7fe912, +0xc95a8141, 0x770c9c4e, 0x81e2817f, 0x811b3f81, 0x745f1104, 0x13817e31, 0xb681a63a, 0x8101482a, +0xbe81818d, 0x8c8181fa, 0x2133de57, 0x8147677f, 0x8a098d7f, 0xd429f1ee, 0x284fc81f, 0x7f5c16d7, +0xded34e7f, 0x3d60812e, 0xdf817f89, 0x2f33793c, 0x2f7f9961, 0x8181221e, 0x847f59d1, 0x9281a681, +0x89503793, 0x01817ff0, 0x183a53ba, 0xc99622f6, 0xbc819881, 0xa7ea2ec8, 0x787f957f, 0x26819378, +0x1e4e4ba1, 0x8bf18138, 0x3adc2c81, 0x3a7d8187, 0x7f22ae8e, 0x307c81d7, 0x7f53a881, 0xc281819f, +0x38fd2090, 0xdd810a00, 0x74ff7fa2, 0x7fb2f27f, 0x7f9b8211, 0x55d69e44, 0x8d2cd44c, 0x5610c676, +0x7f815b81, 0xa2811e3d, 0x93135d81, 0x92797f73, 0x8a8192b3, 0xa48a7f66, 0x7f7e4a10, 0x242dfd81, +0x7fe8b481, 0x810fb0e4, 0xde390fa7, 0xbb5a7f81, 0x65ad3b19, 0xa96a96a2, 0xf681817f, 0xb6a4eba2, +0x0d81017f, 0x5369080e, 0xfbd1c781, 0x879d7f7f, 0x81818181, 0x7f1d061c, 0x8aa731fd, 0x4386a67f, +0xba445581, 0x5b81975f, 0x467f7f7a, 0xc85db95e, 0x6f587240, 0x29a07904, 0x7f7f7574, 0x815dbeb2, +0x81771181, 
0x7f54e8c3, 0xc6586fd6, 0x3c6d835a, 0x86b4ed95, 0x7faa72fb, 0x7f7e7fd5, 0x154f81e3, +0x7c4881b4, 0x0c164a37, 0x7ffdbf69, 0x7f7d43f6, 0x5f47da8a, 0x3236d20f, 0x65818181, 0xa37fcf7f, +0x7f8116cb, 0xf6947f50, 0x66ab65b1, 0x6d688a6f, 0x537f6bf0, 0x5c864ddc, 0x06a98181, 0x455854c2, +0x9a6c49ad, 0x129d3c29, 0xe94c327f, 0x7d40247f, 0x9681bc7f, 0x7f288120, 0x81a43b40, 0x08d81689, +0x457f815d, 0xc27f9ae7, 0x7f819655, 0xdabf8181, 0xf4c07f81, 0x6410e8cc, 0xce4d8181, 0x19b13059, +0x8181bf99, 0xf2819620, 0x7fc37f4e, 0x7f8efe81, 0xb25a3e8b, 0x7fbbdb22, 0xba7f8181, 0x8135d42f, +0x817f6836, 0xc381a056, 0x7381547f, 0x1e407f13, 0x9b917f7f, 0x4e526d9b, 0x9323737f, 0x7a20429b, +0xac6b14a8, 0x3568e8d7, 0xbe621a97, 0x7fbca13b, 0x2dbda9dc, 0x32af4ddd, 0x6e4f978e, 0x10ed8172, +0x81e76681, 0x7f427fc4, 0x7fab7f81, 0x2c7ffd81, 0xea817f7f, 0xa62fcf2c, 0x8144bfb7, 0x7ffc93a0, +0x79e9b564, 0x6c449d46, 0x987f8481, 0xee118173, 0x867f0c0a, 0x81819347, 0xb8ddbf82, 0x5c6866b8, +0x82b48881, 0x5c817fdd, 0x7f6605e0, 0x4181bc81, 0x736b51a4, 0xcd5a7fd2, 0xf939a42b, 0xcd817faf, +0x5e1b1fa6, 0x7c818ef7, 0x070c47cc, 0x8e5981d1, 0xe661d182, 0x54deb54e, 0x7fba81f4, 0xc1712e73, +0xb6885086, 0xae9759e2, 0xb12b814e, 0xf5ae9e41, 0x16c736f6, 0xa981c3e2, 0xb06c7f29, 0x497f95b8, +0xcb3de381, 0x7f7524e3, 0xdcf88147, 0x811e81f9, 0xab7a9c7f, 0x816223d8, 0xf97fe22f, 0xe5cf814e, +0x744f6312, 0xc797e05a, 0x279b237b, 0x622b01b9, 0x37818181, 0x8174e3ab, 0xfa5a69ff, 0x62cc787f, +0x81944e7f, 0xc87f7f2f, 0x30839f54, 0x8122090f, 0xf8e98181, 0x28815f40, 0x0262424b, 0x7fc3ccd0, +0xdf7f7fe2, 0x0c264dfa, 0x5d954581, 0xfb188181, 0x6b810d33, 0x1c7fbee7, 0x8141c17f, 0x7f768155, +0xbd207f27, 0x794079d7, 0xf34bd97b, 0x7f7f5026, 0xb46c9782, 0x7f81574e, 0x81817f27, 0x75267f6d, +0x4a7fe97f, 0x7f879ee0, 0x25f8d63f, 0xd481a3c5, 0x747ffd0d, 0xb33530e2, 0x813f5881, 0xbb8181dd, +0xa2da462a, 0x817f8c0d, 0xd0f061f8, 0x117bda34, 0x1897f8b7, 0x2e477f50, 0x8181814b, 0x7faa1181, +0x6ee68168, 0x811a98e2, 0xdcb2b784, 0xa20a7fb7, 0x41445681, 0x0bffeefc, 0x02817f9d, 0x743071cf, +0x387695bd, 0x847ffb4a, 0x0a287f56, 0xf9b4d4d3, 0x2a7fa481, 0xe3eb2e9b, 0x327f91f4, 0xec3981d2, +0x6681f83c, 0xe675722e, 0x426ac913, 0x4b6154a6, 0x5c30816f, 0x817c8155, 0x277fa781, 0x8dd8a9a3, +0x48c609a0, 0x4bca47bf, 0x7f3f6d81, 0xea497fb3, 0xc181507f, 0x81bf1ffe, 0x8e818916, 0x6ba83d7f, +0xb8547f62, 0x5881670e, 0x7f6281fb, 0x6e7f9250, 0xa7bc817f, 0xb19450aa, 0x7f7f9d58, 0x20f69c1e, +0x7f45815d, 0x817f9319, 0x7a5496b0, 0xb85e8950, 0x72c9337f, 0x81781aba, 0xfecdaf28, 0xe2195ab5, +0x7fbb0ec7, 0x7f7ff8a7, 0xab818181, 0x978f2e6e, 0x7fc38121, 0x18945319, 0x264a8181, 0x93b1dc81, +0x630ed781, 0xe27f8e14, 0xc17f5b93, 0x7081fe2d, 0xbb9ac790, 0x7f81ba5c, 0x7f812f32, 0x68bd4f7f, +0x81217f81, 0x88b87f26, 0x285e7f3e, 0x29817828, 0x747f7f81, 0x159eaacd, 0x8d02b252, 0xa98881e7, +0x817f1681, 0x367fbd38, 0x8a8441e3, 0x81447f1f, 0x8165424b, 0x811676ee, 0x81a48122, 0x4b02ea7b, +0x5c267f38, 0x8195acd2, 0x7f7fb581, 0x9eae092d, 0xab12457f, 0x96f8dc31, 0xa89fd810, 0x222b7f7f, +0x1dd67f65, 0x873a81e0, 0x0e7fd97f, 0x81a83d97, 0x8106d381, 0x7f817f0c, 0x8104d4f9, 0xbb81677f, +0x01be7f39, 0x297297ab, 0xc2f01b81, 0x9dd57f81, 0x81ff692f, 0x7fc28115, 0x077f547f, 0x717fcf81, +0x3d81ed83, 0xff6f7fff, 0x7f50f95c, 0x5650a753, 0x81817f09, 0x963b7057, 0x7f817f60, 0x987fa099, +0xf6207f7e, 0x4c814e98, 0xcd27b681, 0x41bd2ebd, 0x340172b2, 0xbc151ec1, 0xf3b403d2, 0x817f6c81, +0x7f2f3218, 0x26f2ea5d, 0xcc59bd8a, 0xef857f52, 0xe37bc195, 0x7f2a2e13, 0x7381a51a, 0x31e14c60, +0xc2e23fc6, 0x8113f40c, 0x9b2d7f81, 0xf6e1147d, 0x813c7fe1, 0x9a9d7f3f, 
0x7fb939ac, 0x573181ad, +0x608a317f, 0xb486b9ee, 0xe67fb10b, 0x62817f8b, 0x7f7f9775, 0x81e6819d, 0x7f6f0f61, 0x4ffd8158, +0x81ae56d2, 0xbe4aa4f7, 0x6c515581, 0x435d8326, 0x7f0e2a95, 0xc28d81e1, 0x357fa0cf, 0x2b9f7f11, +0xd491057f, 0xbc7bddd7, 0xe5b57f34, 0xfba68549, 0xb491d2e3, 0x4fb61f49, 0x81bd3e3c, 0xdd9848f3, +0x81810561, 0x67f5fbdd, 0x8c4dc25b, 0x2ab47f67, 0x68137f5e, 0xcfe53817, 0xa7679d31, 0x501213b4, +0x7f7f1390, 0xbefeccd8, 0x30fe9044, 0x7fd6812d, 0x813cf0e6, 0xccb82825, 0xfa78ec58, 0x024b90e4, +0xba25ec7f, 0xc43b50ff, 0x7f7f3e4d, 0x64c38181, 0x81ed1dba, 0xd58b7f64, 0x0b81311f, 0x03563b7f, +0x817fb491, 0x81e895c7, 0x817f7fb0, 0x1681a581, 0x7f998a50, 0x489b81a9, 0x81bc81a2, 0x3d9e7fde, +0x7c7c6381, 0x817447ec, 0x6e7fc32b, 0x7fab86c6, 0x95b44181, 0x3a7f756c, 0x81458168, 0xc4787fd3, +0x731cda81, 0x73127f03, 0xc9077fcf, 0xc37f9c2e, 0x27817f7f, 0xc4361507, 0x7f9b3581, 0x9d357f2d, +0x7f81166c, 0xb87f815f, 0xb2604f95, 0xc44b5591, 0xcfb8078b, 0x7481e64b, 0x8152a679, 0x53fa387f, +0xfa7f3f79, 0x7f5b5fe5, 0x1e625d7d, 0x1081ad93, 0x7f5019fe, 0x3d8145a7, 0x629f9918, 0xc1ab3742, +0x7f66c057, 0x9cb048dd, 0xeb6e859b, 0x8e5fb72c, 0x94d8f5a4, 0xa381280d, 0x67737de6, 0x45817f81, +0xf055c17f, 0x6a0aafff, 0xdb818cc6, 0x81fc60dc, 0x20377f4c, 0x7feac6de, 0x72e78c7f, 0xdf532124, +0x81d33356, 0x8881bcf9, 0x95817f32, 0x612eae3b, 0x7b186965, 0x50b77f63, 0x718181b3, 0x42d8b481, +0x88edaf7f, 0xa3cd25f6, 0x7fdd7f81, 0x7fb79681, 0x7f786e7f, 0x7fbb41c9, 0xd2618121, 0x1cacc6d3, +0x7fe2a0dc, 0x819e1a46, 0x563081e1, 0xb18d77b1, 0xb4817f7f, 0xa97f81fa, 0x2e81157f, 0x31e67fa4, +0x5b814281, 0x98b18124, 0x7f0a3540, 0x49d508d1, 0x2693bc71, 0x3aeb81ca, 0x817fc081, 0x6674b9e7, +0xa1217f23, 0x517f81f1, 0x7fc4ef26, 0x2914ef7f, 0xa9ff7a19, 0x7f7f0ce2, 0x1b485e45, 0x7f70af0d, +0xd6812a71, 0xc95fc63d, 0xa57f03c8, 0x9dbe7f81, 0x7c978192, 0x81a22306, 0x96447f2f, 0x7faf2728, +0xc2a494ad, 0xd5c7813a, 0xa02b7bb8, 0x3bc04eed, 0x997fa97f, 0x7768d0d0, 0x8a84817f, 0x8f1102c6, +0x7f8115be, 0xfb7f7869, 0x7ff25de8, 0x6745f276, 0x0f23b585, 0x52933552, 0x7fab5340, 0xeb4c403f, +0x7f5465ec, 0x77b0a942, 0x6b67297f, 0xc86a7133, 0x7f8f177f, 0x869e28f4, 0x7a7fb066, 0x81a3b865, +0x5d7fba81, 0x815fd7ae, 0x60817ff7, 0xa633a381, 0x45813981, 0xade1ccd7, 0x814981a6, 0xaa93770a, +0x7f83b4dd, 0xc0967f1a, 0xed7f3764, 0x53772ea8, 0x5a81f453, 0xaff56f92, 0x81067f5f, 0xf596ef1f, +0xd520de7c, 0x81b2bdc8, 0x7f8130dd, 0xfb7f8c41, 0x99816bf6, 0x695a60e3, 0x7f7f817f, 0xed987f26, +0x8f818166, 0x417fecfc, 0x99ec379b, 0xaf7e7f7f, 0x360dd75f, 0x787f44f6, 0x81837dfb, 0xd481a681, +0x81819d17, 0x8781322c, 0x7f7fac0b, 0x0fca7f3c, 0x40b5e37f, 0xf27f2ebc, 0x41c19a41, 0xa6c1d381, +0x066de9ec, 0x32b8a8bc, 0xdac61ba5, 0x812d817f, 0x87b9821c, 0x817f7920, 0x7f557fbc, 0x81738181, +0x918fb098, 0x81ab890a, 0x81b881bf, 0x7f7fc57c, 0xcdbde77f, 0x7f5f7f3a, 0x36e59a81, 0x19a6818f, +0xea7fd850, 0x20606e2b, 0x927f9c22, 0xd27f819f, 0x3c4f7f73, 0x168176ce, 0xf0687f81, 0xec5f7faa, +0x23aa789e, 0x2de996b5, 0x7fd07f7f, 0x9785c156, 0x1eaa3235, 0x9781ea6f, 0xb2de74fb, 0x815a98d6, +0x8cb77f92, 0xbba85fa1, 0x61afcac8, 0x81817f64, 0x8149865f, 0xa251b7ee, 0xec7f8181, 0xf8ad7f84, +0x5c8142bf, 0x212981f2, 0x7fc9e381, 0x37338e67, 0x7f3099c6, 0x4fcdb7d1, 0x0381c0fd, 0x778181e4, +0xe7702cfb, 0x61c21e43, 0x3f987f81, 0x3a0b815c, 0x7d12b8c2, 0x70097f0a, 0x7f244d63, 0x2d81187f, +0xd87f6f7f, 0x7d267f2c, 0x8eaf2486, 0x87d8013d, 0x85367f7f, 0x81817f85, 0x0a067081, 0xc96f952c, +0xf3c14485, 0x227f4330, 0x60ec8124, 0x41f181f5, 0xdb6b7fce, 0x819467cf, 0x81bc7fd4, 0x2c7a7f7f, +0x936ade53, 0x7f81d04a, 0xc4817f81, 
0x7fdd78dd, 0x81d3a47f, 0x917fade5, 0x6c81510e, 0x5b16fd74, +0xbe987f81, 0x7fb17404, 0x7fb8e8f7, 0x7fb381e6, 0xc479b081, 0x5c81eee7, 0xb19b0e81, 0xa381d4df, +0x81bacf5b, 0xa67fb9a6, 0x817f11a0, 0x8d565870, 0xce818181, 0x7f4aec3f, 0x813ab981, 0x1b495a42, +0xab7f928d, 0x8106014f, 0x47c3e57f, 0xc418f11e, 0xc9149f6c, 0x1a7f1e25, 0x7f3f2d81, 0xcd5e462d, +0xa8d57f7f, 0x812481db, 0x7f697f97, 0x69cdb891, 0xc31fc20d, 0xdc812b31, 0x57e37f81, 0xa09681aa, +0x7f817f88, 0x1ae385f8, 0x81607f7f, 0xfb752e65, 0xd8dd8141, 0xb17f50a9, 0xb8737ffd, 0x1f7f815b, +0x7f9a9e69, 0x06448b38, 0x8109a225, 0xb69e064b, 0x9881537c, 0x59dee4d8, 0x89c6a692, 0x0e817b7f, +0x81816b97, 0x5de37fd1, 0x7f2c7ca9, 0x81784402, 0x29bd477f, 0x81a07ffe, 0xe822df7b, 0x7f29813c, +0x6d81cd81, 0x1a811bd2, 0x6b2c7f26, 0x7881a08f, 0x7fd0e88e, 0x27f87fce, 0x8181a67f, 0x7f7f5558, +0x81668133, 0x8181bfca, 0xc3792d58, 0x2b7f2d7f, 0xa3d09230, 0x9fa56824, 0x7fd5ce7f, 0x251b3cbd, +0x7f818117, 0x7fac9e26, 0x7fde26f6, 0x4b7f837f, 0x1fd52b94, 0x067f6698, 0x70817f7d, 0xdd9658c1, +0xe5a99465, 0xa59cdc55, 0xc1c2c7ad, 0x147fb081, 0x234cc657, 0x7f7fcc0f, 0x7f3d8150, 0x41b68131, +0x7ff58184, 0x90a57f2b, 0xdd9d9b7f, 0xc4518163, 0x428aa37f, 0xe866ef52, 0x7f6c3644, 0xa09d3b50, +0xf2c4adad, 0xc87ecb45, 0x7f70bace, 0xd5697f2d, 0x297349a8, 0x6bc11cc9, 0x8cdd0ed3, 0x7f53646b, +0x8129b9b8, 0xce65a8d3, 0x20208181, 0x817f5272, 0x6ad11bf4, 0xc58151e3, 0x361572c9, 0x81acd996, +0x6e819466, 0x7e7fbd3f, 0x70bb9ffb, 0x818b810a, 0x879a7f15, 0x8c884edb, 0x9028ae45, 0x81d8f525, +0xaaa781b9, 0xaf1a0108, 0xe2817f73, 0x24b67f8b, 0xf08fd77b, 0xd21a265f, 0x7f3930ba, 0x5f5d9287, +0x66218181, 0x7fd219f3, 0x6f44f09e, 0xbcc58157, 0x7f71b94d, 0x817f81c5, 0x877f6064, 0x81647f81, +0xe5ba2e34, 0x889f8722, 0x27819f0e, 0x7f4c0eed, 0x2d7f79b6, 0x81b1813c, 0x647f3d2b, 0xe92b68be, +0x81375804, 0x6798c04b, 0x2b596e7f, 0x78c97f7f, 0x812c367f, 0x7f468112, 0xa352d785, 0x3806f2a4, +0x6886a9a9, 0xa31540ea, 0x5ab2478d, 0x9f5be57f, 0x9ab06481, 0x1586279b, 0x86267f81, 0xb3a50944, +0x788a485c, 0x838d4c30, 0x8ad44b53, 0x923fb39a, 0x95035381, 0x81817ff2, 0x7f44d5eb, 0xf17f8164, +0x39d117c0, 0x37ea75f4, 0x81927387, 0x817f0623, 0x909f7f0a, 0x997f7f19, 0xc1983b18, 0x60937f6b, +0x8829c381, 0x8e6bf066, 0xfb7781ab, 0x43408e7f, 0x7f7f811d, 0x33eb81d0, 0xb86459a9, 0xadba7fe7, +0xc1967f48, 0xfd0284bd, 0x4bef335d, 0x1b7f5f81, 0xa2c2c237, 0x81b0816a, 0x42ee5c7f, 0x7f009323, +0xb7ced44c, 0xd90958e2, 0x3af8aae8, 0x7fb82e90, 0x989a7f7f, 0x14a22c68, 0x60ef7dc6, 0x7fb9ad00, +0xf9e3c69d, 0x5e3bafbe, 0x81178a81, 0x92c4df02, 0x159d7fb6, 0x812b8103, 0x616f9df5, 0xb9a1df61, +0x454b3944, 0x98931fbe, 0x4aae7061, 0x9572a681, 0x8178a8a8, 0x81fe821f, 0x817f7f6f, 0x7f787f2c, +0xe584ff82, 0x7b7f5637, 0x9e815970, 0x517d8181, 0x97faba7f, 0xcfe3ce17, 0x9181f55f, 0x857f5f3b, +0x81b17fbc, 0x7f1d7fbc, 0xae7356c3, 0xe4f77f8b, 0x4695f639, 0x8ddaec55, 0xa5d4a381, 0x7f7fa418, +0x4d818145, 0x3881723b, 0x7f507f7f, 0xa5357fca, 0x81dbdc3b, 0x0b187f28, 0xb0325181, 0x09dd795f, +0x2c39b28c, 0x9ac18118, 0xda52b681, 0xa3819da8, 0x7f884525, 0xba0d7ffe, 0x50ce81ad, 0x60e70e69, +0x110b81ee, 0x8181871e, 0x7fe07f81, 0x8d1c4dde, 0x67649db7, 0x6e4669c2, 0x818144a4, 0x7f6c9cb7, +0x51b57f81, 0x36aec300, 0x7f7f8141, 0xc5eca67f, 0x14ac7892, 0x7f35e9c2, 0xa1fe9a81, 0x6a867f7f, +0x767f817f, 0xc447c4bf, 0x067fedca, 0x8c024b57, 0x7ee5707f, 0x52589b51, 0x0f81b28f, 0xae7f00a7, +0x5244c5ca, 0x9671812e, 0xf0631d15, 0xb4aba1b8, 0x7f9a3488, 0x81b485b8, 0x392f2d7f, 0xed7fad81, +0x6055a4e1, 0x60f300c7, 0x7feb817f, 0xd1e0917f, 0x544ca57f, 0xd23d7e0d, 0x8110c8c2, 0xbc24a615, 
+0x3d817f33, 0xbdabcdfa, 0x5ac3817f, 0x81817fc0, 0x81817f76, 0x837e43d9, 0x7f817581, 0x5da96bcb, +0x3e1c888f, 0x7f7fa6f5, 0xb769907f, 0x8eb43d8e, 0x3a7f3e81, 0xd5845510, 0xd07f5a54, 0x14816ab2, +0x8126c62d, 0xbeda8f5c, 0xa0007fa8, 0x818d7281, 0xad72917f, 0x49c2f650, 0x87969baf, 0x7f53e975, +0xe78de481, 0xe5083e96, 0xb635812c, 0xafdf4bb9, 0x26ac8101, 0x82b0a109, 0x7f8101b8, 0x607fc7cc, +0xc3b1892f, 0x7d0dd911, 0x396d8195, 0xcb84b67c, 0x3f7d3681, 0x43a07f25, 0xd020297d, 0xb7d65b81, +0x455a99e2, 0x509e4611, 0x817f81e0, 0x81da81a8, 0x8147b5c1, 0xbc81a5df, 0x489c6781, 0x8150a908, +0x4fc0e681, 0x814b8163, 0x11457f8b, 0x9a77db7f, 0x81fa0681, 0x7f867010, 0x852b87c5, 0x3d8253dc, +0x91165c42, 0xc6ac7422, 0x727f0774, 0x7f497f67, 0x1a3fe7c0, 0xb39c81d1, 0x44a47f7f, 0x7f8b066f, +0x7f787ab5, 0x7ba7e870, 0xb932a3a9, 0xc85c1149, 0x32a28d61, 0x7f8d812c, 0x93819da2, 0x647ae98c, +0x777f7e9e, 0xbb447fec, 0x2fabd677, 0x3a7f8c08, 0x51707f28, 0x35f81da5, 0x0b3cfc4e, 0xd4179b8a, +0xef77a6ca, 0x31e02016, 0xdbb81924, 0x7189b4a7, 0x08a671aa, 0x817f2352, 0x81819e81, 0xec7fb7e4, +0x81699ae5, 0x88817f20, 0x5ee7d181, 0x944d8150, 0x7f7f52d9, 0xc1815114, 0x3281a67a, 0x52fa8181, +0xc4e47f7f, 0x7f824cbd, 0x728cf0bf, 0xa6b20e46, 0x9ffe631c, 0x639d82cc, 0x7f638136, 0x7f7f3381, +0x7f8b4794, 0x44b19231, 0x08da5253, 0xaa815d93, 0x7f6a7f69, 0x616331c2, 0xdaa47fcf, 0x7f272255, +0x3ee4098e, 0x819527f9, 0xcfc187a5, 0x7f81e781, 0xf71402bd, 0x9fecc0d1, 0x2067a67f, 0xa1dace7f, +0x42817fa8, 0x8181da23, 0x03818181, 0xc35e3af7, 0x941d7ff3, 0x817f33e5, 0x7f2a78f7, 0xbe426494, +0x0d2d81a4, 0x817f20cf, 0x89b2f4d0, 0x8171ba71, 0x6245be4e, 0x817f8140, 0xed8a7f81, 0xdb097f7f, +0x81724d42, 0xe0de7bfb, 0xde611f7f, 0x7f54d981, 0xb1d07fd7, 0x487f7fef, 0x7f2c8139, 0x7f7f7f10, +0x3e812db6, 0x63447ff6, 0x7c7f277f, 0x9f7f8b7f, 0x76d47f81, 0x197faadb, 0x6a596c91, 0xc681a3a3, +0x7da1d13f, 0x4af8d329, 0xaa819881, 0x7d7f7f4a, 0x3a9d1753, 0xaf818145, 0x5b905350, 0xee313e4c, +0x3fa881c7, 0x437fc046, 0x817fbd9a, 0x30f77f14, 0x3e81df81, 0xa42f4e53, 0x7f67d481, 0x4f4b819e, +0xc5474f22, 0x6eed3376, 0x7fb1ed92, 0x5b3e813d, 0x6c8181a7, 0x720df323, 0x2d0e7017, 0xc7448174, +0x817fb27f, 0xad7f01de, 0x817f10b1, 0xf6231fed, 0xa56d7a81, 0x98817fef, 0x8181e4a0, 0x48aa7fb1, +0x81727f6a, 0xa4059a04, 0xe0cff64c, 0x5ea7679c, 0x69c1009b, 0x7f816b2a, 0x5cc58181, 0x7f954f3d, +0x848b7f17, 0xaee99031, 0x7fbb951f, 0xa2da7f73, 0xc5acfb81, 0x2c1ecf2a, 0x6981fdba, 0x19897fb2, +0x7f4af7a6, 0x85cd8129, 0xe845e0d5, 0xdf658407, 0xd522567f, 0xab242f2e, 0x7fdd70a4, 0x1c9d038c, +0x2f923f85, 0x27f1b2c5, 0x7f812781, 0x7f818181, 0x7f9603cb, 0xc32881af, 0x5ad1f2a1, 0x61b99a32, +0x57cd3bfb, 0x7ffc6fd7, 0x9e81c281, 0xfea25f7f, 0x85a2bfe7, 0xf3ef5107, 0x7f81565e, 0x1d447f81, +0x814e817f, 0x40853b00, 0x6caf3f7f, 0xe13a7fe2, 0x816162c5, 0x81b4e66f, 0x81e18fb8, 0x7da87f66, +0x7fef667f, 0xea2ba1f4, 0x408181d2, 0xba81676f, 0xdd817481, 0x7f9d7fff, 0xd6544752, 0xb1bd61ce, +0x3aa26a56, 0x817e9bf8, 0x8158964a, 0xa0d3253c, 0x308130f6, 0x501d7f23, 0x367f34d2, 0x818581eb, +0x817f4743, 0xdf9e81c8, 0x5c7f13a4, 0x12182965, 0x9cca6c7f, 0x47817f4f, 0x0cbd2918, 0xb05cf048, +0x28810542, 0xe8a869a0, 0x7fb7682b, 0x3f7fb5d1, 0x813dc581, 0x2ea58dcb, 0x4a4e6881, 0x687681b8, +0x665c057f, 0xa78127e0, 0x81b28166, 0x817f686e, 0x7f261637, 0x81ad7131, 0x54817f68, 0x2eae6470, +0xf110f558, 0x8aa44bc2, 0x81f12d55, 0xf37f35cd, 0x7f68679f, 0x3fc481db, 0xe02a5a41, 0x817f8130, +0x919b6c25, 0x14749815, 0x9d7f6c60, 0xc5777fa4, 0x7f8db49b, 0x524a623d, 0x2ef59cc0, 0xd18f4a24, +0x37707f92, 0x4af98126, 0x81b43c76, 0x7f965818, 0x7f81c786, 
0x057f7f49, 0x1ab3ef1e, 0x1781eb8a, +0xf0909181, 0x656da3b4, 0x5181cbb9, 0x81e9a237, 0x8ace577f, 0xd19f2c0c, 0x86a7817f, 0xb7628100, +0x817fcfa2, 0x7f3cddf4, 0x744a96d2, 0x19967ffb, 0x7f7f813a, 0x81f2b043, 0x7d46cd81, 0x7f006e7f, +0x363a7c9b, 0x9f812dda, 0x77c77f1c, 0x8172274d, 0xa7ba8181, 0xff7f1ea8, 0x817f4e7f, 0x377f9ad7, +0x81447f7f, 0xf81f3623, 0x58ce7f7f, 0x8a4a208f, 0x4d938115, 0xb0ac7029, 0xf72b23cf, 0x467f9dd3, +0x81819130, 0x62bb7fc4, 0x84b4b190, 0x2781b070, 0xdc81cda4, 0xcb8145b3, 0xda812b38, 0xda307f7f, +0xafbc8158, 0x7fcb7205, 0x47e781ae, 0xc056245f, 0x81997f56, 0x51d9d6f7, 0x7f895c4e, 0xb7814beb, +0x7f658181, 0x757f81bf, 0xdb81816e, 0x582edcb2, 0xb9860711, 0x587f81ca, 0xa45a7f0b, 0x81e68a35, +0x7f7f3b0f, 0x3c233956, 0x7f5f3e44, 0x81ba915e, 0x9ea4dd81, 0xc323e1bc, 0xb1ac0322, 0x5e7f0e2b, +0x6b2fb26b, 0x0e92812c, 0x667fc670, 0x7faccae4, 0xce8181be, 0x815e68c5, 0x273e7f69, 0x10f3e868, +0x47a8b82c, 0xc8817f10, 0x617281b6, 0xe4a44714, 0xe19d4b04, 0xe8818333, 0x81e481fb, 0x58ab1e34, +0xa68c8168, 0x815f4c12, 0x181be656, 0x817f147f, 0xc87f7f8b, 0x7f7fa1f9, 0x7c787f7f, 0x7f7ff8ae, +0xcf9200a0, 0x7f65e492, 0xc37fbb89, 0xa234a37f, 0xa27f7fb4, 0x818a6fb3, 0x04817fb5, 0x2e7f636a, +0x905db630, 0x812bbc0d, 0x8f1beb7f, 0x7f83bb5c, 0x81b96698, 0xbebc8152, 0x6669b181, 0x7f81452e, +0x6861ca81, 0x8806d040, 0x4570d28b, 0x812681fe, 0x817e3b48, 0xcd3ddb3c, 0x21777fa4, 0x7f3d81cd, +0xae0d8104, 0x8134b9fe, 0x7fb87fdc, 0x81ac7f6a, 0xbeec5c5d, 0x60cc4dfe, 0x967f82fc, 0x3a8187a2, +0xe9577fa9, 0x818181a5, 0x8181b4aa, 0x81204f7f, 0x81708146, 0xbe08d127, 0xdec25eca, 0xc3e0db2f, +0xf7817f4f, 0x7f5da797, 0xc9c44f90, 0xbc5b84c5, 0xda44a681, 0xcc9b810a, 0x38c5c3c1, 0x674294d4, +0xe47fe181, 0xa4817f54, 0xcd06b70a, 0x5b3d7f7f, 0x7f137fbc, 0x38769bd6, 0x817f811d, 0x6bc4046f, +0xba9a7f2d, 0xca91ea2a, 0xe881d10b, 0xf9810daf, 0x42e57f81, 0xc87c147f, 0x4f817f7f, 0x66c7bd81, +0xb991549f, 0x816febec, 0xdc50fd71, 0xaab9aa83, 0x7fbd3952, 0x7f50c6cf, 0x1481f8a2, 0x3ff381ab, +0xf6b0470a, 0x81e12ebc, 0x817f5f39, 0x3c7fb836, 0x79813781, 0x7fefa2c9, 0x7e81030d, 0xb849bf3e, +0x81613b40, 0x08817f32, 0xbd5ffe64, 0xd688be81, 0x0166eab3, 0x81077f0a, 0xe5817f56, 0x91b83696, +0xac8181d6, 0x5732a410, 0x40818181, 0x81915bb3, 0xa87f8145, 0x816a06ce, 0x033f7d7f, 0x86438281, +0x3afc477f, 0x9cdf8102, 0x786d4383, 0x81ecfe7f, 0x89a7471d, 0x349c7fd7, 0x7f377f3a, 0x51d55daf, +0x62337f61, 0x547f910e, 0x4bea99d2, 0xbfa9557f, 0x7f2ee96a, 0xcb9ccccd, 0x0fc3a681, 0x687f30fd, +0xb07f4530, 0x83814334, 0x955bf3c6, 0x7f909981, 0x7b4e617f, 0x7659def0, 0x05565d7f, 0x81f271cc, +0x31ca8581, 0x1afd6755, 0x818181ff, 0x7f8181ef, 0x7f7f77ce, 0x1d6692cf, 0x73c57f7f, 0xf083d53f, +0x707f9c09, 0x1ce6813b, 0x17bcb0c6, 0x15b59ba7, 0xde7f817f, 0x7f925801, 0xeba91d81, 0x568190e4, +0x7f817e72, 0x8d7ffa3c, 0x82f281fb, 0x548a7f7f, 0xe9777fa8, 0x407f16cd, 0x7e8167a8, 0xc5812c04, +0xeae8e181, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0xf481c67a, 0x89b07fc3, 0x2e4d7b44, 0x9181818e, 0x83b88158, 0x8181739b, 0x4aa97f6a, 0xb36d7fa6, +0x075bb9b8, 0xcf839c97, 0x20ba9176, 0x117a6a6d, 0xc35b571e, 0x8181fb5f, 0xec3a7fa9, 0x3eec7fcb, +0x53622f7f, 0x8681a07f, 0x0ff39b65, 0xb6c82420, 0xefb381d6, 0xd5390961, 0xf1887f0f, 0xbec2abad, +0x64d57f81, 0x2281057b, 0x1f7fe936, 0x99038181, 0xc1450b81, 0x65cc5386, 0xb886acc1, 0x109dde08, +0x6417d0dd, 0xcb7f7f76, 0x9681b397, 0x53c39e81, 0x437fd46e, 0x751c3481, 0xdd4c813c, 0x38817f65, +0xf0b39281, 0x31357fcc, 0xf37fae31, 0xba8181e7, 0xefc1e23b, 0xa8650162, 0xbb815838, 0xd6eeba30, +0x8de2db81, 0xc01ca018, 
0xed2dfb81, 0x7f84731f, 0x40633968, 0x557f2c0f, 0x5f5c6374, 0x41b02181, +0x447f6e09, 0x8d56b3a6, 0xe47f81bd, 0xbe712a04, 0x3e7e78b2, 0x470a8128, 0x298981b3, 0xe65e2866, +0x5bad817f, 0x78884acf, 0xb521287f, 0x81df4b81, 0x33818184, 0xb3d65f30, 0xc2ed7fbc, 0xc4811ccd, +0xec92a11d, 0x5bc5817b, 0x57ad8103, 0x7f6226e4, 0x07c2d87f, 0x3d7fb181, 0xff73974c, 0x08815f61, +0x50616fbc, 0x923ec16c, 0x2181c73a, 0xb8f88181, 0x047f7f7c, 0xe87fa53d, 0x399ca775, 0xef139839, +0x7f217f43, 0x811415c8, 0x37f6b9c6, 0x7f7f817f, 0x11c6ad81, 0x81b3c88d, 0x0ca47c91, 0x024a7604, +0x2dda2f7f, 0x811c5281, 0xc27f814b, 0x7f37a317, 0x42433381, 0xddfbc9da, 0x23737f7f, 0xe3acaecd, +0x1d846dc3, 0x7f1bd4e3, 0x4055d37f, 0x7f1e9a81, 0x31d29025, 0x6d90cf64, 0xdf532fea, 0x1a3dc07f, +0xf997ee19, 0x597f5460, 0x2a0b5b30, 0x5751a381, 0xd27f8489, 0x7f1f9ae4, 0x2bd07f7f, 0x5281923c, +0xbb7fae6b, 0xd344db7f, 0xb0431bc3, 0x33744f44, 0xd2996981, 0xce048861, 0xd57f36c1, 0x1181819f, +0xe7592aef, 0x7fbe817f, 0x177f7f04, 0x2cc78381, 0x27558149, 0x7f64746e, 0x2bb97dc7, 0x10aba652, +0x02f70c7f, 0x7fd8ae81, 0xcd81f07f, 0x9352c65e, 0x344a817f, 0x4f282881, 0x3e2d23fe, 0x69517fb3, +0x7fd1e4a6, 0x3bf47f7f, 0x33909ff6, 0x81aba30c, 0xaa8181dd, 0xf6f36f81, 0xb57f7fa4, 0xe8b6a572, +0xb0ac3a81, 0x81817413, 0x104de181, 0xef977fcf, 0x2054acbe, 0xa9013b81, 0x4a7f34fe, 0x49747f6d, +0x5ad74c74, 0x0e582cc5, 0xa7ea83c9, 0x7f7f750e, 0x38847f81, 0x41817281, 0x3344debd, 0xd229728c, +0x027f839f, 0x5d9fc4ff, 0xdb1e7f51, 0x439681ca, 0x1b81b581, 0x7f777f9f, 0xd6bfb07f, 0xb050814a, +0x1981648e, 0x2cca7f56, 0x30814ab8, 0xc48143a5, 0x5bab3c8b, 0x7f97249c, 0xf7f09eb9, 0x074aa055, +0xc86bbe54, 0x815ed4b2, 0x7fd0b28f, 0xd7e07470, 0xcf815cc6, 0x7e7f7f63, 0x81dfe09b, 0x4098da7f, +0xc1d5a339, 0x7928937f, 0xa481bf6e, 0x81accdd5, 0x3b9c5cb7, 0x817f81a8, 0xc49b8a56, 0xec817f93, +0xb7a9306f, 0x43555d59, 0x5f815d7f, 0xb79e7fc8, 0xc9d17fa5, 0x7f9999b8, 0x73467f81, 0x38627fd7, +0x7a9138ab, 0xbd46b67f, 0x71b5823e, 0x5c81fbb3, 0x3d508f7b, 0x8173c630, 0xd4816172, 0x3f81d92d, +0xcd6b9d7f, 0x72ee607f, 0x851bd2ce, 0xbc4e7fde, 0x1c6c8193, 0x9252b181, 0xcc217f96, 0x296d7f98, +0x5957ae75, 0x817f14bb, 0xbf7b8191, 0x44d87f81, 0x352a84fa, 0x524a882e, 0xae66ac81, 0xbc7fc592, +0x2cb1dc81, 0x42e56887, 0x7981b76a, 0xa5667333, 0xd2ab7e7f, 0xe37f817c, 0x26817f7a, 0x23570c5a, +0x1a685290, 0x814bb476, 0xf6527bb0, 0x7b810805, 0xe63cd272, 0xc1d97fb5, 0x1e8124d3, 0xd34f157f, +0x367f82a6, 0x14819e1e, 0x39226087, 0x818681da, 0x8121417f, 0x81186358, 0x57e88e9c, 0x077f6e37, +0x96087f7f, 0x9a82c77f, 0x0781c4b8, 0x81217f81, 0xc565447f, 0xab41447f, 0xf7915a1f, 0xe58b5a81, +0xcb817d13, 0x81819181, 0x0d4cc8ee, 0x63b4f36e, 0x587f81b3, 0x47947981, 0xcc7f7f88, 0xa3816781, +0x2581a481, 0xc750aa5c, 0xcd440151, 0x4d684b81, 0xd8a6b50f, 0x4f7fa9b5, 0x3f1b93cf, 0xb58f812e, +0xaacd817f, 0x136d7fa9, 0x81b581b9, 0xb75d7fd5, 0xd29a8133, 0x337f8ec0, 0x75ac901a, 0x0a6d7fb2, +0x2d208186, 0x8186b97f, 0x0cb42af4, 0x03ac9e7f, 0x1167a77f, 0x7f19fad8, 0x2d4781b9, 0xf97f6658, +0x9081bc81, 0x6f7f4087, 0x8e527f8d, 0x6873d688, 0x817fbc88, 0x4b81608a, 0x22814ea0, 0xe97f81c5, +0x10bb3de5, 0x818115ed, 0x421b3081, 0xa17f6e52, 0xf46d7f09, 0xd87f8185, 0x68a79c2b, 0x7f7f837f, +0x4d7f4967, 0xc8507f81, 0xc67881f1, 0x0dad027f, 0xad7f8125, 0xa47fb3aa, 0xc74ceb01, 0x373974a7, +0x4e7f8153, 0xa87c423b, 0x2ad75081, 0x4a817f35, 0x357f5517, 0x55816581, 0xcb7fa97f, 0x7b797f7f, +0x504ee981, 0x7adecc6b, 0xce338197, 0xc49ab9bd, 0xe3877f3e, 0x7f817f81, 0xea3624fa, 0xfd889681, +0xfd7f7ff9, 0xa6732f2f, 0xa81b81e0, 0x817f7fae, 0xbcb4b010, 0xc37f7ff4, 0x9a52819e, 
0xc730eaec, +0x437f817f, 0x8865815a, 0x53337f64, 0x2d817f8f, 0xd7e2417f, 0x2b6ec637, 0x3d819d41, 0xe4877f52, +0x26a26a63, 0xdc74a867, 0x71433c4d, 0x94b02e35, 0xd6a155f5, 0x3cd37f73, 0x5c89997f, 0x569e7f03, +0xe0adb53a, 0x49d78e81, 0x9c7f22a4, 0x01385b17, 0x9081db0e, 0x3a8c46a9, 0x1e016448, 0xfebdc287, +0xf072dec5, 0x9f816429, 0x4bb0817f, 0x5a0ac21a, 0xf74ca8aa, 0x82811b7f, 0x1c08bf81, 0x1fe36842, +0xbc7f8181, 0x373b7f21, 0x097f8150, 0x54935b9a, 0x3a7f1381, 0xceb77ff1, 0x55ab4e7f, 0xcb1ab47f, +0x9f17477e, 0x7374b97f, 0xb2512a9d, 0x8148d0d2, 0xc057444f, 0xe94f8162, 0x7f816e40, 0x1da3d14d, +0xb458eb23, 0x7f907a47, 0xc8657f81, 0xc4f36ba1, 0xbc8181e6, 0x81efd081, 0x53607fa6, 0x51368181, +0x32b84fdc, 0x8127b27f, 0xd2247f5c, 0x56f77669, 0xecfdfe9d, 0x697fc681, 0x7f7f2781, 0xdb7f371a, +0x1b8181fb, 0xea78f17f, 0x5a187f7c, 0x7f7c705e, 0x2b58816c, 0x757f607f, 0xc87fc97f, 0xec5b66a6, +0xb89574c0, 0x77ccb739, 0xe1fd2aa7, 0x7f45d8cf, 0x20283a95, 0x5f45909a, 0xe581748f, 0xf97fab7f, +0xc8e0357c, 0x73c7da4b, 0x7f0e812e, 0xba01c67c, 0x0324fb81, 0xea2ee62f, 0x253e88ae, 0x077f81cc, +0x817fc781, 0x94f17fff, 0x0ed6f0f4, 0x84677f8f, 0xb598462a, 0x81a87fec, 0x3dbaa47f, 0xbd62fd81, +0x277f81ae, 0x0f43ab6c, 0x07c1617f, 0xc7bc8181, 0xba2460fa, 0x6d0df181, 0x92dfa27f, 0xf7d1033d, +0x53b71860, 0x81b28146, 0x6564b13a, 0x7fcf0981, 0xf9004e81, 0x10229f55, 0xdb7f7f05, 0x281845aa, +0xca6ddfab, 0x6481d77f, 0x4e4120ab, 0x81006b47, 0x518dce81, 0x907f8145, 0x4d5e7f2a, 0x064076ad, +0xdc1c8e81, 0x354b2545, 0xc8426706, 0x4447b281, 0x21daad30, 0x6d21a202, 0x04249781, 0x19ac7063, +0x3a7f00a0, 0x81f41c2f, 0x523c5b36, 0xb6817f43, 0x3f817f86, 0x811234c1, 0xd77fd8a3, 0xc2f2927f, +0x557f7fb1, 0x41d9e881, 0xeec06dde, 0xe4633caf, 0x3c72fd19, 0x9cffc57f, 0x4b7c8659, 0xcbf6e57c, +0xd9bb7f7f, 0x5fdd819f, 0x69ecd881, 0x7f817f90, 0xe68181f7, 0x10811439, 0x2a7a2c37, 0x66438157, +0xbc634044, 0x3fe98163, 0x81ac8166, 0x8143811d, 0x364f63d5, 0x377f4e81, 0x24755bb8, 0x17adfc5a, +0xdedea8e0, 0x2e6e9e14, 0x0e816188, 0xe56cbe81, 0xb9c85cca, 0x817fb120, 0xae7f8181, 0xe37f7f7f, +0x0827d377, 0xdc7f815d, 0x9a2a5625, 0xb2ab4736, 0x817f2274, 0x817fb29c, 0xdcece2cf, 0x5bddd988, +0xcc1e817f, 0x24793971, 0x40ea214f, 0x326f7e64, 0x1a9e3ed0, 0x268c1a7f, 0x314b8548, 0x0f7f8701, +0x347f7c7f, 0xb9ca13bb, 0x3492187f, 0xc67fa67f, 0x7f49814e, 0xaa7f91d1, 0x18d53577, 0xc88b53a1, +0x20af691f, 0xe6853f81, 0xcf5d08bf, 0x81705981, 0xe30d47f0, 0xa8c17fa5, 0xdb817fb4, 0x12818172, +0x83433d81, 0x4a7ff6e4, 0x327f937b, 0xbb6509d3, 0x2c02fc7f, 0xf3b7feb4, 0x415d60ab, 0x136551d0, +0x0e818181, 0x832eb165, 0x3c7f7f82, 0x817770ae, 0xf9b3a525, 0xddcb3d02, 0x008b6181, 0xdbe55445, +0xec8137cb, 0xf5e8f76c, 0x61815ac9, 0xc8810fb0, 0x35547799, 0x0d81a035, 0x1c3eea42, 0x88d77f96, +0x2948677f, 0x0852db6e, 0x42367f74, 0x0d7f867f, 0x3a7fff81, 0xd1d1d271, 0x02a981e5, 0x2bfd2988, +0xd3818199, 0x4e7f4010, 0x09297fd4, 0x7f816b68, 0xe60a05ca, 0xeb81c97f, 0xa2cf5e81, 0xfd7f7f52, +0x07057f53, 0x8672147f, 0x389bd281, 0x3a855c79, 0x41328181, 0x21fe5e81, 0x073f7fc3, 0x3654f170, +0x24bc7f7f, 0x4f816ee1, 0x67427f7f, 0x8140676d, 0xef53e53d, 0xa48bce41, 0xdb5832d8, 0xf64a58dd, +0x3d3e70ab, 0x53ab4a16, 0x137f2023, 0x74569bad, 0x017f8138, 0x81902bed, 0xbdca8f24, 0xa34d8193, +0x81cf7f90, 0xab43b32b, 0x003f7f49, 0xbf3d4570, 0xbce67f72, 0x7c197fc0, 0x5376d47f, 0xd53d7fa8, +0x7e9bb47f, 0xd72aa0f2, 0xde818138, 0x5cc3a981, 0xd57d9c5d, 0xecb2d6a9, 0xf97f81e4, 0xb67f8f33, +0xb92b577f, 0xc483f97f, 0x469d9481, 0xd4868181, 0x7031e886, 0x95b4ca8f, 0xc6d4bd93, 0x117f227f, +0xb67e7fe9, 0x7fb35540, 0x07d01f81, 0xb4ce9b40, 
0x1b118681, 0x7f657bfc, 0x2bbe817f, 0x287f176e, +0x6c96bb02, 0xcfff9e59, 0xec7f647f, 0x817f81b6, 0xabd131be, 0x819081e8, 0x4724b87f, 0x5f817f3e, +0x62b99368, 0x81818110, 0xc99ad3b2, 0x25816734, 0x0702743f, 0xe1b4812f, 0xcb787f81, 0xac864257, +0xcb928181, 0x55e27b81, 0x26f281a4, 0x63898181, 0xbbe1e54a, 0x58d2f881, 0x49e3cc81, 0xf7ea287f, +0xe9827f82, 0x81a69a5c, 0xdd81d292, 0x7f935122, 0xd17fdc3f, 0x321a8fa8, 0xfa819a81, 0x1fdb7f79, +0xc6cf9b09, 0xe181547f, 0x0acacab3, 0x0e7f72ca, 0xf562e081, 0x13d3827f, 0x5692de7f, 0x35b04a5d, +0xdd818e3a, 0x6b7f6160, 0x29303878, 0xc7687fbf, 0x38508173, 0xa6bd37be, 0x0d339881, 0x5e198614, +0x67f745e5, 0x81081a7f, 0xa3c77d84, 0xaadb7f81, 0x578d7ff0, 0x75a0b54e, 0x0d01587f, 0xa1cb624b, +0xf94181b7, 0x819d3a81, 0x37258135, 0x5aa87f3b, 0x0e7f7fd0, 0x5b7f817f, 0xda4ce9b1, 0x40917f81, +0xc57f7f3e, 0x4115b391, 0x627b816f, 0xa8575f49, 0x99ec8124, 0x33db907f, 0x4973c07f, 0xc9b71548, +0xde548177, 0xb608bfcc, 0x1d0787bd, 0xd37f793a, 0xfe847f7f, 0x7f3f4111, 0x3dd5de99, 0xbedb817f, +0x4c65c24d, 0x5d2c813e, 0xd901b37f, 0x974c5c92, 0x7f7f7f7f, 0x812b7f81, 0x0057836f, 0xf0778181, +0x2881f3c5, 0x35147581, 0x817fb31a, 0x7e7ae063, 0xfa1e4184, 0x7f1e787f, 0x529c5123, 0x553b9a81, +0x12de4220, 0x1fa92f8f, 0x4d66a52a, 0x554a2f40, 0xad3f637f, 0xbc81257f, 0x04f8976a, 0x2aa1fc7f, +0x3881b3cb, 0xcfc6ee25, 0xa37f5f60, 0xffddf08e, 0xce817f2a, 0x824f4685, 0x17b8a353, 0xa97f8a81, +0xfc81dc7f, 0x7f81a5b0, 0x26fab529, 0x811085c4, 0x031c5a76, 0x817d7f95, 0x1a8181cc, 0xe6aa0f7f, +0x1e454a7f, 0x1c98818c, 0x2b5ef528, 0x7f2b7f5a, 0x4ec7adbe, 0x6981f15d, 0x7f1d4b23, 0x0f337fb9, +0x1db1b85f, 0xdb7fbe3d, 0x1eb3aff5, 0xba2a7f96, 0xd6a0761a, 0xa97f117f, 0x341781f3, 0x9981817f, +0x56dd7f44, 0x431981b3, 0x115d7f7b, 0x7f5a818b, 0x943248aa, 0xb78a45db, 0xc2046981, 0x7a330a6f, +0xedb7817f, 0x81198df6, 0x29e37f9d, 0x74818143, 0xe5147f86, 0x7fac4869, 0x39e2bb4a, 0x422b983d, +0xdb949bec, 0x2c7f5181, 0xe7539781, 0x7f818ca1, 0x2e815f79, 0x2b199727, 0x28ea3181, 0xe65c7fa5, +0xfda2810a, 0x7b227ff5, 0xc259caa9, 0xdf9e7f4e, 0x1c7fbc49, 0xce892118, 0x083b81e0, 0x7081b06f, +0x0c7f507f, 0x637fae31, 0xf2967f40, 0x810181fa, 0x0878b37f, 0x98817f4d, 0xdaa946cb, 0xae813eb4, +0x67bb97b4, 0xe156b2a5, 0xc97f6dcc, 0xa90bd681, 0x14817f81, 0x7f816058, 0x1d587f76, 0xea425425, +0xd62e4781, 0x477fdca5, 0xcb667f7f, 0x8169daa2, 0x631cc881, 0x1381fb76, 0xf27f817f, 0xb297434b, +0x3b1d7bae, 0x89b8d27f, 0x947f8bbe, 0xbc81d770, 0xf94f0724, 0x3cdc817e, 0xf981817f, 0x1b75245a, +0xab20818c, 0xf2811c81, 0xfc78c27f, 0xb77f0881, 0x838132d7, 0x20818381, 0x127f815a, 0x8e9e267f, +0x1fa87fc5, 0x1da2bf47, 0x0f81939b, 0x118ba016, 0x52c07c6d, 0x697fdd00, 0x0e617f92, 0xab812b56, +0xce754988, 0x5e8181db, 0x22814b35, 0xfda981f5, 0xa981e85b, 0x477f93e0, 0x1e73d681, 0xdc748147, +0x01a1479b, 0x6d4e8164, 0xd37f8b7f, 0xbf6b7f6b, 0xc68110bc, 0x81819156, 0x257f497c, 0xb07fa0c3, +0xcac10d73, 0x39b9284d, 0xad817fd5, 0x7f8a5781, 0xe37f7f2d, 0x7f7fe7d7, 0x186d2c39, 0xc4813181, +0x0ac8a47f, 0x569d4cd2, 0x7f3d597f, 0xb87fbe59, 0x35117742, 0xc18c7fa7, 0xb03bd7ce, 0xd77f6829, +0xec3a7281, 0x81285968, 0xf8a69dd1, 0x81e38181, 0x4e1dbe77, 0x783d4e9e, 0x50bc7f85, 0x4181dd76, +0xa61a4bac, 0x627dd07f, 0xa08168a3, 0xfefdde94, 0xcdc19d05, 0x39817f7f, 0x18d78181, 0x631681cd, +0x91d2e1ac, 0xd2bfce81, 0x01f1817f, 0x810681d0, 0xccc18c5a, 0x7f0b617f, 0x563a8345, 0x917f26e1, +0x87147fad, 0x88161a8c, 0x1481928f, 0x5c96b841, 0x1b7ff206, 0xf1bf4759, 0xc6efdc5b, 0xa1078181, +0xf52b428b, 0x817f7f10, 0xdcd1c062, 0x0c863afa, 0xe26a27b1, 0xbd5a967f, 0xe8047fa4, 0x5be68181, +0xef7b2e81, 
0x60f47ff1, 0xf9761c7f, 0x8f297a94, 0x2c81096a, 0x7fc40ec0, 0xbb7fb3b8, 0x4494cae8, +0x5847c916, 0x7d728183, 0xe74d7f81, 0x818d426b, 0x0006ba4d, 0xb4bd9ba6, 0xe00a813e, 0x68032681, +0x18877f81, 0xb46281d3, 0x3381113e, 0x6a7f57f7, 0xe2a7194d, 0xcb7f7fca, 0x62c66981, 0x5181e082, +0x6acdde81, 0xbcbbf27f, 0x3e3ea181, 0x81e6c17f, 0xf854097d, 0x307d58c8, 0x2e9e81a7, 0x93ce81ce, +0xcad4d5a2, 0x1272b081, 0xb84ccf3b, 0x007f9d7f, 0xe294efb0, 0x830c7fb7, 0xca07b17f, 0x3b9a88bc, +0x09350bd4, 0xae007f48, 0x3a6a8181, 0x38777f48, 0xce7f7f12, 0x9781570e, 0x3766817f, 0x4b607f7f, +0xe38c2477, 0x9cc64b35, 0x157f0742, 0x6881d981, 0x53c88c81, 0x9f8974ae, 0xa77f8319, 0xfb527fbc, +0xdf865f22, 0x407f8117, 0x5f5a4c24, 0x14207f77, 0xbfe8e081, 0x667f7f58, 0x31f17f37, 0x467f307f, +0xed7f7f48, 0x2bc96b81, 0x04816572, 0x8137a081, 0xea407b02, 0x7f06814a, 0x097c819a, 0x3ef22003, +0x077d1469, 0x32e06181, 0x1e7fa7de, 0xc7508658, 0x2d58ddad, 0x921d7e79, 0x3af381d7, 0x31b971a9, +0xa7b38de5, 0xf274be04, 0x9690d41d, 0x857f4dff, 0xcf7f5cb0, 0x8117bc7f, 0x1b7f4a91, 0x288181d6, +0x3f4d7b12, 0x7f7f979c, 0x4581b06d, 0x7faf7f81, 0xaf1dac81, 0x4d068bb7, 0x367f50bc, 0xdb7fbd41, +0x4f7f7f38, 0x81ac15f8, 0x2c3725a9, 0x98577f4e, 0xb20f8197, 0x6f816981, 0x26c87aa2, 0x1781c2a1, +0xf5449d24, 0x14cc2651, 0xe33d7f7a, 0x4d7f7ff2, 0x3554512e, 0x25c23b96, 0xde7f8d7f, 0x44bc34b3, +0x2a7f3050, 0x1aec3857, 0xada98181, 0x2644d0ee, 0xcbc2333e, 0x7f8e7f87, 0x4aa03181, 0x11b736b7, +0x3abb2381, 0x2f96817f, 0xe1476381, 0x83ca2b85, 0xfc3281ba, 0xa0d62222, 0xefbe354f, 0xa022be81, +0xd77fb435, 0x17538367, 0xee85d87f, 0xa4a66071, 0xc8402561, 0x818a277f, 0x9d4ddd7f, 0x23bf327f, +0x32ec1b81, 0x8c138d68, 0x237f1a15, 0x4a0e2b7f, 0x5b7f4981, 0xba747f29, 0xa1d37848, 0x0c7f03df, +0x923f1d7f, 0x8faa7c7b, 0x35068194, 0x60d2f465, 0x27606681, 0x7fc37f8a, 0x31bd3bd5, 0xcf58445f, +0xdcce241b, 0x88d9816c, 0xb9b2a7d2, 0x2205817f, 0x01ec81af, 0x4581c8a8, 0xdb0f1b6f, 0xca0628cd, +0xf63881c7, 0x7fa3697f, 0x5246567f, 0xb77f7fa1, 0x297f897f, 0x4cb8c84b, 0xce9e977f, 0xbf811e0a, +0x219d746e, 0x7adcfa81, 0xbcce1a61, 0x28987681, 0xfa7faeeb, 0xdb454581, 0xa5a5919d, 0xbf85a57f, +0x34b68581, 0x925c812d, 0x189a4c7f, 0x3ad03756, 0xf8c97f13, 0x97227f81, 0xfd7f070e, 0x7c7fd41e, +0xfe81b327, 0x81a6627f, 0x0016cf64, 0xded56665, 0xcc5fbd10, 0x81112f30, 0x4c6c877f, 0xefb9b53f, +0x33bc2181, 0x5e797cc7, 0x2a10c047, 0x917a7f81, 0x3a81817f, 0x8197ccbd, 0x73df4f32, 0x264f522b, +0xc1e9528f, 0xbb494681, 0xfbde6eab, 0x4f7f280b, 0x987a7f1d, 0x167f1f7f, 0x0b817f81, 0x8124a87f, +0x927f8181, 0x7f81ad86, 0xc779dc81, 0xd0f681fb, 0xcfd8fb81, 0x19cf4981, 0xb87f4bf5, 0x4893529b, +0x0040eb60, 0x7fc78bae, 0x474455dc, 0x0b7f7f7f, 0xa92f7f70, 0x7f3d9c73, 0x137fc699, 0xb4a3c6fa, +0xcb7f5c1d, 0x9c259c99, 0xdf5183d9, 0xa62181a9, 0x9a273761, 0xa5e8cd8b, 0x41b4bf1b, 0xa9818110, +0xb27f7f7f, 0x0e6bb7c4, 0x2e3e7f90, 0xa4c68172, 0xf73f7f7f, 0x81818164, 0x5299a157, 0x0a297f5c, +0x9a46f024, 0x8178d77f, 0x02818181, 0x8181025c, 0xd96828bb, 0x77644a05, 0xb77e4427, 0x407fa650, +0xab7f7f56, 0xaeeff9fe, 0x2f871d3b, 0xb8d7dd43, 0xe8817f7f, 0x7f4d0a27, 0x036a5b7f, 0xd4a3e67f, +0x9122dd23, 0x627fb09d, 0x055c6534, 0x0689a1b3, 0x4f484a7f, 0x7f77b1e5, 0xc47f9d06, 0xd381c37f, +0x3b819a7f, 0x4381827f, 0xed81817f, 0x95c3917f, 0x1b1e9ff3, 0x4c98cf7a, 0x81927f81, 0x703fd644, +0xed5db0d6, 0x23225674, 0xf95a8187, 0xe6ac7f32, 0x488150e5, 0xf6b5e281, 0x29810449, 0xfdada97f, +0xa6a19781, 0xfe36879a, 0xc96ac845, 0x3bf937bf, 0xa2812172, 0x84203765, 0x5f9e3403, 0x017f6a34, +0x2cc6607f, 0x99877f7b, 0x2ad981d2, 0x81933581, 0xdf81338a, 0xd478056f, 
0xfa7fa571, 0x1d818352, +0x17c9db81, 0x9c7f7f52, 0xdf9bd4b1, 0x417f98a5, 0xca5517df, 0xc44b22d0, 0x58727f9e, 0x28817fd5, +0x477f2fb3, 0x8b3f7f59, 0x0940765f, 0xa970b281, 0x0a0a7828, 0x81319e7f, 0x2dc34181, 0x4081668d, +0xe7817fcb, 0x3641753a, 0xc17f3d81, 0xb88154b1, 0x73246eb5, 0xb0db8186, 0x09a2ec6a, 0xd82e2c71, +0x336f7f81, 0xd6817f4e, 0x43814a81, 0xc579811f, 0xc0812803, 0x447fa1e0, 0x81b01b7f, 0x752152b3, +0x4f15817f, 0xf97f61c0, 0xc99bd9b4, 0x2281307f, 0x607f8181, 0x7342b07f, 0x06bd9181, 0xb8b8bac5, +0x3b3ca425, 0x8175e89b, 0xa50d7f4a, 0xbb56b781, 0x817fca6c, 0xac0fa7e8, 0x8e7894b7, 0x1d05cf50, +0xf34f5b7e, 0xb65332e5, 0xc76e9375, 0x66d058a1, 0xe5a09f7f, 0xe493eaa5, 0x05fb81b5, 0x33817f14, +0xee418105, 0x81a16f2c, 0x367fa849, 0x7aedbecc, 0x100f81c4, 0x81b2fb3c, 0xcc7fb181, 0x1b3ff096, +0xd3b03a81, 0x4b815f75, 0x497f9c7f, 0x81015625, 0xa0817f81, 0xb0b8167f, 0x7eb7a3fe, 0xe97f6585, +0x555a877f, 0x4b7f8181, 0x12a95e4a, 0xcb6f5afe, 0xd87b5fad, 0xbe655fa0, 0x8b81300b, 0xccb26381, +0x39868173, 0xd6817fa0, 0xcf684e81, 0x7f3bae43, 0xfd8c20a8, 0xcfa0a31f, 0xc0307f81, 0x4e7f5d84, +0xeb941e81, 0x6f77d070, 0xfa0b7f7f, 0x3cb88193, 0xf6818163, 0x3d5f8fbf, 0x40c4c73b, 0x4b7f4e4b, +0xae968181, 0x818181b2, 0x81148181, 0x6b325381, 0xeae5582e, 0x7630307f, 0x1c0f6ba9, 0xa54a0457, +0xd37fc693, 0x2345817f, 0xe44f1e7f, 0x3e4cc9cc, 0x3081babf, 0x817f45c7, 0x5265e57f, 0x427f81ca, +0xe451817f, 0x8190b7de, 0xf0718981, 0x7f9ad381, 0xb9645da5, 0x7f7bed3e, 0x5ce37af9, 0x307f4ecb, +0x0a7f8135, 0x81ffa7a0, 0x534fa1a5, 0x7f42d641, 0x0e501e61, 0x7f7f6edc, 0xc859c081, 0x377f85f7, +0x0df9ab94, 0x817f7f7f, 0xfe7f7f81, 0xc1327ff4, 0xe84c842c, 0x7f1e5bc4, 0x1da77cf1, 0x1e817f53, +0x5f9ab07f, 0x86209e4a, 0x204e81cb, 0x33c9928d, 0x275a2d8d, 0x7f810281, 0xfe7ff7ba, 0xfab77fea, +0x658145e5, 0x73d57fd6, 0x01ec097f, 0x6d83207f, 0x49810cb5, 0xb5d9ce39, 0xf9e0436a, 0x629c81ed, +0x48cf2c8c, 0x7fe37f7d, 0x5b257d81, 0x765f50b6, 0x2cc98403, 0x817fbebf, 0x127114c7, 0x347f287f, +0xd2d72990, 0x81099d9a, 0x8143cac5, 0x8c452fc2, 0xd27f8f6e, 0x61957f25, 0xbc6fc54d, 0xce81f31f, +0xd381787f, 0xac0e5156, 0xf081afc4, 0xb47f6f2f, 0xef811a81, 0x015ee99c, 0xd46de9d9, 0x30655a6d, +0x09b2b87e, 0xaf13753a, 0xc6dd2a7d, 0xb9bd8190, 0x81537f7f, 0x669ff8ed, 0x5b810c81, 0x42e5a566, +0x0c4174cd, 0xbdf9b7bc, 0x1c118e65, 0xe28f9b2a, 0x8c207f3a, 0x1e7f7ff4, 0xd6b57f1c, 0xf0131edf, +0x38c9a381, 0x7f40817f, 0xb735817d, 0xa1a864de, 0xd99cbf57, 0x811a81f5, 0xb381e381, 0xe88e7fb6, +0xfb4a5b89, 0x7fc006a3, 0x1c81653d, 0xbceaa09c, 0x8d7799d7, 0x4b407a81, 0xdc81bca1, 0x3437d76c, +0xd79fe5c9, 0xd9adeea8, 0x163e8164, 0x2cc1b281, 0x3e81e9f1, 0x936504c9, 0x047f6f7f, 0xf981a776, +0x203b2d68, 0x81dde8d1, 0x13e9e9bd, 0xe04108a3, 0x50304281, 0x486d7fb7, 0xbbff81ce, 0x31d43aa3, +0x21813c7f, 0x81c8f989, 0x7f481024, 0x0a979b6a, 0x010081d7, 0x7792e641, 0x0654d37f, 0xa53da946, +0x96c781d2, 0x41524ca6, 0x2ab2d581, 0x1c65bb79, 0xaaa494af, 0x81e09681, 0x3d7facf5, 0x04c3818d, +0x2c1461c7, 0x788581d9, 0x1d3f2081, 0x49dfaec2, 0xfdf07f61, 0x40396d32, 0xc8835fbf, 0x7fb19f54, +0x2fcc8142, 0xe7dcb4e1, 0x73c3a807, 0x7fc68137, 0x061e81ac, 0x81c70c7f, 0x1d817f73, 0xfdbace27, +0xa5fa4081, 0x9b5575ca, 0xee81cc4b, 0xe85a7f53, 0x8daff2e0, 0xd1d1067b, 0x98ec3662, 0xa5818253, +0xf8266d26, 0x38da26ac, 0x3f98912e, 0x4a10f144, 0xd845a345, 0x811bd786, 0x4b137f7f, 0x3427fa81, +0xe981eb9f, 0x8f4ce6e7, 0x277f8181, 0xb0a5b03b, 0xd0633788, 0x4781b17f, 0xb27f3ee7, 0xd37f30b6, +0x21c3a15f, 0x0492a4d0, 0x097a6c7f, 0x047f1c81, 0x994b7f81, 0x818ea981, 0xd46d7fca, 0x2b8a5381, +0x9d424d91, 0x814d7f81, 0x32aa367f, 
0x28ff1b73, 0xbea1dbde, 0x76db7f86, 0x248a8d37, 0x294b8136, +0x6e8142d6, 0xec65687f, 0x66297f57, 0xdfb563b3, 0xd352ee79, 0x0689d45c, 0xc58123cb, 0x0b62bf8c, +0x53c6d80b, 0x7fbc9773, 0xe4596e21, 0x2584812f, 0x1793b781, 0xaa815927, 0xb8659035, 0xccbe2281, +0x5eabb4c9, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0xb339946b, 0xd7497238, 0xc36e8181, 0xd0816d3e, 0x7f7f2681, 0x6709bf21, 0x92e3bf81, 0xe54358cc, +0x6bb5a661, 0x3a7f81f9, 0x8140f0fb, 0xc5a67f1c, 0x8d3381a5, 0x817f81c4, 0x72817fd5, 0xa72c76fd, +0x68dc8b11, 0x9b5f4098, 0xc0f43965, 0x7f0ecf02, 0x7cfdd720, 0xe1a0ac15, 0x087fc3c6, 0x81b66e6a, +0x410a893b, 0x81c491f4, 0x81d87033, 0x2056562a, 0x7f526cd8, 0x817fe90f, 0x81f78178, 0x81d93134, +0x3699e0a2, 0x7fa181c9, 0x81f64081, 0x8f816b2e, 0x816ece59, 0xd548fd2c, 0xc4ee3919, 0x6d847f16, +0x81e423c5, 0xfade4a40, 0x9e29eeb9, 0x7fe450f5, 0x5207177f, 0xf17f710b, 0x8cf291ad, 0xd09f9cf0, +0x7f022d20, 0xb7ea81c6, 0xbb89cfa5, 0x1c6fd006, 0x7f8481dd, 0x81020914, 0xb14c81e9, 0x4d9b4f08, +0x7f6064c8, 0x817f7fcc, 0x6b9819e9, 0xb253e7df, 0x724368fa, 0x816fe408, 0x7481a204, 0x0e6fec58, +0x70449617, 0xf5a1da3f, 0x7f446e7f, 0x1005abce, 0x38664981, 0x8240472c, 0x10f52409, 0x818186dc, +0xf7810be2, 0x9c2ad304, 0x81817f62, 0xcc40a43d, 0x5e6b81e7, 0x7ff0e2fb, 0x812a7f7f, 0xe3017f51, +0x817f24dc, 0x85c49997, 0x863c7024, 0x7fc0ce3b, 0x637f8c81, 0x814c9dbe, 0x8ea08156, 0x2cf47f9f, +0x5b55a9b1, 0x107d7546, 0xcd0381b9, 0xd8617f42, 0x7f81b8f1, 0xb506abd3, 0x745f2b2b, 0x7f81b7d5, +0xbf7fdb20, 0x1ca3a610, 0x7fe5e9af, 0x20b37f08, 0x810e6f81, 0x9b7fe6ad, 0x1aa35d81, 0x46032cc7, +0xad7f7f77, 0x8bd48245, 0x3dffaa81, 0x507f91d9, 0x907f9a25, 0x404b81b7, 0xc67f7f97, 0xb1eae864, +0xed2a18c5, 0x2ea85ef3, 0x8153434e, 0x816daaf3, 0x7f03ad67, 0xa0ec264e, 0x640ca1e5, 0xf0818f2b, +0xcbd27ff7, 0xa6a39206, 0x4ca811b9, 0x4c57a068, 0x8647e4c9, 0x7f438ad2, 0x7f945029, 0xc168e72c, +0x7fed4781, 0x81f706cd, 0x62810667, 0x177f7f32, 0xb105d09c, 0x81fa7f16, 0x7f11fa53, 0xc8a6430b, +0xececbb81, 0xe82644a6, 0x3e82f381, 0x28817feb, 0x90c18135, 0xd0f5810f, 0x507f0ba3, 0xfeb38808, +0x8e6c66ea, 0x70a0c4c8, 0x8146912c, 0xb8e01cb1, 0x2b817f51, 0x64818207, 0x9f3a4e81, 0x7f4aece1, +0x84aeafb6, 0xa17f8188, 0x9a7fe759, 0x1cbf5681, 0xad7f6bad, 0x818eebdf, 0x438181e9, 0xa7d07f22, +0x75dbd318, 0x81817f3f, 0x5457bd92, 0x4b204ffc, 0x7f1c637f, 0x8b4c7ffe, 0xa4d97fe8, 0x6194ca37, +0xb06c7f06, 0xad2f1ac0, 0xaef07f7f, 0x15818101, 0x22bf8d7f, 0x597f35cd, 0xb155ab0c, 0xc2838b0f, +0x0f81f4e6, 0x181f7f81, 0x31e1817f, 0x6a2846c0, 0xede8048e, 0x2952c3e3, 0x09187fda, 0xce411cce, +0xbbc197b5, 0x8181464a, 0x161922d0, 0xc37f292f, 0xbd736439, 0xc881b6fb, 0x81ba557f, 0x9da521f0, +0x3e7f52dc, 0x3b3a813c, 0x7f544bc3, 0x46b1b3fc, 0xbeb18e34, 0x418156e1, 0xab22e318, 0x7fbe66e3, +0x2237bfb9, 0x010081be, 0xc159b8df, 0xf449b264, 0x017f9e82, 0x9dbd7ff1, 0x09c7cbd7, 0x81bc537f, +0x817fe1c7, 0x7f177f60, 0x7474bcdf, 0x3c7f6f70, 0x7f7f987f, 0x81812ecc, 0xca783f94, 0x64cc4da9, +0xa0ba1d0a, 0x7aeec524, 0x43bb6b81, 0x1a338981, 0x206be419, 0x65819d5a, 0x7f81d73f, 0x81a67f36, +0x22810442, 0x7fd4cef9, 0x6067ad85, 0xd6d17f30, 0xea7f8166, 0x457f7f84, 0x817f1bc8, 0x6b86be00, +0x7f8bc555, 0x519d5bfa, 0xd07f97e4, 0x505c81d4, 0xf1eb8115, 0x72bdd002, 0x9da57f77, 0x817f12cc, +0x81c03652, 0x441872fb, 0x554bdf67, 0x2e46f0d1, 0x81817f8d, 0x7ff5ce55, 0xd2ded97f, 0x817f67af, +0x0539c336, 0xa37f2f7f, 0x8150637f, 0x8156d068, 0x827fbda5, 0x9f176b2a, 0x81878c0d, 0x81117f07, +0xc0d15559, 0x869c81c4, 0x7fa498da, 0x46d5d102, 0x81f7a581, 0x816eca47, 0x67ad9d95, 0xccb49c2e, 
+0x297c5440, 0x817f7cda, 0x9f55bd08, 0x2b7fc490, 0x817f817f, 0x377a8158, 0x91b5bf7f, 0xc87f4f21, +0x977fc1d9, 0x7f3088f4, 0xe938b6ca, 0x41707434, 0x81a27fd5, 0xcf818116, 0x817f887f, 0x09cf3bc9, +0x814edf19, 0x46206509, 0x34a36c81, 0x313fc7da, 0x92f34a81, 0x21e942d8, 0x365c16d1, 0x68907ff1, +0xcb8190e3, 0x5e7f7fa3, 0x6fb78c98, 0xa1444250, 0x812bb37f, 0x83a26f0d, 0x739bab07, 0xcb2cea1d, +0x668174ef, 0x288b8965, 0x95849a7f, 0xdc0981b0, 0xd875297f, 0x8153d5b0, 0x9d81e6dd, 0xae81d259, +0xd981813d, 0x7f64ed35, 0x1f459b0f, 0xb8ebe4e5, 0x72812e81, 0x81909b54, 0x02f54fb3, 0xa87281f4, +0x7f6381cc, 0x7f3cab5b, 0xb1958152, 0x814b9bfa, 0x81312b5e, 0xc2977f23, 0x347fbb97, 0x3b057fe6, +0x40208144, 0x7f817fad, 0x54cb9deb, 0x37b34a28, 0x81781b81, 0x7f1b8130, 0x81e194f1, 0x882b7f1f, +0x81dfc90a, 0xe4a97fa6, 0x9e81ab40, 0x971c2535, 0x5a7f7f3d, 0x47708116, 0xab778160, 0x81ac57e3, +0xc2854dec, 0x81ea81a1, 0x8b347f7f, 0xe981d130, 0x93c70917, 0x817f812d, 0x81925016, 0x7f813953, +0x52a5a5fc, 0x7a33d8e1, 0xcbd80281, 0x79df8101, 0x797afc87, 0x7581bbf0, 0x81567f94, 0x472ad4cd, +0x23e49434, 0x4c7ffc03, 0x41b0717f, 0xcc67b7bc, 0x7f7f48bd, 0x8c1c81b9, 0x3a460d72, 0x9eb5fa34, +0x03530615, 0x6c7f7754, 0x818167d3, 0x7fc22100, 0x7f3da881, 0xa24e7f61, 0x321bb411, 0xd981c827, +0x76e8814a, 0x69ca13fb, 0x1390b881, 0xb1d08157, 0x7f4669a2, 0x7f237f13, 0x30268178, 0x0eb87f34, +0xee1aaa30, 0x816596e1, 0x31813181, 0x88888243, 0x817de70e, 0x84811713, 0xd74d7f52, 0x06b17f35, +0xed347f1e, 0x66d66058, 0x7f817fee, 0x035edde8, 0x819b18a6, 0x8e8198ee, 0x817fb5f5, 0x311e442b, +0x957f941d, 0x4e6aa649, 0x7f677f41, 0x8d876acb, 0x7e7bb58f, 0x44c27fce, 0x6f39ec5e, 0x7f7f81d2, +0xd43ba60f, 0x488150c8, 0x8181e9e8, 0xfe46ec02, 0xa2cc7fd8, 0x7f810781, 0x7f30c356, 0x22c97fc5, +0xbcc97f40, 0xf2b2ce1e, 0x7b59537f, 0x46d994ea, 0x4ae2273a, 0x7f36d92d, 0x818ad281, 0xbf7f2b7f, +0x8181abf8, 0x924f4eb0, 0xa62c81b6, 0x812b99aa, 0x447f817f, 0x2c7f5746, 0x0859b34d, 0x83e652db, +0x7fc781ca, 0x819e5cfe, 0x55c5d07f, 0xb4bb3dff, 0xc24b8178, 0x93d28123, 0x15e63a7f, 0x7fc87f40, +0x812fd94c, 0x991b8170, 0xb13593c6, 0x3b9a5119, 0x3b752581, 0x20b89686, 0xcac17f7f, 0x81237f6f, +0x99ec643e, 0x02813100, 0x81124724, 0x3455436b, 0x8155cc81, 0x68136028, 0x81517f5d, 0xc57481f6, +0xe4c88159, 0x81fba227, 0xaa814381, 0xb3818ebc, 0x457f8b51, 0x118181a1, 0x575b7fe7, 0x53571239, +0xa7d35d43, 0x4b577f89, 0x1f817fc2, 0x6c7f9609, 0x9714813b, 0x81d723b6, 0x6c7f377f, 0xa3cf6826, +0x08b27f45, 0xbf7f8184, 0x814de5a0, 0x8181a2fd, 0x167f3c70, 0x8a7fa9e4, 0x65ac4b7f, 0x7fd367c5, +0x81112d7f, 0x8170811d, 0x817f589e, 0x8b7f7f44, 0xba2ab081, 0x527f5bf6, 0x2faad6e3, 0xc335a82e, +0xb862c003, 0x81715776, 0x5081817f, 0xdd7585c0, 0xb7989d7c, 0x367f81b1, 0xa0818129, 0xc6f37f55, +0x5006be1d, 0x817faac3, 0x7e667f81, 0xaf2c7f99, 0x347f897f, 0x2bb1aac8, 0x8112eee8, 0x7f5c62de, +0x007f14d1, 0xa1a2140b, 0x95446419, 0x5e7fc82a, 0x7a7ff07f, 0x81fe7d31, 0x010a8149, 0x7f3e7fb6, +0x397f81ae, 0x7b7f7fba, 0x797f5981, 0x7f7f170a, 0xb9cf087f, 0x0be6cb28, 0xdd97cf4e, 0x92520625, +0x817f3ae4, 0x6c81c381, 0x440cc5ae, 0x2b7fb8b7, 0xe94a2081, 0x81967f4a, 0x7f817f9d, 0x7f81a7a1, +0x815b52cd, 0xd59d57f6, 0x899e827f, 0x7f7f9aa0, 0x52819e7f, 0xda3a2223, 0x8b81073c, 0x818132d0, +0x55638800, 0x9804bcb5, 0xc3810a7f, 0x6ddab6e3, 0x7fcae34b, 0x7f814a2d, 0xea7fed91, 0x3b9bb0d9, +0x87d97f1e, 0x16535739, 0xcc2f7f81, 0x7f98813a, 0x5873bbe6, 0x241781f3, 0xbcc7cb33, 0x7ff69847, +0x8d11c0dd, 0x9ac9d787, 0x66cff992, 0x7f7fc312, 0xaa4fe681, 0x39967f26, 0x201c7f7f, 0x9a7f4849, +0x5768a814, 0x814d731b, 0x81917f81, 0x6eea7d7f, 0xad7f65fe, 
0x3d3e81d3, 0xae672981, 0x7f1c81a0, +0x9881053a, 0x81a16307, 0xf64c4063, 0xbc7b97d9, 0xa29dc0a1, 0x8161189c, 0x7478b3db, 0x87b330e1, +0x817f0598, 0xcf422b0b, 0xc29b7ac8, 0xc2817faf, 0x8185581c, 0x81d0b675, 0xc93f7fa9, 0x814e3a02, +0x815d6511, 0x5d81075f, 0xd768c13e, 0x89815437, 0xac810d85, 0x1a7fa5e9, 0x14a6bb81, 0x54531dfd, +0x81dafd24, 0x978133dd, 0x818130be, 0xb67f5bb3, 0x2c7f4179, 0x81b281f4, 0xf3a463fb, 0xda36c4d4, +0xb17f811d, 0x37ca75f1, 0x53257f7f, 0x3ef0208b, 0xdc71817f, 0x978181ef, 0x7f81d17f, 0x1197810f, +0x277f7f47, 0x7ff6b1b0, 0x4562fca6, 0x725d89e3, 0x888197cf, 0x05477fd3, 0x4988077f, 0x81cbb5d9, +0x047fcaa8, 0xdb7f7fc9, 0x81ccfe5f, 0xd91643ff, 0x7f3f7f7f, 0x03815ed4, 0x697c7f81, 0x815b814d, +0x287f0d32, 0x7881a041, 0x247f95f4, 0xc87048e8, 0x3d815e7f, 0x71ba7f0b, 0x50815075, 0x7e53dc30, +0x3f7755fe, 0x1d032571, 0x818cbaf0, 0xbda97f50, 0x7ffc0aad, 0xbf457f42, 0x66b48f81, 0x072c3f1c, +0x433d9cfc, 0x71077f76, 0xac2d5564, 0xdc523811, 0x7f54a538, 0x7f707f00, 0x8c10a6f1, 0xf4a581d3, +0xae875d1d, 0xd37f9747, 0x8124b869, 0x81a17745, 0x61bb8b7f, 0x227f8129, 0xa77fc381, 0x997f8119, +0x817f812b, 0x81a78836, 0xdd557fc6, 0x907f6418, 0xbfa9accd, 0x4f815762, 0x97818181, 0xa0524fa6, +0x815b5721, 0xc77f7f7f, 0x7f725efb, 0x437f614e, 0x81f15c6f, 0x7fb77f28, 0x473f7a1c, 0x7f81d71b, +0x7f5133e9, 0x7cd17f32, 0xfb7572e3, 0x16c35e62, 0xc961a649, 0xdec47c37, 0x6d442844, 0x386b4616, +0x9583c437, 0x81d57fae, 0x8114357f, 0x818f9db4, 0x7ff181d7, 0x773fb601, 0x4032fb29, 0x8613b71f, +0x7f81b3f4, 0x1256f605, 0x297f817f, 0x9a7f2996, 0x5d81b87f, 0xc8907ffd, 0x817f7f35, 0xf6719ac7, +0x7b7f523d, 0x18d8ca2e, 0x2681b97f, 0xc02a810d, 0x34cda181, 0x32e081bc, 0x016f7f08, 0x461bdaec, +0x04330900, 0x633f61ac, 0xa3815204, 0xa534dda9, 0x7f81814f, 0x91a66329, 0x7f51aacd, 0x7ff681a7, +0xabe476e8, 0xb57f9cae, 0x177f27c0, 0x7f2f8b0a, 0x1281a9ba, 0xd991af49, 0xea527f82, 0x9be09d46, +0x28b2a2bc, 0x81c8a1d6, 0x5e1f4e71, 0x817f812e, 0x28b981cc, 0xb071cf04, 0x82cb8139, 0x81608181, +0x819381ae, 0x818f90c8, 0x817f64c6, 0xd169c7c9, 0x8f548381, 0x87f7b03e, 0x6657817f, 0x81be8102, +0x0d078c5f, 0x7ff97fd4, 0x7f814981, 0xb3bb7faf, 0x37c3dd6b, 0x4c6930b7, 0xb56ba479, 0xfc82520f, +0x3494f934, 0xa2016c53, 0xc513eeba, 0x8c283bcf, 0x7fa6047f, 0x54319236, 0x42b2c19c, 0xf965c947, +0x60777fca, 0x02767f47, 0x4921bb04, 0x81818fa9, 0x8eab7f5d, 0x81f4aa14, 0x1abdf97f, 0xbdaa220d, +0xac368139, 0xddb1c212, 0x81d9fd99, 0x90b77f3f, 0x029ec8ca, 0x7f93d9db, 0x8563627f, 0xc5b72d52, +0x2e6eacc0, 0x7f66815b, 0x50db375b, 0x4529813f, 0x7f8d817f, 0x44486f52, 0xbcd64c9a, 0x7fd2bf33, +0x43507f58, 0x7281a4d3, 0x7ca82149, 0x29817fd0, 0x653544b9, 0x88c681a1, 0xd93c2bfe, 0x530835cf, +0x5be26556, 0x7fb37fd1, 0x81e68174, 0xe88c0c42, 0xb63b817f, 0x394a81c9, 0x407f8107, 0x747f1f52, +0x91816dcf, 0x5f818161, 0x8514aebe, 0x7f496bac, 0x5e818181, 0xe8e358ee, 0xae1a7f0b, 0xa4c97f05, +0x77a77f52, 0x818160fd, 0x6bab7f81, 0xcec7e121, 0xf4810196, 0x4a7f68e3, 0xe56d5a7d, 0x813d2c6b, +0x7d0b7ff5, 0x7bbd6760, 0x67812b81, 0x188197d7, 0x8146057f, 0x811eff1d, 0x818ecc4d, 0x4a786245, +0x72593331, 0x2e81556a, 0x66aad481, 0x94818128, 0xde89ba81, 0x4df07227, 0x7f73ff8f, 0x7f44b981, +0x9bf9e707, 0x46811bff, 0x85a96a36, 0xc13b7f15, 0x6670ba65, 0x81fcefee, 0xae8187b9, 0x73c3baa9, +0x819e8123, 0x9cfbc6aa, 0x7f89c73a, 0x7fde9812, 0x97136581, 0xc38181e6, 0x81de9c0c, 0x4994ab0e, +0x30977f06, 0x59dda72a, 0xc9f87fd1, 0x8186b4d7, 0xba86de81, 0x98a24c3b, 0x12157f3f, 0xd7674b1d, +0x81816042, 0x8da8926e, 0x397f81f6, 0x3dd281b1, 0x7f6570ba, 0x7f875412, 0xd74f5be9, 0xab81f207, +0x7feaa863, 0xab81da0d, 
0x7ae79023, 0x357f8111, 0x654a468e, 0x521c2306, 0x2f817f37, 0xdb818a41, +0x50b5b30c, 0x71817fdc, 0x7f7f7f04, 0xb13471fa, 0xd1843f7f, 0xb700ece6, 0x109064ea, 0x989b4b03, +0x27556fd8, 0x5ecee700, 0x5f5a2281, 0xa81c81dd, 0xac54b6bb, 0xe60f1ae8, 0x139dc4dc, 0x22818109, +0x2c7fc74f, 0xa69cf331, 0x9d272281, 0x7f9a6da8, 0xb1bb8f45, 0x81815a6e, 0x8315517b, 0x638181f0, +0x273db266, 0x90777f18, 0x047d763a, 0xbdf50a4e, 0x7f5f5c12, 0x817f7f53, 0xcb177385, 0xb2ca7f22, +0x818eaa17, 0x815d4c81, 0xa67fc281, 0x7fd856eb, 0x01becb84, 0xa8e2685d, 0x7fadbe7f, 0x1b7f6a05, +0xef47f495, 0x817f1b1b, 0x8c8d0b81, 0x57b9a5d4, 0xdc298159, 0x5e81a7e2, 0x81467f5b, 0x370f6df0, +0xc49c30a1, 0x7a545710, 0x974b81ff, 0x458c814a, 0x237f17dd, 0x427fb6da, 0x1a7fe5de, 0x817f8121, +0x817f1c96, 0x9c817f90, 0x0e7fd365, 0x818aa15b, 0x7fd302f2, 0xdb7c138e, 0x7fe68181, 0xbf818112, +0x7f2e7fc8, 0x81728101, 0x6482d830, 0x59e24b0b, 0x9b67c284, 0x6e811018, 0x81a42c32, 0xd87a4da2, +0x817f2d41, 0x3e2fbce2, 0xd1d44623, 0x7464b44c, 0xcc255f95, 0x56c93b38, 0xa87f1491, 0x04819c5c, +0x7f457fd8, 0x8d6bb866, 0x819c8181, 0xbc118166, 0x3d4c2a5f, 0x7a6d51fe, 0x79065067, 0x8181d1fb, +0xff6f2b08, 0xf5be7f35, 0x9fa567f5, 0xabb581b4, 0x81819a5e, 0x7f8187c5, 0xaa7f5669, 0xdf526044, +0x7f6ea5f5, 0x7f3a68c7, 0x4feb7fb5, 0x627f2850, 0x39f49181, 0x71ec22ea, 0xc17f4c44, 0x56f4db23, +0x7f08cc95, 0x977fde95, 0x7f4f7f81, 0x81e07f1c, 0x5e7c7fe9, 0x1c3534ce, 0x7f907f2b, 0x7f83418b, +0xeb81daec, 0xa6fc892a, 0x757f7ffb, 0x6281cc2c, 0xb826cecb, 0xc7810a2e, 0x60d1906e, 0x7f8175ed, +0xd085cb36, 0x88c4811a, 0x5e3e707f, 0xaba57fff, 0x099c127a, 0xbac67fb7, 0xa9817d82, 0x4f6c7faf, +0x43d17a0b, 0x92a13dc8, 0x81995d8d, 0xab7f90fd, 0x5f1f8189, 0x767f8101, 0xb7caa2bc, 0x7f8177cd, +0xe0655857, 0x81cfebeb, 0x81af6fc0, 0x57ac6dae, 0x437f8176, 0x513c60fb, 0x9ef3c081, 0x78d0d12e, +0xa40c7f0e, 0x438101c0, 0x81816405, 0x6f527f04, 0x8d0fa47f, 0x6d7fbb7f, 0x39187f74, 0x3c7f5f06, +0x677f7fdc, 0x8114250d, 0xe08373c7, 0x9c7f7dd8, 0x816341a3, 0xdf7f073e, 0x7f818174, 0xca84e8ff, +0xd6ff70ef, 0x7fa1a3c7, 0x7f7f8148, 0x81de7fa2, 0x4d323671, 0x5aaafbff, 0x05ac7f7f, 0xd58102e8, +0xad6c45e4, 0x81677f27, 0x818132ce, 0x7fa55a1c, 0x553029ce, 0x81a47f06, 0x81ca6189, 0x5cd581af, +0x70f34733, 0x676d431f, 0x81387f66, 0xef916df0, 0x72099950, 0x867f81fe, 0xaa7f657f, 0xd7521bef, +0x81d573c6, 0x7f76bcd6, 0xaba4c10b, 0xe394b7d7, 0x82579781, 0x88f5715a, 0xf7815cda, 0x65817727, +0xeab07f22, 0xbb5f7c40, 0xc0a48139, 0x95442a2d, 0x857f7d81, 0xcd7a81ee, 0x7971819e, 0x3a31f0fe, +0x537f6181, 0xc307bed6, 0x8936627f, 0x98b18136, 0xd07c817f, 0x7f7f8126, 0x133a607f, 0x98d40015, +0x368a8123, 0xe181501d, 0xc9817681, 0xa27b2e00, 0x47dd81e1, 0x0e7f81d3, 0xf3287fce, 0x7f3400c9, +0x8d65c745, 0x4c8148c4, 0x21817f7f, 0x81ac0af0, 0x4462c1fa, 0xa99e4e8e, 0x7f153ad8, 0x8168bb47, +0x8370e19d, 0x7f3281ca, 0xf2437283, 0x667f279d, 0x9cb4c17f, 0xde814315, 0x72d3a17f, 0xac113b66, +0x6c13abef, 0x7f816cb5, 0xc8c7c37f, 0xa7d1862e, 0x8b7fb87f, 0x9dd9a384, 0xb69b22f0, 0x157f710a, +0x9007c13f, 0x4518caf0, 0x836c4246, 0x03f981d1, 0x817f7f7f, 0x4581b92f, 0xb4718193, 0xa8815e05, +0x437f81fe, 0x52817fdd, 0xb1477f54, 0xc12e3cba, 0x7f81815f, 0x4e737f32, 0x817fc141, 0xa67f7f58, +0xd19dab37, 0x8b9eb14b, 0x7fe8be30, 0x905d7fba, 0x16ae520b, 0x81015d10, 0x71812450, 0x1de37f1b, +0x811fa6af, 0xdf77b8e9, 0x647ff1bf, 0x17be7fbb, 0x817f6224, 0x8115e99f, 0x7fca7f7f, 0xd48f0fa2, +0xda7fc4ac, 0x10649756, 0x2f89af26, 0xdb5f3ec4, 0x817fde7f, 0xbde94df1, 0x9a707fb7, 0x7f7a656b, +0xfad88c3f, 0x7ccc7f5c, 0x7f8cc477, 0x7fc26452, 0x28c7812f, 0x85b7a1ab, 0x818181ad, 
0x404ce719, +0x540fe3e5, 0x705bf0fd, 0x6206a54a, 0x89486861, 0x75e25f81, 0x3c9bc93d, 0x3457c36d, 0x57ab2f25, +0x3cd9bd13, 0x81769cbe, 0x87557fae, 0x025c7109, 0x05457f64, 0x5f427f13, 0x5c7f26be, 0xb9818130, +0x34e63305, 0x8a81ebcd, 0x673326ba, 0x05647ff7, 0x818b8181, 0x8c81782a, 0x4bc0b30f, 0x759749c7, +0x399e99f6, 0x7fdb4313, 0x9c7f5cae, 0x6cc2a611, 0x811dab81, 0x7f7f413b, 0x4fb37081, 0x818c8164, +0xac4c01d8, 0x1f63813e, 0x81db7881, 0x8194404b, 0xcc9bc55f, 0x542900eb, 0x87ff34c5, 0x507f7fa2, +0x7fd97f03, 0x79e428e3, 0x2f7f7c6c, 0x57813f13, 0x7f7f817a, 0x61c40926, 0x68a9457f, 0x63ea9f2f, +0xea81a8ec, 0xfd81301c, 0x81cd8181, 0x812cf7fc, 0xc996b697, 0x81895df8, 0x8d4ae3ca, 0x4c5de834, +0x81643d19, 0x202babce, 0x55a89081, 0x64ba81f9, 0x7481db7f, 0xa0eba6bf, 0x81c12ee3, 0x8181a75d, +0xc8e9939c, 0x0981295a, 0xf22e527f, 0x817f3314, 0xc4f17f5e, 0x8c83651a, 0x7f74a17d, 0xbdd28405, +0xb6bc4795, 0x388c7cdd, 0x957f7f7f, 0x1db7ec29, 0x68f59c45, 0x8142eaa1, 0x81f0b543, 0xa48163fe, +0x81d08134, 0x7d673f5d, 0x8ecdcf94, 0x6e56195a, 0x7f222687, 0x7fd8fb10, 0x81aa977b, 0x22014258, +0xd541be81, 0x817f62fb, 0x707fea79, 0x7f817f60, 0x4b7f3c7f, 0x60a4032c, 0x1c68c2ae, 0x7fc27f19, +0xd5a55cd4, 0x09a651f4, 0xb5447f7f, 0x7f815bfe, 0xe681d381, 0x1781f900, 0x8eb6b0d7, 0x1d90fdf8, +0x528147c4, 0x816b7f46, 0x7f6381c3, 0xd5d2088b, 0xcce4ae52, 0xdf4ec304, 0x967e5e1f, 0x567f1439, +0x317f810a, 0x815f814b, 0xe13ee67f, 0x7f8177a3, 0xac69c42a, 0x7f5837e7, 0x61b481c8, 0x777f812f, +0xcea38116, 0x6b6b7f37, 0x8feac838, 0xf9457f3c, 0xe5107f74, 0xcc957f56, 0xa3ab7f81, 0x81817f6e, +0xc5817f2b, 0x197d0d99, 0x7ad0c84a, 0x81ada865, 0x81b31c81, 0xee7f9b54, 0x815a3406, 0x3f617fce, +0x3d4db8c2, 0x5ac39e72, 0xd390812f, 0x27c6ede7, 0x6f69d981, 0x2981f141, 0x7faa2681, 0x787f6668, +0xc24ec00e, 0x7f008142, 0xfa811aad, 0x2d567fb0, 0x5adb7f65, 0x8a864fc5, 0x6d817f85, 0x307f7f1f, +0x5c817f47, 0xcc7f43b6, 0x9bb08159, 0x496f7f2f, 0x41e383c2, 0x4e7f7f88, 0x71ce63e4, 0xe07f4c7f, +0x857c58ad, 0x8599641d, 0x655c687f, 0x97a64fde, 0xdd42707f, 0x29ced6b6, 0xdf25c153, 0x7f047fbb, +0x7c5f462e, 0x61d08548, 0x7f7fab7f, 0x48b1ecb0, 0x93c63349, 0x039c7fe3, 0x817fce1a, 0x29bb7112, +0x7f3bbaf0, 0x81818d2f, 0x7f5b3d41, 0x1fa18160, 0xc481af81, 0xfc818130, 0xdae2bd73, 0x4d3c81cc, +0xa47f5f0a, 0x5e3ad1a6, 0x1a816eab, 0xb307dbd0, 0xa8798b88, 0x7f81252a, 0x6d7f2899, 0x4d7fa214, +0xc375762c, 0x7fb655ab, 0x5bb98185, 0x1381682c, 0x5281ca7f, 0xc56d7fc8, 0xa81d8170, 0x8daa50f4, +0x8bbf7f03, 0xcf7fb0c2, 0x3b733caf, 0x7f756442, 0x8c81353c, 0xf7e82f0f, 0xa5a620f2, 0x8ae39fea, +0xada55ad4, 0x74f1de4c, 0x37714753, 0x538193e3, 0xb1811481, 0xe5bd81cb, 0x70fc4ea8, 0x888473eb, +0x30b57f1a, 0x90c3f010, 0x81817577, 0xea8c4ae4, 0x6977547f, 0x549481a8, 0xf27f54b8, 0xf87f7fd3, +0xef81843e, 0x5a437fbb, 0xa77f8181, 0x77354c05, 0x9781667f, 0x7fc2ae7f, 0x0b81b868, 0x1267b5e3, +0xa0de743b, 0xb4cce348, 0x8192819d, 0xf0817faf, 0xecc781ab, 0x7f57af2a, 0x499e13c0, 0x7f3d632a, +0xac96fcc8, 0x81727829, 0x7f7ff281, 0x81819bb4, 0xc5bfd358, 0x76553120, 0xb57dbcb5, 0x9b425d12, +0xb0814cc1, 0xea3d393b, 0x1dde5e27, 0x03a6f127, 0x39a9a981, 0x94304751, 0x3af97f00, 0xceaa49d8, +0x3c8124d1, 0x7f24b547, 0x2830e08d, 0x3d7a8f0e, 0x456ffc4c, 0x814e7f4a, 0x357f817f, 0x7f61bc51, +0xd7097f44, 0x213ecf47, 0x815281bd, 0xa9a58187, 0x0c3cbfb2, 0x18f92fa4, 0x307f7f52, 0x727f3361, +0x6cdc8e37, 0x7f9d9e24, 0x64466e7f, 0x2491328c, 0xd58181cb, 0xc97ff4b1, 0x1890627b, 0xa48168b3, +0x7fad44dc, 0x411b7f43, 0x6681647c, 0xc5171bcd, 0x4a6d7fd9, 0x7e369d2b, 0xf8124f61, 0xa71515f1, +0xb381ec09, 0xc4ab71c0, 0x9e2e977f, 0x7f7f0d58, 
0x1cdb7b16, 0x81f87f81, 0x517f7f60, 0xf4a3ea33, +0x5c2112c3, 0xc67f7e06, 0x9d7f78c6, 0x811730e4, 0xa29b6170, 0x81817fd1, 0xb6637f87, 0x4f1c540d, +0xe57f7707, 0x277d5c28, 0x7f817f21, 0x5e31e1ab, 0x7f81cd81, 0x815daab2, 0xec5284bc, 0xb45d7b63, +0x7f4d815e, 0x0860494f, 0x81874034, 0x81d0f00a, 0x5b4274ea, 0xdce04fd7, 0x8188680f, 0x827f7fc7, +0x8137e715, 0x9f7f6699, 0x786e75eb, 0x8253202d, 0xb2813ddd, 0x7315a11a, 0x81c18164, 0x7f7f8141, +0x6870da25, 0x117f3efd, 0x7f1c5f1f, 0xcb6d63bf, 0x45561890, 0xc3df8136, 0x23275f32, 0x81817f5d, +0xa50f7fde, 0x7f3ba6ae, 0x37a4bbf4, 0x81a6fd1a, 0x9b9bb6ec, 0x8b2cff0a, 0x2035d781, 0xa1c47fce, +0x095dee2d, 0x7f3a2d51, 0x69727f7f, 0xec8aad61, 0xb0dbcc42, 0x2e706fee, 0x962a9d53, 0x81814554, +0x759026dd, 0xb38851cf, 0x817fa70b, 0x7f462fb9, 0xcc7faf23, 0xa2981632, 0x3e8d8160, 0x07815dd2, +0x82817f22, 0x5a755325, 0x45c77f81, 0x81653cac, 0x93f73dd9, 0x3d81c4cd, 0xf67aa181, 0x467e7f1f, +0xe6c10cfa, 0x9bd4c12f, 0x6c51ca85, 0x4d7f2835, 0xc0bb341b, 0x81812aed, 0x87a181be, 0x4f7f7f6d, +0x81c9e3c4, 0x117f95bf, 0xd457e634, 0xb84b567d, 0x5618f922, 0xda7fc1ed, 0x7f7f81d6, 0xa07da0b1, +0x2e627f0a, 0x814ee836, 0xb65bff90, 0x3e29b6de, 0xb1b78f5d, 0x1724a1f0, 0x81484221, 0xe16d7fb6, +0x578181f2, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 + +hard_output0= +0xb5bc6ac8, 0xf5373664, 0x1310345c, 0xd5bae4e7, 0x1fc9e83e, 0xebfdfded, 0x84bd86ab, 0xb7aabe00, +0x60b44fea, 0xb9067464, 0x30325378, 0xa9195955, 0xf70c6e5c, 0x90922632, 0xc90b1cdb, 0xf2f5fb69, +0x73056b63, 0x1a33bf3f, 0x17755b5c, 0xc58bff6d, 0x2f4390b2, 0x2869d508, 0xe7c7dfe8, 0x38552963, +0x21da5367, 0x07282b9b, 0xa4767105, 0x1e294251, 0xe350a940, 0xb8a6aa27, 0xed12d778, 0xf10d9ece, +0xab93527f, 0xcf2da7e7, 0x68f6d0b1, 0x811f4bca, 0x577b06b2, 0x3234f13e, 0x30bab7df, 0x8dc47655, +0xbb843bed, 0x86da3aba, 0x30950c97, 0xdd096d7a, 0xa871fd6c, 0x8bee4e6b, 0x8fea30d0, 0x6c05b4d2, +0xf3e144d3, 0xd24ebb1f, 0x065635e5, 0x8d3f2cf9, 0x536c6c6a, 0xfbb0a5d0, 0x3d707b42, 0xc44d5982, +0xa5f4ad8f, 0xf32c0970, 0x1bccf1a6, 0x05916020, 0xa64fb176, 0x5ede6a35, 0xaf4966da, 0x9df5e0e7, +0x75042abc, 0x9ef10481, 0x11ddcbc8, 0xa0f5518c, 0xd5c23418, 0x2393d558, 0xfbe7dfeb, 0xed1c64c2, +0x86a36508, 0xde2dfb1e, 0xb8d0fef9, 0x24505232, 0xc894e71c, 0xbcc752a0, 0x40b74e83, 0x90d23c8c, +0x728e4a61, 0x108f0b08, 0x66f522ee, 0xc258d851, 0x35a31c44, 0x11311b5b, 0xfd3d5be9, 0x5ae448ff, +0x4f64994b, 0x5b8247a9, 0x4021114d, 0x2f0b6e82, 0x5eaa9828, 0x50ac71c0, 0xfb86ee52, 0x0dc1ac9b, +0xbbd47645, 0x8f357115, 0x978ceea0, 0xd557db99, 0x99b30388, 0xfc9a8a1c, 0x0f75be1a, 0x50143e22, +0x8840989b, 0x738ec50e, 0xe6b2783d, 0xf67899c8, 0x27ebed69, 0x6c415a16, 0x3a6cc2dc, 0xcd4e4e5d, +0x6cb12b2e, 0xdb88d7c0, 0x79cd1582, 0xbc422413, 0xe72ad2f4, 0x8eaac30f, 0x0bd86747, 0x6d87f69d, +0x15d62038, 0x4b375630, 0x0d51b859, 0x16db2cb2, 0xf210603a, 0x0abeb833, 0x55c694d0, 0xe57ca43b, +0x0ba94428, 0x1398a406, 0xe47d3889, 0x5a20203d, 0x250d7a1a, 0xd930ffec, 0x03992e79, 0xf2759376, +0x024ec121, 0x91fc3a2c, 0xb7e11cc5, 0x4ff7d459, 0xb8700134, 0xd6e61758, 0x4eba0a32, 0xb747e3ec, +0x7073fad7, 0xded80f99, 0x331e2f1b, 0xfa1f1bed, 0x056424a2, 0x1d1d95e0, 0x550b9ec8, 0x51ee2a38, +0x19525153, 0xd70c4cd5, 0x0d6cd7ad, 0xe44d1cf2, 0x30dfecda, 0xdacd7fe8, 0x7321d795, 0xddf48ef4, +0xe271e6a4, 0x9c1feecb, 0x951fcd7b, 0x8acc5a03, 0x3fb83527, 0xe306de74, 0x7b9cd6ee, 0x8e140885, +0xd4c91e8d, 0xe8c39733, 0x0f02f87f, 0xfb06b1b9, 0x0dc9349c, 0xf76bae8e, 0x4f642a07, 0x3d48a9aa, +0xe3ea323a, 0xa1cd5c8a, 0x40aa0e70, 0x132042d3, 0xa9732f6c, 0xd15a00c4, 0x43d3b046, 
0x9a51ebd4, +0xc46ee0ed, 0xe2a2148b, 0xf5c478f0, 0x1fb01cf3, 0xf4f321ec, 0xd973811f, 0x11ad11b9, 0x5c67adda + +e = +34560 + +k = +6144 + +rv_index = +0 + +iter_max = +8 + +iter_min = +4 + +expected_iter_count = +8 + +ext_scale = +15 + +num_maps = +0 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN + +expected_status = +OK diff --git a/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_negllr.data b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_negllr.data new file mode 100644 index 00000000..b5ef6246 --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_negllr.data @@ -0,0 +1,1224 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation +op_type = +RTE_BBDEV_OP_TURBO_DEC + +input0 = +0x0a45ef00, 0xb2bdc60f, 0xc810d206, 0xf21cc300, 0xb0c93da1, 0xaae019d7, 0x23ba3c29, 0xa61d1e09, +0x202906d1, 0xd2b61ef3, 0x59e7f241, 0x2ec8daec, 0x68015520, 0x0912f4a3, 0x01282329, 0xa1fd1439, +0x0701231b, 0x0fdcba0a, 0xbe97c6c3, 0x0438e652, 0xd5f644d4, 0xbd0e5f4b, 0xfcb6d0d4, 0x0825b15a, +0x362e2fc8, 0xbbd5abda, 0x0df8c12d, 0xd016b5de, 0x1e439a0c, 0x44c94242, 0x00e2e8c5, 0x0639d241, +0xef6aa40b, 0x90de9def, 0x43e0ea14, 0x31dd07cf, 0xbad70a05, 0xcffc228a, 0x00293bd7, 0x6038080c, +0xdaeb3e20, 0xdd260c4c, 0xaa41fb3c, 0x6eeef0d4, 0x5b02c0f3, 0x14d6ac69, 0xfef0ce44, 0xc5abccf2, +0x2a0f00c4, 0xd6cadb30, 0x38e212cf, 0x5e9abc29, 0xf105fce4, 0xde34e024, 0xb9d42365, 0x281eb22e, +0xb219c055, 0x6504b01e, 0x465602ab, 0x1233a4e7, 0xa9ebf4cf, 0x63c1ed3f, 0xdbe9b71f, 0xf994579c, +0x22f359b5, 0x3606379d, 0xae30fa1e, 0x1d6e290c, 0xc6e0440a, 0x2f129132, 0x1b317bf6, 0xcfdbc119, +0x32022857, 0xaef168dc, 0xa1251d37, 0x65eec43b, 0x9630e70e, 0xe9e1fd98, 0x3e02d8ab, 0xf348af3e, +0xf7b0f027, 0x2ff021db, 0xecae5bd4, 0x1b2f3e34, 0xbb2511c0, 0xd611ddad, 0xf951d2d6, 0xb1dd0191, +0xc325cf35, 0x58bdf4b7, 0x36094dd7, 0x07cd3b44, 0x02c4ae4d, 0x812af6d9, 0x32f63731, 0x31103329, +0xac0033ff, 0x817f583f, 0xc4817f88, 0x8d81790a, 0xa6a2817f, 0x7f81910a, 0x7fc5ed7f, 0xbf2f7f8d, +0x7ff14707, 0x49cc3bda, 0xb04d5a2d, 0xd8d392b0, 0x7f69d3e1, 0x83ae43ad, 0x99b7da75, 0x3a7f7fd3, +0xd46d6fd2, 0x4f7fc524, 0x137f812f, 0x587fcd8a, 0x81c481bf, 0x9caa6184, 0x0bc3a499, 0xe2901a23, +0xfeb59b6f, 0x7f818171, 0xf68ed39d, 0x8113a975, 0x81dd6681, 0x7ffe707f, 0x5836757f, 0x48a98122, +0x7f724081, 0x81588447, 0x59f8b799, 0x7f6dbab0, 0x817f8f09, 0x13c3ecb6, 0x7ff98d64, 0xc4911e81, +0x33922f9b, 0x05c9e85c, 0xa19071b3, 0xec1ca24e, 0x02a48181, 0x813b8196, 0x7fca7d51, 0x7f81814d, +0x00815911, 0x242a7f5f, 0x29d6ee98, 0x8a61887f, 0x4e815e7f, 0x773e811d, 0x8e65d1a7, 0x8d514860, +0x3081b248, 0x7f2393ab, 0x815d2e31, 0x817f4be2, 0x81818519, 0xa07f677f, 0x0defa853, 0x22811c4c, +0xbe81d32d, 0x2d7fdc7f, 0xd8f3195c, 0xf69bc13a, 0x7f7fdae1, 0x2a56717f, 0xd4811c7c, 0xbe218c81, +0x81a556d3, 0x3f43a8c4, 0x6a95b0cf, 0x427f79db, 0x959cbc5c, 0x9f00dd19, 0xa1e8c5d4, 0x59814997, +0x4e35eb7f, 0xf87f348b, 0x76749124, 0xab7f7f1c, 0xf0816c81, 0x8c62b45d, 0x744ec34f, 0x329a7fce, +0x9048b815, 0x0076dcd0, 0x812d2981, 0x98d5cb15, 0xd29652cc, 0x2cd15e33, 0xc2557f91, 0x03517f6a, +0xfc1cc134, 0x0f7f6a00, 0x81939c0e, 0x7f4677a9, 0x7f597f5e, 0x4f972a86, 0x81b2ab81, 0x207f7f69, +0x85b9b234, 0xc1857fac, 0xedaa7c08, 0x7f50057f, 0x7f1dc26d, 0x7fa47f81, 0x7ff57f76, 0xb3bca5cc, +0x6b31d550, 0xce3a6583, 0xdac01f7f, 0x81184838, 0x65239c09, 0xde415bf5, 0x7f7f3881, 0x097fdc7a, +0x81698160, 0xa8da57db, 0x3281530a, 0x628d057f, 0xad7381f2, 0x18a35579, 0x3d909f7f, 0xbf7f7f81, +0x8181eb36, 0x6bd45881, 0x578c7520, 
0x417fdc89, 0x307f5bad, 0x667f8181, 0xe1b44f6e, 0x817f7f2b, +0xd55cb62e, 0x8aa0ab9a, 0x81e42e47, 0xbc925270, 0x77aa4a90, 0x56247581, 0x8ff5f670, 0x7d7f9cc7, +0x3a013a2e, 0x10810005, 0x815eab7e, 0x812fe1c6, 0x81f0b47f, 0x7f2b8c03, 0x7f7fc07f, 0x7e497281, +0xd1d70cd5, 0xd981b501, 0x0c63b57f, 0xd8a8ea7f, 0xdeaece7f, 0xa08107f0, 0x9f54e62e, 0xa8cfc431, +0x7f985ee1, 0x811ae887, 0x7f81df4d, 0xc081d1bb, 0x81a15a9a, 0x7f44bf05, 0x037f057f, 0xf22c7f9d, +0x7f7f7f7f, 0x81856e03, 0xbcb08381, 0x46bb372d, 0x7f7bd59c, 0x8157e815, 0xa1910b3a, 0x5c537f7f, +0x3e81717f, 0x4851ac7f, 0x813b7f83, 0x867fb25e, 0x098131ca, 0xb06fc77f, 0x7f60e97f, 0xa25f5c46, +0xe82eae48, 0x40bf987f, 0x817f81e2, 0xf5b2c831, 0x810597de, 0x045c7f4b, 0x81e33f36, 0x81aaf35e, +0x81817fc6, 0xe700587f, 0x93b37fea, 0x9f81fc97, 0x4def727f, 0x6bc07fe7, 0x7f811076, 0x81248175, +0x0a7e5cf6, 0xcd7f7f81, 0x7f49f37e, 0x8181917f, 0x7f70df81, 0xd17f81b5, 0x1f6b4ef7, 0xa21d6b8b, +0x7fb0461a, 0x5c027581, 0x81819bb4, 0x816ee78f, 0xad7f817b, 0xe481a10a, 0xa1cb8d81, 0x32813e3a, +0x947f7f81, 0x8184bff3, 0xa96cd072, 0xeb0c5881, 0x814ab385, 0xe3f67f7f, 0x584a81c9, 0xdbc5c781, +0x78816e49, 0x817f8a81, 0x7f3f7fff, 0x56d89f40, 0x4397e427, 0x7f29b20d, 0x7f597fad, 0x35817f65, +0x0a7f3b19, 0x61c97f81, 0xc6817a65, 0xaa5ceb7f, 0xfc427fef, 0xb8684c75, 0x447fbaa3, 0x4d7fae7f, +0x8196e3bb, 0xe1588132, 0x7ff27f34, 0x8173bff4, 0x7f705f6d, 0x7f7f817f, 0xa353354c, 0x7f58ca67, +0x7a5a7f9c, 0x1e81ee7f, 0x98d681d9, 0x9e60c443, 0x7f7f39a3, 0xc79bc87f, 0x3ae8827d, 0x9f812e81, +0x7f6f8170, 0xa987947f, 0x2481aa81, 0x538fdd7f, 0x237f1442, 0x14e99a84, 0x6e62b068, 0x7b810541, +0x73a57fad, 0x69977881, 0x9be9cd7f, 0x9a48537c, 0xfccc7f47, 0x7f40a457, 0xbc63b4b8, 0x74bb447e, +0x36817f12, 0xd371cd1f, 0x7f1ed17f, 0x36624bbe, 0x726c811b, 0x4abe5c58, 0x81de39c1, 0x7b5ae996, +0xaa818105, 0xa881bebd, 0x5e7f6edb, 0xa8d08165, 0xd495f17f, 0x7fd06098, 0x7fb157fc, 0xc0f661d3, +0xd09f812a, 0x7f8ef77f, 0x69477f00, 0xf45f8481, 0x0f7fa07f, 0x97f17f7f, 0x6ceaac7f, 0x7f7f8163, +0x7fbcbe98, 0x698b697f, 0x79c0da81, 0x8b81e681, 0x81817f36, 0x6b814881, 0x7f918ec6, 0xb67aed0e, +0xc5020b85, 0x81ee8c18, 0x7ff650a5, 0x814f6233, 0x508139a1, 0x814e6aa1, 0xa055487f, 0x3ed75817, +0x4f7f81da, 0x1ddcfa81, 0x7f7f1dbc, 0x825b6454, 0x2ee6819b, 0x73eb6861, 0xf7fe9516, 0xe3816dc5, +0x527f3f7f, 0x550d8fb9, 0xf3f82681, 0x5954b562, 0x7e7f20bc, 0x817f6b4f, 0x8108bc22, 0x17fb8113, +0x7b040f33, 0x586282d5, 0x078c5c7f, 0xe17fc493, 0xdd813545, 0x9ba17f6e, 0x7fccfad5, 0x916f7fa3, +0x21ce7f1f, 0x1a037f81, 0x8e4d0081, 0x7a360731, 0x167fd77f, 0x96762a84, 0xca5660e3, 0x45f0d87a, +0x7fd151ae, 0x9e7f22a6, 0x7f817f8c, 0x1501b9cb, 0x886c4184, 0xea627f6c, 0xc53e7f08, 0x1a63d6eb, +0x481b4a73, 0xca816575, 0x167f7f7f, 0x15814549, 0x6d985e81, 0xa60b1f62, 0xa1818181, 0x33af7f7f, +0x816c7ff7, 0x7f81b67f, 0x08602281, 0x9a81815b, 0xe6787f3f, 0x7aba4f81, 0x5015817f, 0x1d02d55e, +0xbaf3e826, 0xc34ca8cb, 0x503e7c82, 0x647a7f60, 0x7f810873, 0x81149d63, 0x7f818181, 0x7f7ff3d1, +0x11d8dce2, 0x7fe3634c, 0xb63c535b, 0x810e6181, 0x81277f54, 0x817fa2dd, 0x34c34476, 0x848a65f8, +0x96f911b0, 0x9aa76464, 0x8200816e, 0x6d7f56a6, 0xcdb9e57f, 0x874e836a, 0x93a363a6, 0x41c09883, +0x8144c072, 0x52a77fef, 0xe8ded5e2, 0xf6857f81, 0x81b79f4c, 0x7f7f71a2, 0x2b5852bc, 0x1f7b43b0, +0x4c5aa37f, 0x557f3d2a, 0xa17fb581, 0x7349d204, 0x4042ec2b, 0x89cd96bb, 0xd3368281, 0x7f7b8181, +0x0f28f5cd, 0xe8aa4d23, 0x88628b33, 0x8181817f, 0x758197af, 0x37b43b4a, 0xc081c1a5, 0x8e3c4881, +0x7fcb0983, 0x55fe14e4, 0x734c7f20, 0x89267c21, 0x7781a4b7, 0x7f7f4eb3, 0x81ac8fa2, 0xddc20981, 
+0x7f9b2659, 0x2d7fc65e, 0x818181e8, 0x6a176a89, 0xaa7fb68e, 0x5c5446d2, 0xda7dcc81, 0xc3a5e054, +0x7881477f, 0x7057146f, 0x00764b7f, 0x7fa87f7f, 0xa4ff8181, 0xf07f7f7f, 0xa37a5cd3, 0x28a881b1, +0x13e0cb4f, 0x8f7f9804, 0x3d343181, 0x567fda8a, 0x5828c7d0, 0x434fe70a, 0x7fd5fe78, 0x1f818181, +0x686103cf, 0x62aab5ee, 0x5506cb88, 0x5428ad7f, 0x51908c1a, 0x6d5db4ef, 0x3f7f686a, 0xa6a15a8d, +0x917f7f53, 0xb77fea86, 0x7f816aa2, 0x6ae581d7, 0x54ad634a, 0xd59ca328, 0x6d815bfa, 0x5f470046, +0x23818188, 0x817a9419, 0x7f57dc81, 0xb02a81ad, 0xe986757e, 0xd9810681, 0x81f7be26, 0xbcb5cf7f, +0x63776b56, 0x4358bc47, 0x528aa47b, 0x9e7f7181, 0xd0818100, 0x0d811a7f, 0x7f818155, 0xbe207fc3, +0xb70d393c, 0x812bbd48, 0x7f7f7d0e, 0x81b5ab00, 0x1ecb8681, 0x24c67fcd, 0x2fc7ed01, 0x5503a881, +0x7f3d7fb4, 0x9a81559e, 0x6508d5f4, 0x08b56b7d, 0x96d84e10, 0x7fac9ec1, 0x817ca281, 0xd9b01081, +0xaf81d3a7, 0x8102b34b, 0xa0f98181, 0x7f7f929e, 0x3cf6d023, 0x990ed581, 0x147f817f, 0x81483a81, +0x68375130, 0x487f1451, 0xa0973d81, 0x87d4d4f7, 0x90633e22, 0xbc7f45e0, 0x2c4ea77f, 0x0811896e, +0x8d7f52e9, 0x8120aeb0, 0x3d159d81, 0x93cb0097, 0xce81bb81, 0x74194855, 0x4e7f3f26, 0x817f819f, +0x4531814f, 0x517f7fbb, 0x4981a87f, 0x4c810f4e, 0x3239b67f, 0xa17f194c, 0x81e58e81, 0x7f4890e5, +0xb07ffa3c, 0x937fa181, 0x447fa516, 0xbab5000e, 0x887f7f7f, 0x8181b17f, 0x7f82d4ad, 0x7f828159, +0x81b35c57, 0x23bcb1dd, 0x819446c7, 0x7fbf9ea6, 0x817f7fc1, 0x33817f81, 0x5cfe7fb6, 0x7f81fc61, +0x91477f4c, 0x88994381, 0x65bb447f, 0x198d61ae, 0x817f9f7f, 0x557cbcb8, 0x81fab781, 0x1bab3f7d, +0x7f812298, 0x7f44fadc, 0x1dc1c665, 0x7fde3a8d, 0x94c37f2b, 0xc7d8eb23, 0xdaae8181, 0x129c68bc, +0x7f819111, 0x7f32ce2c, 0xf0d17f02, 0x7f6a817f, 0x5cd564e0, 0x7f7fc47f, 0xd6995c9d, 0x70a7d476, +0x3b7ff299, 0x7f2f237f, 0x4381ac1f, 0x0bd67fb2, 0x81ba594d, 0xa6777fd5, 0x881cc254, 0x8189977f, +0x7f813b3e, 0xc17f7f7f, 0x3baa3dc9, 0x81007f57, 0x7f810295, 0x81f7a1c3, 0x7f65de7f, 0x4ca87f53, +0x407f81b9, 0x7f436fa6, 0x399f9581, 0x7f177f86, 0xbc698194, 0x447fd081, 0x81663d37, 0x01faee9b, +0xc485ff7f, 0xed6ff281, 0x4c6713c4, 0x28b0487f, 0x7f88e8c9, 0x8169016d, 0x7f7f71dd, 0x474bf37f, +0xb756b143, 0x95caeed0, 0xec7f872d, 0x437f23e1, 0xb42b3030, 0x662f8d76, 0xd4818f71, 0x8161fd50, +0x8863663e, 0x78256b06, 0x661b549c, 0x8e817fb4, 0x06b98354, 0x5a905ec1, 0xec960281, 0x2c30bbe3, +0x937f7f63, 0x6ade543f, 0x7f7f9f5e, 0x98dac336, 0x81327f73, 0x677f6d46, 0x637f5c7a, 0x82478181, +0xedbc9014, 0x9a7f431b, 0xb2d25981, 0x7f898181, 0x81815ab1, 0x2d7fa683, 0x7f03b2b0, 0x89bc7f18, +0x81947fb8, 0x1995817f, 0x78e54c17, 0x816cb1c3, 0x1a2681e0, 0xac3a7f8a, 0xdff245bb, 0x422ada81, +0x1ed1e07f, 0x7f6f5276, 0x8481db81, 0x9a817fe4, 0x1e132dab, 0x5c7fdf7f, 0x7d81ac7f, 0x937f7fe0, +0x49568171, 0x3f81907f, 0x81466896, 0x7f667f2b, 0x577f9a7f, 0x7fd73c0b, 0xc946df59, 0xd72f767a, +0xb51d326a, 0xc77f4f7c, 0xbc814bf5, 0xdae47fab, 0x40a64f73, 0x898188ea, 0x1c751c97, 0x7f8ed37f, +0x81b9585f, 0x6eb6758e, 0x5e11b233, 0xf0a47f81, 0x81709065, 0x447fbcc6, 0x58d1819b, 0x9fa36e74, +0x817fa9f3, 0x9f506f66, 0x81cd047f, 0x187f8136, 0x9d810500, 0x64cd8eb0, 0x7f817f7f, 0xf850e2f6, +0x81b61869, 0x606a66c4, 0x81638118, 0x20dca772, 0x6287b381, 0x8e056874, 0x086d99b9, 0x54438172, +0xb281697f, 0x5c470c16, 0xb0340523, 0x7f81b96b, 0x4451b8a1, 0x7f7fe6db, 0x57887f3a, 0x14438129, +0x927758ce, 0xe27fa16a, 0x8c30bd81, 0x7b7fff47, 0x41e58175, 0x35812462, 0xe7ff4358, 0xb4c04079, +0xa87c108b, 0x81aae151, 0x3d797f8e, 0x33007f3c, 0x27c2715a, 0x188c7fba, 0xb481e081, 0xe026d96e, +0x402d8185, 0x7066ffcd, 0xc5914e7f, 0x977f655f, 0x7f68b5d9, 
0xcad3235d, 0xc77f8e81, 0x1cb4817f, +0x7f473044, 0x15168159, 0x7f5c81d2, 0x7f99bd6f, 0xb7ab0006, 0xf2487f41, 0xd2de0f82, 0x6a81627f, +0x2e8e87fa, 0x25187fb3, 0x6d31b393, 0xdd3d4b81, 0xa942a11d, 0xd21281d0, 0xe6b0e17f, 0x8a5646ad, +0x907f8124, 0x7f81d2e2, 0xd8c8e17f, 0xeb8b7f81, 0x3ad8bb1a, 0x7f7fbf81, 0xcb30ddf4, 0xa09134d8, +0x8187d2bc, 0x4949168a, 0x53998181, 0xd5e21412, 0x81811cb0, 0x11349a98, 0xdab2afe4, 0xd97f7f7f, +0x7f7f8181, 0x327e7fd4, 0x97818114, 0x827b6517, 0x5f0aabbd, 0x7f905b81, 0xafdde37f, 0xcd40cf7e, +0x7f56817f, 0x95810db2, 0xaf7fd131, 0x8181811a, 0x9e7f8f73, 0xe250d481, 0x7f2abb90, 0x5f7d981f, +0xb3257f91, 0xec657f2f, 0xfa815e3a, 0x6e223481, 0x6c00eaa8, 0x7fb19394, 0x8799c181, 0x810e7fde, +0x7d478181, 0x7fdc76bd, 0x035f8102, 0x627f447f, 0x7ff7ff7f, 0x81265db0, 0x6e81ac2c, 0x8802d277, +0x81c3f781, 0xec81d259, 0x817b7d81, 0x7a5b7f7f, 0x4953ab64, 0x896a52a0, 0xb28111f8, 0xd25d8f0e, +0x7f818174, 0x813f7f3b, 0x7fff7f72, 0x0fa87f88, 0xbcfad248, 0x507dd360, 0x7fae817f, 0x81f27f81, +0x72fa4d1b, 0x5a21d2a1, 0x6381817f, 0x5fb55ab3, 0x8ec3816f, 0x257f4ab5, 0x4c74ef81, 0x19a3f27f, +0x752746c6, 0xe17f8181, 0x7f7c8199, 0xdb9981a1, 0xcb984bd7, 0x815ea881, 0xdc50e0d9, 0xb9366f24, +0x7f455dec, 0x817cb181, 0x424856f3, 0x67ab02a5, 0x0081657f, 0x105e8191, 0xeab817a7, 0x3a7f6d5a, +0x425a1503, 0xeaac317f, 0x7fbe2cd0, 0xd2819a6d, 0x34887f6b, 0xef7f817f, 0xc3577f8f, 0xb124a664, +0x9981167f, 0xb4946031, 0xb8817f7f, 0x81c570ed, 0xc9c88181, 0x3a4cdf81, 0xd9514781, 0x8948c0fe, +0x2d5a8111, 0x68817f7f, 0x6be87f81, 0xa62c0381, 0x857f7fba, 0xa8c4ab7f, 0x279cc57f, 0x98816e0b, +0x817ff70e, 0x21536d81, 0x875ca181, 0x008164f2, 0x96955a53, 0x57a53917, 0x6517faad, 0xdecd7f49, +0x81819c32, 0x7f7fc25d, 0xfd81b181, 0x9f818166, 0x9c7f107f, 0x7f15b67f, 0x81636692, 0x18f276b5, +0xbb9d08e2, 0x6c25797f, 0x417f43fc, 0xd0a37eca, 0xd4702b8f, 0x5c72ba00, 0x7f9dcdbe, 0xd4ad8122, +0x39f1a381, 0x9b419181, 0xac816ace, 0xfb7c81e6, 0xdd7f7fb9, 0x2a27bb61, 0x3781499f, 0x7f6a7f93, +0x4bbf81a5, 0x4a07e527, 0xab4f8143, 0xc2703e81, 0xb1813d81, 0x81747ba3, 0x11cb7f81, 0xd7a385fd, +0xc9d0c981, 0x647fe554, 0x811d3681, 0x18817fdc, 0x81811bef, 0xd381845e, 0x466539da, 0x1081617f, +0xb5b99261, 0x56cd7f8d, 0xaaa79076, 0x7f814a81, 0x7f7fe313, 0x7f7f8170, 0x5df9817f, 0x102fe72a, +0x0c7f79ae, 0x4368d9e1, 0x6b0af013, 0xc13ed920, 0x98ed6a7f, 0xc494d37f, 0x81816a35, 0xc27f7f4f, +0x451a7f3a, 0x4c812e15, 0x032f49b1, 0xbd7f02a1, 0x507f0d67, 0x9c5f007f, 0x8712c21b, 0x6ba7385a, +0x7fb62c58, 0x7f818bcf, 0x7f7fc57f, 0x4141698d, 0x1e817f5c, 0x813fbe97, 0x6f817fa8, 0x51637759, +0x7f4ed17f, 0x81ac2c28, 0xf1c75bfd, 0x1463c260, 0xcb7f7fcf, 0x7483675a, 0x40eb7f7f, 0xad7f81a2, +0x7f817f50, 0x66816683, 0x407f8c81, 0xaf8190b3, 0x81d347f2, 0x5a215a81, 0xca7f8132, 0xd0528ef2, +0x8129c3a0, 0xa4a7c52b, 0x0b816383, 0xbc7faf5d, 0x38d34e66, 0xff65a68b, 0x817999d7, 0x5a620463, +0x885a817f, 0xad947f81, 0x7f1c902c, 0xaa0e9003, 0xd7ccb971, 0x64a68198, 0xb2337f11, 0x81a4d5dd, +0x817f81fd, 0x7e814f81, 0x817f3da2, 0x8381fd08, 0x81a35a81, 0x7f0099e3, 0xfa1f5850, 0x6773efd6, +0x6e7e5268, 0x138e7fc5, 0x81449526, 0x815781c2, 0xca2faff4, 0xf37f7f64, 0xaa080369, 0x7fb07f5c, +0x81397fcd, 0x972dbbbe, 0x813f8e81, 0x812ed669, 0xbf76e310, 0x0be7f981, 0x81929781, 0x577f8644, +0x0cc7e94b, 0x7e1d2995, 0x7ace8119, 0x81814867, 0x617f505c, 0xb9ad7f3e, 0x3ad56e18, 0x81e5b75d, +0xca874729, 0x7f81ad9c, 0x5881813e, 0xbb240a46, 0xcc908863, 0x6fc2815b, 0xfa4bca7f, 0x8f817319, +0xfe819dae, 0x81814281, 0x867f21e9, 0xfdaa0500, 0xd08c9e41, 0x99ea3b94, 0x7fa9394c, 0x81337f7f, +0xb57f23f9, 0xc59a8181, 
0x815a7fa4, 0x16c9c781, 0x9f17b97f, 0xb2817f89, 0x7fbd95a5, 0xb27f7f8e, +0xa5e0b5c9, 0xefabff4c, 0x7fb7ea7f, 0x9aec9841, 0xb0e1784b, 0x81b29d1d, 0x7fba1781, 0x81f7086b, +0x8129f27f, 0x818181ea, 0xc1ce8113, 0xfbfbaacf, 0x9d4c14ed, 0x40c1ea26, 0x6051cd3f, 0x6cd681be, +0x4c469beb, 0x17637f22, 0x81882850, 0xbc91d4be, 0x6e318122, 0x95628181, 0x6c7f7f33, 0x6c47537f, +0xa8d27f7f, 0x3de9b857, 0xc73a2c81, 0x4e7f8101, 0x3b7f817f, 0x3a78815c, 0x1a67771c, 0x5a8286a4, +0xf7b98166, 0xd3b37f81, 0x4eb17f2f, 0xc1098181, 0x969a7fc5, 0x0bfbfbb9, 0x316f8194, 0x7f6f8184, +0x484ec57f, 0x364a007f, 0x1f81817f, 0xc5b9ac81, 0xbb81b97f, 0x1f82647b, 0x2c4b4600, 0x89813d0c, +0x75ffc374, 0xad7f5e23, 0x0af481a5, 0x3d0e3eee, 0xa49481cb, 0x39815284, 0x8187f438, 0x1a7c7f7f, +0x817fa8b5, 0x7fca781e, 0xb401d779, 0x424c7fda, 0x3c429944, 0x488d7f7f, 0x78966481, 0xaf687f5d, +0x89052b37, 0xbf7f407f, 0xe07fce79, 0x9fdf45d3, 0x8181de83, 0xe67f7f5a, 0x7fe6a3fa, 0x38705b81, +0x81518232, 0x7d5d9978, 0x270372c9, 0x818568a2, 0xca7f66c8, 0x43a06a3c, 0x831b4281, 0x853a7fb0, +0x8178cf45, 0x7f7f7fbc, 0xbdb69d20, 0x525c5392, 0xc081d97b, 0x18217f16, 0xb6ec7659, 0x0861819b, +0xc48b819e, 0x817f38f2, 0x7f7f787f, 0x7f365c81, 0x64a9f87f, 0xe4e67dac, 0xd1580073, 0x81817f54, +0x597f2ced, 0x575cabd0, 0xce81d781, 0x819c81a4, 0x7f5aceb2, 0x7f57ac33, 0x81e4d87f, 0x1df5d24d, +0x7f4f7f7f, 0x7feb81e8, 0x6a81816a, 0x7f668144, 0x3431b13e, 0x3cc6b82b, 0x815dc38c, 0xc65781b0, +0xae897f81, 0x9751537a, 0x817f0e88, 0xb15e69ea, 0x6f7f7f90, 0x24e681a4, 0xd97fa74f, 0x1881fa61, +0x2e743b95, 0xeb817f7f, 0x7fcd677f, 0x317f7fec, 0x9246f573, 0x2e7fae81, 0x7eb72b7d, 0x4d8144bd, +0x3f7fcb81, 0x7f619f9d, 0x51e7b87f, 0x81688e5c, 0x0d3d55d4, 0xb9b0817f, 0xaec17fd2, 0x017f2ffb, +0xa8af3550, 0x81187f3d, 0xa033427f, 0x38c83442, 0x7f7f8181, 0xe38a35cc, 0x1f00c081, 0x7f297f7f, +0xd17f7f81, 0x6e81c637, 0xc4387945, 0x26839381, 0xd481e281, 0x0b72bf12, 0x057f9e7f, 0x5eef2c81, +0x9216b507, 0xd7906948, 0x637f627f, 0x0a11bfaa, 0xd605bd9e, 0x81ea9c81, 0x72f03c3d, 0x4f819f7f, +0xc563d7b7, 0xad5a3899, 0x7ff15105, 0x7f692e81, 0xbf79c6c9, 0x3c738181, 0x7f4a0597, 0xecb5d588, +0x7f7f2b3c, 0x888144dc, 0xbd7f6e61, 0x6b6e81e5, 0x5d5b1caf, 0x4c743b73, 0x7f727f2f, 0x667f3551, +0x8fc63881, 0x81565a6c, 0x61817f3d, 0x0aa5d083, 0x81215ea2, 0x60461f02, 0x0d7fc1ee, 0xe77f813b, +0x40d9767f, 0x1c817fa8, 0x7f815e7f, 0xa5447f7f, 0x2ac45607, 0x81417f7f, 0x00aceb81, 0x8181cfe4, +0x817f81aa, 0xaa3eda6e, 0x7fbd4adc, 0xf8bb5deb, 0xf0867fe7, 0x3f33a481, 0x819e1bb7, 0x81458b81, +0x9573148c, 0x986d1ee0, 0x45deb557, 0xcf7f688f, 0x23d5b1a9, 0xc3fdfeb0, 0x3fad633c, 0xe4be21bb, +0x81819acf, 0x567f1571, 0x5a2c130b, 0x7f14e77f, 0x37d38908, 0xe55da5d4, 0xf2e16033, 0x23db8181, +0x7f8160bf, 0x177fa17f, 0xa00f4dc8, 0x47818153, 0xf7817b81, 0x529681a0, 0xeca34413, 0x7f49a081, +0x2c9c81c3, 0x8181ae14, 0x8125425f, 0x39b1123a, 0xe4e781b8, 0x4f83e39f, 0xd298507f, 0x49e92f7f, +0x26bac981, 0xf0a27f49, 0x81d281a8, 0x993dc681, 0x48c19a95, 0x98af7a6f, 0x3baa107f, 0x899d3c00, +0x7f814467, 0xc6e821df, 0x05226d8c, 0x9a813aba, 0x8d818117, 0x81688142, 0xd9244ff9, 0xdb0da3d0, +0xbef62430, 0x55868181, 0x8176d83f, 0x33813ef2, 0x941a6534, 0x81d0741b, 0x4e37927f, 0x6b2515e2, +0x8181aa81, 0x9581605b, 0x678113a0, 0x6e7f2681, 0xc1819f81, 0x813ca072, 0x7f81b97f, 0x3d95817f, +0x81233f81, 0x911e90e0, 0x76a6d5d9, 0xc6b5057f, 0x5262b67f, 0x81812681, 0xa68110fa, 0xc74a6049, +0x478eef7f, 0x8ea225d1, 0x77fd31f8, 0x7f568422, 0x81207f81, 0x94628118, 0x81a47fc0, 0x7f637f7f, +0xa8898caf, 0x8125b97c, 0x3de89f9a, 0x7f81bb71, 0x24183337, 0x88c0437f, 0xe97f6bb5, 
0xa0dc007e, +0x36dcd381, 0x887cd84a, 0x0a817381, 0x9b818176, 0x46327ffa, 0x487f926d, 0x07508107, 0x7f7ffe06, +0xcefe7f7f, 0x028172d9, 0x7f7381da, 0x81f97f58, 0x81818d0d, 0x06597fcb, 0x817f7ff5, 0x8e999e7f, +0x7fd22e8f, 0x81766ae5, 0x817f7f86, 0xca81bf7f, 0xfd144e10, 0x947ff681, 0xa9817f4f, 0x7f187748, +0x81819081, 0x8170ca74, 0x7a5d9c79, 0xbe483f81, 0x6f7f9fa0, 0x7fb0505b, 0x7fc7f15f, 0x81793081, +0xbc7bb830, 0xce7ff56d, 0x81e02681, 0xa5968113, 0xa47fd381, 0x81a68197, 0xf4ca2a7f, 0xd220acb6, +0x7f837ae6, 0x5c812609, 0x847fd94f, 0x6adf9afc, 0xb17fc822, 0xfd1b82ec, 0x8dfbf181, 0xbe008215, +0x1dfd1081, 0xd1768cc8, 0x76818144, 0x7a917f9b, 0x0c6aada1, 0x16d3654e, 0xea1a8d7a, 0x9508c955, +0xbfe88c4c, 0x593fa4d6, 0x8c45818e, 0x7f7f557f, 0x7f1dcc7f, 0x984c8181, 0x6b817f7f, 0x6c8131d9, +0x4c8185e0, 0x678457da, 0xa6bb81f1, 0x79c0ca67, 0x7fe2c433, 0x257b8381, 0x37323c7f, 0xd768283d, +0xdde15d81, 0x7996d7be, 0x7fd38110, 0x81706b66, 0x7feb685e, 0x817fba69, 0x8182ec99, 0x53c67fac, +0x7f7fc1da, 0xf8afcee6, 0x09816181, 0x8a4f9e6c, 0x5b94c2c6, 0x812b3b97, 0xd0237f7b, 0x81817f64, +0x81768181, 0x7f24b359, 0x1f326247, 0x7f7f5e38, 0x81267f42, 0xff8781be, 0x85817758, 0x7f172281, +0x81599137, 0x22423760, 0x81c8e277, 0x7f46f381, 0xbb668184, 0x7f547f7f, 0x4aa27e87, 0x217f8c35, +0x925eb643, 0x48b89ac8, 0x7f9d8181, 0xffe3a47f, 0x3e810ac3, 0x7f2c7fa0, 0x847f4681, 0x8106818d, +0x81787f10, 0x3e2a5fa5, 0x81a38181, 0xfa7f712b, 0x74dd166f, 0x0d2819a4, 0x557f426d, 0x7f818158, +0x1b817f93, 0x927f0e81, 0x7f1b9a56, 0x78c78898, 0xbb07493f, 0xd4b37f1b, 0xae8aafef, 0x7fc2c3a0, +0xb1bb3d19, 0x1189816f, 0xc2813c7f, 0xc27ff3be, 0x7f3bc183, 0xd1817c7f, 0xc6817fa9, 0x0f9ad87f, +0x7f10c96f, 0x517f7fca, 0xb9958181, 0x9870315a, 0x1c091752, 0x7f0a4746, 0x509ec654, 0x168190cf, +0x8e3d0000, 0xc0817f69, 0x7f8a6f85, 0x7fe8b634, 0x2631815a, 0x67942d85, 0x5a7f8b38, 0xa2a07e1d, +0xfa816e81, 0x3c81bc7f, 0x813b7fed, 0x81aae601, 0x7f81ba4e, 0x81817fa7, 0x32453b81, 0x8e8aa579, +0x7f81d1e2, 0x78a4847f, 0x81819b57, 0x13137779, 0x81c5557f, 0x7fd18195, 0x049928a6, 0xedd7897f, +0xa0816adf, 0xb0815ab9, 0xb2c73281, 0x7f01a4b5, 0x5ebacf94, 0x7fbb8181, 0xa97f7fd1, 0xec9d5661, +0x3da381ec, 0x4445a98f, 0x291ca97f, 0x9d948d9b, 0xf9f0d592, 0x8b1fcd81, 0x81d54fea, 0xdb81600e, +0x69a781c0, 0x817f78f0, 0x818181a4, 0x36810aa0, 0x646f9f7f, 0x70c25f34, 0x9471817f, 0x3e5081db, +0x5cc37f74, 0x7f0a814d, 0x990ba07f, 0xe55babcf, 0x59813a5b, 0x26b96e7f, 0x199c8125, 0x812e0db3, +0xe581b498, 0x8174a97f, 0x440f7ff0, 0x812d487d, 0x227f9881, 0x7fcb77b8, 0x157fc3e9, 0xdbfcff7f, +0x81085d64, 0x44a9bdaf, 0x42998481, 0xb885b098, 0x94817f94, 0x81d3367f, 0x48394f81, 0x817a697f, +0x64671052, 0xbd81153c, 0xd8637f02, 0x558cd081, 0x8485d766, 0x9ea7c1ae, 0x81b2afc6, 0x81657f40, +0x62d4f31b, 0xbd7f7f8b, 0x7b909c60, 0x2f81734e, 0x5060ab22, 0x10b62281, 0x1d83812a, 0xc7aa8866, +0x31444bc6, 0x815d6281, 0x822f8f58, 0xb75aa477, 0xc21f9a27, 0xd3545a36, 0xad30af19, 0x6b813f7f, +0x0000188d, 0x7fa14165, 0x177a549f, 0x8f9e037f, 0x1cd8f7f1, 0x1e7fb9d0, 0x557363c3, 0x7f9c1a26, +0x769fda1c, 0x7f7f8192, 0x157f7e7f, 0xcb61817f, 0x4a9d3d64, 0x8148aa58, 0x637fd2d0, 0x308481d2, +0x95a8324b, 0x7f7f3681, 0xbe407f4b, 0x2b769128, 0x8156657b, 0x7f7f7f7f, 0x45c97f0c, 0x81577f8d, +0xe90f8181, 0x817f689e, 0x2781f97f, 0x7fd1a966, 0xa07f6a14, 0xddbb7fab, 0xa53f57fe, 0x56c77f81, +0x35e0837f, 0xde818164, 0x7f5bb62f, 0x7ffe814c, 0x4a8fe8cb, 0x7f7f5b81, 0x81137fbf, 0x8157a240, +0x81048171, 0xa9d17f18, 0x8458817f, 0x81424a81, 0x9d817fb2, 0x31ef5181, 0xd081507f, 0x567b6e81, +0x91a3276b, 0x7f257f81, 0x98d9a897, 0x8eb301e8, 
0x7f8a11a8, 0xac7fb482, 0x1420dd00, 0xa629e6aa, +0x98628159, 0x4a815981, 0xf081eda7, 0xaf5ac540, 0xde387936, 0xf6f2cc49, 0x7fa88152, 0x4587a0b6, +0x0549f981, 0x7f6a817f, 0x7fde8f79, 0x7fd77f81, 0xbd58b276, 0x477f5db5, 0xa3d0817f, 0xb3811743, +0x378d5f7f, 0x889c8175, 0x00d9bffb, 0xa386f71b, 0x9b9b5a7f, 0x05301691, 0x077afd81, 0xc9813d20, +0x655b8118, 0xb1810fc4, 0x2ae18160, 0x7f818141, 0x81c2db39, 0x7f3cd124, 0x52c77f7f, 0xcfdb5141, +0x8113b964, 0x0c0bd10c, 0x8137637f, 0x56bf8149, 0x5fceb17f, 0x01857f81, 0xea81917a, 0xd67a3c7f, +0x5fd93fa4, 0x93720000, 0x31e08193, 0x957faa7f, 0x30e1b019, 0x557f19fc, 0x327f5c1c, 0x81e98181, +0x567f6f7f, 0x817f60bc, 0xe4f481a2, 0xb1f2aa31, 0xba5b7f36, 0x4d2bd790, 0x914d4e38, 0x817f7722, +0x10fb7fcd, 0x2f7f3026, 0x8c7f3c92, 0xbe81392b, 0x1cae7f78, 0xb6cb6581, 0x2f52db43, 0x49cb7f71, +0x7fe6dfe9, 0x232e7e66, 0x1497965f, 0x67316ca2, 0xaf71c384, 0x727fbd7f, 0x5e7f6ab9, 0x7b5181bd, +0x38e38170, 0x48cae97f, 0x4a81669c, 0x3d2d7f7f, 0x69508352, 0xac988181, 0xe093a587, 0xbd907753, +0x817fb87f, 0x894ac8c0, 0x9f7f9ccc, 0x6336a2de, 0x7f7fd741, 0x75b723c5, 0x396e7fc6, 0xc5c0819d, +0x813c7172, 0x816a7fa6, 0x95bdf475, 0x69b21f5f, 0x817f7f06, 0x4c90685e, 0x7fea7f7f, 0x7f958f0b, +0xf6a7937f, 0x5b6caa7f, 0x5fbf7f30, 0x4c6cb506, 0x7f097f95, 0x8181d881, 0x817f34b8, 0x937f7089, +0x830181a8, 0xa6b2817f, 0x4912930e, 0x7f8fd67b, 0xf69bd281, 0x7a5fd981, 0xc4aa7f7f, 0x817f819b, +0x9c05c2f5, 0x8f579881, 0x5aa4818e, 0xc14dc0bd, 0x7fce0981, 0x147fcd41, 0x817fe75b, 0xf8e1a54f, +0x817f8981, 0x1334585f, 0x810281db, 0x146db1bd, 0x25347f7f, 0x9cbf987f, 0xb6736d0c, 0x814ab481, +0x0fd1653e, 0x7fd67170, 0xe564ce22, 0xd05df37f, 0x10871f96, 0x9dc9e03e, 0x53bc03ad, 0xc47fd1c5, +0xaa4dd88d, 0x00004ad1, 0x8a35a85a, 0xcff89253, 0x64d0819b, 0x149ab17f, 0x819f7a33, 0x49fc81c7, +0x1881d433, 0x196171c4, 0x784dbdae, 0x8193148e, 0x75b63674, 0xdf81e4ab, 0xad299a6d, 0x8168b3d3, +0xeea6426e, 0x81c99981, 0xb17f647f, 0x814ec5d2, 0x2e3b16a8, 0x81597f81, 0xa97f8181, 0x2caec714, +0x387906fd, 0x818181be, 0x7f7fbcb3, 0xd55a81e6, 0x8134ae81, 0x7fc3817f, 0x9853ee56, 0x5881999d, +0x9ea98184, 0xfa8181a6, 0xceff5978, 0x688e505f, 0x7fbbb830, 0xc678d06a, 0x3c90e3ae, 0x4b6d7f23, +0x24cd7f58, 0x7f92c181, 0xb1a97fc2, 0xba872981, 0x66ec49a3, 0x7f7f26b5, 0x7f8163b8, 0xae7f7f7f, +0xde7f3781, 0x4147d381, 0xe191cb81, 0xd45b81c3, 0x474c9e91, 0x55b546db, 0x0e81b5a6, 0xb5b976aa, +0x7f7f7f3d, 0x968a81d7, 0x7f1f9333, 0xb37f7f7f, 0x2f7f6418, 0x2532a07e, 0x8b81bf44, 0x89818144, +0xa1572ffd, 0xfebf182a, 0xe54d7f5f, 0x107f8949, 0x00210081, 0xc53345a1, 0xa2818ff6, 0x81c181e2, +0x9b81f1d5, 0xd1061965, 0xb7a47ff0, 0x81817fb0, 0x18bc6181, 0x7ccc5981, 0xbe2b9dae, 0x038f8173, +0x7f3bec4d, 0x81ec81b0, 0x817b9a6a, 0x817cb47f, 0x7fa98fb5, 0xb0355e36, 0x8dad6109, 0x4b138116, +0xa3e19d81, 0x449ab793, 0x984acd21, 0x168aeb94, 0x85baac5d, 0x7fac8173, 0x81ad81d9, 0xa34abb7f, +0x81ca817f, 0x817f6074, 0x7fbc0000, 0xc79b108a, 0xc06990ca, 0x317f12cf, 0x9281e743, 0x90c681fd, +0x17b57f3a, 0xbdd05681, 0xfafcef3d, 0x6f0a0d81, 0x4337197f, 0x7f4881af, 0xb592de71, 0x44c24881, +0x536f9532, 0x21506996, 0x68489d79, 0x4aaf4312, 0x740c9447, 0x657f6673, 0x637f7f0f, 0x81205569, +0x8fb3fe9c, 0x4881995c, 0x8163cfb0, 0x17a4817f, 0x7f59ad84, 0x6d0c30d2, 0xbdc6b981, 0x1bfa8155, +0x7f22a8ca, 0x9c9a817f, 0x3bdba778, 0xf6817eb1, 0x07858141, 0xc037817e, 0x3f2c868c, 0x8181d07f, +0x02b704dd, 0x467f6bc8, 0x4881ab7f, 0xa25c814e, 0xcb4d8e6e, 0x477c7f88, 0xd991aecb, 0x9c81b243, +0x12d6b7e6, 0x03d745a0, 0xdf0bc9d8, 0x52637f85, 0xd557817f, 0x96342cc0, 0x81423981, 0x70657f81, +0xde8181cb, 
0x8a81de2b, 0xf55d3e2f, 0x2d717f81, 0xba81289e, 0x99c27f7f, 0x81c5817f, 0x90be817f, +0x918ea3dc, 0x8de39c22, 0xce577f93, 0x3f7f8186, 0x9e817f7f, 0x816c8aeb, 0x3d818e2e, 0x8181be9f, +0x0fb9429c, 0x3c7f5155, 0x81275b24, 0xa47fda70, 0x84cc88c5, 0x70797f81, 0x812eaebb, 0x16a1314f, +0x7f7f817f, 0x38b61a4c, 0x7fb7388b, 0x7fb6e681, 0x5581987f, 0x9281c37f, 0xc45b7f7f, 0x8b35b97f, +0x7f7f7f81, 0xa23c0e3b, 0x7f9b2083, 0x9cc39283, 0x8843699c, 0xc07f81dc, 0xa1f98b7f, 0x81d245b5, +0x8181597f, 0x1aa93681, 0x000001df, 0x81574196, 0x7f3f3df1, 0x6fc8c7c4, 0x5e4d7f8c, 0x81b4dc44, +0xd56f7f8b, 0x8116a381, 0xb03f5f81, 0xf5020639, 0x7f02b25c, 0x55818143, 0xab658147, 0x357f1daa, +0x296ac18e, 0x8186e57f, 0x7464272d, 0x417863e1, 0xeae67831, 0x1a605547, 0x45ffb1d5, 0x7f066c81, +0x81bfb062, 0x4cb8f381, 0x81817f7f, 0x145a4bc0, 0x2e813d7f, 0xc6524d71, 0x81818181, 0xc9d681fb, +0x2a5db57f, 0x12c97fa7, 0x317f7f48, 0xf9b88131, 0x354c1389, 0xd77f8155, 0x811b4dbe, 0x9d3d457f, +0xf9dc2f28, 0xb01c8181, 0x5a81716d, 0x7f7f3d81, 0xd87f867f, 0x817f0581, 0x564f8181, 0x7f678181, +0x5cdd7f91, 0x07ad3f96, 0xb4dd9e34, 0x7f7fd50d, 0xa67f81b6, 0x01353420, 0x6481798a, 0xa5a68181, +0x9c7f816e, 0xafc73c84, 0x427b8d32, 0x1bbb817f, 0xd45c7fa5, 0xb0c2ec48, 0xb459aa7f, 0x70818181, +0x5ec983e7, 0x81294066, 0x81447f6f, 0x9b819f5e, 0x7fb1a47f, 0x81b8a081, 0x5f91c62b, 0x7f63de81, +0xdc813ff2, 0x817f81ce, 0x3d677f0b, 0x4da9dae5, 0xcc8190d9, 0x1d7fbbfa, 0x5ffdd2b7, 0x69624b8f, +0x50963e7f, 0x7f7fa281, 0xcc4b8149, 0x8102da7f, 0x3f7f9df5, 0x435732c5, 0x7f07d881, 0xb20fe07f, +0x4896ac46, 0x44b3ed39, 0x7f7f8127, 0x9e7f81e4, 0x8181b15f, 0x7c818132, 0x7f41980b, 0x299ea18d, +0x7f196381, 0x7fc95f36, 0xbecb7f35, 0xdaa80000, 0x292a7fe2, 0x3292947f, 0x7f98e9c5, 0xb77c984e, +0x74847f81, 0x917f487f, 0x537766db, 0x30b71c7f, 0x81237f81, 0x9d7f9542, 0xfc75737f, 0x72499c3a, +0x6885625b, 0x7f061c6d, 0x818e7f81, 0x5b814381, 0x81f17f4d, 0x4d81b6cd, 0xd2f7d778, 0xe5e9d5db, +0xf7f081c2, 0xb80b0481, 0x975658f2, 0xe8b1197f, 0x81634b1a, 0x7f6bd994, 0xdbd67f7f, 0x757f3406, +0xc4d07f7f, 0x63e7507a, 0x102b7709, 0xe08cf836, 0x1db26230, 0x1a814510, 0x40679bdb, 0xf336a87f, +0x7fc3bf7f, 0x818e69c8, 0xf67aa2a3, 0x75425b7f, 0xa4b654c1, 0x7f7f5ac1, 0x5f10488d, 0x57dc7f30, +0x7d817f81, 0xfc81237c, 0x388a81d6, 0xbce67f42, 0x7f7a7f56, 0xfbb7ff87, 0x467f4681, 0x22dc9b7f, +0xba939029, 0x4a44c1a5, 0xa4be71d3, 0x7ffb3589, 0xfefae9a6, 0xa1963e4f, 0x66b3d6b3, 0x81506f83, +0xee7f32ee, 0x7ffe7f7f, 0xbf505c53, 0xd769ca7f, 0x69c267d2, 0x7f9f8346, 0x3f7f7f7f, 0x48813f51, +0x7fed8115, 0x9e81aef0, 0x7f7f22e3, 0x819fa190, 0x55457f81, 0x7f813b81, 0x251b4a0d, 0x6476818e, +0xc4d8da21, 0x52fd2d4b, 0x81943c7f, 0x273ce481, 0x7d53817f, 0xcd9d9058, 0x754151d2, 0xcb9f365b, +0xac73ecd4, 0x7ffd9a4b, 0x577f2d92, 0x41c60481, 0x81297fa9, 0x9f7f850f, 0x8c513329, 0xe854c39f, +0x34bb4a20, 0xc37f5178, 0x0787cc22, 0x6c86717f, 0x75815a48, 0x2023777f, 0xd37fdd7f, 0x6a819cf7, +0xdc44f5e3, 0x258157bd, 0xe8e7813d, 0xb9118195, 0x9a5a4581, 0x2d8c517f, 0x0c728161, 0xaec769ac, +0x1f554191, 0xfc8be381, 0xca703b7f, 0xab813d5a, 0x3f7faced, 0x707a2181, 0x7f1b8c7f, 0x7fad7f99, +0xb307377f, 0x6c9d5b81, 0x7f3bfac6, 0x8182567f, 0x5a246cdd, 0x55408105, 0x277f7f84, 0x7fa09c52, +0x817fa355, 0x404fd360, 0xecbc4281, 0xc6897f20, 0x7881ae81, 0x407f81e1, 0xfb35587f, 0x9b67fb81, +0x7f8103ad, 0x36708155, 0xab81f381, 0xa3fe6417, 0xa97f7f98, 0x3cf7cd7e, 0xae1b4dc6, 0x8ac2f749, +0x7fb35e89, 0x07e0743b, 0x7f818135, 0x81a15481, 0x93819bbc, 0x81f68114, 0x194ccd81, 0x81527f7f, +0x583ba074, 0x91654d78, 0x4e81398d, 0x0c54567f, 0xe453d054, 0xd37f8175, 
0xd57f81ea, 0x345e5b7f, +0x3589262c, 0xa8b886de, 0x811b8197, 0xbbc890ee, 0x8db0b9dc, 0x9fcb817f, 0x39817f7f, 0x55391fe5, +0x7f7f3f92, 0x5a7f55d9, 0x747f3c71, 0x43e31da0, 0x67dccd81, 0x81ddffaa, 0xc32a5881, 0xb9816d7f, +0x424d7f7f, 0xb97fa435, 0x7fdb7f81, 0x48fb8182, 0x8181407f, 0xba7fa88d, 0xa16d467f, 0x81cb8a81, +0x8181a66c, 0x8c747c1b, 0xb4810470, 0x8133dc82, 0xbc77727f, 0xee981437, 0x7f2e8981, 0x26be1939, +0x12da817f, 0x81a1da61, 0x1d2af46f, 0x7f3781f5, 0x29500000, 0x657fc67d, 0x81817f7f, 0x49e406cb, +0x0b787faa, 0x7bc2643b, 0xe4e4f0ec, 0xd28175e5, 0x7f425abc, 0x900c1828, 0x7fa85f7f, 0xe8365361, +0xc5d87fa2, 0x81baffab, 0x81818612, 0xbcb5af1b, 0x7a7e5d7f, 0x81b07f7f, 0xa27aba93, 0x817fd881, +0x818481b0, 0x7f8d1c22, 0xb48c869b, 0x977f9429, 0x9a8c7fc5, 0xb8d9b270, 0x6e340e88, 0x670f7f39, +0x7f4efebd, 0x41b5b57f, 0x7f277f0c, 0x5f177f23, 0x94818592, 0x68362b87, 0x6681ea7b, 0x7f187fd2, +0xa38131ae, 0xc97f698e, 0x25bd8155, 0xe37d88d6, 0x2d018f4d, 0x391e7f7f, 0x7f7f845a, 0xe8f8ee81, +0x4beb9d63, 0x7fd47ff8, 0x5e5781ec, 0x237ffe3a, 0x8e818168, 0xa1e781e7, 0x85de7f81, 0xbaaad281, +0xa7486481, 0x555e737f, 0xa2b28f7f, 0x70635a47, 0x7f837fd8, 0x64ea8641, 0xc20c7f81, 0x0b9e738e, +0x698e8145, 0x783a5a00, 0x7f376e81, 0x7f8cbd20, 0x7f548181, 0x81149934, 0x3dad456a, 0xb4561fed, +0x815d8127, 0xaea4bb24, 0x2187755a, 0x847ff0b7, 0x7fdb9039, 0x81887695, 0x8781e111, 0xe0de037f, +0xc879f7ca, 0x7fc18381, 0x7f7ff7bf, 0x958b7f81, 0xa65fe7ad, 0x3481817f, 0x9f897b7f, 0xa2bb7f7f, +0x31018170, 0x70e08c81, 0x4c1da62b, 0x16f27ff7, 0xdf0781ed, 0xbf4081c0, 0x3a233c53, 0xf8aed938, +0xef24617b, 0x812681ab, 0xa66e4db4, 0xef9b65b3, 0x00007f44, 0x7f7f997f, 0x7f341fc7, 0x0f9b2be4, +0x7ff67f81, 0x81817e2a, 0x4b4d7f81, 0xc0706505, 0x81fc60e1, 0x30d87f9c, 0x7fff9c0d, 0xa789757f, +0x3881d769, 0x6367bf81, 0x9bae7f8d, 0x8e81bbb6, 0x61e88127, 0xc96c7fbf, 0xdf813181, 0x7f7f7de7, +0x3e817f81, 0xb97f817f, 0x8b927f81, 0x5e74742d, 0x7c49817f, 0xbc1172b1, 0x81818181, 0x6dd2d4bb, +0x98f3e058, 0x7f81efde, 0xaeee76df, 0xd57fca01, 0xf5817f81, 0x26747f81, 0xdc3f8181, 0xc335c749, +0x7f2f7f7f, 0x7f02e67f, 0xfb706975, 0x8f4efde7, 0x819b414c, 0xb14cde70, 0x22e6812e, 0x3843892d, +0x6f81ce60, 0x50c12725, 0xb4e279d5, 0x7fb1ac7f, 0xbc7f8183, 0xc7767881, 0xb6e71854, 0xb37f7f77, +0x3d7f7f7f, 0x817f6824, 0x4981ae81, 0x7f811a7f, 0x58c37f74, 0xbef5a27f, 0x92a62541, 0x8643aa3c, +0x8f838181, 0x397fe4f4, 0x8a814677, 0x93e3affa, 0xa6fa21e9, 0x7a3e7f7f, 0x332a7872, 0xf28c8176, +0x227f573d, 0xbbbb6348, 0x7fefb2de, 0x43817f69, 0xc4c37b31, 0x747fa118, 0xacb3819c, 0x7f689f89, +0x74e0604b, 0x81b57f88, 0xe925a0cf, 0x81be2181, 0x126d34f1, 0x7648d74e, 0xd760b29b, 0x04d0fda1, +0x3b858181, 0x1be20981, 0x8127ac7f, 0x8a6b8181, 0x7f616ca2, 0x37e6e881, 0x7f9b82c0, 0x7f6e7f93, +0x8dc72420, 0x752f3d39, 0x5e7fd2e5, 0x7f25c372, 0xe97f2677, 0x307f0000, 0x81383b86, 0x713de0fd, +0xe47c817f, 0xf0214e8f, 0x8108349e, 0xe0c92881, 0x7fafb0e2, 0xe939b48c, 0x4855d8ae, 0xb581e469, +0x967feb6a, 0xba543d20, 0xc47f0c62, 0x2a28e6b2, 0xd27fb09a, 0xbadf7f7a, 0xbf98d54c, 0x5f54cfa3, +0x4881248d, 0xc97f7f53, 0x17816966, 0x3450877f, 0x4ff681c8, 0xfdb8786d, 0x0209730d, 0x7fbbba84, +0xc5994cff, 0x4d31cc44, 0x7fb99400, 0x239d757f, 0xa2bd517f, 0xd5558194, 0x93818191, 0x819b8190, +0x447f9289, 0x777f3e81, 0xc2c04a7b, 0x27c38eaa, 0x437a383d, 0x7f812432, 0xbd77707f, 0x7f81ea6d, +0x40256698, 0x81a95b58, 0x2fb47f81, 0x74b64d86, 0x70f07f23, 0xd7d6bbd5, 0x817f18a6, 0xe862327f, +0x3ff56cff, 0x7f7f8197, 0xcb75c381, 0x58954f57, 0x43818176, 0xa903811d, 0x7f6abbfa, 0x8cd7a77a, +0x55814473, 0x7f819e1a, 0x557f9e89, 
0xa9c955e0, 0x648191b0, 0x81a97f51, 0x1dbc1181, 0x685a6b36, +0x7f2e5ef2, 0x59819afb, 0x3f30fd68, 0xe986702b, 0x810a2581, 0xfedf7781, 0x947ffb3a, 0xa9777f29, +0x9c547f3d, 0xe36d7f07, 0x81909281, 0x2b87817f, 0x07aa817f, 0xd9137f30, 0xb791d3ff, 0xb84869c5, +0x4db4e1ce, 0x81beed66, 0x1637adce, 0x10a1897d, 0xfd817fbb, 0x5771c343, 0x3b16dc58, 0x7fbf813f, +0x7f9ba269, 0x353a7e21, 0x14f07fa6, 0x7f81815b, 0x484bb301, 0x0000c27c, 0x59141b93, 0x7f637f3e, +0x3012937f, 0xf2a2b350, 0x1d7f7f7f, 0x81b6d4ed, 0x4fc3ba54, 0x3f7f107f, 0x02af3855, 0x598c814a, +0x3eb09fd7, 0x636832b6, 0x7f9e817f, 0x7f939c7f, 0x7fa99581, 0x7f817fb1, 0x387f3475, 0x9893f771, +0x52813581, 0xc5c75881, 0x7f787887, 0xb97981d0, 0x27436262, 0xfae6f21d, 0x81cacf5d, 0x41a5de81, +0x81817d81, 0x3b2f7f9e, 0x5dc0ade8, 0x3d0e8154, 0x81bd3a0a, 0x6e818153, 0x7f818123, 0x817fc575, +0x811b669b, 0x7f037929, 0x26acac81, 0x7fb3efb6, 0x82817fc3, 0x0c81f981, 0x607a677f, 0xc8258116, +0x0ae77a50, 0x816fba35, 0x7f256549, 0x04c57f89, 0x4381075f, 0x81cd3b56, 0x8ccdad7f, 0x7fa45cd5, +0x817fb67f, 0xb7698dbe, 0x5562298b, 0x688c257f, 0x9d7fde81, 0xe57f4e81, 0x7f81c9ea, 0x281a41f9, +0x7ffdfc6c, 0xaa32217f, 0x81a08166, 0x8181b1ae, 0xc447880e, 0x2b81367f, 0x2944a47f, 0xc6de9b49, +0x81816841, 0x54fa7fa8, 0x587beb7f, 0xe3475a81, 0x81dc2cb1, 0xa95dc06d, 0x8bd7477f, 0xb05d7f81, +0xb4c19d7f, 0x43817f53, 0xde7f5c7f, 0xe3d2817f, 0x8948aadf, 0xc17f7ffb, 0xd08188c1, 0x8181204e, +0xb3b6d781, 0x7381b381, 0x787f7681, 0xee640849, 0x657f8181, 0x811b329b, 0x5c728eed, 0x0c7f59c9, +0x4cc3b12c, 0x814f7e3d, 0x5f7f7f46, 0xf9817fd9, 0xb1b5ba76, 0x1f42609e, 0x3db30000, 0x400d7f46, +0x7f7f7f81, 0x45d36281, 0x81df9581, 0x7a7f7f53, 0x8181330a, 0x7ff5c3ab, 0x9e81b4bd, 0x7f70c27f, +0xac1666b3, 0xcf7f52e2, 0x817fea30, 0x8c4b3f7f, 0xa085bde5, 0xabae307f, 0x817f7fde, 0x17bc61b4, +0x914ccc4b, 0xe1e56bae, 0xac294f88, 0x75e562cd, 0xbfbd35cd, 0xed507f81, 0x81afcf81, 0x7fdc819b, +0x5a0385e8, 0x4f2aabf2, 0x569c7f3f, 0x7fbee1bf, 0xb0407fc4, 0x6b548214, 0xbcd6877f, 0x3d6e7716, +0x9a8134a3, 0x0a7f7f81, 0x29d73eca, 0xadbe7f65, 0x7fc1bb81, 0x7f63dc7f, 0x7f815430, 0x810e2366, +0x7f36057f, 0xda39d7d0, 0xe9855ddb, 0x7fffa2f9, 0x3b9a8181, 0x7f06b781, 0x844db0a2, 0x7b1d7fa3, +0x99a795ad, 0x7b819a6a, 0x3781819a, 0x867fc788, 0xa181817f, 0xfcc3637f, 0x747f5a5a, 0xf27f690f, +0x2f8a9c2f, 0x617f7fb3, 0x81b4b1c2, 0xa2818b96, 0x7f7f7fa6, 0x20707f81, 0x7fc9817f, 0x55577fc3, +0x777b40b9, 0xdffa3781, 0x38ba0e4e, 0x4f667d32, 0x9cf081ea, 0x8187d122, 0x79af9ebc, 0x647f99c2, +0xd1a47f79, 0xaba67f7f, 0x6b270e96, 0x90c07265, 0x817f4b81, 0x71f31f65, 0x2d818692, 0x65570681, +0x81f59b98, 0xe6b181ec, 0xc5b1a58f, 0x7fb2c981, 0x7fdb5914, 0xe37f81bd, 0x7fefd823, 0x9a7f9ccb, +0x631762c2, 0x7f4e81b8, 0x813d88c1, 0x9768c97f, 0xbb867f81, 0x2cd79456, 0x00004aa7, 0x8ec66e57, +0x7f7af878, 0x747f3cb4, 0x8c774fa4, 0x7f8df013, 0x8c5c7264, 0x3aade68d, 0x81819c30, 0xb0d38147, +0xbd816181, 0x4f415cbc, 0x7f21f710, 0xf7ba5565, 0x81df367f, 0x637f7f9a, 0x7f182e7f, 0x6f81996f, +0xca547fe5, 0x637f8db9, 0xccb4fe59, 0x55ca7f81, 0x7f7f7fae, 0xceffc677, 0x2955d0f8, 0x91a07f7f, +0x7f818181, 0xebc2f87f, 0x7f213646, 0x94a2a981, 0xd1819bdc, 0x7f866084, 0x2381ff8b, 0x75b58114, +0x447f162b, 0x5275e0a3, 0x37578cc2, 0x0dc1ec31, 0xb73681a8, 0xb081978a, 0x74429a2c, 0x698859dc, +0x937fb91d, 0x7fcdc06e, 0x81b4ffff, 0x7e2b168d, 0x4b649aa9, 0xe648be08, 0x44e37b7f, 0xc0fc27e9, +0x1b16521e, 0x81577fc5, 0x7f8152a8, 0x2981ee7f, 0xf08dd19f, 0x7d93ed81, 0x814435c5, 0xe6c67f29, +0x7ffc8111, 0x87f65881, 0x3f62563a, 0xa09a81f9, 0x490f5781, 0xc70d6284, 0xfb818181, 0x56817f38, 
+0x87420960, 0x0d2dedb3, 0x8ef18143, 0x4c5b4a62, 0x8f947f81, 0x667f8121, 0xcc817f81, 0xb567a44c, +0x7996b182, 0x736e9881, 0x81627f7f, 0x7f8e7f81, 0x725ee43d, 0x9f436a50, 0x7f055d88, 0x93498181, +0x21d08f7f, 0x26f17f3c, 0x581d939b, 0x90595ba7, 0xf5200e81, 0x4b91b78f, 0xcb7fc693, 0xe87fd04b, +0xaf81087f, 0x14ac4a01, 0x7f7d7f81, 0xe014dd7f, 0xc2816aed, 0x79760a93, 0x7f9b817f, 0x81480000, +0x844d73a5, 0x7f8181bb, 0x819dae3f, 0xc87f8e82, 0x9d81a23e, 0x7fbd813a, 0x8181702e, 0x4fab7a67, +0xd57f70b6, 0x817c53ac, 0x1355de7f, 0x4264437f, 0x81b0ff2f, 0x16d6e094, 0x7fc40f55, 0x7f9b7edf, +0x7f5a7f81, 0x7f996e66, 0x8e932881, 0x7f817f81, 0x6d4ca679, 0x815f871e, 0x81b4817f, 0x7fa97f81, +0x5e03827f, 0xbcd8b6a8, 0xc5683edc, 0xbb007fdc, 0xea266973, 0x498e467f, 0x86b1cc7f, 0x17814738, +0x81b7ae62, 0x56fea8f3, 0x5eac534d, 0x54f67f81, 0x24add4ce, 0x811a7881, 0x81cfc383, 0x9f2f81ef, +0x7f1f41fe, 0x819e3781, 0xcc73a3b0, 0xee148181, 0x7181d87c, 0x81c1c9e2, 0xfee4727f, 0x9b608139, +0xa2ec4ace, 0x7f810c7f, 0x9a7f2288, 0x46b169e4, 0x9b40817f, 0x547f2f7f, 0x81ef4f81, 0x243fffe3, +0x8181dd3f, 0x347f6596, 0x7f59c3fa, 0xa26cc7ac, 0xf7384881, 0xbdc0a118, 0x7f8173a8, 0xabf1b3e3, +0x7e3e8eac, 0x30c07bce, 0xbca0b981, 0x75816414, 0x81377f81, 0x7f7feaa1, 0xd8287f86, 0xfbd98bd1, +0x7f987fc1, 0x81b8347f, 0x8c7f37e2, 0x987f3c7f, 0xb5f681ab, 0x347f1adc, 0x1b815481, 0x91f17f60, +0xbf85a67f, 0x6d7f2374, 0x587f3c1b, 0x74b0ae9c, 0x97a64fb4, 0x1437697f, 0xc77f3b81, 0xf4e0bbad, +0xb6892b64, 0x5ed0e47f, 0xa55c8100, 0xbbe24e10, 0x50bb6551, 0x6db5349d, 0xaaae405f, 0x4d7f4f6d, +0x989e95a5, 0xcae9bf2b, 0x811e7f4d, 0x90538181, 0x7f9f096e, 0x4126a5df, 0x1369537c, 0x81a73581, +0x722f1409, 0x8b297c54, 0xf17f5081, 0xc22b4595, 0x7f6fde57, 0xdea96095, 0x7f98814f, 0x40f8fb81, +0x697fd77f, 0x35e07f33, 0x27459a7f, 0xc0818181, 0x3e817fb2, 0xdd5eae81, 0x8a8efd53, 0x13e91281, +0x2c486ba8, 0x7f6b447f, 0x7f818120, 0x1c496781, 0xf8e45993, 0x48817f81, 0x009db045, 0x7fd9c722, +0xab817fe2, 0x73bd7951, 0x687fa97f, 0x7f7f7fb8, 0x4f447ffb, 0xfc31d9c2, 0xc1d5d865, 0x526abd82, +0x7f6e7f47, 0x6b31817e, 0x8509817f, 0x5581fcbf, 0x7f9bd8ac, 0xcc22f37f, 0x547fa07f, 0xbd4fd2bb, +0x6f49a523, 0x835e896c, 0x65458e81, 0xd0e37fd2, 0x81587fe0, 0xd95fb08b, 0xd938d43f, 0x11e67fc6, +0x7fec3c2e, 0x7f8581d5, 0x017981b9, 0x812c8133, 0x307415cb, 0x2081bd7a, 0x994d5c83, 0x53812ab3, +0x7f8190bc, 0xad811645, 0x73c1ca14, 0x94c87d32, 0xbd4a7063, 0x7f442f7f, 0x06812617, 0xc481acc0, +0xc7bacc81, 0x81de5481, 0x7fa3167f, 0x4a3a385f, 0x2b817f37, 0xae912b7f, 0x32c4cfeb, 0xc3bf3bb1, +0xa4a63e50, 0x846c7b7a, 0x5c81816a, 0x75813daf, 0xd0665307, 0x6031117f, 0x507f5429, 0x29bfc44e, +0x81b69437, 0xa4c54d61, 0x81391b7f, 0x98347fec, 0xf7da5b06, 0x7e3d8b7f, 0x7f7f1abe, 0xa955d23a, +0xb7810000, 0xa1608164, 0x5f813cfb, 0x16602281, 0x5fb6814d, 0x3c3fd67f, 0x5dae83eb, 0x58ac2c2c, +0x097f5d25, 0x6081da52, 0x7fd4818c, 0xe181d13c, 0xc66d7f2c, 0xe94a00a3, 0x2cec12a0, 0xe84c6398, +0x64398162, 0xd056816f, 0x8147e081, 0x9ceb75c0, 0x7fbec481, 0x16d15734, 0x8158cd7f, 0x369c8172, +0x2a479b81, 0x7f4e9686, 0xb1494e0f, 0xe5816226, 0x057f7f55, 0x81dba929, 0x8f1890e4, 0x5e7f817f, +0x7f7f8162, 0xfced63c7, 0x370aadc1, 0x5f09b34f, 0x7fc0be25, 0x36927fec, 0x814d0ab6, 0xfd077f60, +0x8162f9ab, 0x8185597f, 0x2bac2f66, 0x28e6aa9f, 0x814dc1c6, 0x717f3841, 0xffd1078b, 0x4f5b8a12, +0x7f429a7f, 0x7f734d7f, 0x43817faf, 0x7f4e7f52, 0x232e0524, 0x7f4158e4, 0x327f7f7f, 0x89b464d8, +0x8130a3a7, 0x7fe6ac32, 0xd1818e81, 0x427f957f, 0x8150c60e, 0x3cb50481, 0x31955f7f, 0xec427f9f, +0x99eb5f69, 0x8ac893da, 0xf97da17f, 0x7f847f73, 0x7f97ce7f, 
0x7f7fe806, 0x8984629e, 0x34539cba, +0x8a64a575, 0x25297f49, 0x1c509d07, 0xd5aa7f7e, 0x74817f79, 0x81819981, 0x95815a8b, 0x9518a19f, +0x3d9183ce, 0x817f00a1, 0x30306781, 0xc67f7f7f, 0x81594a49, 0x3d620f81, 0x3481bd81, 0xc3af8e63, +0xc249dc8b, 0xe5fa637f, 0x81185512, 0xa0b48307, 0x814c817f, 0xc6dac58b, 0x8b5c781a, 0x81982c81, +0x0000b269, 0x8105f78d, 0x81ad60f7, 0x81ffb8cc, 0x917ffed4, 0x104fc0a1, 0xfa38b45b, 0xb5318137, +0xbd781466, 0x0bd8068c, 0x72c57f91, 0x8181b48e, 0x7f81aedc, 0x43677f7f, 0x7f4f7f3a, 0x437f8178, +0xe99d0b81, 0x868792bc, 0x17727f57, 0xada05e06, 0x694d707f, 0x9081e59b, 0x7f3a8181, 0xb2bae481, +0x7f81dde5, 0x3e307f7f, 0xcaa07f3a, 0x2e1a8170, 0xed0f4881, 0x297f7f61, 0x81cc815e, 0x4fec813b, +0x1a4f02b2, 0xc6e66a4e, 0xe9817f6f, 0xc2c36a02, 0x302e9f60, 0x4ea28137, 0xb9d57f32, 0x462a7f81, +0x9085bb36, 0x7f7f817f, 0x3c714908, 0x70c9c17e, 0x5a7f204b, 0x8fbf6f70, 0x97880981, 0x0cbacfdb, +0xb80f6d81, 0xb6bb3aef, 0x81691db8, 0x1081e2df, 0x7f0504a0, 0x797d5e7f, 0x006de423, 0x7f19f16f, +0x1eb28181, 0x819f7f81, 0x7f6a84b9, 0x936537f2, 0x14a87f81, 0x7fa9cb81, 0x81813a8c, 0x8182c47f, +0x56758144, 0x81c55c97, 0x86de0b81, 0x818181d9, 0x7f386281, 0x7fbb2738, 0xffd18d81, 0x17e1eb34, +0x656449b7, 0x7f7fbe33, 0xd7f681b6, 0x77313c4f, 0x7dc9151e, 0x28debefb, 0x7f7f5c62, 0xa850b25f, +0x81166b59, 0x7aea7f05, 0x325d81a1, 0x7fd06471, 0x93137fbb, 0x43a51892, 0x3e326cfe, 0xab81a942, +0xb208d017, 0x817f0781, 0x08fcca86, 0xa31a7f81, 0xeb93206a, 0xd4fa21f4, 0x684f9029, 0x7f817f57, +0xdca7812c, 0x7f7f0000, 0xf2dc5a96, 0x7f62aa7f, 0x918d1c7f, 0xfbb5add6, 0x407fc0da, 0x4d5d9f7f, +0x935d8166, 0x817da9b1, 0x205d7f3a, 0xe08b4181, 0xd8bd7f7f, 0x4f33818c, 0x8105b781, 0x2f81cd81, +0xbaf8a48d, 0x2b81aa7f, 0x90b5810d, 0xbc62c181, 0x1592f72a, 0x4d81b5fe, 0x7f81e447, 0x99d22181, +0x303e3ea5, 0xa23078a5, 0xba8a7995, 0xd5282790, 0x6681453a, 0x7ff7ab7f, 0x7f4e8195, 0x8bc48181, +0x817fd481, 0x81b98142, 0x265a8182, 0x81f768a7, 0x85f11690, 0x7f957f7a, 0x109981a4, 0xeabba365, +0x577f9057, 0x5f7fae65, 0x817f8199, 0x718e3daf, 0xd6818181, 0xd1a1cc81, 0x97a48187, 0xd8d14520, +0x497b7f10, 0x7f81d781, 0xb77f747f, 0x7f813974, 0xc52a7f7f, 0x7a292226, 0x81602ef8, 0xe481cc7f, +0x0b4266a9, 0x4722287f, 0x7648747f, 0x9c861ec6, 0x4ba9efbe, 0x2063813f, 0x5b277f81, 0x54608181, +0x81b3ae7a, 0x5b812281, 0x6c306f81, 0x4fa0bcd5, 0x540c85d5, 0x2f81817f, 0xd2c9078a, 0x4281a39a, +0x3e7fa1d8, 0xa47fb881, 0x3efe9c7f, 0x7f5f6c23, 0x4681d46f, 0x49cb81c9, 0x7f4eaa68, 0x2e7f7fac, +0xbb497fd0, 0x3a4253a8, 0x91a6aaaa, 0x4f7f5a52, 0xf9375fad, 0x8b2a7f2d, 0x746e7f81, 0x7fb0cbe0, +0x5a917fd1, 0x5b65867f, 0xe9606f9c, 0xe9f8814e, 0x3045cf65, 0x937fad52, 0x760f5a81, 0x9be4ba50, +0xb59d81e5, 0x0000d77f, 0xd481bd81, 0x27224a55, 0x61467c81, 0x91d865df, 0xffa17fe4, 0x7f8a0c68, +0x1640fd52, 0x987f7f6e, 0x4d815a5a, 0x3081b65b, 0x7d7f6c8e, 0x5b26bf60, 0x44814281, 0x5a813427, +0x81f47a3b, 0xef9281eb, 0x81994ca6, 0x7081317f, 0x7f7fd49a, 0xfb8c8e7d, 0x546ad581, 0xd6817f16, +0x7f3e4b81, 0x4f4c4898, 0x817fe22f, 0x819a37c9, 0x387f1a55, 0x8cfddd03, 0x318aa9bb, 0xcb6e2d90, +0xa47f0d1b, 0xae81c204, 0x7f797f27, 0x0a7f65ad, 0xe4ac81af, 0xb2d74d81, 0xca0fad81, 0x357f69a8, +0xad71a581, 0x5be9d4de, 0xaebe8181, 0x95817fd8, 0x8f7aedb6, 0x0a81e59e, 0xa07f20eb, 0x7e66427f, +0x56769b04, 0x2e8149f1, 0x81f6f87f, 0xc3813768, 0xbcc28850, 0x6cbd3dac, 0x7fda99dc, 0x657f7fd5, +0x7fcf7f60, 0x81233698, 0x81daf17f, 0x867f7f7f, 0x7fda7f36, 0x0c7fae20, 0x7f817d78, 0x7fd09481, +0x818f30e0, 0x7f437c7f, 0xaecc2b3e, 0xcf427fca, 0xccfd2cce, 0xefd898bc, 0x7f5b81e2, 0x1d4b812c, +0x865871ae, 0x553ab4ae, 
0x69817f7f, 0x811a745a, 0xa32a163d, 0x2e967f57, 0xff817f7d, 0x70d43e9a, +0x814681ae, 0x9f817f31, 0x819b8110, 0xfca3819d, 0xc4fc45b2, 0x56b2817f, 0x7c811d7b, 0x997f8181, +0xbe0dc3d8, 0x9f5b56a6, 0x7f8181e2, 0x5d59eb29, 0xa357e4c6, 0x8161817f, 0x3c7e7f32, 0x7fd07f06, +0x817f82ad, 0x93de8341, 0x92850000, 0x0c65c06f, 0x0a4d2817, 0x7705d752, 0xbcd8687f, 0xc4397f7f, +0x1b7ffd47, 0xada5012d, 0x7f8158e5, 0xba617e10, 0xa97d811f, 0x1fb61081, 0xac41e74e, 0x8c7ea7a3, +0x997f452e, 0xb5858149, 0x5c3cb0a0, 0xc8ffab38, 0x49d65d7f, 0xbbd07c81, 0x35b06b7f, 0xcc7f7f7f, +0x50c42881, 0x707f7fde, 0x7fd67f7f, 0x7f819981, 0xd4d6a781, 0xcb103b7f, 0x7f4fee9f, 0xb3997fe0, +0x7f9381a5, 0xbc7f9a28, 0xf4aa819f, 0x7fe862df, 0x316dd1f8, 0xb4a66f81, 0x98816581, 0xdb817f2e, +0x34819ee0, 0x746c817f, 0xdc9cd3e1, 0x8e7fab2d, 0xec368b2e, 0x7f8381c8, 0x817f7f7f, 0x814de179, +0xed5b7f4d, 0x274bdfa1, 0x5851250b, 0xec811a81, 0x7f365569, 0x57935681, 0x08757781, 0x196d813e, +0xa67581b5, 0x83a7d981, 0x73988163, 0x816eb57f, 0x2ce47e40, 0x5bef6481, 0xb131155f, 0xc2f9817f, +0x519b67b4, 0x7d2c7f59, 0x7fdb8150, 0x5c7fc881, 0xca285c9d, 0x7f7f7fe6, 0x39aabd59, 0xb9b494e6, +0x81401831, 0x74247792, 0xab7dfaab, 0x817fcd81, 0x819b25e3, 0x587f337f, 0x817fd238, 0x818a332c, +0x9db6bcc3, 0x1681c227, 0x7058306d, 0x50a27f64, 0x8181a4b2, 0x7fe2a57f, 0x8d5f4777, 0x7f7f8f81, +0x6e8381f7, 0x22518136, 0xae72d0d6, 0x7f9bba7f, 0x8117d27f, 0x792b7ff0, 0xe45892e0, 0x8e6f5c34, +0x3994817f, 0xa981afca, 0x0000a592, 0x7f811d81, 0x09548181, 0x0ea8127f, 0x7f7fb49d, 0xd6690b81, +0xa1846059, 0x81d15d81, 0x6ccaf481, 0x81ddef83, 0x7fccc681, 0xab502722, 0x4620e862, 0x3981de76, +0x7f81889f, 0x5b818154, 0x797222dc, 0x4b8153a6, 0xa470816d, 0x7fb2657f, 0x81150b64, 0x89dc1f57, +0xaa636e7f, 0xe59cba67, 0xb3b2da41, 0xc7818192, 0x7637d0d9, 0x1a78a78c, 0xaf5e3f5c, 0x8181a581, +0x56285469, 0xeed47f7a, 0x697ff6a5, 0x7f7b7f1e, 0x398181cb, 0x999fe4a0, 0xc17f8816, 0x5c815aa2, +0x78717fce, 0x7f7f842c, 0xf07f4c43, 0x7f818148, 0xbc66c134, 0x8168ce22, 0xaf1791a8, 0x3522ae40, +0x95afa97d, 0x435d4e69, 0x2ab59d7f, 0x8197c47f, 0x93cfe645, 0x8dba727f, 0x227f7f81, 0xb17feb7f, +0x702a7f81, 0xebd463b7, 0x5381e929, 0x817dba7f, 0x5c811a24, 0xfa7f814d, 0x817f8197, 0x2f7f704f, +0x36817f7d, 0xe87f0d7f, 0x54b07fb6, 0x81733686, 0x8fcb7fcf, 0xd888c623, 0x2de5ebc5, 0x8f6f6569, +0x7f7981f9, 0x908136b1, 0xa93a7fb5, 0x405f8141, 0x01a54dbc, 0xb6815763, 0x8ccc3ffc, 0x5681105b, +0x4a1498bf, 0x4c6f82e5, 0xa67f556c, 0x7f5d564f, 0xdb8132d0, 0x1e958139, 0xe386811b, 0x04845a40, +0x7fbd8109, 0x833b62ec, 0x81f40717, 0x701d8181, 0x8191ee44, 0xae2d9d7f, 0x3fa17854, 0xcbcad981, +0x73c2d681, 0x5f738681, 0xb87081dd, 0x817f0000, 0x037fcda9, 0x922caef5, 0x7cc7f91e, 0x9a7fbd88, +0x81819528, 0xf2cd7f53, 0xfd7c8170, 0xfb7fb997, 0x7f7f3f7f, 0xe4818154, 0x18c4414b, 0x4fa48da4, +0x81718181, 0x817c957f, 0x7f2e1582, 0x09bc81bf, 0x5e4b817f, 0xd5590d7f, 0x7f818b44, 0x88abcf7f, +0x39508181, 0x81ab86bf, 0xba58c325, 0x65bc83ed, 0x3402857f, 0x81c61e7f, 0x8bbab505, 0x81b2aba0, +0x8153ec32, 0x6803d681, 0x819d817f, 0xb6de31a4, 0x367f7fc2, 0xb1048db9, 0x6389815b, 0x7ffb7b01, +0x8f1b817f, 0xc1817f7f, 0x04187f64, 0x45cfac7a, 0x0f1b578c, 0x0f209f24, 0x33817f7f, 0x7f7f7f81, +0xac814781, 0x5a9d3dbf, 0x62a65581, 0xcb8a7353, 0x7f81c5b8, 0xba447fa1, 0x9081774d, 0x7af9ed38, +0xb6849bce, 0xacf07c18, 0xa1894581, 0xd7634242, 0x2d0e8174, 0xdbf799d9, 0x7fe7b446, 0x84110cf7, +0x6474da81, 0x8185e120, 0x9da4f123, 0xd07fce54, 0xa9cdf7af, 0x84db817f, 0x9e9a23b7, 0x81774c7f, +0x7f818152, 0x817f5243, 0x39058181, 0xbbc91e81, 0xe3d87581, 0x81b66581, 0x7fb06a61, 
0x0bcd0fdf, +0x8188dee9, 0x1cd87f92, 0xf036977f, 0x977f4d61, 0x1da12581, 0x3a7fbee0, 0x87467fa8, 0x7fe27fa3, +0x89d37f7c, 0x397c7ff4, 0x57e64171, 0xc4819181, 0x93677f41, 0x65be2517, 0xbe6c7f7f, 0xaa7f7f81, +0x255d650e, 0x098181c9, 0xe8124525, 0xc73a4949, 0x24d14b47, 0x6730f681, 0xfe251c6e, 0xbc7fa092, +0x81917f5f, 0x81d1ab81, 0xd6128193, 0x13d62e52, 0x941c14f4, 0x25b452c6, 0x7f9c947d, 0x815d3f42, +0x81c8c9d0, 0x7f524663, 0x40ae7524, 0x2f81c77e, 0x815c8483, 0x7f7fb27f, 0x7fc37f44, 0xe07f9db7, +0x7b812117, 0x1c965b7f, 0x1822ad4b, 0xccb9e67f, 0xc5b18115, 0x7f8c377f, 0xd13939cb, 0x38e8147f, +0x4e722d21, 0x9e81ef00, 0x81c0fa81, 0x812d8158, 0x9d84a57f, 0xefc32797, 0x7fc9987f, 0xa37f8199, +0x81812681, 0x81818190, 0x89a9814c, 0x7945c381, 0xa57f816d, 0x7d65af81, 0x81bb7f72, 0x1c81cd7f, +0x6d3e4e7f, 0x6cbef90d, 0xca24894d, 0xb04892b6, 0x7f7f1e81, 0xf57f5965, 0x0769a7b0, 0x698562b9, +0x16817f7f, 0xabb94bb7, 0xc35c81f3, 0xb9b5728e, 0x8181641f, 0x81d2d2be, 0x9181bb37, 0xf82b9222, +0x7bdc7ff5, 0x9b812fa1, 0x8173f4ec, 0x94810185, 0x2bd00d63, 0x814c50dd, 0x9be376e5, 0xed7f90dc, +0x817bf98c, 0xe1df634d, 0x287f81ae, 0x27a4f115, 0x62101a81, 0x2723b481, 0x9c507f66, 0x44a01769, +0xbfd13025, 0x7f815b81, 0x5d5c817f, 0xb37f7fa5, 0x7f79b2c6, 0xa1e2847f, 0x813ac57f, 0x417f8b7f, +0x4b505b55, 0x22d47fbb, 0x7f886ad7, 0xf78c7f48, 0x53a5245d, 0xdfd37f1b, 0xb37f81c4, 0x907fc99f, +0xa393f1c0, 0x7f5f7015, 0x37be3fb3, 0x7f4c9e28, 0x7f770000, 0x9b7a0531, 0x7f35cdde, 0x5b4047cf, +0x81881773, 0xdc6e9ea5, 0x517f477f, 0xb2a77f81, 0x9981472d, 0xd4c57f81, 0x6ef2a77f, 0x81d481a3, +0x3d87307f, 0x214321bd, 0x7b7f7f8e, 0x997f1cbe, 0x266681ec, 0x3639f87f, 0x687f7fed, 0xf17f8191, +0x1558ae38, 0x815ac086, 0x81247f78, 0x7f612db7, 0x128d7fc9, 0x187fbf81, 0x4a898116, 0x3a6c818d, +0x7f7fdcf1, 0x60cb3e9c, 0x81bf627f, 0xe7c17fa1, 0x52247fd2, 0x814781dc, 0x6eb67f1a, 0x9d0b7f7d, +0xc2b238f8, 0x81b1427a, 0xa1555aad, 0x103c0c29, 0x97319c81, 0x05ab3a7f, 0x811f1d7f, 0x817f8195, +0x40bffc7f, 0x3aa38f4a, 0x46e17fcb, 0x6781fc31, 0xb725a5e4, 0x427f8cbd, 0x7f857fd4, 0xca1f7f9d, +0x2f77c6b9, 0xdde30a0e, 0x7f93fca2, 0xc6aa7fc7, 0x7f9e7f7f, 0x9b78012e, 0xdda0d07f, 0x0b4c7f83, +0x7fee4b44, 0x81640552, 0x40d573c0, 0x390efbce, 0x327fb881, 0xf5e69aec, 0x81e9c7d1, 0x81717f74, +0x81817d78, 0xac6eba86, 0x41a2d07f, 0x8981da45, 0x5264b681, 0x527f46f2, 0x949e7f52, 0x7fdd7fbd, +0x81678102, 0x87756c64, 0x3d2a81ca, 0xc87f8107, 0xd17fb64a, 0xa7b553b5, 0x8191812a, 0x55ddbf7f, +0x517f7b7f, 0x897aad7f, 0x63817f8d, 0xd9547f7f, 0x43437351, 0x84818181, 0x3a7f8127, 0xccbfdf7f, +0xe1191588, 0x81c80c65, 0x7ffcf571, 0xcc147b7f, 0x00007081, 0x2b7f7f7f, 0x889b3a2b, 0x5358e78b, +0xfcb917ab, 0x81c3f74d, 0xd57faa18, 0x69937f23, 0xd7321b81, 0x5db1ad81, 0x17577f0a, 0xf48118bf, +0x81822681, 0x2981e87f, 0xc16e6c7f, 0x891d38ae, 0xf37f813f, 0x81b96b55, 0x2fcd81b1, 0x23b5f981, +0x695c4d28, 0xa0810fab, 0x6cd5ea3d, 0xefc68ec4, 0xae32887e, 0xa27f0b17, 0x7f8baa97, 0x8116b3a1, +0xa393817f, 0x817019f0, 0x1d7f8193, 0x18f0d764, 0xb27fb7c9, 0xc4da637f, 0xcd586f56, 0x7f23810d, +0x572f25f3, 0x05df7f15, 0x817f815c, 0x8b147f84, 0x0f817f6b, 0x7f1f7f7f, 0xbccee4a8, 0xcb5ad7ed, +0x33a5f88b, 0xc7817ecd, 0x8781817f, 0x817e6444, 0x6697817f, 0xf9490257, 0x83d5f581, 0x7168b332, +0xf4ed4781, 0x17e0c1c4, 0x16b9c997, 0x8581247f, 0x81887b3f, 0x2281a5c7, 0xce437f0f, 0x994c6f81, +0xbcd09735, 0x9269d57d, 0x449a7e61, 0x8c6e7f81, 0x81b38c7f, 0x81db7791, 0x62602681, 0x3246817f, +0x81bb2881, 0x6d2510b4, 0xb07fbf69, 0x49ea817f, 0x938153e7, 0xf17f425b, 0x3b817f89, 0x36b48181, +0x837c1d0a, 0x853cbd2c, 0x2950527f, 0x388de1bc, 
0xfa1c7f54, 0x7b507f7f, 0xad311c42, 0x8d8ae6c3, +0x9081677f, 0x58810e81, 0x984b4881, 0x4b9f407f, 0x819a00ff, 0x857faee2, 0xa06d9fb5, 0x448979b8, +0x9cc0f17f, 0x7f2fce7f, 0xa0b9ad7f, 0x428a7f7f, 0xdf562afa, 0x7f6f0000, 0xe04a5bef, 0xa7ad2867, +0x23818146, 0x7f7f0642, 0x7f481981, 0x7f810f81, 0x99a9db81, 0x64d437cd, 0xaf7fcb6d, 0x81815311, +0x723c81bd, 0xcc7fc729, 0x81a47e49, 0x9abce744, 0x7385eb5b, 0x7f7f5b7f, 0x2bb3819d, 0x81fd7f49, +0x7f5f8198, 0x7fb6a2f3, 0xe97fc33c, 0x436c79d3, 0x5ea67fff, 0x5a7ff2ac, 0x28aa713c, 0x8181c681, +0x7f7c8846, 0xaf817f39, 0x90bcc57f, 0x841c7f4a, 0x817f8181, 0x7f4e811b, 0xb53a81ce, 0x7f451a7f, +0x5f38027f, 0x7b81e4f3, 0x817f87c6, 0x8141318c, 0x532c81a4, 0x817fc84c, 0x919d33db, 0x29f2b781, +0x7f568139, 0x4658b1a6, 0x7f69342d, 0x81ab9582, 0x367fa301, 0x47468181, 0x7f8c8181, 0x81216a81, +0x9b577f7f, 0x7f447f7f, 0x72ef4649, 0x45418103, 0xee48bb81, 0xa7027c7f, 0x0ea4237f, 0x6b717cf4, +0xa296a17f, 0x17007f7f, 0x869835c8, 0x8a7f77ec, 0x5d7b8139, 0x32688f81, 0x7fdab0b3, 0x815c7f7d, +0x06a01fb6, 0x814981de, 0xa1c681d8, 0xdc6ff522, 0xd13081b1, 0x9c5a7ff5, 0x7f7f7f5c, 0x86faa148, +0x811a696b, 0xbb7fa2c5, 0x815779bf, 0xae3b7f48, 0x8c45d6de, 0x7f867f9a, 0x3e359b7f, 0xb7c48181, +0x78c27f95, 0xc4818181, 0x81cd7f3f, 0x558a8193, 0x7f4ca874, 0x4e1eb447, 0x2744355f, 0x9020ead4, +0x16e47ff6, 0x708123b7, 0xbeb61418, 0x27fcdd50, 0xde21e5d8, 0x9500a3db, 0xefc29f4d, 0x5ef0c542, +0xe02a3bc8, 0xe91a38bf, 0x24f81e3c, 0xe0fe4f11, 0x3be6891d, 0x7fef09ae, 0x16977ff0, 0xe8b74a18, +0x1a50fa2e, 0x24c04bf9, 0x39c84714, 0xbed7f6c1, 0xaeddab44, 0xa7f9ca2d, 0x275dc01b, 0x1df6e74b, +0xc3171107, 0xbcc93481, 0x1403f685, 0xcc1c0439, 0xb602ebaa, 0xe235d0e1, 0xf1afe3e3, 0x0814c025, +0xb4f93607, 0xa709c243, 0xbbfabdd8, 0xfd3e81e7, 0x2f9ae335, 0x1c1d52e9, 0x00f133a5, 0x23eee238, +0xc625eced, 0xefd56878, 0xe3cadc03, 0x585db90a, 0x024ace2b, 0xe3d804ef, 0xdf54d5a1, 0xc9e11709, +0xc3a20ccb, 0x18c0005f, 0x52424437, 0xccaba110, 0xc75736d6, 0xa8f1401a, 0x3186aa67, 0x0b1aaebe, +0xf952cf90, 0xfa4edd16, 0x9d72bee5, 0x2824f455, 0x9a3cb150, 0x6bbfe929, 0x6a6f5f9d, 0xbfa5385f, +0xf89828bc, 0x6b6d0baf, 0xcab514c5, 0xa9baf505, 0x24cff2c2, 0x1c2511d8, 0x3abccde9, 0x7f60deef, +0xbbf4dddd, 0x11366331, 0x0241c141, 0x5111c984, 0xc17f54da, 0xed4c1bb8, 0x0af6fb57, 0xfd2c28c0, +0xe790142d, 0x6bff6403, 0x7fd8cce3, 0x3c282cc0, 0xea48f68b, 0x3ecdd5e3, 0xb917f2e5, 0xd2b25334, +0xf65b10b5, 0xfdd0d4be, 0xe6e22cc9, 0xc59ec206, 0x3f3238cc, 0xbcbe2fd0, 0x2418c910, 0x3d07f7cc, +0xa25bf9cf, 0xdb81ebfc, 0xd35b2203, 0xde2d23cc, 0x3cd706d5, 0x0e34f6f5, 0xc2d20000, 0xfef1e4e0, +0xd26ad6e1, 0xfa130b0d, 0x324b211c, 0xc5dfc3a9, 0xf83ebec9, 0x0dd627c0, 0xcee99850, 0x4fcd1533, +0x04597ff0, 0x40d0ff25, 0x045cd181, 0x908f9ca1, 0xd0417f7b, 0x2f0a2c87, 0xfef998c7, 0xcc3370f3, +0xb0f4267f, 0x1bbe5072, 0xd8d6063a, 0xd058cb32, 0x448fffad, 0xa9b50064, 0x184ebdf7, 0xfe2e3538, +0x561f16a6, 0xe7f20181, 0x449b95f9, 0x0338f7b2, 0x4012bcae, 0xd67f6797, 0x4966f6f2, 0x1dcc60c0, +0xeecec631, 0x279f81c4, 0xc9f751be, 0x75994dc8, 0x01001ded, 0xb0ba1822, 0xbb14eff9, 0xb2dae837, +0x4cf69e23, 0xf35d6ad7, 0xf69e57c9, 0x3727d2e3, 0x31b3517f, 0xc1da575d, 0x30e2c1d5, 0x54d7beef, +0x293edf19, 0xeb37d80e, 0x296cee35, 0xeff14f04, 0x232d06de, 0xb2815853, 0x2c601508, 0xa5ecb6ff, +0xb407f524, 0x4ccd9a19, 0xe448b0c2, 0x01ebd4c6, 0x52fc03a1, 0x286afce2, 0xe4d45ebb, 0x2953101d, +0xca12d31f, 0x10cb00dd, 0xd2ae6347, 0x46e82f44, 0x45d64600, 0xae393c05, 0xf7219fb9, 0xeffe09d2, +0xedfbb5d1, 0x07070413, 0xd7d6ec37, 0xa0f7a621, 0x75bd023f, 0xc45b5d37, 0x19ca9b39, 0xd1ee50b7, +0x50062231, 
0x301ca07f, 0xbeadd410, 0x1ce01d02, 0x51a5fbff, 0xd9104c7f, 0x79e4f23a, 0x33e47449, +0x1ceda8ea, 0xf6d65581, 0x418dd3e3, 0x9fc1e612, 0x54f747d9, 0x839acbce, 0x0000221c, 0x031434b6, +0xcc48960f, 0x1045ea23, 0xa8d7f8a1, 0xaf01243e, 0x2bf461c7, 0x9c2139dd, 0xd42bd5d5, 0xf8c2f5d5, +0xdeb61f4b, 0xf12ac9cd, 0x107f3209, 0x818d1d3c, 0xca34572c, 0x34da0052, 0xf9a951e2, 0xdf34d209, +0x0f8b37c1, 0xa7dee3d3, 0x1a390c98, 0x1d16e135, 0x33c3ad66, 0xd9e2cca4, 0xcbabcce4, 0x2eadd581, +0x81383b81, 0xc0db251b, 0x0a6e91c3, 0xdab3c725, 0xd2293bfc, 0x22d6abb5, 0xdb524adc, 0x30cf5f24, +0xb92527e8, 0x6000b7bf, 0xfefe1fe4, 0x2cf9035e, 0x2743f125, 0xd007b3ad, 0x2dd5e43a, 0x5a35e7b9, +0xea06e5b7, 0x39f3e1aa, 0x592614f3, 0x7fc3bab7, 0xf1aefe00, 0xf3e9b9fc, 0xcd81aee6, 0xae3e31cc, +0x95d8fbc7, 0x7f26bbf8, 0xf20e57e3, 0xf9eee307, 0xfde2bff2, 0x10e8f7db, 0xfbb0de50, 0xdfaa10e8, +0x5e18ee3a, 0x0520a445, 0xddd2bc9e, 0x13c97536, 0x33cf5159, 0xfac6d2f7, 0x18ca01e5, 0x112251da, +0x0211d9b6, 0x375feb63, 0x9a25b9cf, 0xfb5bf632, 0xe5b4a803, 0x95f55e8d, 0xdbede748, 0x39aed0bf, +0x5efd9c49, 0xcc7fd13c, 0xfba1a3d7, 0xa8a80206, 0x08f7e7d3, 0xd1fac77f, 0x32fb9272, 0xe1829834, +0x45408175, 0x34e4eec0, 0x0ca4ecae, 0x15e31638, 0x1d072d02, 0xee44d6ee, 0xafa5282c, 0x4d4d9f2a, +0xcdfc0f24, 0x9dfaf345, 0xbf3839c3, 0x3268a3e3, 0x2e4eacb5, 0x93dce12c, 0x4a484f3b, 0xdf7d0000, +0xeb113c3d, 0xd4bdf13f, 0xecc0f511, 0xd4cdf8c2, 0x42fc05f9, 0x53be2def, 0xb22e49cf, 0xead92e2e, +0xf956f1cc, 0x02c821e0, 0x1da533e5, 0x1fc50531, 0x34c30f37, 0x7ccba6e4, 0xab1afe2e, 0xb93bd67f, +0xea28a8a8, 0xf3ef282e, 0xac0c507f, 0xd0cbdd53, 0xd344ea1d, 0x472a1029, 0xed099f70, 0x1240edc6, +0x7f143244, 0xbae0d3d5, 0x7a4bddfd, 0x5f07d846, 0x1cdf4aaf, 0x4fc40ac1, 0xcfca381a, 0xb67fd847, +0xd381dde6, 0xdad40d1d, 0x64cb2d07, 0x171a8bc6, 0x11110cbf, 0x2c442dff, 0xbe90f52b, 0xd75500e5, +0xd8459ef9, 0xff0bc92f, 0x44a903c8, 0xb767d7f2, 0xc281fc02, 0xca534906, 0x25fdec32, 0x5f2aaeb2, +0x121b376c, 0xe3e41dd2, 0x12ecd9f8, 0xc5071a9d, 0xfaaeee7d, 0x183a1a57, 0xadcb921d, 0x1e33a3b2, +0x72e52634, 0xc8d4e81e, 0x3b1e0200, 0x321e1608, 0x49add232, 0x0516ff41, 0xc23181d3, 0xfa4e0151, +0xa63502cb, 0xda381204, 0x72d92da5, 0x7cd7ebff, 0xce08d106, 0x61c6f034, 0x55310f68, 0xed66c357, +0xc527d609, 0xdab11518, 0x41b808e5, 0x5f21e65e, 0xd4f6f036, 0xfc40008d, 0xaa7f19a0, 0xbff0ac1b, +0x78283b60, 0x1d0a4a03, 0xd6d0d016, 0xf1f23847, 0x58d93518, 0xd6d481b7, 0xaf11e02e, 0x5c74b67f, +0xd5734f27, 0x2fb07fc2, 0x29564eff, 0xcafae603, 0x1228f673, 0x3367ce30, 0x132d1342, 0xc80c10e9, +0x68ad07f9, 0x379c0c9c, 0x3a73c010, 0xc1a534bc, 0x69b0fc14, 0xf0d3ba81, 0x0d07bbe3, 0x3319fa45, +0x38815afe, 0xc1a67850, 0x7fe740fe, 0xc43fb638, 0xa0864249, 0x07a7dc33, 0x05e606d4, 0x3c6a81ca, +0x0cdb2635, 0x5dd3f756, 0xcbf09b70, 0x53b2a5b3, 0x5f035ab0, 0xfdda1fbd, 0x0510ac20, 0xa8611f44, +0x38ceb74c, 0x5048e2e5, 0x907f0238, 0xd9ad00d9, 0x7c247736, 0x8aabe3c6, 0xf5443d27, 0x7ff84622, +0x4bcc0a34, 0x797dc7e0, 0xf914e5f2, 0xa12df5d7, 0x0fdc23f9, 0x377f50c3, 0x8f47bf82, 0xb9948a4a, +0x8135ca9e, 0x523ace17, 0xd299fb23, 0x523b5407, 0x38b42a22, 0xadee2cd8, 0xee04b9c8, 0x2de3a5e2, +0x9f132faa, 0xa0030325, 0x019996f4, 0xd6c5562a, 0xf3e19255, 0x00ff2432, 0xe8f6cf36, 0xe55a7f14, +0x7079f06f, 0x1e11ff0b, 0xcbe89aa8, 0x6b363996, 0xe61dd6f7, 0x15133821, 0xf35940f9, 0xd9b139c1, +0x2ad6e10b, 0x2a29c0c6, 0x3c6ee3ce, 0x4b0a3624, 0x23cc10df, 0x17cdb502, 0xa46eaa3f, 0x42350300, +0xed66334e, 0x1d6fc255, 0x3213e4c5, 0x23d4a65a, 0x05b9a3e9, 0xbacd0c19, 0xc9c5b5b1, 0x8e12670d, +0x4aabbe2d, 0xb815e3c7, 0x5a2dd152, 0x3ef6551c, 0xf0a1b4f3, 0xb8b8459b, 
0xc52dd72e, 0xb9f4b9f7, +0xbd05dcc8, 0xfae04029, 0xb16ad8df, 0x03d167d4, 0xaf08525b, 0xdbdf3117, 0x4192d163, 0x00a2caad + +hard_output0 = +0xb5bc6ac8, 0xf5373664, 0x1310345c, 0xd5bae4e7, 0x1fc9e83e, 0xebfdfded, 0x84bd86ab, 0xb7aabe00, +0x60b44fea, 0xb9067464, 0x30325378, 0xa9195955, 0xf70c6e5c, 0x90922632, 0xc90b1cdb, 0xf2f5fb69, +0x73056b63, 0x1a33bf3f, 0x17755b5c, 0xc58bff6d, 0x2f4390b2, 0x2869d508, 0xe7c7dfe8, 0x38552963, +0x21da5367, 0x07282b9b, 0xa4767105, 0x1e294251, 0xe350a940, 0xb8a6aa27, 0xed12d778, 0xf10d9ece, +0xab93527f, 0xcf2da7e7, 0x68f6d0b1, 0x811f4bca, 0x577b06b2, 0x3234f13e, 0x30bab7df, 0x8dc47655, +0xbb843bed, 0x86da3aba, 0x30950c97, 0xdd096d7a, 0xa871fd6c, 0x8bee4e6b, 0x8fea30d0, 0x6c05b4d2, +0xf3e144d3, 0xd24ebb1f, 0x065635e5, 0x8d3f2cf9, 0x536c6c6a, 0xfbb0a5d0, 0x3d707b42, 0xc44d5982, +0xa5f4ad8f, 0xf32c0970, 0x1bccf1a6, 0x05916020, 0xa64fb176, 0x5ede6a35, 0xaf4966da, 0x9df5e0e7, +0x75042abc, 0x9ef10481, 0x11ddcbc8, 0xa0f5518c, 0xd5c23418, 0x2393d558, 0xfbe7dfeb, 0xed1c64c2, +0x86a36508, 0xde2dfb1e, 0xb8d0fef9, 0x24505232, 0xc894e71c, 0xbcc752a0, 0x40b74e83, 0x90d23c8c, +0x728e4a61, 0x108f0b08, 0x66f522ee, 0xc258d851, 0x35a31c44, 0x11311b5b, 0xfd3d5be9, 0x5ae448ff, +0x4f64994b, 0x5b8247a9, 0x4021114d, 0x2f0b6e82, 0x5eaa9828, 0x50ac71c0, 0xfb86ee52, 0x0dc1ac9b, +0xbbd47645, 0x8f357115, 0x978ceea0, 0xd557db99, 0x99b30388, 0xfc9a8a1c, 0x0f75be1a, 0x50143e22, +0x8840989b, 0x738ec50e, 0xe6b2783d, 0xf67899c8, 0x27ebed69, 0x6c415a16, 0x3a6cc2dc, 0xcd4e4e5d, +0x6cb12b2e, 0xdb88d7c0, 0x79cd1582, 0xbc422413, 0xe72ad2f4, 0x8eaac30f, 0x0bd86747, 0x6d87f69d, +0x15d62038, 0x4b375630, 0x0d51b859, 0x16db2cb2, 0xf210603a, 0x0abeb833, 0x55c694d0, 0xe57ca43b, +0x0ba94428, 0x1398a406, 0xe47d3889, 0x5a20203d, 0x250d7a1a, 0xd930ffec, 0x03992e79, 0xf2759376, +0x024ec121, 0x91fc3a2c, 0xb7e11cc5, 0x4ff7d459, 0xb8700134, 0xd6e61758, 0x4eba0a32, 0xb747e3ec, +0x7073fad7, 0xded80f99, 0x331e2f1b, 0xfa1f1bed, 0x056424a2, 0x1d1d95e0, 0x550b9ec8, 0x51ee2a38, +0x19525153, 0xd70c4cd5, 0x0d6cd7ad, 0xe44d1cf2, 0x30dfecda, 0xdacd7fe8, 0x7321d795, 0xddf48ef4, +0xe271e6a4, 0x9c1feecb, 0x951fcd7b, 0x8acc5a03, 0x3fb83527, 0xe306de74, 0x7b9cd6ee, 0x8e140885, +0xd4c91e8d, 0xe8c39733, 0x0f02f87f, 0xfb06b1b9, 0x0dc9349c, 0xf76bae8e, 0x4f642a07, 0x3d48a9aa, +0xe3ea323a, 0xa1cd5c8a, 0x40aa0e70, 0x132042d3, 0xa9732f6c, 0xd15a00c4, 0x43d3b046, 0x9a51ebd4, +0xc46ee0ed, 0xe2a2148b, 0xf5c478f0, 0x1fb01cf3, 0xf4f321ec, 0xd973811f, 0x11ad11b9, 0x5c67adda + +soft_output0 = +0x81818181, 0x7f7f7f7f, 0x7f7f7f81, 0x7f817f81, 0x7f7f8181, 0x7f7f7f7f, 0x817f817f, 0x81817f81, +0x7f817f81, 0x7f817f81, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f81817f, 0x817f8181, 0x81817f81, +0x7f7f7f81, 0x817f7f81, 0x7f7f8181, 0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x817f8181, 0x7f7f8181, +0x81817f7f, 0x81817f7f, 0x81817f7f, 0x817f7f7f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f817f, 0x7f817f7f, +0x7f818181, 0x8181817f, 0x81818181, 0x7f817f7f, 0x7f7f817f, 0x81817f81, 0x7f7f8181, 0x817f817f, +0x7f817f81, 0x7f7f8181, 0x817f7f81, 0x81818181, 0x817f817f, 0x81817f7f, 0x7f7f8181, 0x7f7f8181, +0x8181817f, 0x817f7f81, 0x81818181, 0x817f8181, 0x7f7f817f, 0x7f7f7f81, 0x7f81817f, 0x7f81817f, +0x7f81817f, 0x8181817f, 0x7f7f7f81, 0x817f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x8181817f, 0x7f81817f, +0x817f7f81, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f8181, 0x7f7f8181, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, +0x81817f81, 0x81817f81, 0x81817f81, 0x817f817f, 0x7f817f7f, 0x7f81817f, 0x81818181, 0x8181817f, +0x7f817f7f, 0x817f7f7f, 0x7f7f817f, 0x817f7f81, 0x7f7f7f81, 0x81818181, 0x817f8181, 0x7f7f7f81, 
+0x7f817f81, 0x7f817f81, 0x81817f81, 0x7f7f7f7f, 0x7f7f8181, 0x81817f7f, 0x7f7f8181, 0x7f7f817f, +0x7f7f817f, 0x81817f7f, 0x8181817f, 0x817f8181, 0x81817f81, 0x817f7f7f, 0x817f7f81, 0x7f81817f, +0x7f7f7f7f, 0x7f817f81, 0x7f7f817f, 0x7f817f7f, 0x81818181, 0x817f7f81, 0x7f817f81, 0x7f817f81, +0x817f7f81, 0x7f818181, 0x7f81817f, 0x817f817f, 0x7f7f817f, 0x81818181, 0x7f818181, 0x8181817f, +0x817f7f81, 0x7f81817f, 0x7f7f7f7f, 0x81817f81, 0x817f8181, 0x7f818181, 0x817f8181, 0x81817f7f, +0x7f7f8181, 0x81817f81, 0x7f7f817f, 0x817f817f, 0x81818181, 0x7f81817f, 0x817f817f, 0x81818181, +0x817f8181, 0x817f8181, 0x817f8181, 0x7f817f81, 0x7f7f7f81, 0x7f7f8181, 0x7f817f7f, 0x7f7f817f, +0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x7f7f8181, 0x7f817f7f, 0x7f7f8181, 0x7f7f7f81, 0x817f7f7f, +0x7f817f7f, 0x817f817f, 0x7f817f81, 0x7f817f81, 0x7f7f8181, 0x81817f81, 0x8181817f, 0x81817f81, +0x817f7f81, 0x7f818181, 0x7f7f8181, 0x817f7f81, 0x7f817f81, 0x817f7f7f, 0x7f7f8181, 0x81817f7f, +0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x7f7f7f7f, 0x817f817f, 0x7f7f7f81, 0x7f7f7f81, 0x81817f7f, +0x8181817f, 0x817f8181, 0x81817f81, 0x7f7f7f81, 0x7f7f7f81, 0x81818181, 0x81817f81, 0x7f7f8181, +0x8181817f, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x8181817f, 0x8181817f, 0x81817f81, 0x817f8181, +0x81818181, 0x7f817f7f, 0x81817f81, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x8181817f, 0x817f8181, +0x817f7f81, 0x817f7f81, 0x7f817f81, 0x817f8181, 0x7f7f7f81, 0x7f81817f, 0x81818181, 0x8181817f, +0x7f7f8181, 0x7f7f7f81, 0x81817f7f, 0x7f818181, 0x817f7f7f, 0x7f817f7f, 0x7f7f7f81, 0x7f817f7f, +0x8181817f, 0x7f81817f, 0x817f7f7f, 0x817f817f, 0x7f7f7f7f, 0x7f817f81, 0x8181817f, 0x7f81817f, +0x7f81817f, 0x81817f7f, 0x817f7f81, 0x7f817f81, 0x7f81817f, 0x7f7f7f7f, 0x81817f81, 0x81817f7f, +0x81817f7f, 0x81817f7f, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x7f818181, 0x7f7f8181, 0x7f7f8181, +0x817f7f7f, 0x81818181, 0x7f818181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f817f, +0x7f7f8181, 0x7f817f7f, 0x7f817f7f, 0x817f8181, 0x7f7f817f, 0x81817f81, 0x817f8181, 0x7f7f817f, +0x81817f7f, 0x7f7f7f81, 0x7f81817f, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x817f817f, 0x81817f7f, +0x817f817f, 0x817f7f81, 0x81818181, 0x81817f7f, 0x817f8181, 0x817f8181, 0x817f7f7f, 0x7f7f7f7f, +0x7f81817f, 0x7f818181, 0x817f817f, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f7f81, +0x7f7f7f7f, 0x7f7f8181, 0x7f817f81, 0x7f818181, 0x81817f81, 0x7f7f7f81, 0x817f7f81, 0x8181817f, +0x817f817f, 0x7f7f7f81, 0x7f818181, 0x81817f81, 0x817f7f81, 0x7f7f7f7f, 0x817f817f, 0x81817f7f, +0x817f7f7f, 0x81818181, 0x8181817f, 0x81817f7f, 0x7f7f7f7f, 0x817f8181, 0x7f817f81, 0x817f8181, +0x817f7f7f, 0x7f7f7f81, 0x817f8181, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x7f81817f, 0x817f817f, +0x7f817f7f, 0x7f818181, 0x7f818181, 0x7f817f7f, 0x81817f7f, 0x7f817f7f, 0x7f7f817f, 0x81817f81, +0x81817f81, 0x817f7f7f, 0x81817f7f, 0x81817f81, 0x7f818181, 0x7f7f7f7f, 0x7f818181, 0x7f7f817f, +0x7f817f81, 0x81817f81, 0x7f7f7f81, 0x817f8181, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x7f817f7f, +0x817f7f81, 0x7f7f817f, 0x7f7f8181, 0x7f7f8181, 0x81818181, 0x7f81817f, 0x7f81817f, 0x817f8181, +0x7f818181, 0x817f7f81, 0x817f7f7f, 0x7f817f81, 0x7f817f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f81, +0x8181817f, 0x7f81817f, 0x81818181, 0x7f7f7f81, 0x81817f81, 0x7f818181, 0x7f7f7f7f, 0x7f81817f, +0x8181817f, 0x8181817f, 0x7f7f7f7f, 0x7f818181, 0x817f7f81, 0x7f7f817f, 0x817f7f81, 0x81818181, +0x817f817f, 0x7f818181, 0x81817f81, 0x81818181, 0x817f7f81, 0x7f817f81, 0x7f7f8181, 0x817f817f, +0x817f7f7f, 0x817f8181, 0x81817f81, 0x81818181, 0x817f817f, 
0x7f817f7f, 0x7f817f7f, 0x7f817f81, +0x817f7f7f, 0x81817f81, 0x817f7f81, 0x7f7f817f, 0x7f817f7f, 0x7f81817f, 0x8181817f, 0x81817f81, +0x81817f7f, 0x7f817f81, 0x7f81817f, 0x81818181, 0x7f7f817f, 0x817f7f7f, 0x7f817f81, 0x8181817f, +0x817f8181, 0x7f81817f, 0x81817f7f, 0x7f817f81, 0x81818181, 0x7f81817f, 0x7f81817f, 0x7f81817f, +0x7f7f817f, 0x7f817f7f, 0x7f81817f, 0x817f7f81, 0x7f81817f, 0x81818181, 0x7f7f8181, 0x7f818181, +0x8181817f, 0x81818181, 0x8181817f, 0x817f817f, 0x7f81817f, 0x81817f7f, 0x7f81817f, 0x7f817f7f, +0x81817f81, 0x7f81817f, 0x7f7f817f, 0x817f8181, 0x81818181, 0x7f817f81, 0x81817f81, 0x7f817f7f, +0x817f7f7f, 0x7f7f817f, 0x7f818181, 0x817f8181, 0x7f7f7f81, 0x81817f7f, 0x81817f7f, 0x7f7f7f7f, +0x7f818181, 0x7f7f8181, 0x7f81817f, 0x7f7f817f, 0x8181817f, 0x7f81817f, 0x8181817f, 0x81817f7f, +0x817f7f81, 0x817f817f, 0x7f7f7f7f, 0x817f8181, 0x7f7f8181, 0x817f817f, 0x7f817f7f, 0x81818181, +0x817f7f7f, 0x7f817f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f7f81, 0x7f7f8181, 0x81817f81, 0x81817f7f, +0x7f818181, 0x817f7f81, 0x7f818181, 0x817f817f, 0x7f7f7f81, 0x817f817f, 0x7f817f81, 0x7f7f7f81, +0x7f818181, 0x7f7f817f, 0x81818181, 0x817f7f81, 0x81817f7f, 0x8181817f, 0x817f8181, 0x8181817f, +0x7f818181, 0x7f817f81, 0x8181817f, 0x7f817f7f, 0x8181817f, 0x7f817f7f, 0x7f7f7f81, 0x817f7f7f, +0x817f8181, 0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x81817f7f, 0x7f7f8181, 0x81817f81, 0x7f7f7f7f, +0x7f818181, 0x7f818181, 0x7f7f7f7f, 0x8181817f, 0x7f817f7f, 0x81817f81, 0x81817f81, 0x7f7f817f, +0x7f7f7f7f, 0x817f817f, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f817f, 0x7f817f81, 0x81818181, 0x817f8181, +0x7f7f817f, 0x81817f7f, 0x7f7f7f7f, 0x7f81817f, 0x81817f7f, 0x7f817f7f, 0x81817f81, 0x7f7f7f81, +0x817f8181, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x7f7f7f81, 0x7f818181, 0x817f7f7f, 0x7f7f7f7f, +0x817f7f81, 0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x7f7f8181, 0x81817f81, 0x81817f81, 0x7f7f7f81, +0x8181817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x817f7f81, 0x7f7f8181, 0x7f7f7f81, 0x817f7f7f, +0x7f817f81, 0x81818181, 0x81817f81, 0x7f81817f, 0x7f7f7f81, 0x817f817f, 0x8181817f, 0x7f818181, +0x817f817f, 0x81817f7f, 0x7f7f7f7f, 0x7f817f7f, 0x8181817f, 0x7f7f817f, 0x7f7f7f7f, 0x7f81817f, +0x7f817f7f, 0x7f7f8181, 0x7f7f8181, 0x7f7f8181, 0x81817f7f, 0x7f7f8181, 0x7f7f8181, 0x8181817f, +0x817f7f81, 0x7f81817f, 0x7f817f7f, 0x8181817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f8181, 0x7f817f7f, +0x817f817f, 0x7f81817f, 0x817f7f81, 0x8181817f, 0x81817f7f, 0x81818181, 0x817f817f, 0x7f7f817f, +0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x817f817f, 0x81817f81, 0x7f7f7f81, 0x7f81817f, 0x7f7f7f81, +0x8181817f, 0x817f7f7f, 0x7f7f817f, 0x7f817f7f, 0x7f7f7f81, 0x7f817f7f, 0x7f7f8181, 0x817f817f, +0x7f7f8181, 0x7f7f7f81, 0x817f8181, 0x7f7f8181, 0x81817f81, 0x8181817f, 0x7f7f817f, 0x7f817f81, +0x7f817f81, 0x7f817f81, 0x7f817f81, 0x817f8181, 0x7f7f7f7f, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, +0x7f7f817f, 0x7f817f7f, 0x7f7f7f7f, 0x817f7f81, 0x81817f7f, 0x81817f7f, 0x81818181, 0x7f81817f, +0x7f7f7f7f, 0x7f7f7f81, 0x7f817f81, 0x8181817f, 0x817f817f, 0x817f817f, 0x817f817f, 0x7f817f7f, +0x81818181, 0x7f817f7f, 0x7f7f817f, 0x81818181, 0x8181817f, 0x81817f7f, 0x81818181, 0x817f817f, +0x8181817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f818181, 0x7f7f7f81, 0x817f817f, 0x7f7f817f, 0x81817f7f, +0x8181817f, 0x817f817f, 0x7f7f817f, 0x7f81817f, 0x7f7f7f81, 0x8181817f, 0x817f7f7f, 0x817f817f, +0x817f7f7f, 0x7f7f7f7f, 0x81817f81, 0x817f7f81, 0x7f817f7f, 0x81817f7f, 0x81817f81, 0x7f817f81, +0x817f7f7f, 0x817f817f, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x81817f81, 0x81818181, 0x7f817f7f, +0x8181817f, 0x817f7f7f, 
0x81817f7f, 0x7f817f7f, 0x817f7f7f, 0x817f7f81, 0x7f7f7f81, 0x81817f7f, +0x817f7f81, 0x7f817f81, 0x7f7f8181, 0x7f81817f, 0x81817f7f, 0x817f7f7f, 0x81818181, 0x7f7f817f, +0x817f817f, 0x81817f81, 0x817f7f81, 0x81817f7f, 0x7f7f7f81, 0x7f817f7f, 0x81817f7f, 0x817f817f, +0x7f817f7f, 0x7f817f7f, 0x81817f7f, 0x817f7f7f, 0x81818181, 0x8181817f, 0x7f7f8181, 0x7f817f7f, +0x7f817f81, 0x7f7f8181, 0x81817f81, 0x817f7f81, 0x7f818181, 0x7f817f81, 0x7f7f7f81, 0x8181817f, +0x81817f7f, 0x817f7f7f, 0x7f818181, 0x7f818181, 0x7f81817f, 0x7f7f8181, 0x817f7f81, 0x7f817f81, +0x81817f7f, 0x81818181, 0x7f81817f, 0x7f7f8181, 0x817f817f, 0x817f7f81, 0x81817f81, 0x817f7f81, +0x81818181, 0x817f7f7f, 0x7f818181, 0x81818181, 0x7f7f817f, 0x817f817f, 0x7f7f817f, 0x7f817f7f, +0x7f817f81, 0x817f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x817f8181, 0x817f817f, 0x7f818181, +0x817f7f81, 0x817f7f7f, 0x81817f81, 0x81818181, 0x7f81817f, 0x7f817f81, 0x7f817f81, 0x817f8181, +0x7f818181, 0x81817f81, 0x7f817f7f, 0x7f7f7f81, 0x7f7f817f, 0x817f7f81, 0x7f7f7f81, 0x81818181, +0x7f7f8181, 0x7f7f817f, 0x817f7f81, 0x81817f7f, 0x7f81817f, 0x7f81817f, 0x817f8181, 0x7f81817f, +0x8181817f, 0x81818181, 0x7f7f817f, 0x817f8181, 0x7f817f7f, 0x7f7f817f, 0x817f7f7f, 0x7f7f7f7f, +0x8181817f, 0x7f7f7f7f, 0x81817f81, 0x817f7f7f, 0x7f7f8181, 0x817f7f81, 0x7f7f817f, 0x7f81817f, +0x81817f81, 0x7f7f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f7f, 0x7f817f81, 0x7f817f81, 0x7f7f817f, +0x81818181, 0x7f818181, 0x817f7f81, 0x817f7f7f, 0x7f81817f, 0x7f7f8181, 0x7f7f8181, 0x81817f7f, +0x7f817f81, 0x817f817f, 0x7f817f7f, 0x81818181, 0x7f817f7f, 0x81817f7f, 0x7f817f81, 0x81817f81, +0x7f7f7f81, 0x81818181, 0x81817f81, 0x817f7f81, 0x7f7f7f7f, 0x817f7f81, 0x7f818181, 0x7f7f8181, +0x7f7f7f81, 0x7f817f7f, 0x7f818181, 0x7f7f817f, 0x817f7f7f, 0x817f8181, 0x817f8181, 0x817f7f81, +0x7f7f8181, 0x7f817f7f, 0x817f8181, 0x81817f81, 0x817f8181, 0x7f81817f, 0x7f7f817f, 0x81817f7f, +0x81817f7f, 0x7f817f7f, 0x81817f7f, 0x81817f7f, 0x7f817f81, 0x7f7f8181, 0x7f7f817f, 0x7f7f7f7f, +0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x7f817f81, 0x81818181, 0x81817f81, 0x817f7f7f, 0x7f7f817f, +0x817f7f7f, 0x7f7f7f81, 0x81817f81, 0x817f8181, 0x7f7f817f, 0x7f7f817f, 0x7f7f8181, 0x817f7f81, +0x8181817f, 0x8181817f, 0x7f7f7f81, 0x7f7f817f, 0x81817f81, 0x7f7f7f7f, 0x817f8181, 0x817f817f, +0x7f7f7f81, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x7f81817f, 0x817f7f81, 0x7f7f7f7f, 0x7f7f7f81, +0x81817f7f, 0x8181817f, 0x7f81817f, 0x7f7f7f81, 0x8181817f, 0x81817f7f, 0x7f818181, 0x7f817f81, +0x8181817f, 0x7f81817f, 0x817f7f7f, 0x817f7f7f, 0x8181817f, 0x817f817f, 0x81817f81, 0x7f7f8181, +0x7f7f7f7f, 0x7f81817f, 0x817f817f, 0x81817f81, 0x81817f81, 0x81817f7f, 0x81817f81, 0x81817f81, +0x7f7f817f, 0x7f7f7f7f, 0x817f7f7f, 0x7f818181, 0x7f817f7f, 0x8181817f, 0x7f7f8181, 0x817f7f7f, +0x7f7f7f81, 0x7f817f81, 0x7f7f7f7f, 0x7f817f81, 0x7f81817f, 0x7f817f7f, 0x81817f7f, 0x7f817f7f, +0x81817f81, 0x7f7f7f81, 0x7f81817f, 0x81817f81, 0x7f817f81, 0x817f8181, 0x7f817f81, 0x7f817f7f, +0x81818181, 0x817f817f, 0x81817f81, 0x7f7f817f, 0x7f7f8181, 0x81818181, 0x8181817f, 0x7f817f81, +0x81817f7f, 0x7f7f7f7f, 0x7f7f817f, 0x7f81817f, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x7f817f81, +0x817f817f, 0x817f7f81, 0x81818181, 0x81817f7f, 0x817f7f81, 0x817f8181, 0x81817f7f, 0x81817f7f, +0x7f7f817f, 0x817f817f, 0x7f817f81, 0x7f817f81, 0x81818181, 0x817f817f, 0x7f7f7f81, 0x81817f7f, +0x817f817f, 0x7f7f7f7f, 0x7f7f8181, 0x817f7f7f, 0x817f7f81, 0x817f7f81, 0x817f8181, 0x8181817f, +0x81818181, 0x81817f7f, 0x81817f7f, 0x817f8181, 0x817f8181, 0x817f817f, 0x7f7f7f7f, 
0x7f817f81, +0x8181817f, 0x7f818181, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x817f8181, 0x7f817f81, 0x81817f7f, +0x7f7f7f81, 0x81817f81, 0x7f818181, 0x81817f7f, 0x7f7f817f, 0x7f818181, 0x81818181, 0x7f818181, +0x7f7f7f81, 0x81817f7f, 0x7f81817f, 0x7f7f8181, 0x7f81817f, 0x7f817f7f, 0x7f817f7f, 0x7f817f7f, +0x7f7f817f, 0x7f817f81, 0x7f81817f, 0x7f7f7f81, 0x81818181, 0x7f7f8181, 0x817f8181, 0x7f817f81, +0x7f817f81, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, 0x817f7f7f, 0x7f81817f, 0x7f817f7f, +0x7f7f7f81, 0x817f8181, 0x7f81817f, 0x817f817f, 0x81817f81, 0x7f7f817f, 0x7f817f7f, 0x817f817f, +0x7f7f7f7f, 0x817f7f81, 0x817f8181, 0x7f7f7f7f, 0x7f817f81, 0x7f7f7f7f, 0x7f7f817f, 0x8181817f, +0x817f7f81, 0x7f818181, 0x7f817f7f, 0x8181817f, 0x7f7f817f, 0x7f817f7f, 0x817f817f, 0x817f7f7f, +0x817f817f, 0x81817f7f, 0x81818181, 0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x817f817f, 0x81817f81, +0x7f817f7f, 0x817f7f81, 0x7f7f7f81, 0x8181817f, 0x7f7f817f, 0x817f7f81, 0x817f7f81, 0x7f818181, +0x817f7f7f, 0x8181817f, 0x817f8181, 0x7f7f817f, 0x7f818181, 0x817f8181, 0x7f7f7f81, 0x817f8181, +0x8181817f, 0x7f817f81, 0x81817f7f, 0x817f8181, 0x817f817f, 0x817f7f81, 0x81818181, 0x81818181, +0x8181817f, 0x817f8181, 0x7f7f7f81, 0x7f7f8181, 0x817f817f, 0x7f818181, 0x817f8181, 0x7f7f817f, +0x7f7f7f81, 0x7f818181, 0x7f818181, 0x7f81817f, 0x81817f81, 0x7f818181, 0x7f7f8181, 0x7f817f7f, +0x8181817f, 0x7f817f7f, 0x817f7f81, 0x7f817f81, 0x8181817f, 0x81818181, 0x817f7f7f, 0x817f817f, +0x7f7f7f7f, 0x817f8181, 0x81818181, 0x817f7f7f, 0x817f7f81, 0x7f817f7f, 0x817f7f81, 0x81817f81, +0x7f818181, 0x7f817f81, 0x7f818181, 0x7f7f7f81, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x7f81817f, +0x81817f81, 0x817f7f7f, 0x817f817f, 0x7f7f817f, 0x7f7f8181, 0x817f7f7f, 0x7f7f817f, 0x7f817f7f, +0x7f81817f, 0x7f81817f, 0x817f7f7f, 0x7f817f7f, 0x817f7f81, 0x8181817f, 0x817f7f81, 0x7f818181, +0x7f818181, 0x817f7f81, 0x7f817f81, 0x7f7f7f7f, 0x81817f81, 0x81818181, 0x7f818181, 0x7f81817f, +0x81817f81, 0x817f7f7f, 0x7f7f817f, 0x7f7f817f, 0x81817f81, 0x7f817f81, 0x7f7f8181, 0x81817f7f, +0x817f817f, 0x81817f7f, 0x7f7f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x817f7f81, 0x7f817f81, 0x817f7f7f, +0x7f7f8181, 0x7f7f817f, 0x8181817f, 0x7f818181, 0x7f817f81, 0x81818181, 0x817f817f, 0x7f7f7f7f, +0x81818181, 0x7f7f8181, 0x7f7f8181, 0x7f81817f, 0x8181817f, 0x8181817f, 0x81817f7f, 0x817f817f, +0x817f7f7f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x817f8181, 0x81818181, 0x7f7f7f7f, 0x7f7f817f, +0x817f7f7f, 0x7f7f817f, 0x817f8181, 0x7f817f81, 0x7f818181, 0x7f81817f, 0x7f81817f, 0x81818181, +0x7f81817f, 0x817f7f7f, 0x7f7f8181, 0x81818181, 0x7f817f81, 0x81818181, 0x7f817f81, 0x81818181, +0x817f8181, 0x7f7f7f81, 0x7f818181, 0x7f7f817f, 0x817f7f7f, 0x7f7f7f7f, 0x817f7f81, 0x817f7f81, +0x7f81817f, 0x817f7f7f, 0x81817f81, 0x817f7f81, 0x81818181, 0x7f7f7f7f, 0x81817f7f, 0x81817f81, +0x81817f81, 0x81817f81, 0x7f817f81, 0x817f817f, 0x817f817f, 0x817f817f, 0x7f7f817f, 0x81817f81, +0x7f7f817f, 0x8181817f, 0x817f7f81, 0x81818181, 0x817f7f7f, 0x817f8181, 0x7f7f817f, 0x817f7f7f, +0x8181817f, 0x81817f7f, 0x7f817f7f, 0x817f7f81, 0x81817f81, 0x817f7f7f, 0x81818181, 0x817f7f7f, +0x81817f81, 0x7f81817f, 0x7f81817f, 0x7f818181, 0x817f7f81, 0x817f817f, 0x7f81817f, 0x81818181, +0x81817f81, 0x81818181, 0x817f7f81, 0x81817f7f, 0x7f81817f, 0x8181817f, 0x8181817f, 0x81818181, +0x81818181, 0x81818181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f, 0x81817f81, 0x817f7f81, 0x7f818181, +0x817f7f7f, 0x7f817f81, 0x7f7f7f7f, 0x7f7f8181, 0x7f81817f, 0x7f817f81, 0x817f7f7f, 0x81817f81, +0x7f7f7f7f, 0x817f7f81, 0x817f7f7f, 0x7f817f81, 
0x81817f7f, 0x81817f81, 0x817f7f81, 0x81817f81, +0x817f817f, 0x7f817f81, 0x81817f7f, 0x7f7f7f81, 0x7f7f8181, 0x7f7f817f, 0x8181817f, 0x7f81817f, +0x817f8181, 0x7f7f7f81, 0x817f817f, 0x7f7f817f, 0x7f7f817f, 0x7f817f7f, 0x7f818181, 0x81817f7f, +0x81817f7f, 0x8181817f, 0x817f817f, 0x7f7f817f, 0x7f817f81, 0x7f817f7f, 0x7f818181, 0x7f81817f, +0x817f7f7f, 0x7f817f7f, 0x81817f81, 0x8181817f, 0x8181817f, 0x7f817f81, 0x8181817f, 0x8181817f, +0x81817f7f, 0x817f7f7f, 0x7f7f7f7f, 0x7f817f7f, 0x81817f81, 0x7f7f7f81, 0x817f7f7f, 0x7f817f81, +0x7f818181, 0x7f817f81, 0x81818181, 0x817f817f, 0x817f817f, 0x7f818181, 0x817f8181, 0x7f7f817f, +0x7f7f7f81, 0x817f7f7f, 0x7f81817f, 0x7f81817f, 0x7f81817f, 0x81817f81, 0x817f817f, 0x7f7f817f, +0x7f817f81, 0x817f817f, 0x7f817f7f, 0x7f81817f, 0x8181817f, 0x817f7f7f, 0x8181817f, 0x7f7f8181, +0x7f818181, 0x7f817f81, 0x7f7f817f, 0x817f8181, 0x817f817f, 0x817f7f81, 0x7f7f7f81, 0x81817f81, +0x817f8181, 0x81818181, 0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x8181817f, 0x7f7f7f81, 0x7f7f7f81, +0x7f818181, 0x817f7f81, 0x7f817f7f, 0x81818181, 0x7f817f7f, 0x7f7f817f, 0x7f7f8181, 0x8181817f, +0x7f817f7f, 0x817f7f81, 0x7f7f7f81, 0x7f81817f, 0x81817f81, 0x7f817f81, 0x817f8181, 0x817f817f, +0x817f7f7f, 0x81817f7f, 0x7f818181, 0x81818181, 0x817f7f81, 0x81817f7f, 0x817f8181, 0x817f8181, +0x81817f7f, 0x8181817f, 0x7f7f817f, 0x7f7f817f, 0x7f7f817f, 0x7f817f81, 0x7f7f7f7f, 0x817f8181, +0x81817f7f, 0x7f818181, 0x7f7f7f81, 0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x7f817f81, 0x817f8181, +0x817f817f, 0x7f7f7f7f, 0x81818181, 0x81818181, 0x81818181, 0x7f817f7f, 0x81818181, 0x7f81817f, +0x7f817f81, 0x8181817f, 0x817f7f7f, 0x817f817f, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x81817f81, +0x7f7f8181, 0x7f817f7f, 0x817f7f7f, 0x7f81817f, 0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x817f7f81, +0x817f7f7f, 0x7f817f81, 0x7f817f7f, 0x817f817f, 0x8181817f, 0x7f7f8181, 0x7f818181, 0x8181817f, +0x7f817f7f, 0x8181817f, 0x7f817f81, 0x7f81817f, 0x7f7f7f81, 0x7f7f7f81, 0x817f817f, 0x81818181, +0x7f817f81, 0x7f81817f, 0x7f7f8181, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f81, 0x7f818181, 0x7f7f7f7f, +0x7f7f8181, 0x817f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f7f7f, 0x7f7f8181, +0x817f7f7f, 0x81817f81, 0x7f818181, 0x81817f7f, 0x7f81817f, 0x7f817f81, 0x7f817f81, 0x7f817f81, +0x7f7f817f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x8181817f, 0x817f7f7f, 0x7f817f7f, 0x817f8181, +0x7f817f7f, 0x817f8181, 0x7f818181, 0x81817f7f, 0x7f817f7f, 0x8181817f, 0x7f81817f, 0x7f7f7f7f, +0x7f81817f, 0x7f817f81, 0x817f817f, 0x817f7f7f, 0x7f7f7f7f, 0x7f818181, 0x817f7f7f, 0x7f81817f, +0x81817f81, 0x7f7f7f81, 0x7f818181, 0x7f817f81, 0x81817f81, 0x7f7f7f7f, 0x817f8181, 0x817f8181, +0x7f817f7f, 0x7f817f7f, 0x817f817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x8181817f, 0x81817f81, +0x817f8181, 0x817f8181, 0x7f7f8181, 0x7f7f8181, 0x8181817f, 0x7f817f7f, 0x8181817f, 0x7f81817f, +0x7f7f7f81, 0x817f817f, 0x81817f7f, 0x817f7f81, 0x81817f7f, 0x817f7f81, 0x81817f81, 0x7f817f81, +0x817f8181, 0x7f818181, 0x7f817f81, 0x81817f81, 0x81817f81, 0x817f8181, 0x7f818181, 0x81818181, +0x817f7f81, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x8181817f, 0x81818181, 0x817f7f81, 0x7f818181, +0x81817f81, 0x7f817f7f, 0x817f817f, 0x817f7f81, 0x81817f81, 0x7f817f81, 0x7f7f817f, 0x8181817f, +0x817f7f7f, 0x7f7f817f, 0x7f817f81, 0x817f7f7f, 0x7f7f817f, 0x81818181, 0x817f7f81, 0x7f7f8181, +0x7f7f8181, 0x8181817f, 0x7f7f8181, 0x817f8181, 0x817f7f7f, 0x817f7f7f, 0x7f7f7f7f, 0x81817f81, +0x817f7f81, 0x7f7f8181, 0x7f817f81, 0x817f7f81, 0x7f7f817f, 0x817f8181, 0x7f7f7f81, 0x81817f7f, +0x817f817f, 
0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x817f817f, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, +0x7f7f8181, 0x817f817f, 0x7f7f7f81, 0x7f7f817f, 0x817f8181, 0x7f81817f, 0x7f81817f, 0x817f7f7f, +0x7f817f7f, 0x817f817f, 0x817f7f81, 0x817f7f81, 0x81817f81, 0x7f817f81, 0x817f817f, 0x7f81817f, +0x7f7f8181, 0x7f818181, 0x7f818181, 0x817f817f, 0x7f81817f, 0x817f8181, 0x8181817f, 0x7f817f7f, +0x817f8181, 0x817f7f81, 0x81817f81, 0x7f7f7f81, 0x817f7f81, 0x7f7f7f7f, 0x81818181, 0x7f7f8181, +0x7f817f7f, 0x7f7f8181, 0x81817f7f, 0x817f817f, 0x81818181, 0x817f7f7f, 0x8181817f, 0x81817f81, +0x817f817f, 0x817f817f, 0x7f7f817f, 0x7f7f7f81, 0x7f818181, 0x7f817f7f, 0x7f7f7f81, 0x817f817f, +0x7f818181, 0x7f7f8181, 0x7f817f7f, 0x81817f81, 0x7f7f8181, 0x817f7f81, 0x817f7f7f, 0x817f8181, +0x7f7f8181, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x817f7f81, 0x81817f81, 0x817f8181, 0x817f7f7f, +0x7f7f7f81, 0x817f8181, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x7f81817f, 0x7f7f7f7f, 0x81817f7f, +0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x7f7f7f81, 0x817f7f7f, 0x7f817f7f, 0x7f7f817f, 0x81817f7f, +0x8181817f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x817f8181, 0x7f7f7f81, 0x7f817f81, 0x7f817f7f, +0x7f818181, 0x817f7f7f, 0x81817f81, 0x817f8181, 0x7f818181, 0x81817f7f, 0x817f8181, 0x7f7f8181, +0x81817f7f, 0x7f7f7f7f, 0x81818181, 0x7f818181, 0x817f817f, 0x7f7f7f81, 0x817f7f81, 0x817f8181, +0x7f81817f, 0x81817f7f, 0x817f8181, 0x817f817f, 0x7f818181, 0x7f7f7f7f, 0x7f818181, 0x7f7f817f, +0x7f817f81, 0x817f8181, 0x7f7f7f7f, 0x7f818181, 0x7f7f7f81, 0x7f7f817f, 0x7f817f81, 0x817f8181, +0x81818181, 0x7f818181, 0x7f7f7f7f, 0x7f818181, 0x7f81817f, 0x7f7f817f, 0x81817f81, 0x7f7f8181, +0x81818181, 0x817f7f81, 0x7f817f81, 0x7f7f8181, 0x817f7f81, 0x81817f81, 0x7f817f7f, 0x817f817f, +0x7f81817f, 0x7f818181, 0x817f8181, 0x81817f7f, 0x817f8181, 0x7f7f7f81, 0x7f818181, 0x7f7f817f, +0x817f8181, 0x817f8181, 0x7f817f81, 0x7f81817f, 0x8181817f, 0x817f7f81, 0x817f817f, 0x81818181, +0x7f817f7f, 0x817f817f, 0x817f817f, 0x7f817f7f, 0x7f817f7f, 0x817f817f, 0x7f81817f, 0x81818181, +0x7f7f7f7f, 0x7f7f7f7f, 0x817f7f81, 0x7f81817f, 0x7f7f7f81, 0x817f7f81, 0x817f8181, 0x7f818181, +0x7f7f7f7f, 0x7f7f817f, 0x7f7f7f81, 0x7f818181, 0x817f7f81, 0x81817f7f, 0x81817f7f, 0x81817f81, +0x7f7f7f81, 0x8181817f, 0x7f7f8181, 0x7f81817f, 0x7f7f7f81, 0x81817f7f, 0x7f7f7f7f, 0x7f7f817f, +0x7f817f7f, 0x81818181, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x817f817f, 0x81818181, 0x81817f81, +0x7f81817f, 0x7f7f8181, 0x7f81817f, 0x81818181, 0x81818181, 0x7f7f7f7f, 0x817f7f81, 0x7f81817f, +0x7f81817f, 0x7f7f8181, 0x7f818181, 0x7f818181, 0x7f817f7f, 0x8181817f, 0x81817f81, 0x817f7f81, +0x7f7f7f81, 0x7f81817f, 0x817f7f81, 0x817f8181, 0x7f81817f, 0x817f8181, 0x81817f7f, 0x7f817f81, +0x81818181, 0x7f817f81, 0x7f818181, 0x7f7f7f81, 0x7f7f8181, 0x8181817f, 0x81818181, 0x81818181, +0x81818181, 0x817f8181, 0x81817f81, 0x817f7f7f, 0x81817f81, 0x817f7f7f, 0x7f7f8181, 0x7f81817f, +0x7f7f7f81, 0x7f7f7f7f, 0x7f817f81, 0x81817f81, 0x817f817f, 0x7f7f7f81, 0x817f7f81, 0x817f7f7f, +0x7f817f7f, 0x81818181, 0x81818181, 0x7f7f817f, 0x7f817f81, 0x81817f81, 0x817f817f, 0x817f7f81, +0x81818181, 0x7f817f7f, 0x81817f7f, 0x81817f81, 0x817f817f, 0x817f7f7f, 0x7f7f817f, 0x7f7f817f, +0x7f7f8181, 0x817f7f81, 0x7f81817f, 0x7f817f7f, 0x817f7f7f, 0x817f7f7f, 0x817f7f7f, 0x7f7f7f7f, +0x7f7f7f7f, 0x7f7f7f81, 0x7f7f8181, 0x817f8181, 0x817f817f, 0x7f7f7f81, 0x81818181, 0x81817f81, +0x7f7f7f7f, 0x817f817f, 0x81818181, 0x7f7f817f, 0x7f818181, 0x7f7f8181, 0x817f8181, 0x817f7f7f, +0x7f7f8181, 0x7f817f7f, 0x817f8181, 0x817f8181, 0x81817f81, 0x7f817f81, 
0x817f7f81, 0x7f7f7f81, +0x7f818181, 0x7f817f7f, 0x81817f81, 0x7f817f7f, 0x817f7f7f, 0x817f7f7f, 0x817f817f, 0x7f7f7f7f, +0x817f8181, 0x7f7f7f7f, 0x7f81817f, 0x7f817f81, 0x81818181, 0x817f817f, 0x817f7f7f, 0x81817f7f, +0x7f81817f, 0x817f817f, 0x7f818181, 0x8181817f, 0x817f817f, 0x81818181, 0x817f7f7f, 0x817f7f81, +0x7f7f8181, 0x817f817f, 0x817f7f81, 0x7f81817f, 0x7f818181, 0x7f7f817f, 0x817f7f7f, 0x7f7f7f7f, +0x7f7f7f7f, 0x81817f81, 0x7f81817f, 0x7f7f7f7f, 0x7f7f7f81, 0x8181817f, 0x81818181, 0x81817f7f, +0x7f81817f, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x817f817f, 0x81817f81, +0x7f7f7f81, 0x817f7f81, 0x817f7f81, 0x81818181, 0x7f7f8181, 0x81817f81, 0x7f817f7f, 0x7f818181, +0x7f818181, 0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x81817f81, 0x817f7f7f, 0x817f8181, 0x7f7f817f, +0x81818181, 0x81817f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f818181, 0x7f7f817f, 0x817f7f81, 0x817f7f7f, +0x81817f7f, 0x7f81817f, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x7f7f8181, 0x8181817f, 0x817f7f81, +0x7f7f7f81, 0x7f817f81, 0x8181817f, 0x7f7f7f7f, 0x7f817f81, 0x7f7f7f81, 0x817f7f7f, 0x81817f81, +0x7f817f7f, 0x817f7f81, 0x817f817f, 0x81817f7f, 0x7f818181, 0x7f81817f, 0x7f817f81, 0x7f817f81, +0x7f81817f, 0x7f7f7f7f, 0x7f817f81, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f817f, +0x7f7f7f81, 0x7f818181, 0x7f818181, 0x7f818181, 0x7f7f8181, 0x817f7f81, 0x7f817f7f, 0x81818181, +0x7f81817f, 0x817f817f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f81, +0x81817f81, 0x7f81817f, 0x7f7f7f81, 0x81817f81, 0x7f818181, 0x81817f81, 0x7f7f7f7f, 0x7f81817f, +0x81817f81, 0x7f817f7f, 0x7f81817f, 0x81817f81, 0x81818181, 0x7f81817f, 0x7f7f817f, 0x81817f7f, +0x7f7f817f, 0x8181817f, 0x81817f7f, 0x7f81817f, 0x81817f81, 0x8181817f, 0x7f817f81, 0x81817f7f, +0x817f7f7f, 0x7f7f8181, 0x817f8181, 0x7f81817f, 0x7f7f817f, 0x817f817f, 0x817f7f81, 0x7f7f817f, +0x7f7f8181, 0x7f818181, 0x7f7f8181, 0x81817f7f, 0x8181817f, 0x817f7f7f, 0x7f7f8181, 0x7f817f81, +0x817f817f, 0x7f817f7f, 0x81817f81, 0x7f7f817f, 0x81817f81, 0x7f7f817f, 0x81818181, 0x81817f81, +0x81817f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x817f817f, 0x7f7f8181, 0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f, +0x7f81817f, 0x817f7f81, 0x817f7f81, 0x7f7f7f7f, 0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x7f7f817f, +0x7f817f7f, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x81818181, 0x8181817f, 0x7f7f7f81, 0x7f818181, +0x8181817f, 0x7f7f7f7f, 0x817f817f, 0x7f818181, 0x81817f81, 0x7f818181, 0x7f81817f, 0x817f7f81, +0x81818181, 0x7f7f7f81, 0x81817f81, 0x7f7f8181, 0x7f81817f, 0x81817f7f, 0x8181817f, 0x7f817f7f, +0x7f7f8181, 0x7f7f817f, 0x7f817f7f, 0x817f817f, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f818181, +0x81817f81, 0x7f818181, 0x81817f81, 0x7f818181, 0x7f7f7f81, 0x817f817f, 0x81817f7f, 0x7f7f7f81, +0x817f817f, 0x7f817f81, 0x7f817f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f817f81, 0x8181817f, 0x817f8181, +0x7f7f7f7f, 0x8181817f, 0x817f8181, 0x81818181, 0x7f818181, 0x81818181, 0x7f7f7f81, 0x817f817f, +0x81817f81, 0x8181817f, 0x8181817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x8181817f, 0x817f7f7f, +0x7f817f7f, 0x7f7f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x81817f81, 0x81817f7f, 0x8181817f, +0x817f817f, 0x7f817f7f, 0x817f7f7f, 0x7f7f7f81, 0x817f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f7f, +0x817f817f, 0x817f8181, 0x7f7f7f81, 0x7f7f7f81, 0x81817f7f, 0x7f818181, 0x7f7f8181, 0x817f817f, +0x81818181, 0x7f818181, 0x817f7f81, 0x817f8181, 0x817f817f, 0x7f817f7f, 0x7f7f7f7f, 0x8181817f, +0x7f7f7f7f, 0x7f817f7f, 0x817f817f, 0x817f817f, 0x7f817f81, 0x81818181, 0x7f817f7f, 0x7f7f8181, +0x7f7f8181, 0x81817f7f, 0x81817f7f, 
0x7f7f7f81, 0x817f7f7f, 0x7f7f817f, 0x81817f7f, 0x7f7f817f, +0x817f7f81, 0x8181817f, 0x81817f81, 0x7f7f8181, 0x7f7f8181, 0x7f7f7f81, 0x81817f7f, 0x7f7f817f, +0x81817f81, 0x7f817f81, 0x81817f7f, 0x7f7f7f7f, 0x7f81817f, 0x7f7f8181, 0x7f817f7f, 0x7f7f8181, +0x817f817f, 0x817f7f7f, 0x7f817f7f, 0x817f8181, 0x817f7f7f, 0x7f81817f, 0x7f7f817f, 0x817f817f, +0x7f7f8181, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x7f7f817f, 0x81817f81, 0x7f817f81, 0x7f817f7f, +0x817f7f81, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x81817f81, 0x7f817f7f, 0x817f7f7f, 0x7f7f7f7f, +0x7f817f81, 0x7f7f7f7f, 0x81817f7f, 0x7f7f7f7f, 0x7f818181, 0x817f7f81, 0x817f817f, 0x8181817f, +0x817f7f7f, 0x817f8181, 0x8181817f, 0x7f817f7f, 0x7f817f7f, 0x817f7f81, 0x81817f7f, 0x7f7f8181, +0x81818181, 0x81818181, 0x7f81817f, 0x7f817f81, 0x7f817f81, 0x817f7f81, 0x8181817f, 0x7f81817f, +0x7f7f817f, 0x7f818181, 0x81817f7f, 0x7f817f81, 0x817f8181, 0x7f7f7f81, 0x7f7f8181, 0x817f7f7f, +0x7f818181, 0x7f7f8181, 0x817f8181, 0x7f817f81, 0x7f818181, 0x81817f81, 0x817f7f7f, 0x7f818181, +0x7f817f81, 0x817f7f81, 0x817f817f, 0x7f7f7f7f, 0x817f817f, 0x81818181, 0x7f817f7f, 0x7f817f81, +0x817f7f81, 0x7f7f817f, 0x81817f81, 0x7f81817f, 0x7f7f8181, 0x817f7f81, 0x817f817f, 0x81817f81, +0x817f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x7f818181, 0x7f818181, 0x7f7f7f7f, 0x817f7f81, +0x817f7f81, 0x7f7f7f81, 0x8181817f, 0x7f7f7f7f, 0x817f817f, 0x7f817f7f, 0x817f817f, 0x7f7f7f7f, +0x8181817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f817f7f, 0x7f817f7f, 0x7f817f7f, 0x817f8181, 0x7f81817f, +0x7f817f7f, 0x81817f81, 0x8181817f, 0x7f7f7f7f, 0x7f7f8181, 0x817f7f81, 0x7f817f7f, 0x817f7f7f, +0x81817f81, 0x817f7f7f, 0x81817f81, 0x7f81817f, 0x7f818181, 0x7f818181, 0x7f818181, 0x7f81817f, +0x7f7f8181, 0x817f7f81, 0x817f8181, 0x7f817f81, 0x7f817f7f, 0x81818181, 0x7f817f81, 0x81818181, +0x81817f7f, 0x81818181, 0x817f7f81, 0x81817f81, 0x81817f81, 0x7f7f7f7f, 0x81817f7f, 0x7f81817f, +0x7f7f8181, 0x81818181, 0x7f7f7f7f, 0x7f7f8181, 0x81817f7f, 0x81817f81, 0x7f7f7f81, 0x8181817f, +0x8181817f, 0x7f7f817f, 0x81817f81, 0x817f817f, 0x817f7f7f, 0x817f817f, 0x817f817f, 0x7f81817f, +0x8181817f, 0x817f7f7f, 0x7f818181, 0x817f7f7f, 0x817f8181, 0x7f7f8181, 0x817f8181, 0x817f817f, +0x817f7f81, 0x7f7f7f7f, 0x7f7f7f81, 0x8181817f, 0x7f818181, 0x7f7f8181, 0x7f81817f, 0x817f8181, +0x7f81817f, 0x7f7f8181, 0x8181817f, 0x817f7f7f, 0x817f817f, 0x8181817f, 0x8181817f, 0x81817f81, +0x7f7f817f, 0x7f817f7f, 0x817f817f, 0x7f81817f, 0x7f817f7f, 0x81817f7f, 0x7f7f8181, 0x817f7f7f, +0x7f7f7f81, 0x817f8181, 0x7f817f81, 0x817f7f81, 0x817f7f81, 0x81818181, 0x81818181, 0x81818181, +0x7f7f8181, 0x7f81817f, 0x7f817f7f, 0x7f817f7f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f81, +0x7f817f81, 0x817f8181, 0x7f81817f, 0x81817f81, 0x7f7f7f81, 0x817f7f81, 0x817f7f7f, 0x817f8181, +0x7f7f8181, 0x7f817f7f, 0x7f818181, 0x7f7f817f, 0x7f81817f, 0x8181817f, 0x8181817f, 0x7f81817f, +0x7f817f81, 0x7f7f817f, 0x817f817f, 0x7f7f7f81, 0x817f8181, 0x7f81817f, 0x817f7f81, 0x817f817f, +0x817f817f, 0x7f817f81, 0x7f817f7f, 0x7f81817f, 0x817f817f, 0x7f7f8181, 0x7f81817f, 0x817f817f, +0x8181817f, 0x81818181, 0x81817f81, 0x817f8181, 0x81818181, 0x81817f7f, 0x817f8181, 0x7f7f8181, +0x7f7f8181, 0x7f7f817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x81817f7f, 0x7f7f7f7f, 0x817f817f, +0x817f7f7f, 0x7f7f817f, 0x7f817f81, 0x7f7f817f, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x7f817f7f, +0x7f81817f, 0x7f7f8181, 0x7f817f81, 0x817f7f7f, 0x7f7f817f, 0x7f818181, 0x7f7f817f, 0x81818181, +0x7f81817f, 0x817f8181, 0x81818181, 0x81817f81, 0x817f817f, 0x81817f7f, 0x7f7f8181, 0x7f7f7f81, 
+0x81817f7f, 0x7f817f81, 0x7f817f7f, 0x7f7f7f7f, 0x7f7f8181, 0x817f7f7f, 0x7f81817f, 0x7f818181, +0x7f818181, 0x817f7f7f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x817f7f81, 0x7f7f7f81, 0x817f7f81, +0x7f817f7f, 0x7f7f7f7f, 0x7f818181, 0x7f817f81, 0x8181817f, 0x817f7f7f, 0x7f817f81, 0x81818181, +0x7f7f817f, 0x81817f81, 0x8181817f, 0x7f7f7f81, 0x81817f7f, 0x7f817f81, 0x81818181, 0x817f817f, +0x7f7f7f81, 0x7f81817f, 0x7f7f817f, 0x817f7f81, 0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x8181817f, +0x7f7f7f7f, 0x81817f81, 0x7f7f7f7f, 0x7f7f7f7f, 0x81817f7f, 0x7f817f81, 0x7f81817f, 0x81817f81, +0x817f817f, 0x81817f81, 0x81817f7f, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f817f, 0x7f818181, 0x817f817f, +0x7f7f7f7f, 0x7f817f7f, 0x8181817f, 0x817f7f7f, 0x7f7f817f, 0x7f81817f, 0x7f7f7f81, 0x817f7f7f, +0x7f817f81, 0x7f7f8181, 0x8181817f, 0x7f818181, 0x7f817f7f, 0x817f817f, 0x7f7f7f81, 0x817f817f, +0x7f7f817f, 0x7f7f8181, 0x7f7f7f7f, 0x7f7f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f7f, 0x7f81817f, +0x7f7f817f, 0x7f817f7f, 0x7f818181, 0x7f7f817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x7f817f81, +0x7f817f7f, 0x817f817f, 0x7f7f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x81818181, 0x817f7f7f, +0x817f7f81, 0x817f7f7f, 0x817f7f7f, 0x8181817f, 0x7f7f7f81, 0x7f7f8181, 0x817f7f81, 0x817f7f81, +0x7f817f7f, 0x817f7f7f, 0x817f817f, 0x8181817f, 0x7f818181, 0x7f7f817f, 0x7f817f81, 0x7f7f817f, +0x7f817f7f, 0x7f817f81, 0x8181817f, 0x81818181, 0x7f7f7f7f, 0x7f81817f, 0x81817f7f, 0x7f817f7f, +0x817f8181, 0x7f818181, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x7f818181, 0x81818181, 0x7f7f7f7f, +0x8181817f, 0x81817f81, 0x817f7f81, 0x81818181, 0x81817f7f, 0x8181817f, 0x7f817f7f, 0x7f7f7f7f, +0x817f7f81, 0x817f7f81, 0x817f7f7f, 0x7f7f7f81, 0x7f7f817f, 0x81817f7f, 0x7f7f8181, 0x7f7f817f, +0x81818181, 0x7f7f7f81, 0x7f7f817f, 0x7f7f8181, 0x8181817f, 0x7f818181, 0x817f817f, 0x7f7f7f81, +0x817f7f7f, 0x7f817f7f, 0x817f7f81, 0x8181817f, 0x7f81817f, 0x817f817f, 0x8181817f, 0x7f7f7f7f, +0x817f7f7f, 0x7f818181, 0x817f7f81, 0x7f7f8181, 0x817f7f7f, 0x8181817f, 0x817f8181, 0x8181817f, +0x81817f81, 0x817f8181, 0x817f7f81, 0x7f7f8181, 0x817f8181, 0x7f817f81, 0x7f7f7f81, 0x7f818181, +0x7f817f7f, 0x7f7f8181, 0x7f7f817f, 0x81817f81, 0x81817f7f, 0x81817f7f, 0x81817f7f, 0x817f817f, +0x7f81817f, 0x7f7f7f7f, 0x817f8181, 0x8181817f, 0x7f817f81, 0x7f817f7f, 0x817f7f7f, 0x7f7f8181, +0x7f7f8181, 0x7f7f7f81, 0x7f818181, 0x81817f7f, 0x8181817f, 0x817f7f81, 0x7f7f7f7f, 0x7f81817f, +0x817f8181, 0x7f81817f, 0x8181817f, 0x81818181, 0x7f817f81, 0x81817f81, 0x7f7f7f81, 0x817f7f7f, +0x81818181, 0x7f7f7f7f, 0x81818181, 0x81818181, 0x7f7f8181, 0x7f817f81, 0x81818181, 0x817f8181, +0x81817f81, 0x7f81817f, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x81818181, 0x81817f81, 0x81817f81, +0x7f7f8181, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f, 0x7f7f8181, 0x7f818181, 0x81818181, 0x7f7f7f7f, +0x81817f7f, 0x817f7f81, 0x7f7f817f, 0x817f8181, 0x817f8181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f, +0x7f7f7f81, 0x817f7f81, 0x7f81817f, 0x817f7f81, 0x817f817f, 0x7f7f7f7f, 0x81817f7f, 0x81817f81, +0x81818181, 0x7f817f7f, 0x81817f81, 0x7f7f7f81, 0x817f7f7f, 0x81818181, 0x7f7f817f, 0x7f818181, +0x7f7f7f7f, 0x81818181, 0x7f7f7f81, 0x81818181, 0x8181817f, 0x7f817f81, 0x7f7f7f7f, 0x7f817f81, +0x7f7f7f7f, 0x7f7f7f81, 0x81817f7f, 0x8181817f, 0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x7f7f817f, +0x7f7f7f81, 0x8181817f, 0x81817f7f, 0x7f81817f, 0x7f817f81, 0x817f7f81, 0x7f7f817f, 0x81817f81, +0x817f7f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f, 0x817f7f81, 0x817f8181, 0x7f7f7f7f, +0x7f818181, 0x81817f7f, 0x7f817f7f, 0x817f817f, 0x7f7f8181, 
0x7f7f817f, 0x817f7f81, 0x7f7f8181, +0x817f7f81, 0x81817f81, 0x817f8181, 0x817f817f, 0x81818181, 0x7f817f81, 0x81817f81, 0x7f7f8181, +0x7f7f7f81, 0x817f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x81818181, 0x7f7f7f7f, 0x7f817f7f, 0x7f7f7f7f, +0x81818181, 0x8181817f, 0x7f7f7f7f, 0x7f7f817f, 0x817f8181, 0x817f817f, 0x817f7f7f, 0x817f8181, +0x7f7f8181, 0x7f7f8181, 0x817f817f, 0x7f817f81, 0x817f817f, 0x7f7f7f81, 0x817f7f7f, 0x7f7f817f, +0x7f7f7f7f, 0x817f8181, 0x817f8181, 0x7f7f817f, 0x81817f81, 0x7f7f8181, 0x7f81817f, 0x81817f7f, +0x817f7f7f, 0x817f817f, 0x7f7f8181, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f81, 0x7f81817f, +0x81817f7f, 0x817f7f7f, 0x7f817f81, 0x81818181, 0x7f7f817f, 0x817f7f81, 0x7f7f817f, 0x7f817f81, +0x81817f81, 0x81818181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f81, 0x81817f7f, 0x81818181, 0x817f8181, +0x7f7f817f, 0x7f817f7f, 0x7f817f81, 0x817f7f81, 0x7f7f817f, 0x7f818181, 0x7f817f81, 0x7f817f81, +0x7f818181, 0x81817f7f, 0x817f7f81, 0x8181817f, 0x7f7f7f81, 0x81817f7f, 0x7f817f81, 0x8181817f, +0x7f7f817f, 0x817f817f, 0x81817f81, 0x81817f81, 0x81817f7f, 0x7f818181, 0x817f7f81, 0x817f7f81, +0x7f7f7f7f, 0x7f81817f, 0x7f817f81, 0x7f7f817f, 0x7f818181, 0x8181817f, 0x81817f81, 0x7f817f81, +0x7f81817f, 0x817f7f81, 0x7f818181, 0x817f817f, 0x817f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f, +0x81817f7f, 0x7f7f8181, 0x81817f81, 0x81817f7f, 0x8181817f, 0x7f7f817f, 0x8181817f, 0x7f818181, +0x7f817f7f, 0x817f7f7f, 0x81817f81, 0x7f81817f, 0x81818181, 0x7f7f7f81, 0x7f7f817f, 0x7f7f7f81, +0x817f7f81, 0x817f7f7f, 0x7f817f7f, 0x7f7f8181, 0x7f817f81, 0x7f817f81, 0x7f81817f, 0x81818181, +0x7f7f7f81, 0x7f7f7f81, 0x81818181, 0x817f8181, 0x7f7f8181, 0x817f8181, 0x817f7f81, 0x8181817f, +0x81818181, 0x8181817f, 0x8181817f, 0x817f7f81, 0x7f7f8181, 0x817f7f81, 0x7f817f7f, 0x817f7f7f, +0x81818181, 0x81818181, 0x81817f7f, 0x7f81817f, 0x7f817f81, 0x81817f81, 0x7f7f7f7f, 0x817f817f, +0x7f7f7f7f, 0x7f817f7f, 0x817f8181, 0x7f7f7f81, 0x817f7f7f, 0x7f817f81, 0x7f7f817f, 0x8181817f, +0x817f7f81, 0x7f81817f, 0x817f817f, 0x7f817f7f, 0x81817f81, 0x7f7f817f, 0x817f8181, 0x817f7f7f, +0x81818181, 0x817f7f81, 0x7f81817f, 0x817f817f, 0x817f817f, 0x817f7f7f, 0x7f7f817f, 0x8181817f, +0x7f7f7f7f, 0x81817f81, 0x8181817f, 0x7f81817f, 0x817f7f81, 0x7f7f7f7f, 0x817f7f7f, 0x817f817f, +0x81818181, 0x8181817f, 0x81817f81, 0x81817f7f, 0x7f7f817f, 0x81817f81, 0x7f7f817f, 0x8181817f, +0x7f818181, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x81817f7f, 0x7f818181, 0x81818181, 0x8181817f, +0x817f817f, 0x7f7f7f7f, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f81, 0x7f7f817f, 0x7f818181, 0x81818181, +0x81818181, 0x7f817f81, 0x7f7f817f, 0x7f817f7f, 0x81817f7f, 0x7f7f817f, 0x8181817f, 0x7f7f7f7f, +0x7f7f8181, 0x7f7f817f, 0x7f7f7f7f, 0x817f817f, 0x7f7f7f7f, 0x817f7f81, 0x817f8181, 0x81818181, +0x7f818181, 0x7f818181, 0x8181817f, 0x7f7f7f7f, 0x8181817f, 0x7f817f7f, 0x7f7f817f, 0x81818181, +0x7f817f7f, 0x817f8181, 0x817f7f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f8181, +0x81817f81, 0x81818181, 0x7f7f817f, 0x817f7f81, 0x7f81817f, 0x8181817f, 0x81818181, 0x81817f7f, +0x7f81817f, 0x81818181, 0x7f7f8181, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x817f817f, 0x7f818181, +0x7f817f7f, 0x7f7f7f7f, 0x817f817f, 0x817f7f7f, 0x7f81817f, 0x817f7f81, 0x81818181, 0x7f7f817f, +0x817f817f, 0x81817f81, 0x817f7f81, 0x81817f7f, 0x7f81817f, 0x7f81817f, 0x7f817f81, 0x81817f81, +0x7f7f817f, 0x8181817f, 0x7f818181, 0x7f7f7f7f, 0x81818181, 0x8181817f, 0x7f817f81, 0x8181817f, +0x7f81817f, 0x7f7f8181, 0x817f817f, 0x817f7f81, 0x7f817f81, 0x81817f7f, 0x7f818181, 0x81818181, +0x81817f81, 0x817f7f81, 
0x7f81817f, 0x817f817f, 0x81817f81, 0x7f7f7f81, 0x7f818181, 0x81817f81, +0x817f7f81, 0x7f7f817f, 0x817f8181, 0x7f817f7f, 0x817f817f, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f817f, +0x817f817f, 0x817f817f, 0x817f8181, 0x7f7f7f81, 0x817f7f81, 0x817f8181, 0x7f7f7f81, 0x8181817f, +0x81817f7f, 0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x817f817f, 0x81817f7f, 0x7f7f7f7f, 0x7f81817f, +0x7f817f7f, 0x81818181, 0x81818181, 0x7f7f7f81, 0x81817f7f, 0x8181817f, 0x7f818181, 0x7f817f7f, +0x81817f81, 0x817f7f7f, 0x7f7f8181, 0x7f817f7f, 0x81817f7f, 0x8181817f, 0x817f8181, 0x817f7f81, +0x7f7f7f7f, 0x81817f7f, 0x81817f81, 0x81818181, 0x81817f7f, 0x817f817f, 0x817f817f, 0x7f7f817f, +0x7f81817f, 0x81818181, 0x7f817f81, 0x7f7f8181, 0x7f7f817f, 0x7f7f7f81, 0x81817f7f, 0x817f8181, +0x7f817f7f, 0x8181817f, 0x817f8181, 0x817f7f7f, 0x7f7f7f81, 0x8181817f, 0x817f7f81, 0x7f817f81, +0x7f7f8181, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f817f7f, 0x7f7f7f81, 0x7f817f7f, 0x8181817f, +0x7f81817f, 0x817f817f, 0x7f817f81, 0x8181817f, 0x7f818181, 0x81817f81, 0x7f818181, 0x81818181, +0x7f7f7f81, 0x817f7f7f, 0x817f8181, 0x7f817f7f, 0x7f817f81, 0x817f7f81, 0x81818181, 0x7f7f7f81, +0x7f7f7f81, 0x7f7f7f81, 0x7f81817f, 0x817f817f, 0x7f7f8181, 0x7f817f7f, 0x7f817f81, 0x7f818181, +0x7f7f8181, 0x817f8181, 0x7f817f7f, 0x7f7f8181, 0x7f7f7f81, 0x817f8181, 0x7f817f7f, 0x7f817f7f, +0x7f7f817f, 0x7f7f7f81, 0x817f7f7f, 0x7f817f7f, 0x817f817f, 0x7f817f81, 0x7f7f8181, 0x81817f81, +0x7f7f8181, 0x7f818181, 0x817f7f81, 0x7f818181, 0x81817f7f, 0x817f7f7f, 0x7f7f8181, 0x81817f7f, +0x7f817f7f, 0x7f818181, 0x7f81817f, 0x81818181, 0x7f7f817f, 0x7f818181, 0x7f817f7f, 0x81817f81, +0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f, 0x7f817f81, 0x7f81817f, +0x7f7f7f81, 0x7f7f7f81, 0x817f817f, 0x7f7f7f81, 0x81818181, 0x7f7f7f81, 0x8181817f, 0x817f7f81, +0x7f7f817f, 0x817f817f, 0x817f8181, 0x7f7f8181, 0x81817f7f, 0x7f817f81, 0x8181817f, 0x7f81817f, +0x7f818181, 0x7f818181, 0x7f817f81, 0x7f7f7f7f, 0x7f81817f, 0x817f7f81, 0x7f7f7f81, 0x81818181, +0x7f7f7f81, 0x817f8181, 0x7f81817f, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x7f7f7f81, 0x7f81817f, +0x81817f81, 0x7f817f81, 0x81817f7f, 0x81818181, 0x817f817f, 0x817f7f7f, 0x817f8181, 0x81818181, +0x7f81817f, 0x81818181, 0x81818181, 0x7f818181, 0x7f7f7f7f, 0x81817f81, 0x8181817f, 0x7f7f817f, +0x817f7f81, 0x817f7f81, 0x817f8181, 0x7f7f8181, 0x817f8181, 0x7f7f7f81, 0x817f7f81, 0x81818181, +0x8181817f, 0x81817f81, 0x7f7f8181, 0x7f7f8181, 0x7f7f8181, 0x817f817f, 0x7f817f7f, 0x7f81817f, +0x81817f7f, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x7f7f7f81, 0x817f7f81, 0x81818181, 0x7f7f7f81, +0x7f7f8181, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x81817f81, 0x7f81817f, 0x81817f81, 0x7f7f8181, +0x817f817f, 0x7f817f81, 0x7f7f8181, 0x817f817f, 0x7f817f7f, 0x817f7f81, 0x7f7f7f81, 0x7f7f817f, +0x817f8181, 0x7f7f817f, 0x8181817f, 0x81818181, 0x81817f7f, 0x7f817f7f, 0x7f7f817f, 0x81817f7f, +0x7f7f817f, 0x7f81817f, 0x81818181, 0x81817f81, 0x8181817f, 0x7f817f7f, 0x81818181, 0x7f7f7f81, +0x7f7f7f7f, 0x81817f7f, 0x81817f81, 0x7f7f817f, 0x7f7f8181, 0x8181817f, 0x81818181, 0x7f7f7f81, +0x7f817f7f, 0x7f817f7f, 0x817f7f81, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x7f817f81, 0x817f8181, +0x7f817f7f, 0x7f817f7f, 0x7f7f817f, 0x7f7f817f, 0x7f817f7f, 0x7f817f7f, 0x7f818181, 0x817f7f81, +0x7f81817f, 0x7f817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f7f, 0x81817f7f, 0x7f81817f, 0x81818181, +0x7f7f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x81817f81, 0x8181817f, 0x7f818181, 0x81817f81, +0x817f7f81, 0x817f7f7f, 0x7f7f7f81, 0x7f7f7f7f, 0x7f7f8181, 0x7f81817f, 0x7f7f817f, 
0x7f7f817f, +0x817f7f81, 0x7f7f7f7f, 0x81817f81, 0x817f817f, 0x817f817f, 0x817f817f, 0x7f81817f, 0x7f7f8181, +0x7f818181, 0x817f8181, 0x7f7f8181, 0x7f817f81, 0x81818181, 0x7f817f81, 0x817f7f7f, 0x7f817f7f, +0x7f7f8181, 0x817f8181, 0x7f818181, 0x81817f81, 0x7f818181, 0x8181817f, 0x8181817f, 0x7f818181, +0x8181817f, 0x7f7f7f7f, 0x7f7f817f, 0x7f818181, 0x7f7f7f81, 0x7f7f817f, 0x81818181, 0x817f7f7f, +0x8181817f, 0x817f817f, 0x7f7f8181, 0x7f7f7f81, 0x81818181, 0x8181817f, 0x817f817f, 0x817f817f, +0x7f818181, 0x7f7f8181, 0x8181817f, 0x817f817f, 0x81817f7f, 0x81818181, 0x7f81817f, 0x817f817f, +0x7f7f7f7f, 0x7f817f81, 0x7f818181, 0x817f7f7f, 0x817f7f81, 0x7f817f81, 0x7f7f817f, 0x7f7f7f7f, +0x81817f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f817f, 0x8181817f, 0x7f81817f, 0x7f818181, 0x7f7f7f7f, +0x7f7f7f7f, 0x7f7f7f81, 0x7f818181, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x817f7f81, 0x7f817f7f, +0x817f7f7f, 0x7f7f7f81, 0x7f7f8181, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, 0x7f817f7f, 0x7f7f7f81, +0x817f817f, 0x817f7f7f, 0x81818181, 0x7f818181, 0x81818181, 0x81818181, 0x7f7f7f81, 0x8181817f, +0x81818181, 0x7f817f7f, 0x817f7f81, 0x7f7f8181, 0x7f818181, 0x8181817f, 0x8181817f, 0x817f7f81, +0x7f817f7f, 0x81817f7f, 0x817f8181, 0x7f817f7f, 0x7f81817f, 0x81817f7f, 0x7f817f7f, 0x817f7f7f, +0x817f7f7f, 0x7f817f81, 0x8181817f, 0x817f7f7f, 0x7f817f81, 0x81817f81, 0x817f817f, 0x7f7f7f81, +0x7f817f81, 0x7f817f7f, 0x817f817f, 0x817f7f81, 0x7f7f7f7f, 0x81817f81, 0x7f818181, 0x81817f81, +0x817f8181, 0x7f817f7f, 0x81817f81, 0x817f7f81, 0x7f7f7f7f, 0x81817f81, 0x81817f81, 0x81817f81, +0x81817f7f, 0x7f81817f, 0x81818181, 0x7f7f8181, 0x817f817f, 0x817f8181, 0x817f7f7f, 0x8181817f, +0x81817f7f, 0x8181817f, 0x7f817f81, 0x81817f7f, 0x81818181, 0x817f7f7f, 0x7f81817f, 0x817f817f, +0x81818181, 0x817f7f81, 0x817f817f, 0x817f7f7f, 0x81817f7f, 0x7f7f8181, 0x817f7f81, 0x8181817f, +0x7f818181, 0x7f817f81, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x81818181, 0x7f7f8181, 0x7f7f7f7f, +0x7f81817f, 0x817f8181, 0x817f817f, 0x7f81817f, 0x81818181, 0x7f817f7f, 0x7f7f7f7f, 0x7f7f7f7f, +0x7f7f8181, 0x817f8181, 0x817f817f, 0x7f7f7f81, 0x817f7f7f, 0x7f7f817f, 0x7f817f7f, 0x817f8181, +0x81817f81, 0x817f8181, 0x7f81817f, 0x817f8181, 0x817f8181, 0x817f7f81, 0x7f7f817f, 0x7f818181, +0x7f7f817f, 0x7f7f7f7f, 0x817f8181, 0x7f7f7f7f, 0x8181817f, 0x817f8181, 0x81818181, 0x817f7f7f, +0x7f7f817f, 0x817f7f7f, 0x817f817f, 0x7f818181, 0x8181817f, 0x817f817f, 0x7f7f7f7f, 0x7f81817f, +0x7f817f81, 0x81818181, 0x7f817f7f, 0x817f817f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x81817f81, +0x81818181, 0x81817f81, 0x7f7f817f, 0x81818181, 0x817f8181, 0x7f81817f, 0x81817f7f, 0x7f81817f, +0x817f8181, 0x81818181, 0x7f7f7f81, 0x81817f81, 0x817f7f7f, 0x817f8181, 0x7f7f817f, 0x7f7f8181, +0x7f81817f, 0x7f818181, 0x7f81817f, 0x7f7f817f, 0x7f81817f, 0x7f7f7f81, 0x7f81817f, 0x817f8181, +0x81817f81, 0x81818181, 0x817f8181, 0x81818181, 0x7f81817f, 0x7f7f8181, 0x7f81817f, 0x81817f81, +0x7f818181, 0x81818181, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x81817f7f, 0x7f81817f, 0x81818181, +0x81817f81, 0x81817f7f, 0x7f7f817f, 0x817f7f7f, 0x8181817f, 0x81817f81, 0x81817f81, 0x8181817f, +0x817f817f, 0x81817f7f, 0x7f817f7f, 0x817f817f, 0x817f7f7f, 0x8181817f, 0x7f7f7f7f, 0x817f817f, +0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x7f817f7f, 0x7f7f8181, 0x7f7f7f7f, +0x7f81817f, 0x817f817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x81818181, +0x7f7f817f, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f7f8181, 0x81818181, 0x817f7f7f, 0x81818181, +0x81817f81, 0x817f817f, 0x7f817f81, 0x7f7f8181, 
0x7f7f7f81, 0x81817f81, 0x7f7f7f81, 0x817f7f7f, +0x817f817f, 0x817f8181, 0x7f817f81, 0x817f7f7f, 0x7f817f7f, 0x7f7f7f81, 0x81817f7f, 0x7f7f7f81, +0x7f817f7f, 0x7f81817f, 0x817f817f, 0x7f7f8181, 0x7f81817f, 0x8181817f, 0x7f7f7f7f, 0x7f7f817f, +0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f, 0x7f817f81, 0x817f817f, 0x81817f7f, 0x7f81817f, 0x8181817f, +0x817f7f81, 0x7f7f7f81, 0x81818181, 0x7f817f81, 0x81818181, 0x7f818181, 0x817f7f7f, 0x81818181, +0x81818181, 0x7f7f7f7f, 0x8181817f, 0x7f7f7f81, 0x81818181, 0x7f7f817f, 0x817f7f81, 0x7f81817f, +0x817f817f, 0x7f7f7f81, 0x7f81817f, 0x7f81817f, 0x7f818181, 0x817f817f, 0x81817f7f, 0x7f7f817f, +0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x817f817f, 0x81817f7f, 0x7f7f7f81, 0x817f817f, 0x7f7f817f, +0x81817f7f, 0x7f7f7f7f, 0x7f81817f, 0x817f7f7f, 0x7f7f8181, 0x7f7f8181, 0x7f7f817f, 0x7f81817f, +0x81817f81, 0x817f7f81, 0x7f81817f, 0x81817f7f, 0x817f8181, 0x817f8181, 0x817f7f7f, 0x7f7f7f7f, +0x7f7f7f7f, 0x81817f81, 0x817f817f, 0x81817f81, 0x7f7f7f7f, 0x817f817f, 0x817f7f81, 0x81817f81, +0x817f7f81, 0x817f7f7f, 0x81817f81, 0x817f7f81, 0x7f81817f, 0x81817f7f, 0x7f818181, 0x8181817f, +0x7f7f7f81, 0x81817f81, 0x7f817f81, 0x81818181, 0x817f8181, 0x7f7f8181, 0x81818181, 0x817f7f81, +0x817f8181, 0x81817f7f, 0x8181817f, 0x817f817f, 0x8181817f, 0x7f818181, 0x81817f81, 0x81818181, +0x817f817f, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x7f7f8181, 0x8181817f, +0x817f7f81, 0x7f818181, 0x7f7f817f, 0x7f81817f, 0x81818181, 0x81817f7f, 0x7f817f7f, 0x817f8181, +0x7f7f8181, 0x81817f81, 0x7f7f8181, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f7f, 0x81817f81, +0x7f7f817f, 0x817f817f, 0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x8181817f, 0x817f8181, 0x817f7f7f, +0x817f7f81, 0x817f8181, 0x8181817f, 0x7f818181, 0x7f818181, 0x7f7f7f81, 0x817f7f81, 0x7f817f81, +0x81817f81, 0x817f8181, 0x817f7f7f, 0x817f7f7f, 0x81818181, 0x7f7f7f81, 0x817f817f, 0x817f7f81, +0x81817f7f, 0x7f818181, 0x7f7f817f, 0x7f818181, 0x7f7f8181, 0x7f817f7f, 0x817f8181, 0x81817f81, +0x81818181, 0x7f817f7f, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, 0x81817f7f, 0x7f817f7f, +0x7f7f8181, 0x817f8181, 0x7f818181, 0x8181817f, 0x81817f81, 0x817f7f7f, 0x7f817f81, 0x81818181, +0x817f817f, 0x81818181, 0x817f7f81, 0x817f7f7f, 0x817f817f, 0x7f817f81, 0x7f817f7f, 0x817f7f81, +0x7f818181, 0x817f7f7f, 0x7f81817f, 0x81817f7f, 0x817f8181, 0x817f8181, 0x81818181, 0x817f7f81, +0x7f7f817f, 0x81817f7f, 0x8181817f, 0x7f7f8181, 0x7f81817f, 0x8181817f, 0x817f7f81, 0x7f81817f, +0x817f817f, 0x7f7f817f, 0x817f7f7f, 0x8181817f, 0x7f7f817f, 0x7f81817f, 0x8181817f, 0x7f81817f, +0x81818181, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f818181, 0x817f7f7f, +0x817f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x817f8181, 0x7f81817f, 0x7f7f817f, 0x81817f7f, +0x7f7f7f7f, 0x817f7f81, 0x81817f81, 0x817f817f, 0x7f7f8181, 0x817f7f81, 0x7f7f8181, 0x81818181, +0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x8181817f, 0x7f7f7f81, 0x817f7f7f, 0x81818181, 0x81817f7f, +0x7f817f7f, 0x817f7f7f, 0x81817f7f, 0x81817f7f, 0x817f7f7f, 0x81817f7f, 0x817f8181, 0x7f81817f, +0x7f7f7f7f, 0x817f817f, 0x817f7f81, 0x8181817f, 0x817f7f7f, 0x7f817f7f, 0x8181817f, 0x81817f81, +0x817f7f81, 0x7f7f7f81, 0x7f7f7f81, 0x7f818181, 0x7f817f81, 0x81818181, 0x817f8181, 0x8181817f, +0x7f817f7f, 0x817f8181, 0x7f818181, 0x817f7f81, 0x7f817f7f, 0x7f7f8181, 0x817f817f, 0x817f8181, +0x7f81817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f817f, 0x8181817f, 0x81817f81, 0x817f8181, 0x7f7f7f81, +0x8181817f, 0x8181817f, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x7f818181, 0x81818181, 0x7f7f7f81, +0x7f817f7f, 
0x81818181, 0x817f8181, 0x817f8181, 0x7f7f7f7f, 0x817f7f81, 0x817f817f, 0x817f7f81, +0x81818181, 0x817f8181, 0x7f7f8181, 0x7f7f8181, 0x817f817f, 0x81817f7f, 0x7f7f8181, 0x817f8181, +0x7f7f817f, 0x7f7f817f, 0x7f817f81, 0x7f817f7f, 0x7f818181, 0x7f81817f, 0x81817f7f, 0x817f8181, +0x817f8181, 0x7f818181, 0x8181817f, 0x7f817f7f, 0x81817f81, 0x7f7f8181, 0x817f7f81, 0x817f8181, +0x7f7f7f7f, 0x817f7f7f, 0x7f817f81, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f81, 0x81818181, 0x81817f81, +0x7f817f7f, 0x7f818181, 0x817f7f81, 0x8181817f, 0x81817f7f, 0x8181817f, 0x7f81817f, 0x7f817f81, +0x817f8181, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f818181, 0x7f81817f, 0x7f7f8181, 0x7f7f7f81, +0x817f817f, 0x81817f7f, 0x7f81817f, 0x81817f7f, 0x7f818181, 0x817f8181, 0x81818181, 0x8181817f, +0x81817f7f, 0x7f817f7f, 0x817f8181, 0x81817f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x81817f81, 0x817f817f, +0x817f8181, 0x81817f81, 0x81817f81, 0x7f817f7f, 0x7f7f817f, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f, +0x81817f81, 0x7f817f7f, 0x81817f81, 0x81817f81, 0x7f817f7f, 0x7f7f817f, 0x81817f81, 0x817f8181, +0x81817f81, 0x8181817f, 0x7f7f7f81, 0x817f8181, 0x81817f7f, 0x81817f81, 0x817f8181, 0x817f8181, +0x817f817f, 0x817f7f7f, 0x81817f7f, 0x7f817f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f818181, 0x81817f81, +0x817f8181, 0x817f7f7f, 0x817f817f, 0x8181817f, 0x81818181, 0x7f817f81, 0x81817f7f, 0x81818181, +0x7f818181, 0x817f817f, 0x7f7f7f81, 0x817f817f, 0x7f7f817f, 0x817f817f, 0x7f7f7f7f, 0x817f7f7f, +0x817f7f7f, 0x7f817f81, 0x7f7f817f, 0x81817f7f, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x817f817f, +0x817f817f, 0x7f817f81, 0x7f818181, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x81818181, 0x7f817f7f, +0x7f7f7f81, 0x817f817f, 0x7f817f7f, 0x817f7f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f7f7f, 0x817f817f, +0x81818181, 0x81818181, 0x8181817f, 0x81817f81, 0x81818181, 0x817f8181, 0x7f817f7f, 0x7f817f81, +0x817f8181, 0x7f818181, 0x817f7f81, 0x817f7f7f, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x7f817f7f, +0x7f7f7f7f, 0x7f81817f, 0x7f7f817f, 0x7f81817f, 0x81817f81, 0x817f8181, 0x7f817f81, 0x817f8181, +0x81818181, 0x817f817f, 0x81817f81, 0x817f817f, 0x817f8181, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f7f7f, +0x81817f81, 0x7f81817f, 0x817f7f81, 0x7f818181, 0x81817f7f, 0x7f7f8181, 0x7f7f8181, 0x7f817f7f, +0x8181817f, 0x7f81817f, 0x8181817f, 0x81818181, 0x81817f7f, 0x7f7f817f, 0x81817f81, 0x8181817f, +0x7f81817f, 0x7f817f81, 0x7f7f817f, 0x7f817f7f, 0x7f817f7f, 0x817f8181, 0x81817f81, 0x7f7f7f7f, +0x7f817f7f, 0x81817f7f, 0x7f7f7f7f, 0x817f817f, 0x7f7f7f81, 0x817f7f81, 0x7f818181, 0x7f7f8181, +0x7f817f81, 0x7f817f7f, 0x8181817f, 0x7f7f7f81, 0x7f7f8181, 0x81818181, 0x7f7f8181, 0x7f817f7f, +0x7f7f8181, 0x7f81817f, 0x7f7f8181, 0x81818181, 0x7f817f7f, 0x7f7f8181, 0x7f817f7f, 0x8181817f, +0x7f817f7f, 0x817f7f7f, 0x81817f81, 0x7f817f81, 0x817f817f, 0x7f817f81, 0x7f817f7f, 0x81818181, +0x7f817f81, 0x7f7f8181, 0x7f81817f, 0x81818181, 0x7f7f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x7f817f81, +0x7f81817f, 0x817f7f7f, 0x817f8181, 0x7f817f7f, 0x817f7f81, 0x7f7f7f7f, 0x7f817f7f, 0x7f817f81, +0x817f7f7f, 0x817f8181, 0x7f7f7f7f, 0x817f7f81, 0x81817f7f, 0x81818181, 0x817f817f, 0x81818181, +0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x81817f81, 0x7f81817f, 0x7f7f7f81, 0x7f7f7f81, 0x81817f7f, +0x817f7f81, 0x817f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x8181817f, 0x81817f7f, 0x7f7f7f7f, 0x7f818181, +0x7f818181, 0x81818181, 0x817f8181, 0x81817f7f, 0x7f7f8181, 0x817f817f, 0x7f818181, 0x7f7f8181, +0x7f7f817f, 0x7f7f8181, 0x7f7f7f7f, 0x8181817f, 0x81818181, 0x817f8181, 0x7f7f7f7f, 0x7f81817f, +0x81817f7f, 0x7f7f8181, 0x81817f81, 0x817f7f7f, 0x817f7f7f, 0x7f817f81, 
0x817f7f7f, 0x81817f7f, +0x817f817f, 0x7f7f7f81, 0x817f7f7f, 0x817f817f, 0x817f7f7f, 0x817f8181, 0x81818181, 0x7f81817f, +0x7f7f8181, 0x7f81817f, 0x81817f7f, 0x7f818181, 0x81817f81, 0x817f7f81, 0x7f7f8181, 0x7f817f81, +0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x817f8181, 0x8181817f, 0x817f7f81, 0x817f7f81, 0x7f817f81, +0x7f7f817f, 0x7f817f81, 0x7f817f7f, 0x7f817f81, 0x81818181, 0x81818181, 0x7f7f7f7f, 0x81818181, +0x7f817f7f, 0x81818181, 0x7f81817f, 0x7f817f81, 0x81817f7f, 0x817f7f81, 0x7f7f817f, 0x7f817f7f, +0x7f818181, 0x7f7f817f, 0x7f817f7f, 0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x817f7f81, 0x7f7f7f81, +0x81818181, 0x7f817f7f, 0x81818181, 0x7f818181, 0x817f7f81, 0x7f817f7f, 0x81818181, 0x81818181, +0x7f817f81, 0x7f7f7f7f, 0x81817f7f, 0x7f818181, 0x81817f7f, 0x81817f81, 0x8181817f, 0x817f817f, +0x817f817f, 0x817f7f81, 0x7f7f8181, 0x817f7f7f, 0x817f8181, 0x7f7f7f7f, 0x7f7f7f7f, 0x817f7f81, +0x817f7f7f, 0x7f7f8181, 0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f8181, 0x7f7f817f, +0x817f7f7f, 0x81818181, 0x8181817f, 0x7f817f7f, 0x7f7f8181, 0x81817f81, 0x7f81817f, 0x7f7f8181, +0x817f7f7f, 0x817f7f81, 0x7f81817f, 0x7f7f7f81, 0x7f817f81, 0x8181817f, 0x817f7f81, 0x7f817f81, +0x81817f81, 0x81817f81, 0x81817f7f, 0x7f7f817f, 0x817f7f81, 0x7f81817f, 0x817f817f, 0x7f817f81, +0x817f7f81, 0x7f7f817f, 0x7f7f817f, 0x7f7f7f7f, 0x7f81817f, 0x7f7f817f, 0x8181817f, 0x817f7f7f, +0x7f817f81, 0x7f817f7f, 0x7f81817f, 0x81817f81, 0x7f817f7f, 0x7f81817f, 0x81817f7f, 0x7f7f7f7f, +0x817f817f, 0x7f7f7f7f, 0x7f81817f, 0x817f8181, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x8181817f, +0x817f817f, 0x81818181, 0x81817f81, 0x81817f81, 0x7f7f8181, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f, +0x7f817f81, 0x817f7f81, 0x81818181, 0x7f7f8181, 0x81817f7f, 0x81818181, 0x7f81817f, 0x7f7f7f7f, +0x81818181, 0x7f81817f, 0x7f7f7f81, 0x8181817f, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f8181, 0x81818181, +0x81818181, 0x7f7f8181, 0x7f817f81, 0x81817f7f, 0x7f7f817f, 0x817f7f81, 0x7f7f817f, 0x817f817f, +0x81817f7f, 0x7f7f817f, 0x7f7f7f7f, 0x81817f81, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f8181, +0x7f817f7f, 0x7f817f81, 0x817f8181, 0x817f7f7f, 0x7f818181, 0x7f81817f, 0x817f817f, 0x7f7f8181, +0x81817f7f, 0x7f7f7f7f, 0x817f7f81, 0x8181817f, 0x81818181, 0x7f817f81, 0x7f817f81, 0x7f81817f, +0x7f81817f, 0x7f7f817f, 0x817f7f7f, 0x7f817f7f, 0x817f7f81, 0x7f7f817f, 0x817f8181, 0x7f7f7f7f, +0x7f81817f, 0x7f7f817f, 0x817f817f, 0x817f817f, 0x7f817f7f, 0x7f7f7f7f, 0x7f81817f, 0x7f7f8181, +0x817f7f81, 0x7f7f7f81, 0x7f818181, 0x817f817f, 0x7f7f817f, 0x7f817f81, 0x817f7f81, 0x81818181, +0x81817f81, 0x7f7f7f81, 0x7f817f7f, 0x7f818181, 0x817f817f, 0x7f7f8181, 0x7f81817f, 0x7f81817f, +0x817f7f81, 0x81818181, 0x7f817f81, 0x7f7f7f81, 0x7f81817f, 0x7f818181, 0x7f817f7f, 0x81817f81, +0x81818181, 0x7f817f7f, 0x81817f81, 0x81817f81, 0x7f7f817f, 0x817f8181, 0x81818181, 0x7f81817f, +0x81817f81, 0x7f7f817f, 0x7f7f817f, 0x7f817f81, 0x817f7f7f, 0x8181817f, 0x81817f81, 0x81818181, +0x7f817f81, 0x7f7f7f7f, 0x81818181, 0x7f7f8181, 0x81817f81, 0x81817f7f, 0x7f7f817f, 0x7f7f7f7f, +0x7f7f7f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f7f7f81, 0x81817f81, 0x817f8181, 0x7f7f817f, 0x7f7f7f7f, +0x817f817f, 0x8181817f, 0x817f8181, 0x7f7f7f7f, 0x7f7f7f81, 0x81817f7f, 0x7f7f7f7f, 0x7f7f817f, +0x7f81817f, 0x7f7f7f81, 0x817f8181, 0x81817f81, 0x817f7f81, 0x7f7f817f, 0x7f81817f, 0x7f7f7f7f, +0x81818181, 0x81817f81, 0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x81817f81, 0x8181817f, 0x7f81817f, +0x81818181, 0x7f818181, 0x817f7f81, 0x81817f7f, 0x81818181, 0x817f7f7f, 0x7f7f7f81, 0x817f7f7f, +0x817f817f, 0x817f7f81, 0x81818181, 
0x81817f81, 0x7f818181, 0x81817f7f, 0x817f817f, 0x7f81817f, +0x817f7f81, 0x7f81817f, 0x8181817f, 0x7f7f7f7f, 0x8181817f, 0x7f7f817f, 0x7f817f7f, 0x817f7f7f, +0x7f818181, 0x817f8181, 0x7f7f817f, 0x817f7f81, 0x7f817f81, 0x81817f81, 0x7f7f8181, 0x7f81817f, +0x7f7f817f, 0x7f817f7f, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x817f7f81, 0x81818181, 0x7f7f817f, +0x817f8181, 0x7f81817f, 0x7f817f7f, 0x81817f81, 0x7f7f817f, 0x81817f81, 0x7f7f817f, 0x81818181, +0x81817f7f, 0x7f817f7f, 0x817f8181, 0x7f7f8181, 0x817f7f81, 0x81818181, 0x7f7f8181, 0x7f817f7f, +0x7f817f7f, 0x817f817f, 0x817f7f81, 0x81817f81, 0x7f7f8181, 0x817f7f7f, 0x7f7f8181, 0x81817f81, +0x7f817f7f, 0x817f8181, 0x817f817f, 0x81817f81, 0x81817f81, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f81, +0x81817f81, 0x7f81817f, 0x7f7f8181, 0x81818181, 0x817f7f7f, 0x7f81817f, 0x7f7f817f, 0x7f817f7f, +0x7f818181, 0x7f81817f, 0x817f8181, 0x7f7f7f7f, 0x7f81817f, 0x7f818181, 0x817f817f, 0x81818181, +0x7f818181, 0x7f817f81, 0x7f81817f, 0x7f7f8181, 0x817f7f81, 0x7f7f8181, 0x7f81817f, 0x7f7f7f81, +0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x7f7f8181, 0x7f818181, 0x81818181, 0x817f7f81, 0x817f8181, +0x7f7f7f7f, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f81, 0x817f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f818181, +0x81818181, 0x7f7f817f, 0x81818181, 0x7f818181, 0x7f817f7f, 0x7f817f81, 0x7f7f8181, 0x81818181, +0x7f7f7f81, 0x817f8181, 0x7f7f7f7f, 0x81818181, 0x81817f7f, 0x7f817f7f, 0x7f817f7f, 0x817f817f, +0x7f817f81, 0x817f8181, 0x817f817f, 0x817f817f, 0x7f7f8181, 0x817f7f81, 0x7f7f8181, 0x7f7f7f81, +0x7f7f7f7f, 0x7f7f7f7f, 0x8181817f + +e = +34560 + +k = +6144 + +rv_index = +0 + +iter_max = +8 + +iter_min = +4 + +expected_iter_count = +8 + +ext_scale = +15 + +num_maps = +0 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_SOFT_OUTPUT, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN, +RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT + +expected_status = +OK diff --git a/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_posllr.data b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_posllr.data new file mode 100644 index 00000000..13ad0908 --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_posllr.data @@ -0,0 +1,1225 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_DEC + +input0 = +0xf6bb1100, 0x4e433af1, 0x38f02efa, 0x0ee43d00, 0x5037c35f, 0x5620e729, 0xdd46c4d7, 0x5ae3e2f7, +0xe0d7fa2f, 0x2e4ae20d, 0xa7190ebf, 0xd2382614, 0x98ffabe0, 0xf7ee0c5d, 0xffd8ddd7, 0x5f03ecc7, +0xf9ffdde5, 0xf12446f6, 0x42693a3d, 0xfcc81aae, 0x2b0abc2c, 0x43f2a1b5, 0x044a302c, 0xf8db4fa6, +0xcad2d138, 0x452b5526, 0xf3083fd3, 0x30ea4b22, 0xe2bd66f4, 0xbc37bebe, 0x001e183b, 0xfac72ebf, +0x11965cf5, 0x70226311, 0xbd2016ec, 0xcf23f931, 0x4629f6fb, 0x3104de76, 0x00d7c529, 0xa0c8f8f4, +0x2615c2e0, 0x23daf4b4, 0x56bf05c4, 0x9212102c, 0xa5fe400d, 0xec2a5497, 0x021032bc, 0x3b55340e, +0xd6f1003c, 0x2a3625d0, 0xc81eee31, 0xa26644d7, 0x0ffb041c, 0x22cc20dc, 0x472cdd9b, 0xd8e24ed2, +0x4ee740ab, 0x9bfc50e2, 0xbaaafe55, 0xeecd5c19, 0x57150c31, 0x9d3f13c1, 0x251749e1, 0x076ca964, +0xde0da74b, 0xcafac963, 0x52d006e2, 0xe392d7f4, 0x3a20bcf6, 0xd1ee6fce, 0xe5cf850a, 0x31253fe7, +0xcefed8a9, 0x520f9824, 0x5fdbe3c9, 0x9b123cc5, 0x6ad019f2, 0x171f0368, 0xc2fe2855, 0x0db851c2, +0x095010d9, 0xd110df25, 0x1452a52c, 0xe5d1c2cc, 0x45dbef40, 0x2aef2353, 0x07af2e2a, 0x4f23ff6f, +0x3ddb31cb, 0xa8430c49, 0xcaf7b329, 0xf933c5bc, 0xfe3c52b3, 0x7fd60a27, 0xce0ac9cf, 0xcff0cdd7, +0x5400cd01, 0x7f81a8c1, 0x3c7f8178, 
0x737f87f6, 0x5a5e7f81, 0x817f6ff6, 0x813b1381, 0x41d18173, +0x810fb9f9, 0xb734c526, 0x50b3a6d3, 0x282d6e50, 0x81972d1f, 0x7d52bd53, 0x6749268b, 0xc681812d, +0x2c93912e, 0xb1813bdc, 0xed817fd1, 0xa8813376, 0x7f3c7f41, 0x64569f7c, 0xf53d5c67, 0x1e70e6dd, +0x024b6591, 0x817f7f8f, 0x0a722d63, 0x7fed578b, 0x7f239a7f, 0x81029081, 0xa8ca8b81, 0xb8577fde, +0x818ec07f, 0x7fa87cb9, 0xa7084967, 0x81934650, 0x7f8171f7, 0xed3d144a, 0x8107739c, 0x3c6fe27f, +0xcd6ed165, 0xfb3718a4, 0x5f708f4d, 0x14e45eb2, 0xfe5c7f7f, 0x7fc57f6a, 0x813683af, 0x817f7fb3, +0x007fa7ef, 0xdcd681a1, 0xd72a1268, 0x769f7881, 0xb27fa281, 0x89c27fe3, 0x729b2f59, 0x73afb8a0, +0xd07f4eb8, 0x81dd6d55, 0x7fa3d2cf, 0x7f81b51e, 0x7f7f7be7, 0x60819981, 0xf31158ad, 0xde7fe4b4, +0x427f2dd3, 0xd3812481, 0x280de7a4, 0x0a653fc6, 0x8181261f, 0xd6aa8f81, 0x2c7fe484, 0x42df747f, +0x7f5baa2d, 0xc1bd583c, 0x966b5031, 0xbe818725, 0x6b6444a4, 0x610023e7, 0x5f183b2c, 0xa77fb769, +0xb2cb1581, 0x0881cc75, 0x8a8c6fdc, 0x558181e4, 0x107f947f, 0x749e4ca3, 0x8cb23db1, 0xce668132, +0x70b848eb, 0x008a2430, 0x7fd3d77f, 0x682b35eb, 0x2e6aae34, 0xd42fa2cd, 0x3eab816f, 0xfdaf8196, +0x04e43fcc, 0xf1819600, 0x7f6d64f2, 0x81ba8957, 0x81a781a2, 0xb169d67a, 0x7f4e557f, 0xe0818197, +0x7b474ecc, 0x3f7b8154, 0x135684f8, 0x81b0fb81, 0x81e33e93, 0x815c817f, 0x810b818a, 0x4d445b34, +0x95cf2bb0, 0x32c69b7d, 0x2640e181, 0x7fe8b8c8, 0x9bdd64f7, 0x22bfa50b, 0x8181c87f, 0xf7812486, +0x7f977fa0, 0x5826a925, 0xce7fadf6, 0x9e73fb81, 0x538d7f0e, 0xe85dab87, 0xc3706181, 0x4181817f, +0x7f7f15ca, 0x952ca87f, 0xa9748be0, 0xbf812477, 0xd081a553, 0x9a817f7f, 0x1f4cb192, 0x7f8181d5, +0x2ba44ad2, 0x76605566, 0x7f1cd2b9, 0x446eae90, 0x8956b670, 0xaadc8b7f, 0x710b0a90, 0x83816439, +0xc6ffc6d2, 0xf07f00fb, 0x7fa25582, 0x7fd11f3a, 0x7f104c81, 0x81d574fd, 0x81814081, 0x82b78e7f, +0x2f29f42b, 0x277f4bff, 0xf49d4b81, 0x28581681, 0x22523281, 0x607ff910, 0x61ac1ad2, 0x58313ccf, +0x8168a21f, 0x7fe61879, 0x817f21b3, 0x407f2f45, 0x7f5fa666, 0x81bc41fb, 0xfd81fb81, 0x0ed48163, +0x81818181, 0x7f7b92fd, 0x44507d7f, 0xba45c9d3, 0x81852b64, 0x7fa918eb, 0x5f6ff5c6, 0xa4ad8181, +0xc27f8f81, 0xb8af5481, 0x7fc5817d, 0x7a814ea2, 0xf77fcf36, 0x50913981, 0x81a01781, 0x5ea1a4ba, +0x18d252b8, 0xc0416881, 0x7f817f1e, 0x0b4e38cf, 0x7ffb6922, 0xfca481b5, 0x7f1dc1ca, 0x7f560da2, +0x7f7f813a, 0x1900a881, 0x6d4d8116, 0x617f0469, 0xb3118e81, 0x95408119, 0x817ff08a, 0x7fdc7f8b, +0xf682a40a, 0x3381817f, 0x81b70d82, 0x7f7f6f81, 0x8190217f, 0x2f817f4b, 0xe195b209, 0x5ee39575, +0x8150bae6, 0xa4fe8b7f, 0x7f7f654c, 0x7f921971, 0x53817f85, 0x1c7f5ff6, 0x5f35737f, 0xce7fc2c6, +0x6c81817f, 0x7f7c410d, 0x5794308e, 0x15f4a87f, 0x7fb64d7b, 0x1d0a8181, 0xa8b67f37, 0x253b397f, +0x887f92b7, 0x7f81767f, 0x81c18101, 0xaa2861c0, 0xbd691cd9, 0x81d74ef3, 0x81a78153, 0xcb7f819b, +0xf681c5e7, 0x9f37817f, 0x3a7f869b, 0x56a41581, 0x04be8111, 0x4898b48b, 0xbc81465d, 0xb3815281, +0x7f6a1d45, 0x1fa87fce, 0x810e81cc, 0x7f8d410c, 0x8190a193, 0x81817f81, 0x5dadcbb4, 0x81a83699, +0x86a68164, 0xe27f1281, 0x682a7f27, 0x62a03cbd, 0x8181c75d, 0x39653881, 0xc6187e83, 0x617fd27f, +0x81917f90, 0x57796c81, 0xdc7f567f, 0xad712381, 0xdd81ecbe, 0xec17667c, 0x929e5098, 0x857ffbbf, +0x8d5b8153, 0x9769887f, 0x65173381, 0x66b8ad84, 0x043481b9, 0x81c05ca9, 0x449d4c48, 0x8c45bc82, +0xca7f81ee, 0x2d8f33e1, 0x81e22f81, 0xca9eb542, 0x8e947fe5, 0xb642a4a8, 0x7f22c73f, 0x85a6176a, +0x567f7ffb, 0x587f4243, 0xa2819225, 0x58307f9b, 0x2c6b0f81, 0x8130a068, 0x814fa904, 0x400a9f2d, +0x30617fd6, 0x81720981, 0x97b98100, 0x0ca17c7f, 0xf1816081, 0x690f8181, 0x94165481, 0x81817f9d, 
+0x81444268, 0x97759781, 0x8740267f, 0x757f1a7f, 0x7f7f81ca, 0x957fb87f, 0x816f723a, 0x4a8613f2, +0x3bfef57b, 0x7f1274e8, 0x810ab05b, 0x7fb19ecd, 0xb07fc75f, 0x7fb2965f, 0x60abb881, 0xc229a8e9, +0xb1817f26, 0xe324067f, 0x8181e344, 0x7ea59cac, 0xd21a7f65, 0x8d15989f, 0x09026bea, 0x1d7f933b, +0xae81c181, 0xabf37147, 0x0d08da7f, 0xa7ac4b9e, 0x8281e044, 0x7f8195b1, 0x7ff844de, 0xe9057fed, +0x85fcf1cd, 0xa89e7e2b, 0xf974a481, 0x1f813c6d, 0x237fcbbb, 0x655f8192, 0x8134062b, 0x6f91815d, +0xdf3281e1, 0xe6fd817f, 0x72b3007f, 0x86caf9cf, 0xea812981, 0x6a8ad67c, 0x36aaa01d, 0xbb102886, +0x812faf52, 0x6281de5a, 0x817f8174, 0xebff4735, 0x7894bf7c, 0x169e8194, 0x3bc281f8, 0xe69d2a15, +0xb8e5b68d, 0x367f9b8b, 0xea818181, 0xeb7fbbb7, 0x9368a27f, 0x5af5e19e, 0x5f7f7f7f, 0xcd518181, +0x7f948109, 0x817f4a81, 0xf8a0de7f, 0x667f7fa5, 0x1a8881c1, 0x8646b17f, 0xb0eb7f81, 0xe3fe2ba2, +0x460d18da, 0x3db45835, 0xb0c2847e, 0x9c8681a0, 0x817ff88d, 0x7fec639d, 0x817f7f7f, 0x81810d2f, +0xef28241e, 0x811d9db4, 0x4ac4ada5, 0x7ff29f7f, 0x7fd981ac, 0x7f815e23, 0xcc3dbc8a, 0x7c769b08, +0x6a07ef50, 0x66599c9c, 0x7e007f92, 0x9381aa5a, 0x33471b81, 0x79b27d96, 0x6d5d9d5a, 0xbf40687d, +0x7fbc408e, 0xae598111, 0x18222b1e, 0x0a7b817f, 0x7f4961b4, 0x81818f5e, 0xd5a8ae44, 0xe185bd50, +0xb4a65d81, 0xab81c3d6, 0x5f814b7f, 0x8db72efc, 0xc0be14d5, 0x77336a45, 0x2dca7e7f, 0x81857f7f, +0xf1d80b33, 0x1856b3dd, 0x789e75cd, 0x7f7f7f81, 0x8b7f6951, 0xc94cc5b6, 0x407f3f5b, 0x72c4b87f, +0x8135f77d, 0xab02ec1c, 0x8db481e0, 0x77da84df, 0x897f5c49, 0x8181b24d, 0x7f54715e, 0x233ef77f, +0x8165daa7, 0xd3813aa2, 0x7f7f7f18, 0x96e99677, 0x56814a72, 0xa4acba2e, 0x2683347f, 0x3d5b20ac, +0x887fb981, 0x90a9ec91, 0x008ab581, 0x81588181, 0x5c017f7f, 0x10818181, 0x5d86a42d, 0xd8587f4f, +0xed2035b1, 0x718168fc, 0xc3cccf7f, 0xaa812676, 0xa8d83930, 0xbdb119f6, 0x812b0288, 0xe17f7f7f, +0x989ffd31, 0x9e564b12, 0xabfa3578, 0xacd85381, 0xaf7074e6, 0x93a34c11, 0xc1819896, 0x5a5fa673, +0x6f8181ad, 0x4981167a, 0x817f965e, 0x961b7f29, 0xac539db6, 0x2b645dd8, 0x937fa506, 0xa1b900ba, +0xdd7f7f78, 0x7f866ce7, 0x81a9247f, 0x50d67f53, 0x177a8b82, 0x277ffa7f, 0x7f0942da, 0x444b3181, +0x9d8995aa, 0xbda844b9, 0xae765c85, 0x62818f7f, 0x307f7f00, 0xf37fe681, 0x817f7fab, 0x42e0813d, +0x49f3c7c4, 0x7fd543b8, 0x818183f2, 0x7f4b5500, 0xe2357a7f, 0xdc3a8133, 0xd13913ff, 0xabfd587f, +0x81c3814c, 0x667fab62, 0x9bf82b0c, 0xf84b9583, 0x6a28b2f0, 0x8154623f, 0x7f845e7f, 0x2750f07f, +0x517f2d59, 0x7ffe4db5, 0x60077f7f, 0x81816e62, 0xc40a30dd, 0x67f22b7f, 0xec817f81, 0x7fb8c67f, +0x98c9afd0, 0xb881ecaf, 0x6069c37f, 0x792c2c09, 0x709dc2de, 0x4481bb20, 0xd4b25981, 0xf8ef7792, +0x7381ae17, 0x7fe05250, 0xc3eb637f, 0x6d350069, 0x327f457f, 0x8ce7b8ab, 0xb281c1da, 0x7f817f61, +0xbbcf7fb1, 0xaf818145, 0xb77f5881, 0xb47ff1b2, 0xcec74a81, 0x5f81e7b4, 0x7f1b727f, 0x81b8701b, +0x508106c4, 0x6d815f7f, 0xbc815bea, 0x464b00f2, 0x78818181, 0x7f7f4f81, 0x817e2c53, 0x817e7fa7, +0x7f4da4a9, 0xdd444f23, 0x7f6cba39, 0x8141625a, 0x7f81813f, 0xcd7f817f, 0xa402814a, 0x817f049f, +0x6fb981b4, 0x7867bd7f, 0x9b45bc81, 0xe7739f52, 0x7f816181, 0xab844448, 0x7f06497f, 0xe555c183, +0x817fde68, 0x81bc0624, 0xe33f3a9b, 0x8122c673, 0x6c3d81d5, 0x392815dd, 0x26527f7f, 0xee649844, +0x817f6fef, 0x81ce32d4, 0x102f81fe, 0x81967f81, 0xa42b9c20, 0x81813c81, 0x2a67a463, 0x90592c8a, +0xc5810e67, 0x81d1dd81, 0xbd7f54e1, 0xf52a814e, 0x7f46a7b3, 0x5a89812b, 0x78e43eac, 0x7f776981, +0x817fc5c2, 0x3f818181, 0xc556c337, 0x7f0081a9, 0x817ffe6b, 0x7f095f3d, 0x819b2281, 0xb45881ad, +0xc0817f47, 0x81bd915a, 0xc7616b7f, 0x81e9817a, 0x44977f6c, 
0xbc81307f, 0x7f9ac3c9, 0xff061265, +0x3c7b0181, 0x13910e7f, 0xb499ed3c, 0xd850b881, 0x81781837, 0x7f97ff93, 0x81818f23, 0xb9b50d81, +0x49aa4fbd, 0x6b361230, 0x148179d3, 0xbd81dd1f, 0x4cd5d0d0, 0x9ad1738a, 0x2c7f718f, 0x7f9f03b0, +0x789d9ac2, 0x88db95fa, 0x9ae5ac64, 0x727f814c, 0xfa477dac, 0xa670a23f, 0x146afe7f, 0xd4d0451d, +0x6d81819d, 0x9622acc1, 0x818161a2, 0x68263dca, 0x7fce818d, 0x998193ba, 0x9d81a486, 0x7eb97f7f, +0x134470ec, 0x6681bde5, 0x4e2ea77f, 0x81777f7f, 0x7f7fa64f, 0xd3815a7d, 0x81fd4e50, 0x774481e8, +0x7f6c8148, 0xe76b7f81, 0x881bb4e9, 0x7f944f3d, 0xe6da7f20, 0x54c68176, 0x210ebb45, 0xbed6267f, +0xe22f2081, 0x8191ae8a, 0x7c7f257f, 0x667f811c, 0xe2edd355, 0xa4812181, 0x837f5481, 0x6d818120, +0xb7aa7f8f, 0xc17f7081, 0x7fba986a, 0x819a81d5, 0xa9816681, 0x8129c4f5, 0x37ba21a7, 0x29d18a86, +0x4be3ce96, 0x3981b184, 0x447fb50b, 0x261c8155, 0xc05ab18d, 0x777f7816, 0xe48be469, 0x81722d81, +0x7f47a8a1, 0x924a8b72, 0xa2ef4ecd, 0x105c817f, 0x7f90709b, 0xbc81443a, 0xa82f7f65, 0x615d928c, +0x7f81570d, 0x61b0919a, 0x7f33fc81, 0xe8817fca, 0x637ffb00, 0x9c337250, 0x817f8181, 0x08b01e0a, +0x7f4ae897, 0xa0969a3c, 0x7f9d7fe8, 0xe024598e, 0x9e794d7f, 0x72fb988c, 0xf8936747, 0xacbd7f8e, +0x4e7f9781, 0xa4b9f4ea, 0x50ccfbdd, 0x817f4795, 0xbcaf485f, 0x81811a25, 0xa97881c6, 0xecbd7fd7, +0x6e89a832, 0x1e815f96, 0x74d0437f, 0x858101b9, 0xbf1b7f8b, 0xcb7fdc9e, 0x1901bda8, 0x4c40c087, +0x5884f075, 0x7f561faf, 0xc3878172, 0xcd0081c4, 0xd93e8fa6, 0xe8748146, 0x4c7f207f, 0x20da2792, +0xc0d37f7b, 0x909a0133, 0x3b6fb281, 0x69819ba1, 0x81984b27, 0x362ddda3, 0x3981727f, 0xe44c7f81, +0x81b9d0bc, 0xebea7fa7, 0x81a47f2e, 0x81674391, 0x495500fa, 0x0eb881bf, 0x2e22f17e, 0x967f9e81, +0xd2727906, 0xdbe8814d, 0x93cf4d6d, 0x23c3b57f, 0x57be5fe3, 0x2eee7f30, 0x1a501f81, 0x76aaba53, +0x70817fdc, 0x817f2e1e, 0x28381f81, 0x1575817f, 0xc62845e6, 0x8181417f, 0x35d0230c, 0x606fcc28, +0x7f792e44, 0xb7b7ea76, 0xad677f7f, 0x2b1eecee, 0x7f7fe450, 0xefcc6668, 0x264e511c, 0x27818181, +0x81817f7f, 0xce82812c, 0x697f7fec, 0x7e859be9, 0xa1f65543, 0x8170a57f, 0x51231d81, 0x33c03182, +0x81aa7f81, 0x6b7ff34e, 0x51812fcf, 0x7f7f7fe6, 0x6281718d, 0x1eb02c7f, 0x81d64570, 0xa18368e1, +0x4ddb816f, 0x149b81d1, 0x067fa2c6, 0x92decc7f, 0x94001658, 0x814f6d6c, 0x79673f7f, 0x7ff28122, +0x83b97f7f, 0x81248a43, 0xfda17ffe, 0x9e81bc81, 0x81090181, 0x7fdaa350, 0x927f54d4, 0x78fe2e89, +0x7f3d097f, 0x147f2ea7, 0x7f85837f, 0x86a58181, 0xb7ad559c, 0x7796ae60, 0x4e7fef08, 0x2ea371f2, +0x817f7f8c, 0x7fc181c5, 0x8101818e, 0xf1588178, 0x44062eb8, 0xb0832da0, 0x81527f81, 0x7f0e817f, +0x8e06b3e5, 0xa6df2e5f, 0x9d7f7f81, 0xa14ba64d, 0x723d7f91, 0xdb81b64b, 0xb48c117f, 0xe75d0e81, +0x8bd9ba3a, 0x1f817f7f, 0x81847f67, 0x25677f5f, 0x3568b529, 0x7fa2587f, 0x24b02027, 0x47ca91dc, +0x81bba314, 0x7f844f7f, 0xbeb8aa0d, 0x9955fe5b, 0x007f9b81, 0xf0a27f6f, 0x1648e959, 0xc68193a6, +0xbea6ebfd, 0x1654cf81, 0x8142d430, 0x2e7f6693, 0xcc788195, 0x11817f81, 0x3da98171, 0x4fdc5a9c, +0x677fea81, 0x4c6ca0cf, 0x487f8181, 0x7f3b9013, 0x37387f7f, 0xc6b4217f, 0x27afb97f, 0x77b84002, +0xd3a67fef, 0x987f8181, 0x9518817f, 0x5ad4fd7f, 0x7b818146, 0x583c5581, 0xd9643b81, 0x687f92f5, +0x7f8109f2, 0xdfad937f, 0x79a45f7f, 0x007f9c0e, 0x6a6ba6ad, 0xa95bc7e9, 0x9be90653, 0x223381b7, +0x7f7f64ce, 0x81813ea3, 0x037f4f7f, 0x617f7f9a, 0x6481f081, 0x81eb4a81, 0x7f9d9a6e, 0xe80e8a4b, +0x4563f81e, 0x94db8781, 0xbf81bd04, 0x305d8236, 0x2c90d571, 0xa48e4600, 0x81633342, 0x2c537fde, +0xc70f5d7f, 0x65bf6f7f, 0x547f9632, 0x05847f1a, 0x23818147, 0xd6d9459f, 0xc97fb761, 0x8196816d, +0xb5417f5b, 0xb6f91bd9, 
0x55b17fbd, 0x3e90c27f, 0x4f7fc37f, 0x7f8c855d, 0xef35817f, 0x295d7b03, +0x3730377f, 0x9c811bac, 0x7fe3ca7f, 0xe87f8124, 0x7f7fe511, 0x2d7f7ca2, 0xba9bc726, 0xf07f9f81, +0x4b476e9f, 0xaa338173, 0x5659708a, 0x817fb67f, 0x81811ded, 0x81817f90, 0xa3077f81, 0xf0d119d6, +0xf4818752, 0xbd98271f, 0x95f610ed, 0x3fc227e0, 0x68139681, 0x3c6c2d81, 0x7f7f96cb, 0x3e8181b1, +0xbbe681c6, 0xb47fd2eb, 0xfdd1b74f, 0x4381fe5f, 0xb081f399, 0x64a10081, 0x79ee3ee5, 0x9559c8a6, +0x814ad4a8, 0x817f7531, 0x81813b81, 0xbfbf9773, 0xe27f81a4, 0x7fc14269, 0x917f8158, 0xaf9d89a7, +0x81b22f81, 0x7f54d4d8, 0x0f39a503, 0xec9d3ea0, 0x35818131, 0x8c7d99a6, 0xc0158181, 0x53817f5e, +0x817f81b0, 0x9a7f9a7d, 0xc081747f, 0x517f704d, 0x7f2db90e, 0xa6dfa67f, 0x36817fce, 0x30ae720e, +0x7fd73d60, 0x5c593bd5, 0xf57f9d7d, 0x448151a3, 0xc82db29a, 0x019b5a75, 0x7f876729, 0xa69efc9d, +0x78a67f81, 0x536c817f, 0x81e470d4, 0x56f270fd, 0x2934478f, 0x9c5a7f68, 0x4ecd81ef, 0x7f5c2b23, +0x7f817f03, 0x827fb17f, 0x7f81c35e, 0x7d7f03f8, 0x7f5da67f, 0x8100671d, 0x06e1a8b0, 0x998d112a, +0x9282ae98, 0xed72813b, 0x7fbc6bda, 0x7fa97f3e, 0x36d1510c, 0x0d81819c, 0x56f8fd97, 0x815081a4, +0x7fc78133, 0x69d34542, 0x7fc1727f, 0x7fd22a97, 0x418a1df0, 0xf519077f, 0x7f6e697f, 0xa9817abc, +0xf43917b5, 0x82e3d76b, 0x86327fe7, 0x7f7fb899, 0x9f81b0a4, 0x475381c2, 0xc62b92e8, 0x7f1b49a3, +0x3679b9d7, 0x817f5364, 0xa87f7fc2, 0x45dcf6ba, 0x3470789d, 0x913e7fa5, 0x06b53681, 0x717f8de7, +0x027f6352, 0x7f7fbe7f, 0x7a81df17, 0x0356fb00, 0x307462bf, 0x6716c56c, 0x8157c7b4, 0x7fcd8181, +0x4b81dd07, 0x3b667f7f, 0x7fa6815c, 0xea37397f, 0x61e94781, 0x4e7f8177, 0x81436b5b, 0x4e818172, +0x5b204b37, 0x115501b4, 0x81491681, 0x661468bf, 0x501f88b5, 0x7f4e63e3, 0x8146e97f, 0x7f09f895, +0x7fd70e81, 0x7f7f7f16, 0x3f327fed, 0x05055631, 0x63b4ec13, 0xc03f16da, 0xa0af33c1, 0x942a7f42, +0xb4ba6515, 0xe99d81de, 0x7f78d8b0, 0x446f2c42, 0x92cf7fde, 0x6b9e7f7f, 0x948181cd, 0x94b9ad81, +0x582e8181, 0xc31748a9, 0x39c6d47f, 0xb2817fff, 0xc5817f81, 0xc6887fa4, 0xe69989e4, 0xa67e7a5c, +0x09477f9a, 0x2d4d817f, 0xb24f81d1, 0x3ff77f7f, 0x6a66813b, 0xf5050547, 0xcf917f6c, 0x81917f7c, +0xb8b23b81, 0xcab60081, 0xe17f7f81, 0x3b47547f, 0x457f4781, 0xe17e9c85, 0xd4b5ba00, 0x777fc3f4, +0x8b013d8c, 0x5381a2dd, 0xf60c7f5b, 0xc3f2c212, 0x5c6c7f35, 0xc77fae7c, 0x7f790cc8, 0xe6848181, +0x7f81584b, 0x813688e2, 0x4cff2987, 0xbeb48126, 0xc4be67bc, 0xb8738181, 0x886a9c7f, 0x519881a3, +0x77fbd5c9, 0x4181c081, 0x20813287, 0x6121bb2d, 0x7f7f227d, 0x1a8181a6, 0x811a5d06, 0xc890a57f, +0x7faf7ece, 0x83a36788, 0xd9fd8e37, 0x7f7b985e, 0x36819a38, 0xbd6096c4, 0x7de5be7f, 0x7bc68150, +0x7f8831bb, 0x81818144, 0x434a63e0, 0xaea4ad6e, 0x407f2785, 0xe8df81ea, 0x4a148aa7, 0xf89f7f65, +0x3c757f62, 0x7f81c80e, 0x81818881, 0x81caa47f, 0x9c570881, 0x1c1a8354, 0x2fa8008d, 0x7f7f81ac, +0xa781d413, 0xa9a45530, 0x327f297f, 0x7f647f5c, 0x81a6324e, 0x81a954cd, 0x7f1c2881, 0xe30b2eb3, +0x81b18181, 0x81157f18, 0x967f7f96, 0x819a7fbc, 0xcccf4fc2, 0xc43a48d5, 0x7fa33d74, 0x3aa97f50, +0x5277817f, 0x69afad86, 0x7f81f278, 0x4fa29716, 0x91818170, 0xdc1a7f5c, 0x278159b1, 0xe87f069f, +0xd28cc56b, 0x157f8181, 0x81339981, 0xcf818114, 0x6eba0b8d, 0xd281527f, 0x8249d583, 0xb37fbc43, +0xc181357f, 0x819f6163, 0xaf194881, 0x7f9872a4, 0xf3c3ab2c, 0x47507f81, 0x523f812e, 0xff81d105, +0x5851cbb0, 0x7fe881c3, 0x60cdbe81, 0xc838ccbe, 0x81817f7f, 0x1d76cb34, 0xe100407f, 0x81d78181, +0x2f81817f, 0x927f3ac9, 0x3cc887bb, 0xda7d6d7f, 0x2c7f1e7f, 0xf58e41ee, 0xfb816281, 0xa211d47f, +0x6eea4bf9, 0x297097b8, 0x9d819e81, 0xf6ef4156, 0x2afb4362, 0x7f16647f, 0x8e10c4c3, 
0xb17f6181, +0x3b9d2949, 0x53a6c867, 0x810faffb, 0x8197d27f, 0x41873a37, 0xc48d7f7f, 0x81b6fb69, 0x144b2b78, +0x8181d5c4, 0x787fbc24, 0x4381929f, 0x95927f1b, 0xa3a5e451, 0xb48cc58d, 0x818e81d1, 0x9a81cbaf, +0x713ac87f, 0x7faaa694, 0x9f7f81c3, 0xf65b307d, 0x7fdfa25e, 0xa0bae1fe, 0xf3813f12, 0x19817fc5, +0xc0278a81, 0xe47f8158, 0x817fa281, 0x5bbc8181, 0xd63caaf9, 0x7fbf8181, 0x0054157f, 0x7f7f311c, +0x7f817f56, 0x56c22692, 0x8143b624, 0x0845a315, 0x107a8119, 0xc1cd5c7f, 0x7f62e549, 0x7fbb757f, +0x6b8dec74, 0x6893e220, 0xbb224ba9, 0x31819871, 0xdd2b4f57, 0x3d030250, 0xc1539dc4, 0x1c42df45, +0x7f7f6631, 0xaa81eb8f, 0xa6d4edf5, 0x81ec1981, 0xc92d77f8, 0x1ba35b2c, 0x0e1fa0cd, 0xdd257f7f, +0x817fa041, 0xe9815f81, 0x60f1b338, 0xb97f7fad, 0x097f857f, 0xae6a7f60, 0x145dbced, 0x81b7607f, +0xd4647f3d, 0x7f7f52ec, 0x7fdbbea1, 0xc74feec6, 0x1c197f48, 0xb17d1d61, 0x2e68b081, 0xb717d181, +0xda46377f, 0x105e81b7, 0x7f2e7f58, 0x67c33a7f, 0xb83f666b, 0x68518691, 0xc556f081, 0x7763c400, +0x817fbc99, 0x3a18df21, 0xfbde9374, 0x667fc646, 0x737f7fe9, 0x7f987fbe, 0x27dcb107, 0x25f35d30, +0x420adcd0, 0xab7a7f7f, 0x7f8a28c1, 0xcd7fc20e, 0x6ce69bcc, 0x7f308ce5, 0xb2c96e81, 0x95dbeb1e, +0x7f7f567f, 0x6b7fa0a5, 0x997fed60, 0x9281da7f, 0x3f7f617f, 0x7fc4608e, 0x817f4781, 0xc36b7f81, +0x7fddc17f, 0x6fe27020, 0x8a5a2b27, 0x3a4bfb81, 0xae9e4a81, 0x7f7fda7f, 0x5a7ff006, 0x39b6a0b7, +0xb9721181, 0x725edb2f, 0x8903cf08, 0x81aa7cde, 0x7fe0817f, 0x6c9e7fe8, 0x7f5c8140, 0x819d8181, +0x58777451, 0x7fdb4784, 0xc3186166, 0x817f458f, 0xdce8cdc9, 0x7840bd81, 0x1781954b, 0x60240082, +0xca242d7f, 0x788428b6, 0xf67f8d7f, 0x657f7f8a, 0xbace8106, 0xb8816e93, 0xf9b07ff9, 0x818102fa, +0x32028181, 0xfe7f8e27, 0x818d7f26, 0x7f0781a8, 0x7f7f73f3, 0xfaa78135, 0x7f81810b, 0x72676281, +0x812ed271, 0x7f8a961b, 0x7f81817a, 0x367f4181, 0x03ecb2f0, 0x6c810a7f, 0x577f81b1, 0x81e889b8, +0x7f7f707f, 0x7f90368c, 0x86a36487, 0x42b8c17f, 0x91816160, 0x8150b0a5, 0x81390fa1, 0x7f87d07f, +0x448548d0, 0x32810b93, 0x7f20da7f, 0x5b6a7fed, 0x5c812d7f, 0x7f5a7f69, 0x0c36d681, 0x2ee0544a, +0x817d861a, 0xa47fdaf7, 0x7c8127b1, 0x96216604, 0x4f8138de, 0x03e57e14, 0x73050f7f, 0x42007eeb, +0xe303f07f, 0x2f8a7438, 0x8a7f7fbc, 0x866f8165, 0xf496535f, 0xea2d9bb2, 0x16e67386, 0x6bf837ab, +0x411874b4, 0xa7c15c2a, 0x74bb7f72, 0x8181ab81, 0x81e33481, 0x68b47f7f, 0x957f8181, 0x947fcf27, +0xb47f7b20, 0x997ca926, 0x5a457f0f, 0x87403699, 0x811e3ccd, 0xdb857d7f, 0xc9cec481, 0x2998d8c3, +0x231fa37f, 0x876a2942, 0x812d7ff0, 0x7f90959a, 0x811598a2, 0x7f814697, 0x7f7e1467, 0xad3a8154, +0x81813f26, 0x0851321a, 0xf77f9f7f, 0x76b16294, 0xa56c3e3a, 0x7fd5c569, 0x30dd8185, 0x7f7f819c, +0x7f8a7f7f, 0x81dc4da7, 0xe1ce9eb9, 0x8181a2c8, 0x7fda81be, 0x01797f42, 0x7b7f89a8, 0x81e9de7f, +0x7fa76fc9, 0xdebec9a0, 0x7f381e89, 0x81ba0d7f, 0x459a7f7c, 0x81ac8181, 0xb65e8279, 0xdf8174cb, +0x6ea24abd, 0xb8486638, 0x81637f7f, 0x011d5c81, 0xc27ff63d, 0x81d48160, 0x7c81ba7f, 0x7ffa7f73, +0x7f8881f0, 0xc2d6a15b, 0x7f5d7f7f, 0x06818fd5, 0x8c23ea91, 0xf3d8e75c, 0xab81be93, 0x817f7fa8, +0xe57f816d, 0x6e81f27f, 0x81e566aa, 0x88397868, 0x45f9b7c1, 0x2c4d81e5, 0x52765111, 0x813e3d60, +0x4f45c3e7, 0xef777f91, 0x3e7fc481, 0x3e810d42, 0x81c53f7d, 0x2f7f8481, 0x3a7f8157, 0xf1662881, +0x81f03791, 0xaf818136, 0x476b7f7f, 0x6890cfa6, 0xe4f7e9ae, 0x81f6b9ba, 0xb0623aac, 0xea7f7031, +0x72c30000, 0x407f8197, 0x8176917b, 0x81184acc, 0xdacf7fa6, 0x996cd37b, 0xa68175c8, 0x5e6082e3, +0x067f927f, 0xc47f4481, 0x7fc58113, 0x7f561aff, 0x817f46b2, 0x7f7f8159, 0xcebbc57f, 0x72765b87, +0x817f2f1e, 0x885c7c81, 0x7f7f65a9, 0xeded8987, 
0x7f3bab81, 0x812f7f6b, 0xfc67d85a, 0x13297781, +0x607f9621, 0x507fa647, 0x4e39ce7f, 0x81ff5c4b, 0xa246316c, 0x81457f7f, 0x5781812f, 0x1463aa9f, +0xc35d7f14, 0xbcbb5771, 0xd7e45781, 0x636c7365, 0x07102b6e, 0x75e1337f, 0x7f2bb116, 0x257fa0f2, +0x97597f40, 0x7f818810, 0x7f7f7f5c, 0xca7ff660, 0x9c916181, 0x903ea1cc, 0x6c8f7f81, 0xc2b07f25, +0xa43d818c, 0x81f67fb3, 0x67f56081, 0x1ba55531, 0xa77fc6a5, 0xda479281, 0xe7647fdb, 0x7fd2f34d, +0x1b7f4c68, 0x7f8c5781, 0xbcf18110, 0x7fd3b883, 0xde81687f, 0x81358948, 0xeb813d17, 0x25040181, +0x7ff8a39c, 0xbc574351, 0xbe677c7f, 0x487b5068, 0x6c7f816c, 0x7f2dca81, 0xb8c7b17f, 0x7f869781, +0x9c99f0ae, 0x437febc4, 0x289d81fe, 0xab74307f, 0x7c7b299a, 0x62593f52, 0x7f4e513a, 0x7f9b81c0, +0x9e2c0de5, 0x43818175, 0x857064a0, 0xd17f8db2, 0xb0a055de, 0xf04ade7f, 0xe37d7fd6, 0x3956789a, +0xcfbcb53a, 0x7fa39e7f, 0x7ed171a8, 0x49a65c89, 0x3ee166d9, 0x2daca6ca, 0x53d051e7, 0x957fc181, +0x0000e873, 0x815fbf9b, 0xe986ac61, 0x7162fd81, 0xe428090f, 0xe2814730, 0xab8d9d3d, 0x8164e6da, +0x8a6126e4, 0x81817f6e, 0xeb818281, 0x359f7f81, 0xb663c39c, 0x7fb856a8, 0x9d812e30, 0xd07c7f2e, +0x6b58ceb5, 0x8181ca7f, 0x42c081b5, 0xd58a6fd8, 0x7faa9b85, 0x81818181, 0xbb3781f4, 0x7fa98173, +0x17f17f7f, 0x7f819862, 0xd97f0781, 0x812f579a, 0x608196ec, 0x23458155, 0x5bc1a902, 0xaa39817f, +0xcb207d81, 0x227f7f9c, 0x81a54ad1, 0x81027fb4, 0xb6711835, 0x8181a57f, 0x7fed8141, 0x7fa95ec0, +0x7ffc7f8f, 0x572f81e8, 0x7ca87f81, 0x7fbeb67f, 0x637f814e, 0xcf11af7f, 0x307fb081, 0xaa85927f, +0x6f5dd995, 0x81db817f, 0x68275869, 0x724dff18, 0x8176ef58, 0x54814c7e, 0xece02300, 0x5ad71a56, +0x689e7fa7, 0xb67fa77f, 0x107f1359, 0x51a63bc0, 0x22c887ca, 0x0a0e34b7, 0x81587fae, 0xbb79604a, +0xfbb7077f, 0x81967f81, 0x81227187, 0x8129817f, 0x43a84e8a, 0xb981a34b, 0x5d307f81, 0x4d7fe9bd, +0xc973a181, 0x78647f8b, 0x00274105, 0x5d7a09e5, 0x6565a681, 0xfbd0ea6f, 0xf986037f, 0x377fc3e0, +0x9ba57fe8, 0x4f7ff13c, 0xd61f7fa0, 0x817f7fbf, 0x7f3e25c7, 0x81c42fdc, 0xae398181, 0x3125afbf, +0x7fed479c, 0xf4f52ff4, 0x7fc99d81, 0xaa417fb7, 0xa1324f81, 0xff7b817f, 0x167f6f86, 0x2a86c481, +0xa127c15c, 0x6d8e0000, 0xcf207f6d, 0x6b815681, 0xd01f50e7, 0xab81e704, 0xce81a4e4, 0x7f177f7f, +0xaa819181, 0x7f81a044, 0x1c0c7f5e, 0x4f0e56cf, 0x46a581ca, 0xb3d52970, 0x6fb3b2c8, 0x7f8189de, +0xf0058133, 0xd181d0da, 0x7481c46e, 0x427fc7d5, 0xe4528188, 0x4a359b7f, 0xd1ae25bd, 0xb735818f, +0x811a2117, 0xddd2829a, 0xec696aa1, 0x99cf945e, 0x518f3d7c, 0x8e814381, 0xa2819647, 0x85af7f43, +0xc81d7f90, 0xb8361781, 0xb67f9a64, 0xc3d38181, 0x97b07dae, 0x54687f7f, 0x206d5b79, 0x437089ad, +0x7f814881, 0x77b63840, 0x61816434, 0x9dca5e22, 0x818129bf, 0x8b49dd3b, 0xc792813a, 0x3b407f63, +0x7fc48f8e, 0x7f96815a, 0x6b430c8b, 0x974ee1a1, 0x7f8181fa, 0xb47098a2, 0x81168181, 0x816b71f5, +0x0a596d81, 0xa5945681, 0xa14181d0, 0xb4944bfa, 0x81f7816b, 0x7f7f287f, 0x7f81cc48, 0x6d819077, +0x7dff7f58, 0x5a4e7f81, 0xb7ee6df2, 0x81712a85, 0x0a652e7f, 0x86a1277f, 0x3c568181, 0x7f817f65, +0x64fb3e0b, 0x71a9687f, 0xa65c7f72, 0x3fb34043, 0x8132f77f, 0xec8133bf, 0x7f8119a5, 0x081f5bb1, +0x7f81777f, 0xedcca8a1, 0x7ffe7f25, 0xec934f43, 0xdbcc8181, 0x64416881, 0x4a8d93f4, 0x7fb64c7f, +0xf12f9bc2, 0x812a8f90, 0x1b9c32de, 0x30a30d81, 0xf079e16a, 0x633720c2, 0xad44fd53, 0x3c812f3b, +0x56b32873, 0x0000b62f, 0x76cb58a6, 0x31086ead, 0x9c307f65, 0xec664f81, 0x7f6186cd, 0xb7047f39, +0xe87f2ccd, 0xe79f8f3c, 0x88b34352, 0x7f6dec72, 0x8b4aca8c, 0x217f1c55, 0x53d76693, 0x7f984d2d, +0x125abe92, 0x7f37677f, 0x4f819c81, 0x7fb23b2e, 0xd2c5ea58, 0x7fa7817f, 0x57817f7f, 0xd45239ec, +0xc887fa03, 
0x7f7f7f42, 0x8181444d, 0x2ba67f1a, 0x7fcc527f, 0x813d7f81, 0x68ad12aa, 0xa87f6763, +0x62577f7c, 0x067f7f5a, 0x3201a788, 0x9872b0a1, 0x814548d0, 0x3a883096, 0xc4701d52, 0xb59381dd, +0xdc3381a8, 0x816e3f7f, 0x4f57813e, 0x4679d77f, 0x9a14b75d, 0x8181da4b, 0x817f9d48, 0x52818181, +0x2281c97f, 0xbfb92d7f, 0x1f6f357f, 0x2ca57f3d, 0xb9b4626f, 0xab4bba25, 0xf27f4b5a, 0x4b478a56, +0x818181c3, 0x6a767f29, 0x81e16dcd, 0x4d818181, 0xd1819ce8, 0xdbce6082, 0x757f41bc, 0x777f7fbc, +0x5fa9d103, 0x0241e8d6, 0x1bb381a1, 0xf08177b7, 0x00df007f, 0x3bcdbb5f, 0x5e7f710a, 0x7f3f7f1e, +0x657f0f2b, 0x2ffae79b, 0x495c8110, 0x7f7f8150, 0xe8449f7f, 0x8434a77f, 0x42d56352, 0xfd717f8d, +0x81c514b3, 0x7f147f50, 0x7f856696, 0x7f844c81, 0x8157714b, 0x50cba2ca, 0x73539ff7, 0xb5ed7fea, +0x5d1f637f, 0xbc66496d, 0x68b633df, 0xea76156c, 0x7b4654a3, 0x81547f8d, 0x7f537f27, 0x5db64581, +0x7f367f81, 0x7f81a08c, 0x81440000, 0x3965f076, 0x40977036, 0xcf81ee31, 0x6e7f19bd, 0x703a7f03, +0xe94b81c6, 0x4330aa7f, 0x060411c3, 0x91f6f37f, 0xbdc9e781, 0x81b87f51, 0x4b6e228f, 0xbc3eb87f, +0xad916bce, 0xdfb0976a, 0x98b86387, 0xb651bdee, 0x8cf46cb9, 0x9b819a8d, 0x9d8181f1, 0x7fe0ab97, +0x714d0264, 0xb87f67a4, 0x7f9d3150, 0xe95c7f81, 0x81a7537c, 0x93f4d02e, 0x433a477f, 0xe5067fab, +0x81de5836, 0x64667f81, 0xc5255988, 0x0a7f824f, 0xf97b7fbf, 0x40c97f82, 0xc1d47a74, 0x7f7f3081, +0xfe49fc23, 0xba819538, 0xb87f5581, 0x5ea47fb2, 0x35b37292, 0xb9848178, 0x276f5235, 0x647f4ebd, +0xee2a491a, 0xfd29bb60, 0x21f53728, 0xae9d817b, 0x2ba97f81, 0x6accd440, 0x7fbec77f, 0x909b817f, +0x227f7f35, 0x767f22d5, 0x0ba3c2d1, 0xd38f817f, 0x467fd862, 0x673e8181, 0x7f3b7f81, 0x70427f81, +0x6f725d24, 0x731d64de, 0x32a9816d, 0xc1817f7a, 0x627f8181, 0x7f947615, 0xc37f72d2, 0x7f7f4261, +0xf147be64, 0xc481afab, 0x7fd9a5dc, 0x5c812690, 0x7c34783b, 0x9087817f, 0x7fd25245, 0xea5fcfb1, +0x81817f81, 0xc84ae6b4, 0x8149c875, 0x814a1a7f, 0xab7f6881, 0x6e7f3d81, 0x3ca58181, 0x75cb4781, +0x8181817f, 0x5ec4f2c5, 0x8165e07d, 0x643d6e7d, 0x78bd9764, 0x40817f24, 0x5f077581, 0x7f2ebb4b, +0x7f7fa781, 0xe657ca7f, 0x0000ff21, 0x7fa9bf6a, 0x81c1c30f, 0x9138393c, 0xa2b38174, 0x7f4c24bc, +0x2b918175, 0x7fea5d7f, 0x50c1a17f, 0x0bfefac7, 0x81fe4ea4, 0xab7f7fbd, 0x559b7fb9, 0xcb81e356, +0xd7963f72, 0x7f7a1b81, 0x8c9cd9d3, 0xbf889d1f, 0x161a88cf, 0xe6a0abb9, 0xbb014f2b, 0x81fa947f, +0x7f41509e, 0xb4480d7f, 0x7f7f8181, 0xeca6b540, 0xd27fc381, 0x3aaeb38f, 0x7f7f7f7f, 0x372a7f05, +0xd6a34b81, 0xee378159, 0xcf8181b8, 0x07487fcf, 0xcbb4ed77, 0x29817fab, 0x7fe5b342, 0x63c3bb81, +0x0724d1d8, 0x50e47f7f, 0xa67f8f93, 0x8181c37f, 0x28817a81, 0x7f81fb7f, 0xaab17f7f, 0x81997f7f, +0xa423816f, 0xf953c16a, 0x4c2362cc, 0x81812bf3, 0x5a817f4a, 0xffcbcce0, 0x9c7f8776, 0x5b5a7f7f, +0x64817f92, 0x5139c47c, 0xbe8573ce, 0xe5457f81, 0x2ca4815b, 0x503e14b8, 0x4ca75681, 0x907f7f7f, +0xa2377d19, 0x7fd7c09a, 0x7fbc8191, 0x657f61a2, 0x814f5c81, 0x7f48607f, 0xa16f3ad5, 0x819d227f, +0x247fc10e, 0x7f817f32, 0xc39981f5, 0xb357261b, 0x347f7027, 0xe3814506, 0xa1032e49, 0x979eb571, +0xb06ac281, 0x81815e7f, 0x34b57fb7, 0x7ffe2681, 0xc181630b, 0xbda9ce3b, 0x81f9287f, 0x4ef12081, +0xb86a54ba, 0xbc4d13c7, 0x81817fd9, 0x62817f1c, 0x7f7f4fa1, 0x847f7fce, 0x81bf68f5, 0xd7625f73, +0x81e79d7f, 0x8137a1ca, 0x423581cb, 0x26580000, 0xd7d6811e, 0xce6e6c81, 0x8168173b, 0x498468b2, +0x8c7c817f, 0x6f81b881, 0xad899a25, 0xd049e481, 0x7fdd817f, 0x63816bbe, 0x048b8d81, 0x8eb764c6, +0x987b9ea5, 0x81fae493, 0x7f72817f, 0xa57fbd7f, 0x7f0f81b3, 0xb37f4a33, 0x2e092988, 0x1b172b25, +0x09107f3e, 0x48f5fc7f, 0x69aaa80e, 0x184fe781, 0x7f9db5e6, 0x8195276c, 
0x252a8181, 0x8b81ccfa, +0x3c308181, 0x9d19b086, 0xf0d589f7, 0x207408ca, 0xe34e9ed0, 0xe67fbbf0, 0xc0996525, 0x0dca5881, +0x813d4181, 0x7f729738, 0x0a865e5d, 0x8bbea581, 0x5c4aac3f, 0x8181a63f, 0xa1f0b873, 0xa92481d0, +0x837f817f, 0x047fdd84, 0xc8767f2a, 0x441a81be, 0x818681aa, 0x05490179, 0xba81ba7f, 0xde246581, +0x466d70d7, 0xb6bc3f5b, 0x5c428f2d, 0x8105cb77, 0x0206175a, 0x5f6ac2b1, 0x9a4d2a4d, 0x7fb0917d, +0x1281ce12, 0x81028181, 0x41b0a4ad, 0x29973681, 0x973e992e, 0x81617dba, 0xc1818181, 0xb87fc1af, +0x81137feb, 0x627f5210, 0x8181de1d, 0x7f615f70, 0xabbb817f, 0x817fc57f, 0xdbe5b6f3, 0x9c8a7f72, +0x3c2826df, 0xae03d3b5, 0x7f6cc481, 0xd9c41c7f, 0x83ad7f81, 0x336370a8, 0x8bbfaf2e, 0x3561caa5, +0x548d142c, 0x810366b5, 0xa981d36e, 0xbf3afc7f, 0x7fd78157, 0x61817bf1, 0x74afcdd7, 0x18ac3d61, +0xcc45b6e0, 0x3d81af88, 0xf97934de, 0x947a8f81, 0x8b7fa6b8, 0xe0dd8981, 0x2d812381, 0x967f6409, +0x24bc0b1d, 0xdb7fa943, 0x18197fc3, 0x47ef7f6b, 0x66a6bb7f, 0xd374af81, 0xf48e7f9f, 0x52399754, +0xe1abbf6f, 0x04751d7f, 0x3690c581, 0x557fc3a6, 0xc1815413, 0x9086df7f, 0x81e57481, 0x81538167, +0x4df9c981, 0x9463a57f, 0x81c5063a, 0x7f7eaa81, 0xa6dc9423, 0xabc07ffb, 0xd981817c, 0x816064ae, +0x7f815dab, 0xc0b12da0, 0x1444be7f, 0x3a7781e0, 0x887f527f, 0xc0817f1f, 0x05cba881, 0x6599057f, +0x817ffd53, 0xca907fab, 0x557f0d7f, 0x5d029ce9, 0x57818168, 0xc4093382, 0x52e5b33a, 0x763e09b7, +0x814da277, 0xf9208cc5, 0x817f7fcb, 0x7f5fac7f, 0x6d7f6544, 0x7f0a7fec, 0xe7b4337f, 0x7fae8181, +0xa8c5608c, 0x6f9bb388, 0xb27fc773, 0xf4acaa81, 0x1cad30ac, 0x2d817f8b, 0x2b817f16, 0xcca2a581, +0xcb77dad4, 0x58487a22, 0x7fe57f69, 0x45387012, 0x73504724, 0x61357f81, 0xc77f8181, 0xabc7e11b, +0x8181c16e, 0xa681ab27, 0x8c81c48f, 0xbd1de360, 0x9924337f, 0x7f230156, 0x3dd6a87f, 0x477f9381, +0xbeb38181, 0x47815ccb, 0x8125817f, 0xb8057f7e, 0x7f7fc081, 0x46815873, 0x5f93ba81, 0x7f35767f, +0x7f7f5a94, 0x748c84e5, 0x4c7ffc90, 0x7fcd247e, 0x44898e81, 0x1268ecc9, 0x81d2777f, 0xda42e7c7, +0xee267f81, 0x7f5f269f, 0xe3d60c91, 0x81c97f0b, 0xd7b00000, 0x9b813a83, 0x7f7f8181, 0xb71cfa35, +0xf5888156, 0x853e9cc5, 0x1c1c1014, 0x2e7f8b1b, 0x81bea644, 0x70f4e8d8, 0x8158a181, 0x18caad9f, +0x3b28815e, 0x7f460155, 0x7f7f7aee, 0x444b51e5, 0x8682a381, 0x7f508181, 0x5e86466d, 0x7f81287f, +0x7f7c7f50, 0x8173e4de, 0x4c747a65, 0x69816cd7, 0x6674813b, 0x48274e90, 0x92ccf278, 0x99f181c7, +0x81b20243, 0xbf4b4b81, 0x81d981f4, 0xa1e981dd, 0x6c7f7b6e, 0x98cad579, 0x9a7f1685, 0x81e8812e, +0x5d7fcf52, 0x37819772, 0xdb437fab, 0x1d83782a, 0xd3ff71b3, 0xc7e28181, 0x81817ca6, 0x1808127f, +0xb515639d, 0x812c8108, 0xa2a97f14, 0xdd8102c6, 0x727f7f98, 0x5f197f19, 0x7b22817f, 0x46562e7f, +0x59b89c7f, 0xaba28d81, 0x5e4e7181, 0x909da6b9, 0x817d8128, 0x9c167abf, 0x3ef4817f, 0xf5628d72, +0x97727fbb, 0x88c6a600, 0x81c9927f, 0x817443e0, 0x81ac7f7f, 0x7fec67cc, 0xc353bb96, 0x4caae113, +0x7fa37fd9, 0x525c45dc, 0xdf798ba6, 0x7c811049, 0x812570c7, 0x7f788a6b, 0x797f1fef, 0x2022fd81, +0x38870936, 0x813f7d7f, 0x81810941, 0x6b75817f, 0x5aa11953, 0xcc7f7f81, 0x61778581, 0x5e458181, +0xcfff7f90, 0x9020747f, 0xb4e35ad5, 0xea0e8109, 0x21f97f13, 0x41c07f40, 0xc6ddc4ad, 0x085227c8, +0x11dc9f85, 0x7fda7f55, 0x5a92b34c, 0x11659b4d, 0x000081bc, 0x81816781, 0x81cce139, 0xf165d51c, +0x810a817f, 0x7f7f82d6, 0xb5b3817f, 0x40909bfb, 0x7f04a01f, 0xd0288164, 0x810164f3, 0x59778b81, +0xc87f2997, 0x9d99417f, 0x65528173, 0x727f454a, 0x9f187fd9, 0x37948141, 0x217fcf7f, 0x81818319, +0xc27f817f, 0x47817f81, 0x756e817f, 0xa28c8cd3, 0x84b77f81, 0x44ef8e4f, 0x7f7f7f7f, 0x932e2c45, +0x680d20a8, 0x817f1122, 0x52128a21, 
0x2b8136ff, 0x0b7f817f, 0xda8c817f, 0x24c17f7f, 0x3dcb39b7, +0x81d18181, 0x81fe1a81, 0x0590978b, 0x71b20319, 0x7f65bfb4, 0x4fb42290, 0xde1a7fd2, 0xc8bd77d3, +0x917f32a0, 0xb03fd9db, 0x4c1e872b, 0x814f5481, 0x44817f7d, 0x398a887f, 0x4a19e8ac, 0x4d818189, +0xc3818181, 0x7f8198dc, 0xb77f527f, 0x817fe681, 0xa83d818c, 0x420b5e81, 0x6e5adbbf, 0x7abd56c4, +0x717d7f7f, 0xc7811c0c, 0x767fba89, 0x6d1d5106, 0x5a06df17, 0x86c28181, 0xcdd6888e, 0x0e747f8a, +0xde81a9c3, 0x45459db8, 0x81114e22, 0xbd7f8197, 0x3c3d85cf, 0x8c815fe8, 0x544d7f64, 0x81986177, +0x8c20a0b5, 0x7f4b8178, 0x17db6031, 0x7f42df7f, 0xee93cc0f, 0x8ab829b2, 0x29a04e65, 0xfc30035f, +0xc57b7f7f, 0xe51ef77f, 0x7fd95481, 0x76957f7f, 0x819f945e, 0xc91a187f, 0x81657e40, 0x8192816d, +0x7339dce0, 0x8bd1c3c7, 0xa2812e1b, 0x81db3d8e, 0x1781da89, 0xd0810000, 0x7fc8c57a, 0x8fc32003, +0x1c847f81, 0x10dfb271, 0x7ff8cc62, 0x2037d87f, 0x8151501e, 0x17c74c74, 0xb8ab2852, 0x4b7f1c97, +0x6a811596, 0x46acc3e0, 0x3c81f49e, 0xd6d81a4e, 0x2e815066, 0x46218186, 0x41682bb4, 0xa1ac315d, +0xb87fdc73, 0x378181ad, 0xe97f979a, 0xccb07981, 0xb10a7f38, 0x03488893, 0xfef78df3, 0x8145467c, +0x3b67b401, 0xb3cf34bc, 0x81476c00, 0xdd638b81, 0x5e43af81, 0x2bab7f6c, 0x6d7f7f6f, 0x7f657f70, +0xbc816e77, 0x8981c27f, 0x3e40b685, 0xd93d7256, 0xbd86c8c3, 0x817fdcce, 0x43899081, 0x817f1693, +0xc0db9a68, 0x7f57a5a8, 0xd14c817f, 0x8c4ab37a, 0x901081dd, 0x292a452b, 0x7f81e85a, 0x189ece81, +0xc10b9401, 0x81817f69, 0x358b3d7f, 0xa86bb1a9, 0xbd7f7f8a, 0x57fd7fe3, 0x81964506, 0x74295986, +0xab7fbc8d, 0x817f62e6, 0xab816277, 0x5737ab20, 0x9c7f6f50, 0x7f5781af, 0xe344ef7f, 0x98a695ca, +0x81d2a20e, 0xa77f6605, 0xc1d00398, 0x177a90d5, 0x7ff6db7f, 0x0221897f, 0x6c8105c6, 0x578981d7, +0x64ac81c3, 0x1d9381f9, 0x7f706e7f, 0xd5797f81, 0xf9567f81, 0x27ed81d0, 0x496f2d01, 0x48b8973b, +0xb34c1f32, 0x7f42139a, 0xeac95332, 0xf05f7783, 0x037f8145, 0xa98f3dbd, 0xc5ea24a8, 0x81417fc1, +0x81655e97, 0xcbc682df, 0xec10815a, 0x817f7fa5, 0xb8b54dff, 0x00003e84, 0xa7ece56d, 0x819d81c2, +0xd0ee6d81, 0x0e5e4db0, 0xe3818181, 0x7f4a2c13, 0xb13d46ac, 0xc181f081, 0xfe51c8ab, 0xa7747fb6, +0xc2506129, 0x9d98ce4a, 0x81627f81, 0x816d6481, 0x81576b7f, 0x817f814f, 0xc881cc8b, 0x686d098f, +0xae7fcb7f, 0x3b39a87f, 0x81888879, 0x47877f30, 0xd9bd9e9e, 0x061a0ee3, 0x7f3631a3, 0xbf5b227f, +0x7f7f837f, 0xc5d18162, 0xa3405318, 0xc3f27fac, 0x7f43c6f6, 0x927f7fad, 0x817f7fdd, 0x7f813b8b, +0x7fe59a65, 0x81fd87d7, 0xda54547f, 0x814d114a, 0x7e7f813d, 0xf47f077f, 0xa0869981, 0x38db7fea, +0xf61986b0, 0x7f9146cb, 0x81db9bb7, 0xfc3b8177, 0xbd7ff9a1, 0x7f33c5aa, 0x74335381, 0x815ca42b, +0x7f814a81, 0x49977342, 0xab9ed775, 0x9874db81, 0x6381227f, 0x1b81b27f, 0x817f3716, 0xd8e6bf07, +0x81030494, 0x56cedf81, 0x7f607f9a, 0x7f7f4f52, 0x3cb978f2, 0xd57fca81, 0xd7bc5c81, 0x3a2265b7, +0x7f7f98bf, 0xac068158, 0xa8851581, 0x1db9a67f, 0x7f24d44f, 0x57a34093, 0x7529b981, 0x50a3817f, +0x4c3f6381, 0xbd7f81ad, 0x2281a481, 0x1d2e7f81, 0x77b85621, 0x3f818105, 0x307f783f, 0x7f7fe0b2, +0x4d4a297f, 0x8d7f4d7f, 0x88818a7f, 0x129cf8b7, 0x9b817f7f, 0x7fe5ce65, 0xa48e7213, 0xf481a737, +0xb43d4fd4, 0x7fb182c3, 0xa18181ba, 0x077f8127, 0x4f4b468a, 0xe1bea062, 0xc34d0000, 0xc0f381ba, +0x8181817f, 0xbb2d9e7f, 0x7f216b7f, 0x868181ad, 0x7f7fcdf6, 0x810b3d55, 0x627f4c43, 0x81903e81, +0x54ea9a4d, 0x3181ae1e, 0x7f8116d0, 0x74b5c181, 0x607b431b, 0x5552d081, 0x7f818122, 0xe9449f4c, +0x6fb434b5, 0x1f1b9552, 0x54d7b178, 0x8b1b9e33, 0x4143cb33, 0x13b0817f, 0x7f51317f, 0x81247f65, +0xa6fd7b18, 0xb1d6550e, 0xaa6481c1, 0x81421f41, 0x50c0813c, 0x95ac7eec, 0x442a7981, 0xc39289ea, 
+0x667fcc5d, 0xf681817f, 0xd729c236, 0x5342819b, 0x813f457f, 0x819d2481, 0x817facd0, 0x7ff2dd9a, +0x81cafb81, 0x26c72930, 0x177ba325, 0x81015e07, 0xc5667f7f, 0x81fa497f, 0x7cb3505e, 0x85e3815d, +0x67596b53, 0x857f6696, 0xc97f7f66, 0x7a813978, 0x5f7f7f81, 0x043d9d81, 0x8c81a6a6, 0x0e8197f1, +0xd17664d1, 0x9f81814d, 0x7f4c4f3e, 0x5e7f756a, 0x8181815a, 0xe090817f, 0x81377f81, 0xaba9813d, +0x8985c047, 0x2106c97f, 0xc846f2b2, 0xb19a83ce, 0x64107f16, 0x7f792fde, 0x87516244, 0x9c81673e, +0x2f5c8187, 0x555a8181, 0x95d9f26a, 0x70408e9b, 0x7f81b57f, 0x8f0de19b, 0xd37f7a6e, 0x9ba9fa7f, +0x7f0b6568, 0x1a4f7f14, 0x3b4f5b71, 0x814e377f, 0x8125a7ec, 0x1d817f43, 0x811128dd, 0x66816435, +0x9de99e3e, 0x81b27f48, 0x7fc3783f, 0x69983781, 0x457a817f, 0xd4296caa, 0x0000b659, 0x723a92a9, +0x81860888, 0x8c81c44c, 0x7489b15c, 0x817310ed, 0x74a48e9c, 0xc6531a73, 0x7f7f64d0, 0x502d7fb9, +0x437f9f7f, 0xb1bfa444, 0x81df09f0, 0x0946ab9b, 0x7f21ca81, 0x9d818166, 0x81e8d281, 0x917f6791, +0x36ac811b, 0x9d817347, 0x344c02a7, 0xab36817f, 0x81818152, 0x32013a89, 0xd7ab3008, 0x6f608181, +0x817f7f7f, 0x153e0881, 0x81dfcaba, 0x6c5e577f, 0x2f7f6524, 0x817aa07c, 0xdd7f0175, 0x8b4b7fec, +0xbc81ead5, 0xae8b205d, 0xc9a9743e, 0xf33f14cf, 0x49ca7f58, 0x507f6976, 0x8cbe66d4, 0x9778a724, +0x6d8147e3, 0x81334092, 0x7f4c0101, 0x82d5ea73, 0xb59c6657, 0x1ab842f8, 0xbc1d8581, 0x4004d917, +0xe5eaaee2, 0x7fa9813b, 0x817fae58, 0xd77f1281, 0x10732f61, 0x836d137f, 0x7fbccb3b, 0x1a3a81d7, +0x81047fef, 0x790aa87f, 0xc19eaac6, 0x60667f07, 0xb7f1a97f, 0x39f39e7c, 0x057f7f7f, 0xaa7f81c8, +0x79bef7a0, 0xf3d3134d, 0x720f7fbd, 0xb4a5b69e, 0x716c817f, 0x9a817fdf, 0x347f817f, 0x4b995cb4, +0x876a4f7e, 0x8d92687f, 0x7f9e8181, 0x8172817f, 0x8ea21cc3, 0x61bd96b0, 0x81fba378, 0x6db77f7f, +0xdf307181, 0xda0f81c4, 0xa8e36d65, 0x70a7a559, 0x0be0f27f, 0xb56f4971, 0x35813a6d, 0x188130b5, +0x517ff881, 0xec54b6ff, 0x8183817f, 0x20ec2381, 0x3e7f9613, 0x878af66d, 0x81657f81, 0x7fb80000, +0x7cb38d5b, 0x817f7f45, 0x7f6352c1, 0x3881727e, 0x637f5ec2, 0x81437fc6, 0x7f7f90d2, 0xb1558699, +0x2b81904a, 0x7f84ad54, 0xedab2281, 0xbe9cbd81, 0x7f5001d1, 0xea2a206c, 0x813cf1ab, 0x81658221, +0x81a6817f, 0x8167929a, 0x726dd87f, 0x817f817f, 0x93b45a87, 0x7fa179e2, 0x7f4c7f81, 0x8157817f, +0xa2fd7e81, 0x44284a58, 0x3b98c224, 0x45008124, 0x16da978d, 0xb772ba81, 0x7a4f3481, 0xe97fb9c8, +0x7f49529e, 0xaa02580d, 0xa254adb3, 0xac0a817f, 0xdc532c32, 0x7fe6887f, 0x7f313d7d, 0x61d17f11, +0x81e1bf02, 0x7f62c97f, 0x348d5d50, 0x12ec7f7f, 0x8f7f2884, 0x7f3f371e, 0x021c8e81, 0x65a07fc7, +0x5e14b632, 0x817ff481, 0x6681de78, 0xba4f971c, 0x65c07f81, 0xac81d181, 0x7f11b17f, 0xdcc1011d, +0x7f7f23c1, 0xcc819b6a, 0x81a73d06, 0x5e943954, 0x09c8b87f, 0x43405fe8, 0x817f8d58, 0x550f4d1d, +0x82c27254, 0xd0408532, 0x4460477f, 0x8b7f9cec, 0x7fc9817f, 0x8181165f, 0x28d8817a, 0x0527752f, +0x8168813f, 0x7f48cc81, 0x7481c91e, 0x6881c481, 0x4b0a7f55, 0xcc81e624, 0xe57fac7f, 0x6f0f81a0, +0x417b5a81, 0x9381dd8c, 0xa881c4e5, 0x8c505264, 0x695ab14c, 0xecc99781, 0x3981c57f, 0x0c204553, +0x4a77d59c, 0xa2301c81, 0x5ba47f00, 0x451eb2f0, 0xb0459baf, 0x934bcc63, 0x5652c0a1, 0xb381b193, +0x68626b5b, 0x361741d5, 0x7fe281b3, 0x70ad7f7f, 0x8161f792, 0xbfda5b21, 0xed97ad84, 0x7f59cb7f, +0x8ed1ecf7, 0x75d784ac, 0x0f81b07f, 0x3ed5bb6b, 0x819122a9, 0x2257a06b, 0x81687fb1, 0xc008057f, +0x97812981, 0xcb2081cd, 0xd9bb6681, 0x407f7f7f, 0xc27f814e, 0x23a2527f, 0x767203ad, 0xed17ee7f, +0xd4b89558, 0x8195bc81, 0x817f7fe0, 0xe4b7997f, 0x081ca76d, 0xb87f817f, 0x006350bb, 0x812739de, +0x557f811e, 0x8d4387af, 0x98815781, 0x81818148, 0xb1bc8105, 
0x04cf273e, 0x3f2b289b, 0xae96437e, +0x819281b9, 0x95cf7f82, 0x7bf77f81, 0xab7f0441, 0x81652854, 0x34de0d81, 0xac816081, 0x43b12e45, +0x91b75bdd, 0x7da27794, 0x9bbb727f, 0x301d812e, 0x7fa88120, 0x27a15075, 0x27c82cc1, 0xef1a813a, +0x8114c4d2, 0x817b7f2b, 0xff877f47, 0x7fd47fcd, 0xd08ceb35, 0xe07f4386, 0x67b3a47d, 0xad7fd64d, +0x817f7044, 0x537feabb, 0x8d3f36ec, 0x6c3883ce, 0x43b6909d, 0x81bcd181, 0xfa7fdae9, 0x3c7f5440, +0x3946347f, 0x7f22ac7f, 0x815dea81, 0xb6c6c8a1, 0xd57f81c9, 0x526fd581, 0xce3c3115, 0x3d41c54f, +0x5c5ac2b0, 0x7c948586, 0xa47f7f96, 0x8b7fc351, 0x309aadf9, 0xa0cfef81, 0xb081acd7, 0xd7413cb2, +0x7f4a6cc9, 0x5c3bb39f, 0x7fc7e581, 0x68cc8114, 0x0926a5fa, 0x82c37581, 0x8181e642, 0x57ab2ec6, +0x497f0000, 0x5fa07f9c, 0xa17fc405, 0xeaa0de7f, 0xa14a7fb3, 0xc4c12a81, 0xa3527d15, 0xa854d4d4, +0xf781a3db, 0xa07f26ae, 0x812c7f74, 0x1f7f2fc4, 0x3a9381d4, 0x17b6005d, 0xd414ee60, 0x18b49d68, +0x9cc77f9e, 0x30aa7f91, 0x7fb9207f, 0x64158b40, 0x81423c7f, 0xea2fa9cc, 0x7fa83381, 0xca647f8e, +0xd6b9657f, 0x81b26a7a, 0x4fb7b2f1, 0x1b7f9eda, 0xfb8181ab, 0x7f2557d7, 0x71e8701c, 0xa2817f81, +0x81817f9e, 0x04139d39, 0xc9f6533f, 0xa1f74db1, 0x814042db, 0xca6e8114, 0x7fb3f64a, 0x03f981a0, +0x7f9e0755, 0x7f7ba781, 0xd554d19a, 0xd81a5661, 0x7fb33f3a, 0x8f81c8bf, 0x012ff975, 0xb1a576ee, +0x81be6681, 0x818db381, 0xbd7f8151, 0x81b281ae, 0xddd2fbdc, 0x81bfa81c, 0xce818181, 0x774c9c28, +0x7fd05d59, 0x811a54ce, 0x2f7f727f, 0xbe816b81, 0x7fb03af2, 0xc44bfc7f, 0xcf6ba181, 0x14be8161, +0x6715a197, 0x76386d26, 0x07835f81, 0x817c818d, 0x81693281, 0x818118fa, 0x777c9e62, 0xccad6446, +0x769c5b8b, 0xdbd781b7, 0xe4b063f9, 0x2b568182, 0x8c7f8187, 0x7f7f677f, 0x6b7fa675, 0x6be85f61, +0xc36f7d32, 0x7f81005f, 0xd0d0997f, 0x3a818181, 0x7fa7b6b7, 0xc39ef17f, 0xcc7f437f, 0x3d51729d, +0x3eb72475, 0x1b069d81, 0x7fe8abee, 0x604c7df9, 0x7fb47f81, 0x3a263b75, 0x75a488e6, 0x7f68d47f, +0x00004e97, 0x7ffb0973, 0x7f53a009, 0x7f014834, 0x6f81022c, 0xf0b1405f, 0x06c84ca5, 0x4bcf7fc9, +0x4388ec9a, 0xf528fa74, 0x8e3b816f, 0x7f7f4c72, 0x817f5224, 0xbd998181, 0x81b181c6, 0xbd817f88, +0x1763f57f, 0x7a796e44, 0xe98e81a9, 0x5360a2fa, 0x97b39081, 0x707f1b65, 0x81c67f7f, 0x4e461c7f, +0x817f231b, 0xc2d08181, 0x366081c6, 0xd2e67f90, 0x13f1b87f, 0xd781819f, 0x7f347fa2, 0xb1147fc5, +0xe6b1fe4e, 0x3a1a96b2, 0x177f8191, 0x3e3d96fe, 0xd0d261a0, 0xb25e7fc9, 0x472b81ce, 0xbad6817f, +0x707b45ca, 0x81817f81, 0xc48fb7f8, 0x90373f82, 0xa681e0b5, 0x71419190, 0x6978f77f, 0xf4463125, +0x48f1937f, 0x4a45c611, 0x7f97e348, 0xf07f1e21, 0x81fbfc60, 0x8783a281, 0x00931cdd, 0x81e70f91, +0xe24e7f7f, 0x7f61817f, 0x81967c47, 0x6d9bc90e, 0xec58817f, 0x8157357f, 0x7f7fc674, 0x7f7e3c81, +0xaa8b7fbc, 0x7f3ba469, 0x7a22f57f, 0x7f7f7f27, 0x81c89e7f, 0x8145d9c8, 0x012f737f, 0xe91f15cc, +0x9b9cb749, 0x818142cd, 0x290a7f4a, 0x89cfc4b1, 0x8337ebe2, 0xd8224205, 0x8181a49e, 0x58b04ea1, +0x7fea95a7, 0x861681fb, 0xcea37f5f, 0x81309c8f, 0x6ded8145, 0xbd5be86e, 0xc2ce9402, 0x557f57be, +0x4ef830e9, 0x7f81f97f, 0xf804367a, 0x5de6817f, 0x156de096, 0x2c06df0c, 0x98b170d7, 0x817f81a9, +0x24597fd4, 0x81810000, 0x0e24a66a, 0x819e5681, 0x6f73e481, 0x054b532a, 0xc0814026, 0xb3a36181, +0x6da37f9a, 0x7f83574f, 0xe0a381c6, 0x2075bf7f, 0x28438181, 0xb1cd7f74, 0x7ffb497f, 0xd17f337f, +0x46085c73, 0xd57f5681, 0x704b7ff3, 0x449e3f7f, 0xeb6e09d6, 0xb37f4b02, 0x817f1cb9, 0x672edf7f, +0xd0c2c25b, 0x5ed0885b, 0x4676876b, 0x2bd8d970, 0x9a7fbbc6, 0x81095581, 0x81b27f6b, 0x753c7f7f, +0x7f812c7f, 0x7f477fbe, 0xdaa67f7e, 0x7f099859, 0x7b0fea70, 0x816b8186, 0xf0677f5c, 0x16455d9b, +0xa98170a9, 0xa181529b, 
0x7f817f67, 0x8f72c351, 0x2a7f7f7f, 0x2f5f347f, 0x695c7f79, 0x282fbbe0, +0xb78581f0, 0x817f297f, 0x49818c81, 0x817fc78c, 0x3bd68181, 0x86d7deda, 0x7fa0d208, 0x1c7f3481, +0xf5be9a57, 0xb9ded881, 0x8ab88c81, 0x647ae23a, 0xb5571142, 0xe09d7fc1, 0xa5d9817f, 0xaca07f7f, +0x7f4d5286, 0xa57fde7f, 0x94d0917f, 0xb160442b, 0xacf47b2b, 0xd17f7f81, 0x2e37f976, 0xbe7f5d66, +0xc2815f28, 0x5c81487f, 0xc2026481, 0x81a194dd, 0xba7f2c91, 0xb7357f37, 0x81b25698, 0xd2818154, +0x45b78130, 0xc6bead58, 0x6f5a5656, 0xb181a6ae, 0x07c9a153, 0x75d681d3, 0x8c92817f, 0x81503520, +0xa66f812f, 0xa59b7a81, 0x17a09164, 0x17087fb2, 0xd0bb319b, 0x6d8153ae, 0x8af1a67f, 0x651c46b0, +0x4b637f1b, 0x00002981, 0x2c7f437f, 0xd9deb6ab, 0x9fba847f, 0x6f289b21, 0x015f811c, 0x8176f498, +0xeac003ae, 0x68818192, 0xb37fa6a6, 0xd07f4aa5, 0x83819472, 0xa5da41a0, 0xbc7fbe7f, 0xa67fccd9, +0x7f0c86c5, 0x116e7f15, 0x7f67b45a, 0x907fcf81, 0x81812c66, 0x05747283, 0xac962b7f, 0x2a7f81ea, +0x81c2b57f, 0xb1b4b868, 0x7f811ed1, 0x7f66c937, 0xc881e6ab, 0x740323fd, 0xcf765745, 0x3592d370, +0x5c81f3e5, 0x527f3efc, 0x818781d9, 0xf6819b53, 0x1c547f51, 0x4e29b37f, 0x36f1537f, 0xcb819758, +0x538f5b7f, 0xa5172c22, 0x52427f7f, 0x6b7f8128, 0x7186134a, 0xf67f1b62, 0x6081e015, 0x829abe81, +0xaa8a65fc, 0xd27fb70f, 0x7f0a0881, 0x3d7fc998, 0x443e78b0, 0x9443c354, 0x81266724, 0x9b81812b, +0x813181a0, 0x7fddca68, 0x7f260f81, 0x7a818181, 0x812681ca, 0xf48152e0, 0x817f8388, 0x81306c7f, +0x7f71d020, 0x81bd8481, 0x5234d5c2, 0x31be8136, 0x3403d432, 0x11286844, 0x81a57f1e, 0xe3b57fd4, +0x7aa88f52, 0xabc64c52, 0x977f8181, 0x7fe68ca6, 0x5dd6eac3, 0xd26a81a9, 0x017f8183, 0x902cc266, +0x7fba7f52, 0x617f81cf, 0x7f657ff0, 0x045d7f63, 0x3c04bb4e, 0xaa4e7f81, 0x847fe385, 0x67817f7f, +0x42f33d28, 0x61a5aa5a, 0x817f7f1e, 0xa3a715d7, 0x5da91c3a, 0x7f9f7f81, 0xc48281ce, 0x813081fa, +0x7f817e53, 0x6d227dbf, 0x6e7b0000, 0xf49b4091, 0xf6b3d8e9, 0x89fb29ae, 0x44289881, 0x3cc78181, +0xe58103b9, 0x535bffd3, 0x817fa81b, 0x469f82f0, 0x57837fe1, 0xe14af07f, 0x54bf19b2, 0x7482595d, +0x6781bbd2, 0x4b7b7fb7, 0xa4c45060, 0x380155c8, 0xb72aa381, 0x4530847f, 0xcb509581, 0x34818181, +0xb03cd87f, 0x90818122, 0x812a8181, 0x817f677f, 0x2c2a597f, 0x35f0c581, 0x81b11261, 0x4d678120, +0x816d7f5b, 0x448166d8, 0x0c567f61, 0x81189e21, 0xcf932f08, 0x4c5a917f, 0x687f9b7f, 0x257f81d2, +0xcc7f6220, 0x8c947f81, 0x24642d1f, 0x728155d3, 0x14ca75d2, 0x817d7f38, 0x7f818181, 0x7fb31f87, +0x13a581b3, 0xd9b5215f, 0xa8afdbf5, 0x147fe67f, 0x81caab97, 0xa96daa7f, 0xf88b897f, 0xe7937fc2, +0x5a8b7f4b, 0x7d59277f, 0x8d687f9d, 0x7f924b81, 0xd41c82c0, 0xa5119c7f, 0x4fcfeba1, 0x3e077f81, +0xaf65994c, 0x83d481a7, 0x81257fb0, 0xa481387f, 0x36d8a463, 0x8181811a, 0xc75643a7, 0x474c6c1a, +0x7fc0e8cf, 0x8cdc896e, 0x55830655, 0x7f81337f, 0x7f65db1d, 0xa881cd81, 0x7f812ec8, 0x7f76cdd4, +0x634a443d, 0xea7f3ed9, 0x90a8d093, 0xb05e819c, 0x7f7f5c4e, 0x811e5b81, 0x73a1b989, 0x8181717f, +0x927d7f09, 0xdeaf7fca, 0x528e302a, 0x81654681, 0x7fe92e81, 0x87d58110, 0x1ca86e20, 0x7291a4cc, +0xc76c7f81, 0x577f5136, 0x00005b6e, 0x817fe37f, 0xf7ac7f7f, 0xf258ee81, 0x81814c63, 0x2a97f57f, +0x5f7ca0a7, 0x7f2fa37f, 0x94360c7f, 0x7f23117d, 0x81343a7f, 0x55b0d9de, 0xbae0189e, 0xc77f228a, +0x817f7861, 0xa57f7fac, 0x878ede24, 0xb57fad5a, 0x5c907f93, 0x814e9b81, 0x7febf59c, 0x7724e1a9, +0x569d9281, 0x1b644699, 0x4d4e26bf, 0x397f7f6e, 0x8ac93027, 0xe6885974, 0x51a2c1a4, 0x7f7f5b7f, +0xaad8ac97, 0x122c8186, 0x97810a5b, 0x818581e2, 0xc77f7f35, 0x67611c60, 0x3f8178ea, 0xa47fa65e, +0x888f8132, 0x81817cd4, 0x1081b4bd, 0x817f7fb8, 0x449a3fcc, 0x7f9832de, 0x51e96f58, 
0xcbde52c0, +0x6b515783, 0xbda3b297, 0xd64b6381, 0x7f693c81, 0x6d311abb, 0x73468e81, 0xde81817f, 0x4f811581, +0x90d6817f, 0x152c9d49, 0xad7f17d7, 0x7f834681, 0xa47fe6dc, 0x06817fb3, 0x7f817f69, 0xd18190b1, +0xca7f8183, 0x1881f381, 0xac50814a, 0x7f8dca7a, 0x71358131, 0x28783add, 0xd31b153b, 0x71919b97, +0x81877f07, 0x707fca4f, 0x57c6814b, 0xc0a17fbf, 0xff5bb344, 0x4a7fa99d, 0x7434c104, 0xaa7ff0a5, +0xb6ec6841, 0xb4917e1b, 0x5a81ab94, 0x81a3aab1, 0x257fce30, 0xe26b7fc7, 0x1d7a7fe5, 0xfc7ca6c0, +0x81437ff7, 0x7dc59e14, 0x7f0cf9e9, 0x90e37f7f, 0x7f6f12bc, 0x52d36381, 0xc15f88ac, 0x3536277f, +0x8d3e2a7f, 0xa18d7a7f, 0x48907f23, 0x7f810000, 0xfd813357, 0x6ed4520b, 0x843907e2, 0x66814378, +0x7f7f6bd8, 0x0e3381ad, 0x03847f90, 0x05814769, 0x8181c181, 0x1c7f7fac, 0xe83cbfb5, 0xb15c735c, +0x7f8f7f7f, 0x7f846b81, 0x81d2eb7e, 0xf7447f41, 0xa2b57f81, 0x2ba7f381, 0x817f75bc, 0x78553181, +0xc7b07f7f, 0x7f557a41, 0x46a83ddb, 0x9b447d13, 0xccfe7b81, 0x7f3ae281, 0x75464bfb, 0x7f4e5560, +0x7fad14ce, 0x98fd2a7f, 0x7f637f81, 0x4a22cf5c, 0xca81813e, 0x4ffc7347, 0x9d777fa5, 0x810585ff, +0x71e57f81, 0x3f7f8181, 0xfce8819c, 0xbb315486, 0xf1e5a974, 0xf1e061dc, 0xcd7f8181, 0x8181817f, +0x547fb97f, 0xa663c341, 0x9e5aab7f, 0x35768dad, 0x817f3b48, 0x46bc815f, 0x707f89b3, 0x860713c8, +0x4a7c6532, 0x541084e8, 0x5f77bb7f, 0x299dbebe, 0xd3f27f8c, 0x25096727, 0x81194cba, 0x7ceff409, +0x9c8c267f, 0x7f7b1fe0, 0x635c0fdd, 0x308132ac, 0x57330951, 0x7c257f81, 0x6266dd49, 0x7f89b481, +0x817f7fae, 0x7f81aebd, 0xc7fb7f7f, 0x4537e27f, 0x1d288b7f, 0x7f4a9b7f, 0x8150969f, 0xf533f121, +0x7f782217, 0xe428816e, 0x10ca6981, 0x6981b39f, 0xe35fdb7f, 0xc6814220, 0x79ba8158, 0x811e815d, +0x772d8184, 0xc784810c, 0xa91abf8f, 0x3c7f6f7f, 0x6d9981bf, 0x9b42dbe9, 0x42948181, 0x5681817f, +0xdba39bf2, 0xf77f7f37, 0x18eebbdb, 0x39c6b7b7, 0xdc2fb5b9, 0x99d00a7f, 0x02dbe492, 0x4481606e, +0x7f6f81a1, 0x7f2f557f, 0x2aee7f6d, 0xed2ad2ae, 0x6ce4ec0c, 0xdb4cae3a, 0x81646c83, 0x7fa3c1be, +0x7f383730, 0x81aeba9d, 0xc0528bdc, 0xd17f3982, 0x7fa47c7d, 0x81814e81, 0x813d81bc, 0x20816349, +0x857fdfe9, 0xe46aa581, 0xe8de53b5, 0x34471a81, 0x3b4f7feb, 0x8174c981, 0x2fc7c735, 0xc818ec81, +0xb28ed3df, 0x627f1100, 0x7f40067f, 0x7fd37fa8, 0x637c5b81, 0x113dd969, 0x81376881, 0x5d817f67, +0x7f7fda7f, 0x7f7f7f70, 0x77577fb4, 0x87bb3d7f, 0x5b817f93, 0x839b517f, 0x7f45818e, 0xe47f3381, +0x93c2b281, 0x944207f3, 0x36dc77b3, 0x50b86e4a, 0x8181e27f, 0x0b81a79b, 0xf9975950, 0x977b9e47, +0xea7f8181, 0x5547b549, 0x3da47f0d, 0x474b8e72, 0x7f7f9ce1, 0x7f2e2e42, 0x6f7f45c9, 0x08d56ede, +0x8524810b, 0x657fd15f, 0x7f8d0c14, 0x6c7fff7b, 0xd530f39d, 0x7fb4b023, 0x651d8a1b, 0x13817024, +0x7f850774, 0x1f219db3, 0xd8817f52, 0xd95c0feb, 0x9ef0e67f, 0xd9dd4c7f, 0x64b0819a, 0xbc60e997, +0x412fd0db, 0x817fa57f, 0xa3a47f81, 0x4d81815b, 0x81874e3a, 0x5f1e7c81, 0x7fc63b81, 0xbf817581, +0xb5b0a5ab, 0xde2c8145, 0x81789629, 0x097481b8, 0xad5bdca3, 0x212d81e5, 0x4d817f3c, 0x70813761, +0x5d6d0f40, 0x81a190eb, 0xc942c14d, 0x81b462d8, 0x81890000, 0x6586fbcf, 0x81cb3322, 0xa5c0b931, +0x7f78e98d, 0x2492625b, 0xaf81b981, 0x4e59817f, 0x677fb9d3, 0x2c3b817f, 0x920e5981, 0x7f2c7f5d, +0xc379d081, 0xdfbddf43, 0x85818172, 0x6781e442, 0xda9a7f14, 0xcac70881, 0x98818113, 0x0f817f6f, +0xeba852c8, 0x7fa6407a, 0x7fdc8188, 0x819fd349, 0xee738137, 0xe881417f, 0xb6777fea, 0xc6947f73, +0x8181240f, 0xa035c264, 0x7f419e81, 0x193f815f, 0xaedc812e, 0x7fb97f24, 0x924a81e6, 0x63f58183, +0x3e4ec808, 0x7f4fbe86, 0x5faba653, 0xf0c4f4d7, 0x69cf647f, 0xfb55c681, 0x7fe1e381, 0x7f817f6b, +0xc0410481, 0xc65d71b6, 0xba1f8135, 0x997f04cf, 
0x49db5b1c, 0xbe817443, 0x817b812c, 0x36e18163, +0xd1893a47, 0x231df6f2, 0x816d045e, 0x3a568139, 0x81628181, 0x6588ffd2, 0x23603081, 0xf5b4817d, +0x8112b5bc, 0x7f9cfbae, 0xc02b8d40, 0xc7f20532, 0xce81487f, 0x0b1a6614, 0x7f17392f, 0x7f8f818c, +0x7f7f8388, 0x5492467a, 0xbf5e3081, 0x777f26bb, 0xae9c4a7f, 0xae81ba0e, 0x6c6281ae, 0x81238143, +0x7f997ffe, 0x798b949c, 0xc3d67f36, 0x38817ff9, 0x2f814ab6, 0x594bad4b, 0x7f6f7fd6, 0xab234181, +0xaf818581, 0x77865381, 0x9d7f8173, 0x27ac8181, 0xbdbd8daf, 0x7c7f7f7f, 0xc6817fd9, 0x34412181, +0x1fe7eb78, 0x7f38f49b, 0x81040b8f, 0x34ec8581, 0x0000907f, 0xd5818181, 0x7865c6d5, 0xada81975, +0x0447e955, 0x7f3d09b3, 0x2b8156e8, 0x976d81dd, 0x29cee57f, 0xa34f537f, 0xe9a981f6, 0x0c7fe841, +0x7f7eda7f, 0xd77f1881, 0x3f929481, 0x77e3c852, 0x0d817fc1, 0x7f4795ab, 0xd1337f4f, 0xdd4b077f, +0x97a4b3d8, 0x607ff155, 0x942b16c3, 0x113a723c, 0x52ce7882, 0x5e81f5e9, 0x81755669, 0x7fea4d5f, +0x5d6d7f81, 0x7f90e710, 0xe3817f6d, 0xe810299c, 0x4e814937, 0x3c269d81, 0x33a891aa, 0x81dd7ff3, +0xa9d1db0d, 0xfb2181eb, 0x7f817fa4, 0x75ec817c, 0xf17f8195, 0x81e18181, 0x44321c58, 0x35a62913, +0xcd5b0875, 0x397f8233, 0x797f7f81, 0x7f829cbc, 0x9a697f81, 0x07b7fea9, 0x7d2b0b7f, 0x8f984dce, +0x0c13b97f, 0xe9203f3c, 0xea473769, 0x7b7fdc81, 0x7f7885c1, 0xde7f5b39, 0x32bd81f1, 0x67b4917f, +0x443069cb, 0x6e972b83, 0xbc66829f, 0x7492817f, 0x7f4d7481, 0x7f25896f, 0x9ea0da7f, 0xceba7f81, +0x7f45d87f, 0x93dbf04c, 0x50814197, 0xb7167f81, 0x6d7fad19, 0x0f81bea5, 0xc57f8177, 0xca4c7f7f, +0x7d84e3f6, 0x7bc443d4, 0xd7b0ae81, 0xc8731f44, 0x06e481ac, 0x85b08181, 0x53cfe4be, 0x73761a3d, +0x707f9981, 0xa87ff27f, 0x68b5b87f, 0xb561c081, 0x7f660001, 0x7b81521e, 0x6093614b, 0xbc778748, +0x64400f81, 0x81d13281, 0x60475381, 0xbe768181, 0x21aad606, 0x81910000, 0x20b6a511, 0x5953d899, +0xdd7f7fba, 0x8181fabe, 0x81b8e77f, 0x817ff17f, 0x6757257f, 0x9c2cc933, 0x51813593, 0x7f7fadef, +0x8ec47f43, 0x348139d7, 0x7f5c82b7, 0x664419bc, 0x8d7b15a5, 0x8181a581, 0xd54d7f63, 0x7f0381b7, +0x81a17f68, 0x814a5e0d, 0x17813dc4, 0xbd94872d, 0xa25a8101, 0xa6810e54, 0xd8568fc4, 0x7f7f3a7f, +0x818478ba, 0x517f81c7, 0x70443b81, 0x7ce481b6, 0x7f817f7f, 0x81b27fe5, 0x4bc67f32, 0x81bbe681, +0xa1c8fe81, 0x857f1c0d, 0x7f81793a, 0x7fbfcf74, 0xadd47f5c, 0x7f8138b4, 0x6f63cd25, 0xd70e497f, +0x81aa7fc7, 0xbaa84f5a, 0x8197ccd3, 0x7f556b7e, 0xca815dff, 0xb9ba7f7f, 0x81747f7f, 0x7fdf967f, +0x65a98181, 0x81bc8181, 0x8e11bab7, 0xbbbf7ffd, 0x12b8457f, 0x59fe8481, 0xf25cdd81, 0x958f840c, +0x5e6a5f81, 0xe9008181, 0x7a68cb38, 0x76818914, 0xa3857fc7, 0xce98717f, 0x8126504d, 0x7fa48183, +0xfa60e14a, 0x7fb77f22, 0x5f3a7f28, 0x24910bde, 0x2fd07f4f, 0x64a6810b, 0x818181a4, 0x7a065fb8, +0x7fe69795, 0x45815e3b, 0x7fa98741, 0x52c581b8, 0x74bb2a22, 0x817a8166, 0xc2cb6581, 0x493c7f7f, +0x883e816b, 0x3c7f7f7f, 0x7f3381c1, 0xab767f6d, 0x81b4588c, 0xb2e24cb9, 0xd9bccba1, 0x70e0162c, +0xea1c810a, 0x907fdd49, 0x424aece8, 0xd90423b0, 0x22df1b28, 0x6b005d25, 0x113e61b3, 0xa2103bbe, +0x20d6c538, 0x17e6c841, 0xdc08e2c4, 0x2002b1ef, 0xc51a77e3, 0x8111f752, 0xea698110, 0x1849b6e8, +0xe6b006d2, 0xdc40b507, 0xc738b9ec, 0x42290a3f, 0x522355bc, 0x590736d3, 0xd9a340e5, 0xe30a19b5, +0x3de9eff9, 0x4437cc7f, 0xecfd0a7b, 0x34e4fcc7, 0x4afe1556, 0x1ecb301f, 0x0f511d1d, 0xf8ec40db, +0x4c07caf9, 0x59f73ebd, 0x45064328, 0x03c27f19, 0xd1661dcb, 0xe4e3ae17, 0x000fcd5b, 0xdd121ec8, +0x3adb1413, 0x112b9888, 0x1d3624fd, 0xa8a347f6, 0xfeb632d5, 0x1d28fc11, 0x21ac2b5f, 0x371fe9f7, +0x3d5ef435, 0xe84000a1, 0xaebebcc9, 0x34555ff0, 0x39a9ca2a, 0x580fc0e6, 0xcf7a5699, 0xf5e65242, +0x07ae3170, 
0x06b223ea, 0x638e421b, 0xd8dc0cab, 0x66c44fb0, 0x954117d7, 0x9691a163, 0x415bc8a1, +0x0868d844, 0x9593f551, 0x364bec3b, 0x57460bfb, 0xdc310e3e, 0xe4dbef28, 0xc6443317, 0x81a02211, +0x450c2323, 0xefca9dcf, 0xfebf3fbf, 0xafef377c, 0x3f81ac26, 0x13b4e548, 0xf60a05a9, 0x03d4d840, +0x1970ecd3, 0x95019cfd, 0x8128341d, 0xc4d8d440, 0x16b80a75, 0xc2332b1d, 0x47e90e1b, 0x2e4eadcc, +0x0aa5f04b, 0x03302c42, 0x1a1ed437, 0x3b623efa, 0xc1cec834, 0x4442d130, 0xdce837f0, 0xc3f90934, +0x5ea50731, 0x257f1504, 0x2da5defd, 0x22d3dd34, 0xc429fa2b, 0xf2cc0a0b, 0x3e2e0000, 0x020f1c20, +0x2e962a1f, 0x06edf5f3, 0xceb5dfe4, 0x3b213d57, 0x08c24237, 0xf32ad940, 0x321768b0, 0xb133ebcd, +0xfca78110, 0xc03001db, 0xfca42f7f, 0x7071645f, 0x30bf8185, 0xd1f6d479, 0x02076839, 0x34cd900d, +0x500cda81, 0xe542b08e, 0x282afac6, 0x30a835ce, 0xbc710153, 0x574b009c, 0xe8b24309, 0x02d2cbc8, +0xaae1ea5a, 0x190eff7f, 0xbc656b07, 0xfdc8094e, 0xc0ee4452, 0x2a819969, 0xb79a0a0e, 0xe334a040, +0x12323acf, 0xd9617f3c, 0x3709af42, 0x8b67b338, 0xff00e313, 0x5046e8de, 0x45ec1107, 0x4e2618c9, +0xb40a62dd, 0x0da39629, 0x0a62a937, 0xc9d92e1d, 0xcf4daf81, 0x3f26a9a3, 0xd01e3f2b, 0xac294211, +0xd7c221e7, 0x15c928f2, 0xd79412cb, 0x110fb1fc, 0xddd3fa22, 0x4e7fa8ad, 0xd4a0ebf8, 0x5b144a01, +0x4cf90bdc, 0xb43366e7, 0x1cb8503e, 0xff152c3a, 0xae04fd5f, 0xd896041e, 0x1c2ca245, 0xd7adf0e3, +0x36ee2de1, 0xf0350023, 0x2e529db9, 0xba18d1bc, 0xbb2aba00, 0x52c7c4fb, 0x09df6147, 0x1102f72e, +0x13054b2f, 0xf9f9fced, 0x292a14c9, 0x60095adf, 0x8b43fec1, 0x3ca5a3c9, 0xe73665c7, 0x2f12b049, +0xb0fadecf, 0xd0e46081, 0x42532cf0, 0xe420e3fe, 0xaf5b0501, 0x27f0b481, 0x871c0ec6, 0xcd1c8cb7, +0xe4135816, 0x0a2aab7f, 0xbf732d1d, 0x613f1aee, 0xac09b927, 0x7d663532, 0x0000dee4, 0xfdeccc4a, +0x34b86af1, 0xf0bb16dd, 0x5829085f, 0x51ffdcc2, 0xd50c9f39, 0x64dfc723, 0x2cd52b2b, 0x083e0b2b, +0x224ae1b5, 0x0fd63733, 0xf081cef7, 0x7f73e3c4, 0x36cca9d4, 0xcc2600ae, 0x0757af1e, 0x21cc2ef7, +0xf175c93f, 0x59221d2d, 0xe6c7f468, 0xe3ea1fcb, 0xcd3d539a, 0x271e345c, 0x3555341c, 0xd2532b7f, +0x7fc8c57f, 0x4025dbe5, 0xf6926f3d, 0x264d39db, 0x2ed7c504, 0xde2a554b, 0x25aeb624, 0xd031a1dc, +0x47dbd918, 0xa0004941, 0x0202e11c, 0xd407fda2, 0xd9bd0fdb, 0x30f94d53, 0xd32b1cc6, 0xa6cb1947, +0x16fa1b49, 0xc70d1f56, 0xa7daec0d, 0x813d4649, 0x0f520200, 0x0d174704, 0x337f521a, 0x52c2cf34, +0x6b280539, 0x81da4508, 0x0ef2a91d, 0x07121df9, 0x031e410e, 0xf0180925, 0x055022b0, 0x2156f018, +0xa2e812c6, 0xfbe05cbb, 0x232e4462, 0xed378bca, 0xcd31afa7, 0x063a2e09, 0xe836ff1b, 0xefdeaf26, +0xfeef274a, 0xc9a1159d, 0x66db4731, 0x05a50ace, 0x1b4c58fd, 0x6b0ba273, 0x251319b8, 0xc7523041, +0xa20364b7, 0x34812fc4, 0x055f5d29, 0x5858fefa, 0xf809192d, 0x2f063981, 0xce056e8e, 0x1f7e68cc, +0xbbc07f8b, 0xcc1c1240, 0xf45c1452, 0xeb1deac8, 0xe3f9d3fe, 0x12bc2a12, 0x515bd8d4, 0xb3b361d6, +0x3304f1dc, 0x63060dbb, 0x41c8c73d, 0xce985d1d, 0xd2b2544b, 0x6d241fd4, 0xb6b8b1c5, 0x21830000, +0x15efc4c3, 0x2c430fc1, 0x14400bef, 0x2c33083e, 0xbe04fb07, 0xad42d311, 0x4ed2b731, 0x1627d2d2, +0x07aa0f34, 0xfe38df20, 0xe35bcd1b, 0xe13bfbcf, 0xcc3df1c9, 0x84355a1c, 0x55e602d2, 0x47c52a81, +0x16d85858, 0x0d11d8d2, 0x54f4b081, 0x303523ad, 0x2dbc16e3, 0xb9d6f0d7, 0x13f76190, 0xeec0133a, +0x81eccebc, 0x46202d2b, 0x86b52303, 0xa1f928ba, 0xe421b651, 0xb13cf63f, 0x3136c8e6, 0x4a8128b9, +0x2d7f231a, 0x262cf3e3, 0x9c35d3f9, 0xe9e6753a, 0xefeff441, 0xd4bcd301, 0x42700bd5, 0x29ab001b, +0x28bb6207, 0x01f537d1, 0xbc57fd38, 0x4999290e, 0x3e7f04fe, 0x36adb7fa, 0xdb0314ce, 0xa1d6524e, +0xeee5c994, 0x1d1ce32e, 0xee142708, 0x3bf9e663, 0x06521283, 0xe8c6e6a9, 
0x53356ee3, 0xe2cd5d4e, +0x8e1bdacc, 0x382c18e2, 0xc5e2fe00, 0xcee2eaf8, 0xb7532ece, 0xfbea01bf, 0x3ecf7f2d, 0x06b2ffaf, +0x5acbfe35, 0x26c8eefc, 0x8e27d35b, 0x84291501, 0x32f82ffa, 0x9f3a10cc, 0xabcff198, 0x139a3da9, +0x3bd92af7, 0x264febe8, 0xbf48f81b, 0xa1df1aa2, 0x2c0a10ca, 0x04c00073, 0x5681e760, 0x411054e5, +0x88d8c5a0, 0xe3f6b6fd, 0x2a3030ea, 0x0f0ec8b9, 0xa827cbe8, 0x2a2c7f49, 0x51ef20d2, 0xa48c4a81, +0x2b8db1d9, 0xd150813e, 0xd7aab201, 0x36061afd, 0xeed80a8d, 0xcd9932d0, 0xedd3edbe, 0x38f4f017, +0x9853f907, 0xc964f464, 0xc68d40f0, 0x3f5bcc44, 0x975004ec, 0x102d467f, 0xf3f9451d, 0xcde706bb, +0xc87fa602, 0x3f5a88b0, 0x8119c002, 0x3cc14ac8, 0x607abeb7, 0xf95924cd, 0xfb1afa2c, 0xc4967f36, +0xf425dacb, 0xa32d09aa, 0x35106590, 0xad4e5b4d, 0xa1fda650, 0x0326e143, 0xfbf054e0, 0x589fe1bc, +0xc83249b4, 0xb0b81e1b, 0x7081fec8, 0x27530027, 0x84dc89ca, 0x76551d3a, 0x0bbcc3d9, 0x8108bade, +0xb534f6cc, 0x87833920, 0x07ec1b0e, 0x5fd30b29, 0xf124dd07, 0xc981b03d, 0x71b9417e, 0x476c76b6, +0x7fcb3662, 0xaec632e9, 0x2e6705dd, 0xaec5acf9, 0xc84cd6de, 0x5312d428, 0x12fc4738, 0xd31d5b1e, +0x61edd156, 0x60fdfddb, 0xff676a0c, 0x2a3baad6, 0x0d1f6eab, 0x0001dcce, 0x180a31ca, 0x1ba681ec, +0x90871091, 0xe2ef01f5, 0x35186658, 0x95cac76a, 0x1ae32a09, 0xebedc8df, 0x0da7c007, 0x274fc73f, +0xd62a1ff5, 0xd6d7403a, 0xc4921d32, 0xb5f6cadc, 0xdd34f021, 0xe9334bfe, 0x5c9256c1, 0xbecbfd00, +0x139acdb2, 0xe3913eab, 0xceed1c3b, 0xdd2c5aa6, 0xfb475d17, 0x4633f4e7, 0x373b4b4f, 0x72ee99f3, +0xb65542d3, 0x48eb1d39, 0xa6d32fae, 0xc20aabe4, 0x105f4c0d, 0x4848bb65, 0x3bd329d2, 0x470c4709, +0x43fb2438, 0x0620c0d7, 0x4f962821, 0xfd2f992c, 0x51f8aea5, 0x2521cfe9, 0xbf6e2f9d, 0x005e3653 + +hard_output0 = +0xb5bc6ac8, 0xf5373664, 0x1310345c, 0xd5bae4e7, 0x1fc9e83e, 0xebfdfded, 0x84bd86ab, 0xb7aabe00, +0x60b44fea, 0xb9067464, 0x30325378, 0xa9195955, 0xf70c6e5c, 0x90922632, 0xc90b1cdb, 0xf2f5fb69, +0x73056b63, 0x1a33bf3f, 0x17755b5c, 0xc58bff6d, 0x2f4390b2, 0x2869d508, 0xe7c7dfe8, 0x38552963, +0x21da5367, 0x07282b9b, 0xa4767105, 0x1e294251, 0xe350a940, 0xb8a6aa27, 0xed12d778, 0xf10d9ece, +0xab93527f, 0xcf2da7e7, 0x68f6d0b1, 0x811f4bca, 0x577b06b2, 0x3234f13e, 0x30bab7df, 0x8dc47655, +0xbb843bed, 0x86da3aba, 0x30950c97, 0xdd096d7a, 0xa871fd6c, 0x8bee4e6b, 0x8fea30d0, 0x6c05b4d2, +0xf3e144d3, 0xd24ebb1f, 0x065635e5, 0x8d3f2cf9, 0x536c6c6a, 0xfbb0a5d0, 0x3d707b42, 0xc44d5982, +0xa5f4ad8f, 0xf32c0970, 0x1bccf1a6, 0x05916020, 0xa64fb176, 0x5ede6a35, 0xaf4966da, 0x9df5e0e7, +0x75042abc, 0x9ef10481, 0x11ddcbc8, 0xa0f5518c, 0xd5c23418, 0x2393d558, 0xfbe7dfeb, 0xed1c64c2, +0x86a36508, 0xde2dfb1e, 0xb8d0fef9, 0x24505232, 0xc894e71c, 0xbcc752a0, 0x40b74e83, 0x90d23c8c, +0x728e4a61, 0x108f0b08, 0x66f522ee, 0xc258d851, 0x35a31c44, 0x11311b5b, 0xfd3d5be9, 0x5ae448ff, +0x4f64994b, 0x5b8247a9, 0x4021114d, 0x2f0b6e82, 0x5eaa9828, 0x50ac71c0, 0xfb86ee52, 0x0dc1ac9b, +0xbbd47645, 0x8f357115, 0x978ceea0, 0xd557db99, 0x99b30388, 0xfc9a8a1c, 0x0f75be1a, 0x50143e22, +0x8840989b, 0x738ec50e, 0xe6b2783d, 0xf67899c8, 0x27ebed69, 0x6c415a16, 0x3a6cc2dc, 0xcd4e4e5d, +0x6cb12b2e, 0xdb88d7c0, 0x79cd1582, 0xbc422413, 0xe72ad2f4, 0x8eaac30f, 0x0bd86747, 0x6d87f69d, +0x15d62038, 0x4b375630, 0x0d51b859, 0x16db2cb2, 0xf210603a, 0x0abeb833, 0x55c694d0, 0xe57ca43b, +0x0ba94428, 0x1398a406, 0xe47d3889, 0x5a20203d, 0x250d7a1a, 0xd930ffec, 0x03992e79, 0xf2759376, +0x024ec121, 0x91fc3a2c, 0xb7e11cc5, 0x4ff7d459, 0xb8700134, 0xd6e61758, 0x4eba0a32, 0xb747e3ec, +0x7073fad7, 0xded80f99, 0x331e2f1b, 0xfa1f1bed, 0x056424a2, 0x1d1d95e0, 0x550b9ec8, 0x51ee2a38, +0x19525153, 
0xd70c4cd5, 0x0d6cd7ad, 0xe44d1cf2, 0x30dfecda, 0xdacd7fe8, 0x7321d795, 0xddf48ef4, +0xe271e6a4, 0x9c1feecb, 0x951fcd7b, 0x8acc5a03, 0x3fb83527, 0xe306de74, 0x7b9cd6ee, 0x8e140885, +0xd4c91e8d, 0xe8c39733, 0x0f02f87f, 0xfb06b1b9, 0x0dc9349c, 0xf76bae8e, 0x4f642a07, 0x3d48a9aa, +0xe3ea323a, 0xa1cd5c8a, 0x40aa0e70, 0x132042d3, 0xa9732f6c, 0xd15a00c4, 0x43d3b046, 0x9a51ebd4, +0xc46ee0ed, 0xe2a2148b, 0xf5c478f0, 0x1fb01cf3, 0xf4f321ec, 0xd973811f, 0x11ad11b9, 0x5c67adda + +soft_output0 = +0x81818181, 0x7f7f7f7f, 0x7f7f7f81, 0x7f817f81, 0x7f7f8181, 0x7f7f7f7f, 0x817f817f, 0x81817f81, +0x7f817f81, 0x7f817f81, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f81817f, 0x817f8181, 0x81817f81, +0x7f7f7f81, 0x817f7f81, 0x7f7f8181, 0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x817f8181, 0x7f7f8181, +0x81817f7f, 0x81817f7f, 0x81817f7f, 0x817f7f7f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f817f, 0x7f817f7f, +0x7f818181, 0x8181817f, 0x81818181, 0x7f817f7f, 0x7f7f817f, 0x81817f81, 0x7f7f8181, 0x817f817f, +0x7f817f81, 0x7f7f8181, 0x817f7f81, 0x81818181, 0x817f817f, 0x81817f7f, 0x7f7f8181, 0x7f7f8181, +0x8181817f, 0x817f7f81, 0x81818181, 0x817f8181, 0x7f7f817f, 0x7f7f7f81, 0x7f81817f, 0x7f81817f, +0x7f81817f, 0x8181817f, 0x7f7f7f81, 0x817f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x8181817f, 0x7f81817f, +0x817f7f81, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f8181, 0x7f7f8181, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, +0x81817f81, 0x81817f81, 0x81817f81, 0x817f817f, 0x7f817f7f, 0x7f81817f, 0x81818181, 0x8181817f, +0x7f817f7f, 0x817f7f7f, 0x7f7f817f, 0x817f7f81, 0x7f7f7f81, 0x81818181, 0x817f8181, 0x7f7f7f81, +0x7f817f81, 0x7f817f81, 0x81817f81, 0x7f7f7f7f, 0x7f7f8181, 0x81817f7f, 0x7f7f8181, 0x7f7f817f, +0x7f7f817f, 0x81817f7f, 0x8181817f, 0x817f8181, 0x81817f81, 0x817f7f7f, 0x817f7f81, 0x7f81817f, +0x7f7f7f7f, 0x7f817f81, 0x7f7f817f, 0x7f817f7f, 0x81818181, 0x817f7f81, 0x7f817f81, 0x7f817f81, +0x817f7f81, 0x7f818181, 0x7f81817f, 0x817f817f, 0x7f7f817f, 0x81818181, 0x7f818181, 0x8181817f, +0x817f7f81, 0x7f81817f, 0x7f7f7f7f, 0x81817f81, 0x817f8181, 0x7f818181, 0x817f8181, 0x81817f7f, +0x7f7f8181, 0x81817f81, 0x7f7f817f, 0x817f817f, 0x81818181, 0x7f81817f, 0x817f817f, 0x81818181, +0x817f8181, 0x817f8181, 0x817f8181, 0x7f817f81, 0x7f7f7f81, 0x7f7f8181, 0x7f817f7f, 0x7f7f817f, +0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x7f7f8181, 0x7f817f7f, 0x7f7f8181, 0x7f7f7f81, 0x817f7f7f, +0x7f817f7f, 0x817f817f, 0x7f817f81, 0x7f817f81, 0x7f7f8181, 0x81817f81, 0x8181817f, 0x81817f81, +0x817f7f81, 0x7f818181, 0x7f7f8181, 0x817f7f81, 0x7f817f81, 0x817f7f7f, 0x7f7f8181, 0x81817f7f, +0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x7f7f7f7f, 0x817f817f, 0x7f7f7f81, 0x7f7f7f81, 0x81817f7f, +0x8181817f, 0x817f8181, 0x81817f81, 0x7f7f7f81, 0x7f7f7f81, 0x81818181, 0x81817f81, 0x7f7f8181, +0x8181817f, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x8181817f, 0x8181817f, 0x81817f81, 0x817f8181, +0x81818181, 0x7f817f7f, 0x81817f81, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x8181817f, 0x817f8181, +0x817f7f81, 0x817f7f81, 0x7f817f81, 0x817f8181, 0x7f7f7f81, 0x7f81817f, 0x81818181, 0x8181817f, +0x7f7f8181, 0x7f7f7f81, 0x81817f7f, 0x7f818181, 0x817f7f7f, 0x7f817f7f, 0x7f7f7f81, 0x7f817f7f, +0x8181817f, 0x7f81817f, 0x817f7f7f, 0x817f817f, 0x7f7f7f7f, 0x7f817f81, 0x8181817f, 0x7f81817f, +0x7f81817f, 0x81817f7f, 0x817f7f81, 0x7f817f81, 0x7f81817f, 0x7f7f7f7f, 0x81817f81, 0x81817f7f, +0x81817f7f, 0x81817f7f, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x7f818181, 0x7f7f8181, 0x7f7f8181, +0x817f7f7f, 0x81818181, 0x7f818181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f817f, +0x7f7f8181, 0x7f817f7f, 0x7f817f7f, 0x817f8181, 
0x7f7f817f, 0x81817f81, 0x817f8181, 0x7f7f817f, +0x81817f7f, 0x7f7f7f81, 0x7f81817f, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x817f817f, 0x81817f7f, +0x817f817f, 0x817f7f81, 0x81818181, 0x81817f7f, 0x817f8181, 0x817f8181, 0x817f7f7f, 0x7f7f7f7f, +0x7f81817f, 0x7f818181, 0x817f817f, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f7f81, +0x7f7f7f7f, 0x7f7f8181, 0x7f817f81, 0x7f818181, 0x81817f81, 0x7f7f7f81, 0x817f7f81, 0x8181817f, +0x817f817f, 0x7f7f7f81, 0x7f818181, 0x81817f81, 0x817f7f81, 0x7f7f7f7f, 0x817f817f, 0x81817f7f, +0x817f7f7f, 0x81818181, 0x8181817f, 0x81817f7f, 0x7f7f7f7f, 0x817f8181, 0x7f817f81, 0x817f8181, +0x817f7f7f, 0x7f7f7f81, 0x817f8181, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x7f81817f, 0x817f817f, +0x7f817f7f, 0x7f818181, 0x7f818181, 0x7f817f7f, 0x81817f7f, 0x7f817f7f, 0x7f7f817f, 0x81817f81, +0x81817f81, 0x817f7f7f, 0x81817f7f, 0x81817f81, 0x7f818181, 0x7f7f7f7f, 0x7f818181, 0x7f7f817f, +0x7f817f81, 0x81817f81, 0x7f7f7f81, 0x817f8181, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x7f817f7f, +0x817f7f81, 0x7f7f817f, 0x7f7f8181, 0x7f7f8181, 0x81818181, 0x7f81817f, 0x7f81817f, 0x817f8181, +0x7f818181, 0x817f7f81, 0x817f7f7f, 0x7f817f81, 0x7f817f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f81, +0x8181817f, 0x7f81817f, 0x81818181, 0x7f7f7f81, 0x81817f81, 0x7f818181, 0x7f7f7f7f, 0x7f81817f, +0x8181817f, 0x8181817f, 0x7f7f7f7f, 0x7f818181, 0x817f7f81, 0x7f7f817f, 0x817f7f81, 0x81818181, +0x817f817f, 0x7f818181, 0x81817f81, 0x81818181, 0x817f7f81, 0x7f817f81, 0x7f7f8181, 0x817f817f, +0x817f7f7f, 0x817f8181, 0x81817f81, 0x81818181, 0x817f817f, 0x7f817f7f, 0x7f817f7f, 0x7f817f81, +0x817f7f7f, 0x81817f81, 0x817f7f81, 0x7f7f817f, 0x7f817f7f, 0x7f81817f, 0x8181817f, 0x81817f81, +0x81817f7f, 0x7f817f81, 0x7f81817f, 0x81818181, 0x7f7f817f, 0x817f7f7f, 0x7f817f81, 0x8181817f, +0x817f8181, 0x7f81817f, 0x81817f7f, 0x7f817f81, 0x81818181, 0x7f81817f, 0x7f81817f, 0x7f81817f, +0x7f7f817f, 0x7f817f7f, 0x7f81817f, 0x817f7f81, 0x7f81817f, 0x81818181, 0x7f7f8181, 0x7f818181, +0x8181817f, 0x81818181, 0x8181817f, 0x817f817f, 0x7f81817f, 0x81817f7f, 0x7f81817f, 0x7f817f7f, +0x81817f81, 0x7f81817f, 0x7f7f817f, 0x817f8181, 0x81818181, 0x7f817f81, 0x81817f81, 0x7f817f7f, +0x817f7f7f, 0x7f7f817f, 0x7f818181, 0x817f8181, 0x7f7f7f81, 0x81817f7f, 0x81817f7f, 0x7f7f7f7f, +0x7f818181, 0x7f7f8181, 0x7f81817f, 0x7f7f817f, 0x8181817f, 0x7f81817f, 0x8181817f, 0x81817f7f, +0x817f7f81, 0x817f817f, 0x7f7f7f7f, 0x817f8181, 0x7f7f8181, 0x817f817f, 0x7f817f7f, 0x81818181, +0x817f7f7f, 0x7f817f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f7f81, 0x7f7f8181, 0x81817f81, 0x81817f7f, +0x7f818181, 0x817f7f81, 0x7f818181, 0x817f817f, 0x7f7f7f81, 0x817f817f, 0x7f817f81, 0x7f7f7f81, +0x7f818181, 0x7f7f817f, 0x81818181, 0x817f7f81, 0x81817f7f, 0x8181817f, 0x817f8181, 0x8181817f, +0x7f818181, 0x7f817f81, 0x8181817f, 0x7f817f7f, 0x8181817f, 0x7f817f7f, 0x7f7f7f81, 0x817f7f7f, +0x817f8181, 0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x81817f7f, 0x7f7f8181, 0x81817f81, 0x7f7f7f7f, +0x7f818181, 0x7f818181, 0x7f7f7f7f, 0x8181817f, 0x7f817f7f, 0x81817f81, 0x81817f81, 0x7f7f817f, +0x7f7f7f7f, 0x817f817f, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f817f, 0x7f817f81, 0x81818181, 0x817f8181, +0x7f7f817f, 0x81817f7f, 0x7f7f7f7f, 0x7f81817f, 0x81817f7f, 0x7f817f7f, 0x81817f81, 0x7f7f7f81, +0x817f8181, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x7f7f7f81, 0x7f818181, 0x817f7f7f, 0x7f7f7f7f, +0x817f7f81, 0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x7f7f8181, 0x81817f81, 0x81817f81, 0x7f7f7f81, +0x8181817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x817f7f81, 0x7f7f8181, 0x7f7f7f81, 0x817f7f7f, +0x7f817f81, 
0x81818181, 0x81817f81, 0x7f81817f, 0x7f7f7f81, 0x817f817f, 0x8181817f, 0x7f818181, +0x817f817f, 0x81817f7f, 0x7f7f7f7f, 0x7f817f7f, 0x8181817f, 0x7f7f817f, 0x7f7f7f7f, 0x7f81817f, +0x7f817f7f, 0x7f7f8181, 0x7f7f8181, 0x7f7f8181, 0x81817f7f, 0x7f7f8181, 0x7f7f8181, 0x8181817f, +0x817f7f81, 0x7f81817f, 0x7f817f7f, 0x8181817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f8181, 0x7f817f7f, +0x817f817f, 0x7f81817f, 0x817f7f81, 0x8181817f, 0x81817f7f, 0x81818181, 0x817f817f, 0x7f7f817f, +0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x817f817f, 0x81817f81, 0x7f7f7f81, 0x7f81817f, 0x7f7f7f81, +0x8181817f, 0x817f7f7f, 0x7f7f817f, 0x7f817f7f, 0x7f7f7f81, 0x7f817f7f, 0x7f7f8181, 0x817f817f, +0x7f7f8181, 0x7f7f7f81, 0x817f8181, 0x7f7f8181, 0x81817f81, 0x8181817f, 0x7f7f817f, 0x7f817f81, +0x7f817f81, 0x7f817f81, 0x7f817f81, 0x817f8181, 0x7f7f7f7f, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, +0x7f7f817f, 0x7f817f7f, 0x7f7f7f7f, 0x817f7f81, 0x81817f7f, 0x81817f7f, 0x81818181, 0x7f81817f, +0x7f7f7f7f, 0x7f7f7f81, 0x7f817f81, 0x8181817f, 0x817f817f, 0x817f817f, 0x817f817f, 0x7f817f7f, +0x81818181, 0x7f817f7f, 0x7f7f817f, 0x81818181, 0x8181817f, 0x81817f7f, 0x81818181, 0x817f817f, +0x8181817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f818181, 0x7f7f7f81, 0x817f817f, 0x7f7f817f, 0x81817f7f, +0x8181817f, 0x817f817f, 0x7f7f817f, 0x7f81817f, 0x7f7f7f81, 0x8181817f, 0x817f7f7f, 0x817f817f, +0x817f7f7f, 0x7f7f7f7f, 0x81817f81, 0x817f7f81, 0x7f817f7f, 0x81817f7f, 0x81817f81, 0x7f817f81, +0x817f7f7f, 0x817f817f, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x81817f81, 0x81818181, 0x7f817f7f, +0x8181817f, 0x817f7f7f, 0x81817f7f, 0x7f817f7f, 0x817f7f7f, 0x817f7f81, 0x7f7f7f81, 0x81817f7f, +0x817f7f81, 0x7f817f81, 0x7f7f8181, 0x7f81817f, 0x81817f7f, 0x817f7f7f, 0x81818181, 0x7f7f817f, +0x817f817f, 0x81817f81, 0x817f7f81, 0x81817f7f, 0x7f7f7f81, 0x7f817f7f, 0x81817f7f, 0x817f817f, +0x7f817f7f, 0x7f817f7f, 0x81817f7f, 0x817f7f7f, 0x81818181, 0x8181817f, 0x7f7f8181, 0x7f817f7f, +0x7f817f81, 0x7f7f8181, 0x81817f81, 0x817f7f81, 0x7f818181, 0x7f817f81, 0x7f7f7f81, 0x8181817f, +0x81817f7f, 0x817f7f7f, 0x7f818181, 0x7f818181, 0x7f81817f, 0x7f7f8181, 0x817f7f81, 0x7f817f81, +0x81817f7f, 0x81818181, 0x7f81817f, 0x7f7f8181, 0x817f817f, 0x817f7f81, 0x81817f81, 0x817f7f81, +0x81818181, 0x817f7f7f, 0x7f818181, 0x81818181, 0x7f7f817f, 0x817f817f, 0x7f7f817f, 0x7f817f7f, +0x7f817f81, 0x817f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x817f8181, 0x817f817f, 0x7f818181, +0x817f7f81, 0x817f7f7f, 0x81817f81, 0x81818181, 0x7f81817f, 0x7f817f81, 0x7f817f81, 0x817f8181, +0x7f818181, 0x81817f81, 0x7f817f7f, 0x7f7f7f81, 0x7f7f817f, 0x817f7f81, 0x7f7f7f81, 0x81818181, +0x7f7f8181, 0x7f7f817f, 0x817f7f81, 0x81817f7f, 0x7f81817f, 0x7f81817f, 0x817f8181, 0x7f81817f, +0x8181817f, 0x81818181, 0x7f7f817f, 0x817f8181, 0x7f817f7f, 0x7f7f817f, 0x817f7f7f, 0x7f7f7f7f, +0x8181817f, 0x7f7f7f7f, 0x81817f81, 0x817f7f7f, 0x7f7f8181, 0x817f7f81, 0x7f7f817f, 0x7f81817f, +0x81817f81, 0x7f7f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f7f, 0x7f817f81, 0x7f817f81, 0x7f7f817f, +0x81818181, 0x7f818181, 0x817f7f81, 0x817f7f7f, 0x7f81817f, 0x7f7f8181, 0x7f7f8181, 0x81817f7f, +0x7f817f81, 0x817f817f, 0x7f817f7f, 0x81818181, 0x7f817f7f, 0x81817f7f, 0x7f817f81, 0x81817f81, +0x7f7f7f81, 0x81818181, 0x81817f81, 0x817f7f81, 0x7f7f7f7f, 0x817f7f81, 0x7f818181, 0x7f7f8181, +0x7f7f7f81, 0x7f817f7f, 0x7f818181, 0x7f7f817f, 0x817f7f7f, 0x817f8181, 0x817f8181, 0x817f7f81, +0x7f7f8181, 0x7f817f7f, 0x817f8181, 0x81817f81, 0x817f8181, 0x7f81817f, 0x7f7f817f, 0x81817f7f, +0x81817f7f, 0x7f817f7f, 0x81817f7f, 0x81817f7f, 0x7f817f81, 0x7f7f8181, 
0x7f7f817f, 0x7f7f7f7f, +0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x7f817f81, 0x81818181, 0x81817f81, 0x817f7f7f, 0x7f7f817f, +0x817f7f7f, 0x7f7f7f81, 0x81817f81, 0x817f8181, 0x7f7f817f, 0x7f7f817f, 0x7f7f8181, 0x817f7f81, +0x8181817f, 0x8181817f, 0x7f7f7f81, 0x7f7f817f, 0x81817f81, 0x7f7f7f7f, 0x817f8181, 0x817f817f, +0x7f7f7f81, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x7f81817f, 0x817f7f81, 0x7f7f7f7f, 0x7f7f7f81, +0x81817f7f, 0x8181817f, 0x7f81817f, 0x7f7f7f81, 0x8181817f, 0x81817f7f, 0x7f818181, 0x7f817f81, +0x8181817f, 0x7f81817f, 0x817f7f7f, 0x817f7f7f, 0x8181817f, 0x817f817f, 0x81817f81, 0x7f7f8181, +0x7f7f7f7f, 0x7f81817f, 0x817f817f, 0x81817f81, 0x81817f81, 0x81817f7f, 0x81817f81, 0x81817f81, +0x7f7f817f, 0x7f7f7f7f, 0x817f7f7f, 0x7f818181, 0x7f817f7f, 0x8181817f, 0x7f7f8181, 0x817f7f7f, +0x7f7f7f81, 0x7f817f81, 0x7f7f7f7f, 0x7f817f81, 0x7f81817f, 0x7f817f7f, 0x81817f7f, 0x7f817f7f, +0x81817f81, 0x7f7f7f81, 0x7f81817f, 0x81817f81, 0x7f817f81, 0x817f8181, 0x7f817f81, 0x7f817f7f, +0x81818181, 0x817f817f, 0x81817f81, 0x7f7f817f, 0x7f7f8181, 0x81818181, 0x8181817f, 0x7f817f81, +0x81817f7f, 0x7f7f7f7f, 0x7f7f817f, 0x7f81817f, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x7f817f81, +0x817f817f, 0x817f7f81, 0x81818181, 0x81817f7f, 0x817f7f81, 0x817f8181, 0x81817f7f, 0x81817f7f, +0x7f7f817f, 0x817f817f, 0x7f817f81, 0x7f817f81, 0x81818181, 0x817f817f, 0x7f7f7f81, 0x81817f7f, +0x817f817f, 0x7f7f7f7f, 0x7f7f8181, 0x817f7f7f, 0x817f7f81, 0x817f7f81, 0x817f8181, 0x8181817f, +0x81818181, 0x81817f7f, 0x81817f7f, 0x817f8181, 0x817f8181, 0x817f817f, 0x7f7f7f7f, 0x7f817f81, +0x8181817f, 0x7f818181, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x817f8181, 0x7f817f81, 0x81817f7f, +0x7f7f7f81, 0x81817f81, 0x7f818181, 0x81817f7f, 0x7f7f817f, 0x7f818181, 0x81818181, 0x7f818181, +0x7f7f7f81, 0x81817f7f, 0x7f81817f, 0x7f7f8181, 0x7f81817f, 0x7f817f7f, 0x7f817f7f, 0x7f817f7f, +0x7f7f817f, 0x7f817f81, 0x7f81817f, 0x7f7f7f81, 0x81818181, 0x7f7f8181, 0x817f8181, 0x7f817f81, +0x7f817f81, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, 0x817f7f7f, 0x7f81817f, 0x7f817f7f, +0x7f7f7f81, 0x817f8181, 0x7f81817f, 0x817f817f, 0x81817f81, 0x7f7f817f, 0x7f817f7f, 0x817f817f, +0x7f7f7f7f, 0x817f7f81, 0x817f8181, 0x7f7f7f7f, 0x7f817f81, 0x7f7f7f7f, 0x7f7f817f, 0x8181817f, +0x817f7f81, 0x7f818181, 0x7f817f7f, 0x8181817f, 0x7f7f817f, 0x7f817f7f, 0x817f817f, 0x817f7f7f, +0x817f817f, 0x81817f7f, 0x81818181, 0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x817f817f, 0x81817f81, +0x7f817f7f, 0x817f7f81, 0x7f7f7f81, 0x8181817f, 0x7f7f817f, 0x817f7f81, 0x817f7f81, 0x7f818181, +0x817f7f7f, 0x8181817f, 0x817f8181, 0x7f7f817f, 0x7f818181, 0x817f8181, 0x7f7f7f81, 0x817f8181, +0x8181817f, 0x7f817f81, 0x81817f7f, 0x817f8181, 0x817f817f, 0x817f7f81, 0x81818181, 0x81818181, +0x8181817f, 0x817f8181, 0x7f7f7f81, 0x7f7f8181, 0x817f817f, 0x7f818181, 0x817f8181, 0x7f7f817f, +0x7f7f7f81, 0x7f818181, 0x7f818181, 0x7f81817f, 0x81817f81, 0x7f818181, 0x7f7f8181, 0x7f817f7f, +0x8181817f, 0x7f817f7f, 0x817f7f81, 0x7f817f81, 0x8181817f, 0x81818181, 0x817f7f7f, 0x817f817f, +0x7f7f7f7f, 0x817f8181, 0x81818181, 0x817f7f7f, 0x817f7f81, 0x7f817f7f, 0x817f7f81, 0x81817f81, +0x7f818181, 0x7f817f81, 0x7f818181, 0x7f7f7f81, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x7f81817f, +0x81817f81, 0x817f7f7f, 0x817f817f, 0x7f7f817f, 0x7f7f8181, 0x817f7f7f, 0x7f7f817f, 0x7f817f7f, +0x7f81817f, 0x7f81817f, 0x817f7f7f, 0x7f817f7f, 0x817f7f81, 0x8181817f, 0x817f7f81, 0x7f818181, +0x7f818181, 0x817f7f81, 0x7f817f81, 0x7f7f7f7f, 0x81817f81, 0x81818181, 0x7f818181, 0x7f81817f, +0x81817f81, 0x817f7f7f, 0x7f7f817f, 
0x7f7f817f, 0x81817f81, 0x7f817f81, 0x7f7f8181, 0x81817f7f, +0x817f817f, 0x81817f7f, 0x7f7f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x817f7f81, 0x7f817f81, 0x817f7f7f, +0x7f7f8181, 0x7f7f817f, 0x8181817f, 0x7f818181, 0x7f817f81, 0x81818181, 0x817f817f, 0x7f7f7f7f, +0x81818181, 0x7f7f8181, 0x7f7f8181, 0x7f81817f, 0x8181817f, 0x8181817f, 0x81817f7f, 0x817f817f, +0x817f7f7f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x817f8181, 0x81818181, 0x7f7f7f7f, 0x7f7f817f, +0x817f7f7f, 0x7f7f817f, 0x817f8181, 0x7f817f81, 0x7f818181, 0x7f81817f, 0x7f81817f, 0x81818181, +0x7f81817f, 0x817f7f7f, 0x7f7f8181, 0x81818181, 0x7f817f81, 0x81818181, 0x7f817f81, 0x81818181, +0x817f8181, 0x7f7f7f81, 0x7f818181, 0x7f7f817f, 0x817f7f7f, 0x7f7f7f7f, 0x817f7f81, 0x817f7f81, +0x7f81817f, 0x817f7f7f, 0x81817f81, 0x817f7f81, 0x81818181, 0x7f7f7f7f, 0x81817f7f, 0x81817f81, +0x81817f81, 0x81817f81, 0x7f817f81, 0x817f817f, 0x817f817f, 0x817f817f, 0x7f7f817f, 0x81817f81, +0x7f7f817f, 0x8181817f, 0x817f7f81, 0x81818181, 0x817f7f7f, 0x817f8181, 0x7f7f817f, 0x817f7f7f, +0x8181817f, 0x81817f7f, 0x7f817f7f, 0x817f7f81, 0x81817f81, 0x817f7f7f, 0x81818181, 0x817f7f7f, +0x81817f81, 0x7f81817f, 0x7f81817f, 0x7f818181, 0x817f7f81, 0x817f817f, 0x7f81817f, 0x81818181, +0x81817f81, 0x81818181, 0x817f7f81, 0x81817f7f, 0x7f81817f, 0x8181817f, 0x8181817f, 0x81818181, +0x81818181, 0x81818181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f, 0x81817f81, 0x817f7f81, 0x7f818181, +0x817f7f7f, 0x7f817f81, 0x7f7f7f7f, 0x7f7f8181, 0x7f81817f, 0x7f817f81, 0x817f7f7f, 0x81817f81, +0x7f7f7f7f, 0x817f7f81, 0x817f7f7f, 0x7f817f81, 0x81817f7f, 0x81817f81, 0x817f7f81, 0x81817f81, +0x817f817f, 0x7f817f81, 0x81817f7f, 0x7f7f7f81, 0x7f7f8181, 0x7f7f817f, 0x8181817f, 0x7f81817f, +0x817f8181, 0x7f7f7f81, 0x817f817f, 0x7f7f817f, 0x7f7f817f, 0x7f817f7f, 0x7f818181, 0x81817f7f, +0x81817f7f, 0x8181817f, 0x817f817f, 0x7f7f817f, 0x7f817f81, 0x7f817f7f, 0x7f818181, 0x7f81817f, +0x817f7f7f, 0x7f817f7f, 0x81817f81, 0x8181817f, 0x8181817f, 0x7f817f81, 0x8181817f, 0x8181817f, +0x81817f7f, 0x817f7f7f, 0x7f7f7f7f, 0x7f817f7f, 0x81817f81, 0x7f7f7f81, 0x817f7f7f, 0x7f817f81, +0x7f818181, 0x7f817f81, 0x81818181, 0x817f817f, 0x817f817f, 0x7f818181, 0x817f8181, 0x7f7f817f, +0x7f7f7f81, 0x817f7f7f, 0x7f81817f, 0x7f81817f, 0x7f81817f, 0x81817f81, 0x817f817f, 0x7f7f817f, +0x7f817f81, 0x817f817f, 0x7f817f7f, 0x7f81817f, 0x8181817f, 0x817f7f7f, 0x8181817f, 0x7f7f8181, +0x7f818181, 0x7f817f81, 0x7f7f817f, 0x817f8181, 0x817f817f, 0x817f7f81, 0x7f7f7f81, 0x81817f81, +0x817f8181, 0x81818181, 0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x8181817f, 0x7f7f7f81, 0x7f7f7f81, +0x7f818181, 0x817f7f81, 0x7f817f7f, 0x81818181, 0x7f817f7f, 0x7f7f817f, 0x7f7f8181, 0x8181817f, +0x7f817f7f, 0x817f7f81, 0x7f7f7f81, 0x7f81817f, 0x81817f81, 0x7f817f81, 0x817f8181, 0x817f817f, +0x817f7f7f, 0x81817f7f, 0x7f818181, 0x81818181, 0x817f7f81, 0x81817f7f, 0x817f8181, 0x817f8181, +0x81817f7f, 0x8181817f, 0x7f7f817f, 0x7f7f817f, 0x7f7f817f, 0x7f817f81, 0x7f7f7f7f, 0x817f8181, +0x81817f7f, 0x7f818181, 0x7f7f7f81, 0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x7f817f81, 0x817f8181, +0x817f817f, 0x7f7f7f7f, 0x81818181, 0x81818181, 0x81818181, 0x7f817f7f, 0x81818181, 0x7f81817f, +0x7f817f81, 0x8181817f, 0x817f7f7f, 0x817f817f, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x81817f81, +0x7f7f8181, 0x7f817f7f, 0x817f7f7f, 0x7f81817f, 0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x817f7f81, +0x817f7f7f, 0x7f817f81, 0x7f817f7f, 0x817f817f, 0x8181817f, 0x7f7f8181, 0x7f818181, 0x8181817f, +0x7f817f7f, 0x8181817f, 0x7f817f81, 0x7f81817f, 0x7f7f7f81, 0x7f7f7f81, 0x817f817f, 0x81818181, 
+0x7f817f81, 0x7f81817f, 0x7f7f8181, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f81, 0x7f818181, 0x7f7f7f7f, +0x7f7f8181, 0x817f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f7f7f, 0x7f7f8181, +0x817f7f7f, 0x81817f81, 0x7f818181, 0x81817f7f, 0x7f81817f, 0x7f817f81, 0x7f817f81, 0x7f817f81, +0x7f7f817f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x8181817f, 0x817f7f7f, 0x7f817f7f, 0x817f8181, +0x7f817f7f, 0x817f8181, 0x7f818181, 0x81817f7f, 0x7f817f7f, 0x8181817f, 0x7f81817f, 0x7f7f7f7f, +0x7f81817f, 0x7f817f81, 0x817f817f, 0x817f7f7f, 0x7f7f7f7f, 0x7f818181, 0x817f7f7f, 0x7f81817f, +0x81817f81, 0x7f7f7f81, 0x7f818181, 0x7f817f81, 0x81817f81, 0x7f7f7f7f, 0x817f8181, 0x817f8181, +0x7f817f7f, 0x7f817f7f, 0x817f817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x8181817f, 0x81817f81, +0x817f8181, 0x817f8181, 0x7f7f8181, 0x7f7f8181, 0x8181817f, 0x7f817f7f, 0x8181817f, 0x7f81817f, +0x7f7f7f81, 0x817f817f, 0x81817f7f, 0x817f7f81, 0x81817f7f, 0x817f7f81, 0x81817f81, 0x7f817f81, +0x817f8181, 0x7f818181, 0x7f817f81, 0x81817f81, 0x81817f81, 0x817f8181, 0x7f818181, 0x81818181, +0x817f7f81, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x8181817f, 0x81818181, 0x817f7f81, 0x7f818181, +0x81817f81, 0x7f817f7f, 0x817f817f, 0x817f7f81, 0x81817f81, 0x7f817f81, 0x7f7f817f, 0x8181817f, +0x817f7f7f, 0x7f7f817f, 0x7f817f81, 0x817f7f7f, 0x7f7f817f, 0x81818181, 0x817f7f81, 0x7f7f8181, +0x7f7f8181, 0x8181817f, 0x7f7f8181, 0x817f8181, 0x817f7f7f, 0x817f7f7f, 0x7f7f7f7f, 0x81817f81, +0x817f7f81, 0x7f7f8181, 0x7f817f81, 0x817f7f81, 0x7f7f817f, 0x817f8181, 0x7f7f7f81, 0x81817f7f, +0x817f817f, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x817f817f, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, +0x7f7f8181, 0x817f817f, 0x7f7f7f81, 0x7f7f817f, 0x817f8181, 0x7f81817f, 0x7f81817f, 0x817f7f7f, +0x7f817f7f, 0x817f817f, 0x817f7f81, 0x817f7f81, 0x81817f81, 0x7f817f81, 0x817f817f, 0x7f81817f, +0x7f7f8181, 0x7f818181, 0x7f818181, 0x817f817f, 0x7f81817f, 0x817f8181, 0x8181817f, 0x7f817f7f, +0x817f8181, 0x817f7f81, 0x81817f81, 0x7f7f7f81, 0x817f7f81, 0x7f7f7f7f, 0x81818181, 0x7f7f8181, +0x7f817f7f, 0x7f7f8181, 0x81817f7f, 0x817f817f, 0x81818181, 0x817f7f7f, 0x8181817f, 0x81817f81, +0x817f817f, 0x817f817f, 0x7f7f817f, 0x7f7f7f81, 0x7f818181, 0x7f817f7f, 0x7f7f7f81, 0x817f817f, +0x7f818181, 0x7f7f8181, 0x7f817f7f, 0x81817f81, 0x7f7f8181, 0x817f7f81, 0x817f7f7f, 0x817f8181, +0x7f7f8181, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x817f7f81, 0x81817f81, 0x817f8181, 0x817f7f7f, +0x7f7f7f81, 0x817f8181, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x7f81817f, 0x7f7f7f7f, 0x81817f7f, +0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x7f7f7f81, 0x817f7f7f, 0x7f817f7f, 0x7f7f817f, 0x81817f7f, +0x8181817f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x817f8181, 0x7f7f7f81, 0x7f817f81, 0x7f817f7f, +0x7f818181, 0x817f7f7f, 0x81817f81, 0x817f8181, 0x7f818181, 0x81817f7f, 0x817f8181, 0x7f7f8181, +0x81817f7f, 0x7f7f7f7f, 0x81818181, 0x7f818181, 0x817f817f, 0x7f7f7f81, 0x817f7f81, 0x817f8181, +0x7f81817f, 0x81817f7f, 0x817f8181, 0x817f817f, 0x7f818181, 0x7f7f7f7f, 0x7f818181, 0x7f7f817f, +0x7f817f81, 0x817f8181, 0x7f7f7f7f, 0x7f818181, 0x7f7f7f81, 0x7f7f817f, 0x7f817f81, 0x817f8181, +0x81818181, 0x7f818181, 0x7f7f7f7f, 0x7f818181, 0x7f81817f, 0x7f7f817f, 0x81817f81, 0x7f7f8181, +0x81818181, 0x817f7f81, 0x7f817f81, 0x7f7f8181, 0x817f7f81, 0x81817f81, 0x7f817f7f, 0x817f817f, +0x7f81817f, 0x7f818181, 0x817f8181, 0x81817f7f, 0x817f8181, 0x7f7f7f81, 0x7f818181, 0x7f7f817f, +0x817f8181, 0x817f8181, 0x7f817f81, 0x7f81817f, 0x8181817f, 0x817f7f81, 0x817f817f, 0x81818181, +0x7f817f7f, 0x817f817f, 0x817f817f, 0x7f817f7f, 0x7f817f7f, 
0x817f817f, 0x7f81817f, 0x81818181, +0x7f7f7f7f, 0x7f7f7f7f, 0x817f7f81, 0x7f81817f, 0x7f7f7f81, 0x817f7f81, 0x817f8181, 0x7f818181, +0x7f7f7f7f, 0x7f7f817f, 0x7f7f7f81, 0x7f818181, 0x817f7f81, 0x81817f7f, 0x81817f7f, 0x81817f81, +0x7f7f7f81, 0x8181817f, 0x7f7f8181, 0x7f81817f, 0x7f7f7f81, 0x81817f7f, 0x7f7f7f7f, 0x7f7f817f, +0x7f817f7f, 0x81818181, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x817f817f, 0x81818181, 0x81817f81, +0x7f81817f, 0x7f7f8181, 0x7f81817f, 0x81818181, 0x81818181, 0x7f7f7f7f, 0x817f7f81, 0x7f81817f, +0x7f81817f, 0x7f7f8181, 0x7f818181, 0x7f818181, 0x7f817f7f, 0x8181817f, 0x81817f81, 0x817f7f81, +0x7f7f7f81, 0x7f81817f, 0x817f7f81, 0x817f8181, 0x7f81817f, 0x817f8181, 0x81817f7f, 0x7f817f81, +0x81818181, 0x7f817f81, 0x7f818181, 0x7f7f7f81, 0x7f7f8181, 0x8181817f, 0x81818181, 0x81818181, +0x81818181, 0x817f8181, 0x81817f81, 0x817f7f7f, 0x81817f81, 0x817f7f7f, 0x7f7f8181, 0x7f81817f, +0x7f7f7f81, 0x7f7f7f7f, 0x7f817f81, 0x81817f81, 0x817f817f, 0x7f7f7f81, 0x817f7f81, 0x817f7f7f, +0x7f817f7f, 0x81818181, 0x81818181, 0x7f7f817f, 0x7f817f81, 0x81817f81, 0x817f817f, 0x817f7f81, +0x81818181, 0x7f817f7f, 0x81817f7f, 0x81817f81, 0x817f817f, 0x817f7f7f, 0x7f7f817f, 0x7f7f817f, +0x7f7f8181, 0x817f7f81, 0x7f81817f, 0x7f817f7f, 0x817f7f7f, 0x817f7f7f, 0x817f7f7f, 0x7f7f7f7f, +0x7f7f7f7f, 0x7f7f7f81, 0x7f7f8181, 0x817f8181, 0x817f817f, 0x7f7f7f81, 0x81818181, 0x81817f81, +0x7f7f7f7f, 0x817f817f, 0x81818181, 0x7f7f817f, 0x7f818181, 0x7f7f8181, 0x817f8181, 0x817f7f7f, +0x7f7f8181, 0x7f817f7f, 0x817f8181, 0x817f8181, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, +0x7f818181, 0x7f817f7f, 0x81817f81, 0x7f817f7f, 0x817f7f7f, 0x817f7f7f, 0x817f817f, 0x7f7f7f7f, +0x817f8181, 0x7f7f7f7f, 0x7f81817f, 0x7f817f81, 0x81818181, 0x817f817f, 0x817f7f7f, 0x81817f7f, +0x7f81817f, 0x817f817f, 0x7f818181, 0x8181817f, 0x817f817f, 0x81818181, 0x817f7f7f, 0x817f7f81, +0x7f7f8181, 0x817f817f, 0x817f7f81, 0x7f81817f, 0x7f818181, 0x7f7f817f, 0x817f7f7f, 0x7f7f7f7f, +0x7f7f7f7f, 0x81817f81, 0x7f81817f, 0x7f7f7f7f, 0x7f7f7f81, 0x8181817f, 0x81818181, 0x81817f7f, +0x7f81817f, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x817f817f, 0x81817f81, +0x7f7f7f81, 0x817f7f81, 0x817f7f81, 0x81818181, 0x7f7f8181, 0x81817f81, 0x7f817f7f, 0x7f818181, +0x7f818181, 0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x81817f81, 0x817f7f7f, 0x817f8181, 0x7f7f817f, +0x81818181, 0x81817f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f818181, 0x7f7f817f, 0x817f7f81, 0x817f7f7f, +0x81817f7f, 0x7f81817f, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x7f7f8181, 0x8181817f, 0x817f7f81, +0x7f7f7f81, 0x7f817f81, 0x8181817f, 0x7f7f7f7f, 0x7f817f81, 0x7f7f7f81, 0x817f7f7f, 0x81817f81, +0x7f817f7f, 0x817f7f81, 0x817f817f, 0x81817f7f, 0x7f818181, 0x7f81817f, 0x7f817f81, 0x7f817f81, +0x7f81817f, 0x7f7f7f7f, 0x7f817f81, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f817f, +0x7f7f7f81, 0x7f818181, 0x7f818181, 0x7f818181, 0x7f7f8181, 0x817f7f81, 0x7f817f7f, 0x81818181, +0x7f81817f, 0x817f817f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f81, +0x81817f81, 0x7f81817f, 0x7f7f7f81, 0x81817f81, 0x7f818181, 0x81817f81, 0x7f7f7f7f, 0x7f81817f, +0x81817f81, 0x7f817f7f, 0x7f81817f, 0x81817f81, 0x81818181, 0x7f81817f, 0x7f7f817f, 0x81817f7f, +0x7f7f817f, 0x8181817f, 0x81817f7f, 0x7f81817f, 0x81817f81, 0x8181817f, 0x7f817f81, 0x81817f7f, +0x817f7f7f, 0x7f7f8181, 0x817f8181, 0x7f81817f, 0x7f7f817f, 0x817f817f, 0x817f7f81, 0x7f7f817f, +0x7f7f8181, 0x7f818181, 0x7f7f8181, 0x81817f7f, 0x8181817f, 0x817f7f7f, 0x7f7f8181, 0x7f817f81, +0x817f817f, 0x7f817f7f, 
0x81817f81, 0x7f7f817f, 0x81817f81, 0x7f7f817f, 0x81818181, 0x81817f81, +0x81817f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x817f817f, 0x7f7f8181, 0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f, +0x7f81817f, 0x817f7f81, 0x817f7f81, 0x7f7f7f7f, 0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x7f7f817f, +0x7f817f7f, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x81818181, 0x8181817f, 0x7f7f7f81, 0x7f818181, +0x8181817f, 0x7f7f7f7f, 0x817f817f, 0x7f818181, 0x81817f81, 0x7f818181, 0x7f81817f, 0x817f7f81, +0x81818181, 0x7f7f7f81, 0x81817f81, 0x7f7f8181, 0x7f81817f, 0x81817f7f, 0x8181817f, 0x7f817f7f, +0x7f7f8181, 0x7f7f817f, 0x7f817f7f, 0x817f817f, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f818181, +0x81817f81, 0x7f818181, 0x81817f81, 0x7f818181, 0x7f7f7f81, 0x817f817f, 0x81817f7f, 0x7f7f7f81, +0x817f817f, 0x7f817f81, 0x7f817f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f817f81, 0x8181817f, 0x817f8181, +0x7f7f7f7f, 0x8181817f, 0x817f8181, 0x81818181, 0x7f818181, 0x81818181, 0x7f7f7f81, 0x817f817f, +0x81817f81, 0x8181817f, 0x8181817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x8181817f, 0x817f7f7f, +0x7f817f7f, 0x7f7f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x81817f81, 0x81817f7f, 0x8181817f, +0x817f817f, 0x7f817f7f, 0x817f7f7f, 0x7f7f7f81, 0x817f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f7f, +0x817f817f, 0x817f8181, 0x7f7f7f81, 0x7f7f7f81, 0x81817f7f, 0x7f818181, 0x7f7f8181, 0x817f817f, +0x81818181, 0x7f818181, 0x817f7f81, 0x817f8181, 0x817f817f, 0x7f817f7f, 0x7f7f7f7f, 0x8181817f, +0x7f7f7f7f, 0x7f817f7f, 0x817f817f, 0x817f817f, 0x7f817f81, 0x81818181, 0x7f817f7f, 0x7f7f8181, +0x7f7f8181, 0x81817f7f, 0x81817f7f, 0x7f7f7f81, 0x817f7f7f, 0x7f7f817f, 0x81817f7f, 0x7f7f817f, +0x817f7f81, 0x8181817f, 0x81817f81, 0x7f7f8181, 0x7f7f8181, 0x7f7f7f81, 0x81817f7f, 0x7f7f817f, +0x81817f81, 0x7f817f81, 0x81817f7f, 0x7f7f7f7f, 0x7f81817f, 0x7f7f8181, 0x7f817f7f, 0x7f7f8181, +0x817f817f, 0x817f7f7f, 0x7f817f7f, 0x817f8181, 0x817f7f7f, 0x7f81817f, 0x7f7f817f, 0x817f817f, +0x7f7f8181, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x7f7f817f, 0x81817f81, 0x7f817f81, 0x7f817f7f, +0x817f7f81, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x81817f81, 0x7f817f7f, 0x817f7f7f, 0x7f7f7f7f, +0x7f817f81, 0x7f7f7f7f, 0x81817f7f, 0x7f7f7f7f, 0x7f818181, 0x817f7f81, 0x817f817f, 0x8181817f, +0x817f7f7f, 0x817f8181, 0x8181817f, 0x7f817f7f, 0x7f817f7f, 0x817f7f81, 0x81817f7f, 0x7f7f8181, +0x81818181, 0x81818181, 0x7f81817f, 0x7f817f81, 0x7f817f81, 0x817f7f81, 0x8181817f, 0x7f81817f, +0x7f7f817f, 0x7f818181, 0x81817f7f, 0x7f817f81, 0x817f8181, 0x7f7f7f81, 0x7f7f8181, 0x817f7f7f, +0x7f818181, 0x7f7f8181, 0x817f8181, 0x7f817f81, 0x7f818181, 0x81817f81, 0x817f7f7f, 0x7f818181, +0x7f817f81, 0x817f7f81, 0x817f817f, 0x7f7f7f7f, 0x817f817f, 0x81818181, 0x7f817f7f, 0x7f817f81, +0x817f7f81, 0x7f7f817f, 0x81817f81, 0x7f81817f, 0x7f7f8181, 0x817f7f81, 0x817f817f, 0x81817f81, +0x817f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x7f818181, 0x7f818181, 0x7f7f7f7f, 0x817f7f81, +0x817f7f81, 0x7f7f7f81, 0x8181817f, 0x7f7f7f7f, 0x817f817f, 0x7f817f7f, 0x817f817f, 0x7f7f7f7f, +0x8181817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f817f7f, 0x7f817f7f, 0x7f817f7f, 0x817f8181, 0x7f81817f, +0x7f817f7f, 0x81817f81, 0x8181817f, 0x7f7f7f7f, 0x7f7f8181, 0x817f7f81, 0x7f817f7f, 0x817f7f7f, +0x81817f81, 0x817f7f7f, 0x81817f81, 0x7f81817f, 0x7f818181, 0x7f818181, 0x7f818181, 0x7f81817f, +0x7f7f8181, 0x817f7f81, 0x817f8181, 0x7f817f81, 0x7f817f7f, 0x81818181, 0x7f817f81, 0x81818181, +0x81817f7f, 0x81818181, 0x817f7f81, 0x81817f81, 0x81817f81, 0x7f7f7f7f, 0x81817f7f, 0x7f81817f, +0x7f7f8181, 0x81818181, 0x7f7f7f7f, 0x7f7f8181, 0x81817f7f, 0x81817f81, 0x7f7f7f81, 
0x8181817f, +0x8181817f, 0x7f7f817f, 0x81817f81, 0x817f817f, 0x817f7f7f, 0x817f817f, 0x817f817f, 0x7f81817f, +0x8181817f, 0x817f7f7f, 0x7f818181, 0x817f7f7f, 0x817f8181, 0x7f7f8181, 0x817f8181, 0x817f817f, +0x817f7f81, 0x7f7f7f7f, 0x7f7f7f81, 0x8181817f, 0x7f818181, 0x7f7f8181, 0x7f81817f, 0x817f8181, +0x7f81817f, 0x7f7f8181, 0x8181817f, 0x817f7f7f, 0x817f817f, 0x8181817f, 0x8181817f, 0x81817f81, +0x7f7f817f, 0x7f817f7f, 0x817f817f, 0x7f81817f, 0x7f817f7f, 0x81817f7f, 0x7f7f8181, 0x817f7f7f, +0x7f7f7f81, 0x817f8181, 0x7f817f81, 0x817f7f81, 0x817f7f81, 0x81818181, 0x81818181, 0x81818181, +0x7f7f8181, 0x7f81817f, 0x7f817f7f, 0x7f817f7f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f81, +0x7f817f81, 0x817f8181, 0x7f81817f, 0x81817f81, 0x7f7f7f81, 0x817f7f81, 0x817f7f7f, 0x817f8181, +0x7f7f8181, 0x7f817f7f, 0x7f818181, 0x7f7f817f, 0x7f81817f, 0x8181817f, 0x8181817f, 0x7f81817f, +0x7f817f81, 0x7f7f817f, 0x817f817f, 0x7f7f7f81, 0x817f8181, 0x7f81817f, 0x817f7f81, 0x817f817f, +0x817f817f, 0x7f817f81, 0x7f817f7f, 0x7f81817f, 0x817f817f, 0x7f7f8181, 0x7f81817f, 0x817f817f, +0x8181817f, 0x81818181, 0x81817f81, 0x817f8181, 0x81818181, 0x81817f7f, 0x817f8181, 0x7f7f8181, +0x7f7f8181, 0x7f7f817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x81817f7f, 0x7f7f7f7f, 0x817f817f, +0x817f7f7f, 0x7f7f817f, 0x7f817f81, 0x7f7f817f, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x7f817f7f, +0x7f81817f, 0x7f7f8181, 0x7f817f81, 0x817f7f7f, 0x7f7f817f, 0x7f818181, 0x7f7f817f, 0x81818181, +0x7f81817f, 0x817f8181, 0x81818181, 0x81817f81, 0x817f817f, 0x81817f7f, 0x7f7f8181, 0x7f7f7f81, +0x81817f7f, 0x7f817f81, 0x7f817f7f, 0x7f7f7f7f, 0x7f7f8181, 0x817f7f7f, 0x7f81817f, 0x7f818181, +0x7f818181, 0x817f7f7f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x817f7f81, 0x7f7f7f81, 0x817f7f81, +0x7f817f7f, 0x7f7f7f7f, 0x7f818181, 0x7f817f81, 0x8181817f, 0x817f7f7f, 0x7f817f81, 0x81818181, +0x7f7f817f, 0x81817f81, 0x8181817f, 0x7f7f7f81, 0x81817f7f, 0x7f817f81, 0x81818181, 0x817f817f, +0x7f7f7f81, 0x7f81817f, 0x7f7f817f, 0x817f7f81, 0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x8181817f, +0x7f7f7f7f, 0x81817f81, 0x7f7f7f7f, 0x7f7f7f7f, 0x81817f7f, 0x7f817f81, 0x7f81817f, 0x81817f81, +0x817f817f, 0x81817f81, 0x81817f7f, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f817f, 0x7f818181, 0x817f817f, +0x7f7f7f7f, 0x7f817f7f, 0x8181817f, 0x817f7f7f, 0x7f7f817f, 0x7f81817f, 0x7f7f7f81, 0x817f7f7f, +0x7f817f81, 0x7f7f8181, 0x8181817f, 0x7f818181, 0x7f817f7f, 0x817f817f, 0x7f7f7f81, 0x817f817f, +0x7f7f817f, 0x7f7f8181, 0x7f7f7f7f, 0x7f7f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f7f, 0x7f81817f, +0x7f7f817f, 0x7f817f7f, 0x7f818181, 0x7f7f817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x7f817f81, +0x7f817f7f, 0x817f817f, 0x7f7f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x81818181, 0x817f7f7f, +0x817f7f81, 0x817f7f7f, 0x817f7f7f, 0x8181817f, 0x7f7f7f81, 0x7f7f8181, 0x817f7f81, 0x817f7f81, +0x7f817f7f, 0x817f7f7f, 0x817f817f, 0x8181817f, 0x7f818181, 0x7f7f817f, 0x7f817f81, 0x7f7f817f, +0x7f817f7f, 0x7f817f81, 0x8181817f, 0x81818181, 0x7f7f7f7f, 0x7f81817f, 0x81817f7f, 0x7f817f7f, +0x817f8181, 0x7f818181, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x7f818181, 0x81818181, 0x7f7f7f7f, +0x8181817f, 0x81817f81, 0x817f7f81, 0x81818181, 0x81817f7f, 0x8181817f, 0x7f817f7f, 0x7f7f7f7f, +0x817f7f81, 0x817f7f81, 0x817f7f7f, 0x7f7f7f81, 0x7f7f817f, 0x81817f7f, 0x7f7f8181, 0x7f7f817f, +0x81818181, 0x7f7f7f81, 0x7f7f817f, 0x7f7f8181, 0x8181817f, 0x7f818181, 0x817f817f, 0x7f7f7f81, +0x817f7f7f, 0x7f817f7f, 0x817f7f81, 0x8181817f, 0x7f81817f, 0x817f817f, 0x8181817f, 0x7f7f7f7f, +0x817f7f7f, 0x7f818181, 0x817f7f81, 0x7f7f8181, 
0x817f7f7f, 0x8181817f, 0x817f8181, 0x8181817f, +0x81817f81, 0x817f8181, 0x817f7f81, 0x7f7f8181, 0x817f8181, 0x7f817f81, 0x7f7f7f81, 0x7f818181, +0x7f817f7f, 0x7f7f8181, 0x7f7f817f, 0x81817f81, 0x81817f7f, 0x81817f7f, 0x81817f7f, 0x817f817f, +0x7f81817f, 0x7f7f7f7f, 0x817f8181, 0x8181817f, 0x7f817f81, 0x7f817f7f, 0x817f7f7f, 0x7f7f8181, +0x7f7f8181, 0x7f7f7f81, 0x7f818181, 0x81817f7f, 0x8181817f, 0x817f7f81, 0x7f7f7f7f, 0x7f81817f, +0x817f8181, 0x7f81817f, 0x8181817f, 0x81818181, 0x7f817f81, 0x81817f81, 0x7f7f7f81, 0x817f7f7f, +0x81818181, 0x7f7f7f7f, 0x81818181, 0x81818181, 0x7f7f8181, 0x7f817f81, 0x81818181, 0x817f8181, +0x81817f81, 0x7f81817f, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x81818181, 0x81817f81, 0x81817f81, +0x7f7f8181, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f, 0x7f7f8181, 0x7f818181, 0x81818181, 0x7f7f7f7f, +0x81817f7f, 0x817f7f81, 0x7f7f817f, 0x817f8181, 0x817f8181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f, +0x7f7f7f81, 0x817f7f81, 0x7f81817f, 0x817f7f81, 0x817f817f, 0x7f7f7f7f, 0x81817f7f, 0x81817f81, +0x81818181, 0x7f817f7f, 0x81817f81, 0x7f7f7f81, 0x817f7f7f, 0x81818181, 0x7f7f817f, 0x7f818181, +0x7f7f7f7f, 0x81818181, 0x7f7f7f81, 0x81818181, 0x8181817f, 0x7f817f81, 0x7f7f7f7f, 0x7f817f81, +0x7f7f7f7f, 0x7f7f7f81, 0x81817f7f, 0x8181817f, 0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x7f7f817f, +0x7f7f7f81, 0x8181817f, 0x81817f7f, 0x7f81817f, 0x7f817f81, 0x817f7f81, 0x7f7f817f, 0x81817f81, +0x817f7f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f, 0x817f7f81, 0x817f8181, 0x7f7f7f7f, +0x7f818181, 0x81817f7f, 0x7f817f7f, 0x817f817f, 0x7f7f8181, 0x7f7f817f, 0x817f7f81, 0x7f7f8181, +0x817f7f81, 0x81817f81, 0x817f8181, 0x817f817f, 0x81818181, 0x7f817f81, 0x81817f81, 0x7f7f8181, +0x7f7f7f81, 0x817f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x81818181, 0x7f7f7f7f, 0x7f817f7f, 0x7f7f7f7f, +0x81818181, 0x8181817f, 0x7f7f7f7f, 0x7f7f817f, 0x817f8181, 0x817f817f, 0x817f7f7f, 0x817f8181, +0x7f7f8181, 0x7f7f8181, 0x817f817f, 0x7f817f81, 0x817f817f, 0x7f7f7f81, 0x817f7f7f, 0x7f7f817f, +0x7f7f7f7f, 0x817f8181, 0x817f8181, 0x7f7f817f, 0x81817f81, 0x7f7f8181, 0x7f81817f, 0x81817f7f, +0x817f7f7f, 0x817f817f, 0x7f7f8181, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f81, 0x7f81817f, +0x81817f7f, 0x817f7f7f, 0x7f817f81, 0x81818181, 0x7f7f817f, 0x817f7f81, 0x7f7f817f, 0x7f817f81, +0x81817f81, 0x81818181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f81, 0x81817f7f, 0x81818181, 0x817f8181, +0x7f7f817f, 0x7f817f7f, 0x7f817f81, 0x817f7f81, 0x7f7f817f, 0x7f818181, 0x7f817f81, 0x7f817f81, +0x7f818181, 0x81817f7f, 0x817f7f81, 0x8181817f, 0x7f7f7f81, 0x81817f7f, 0x7f817f81, 0x8181817f, +0x7f7f817f, 0x817f817f, 0x81817f81, 0x81817f81, 0x81817f7f, 0x7f818181, 0x817f7f81, 0x817f7f81, +0x7f7f7f7f, 0x7f81817f, 0x7f817f81, 0x7f7f817f, 0x7f818181, 0x8181817f, 0x81817f81, 0x7f817f81, +0x7f81817f, 0x817f7f81, 0x7f818181, 0x817f817f, 0x817f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f, +0x81817f7f, 0x7f7f8181, 0x81817f81, 0x81817f7f, 0x8181817f, 0x7f7f817f, 0x8181817f, 0x7f818181, +0x7f817f7f, 0x817f7f7f, 0x81817f81, 0x7f81817f, 0x81818181, 0x7f7f7f81, 0x7f7f817f, 0x7f7f7f81, +0x817f7f81, 0x817f7f7f, 0x7f817f7f, 0x7f7f8181, 0x7f817f81, 0x7f817f81, 0x7f81817f, 0x81818181, +0x7f7f7f81, 0x7f7f7f81, 0x81818181, 0x817f8181, 0x7f7f8181, 0x817f8181, 0x817f7f81, 0x8181817f, +0x81818181, 0x8181817f, 0x8181817f, 0x817f7f81, 0x7f7f8181, 0x817f7f81, 0x7f817f7f, 0x817f7f7f, +0x81818181, 0x81818181, 0x81817f7f, 0x7f81817f, 0x7f817f81, 0x81817f81, 0x7f7f7f7f, 0x817f817f, +0x7f7f7f7f, 0x7f817f7f, 0x817f8181, 0x7f7f7f81, 0x817f7f7f, 0x7f817f81, 0x7f7f817f, 0x8181817f, +0x817f7f81, 
0x7f81817f, 0x817f817f, 0x7f817f7f, 0x81817f81, 0x7f7f817f, 0x817f8181, 0x817f7f7f, +0x81818181, 0x817f7f81, 0x7f81817f, 0x817f817f, 0x817f817f, 0x817f7f7f, 0x7f7f817f, 0x8181817f, +0x7f7f7f7f, 0x81817f81, 0x8181817f, 0x7f81817f, 0x817f7f81, 0x7f7f7f7f, 0x817f7f7f, 0x817f817f, +0x81818181, 0x8181817f, 0x81817f81, 0x81817f7f, 0x7f7f817f, 0x81817f81, 0x7f7f817f, 0x8181817f, +0x7f818181, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x81817f7f, 0x7f818181, 0x81818181, 0x8181817f, +0x817f817f, 0x7f7f7f7f, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f81, 0x7f7f817f, 0x7f818181, 0x81818181, +0x81818181, 0x7f817f81, 0x7f7f817f, 0x7f817f7f, 0x81817f7f, 0x7f7f817f, 0x8181817f, 0x7f7f7f7f, +0x7f7f8181, 0x7f7f817f, 0x7f7f7f7f, 0x817f817f, 0x7f7f7f7f, 0x817f7f81, 0x817f8181, 0x81818181, +0x7f818181, 0x7f818181, 0x8181817f, 0x7f7f7f7f, 0x8181817f, 0x7f817f7f, 0x7f7f817f, 0x81818181, +0x7f817f7f, 0x817f8181, 0x817f7f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f8181, +0x81817f81, 0x81818181, 0x7f7f817f, 0x817f7f81, 0x7f81817f, 0x8181817f, 0x81818181, 0x81817f7f, +0x7f81817f, 0x81818181, 0x7f7f8181, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x817f817f, 0x7f818181, +0x7f817f7f, 0x7f7f7f7f, 0x817f817f, 0x817f7f7f, 0x7f81817f, 0x817f7f81, 0x81818181, 0x7f7f817f, +0x817f817f, 0x81817f81, 0x817f7f81, 0x81817f7f, 0x7f81817f, 0x7f81817f, 0x7f817f81, 0x81817f81, +0x7f7f817f, 0x8181817f, 0x7f818181, 0x7f7f7f7f, 0x81818181, 0x8181817f, 0x7f817f81, 0x8181817f, +0x7f81817f, 0x7f7f8181, 0x817f817f, 0x817f7f81, 0x7f817f81, 0x81817f7f, 0x7f818181, 0x81818181, +0x81817f81, 0x817f7f81, 0x7f81817f, 0x817f817f, 0x81817f81, 0x7f7f7f81, 0x7f818181, 0x81817f81, +0x817f7f81, 0x7f7f817f, 0x817f8181, 0x7f817f7f, 0x817f817f, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f817f, +0x817f817f, 0x817f817f, 0x817f8181, 0x7f7f7f81, 0x817f7f81, 0x817f8181, 0x7f7f7f81, 0x8181817f, +0x81817f7f, 0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x817f817f, 0x81817f7f, 0x7f7f7f7f, 0x7f81817f, +0x7f817f7f, 0x81818181, 0x81818181, 0x7f7f7f81, 0x81817f7f, 0x8181817f, 0x7f818181, 0x7f817f7f, +0x81817f81, 0x817f7f7f, 0x7f7f8181, 0x7f817f7f, 0x81817f7f, 0x8181817f, 0x817f8181, 0x817f7f81, +0x7f7f7f7f, 0x81817f7f, 0x81817f81, 0x81818181, 0x81817f7f, 0x817f817f, 0x817f817f, 0x7f7f817f, +0x7f81817f, 0x81818181, 0x7f817f81, 0x7f7f8181, 0x7f7f817f, 0x7f7f7f81, 0x81817f7f, 0x817f8181, +0x7f817f7f, 0x8181817f, 0x817f8181, 0x817f7f7f, 0x7f7f7f81, 0x8181817f, 0x817f7f81, 0x7f817f81, +0x7f7f8181, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f817f7f, 0x7f7f7f81, 0x7f817f7f, 0x8181817f, +0x7f81817f, 0x817f817f, 0x7f817f81, 0x8181817f, 0x7f818181, 0x81817f81, 0x7f818181, 0x81818181, +0x7f7f7f81, 0x817f7f7f, 0x817f8181, 0x7f817f7f, 0x7f817f81, 0x817f7f81, 0x81818181, 0x7f7f7f81, +0x7f7f7f81, 0x7f7f7f81, 0x7f81817f, 0x817f817f, 0x7f7f8181, 0x7f817f7f, 0x7f817f81, 0x7f818181, +0x7f7f8181, 0x817f8181, 0x7f817f7f, 0x7f7f8181, 0x7f7f7f81, 0x817f8181, 0x7f817f7f, 0x7f817f7f, +0x7f7f817f, 0x7f7f7f81, 0x817f7f7f, 0x7f817f7f, 0x817f817f, 0x7f817f81, 0x7f7f8181, 0x81817f81, +0x7f7f8181, 0x7f818181, 0x817f7f81, 0x7f818181, 0x81817f7f, 0x817f7f7f, 0x7f7f8181, 0x81817f7f, +0x7f817f7f, 0x7f818181, 0x7f81817f, 0x81818181, 0x7f7f817f, 0x7f818181, 0x7f817f7f, 0x81817f81, +0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f, 0x7f817f81, 0x7f81817f, +0x7f7f7f81, 0x7f7f7f81, 0x817f817f, 0x7f7f7f81, 0x81818181, 0x7f7f7f81, 0x8181817f, 0x817f7f81, +0x7f7f817f, 0x817f817f, 0x817f8181, 0x7f7f8181, 0x81817f7f, 0x7f817f81, 0x8181817f, 0x7f81817f, +0x7f818181, 0x7f818181, 0x7f817f81, 0x7f7f7f7f, 0x7f81817f, 0x817f7f81, 
0x7f7f7f81, 0x81818181, +0x7f7f7f81, 0x817f8181, 0x7f81817f, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x7f7f7f81, 0x7f81817f, +0x81817f81, 0x7f817f81, 0x81817f7f, 0x81818181, 0x817f817f, 0x817f7f7f, 0x817f8181, 0x81818181, +0x7f81817f, 0x81818181, 0x81818181, 0x7f818181, 0x7f7f7f7f, 0x81817f81, 0x8181817f, 0x7f7f817f, +0x817f7f81, 0x817f7f81, 0x817f8181, 0x7f7f8181, 0x817f8181, 0x7f7f7f81, 0x817f7f81, 0x81818181, +0x8181817f, 0x81817f81, 0x7f7f8181, 0x7f7f8181, 0x7f7f8181, 0x817f817f, 0x7f817f7f, 0x7f81817f, +0x81817f7f, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x7f7f7f81, 0x817f7f81, 0x81818181, 0x7f7f7f81, +0x7f7f8181, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x81817f81, 0x7f81817f, 0x81817f81, 0x7f7f8181, +0x817f817f, 0x7f817f81, 0x7f7f8181, 0x817f817f, 0x7f817f7f, 0x817f7f81, 0x7f7f7f81, 0x7f7f817f, +0x817f8181, 0x7f7f817f, 0x8181817f, 0x81818181, 0x81817f7f, 0x7f817f7f, 0x7f7f817f, 0x81817f7f, +0x7f7f817f, 0x7f81817f, 0x81818181, 0x81817f81, 0x8181817f, 0x7f817f7f, 0x81818181, 0x7f7f7f81, +0x7f7f7f7f, 0x81817f7f, 0x81817f81, 0x7f7f817f, 0x7f7f8181, 0x8181817f, 0x81818181, 0x7f7f7f81, +0x7f817f7f, 0x7f817f7f, 0x817f7f81, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x7f817f81, 0x817f8181, +0x7f817f7f, 0x7f817f7f, 0x7f7f817f, 0x7f7f817f, 0x7f817f7f, 0x7f817f7f, 0x7f818181, 0x817f7f81, +0x7f81817f, 0x7f817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f7f, 0x81817f7f, 0x7f81817f, 0x81818181, +0x7f7f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x81817f81, 0x8181817f, 0x7f818181, 0x81817f81, +0x817f7f81, 0x817f7f7f, 0x7f7f7f81, 0x7f7f7f7f, 0x7f7f8181, 0x7f81817f, 0x7f7f817f, 0x7f7f817f, +0x817f7f81, 0x7f7f7f7f, 0x81817f81, 0x817f817f, 0x817f817f, 0x817f817f, 0x7f81817f, 0x7f7f8181, +0x7f818181, 0x817f8181, 0x7f7f8181, 0x7f817f81, 0x81818181, 0x7f817f81, 0x817f7f7f, 0x7f817f7f, +0x7f7f8181, 0x817f8181, 0x7f818181, 0x81817f81, 0x7f818181, 0x8181817f, 0x8181817f, 0x7f818181, +0x8181817f, 0x7f7f7f7f, 0x7f7f817f, 0x7f818181, 0x7f7f7f81, 0x7f7f817f, 0x81818181, 0x817f7f7f, +0x8181817f, 0x817f817f, 0x7f7f8181, 0x7f7f7f81, 0x81818181, 0x8181817f, 0x817f817f, 0x817f817f, +0x7f818181, 0x7f7f8181, 0x8181817f, 0x817f817f, 0x81817f7f, 0x81818181, 0x7f81817f, 0x817f817f, +0x7f7f7f7f, 0x7f817f81, 0x7f818181, 0x817f7f7f, 0x817f7f81, 0x7f817f81, 0x7f7f817f, 0x7f7f7f7f, +0x81817f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f817f, 0x8181817f, 0x7f81817f, 0x7f818181, 0x7f7f7f7f, +0x7f7f7f7f, 0x7f7f7f81, 0x7f818181, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x817f7f81, 0x7f817f7f, +0x817f7f7f, 0x7f7f7f81, 0x7f7f8181, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, 0x7f817f7f, 0x7f7f7f81, +0x817f817f, 0x817f7f7f, 0x81818181, 0x7f818181, 0x81818181, 0x81818181, 0x7f7f7f81, 0x8181817f, +0x81818181, 0x7f817f7f, 0x817f7f81, 0x7f7f8181, 0x7f818181, 0x8181817f, 0x8181817f, 0x817f7f81, +0x7f817f7f, 0x81817f7f, 0x817f8181, 0x7f817f7f, 0x7f81817f, 0x81817f7f, 0x7f817f7f, 0x817f7f7f, +0x817f7f7f, 0x7f817f81, 0x8181817f, 0x817f7f7f, 0x7f817f81, 0x81817f81, 0x817f817f, 0x7f7f7f81, +0x7f817f81, 0x7f817f7f, 0x817f817f, 0x817f7f81, 0x7f7f7f7f, 0x81817f81, 0x7f818181, 0x81817f81, +0x817f8181, 0x7f817f7f, 0x81817f81, 0x817f7f81, 0x7f7f7f7f, 0x81817f81, 0x81817f81, 0x81817f81, +0x81817f7f, 0x7f81817f, 0x81818181, 0x7f7f8181, 0x817f817f, 0x817f8181, 0x817f7f7f, 0x8181817f, +0x81817f7f, 0x8181817f, 0x7f817f81, 0x81817f7f, 0x81818181, 0x817f7f7f, 0x7f81817f, 0x817f817f, +0x81818181, 0x817f7f81, 0x817f817f, 0x817f7f7f, 0x81817f7f, 0x7f7f8181, 0x817f7f81, 0x8181817f, +0x7f818181, 0x7f817f81, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x81818181, 0x7f7f8181, 0x7f7f7f7f, +0x7f81817f, 0x817f8181, 0x817f817f, 
0x7f81817f, 0x81818181, 0x7f817f7f, 0x7f7f7f7f, 0x7f7f7f7f, +0x7f7f8181, 0x817f8181, 0x817f817f, 0x7f7f7f81, 0x817f7f7f, 0x7f7f817f, 0x7f817f7f, 0x817f8181, +0x81817f81, 0x817f8181, 0x7f81817f, 0x817f8181, 0x817f8181, 0x817f7f81, 0x7f7f817f, 0x7f818181, +0x7f7f817f, 0x7f7f7f7f, 0x817f8181, 0x7f7f7f7f, 0x8181817f, 0x817f8181, 0x81818181, 0x817f7f7f, +0x7f7f817f, 0x817f7f7f, 0x817f817f, 0x7f818181, 0x8181817f, 0x817f817f, 0x7f7f7f7f, 0x7f81817f, +0x7f817f81, 0x81818181, 0x7f817f7f, 0x817f817f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x81817f81, +0x81818181, 0x81817f81, 0x7f7f817f, 0x81818181, 0x817f8181, 0x7f81817f, 0x81817f7f, 0x7f81817f, +0x817f8181, 0x81818181, 0x7f7f7f81, 0x81817f81, 0x817f7f7f, 0x817f8181, 0x7f7f817f, 0x7f7f8181, +0x7f81817f, 0x7f818181, 0x7f81817f, 0x7f7f817f, 0x7f81817f, 0x7f7f7f81, 0x7f81817f, 0x817f8181, +0x81817f81, 0x81818181, 0x817f8181, 0x81818181, 0x7f81817f, 0x7f7f8181, 0x7f81817f, 0x81817f81, +0x7f818181, 0x81818181, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x81817f7f, 0x7f81817f, 0x81818181, +0x81817f81, 0x81817f7f, 0x7f7f817f, 0x817f7f7f, 0x8181817f, 0x81817f81, 0x81817f81, 0x8181817f, +0x817f817f, 0x81817f7f, 0x7f817f7f, 0x817f817f, 0x817f7f7f, 0x8181817f, 0x7f7f7f7f, 0x817f817f, +0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x7f817f7f, 0x7f7f8181, 0x7f7f7f7f, +0x7f81817f, 0x817f817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x81818181, +0x7f7f817f, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f7f8181, 0x81818181, 0x817f7f7f, 0x81818181, +0x81817f81, 0x817f817f, 0x7f817f81, 0x7f7f8181, 0x7f7f7f81, 0x81817f81, 0x7f7f7f81, 0x817f7f7f, +0x817f817f, 0x817f8181, 0x7f817f81, 0x817f7f7f, 0x7f817f7f, 0x7f7f7f81, 0x81817f7f, 0x7f7f7f81, +0x7f817f7f, 0x7f81817f, 0x817f817f, 0x7f7f8181, 0x7f81817f, 0x8181817f, 0x7f7f7f7f, 0x7f7f817f, +0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f, 0x7f817f81, 0x817f817f, 0x81817f7f, 0x7f81817f, 0x8181817f, +0x817f7f81, 0x7f7f7f81, 0x81818181, 0x7f817f81, 0x81818181, 0x7f818181, 0x817f7f7f, 0x81818181, +0x81818181, 0x7f7f7f7f, 0x8181817f, 0x7f7f7f81, 0x81818181, 0x7f7f817f, 0x817f7f81, 0x7f81817f, +0x817f817f, 0x7f7f7f81, 0x7f81817f, 0x7f81817f, 0x7f818181, 0x817f817f, 0x81817f7f, 0x7f7f817f, +0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x817f817f, 0x81817f7f, 0x7f7f7f81, 0x817f817f, 0x7f7f817f, +0x81817f7f, 0x7f7f7f7f, 0x7f81817f, 0x817f7f7f, 0x7f7f8181, 0x7f7f8181, 0x7f7f817f, 0x7f81817f, +0x81817f81, 0x817f7f81, 0x7f81817f, 0x81817f7f, 0x817f8181, 0x817f8181, 0x817f7f7f, 0x7f7f7f7f, +0x7f7f7f7f, 0x81817f81, 0x817f817f, 0x81817f81, 0x7f7f7f7f, 0x817f817f, 0x817f7f81, 0x81817f81, +0x817f7f81, 0x817f7f7f, 0x81817f81, 0x817f7f81, 0x7f81817f, 0x81817f7f, 0x7f818181, 0x8181817f, +0x7f7f7f81, 0x81817f81, 0x7f817f81, 0x81818181, 0x817f8181, 0x7f7f8181, 0x81818181, 0x817f7f81, +0x817f8181, 0x81817f7f, 0x8181817f, 0x817f817f, 0x8181817f, 0x7f818181, 0x81817f81, 0x81818181, +0x817f817f, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x7f7f8181, 0x8181817f, +0x817f7f81, 0x7f818181, 0x7f7f817f, 0x7f81817f, 0x81818181, 0x81817f7f, 0x7f817f7f, 0x817f8181, +0x7f7f8181, 0x81817f81, 0x7f7f8181, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f7f, 0x81817f81, +0x7f7f817f, 0x817f817f, 0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x8181817f, 0x817f8181, 0x817f7f7f, +0x817f7f81, 0x817f8181, 0x8181817f, 0x7f818181, 0x7f818181, 0x7f7f7f81, 0x817f7f81, 0x7f817f81, +0x81817f81, 0x817f8181, 0x817f7f7f, 0x817f7f7f, 0x81818181, 0x7f7f7f81, 0x817f817f, 0x817f7f81, +0x81817f7f, 0x7f818181, 0x7f7f817f, 0x7f818181, 0x7f7f8181, 0x7f817f7f, 0x817f8181, 0x81817f81, 
+0x81818181, 0x7f817f7f, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, 0x81817f7f, 0x7f817f7f, +0x7f7f8181, 0x817f8181, 0x7f818181, 0x8181817f, 0x81817f81, 0x817f7f7f, 0x7f817f81, 0x81818181, +0x817f817f, 0x81818181, 0x817f7f81, 0x817f7f7f, 0x817f817f, 0x7f817f81, 0x7f817f7f, 0x817f7f81, +0x7f818181, 0x817f7f7f, 0x7f81817f, 0x81817f7f, 0x817f8181, 0x817f8181, 0x81818181, 0x817f7f81, +0x7f7f817f, 0x81817f7f, 0x8181817f, 0x7f7f8181, 0x7f81817f, 0x8181817f, 0x817f7f81, 0x7f81817f, +0x817f817f, 0x7f7f817f, 0x817f7f7f, 0x8181817f, 0x7f7f817f, 0x7f81817f, 0x8181817f, 0x7f81817f, +0x81818181, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f818181, 0x817f7f7f, +0x817f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x817f8181, 0x7f81817f, 0x7f7f817f, 0x81817f7f, +0x7f7f7f7f, 0x817f7f81, 0x81817f81, 0x817f817f, 0x7f7f8181, 0x817f7f81, 0x7f7f8181, 0x81818181, +0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x8181817f, 0x7f7f7f81, 0x817f7f7f, 0x81818181, 0x81817f7f, +0x7f817f7f, 0x817f7f7f, 0x81817f7f, 0x81817f7f, 0x817f7f7f, 0x81817f7f, 0x817f8181, 0x7f81817f, +0x7f7f7f7f, 0x817f817f, 0x817f7f81, 0x8181817f, 0x817f7f7f, 0x7f817f7f, 0x8181817f, 0x81817f81, +0x817f7f81, 0x7f7f7f81, 0x7f7f7f81, 0x7f818181, 0x7f817f81, 0x81818181, 0x817f8181, 0x8181817f, +0x7f817f7f, 0x817f8181, 0x7f818181, 0x817f7f81, 0x7f817f7f, 0x7f7f8181, 0x817f817f, 0x817f8181, +0x7f81817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f817f, 0x8181817f, 0x81817f81, 0x817f8181, 0x7f7f7f81, +0x8181817f, 0x8181817f, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x7f818181, 0x81818181, 0x7f7f7f81, +0x7f817f7f, 0x81818181, 0x817f8181, 0x817f8181, 0x7f7f7f7f, 0x817f7f81, 0x817f817f, 0x817f7f81, +0x81818181, 0x817f8181, 0x7f7f8181, 0x7f7f8181, 0x817f817f, 0x81817f7f, 0x7f7f8181, 0x817f8181, +0x7f7f817f, 0x7f7f817f, 0x7f817f81, 0x7f817f7f, 0x7f818181, 0x7f81817f, 0x81817f7f, 0x817f8181, +0x817f8181, 0x7f818181, 0x8181817f, 0x7f817f7f, 0x81817f81, 0x7f7f8181, 0x817f7f81, 0x817f8181, +0x7f7f7f7f, 0x817f7f7f, 0x7f817f81, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f81, 0x81818181, 0x81817f81, +0x7f817f7f, 0x7f818181, 0x817f7f81, 0x8181817f, 0x81817f7f, 0x8181817f, 0x7f81817f, 0x7f817f81, +0x817f8181, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f818181, 0x7f81817f, 0x7f7f8181, 0x7f7f7f81, +0x817f817f, 0x81817f7f, 0x7f81817f, 0x81817f7f, 0x7f818181, 0x817f8181, 0x81818181, 0x8181817f, +0x81817f7f, 0x7f817f7f, 0x817f8181, 0x81817f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x81817f81, 0x817f817f, +0x817f8181, 0x81817f81, 0x81817f81, 0x7f817f7f, 0x7f7f817f, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f, +0x81817f81, 0x7f817f7f, 0x81817f81, 0x81817f81, 0x7f817f7f, 0x7f7f817f, 0x81817f81, 0x817f8181, +0x81817f81, 0x8181817f, 0x7f7f7f81, 0x817f8181, 0x81817f7f, 0x81817f81, 0x817f8181, 0x817f8181, +0x817f817f, 0x817f7f7f, 0x81817f7f, 0x7f817f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f818181, 0x81817f81, +0x817f8181, 0x817f7f7f, 0x817f817f, 0x8181817f, 0x81818181, 0x7f817f81, 0x81817f7f, 0x81818181, +0x7f818181, 0x817f817f, 0x7f7f7f81, 0x817f817f, 0x7f7f817f, 0x817f817f, 0x7f7f7f7f, 0x817f7f7f, +0x817f7f7f, 0x7f817f81, 0x7f7f817f, 0x81817f7f, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x817f817f, +0x817f817f, 0x7f817f81, 0x7f818181, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x81818181, 0x7f817f7f, +0x7f7f7f81, 0x817f817f, 0x7f817f7f, 0x817f7f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f7f7f, 0x817f817f, +0x81818181, 0x81818181, 0x8181817f, 0x81817f81, 0x81818181, 0x817f8181, 0x7f817f7f, 0x7f817f81, +0x817f8181, 0x7f818181, 0x817f7f81, 0x817f7f7f, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x7f817f7f, +0x7f7f7f7f, 0x7f81817f, 0x7f7f817f, 0x7f81817f, 0x81817f81, 
0x817f8181, 0x7f817f81, 0x817f8181, +0x81818181, 0x817f817f, 0x81817f81, 0x817f817f, 0x817f8181, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f7f7f, +0x81817f81, 0x7f81817f, 0x817f7f81, 0x7f818181, 0x81817f7f, 0x7f7f8181, 0x7f7f8181, 0x7f817f7f, +0x8181817f, 0x7f81817f, 0x8181817f, 0x81818181, 0x81817f7f, 0x7f7f817f, 0x81817f81, 0x8181817f, +0x7f81817f, 0x7f817f81, 0x7f7f817f, 0x7f817f7f, 0x7f817f7f, 0x817f8181, 0x81817f81, 0x7f7f7f7f, +0x7f817f7f, 0x81817f7f, 0x7f7f7f7f, 0x817f817f, 0x7f7f7f81, 0x817f7f81, 0x7f818181, 0x7f7f8181, +0x7f817f81, 0x7f817f7f, 0x8181817f, 0x7f7f7f81, 0x7f7f8181, 0x81818181, 0x7f7f8181, 0x7f817f7f, +0x7f7f8181, 0x7f81817f, 0x7f7f8181, 0x81818181, 0x7f817f7f, 0x7f7f8181, 0x7f817f7f, 0x8181817f, +0x7f817f7f, 0x817f7f7f, 0x81817f81, 0x7f817f81, 0x817f817f, 0x7f817f81, 0x7f817f7f, 0x81818181, +0x7f817f81, 0x7f7f8181, 0x7f81817f, 0x81818181, 0x7f7f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x7f817f81, +0x7f81817f, 0x817f7f7f, 0x817f8181, 0x7f817f7f, 0x817f7f81, 0x7f7f7f7f, 0x7f817f7f, 0x7f817f81, +0x817f7f7f, 0x817f8181, 0x7f7f7f7f, 0x817f7f81, 0x81817f7f, 0x81818181, 0x817f817f, 0x81818181, +0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x81817f81, 0x7f81817f, 0x7f7f7f81, 0x7f7f7f81, 0x81817f7f, +0x817f7f81, 0x817f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x8181817f, 0x81817f7f, 0x7f7f7f7f, 0x7f818181, +0x7f818181, 0x81818181, 0x817f8181, 0x81817f7f, 0x7f7f8181, 0x817f817f, 0x7f818181, 0x7f7f8181, +0x7f7f817f, 0x7f7f8181, 0x7f7f7f7f, 0x8181817f, 0x81818181, 0x817f8181, 0x7f7f7f7f, 0x7f81817f, +0x81817f7f, 0x7f7f8181, 0x81817f81, 0x817f7f7f, 0x817f7f7f, 0x7f817f81, 0x817f7f7f, 0x81817f7f, +0x817f817f, 0x7f7f7f81, 0x817f7f7f, 0x817f817f, 0x817f7f7f, 0x817f8181, 0x81818181, 0x7f81817f, +0x7f7f8181, 0x7f81817f, 0x81817f7f, 0x7f818181, 0x81817f81, 0x817f7f81, 0x7f7f8181, 0x7f817f81, +0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x817f8181, 0x8181817f, 0x817f7f81, 0x817f7f81, 0x7f817f81, +0x7f7f817f, 0x7f817f81, 0x7f817f7f, 0x7f817f81, 0x81818181, 0x81818181, 0x7f7f7f7f, 0x81818181, +0x7f817f7f, 0x81818181, 0x7f81817f, 0x7f817f81, 0x81817f7f, 0x817f7f81, 0x7f7f817f, 0x7f817f7f, +0x7f818181, 0x7f7f817f, 0x7f817f7f, 0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x817f7f81, 0x7f7f7f81, +0x81818181, 0x7f817f7f, 0x81818181, 0x7f818181, 0x817f7f81, 0x7f817f7f, 0x81818181, 0x81818181, +0x7f817f81, 0x7f7f7f7f, 0x81817f7f, 0x7f818181, 0x81817f7f, 0x81817f81, 0x8181817f, 0x817f817f, +0x817f817f, 0x817f7f81, 0x7f7f8181, 0x817f7f7f, 0x817f8181, 0x7f7f7f7f, 0x7f7f7f7f, 0x817f7f81, +0x817f7f7f, 0x7f7f8181, 0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f8181, 0x7f7f817f, +0x817f7f7f, 0x81818181, 0x8181817f, 0x7f817f7f, 0x7f7f8181, 0x81817f81, 0x7f81817f, 0x7f7f8181, +0x817f7f7f, 0x817f7f81, 0x7f81817f, 0x7f7f7f81, 0x7f817f81, 0x8181817f, 0x817f7f81, 0x7f817f81, +0x81817f81, 0x81817f81, 0x81817f7f, 0x7f7f817f, 0x817f7f81, 0x7f81817f, 0x817f817f, 0x7f817f81, +0x817f7f81, 0x7f7f817f, 0x7f7f817f, 0x7f7f7f7f, 0x7f81817f, 0x7f7f817f, 0x8181817f, 0x817f7f7f, +0x7f817f81, 0x7f817f7f, 0x7f81817f, 0x81817f81, 0x7f817f7f, 0x7f81817f, 0x81817f7f, 0x7f7f7f7f, +0x817f817f, 0x7f7f7f7f, 0x7f81817f, 0x817f8181, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x8181817f, +0x817f817f, 0x81818181, 0x81817f81, 0x81817f81, 0x7f7f8181, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f, +0x7f817f81, 0x817f7f81, 0x81818181, 0x7f7f8181, 0x81817f7f, 0x81818181, 0x7f81817f, 0x7f7f7f7f, +0x81818181, 0x7f81817f, 0x7f7f7f81, 0x8181817f, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f8181, 0x81818181, +0x81818181, 0x7f7f8181, 0x7f817f81, 0x81817f7f, 0x7f7f817f, 0x817f7f81, 0x7f7f817f, 0x817f817f, +0x81817f7f, 0x7f7f817f, 
0x7f7f7f7f, 0x81817f81, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f8181, +0x7f817f7f, 0x7f817f81, 0x817f8181, 0x817f7f7f, 0x7f818181, 0x7f81817f, 0x817f817f, 0x7f7f8181, +0x81817f7f, 0x7f7f7f7f, 0x817f7f81, 0x8181817f, 0x81818181, 0x7f817f81, 0x7f817f81, 0x7f81817f, +0x7f81817f, 0x7f7f817f, 0x817f7f7f, 0x7f817f7f, 0x817f7f81, 0x7f7f817f, 0x817f8181, 0x7f7f7f7f, +0x7f81817f, 0x7f7f817f, 0x817f817f, 0x817f817f, 0x7f817f7f, 0x7f7f7f7f, 0x7f81817f, 0x7f7f8181, +0x817f7f81, 0x7f7f7f81, 0x7f818181, 0x817f817f, 0x7f7f817f, 0x7f817f81, 0x817f7f81, 0x81818181, +0x81817f81, 0x7f7f7f81, 0x7f817f7f, 0x7f818181, 0x817f817f, 0x7f7f8181, 0x7f81817f, 0x7f81817f, +0x817f7f81, 0x81818181, 0x7f817f81, 0x7f7f7f81, 0x7f81817f, 0x7f818181, 0x7f817f7f, 0x81817f81, +0x81818181, 0x7f817f7f, 0x81817f81, 0x81817f81, 0x7f7f817f, 0x817f8181, 0x81818181, 0x7f81817f, +0x81817f81, 0x7f7f817f, 0x7f7f817f, 0x7f817f81, 0x817f7f7f, 0x8181817f, 0x81817f81, 0x81818181, +0x7f817f81, 0x7f7f7f7f, 0x81818181, 0x7f7f8181, 0x81817f81, 0x81817f7f, 0x7f7f817f, 0x7f7f7f7f, +0x7f7f7f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f7f7f81, 0x81817f81, 0x817f8181, 0x7f7f817f, 0x7f7f7f7f, +0x817f817f, 0x8181817f, 0x817f8181, 0x7f7f7f7f, 0x7f7f7f81, 0x81817f7f, 0x7f7f7f7f, 0x7f7f817f, +0x7f81817f, 0x7f7f7f81, 0x817f8181, 0x81817f81, 0x817f7f81, 0x7f7f817f, 0x7f81817f, 0x7f7f7f7f, +0x81818181, 0x81817f81, 0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x81817f81, 0x8181817f, 0x7f81817f, +0x81818181, 0x7f818181, 0x817f7f81, 0x81817f7f, 0x81818181, 0x817f7f7f, 0x7f7f7f81, 0x817f7f7f, +0x817f817f, 0x817f7f81, 0x81818181, 0x81817f81, 0x7f818181, 0x81817f7f, 0x817f817f, 0x7f81817f, +0x817f7f81, 0x7f81817f, 0x8181817f, 0x7f7f7f7f, 0x8181817f, 0x7f7f817f, 0x7f817f7f, 0x817f7f7f, +0x7f818181, 0x817f8181, 0x7f7f817f, 0x817f7f81, 0x7f817f81, 0x81817f81, 0x7f7f8181, 0x7f81817f, +0x7f7f817f, 0x7f817f7f, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x817f7f81, 0x81818181, 0x7f7f817f, +0x817f8181, 0x7f81817f, 0x7f817f7f, 0x81817f81, 0x7f7f817f, 0x81817f81, 0x7f7f817f, 0x81818181, +0x81817f7f, 0x7f817f7f, 0x817f8181, 0x7f7f8181, 0x817f7f81, 0x81818181, 0x7f7f8181, 0x7f817f7f, +0x7f817f7f, 0x817f817f, 0x817f7f81, 0x81817f81, 0x7f7f8181, 0x817f7f7f, 0x7f7f8181, 0x81817f81, +0x7f817f7f, 0x817f8181, 0x817f817f, 0x81817f81, 0x81817f81, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f81, +0x81817f81, 0x7f81817f, 0x7f7f8181, 0x81818181, 0x817f7f7f, 0x7f81817f, 0x7f7f817f, 0x7f817f7f, +0x7f818181, 0x7f81817f, 0x817f8181, 0x7f7f7f7f, 0x7f81817f, 0x7f818181, 0x817f817f, 0x81818181, +0x7f818181, 0x7f817f81, 0x7f81817f, 0x7f7f8181, 0x817f7f81, 0x7f7f8181, 0x7f81817f, 0x7f7f7f81, +0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x7f7f8181, 0x7f818181, 0x81818181, 0x817f7f81, 0x817f8181, +0x7f7f7f7f, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f81, 0x817f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f818181, +0x81818181, 0x7f7f817f, 0x81818181, 0x7f818181, 0x7f817f7f, 0x7f817f81, 0x7f7f8181, 0x81818181, +0x7f7f7f81, 0x817f8181, 0x7f7f7f7f, 0x81818181, 0x81817f7f, 0x7f817f7f, 0x7f817f7f, 0x817f817f, +0x7f817f81, 0x817f8181, 0x817f817f, 0x817f817f, 0x7f7f8181, 0x817f7f81, 0x7f7f8181, 0x7f7f7f81, +0x7f7f7f7f, 0x7f7f7f7f, 0x8181817f + +e = +34560 + +k = +6144 + +rv_index = +0 + +iter_max = +8 + +iter_min = +4 + +expected_iter_count = +8 + +ext_scale = +15 + +num_maps = +0 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_SOFT_OUTPUT, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN, +RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT + +expected_status = +OK \ No newline at end of file diff --git 
a/app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr.data b/app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr.data new file mode 100644 index 00000000..cbf4e72e --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr.data @@ -0,0 +1,676 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_DEC + +input0 = +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0xD0000000, 0x33CDC8CE, 0x4FAEE4CC, 0xC7DC23C3, 0xC306D6CA, 0x2B360A24, 0xE91C423D, +0x1F323427, 0x4B1C33B6, 0x3EC9D0E7, 0x39204DD1, 0xCCD73C37, 0xC6F6D8E1, 0x1DF828F3, 0xDEE34025, +0xC41EC235, 0xD035E1D8, 0x3CC32843, 0x29B60C3C, 0xE92E122E, 0x454BD4C8, 0x35D02543, 0x33E4D0AC, +0x07AF2228, 0x38C62DD1, 0x233800C5, 0x3CC02DD1, 0x35E73B19, 0xDEBED026, 0x33C5EDD9, 0x33E51527, +0x1F21EA18, 0xCC3E3BD2, 0x013CC14C, 0x3724D23B, 0x23CDED2D, 0x21424630, 0xC5B0E64D, 0xCFC03BEF, +0x24294241, 0x46C526BC, 0xD82F334C, 0x1E283DCB, 0x3F3F33C7, 0x4A14D2A1, 0xD92F2AF3, 0xCFA820CC, +0xDD30C6CB, 0x2B3ACAB7, 0x4AFE29CD, 0x25BD3200, 0x2D293323, 0xD32A3B29, 0x29E64D0C, 0xBE4BFB3C, +0xB1DD242D, 0x0427F11D, 0x3046CFDA, 0xD633C0D5, 0xC0DDBE01, 0xDA3DD9B4, 0x3BCE3638, 0x23262CC6, +0x432BD2AE, 0x23A9E1C7, 0x1BE45609, 0x22CD35AA, 0x32D6371F, 0xCE27352A, 0xE2B73F40, 0xC2D0CE2C, +0xE72E3A3E, 0x2FF52147, 0xEFCF2E33, 0x003103CA, 0xC6C14A33, 0x45CAC0CF, 0x011E1FD3, 0x23D3C72B, +0x2EB644C4, 0x484BD930, 0x3AC22E1A, 0x1AB6DD42, 0xCEDDE232, 0x3ACCCC31, 0xB830CA2D, 0xC930D2D7, +0xD0E049BB, 0xE6243524, 0x3EF53914, 0xCB083BE3, 0x411EDBE9, 0x30C3C3E8, 0x3721F5E7, 0xBBDF3FD9, +0x2AD21F33, 0xEE402858, 0xD3AFD237, 0x3D39401D, 0x30003623, 0x4734DC46, 0x192ACD32, 0x3324CAC2, +0xD714D4B5, 0xDC2E21D0, 0x2327CDBA, 0x15B739C3, 0x31C9EEB8, 0xFAB5372E, 0xC239B5DA, 0xE628440F, +0x34D425C6, 0x293BE6FD, 0xBDC04BBC, 0xE0DDB7CF, 0xD627BC34, 0xA5422FCB, 0x13211ABC, 0x1708E51C, +0xCE33DAD1, 0x51AEC731, 0x2E383824, 0xC722D0D3, 0xCB32292B, 0xCECC402B, 0x1B312EF0, 0x34CF46CA, +0xE3BD252C, 0x0C21EC36, 0x4ADFECBE, 0xC4D2E62C, 0xADE5BAD3, 0xDDCC1D18, 0x521AC135, 0x3F302D37, +0x22CE2DCC, 0x262A1BCF, 0x4EE7D7CA, 0xD43D3526, 0x1840E020, 0xC3201F2F, 0x3ACB453E, 0x41EAD5B6, +0xB6C4CF17, 0x243A55D4, 0xC7501E28, 0x351DCD1C, 0xE3A8D502, 0x3235271C, 0xAC342F00, 0x34B4E3D8, +0x36473AE4, 0xAE403CEB, 0xBC3FDA3F, 0x552CDBEC, 0xE7C42B5C, 0xE743E13D, 0x18D44650, 0xBA2EDF31, +0xB5C3EC42, 0x432EBE2E, 0xADDE4F2A, 0x401BCAD6, 0x03D6EAEB, 0xD0DEB133, 0x153FC4D4, 0x30DA2FD3, +0xEE282FD9, 0x242ADC39, 0xD72B42D9, 0x44C7E0D5, 0xC72CB3C7, 0x27C9472B, 0x0036DFB6, 0xD9C53112, +0xC0360E0E, 0x4EEA0E27, 0xEDCF27C1, 0x1739D5DC, 0x22DE2CE1, 0xDCCAF633, 0x31D02417, 0xD7DCD0DA, +0xDE1DE0DC, 0xD827DD22, 0x1CDBD7EC, 0xCE171BCB, 0xEE2347CF, 0xC22C312D, 0xDBCC2A38, 0xBE0ADED4, +0xDC20CC2D, 0xD5380DB2, 0x2A32C701, 0x3EC7D41E, 0xCDD7D74C, 0xE83E2A44, 0xC12B26E6, 0xE70046D7, +0xD626D1EC, 0x35DDE41D, 0x303F27BD, 0xC94D4517, 0x31D1E229, 0xAD3529BF, 0x3AC5D21C, 0xCF1DB731, +0x2738DBEB, 
0x1F382B33, 0x1ED52213, 0xFCE22CD2, 0x1330331E, 0x4A30BA0F, 0x1F38B42A, 0x2E34CB1F, +0xE3E125D5, 0x3AC72D17, 0xBD3C342C, 0x37272810, 0x1D34E4C7, 0xE5CC2EF8, 0x1E2FF1DF, 0xB9294236, +0x26F10017, 0xBDE3FBD1, 0xB7DB52D0, 0x2E3CB8E9, 0xB3343D3C, 0x3E3BDFDD, 0x1F46393A, 0x25FA20C1, +0x2D194CD1, 0xE9471D26, 0x34C3242E, 0xAE012034, 0xC8F3FED7, 0xB825362B, 0x322DC136, 0x42F41511, +0x181855C2, 0x4937D5EB, 0xE13CDC23, 0x08242A28, 0x2DCCDFC0, 0x13CEC64F, 0xB3E6F7ED, 0xD93401EE, +0x3D02DEE0, 0xCFC52300, 0xD2F1BE38, 0x2D092924, 0x253E2AF0, 0x37DACAD0, 0xDB1ECFE7, 0x40CED84D, +0x3706C42A, 0x4231D91A, 0xDD30471D, 0xBD272DC8, 0xCB1941D9, 0xC22FC7C5, 0x333F3C0C, 0x3F3D2518, +0xDDCCD143, 0xB3D834D5, 0xD231BEE4, 0x2BDB3B3D, 0x2F2754E7, 0xEAF3CE64, 0x3632E4C0, 0xD83324C2, +0x11A4D5CC, 0x00D627D3, 0xD51F2731, 0x17C92BE2, 0x1AD62E1E, 0x16DB2418, 0x16B83823, 0x2711D54B, +0xDFB02BDF, 0x28CDA600, 0x2AE9ED3E, 0xD314CD20, 0xD04F272D, 0xD0F9CD37, 0xCC1735E5, 0xD328EC37, +0xA7D31A26, 0x214BD9A5, 0xCFCB4226, 0x233C4AD5, 0x26EF37D8, 0xD83E3230, 0x3EC8C93C, 0x07B9282B, +0xD1E6DAF2, 0x31C52EDD, 0x3A0038A5, 0x56C1E0DF, 0x22E437C0, 0xDBC8143B, 0xCF1DB03F, 0x1D4BE636, +0xB8D3C23A, 0xBCC3372E, 0xAF284228, 0xDCE02F1E, 0xB9D4E3D4, 0x56D3D0D8, 0x494816C1, 0xFB1240C3, +0x23B1BDD2, 0x1236322E, 0xC2CBCB47, 0x102FE7C8, 0xBB0AE1D9, 0x3831BC29, 0x39AECBD9, 0xC7CF1ED2, +0x0AE43913, 0xDDD62F2C, 0x4FD522E6, 0x3CB90A55, 0x2101C8EC, 0x432604BD, 0x29E3E31E, 0xC25B3C29, +0x20E927D4, 0xD4300C41, 0xD9AECBD5, 0x48D8E143, 0x2CEE272F, 0xD2C11FBA, 0x2F2DB8D7, 0x1CF4DFE6, +0xCCDCCEBF, 0xE052D0EA, 0xDADDEB1E, 0xC72ACB3A, 0x332E2B1B, 0x2D26E827, 0x2A29E6A6, 0xE614D6D5, +0x221D1CC5, 0x51102031, 0x1735C248, 0x31D4DCD5, 0x3C20DC00, 0x2D231732, 0xE54AD9CD, 0x1D24D826, +0xE936373A, 0x20C0D3C9, 0x2B231520, 0xD6CED133, 0xDFC8C7B5, 0xDC2FE8B8, 0x3332249B, 0xC6E6E11F, +0xB7DA17AF, 0x1FD91321, 0xEE29CD41, 0xE1262FEF, 0xB6181DC6, 0x24F618C6, 0x3BE5DA2D, 0xFC2C35C8, +0x51C232CC, 0x36183EDF, 0x2B2CD6EF, 0x2F2DBDB9, 0x00D03413, 0xCDC9CBC4, 0xDB3309CC, 0x264EB22C, +0xCED1EAC9, 0x38D53F27, 0x2EE7D6F3, 0xC7232C23, 0xCCD42515, 0xC9C8C8E3, 0x231E3930, 0x5BBEDFCA, +0x34D7E5D7, 0x25CCE9A7, 0xCD4BF1E8, 0x5E2542E6, 0x38DCE43E, 0xDA34F0CA, 0x2EBFF841, 0xDA422D3E, +0x13CA2231, 0xD5C6DA27, 0xBAC0C6B6, 0xC82C49FF, 0x31312123, 0xBF00DFBF, 0xE8F23819, 0x2C34E81E, +0xDC4C2B0C, 0x3A5650F2, 0x2CCB0FD0, 0xC42D4C28, 0x4F19DA4B, 0xF63DE630, 0x29F51C1F, 0xD6BE14C4, +0xD2DDCDD6, 0xE0D02825, 0x2FE0E3D1, 0x41342FE8, 0x25D547ED, 0xC5A732B5, 0x19BD503F, 0x4CD9DA3D, +0x24D013BF, 0x4FE93FCA, 0x12DBE8B9, 0x33DF151F, 0x1E24AECD, 0x2E2E3656, 0x133500AD, 0x3641403C, +0xBDDF2BD4, 0xE33130E0, 0x3DC02726, 0x18390C3C, 0x26C04DC9, 0xDBF22D35, 0xC3E1EC45, 0xCED12623, +0x1CE92E39, 0xCF2FD222, 0xD8DE31E6, 0x1EE916D1, 0x35DCB621, 0x19C2EB54, 0x41B34ED4, 0xD9373BDF, +0x203ED6D8, 0x2CD4A8C8, 0x14E0C6C6, 0xDF242131, 0xCDD921CF, 0x33AACBCC, 0x2AD1A5C6, 0x3ED42B00, +0xBF181FD0, 0xC0CC2329, 0x1BD1EDC9, 0x3A2D47CB, 0xDD27D8CB, 0xD8E8BAE6, 0xD5DE3FF5, 0x2C30D641, +0xDCDCDF26, 0xC2C34433, 0x2837F53D, 0x29D8E12D, 0xBF3FEBD0, 0x34CACC45, 0x30E1DA21, 0x39DDDCB3, +0x30193518, 0xC0132E19, 0x313D3736, 0xB2D4FF34, 0xF4D92945, 0xC70D3AEA, 0x3E3034F0, 0x00DA2F07, +0x1DD415EB, 0xCC1AC4B2, 0x232EB21C, 0xD7431E16, 0x2FCE0835, 0xCF39242C, 0x1BCA2CD4, 0xD8B6D0FA, +0x18C3262F, 0x2CE3BDCB, 0xD839DC38, 0x26161D24, 0x1223B3DC, 0xD6D63515, 0xCFBABFC6, 0xC1BF18C7, +0x2637C715, 0x264020EA, 0xD024D3C4, 0x4126A11D, 0x2E34442D, 0x32BB1CC9, 0x36E7DD40, 0x3113C3C5, +0xD100C8CE, 0xD21651E8, 0xC3CFCDD9, 0x403139F1, 0xD3D02119, 0x32171AC8, 
0xD2F9B62E, 0xB5B335D4, +0xC1462835, 0x1F1C2C35, 0xD8DDBF35, 0x422D2DDA, 0x38ED19D7, 0x3326BD2F, 0x4534DFCE, 0x46403132, +0x19CDD51E, 0xCBC3CBC0, 0xE3CF2139, 0xC646ECD8, 0x29E7C7DD, 0xE94CCAF8, 0xB8C6EA37, 0xEC2B09DA, +0xC8C82738, 0xC6CCDDFB, 0x3FCA3B4D, 0xC6482BD1, 0x2DE1D333, 0x3530BB3A, 0xBF4838EA, 0xE4411ACB, +0x3514D939, 0xCD372B27, 0x4BEC1C39, 0xAB21C221, 0xD8D83F37, 0xCCE0512D, 0xDA4640C2, 0x36AE1CD6, +0xBC2F303B, 0x20C3B8CD, 0xBFE6D4E0, 0xBECCC4EB, 0xC7CC1E4E, 0xCC2BD03C, 0x3738C518, 0x23B824CC, +0x2BECD0BC, 0x2107B945, 0x2AC21B00, 0xBE49F2EB, 0x31D2E3DC, 0xDAD23F11, 0xCDED3137, 0x3ABADA3A, +0xD9C0C12C, 0x36CEE8CF, 0xD0D5BDD3, 0x2AC9CFCC, 0xE61FD747, 0xC7B7B12A, 0x2BD520DF, 0xD5BC1D31, +0xE6D8423B, 0xCC21EB43, 0x1F46292D, 0x171EE038, 0xDFA5E244, 0x1BD5C4AD, 0xC622C2B5, 0x4E33CFD3, +0x3F263F30, 0x34E4CF15, 0x0041CC36, 0x4021DB29, 0xD62E2231, 0x2C22AE21, 0x1A201F44, 0xC6D7C100, +0x3A335125, 0x42473A2A, 0xDF32DA1F, 0xE5D8B51F, 0x3049E832, 0xE6DED531, 0x315DD01E, 0xDDD92034, +0x2EBF0AEB, 0x4B4639DD, 0xBED6C8DA, 0xDABA3714, 0xDD133BD7, 0xC934EACF, 0x0D35C3E2, 0xB4CCC213, +0x37EE202B, 0x0AD21AD9, 0x28D2C9CD, 0xD800DB2D, 0xD34B274A, 0xBDD33644, 0x38EFF1CC, 0xBDABD734, +0xB11E3E32, 0x2F1C23DC, 0xE80BD7DE, 0xDBE7D6F7, 0x240248BF, 0x23CEF1F6, 0x1F56E634, 0x33CD2230, +0xB4DB182F, 0xD338BA36, 0x33392B24, 0xBB3B3649, 0x282F30E7, 0x45CC35B1, 0x46261B30, 0x1FD9A722, +0x3BE3E1C3, 0x25334FF0, 0xB8BB28C0, 0xD751E2CC, 0x403600E1, 0x461FC3ED, 0xCB30C202, 0xD8D2B92A, +0x2923D71B, 0xB9DDE6CC, 0xB935CB0A, 0xEF37D723, 0xD1DA06CD, 0x303CE928, 0xF8B9BA27, 0x37370A1F, +0xD6C1DDD9, 0x18B6463F, 0x28C71FC3, 0x38D8B53A, 0xD22DE9A1, 0x18DDB236, 0x0A28D515, 0x363B263D, +0x1F3A122D, 0x202AE323, 0xC72337E8, 0x32ED08CC, 0x2658C347, 0xC6F4E200, 0x45D1C2C3, 0xF92D343A, +0x3EBE1E65, 0x10EB2FD3, 0xC64FD5E5, 0xC5D3F521, 0xE9CA1A37, 0xD736CDD6, 0xDCC7C233, 0xC9C8E2D4, +0x2B99D60A, 0x1EE3DA0D, 0x30BDD8E1, 0x16D3BE1F, 0x1C4524D4, 0xC8D5D432, 0x19AEDA3D, 0xCDD4CAD5, +0xD835332E, 0x2412DA1B, 0xD0CBEBD7, 0xC3CEF425, 0xD1D743E3, 0x000A163B, 0x23DC3129, 0x202332D1, +0x322F2216, 0x28F5131A, 0xD316CF0D, 0xD0344C30, 0x423325D7, 0x37204237, 0xEEDDC721, 0x2038E805, +0xD525C522, 0xBEDDE327, 0x3AC8BCC5, 0x45402E27, 0x34DFC1D9, 0x2DB0D049, 0x322F2ACF, 0xDB3AC3C5, +0xD8D0EB4F, 0x2A1DCCCC, 0xD0D33DC2, 0x1349DC45, 0x39CBBC32, 0x30301EBF, 0x2C00DFC0, 0x5DD0CA11, +0xD0220C3A, 0xC527CD1A, 0x31CD372C, 0x2E0ED7DC, 0xC6F92338, 0xF537E8D0, 0x3626DBBF, 0xC9C3C6C3, +0x30DDBE17, 0x50394C39, 0x3A11343B, 0x27E8DC2E, 0x0DDB303F, 0x1818BCF0, 0xC5DC17DB, 0x24C53A26, +0xECB0E7E2, 0xC21B1928, 0x15D7C920, 0x4126AACE, 0xE214D8C7, 0xD40BCD42, 0xB2CBD018, 0x554436DB, +0x3C2D25D9, 0x21CEC0DC, 0xE9251919, 0xE41ACDC3, 0xDECF67C3, 0xEDDE3225, 0xCA30EEDD, 0xCFE01E37, +0xD231BF41, 0x34DF1EBD, 0x1DD2CC2C, 0x21142E29, 0xEA322B37, 0xCEC311DE, 0xF0F738DD, 0xEBED1A40, +0xDCE640E8, 0xD0D1BA2B, 0xE906D6DB, 0xC71938B1, 0x13C59F39, 0x1ED6DBB6, 0x231BD3A9, 0x2C2B30DC, +0x0ED50000, 0x2DD4EED6, 0xD9D21BEE, 0xE1442E36, 0x38CEC4AE, 0xC626CCF4, 0xC2CD3F36, 0xD71F2BD9, +0x2F421837, 0x31B11A23, 0xD4D5282D, 0xD0E6BED7, 0x30D737CB, 0xC8142A09, 0xD8D2CFDC, 0xC22D3C44, +0xF62DF1C9, 0x3D38DBC9, 0xBFC2E5D3, 0xDCE42128, 0x3A304038, 0xDBE6D626, 0x28250FE0, 0x27B52BCF, +0xE0D8DC3C, 0xC8DA09DA, 0x2DF02FD4, 0xE539D7D1, 0xCDE729C2, 0x4816E313, 0x4E392AC5, 0x112CF50F, +0x3AD94924, 0x4B22273A, 0x43D2DE9E, 0xDA22DE30, 0x3FE324DB, 0xD9CC2811, 0x29D253D4, 0xE2BC353C, +0x0CC92147, 0xB8F0C6F5, 0x3D35DEBF, 0xED4ACBEB, 0xAC34E532, 0xEA2AC526, 0xEAC0BFC8, 0x4431D9DD, +0xC0CE4E21, 0x0000F2DD, 0xE0D8C7BA, 
0xDDDC2E36, 0xDA42C22B, 0x2B312614, 0xDC391EF4, 0xCC2E31D8, +0x3DF4D630, 0xC72C21D9, 0xDB2BD531, 0xD916DB3C, 0xBEC11DD2, 0xD1D5CEA4, 0xBA072ED7, 0x402F2829, +0xE3C0F627, 0xC7CC3CBD, 0xF41D3345, 0xD706DB14, 0xB2DF36AC, 0xEBC4551F, 0x32B01929, 0xC5B6DDEC, +0xD91DE130, 0xE1213222, 0x4126D927, 0xAF52300D, 0x291B301C, 0x241A1843, 0xE43C1FDC, 0xBBCC191C, +0x2C25E011, 0xBD3EE9DB, 0xC9BF1345, 0x2F333AD2, 0x1CD822DB, 0x23BE43EA, 0x2527EADC, 0xB52E1C1E, +0x391CCF17, 0xD2C82C0C, 0x242E3830, 0xD328D808, 0x35CEF4CD, 0xD81BE035, 0xD0DF38DA, 0xD1D3C42E, +0x3A2C1EEA, 0xC6D03AD2, 0x1E362E4E, 0xE6080000, 0xD018C312, 0xEFAA323B, 0xD1294BD2, 0xBAD52C3B, +0xE0373E10, 0x260CDDE2, 0xC902E040, 0x30472BD5, 0x534BE7D2, 0x20CCB523, 0x21CCD843, 0x2259E24E, +0xDE35FDD1, 0xD4D91D3E, 0xCFCA11D5, 0x35D724BD, 0x3C3F0CB8, 0x3EC447CB, 0xE835DDC4, 0xC3C73E31, +0xDD432B2F, 0x2FDCBDC2, 0x2D3CCE17, 0x25B8AB31, 0x2ACCFFE5, 0x40E03C2B, 0xE7DA1841, 0x17E53020, +0xD44E0B2F, 0x46DA22EC, 0x33422425, 0x211840C5, 0x23C1CFD5, 0xC8350BE5, 0xD833CDD0, 0xE63DC528, +0x292ADC17, 0xD8BCD3C1, 0xCD39F02E, 0xA71EC343, 0xD6CC3A3E, 0x2E12D2CF, 0xC40B1FB1, 0x37DCDC33, +0x20E52EEA, 0x271629E5, 0xC4BEE0C2, 0x1B3CDBD6, 0x00004934, 0x18E145C7, 0x4BE93522, 0x3029C309, +0x31D1D50F, 0x3DB42C1A, 0x21A643B2, 0x3E26D8C2, 0xCDDE28D1, 0x44E835E1, 0xEDC4DED1, 0xC4F9C9ED, +0xC5E1BBCC, 0x4426C1D4, 0xD9BB372B, 0x37232E0D, 0xE5D0D0AE, 0xC9D23CC5, 0xF638E2C7, 0x2DB6E6D9, +0xC0C9B72D, 0xDF28E6D0, 0x11E2DCC8, 0xD8E2F31C, 0xEA3937DE, 0x2741B953, 0x19D5CEE6, 0x33DDC22D, +0x2BF72326, 0x2143363C, 0x13CA1A1F, 0xB9D7DAC9, 0x22C6B72E, 0xC5DA2631, 0xCB32ECCF, 0x032D15D4, +0xBB1EE613, 0x38C42452, 0xDBCD2726, 0x1AF44039, 0x22CF2EC6, 0xCAEDE6B4, 0x2823D423, 0xDCC61C39, +0xC12D19BF, 0xC612C31B, 0x31F1D32D, 0x3E1C09E4, 0xD824C5E7, 0xDFFC2839, 0x2AD40000, 0xDFCCBBBC, +0x3FC9CC12, 0xEF2626E1, 0x0FCDEA2A, 0xEBD735CF, 0x4E1A2834, 0xCDEDE1D5, 0x31CCCD29, 0x2F4618DF, +0x48C11936, 0x29DADE41, 0xD71C1AE0, 0xDDD2285A, 0xB13DCDC8, 0xF020B8DB, 0xD0B837C2, 0xDC3B48E8, +0x18C3C530, 0x192C2940, 0x2ADA3AE4, 0xEA34C55A, 0xDAB2BED4, 0x1A51E838, 0xC832AEBE, 0x10DE2DE2, +0x3B2408CC, 0x22CEBD1E, 0xC919D6BE, 0x21D2D128, 0xCFE03A2A, 0xB7DEDC3B, 0x33C630ED, 0x5A0EEAD1, +0x3CB03FCE, 0x24EC15E4, 0xD0385BBF, 0x20CB4E4B, 0x30E0C535, 0x301BCD3C, 0x3642DDD3, 0xD90FDC50, +0xCA35BBCC, 0x393B3B2F, 0xE2BE352D, 0x362BE3BF, 0x1922B73D, 0xDE3448BD, 0xF7C0C02D, 0x00002F43, +0x2FCAD0E4, 0xDE412A34, 0xFBF339DA, 0x2BB94330, 0x39B521D5, 0x2DE31F43, 0x382C4F23, 0x35314C2D, +0x2DE3DBDC, 0x3A18EAB9, 0xD0DF131C, 0x45EA41E0, 0x48BBC1BF, 0x2220D7E4, 0x29C8D0C5, 0x3C0527CA, +0xC1ED061C, 0xCB48CA3E, 0xE846E633, 0x0EB2D7CF, 0x44C42335, 0x27F0D440, 0x332FD847, 0xD72F2CE3, +0xD7ECD6C9, 0x4026D51F, 0xD334BEE6, 0xE01B40C5, 0xD62BDA2F, 0xDEEA36AF, 0xABD039C7, 0x2E123FCE, +0xFD3924A8, 0xD0C6D529, 0xD52AEED4, 0x211D2A35, 0x1EE9C5F1, 0xCECAF921, 0x38C92740, 0x3E4E2A1A, +0xB8FAC7C3, 0xF11F1C36, 0xD4DA163A, 0x2701DC38, 0x0E3019D5, 0x57DB23E1, 0x43CD26D0, 0xADCC4934, +0xE0D3D706, 0x403A0000, 0xDECED5E1, 0xDAE6CF1D, 0xD5E33C40, 0x35B3D038, 0x24D73231, 0x2EC01D40, +0xC2D23CDC, 0x44C20BC6, 0x44C1D7A8, 0xE9D1C745, 0x36E5C2C6, 0xB81B2D42, 0x413917CC, 0xD8E031BA, +0xCE3CC1C4, 0x3EC91AC5, 0x0DE406CE, 0x35D1DA27, 0xC8E5CAE3, 0x313EC127, 0xB61631CA, 0xD73B3444, +0xD834C850, 0x37D31DE2, 0xCCC9153D, 0xBF41E9CA, 0xD649F9DC, 0x3A31D501, 0x50CD1DE3, 0xC8E54012, +0x4432D4D5, 0xCBB93023, 0xE8FCD6C2, 0xCEE0CAD1, 0xE41B2E39, 0x28CC0E40, 0xDFEAEFFD, 0xB3EA2723, +0x3534B935, 0x42D12A09, 0xED3DCA0A, 0x263C352B, 0xAD224EDF, 0x2CDF2A10, 0xF0BE4CAD, 0xBD45D8D5, 
+0x263F27D4, 0xE2372FB4, 0xC35240DF, 0x1EF3DCDA, 0x3AD0D224, 0x27DAD527, 0x303A2CBE, 0xCDDCC829, +0xE22B35C9, 0xD5DC222F, 0xEEBDCB63, 0x2042DAD8, 0x06CC3524, 0xD11BBB3E, 0xAE2229E6, 0x1FC81E1F, +0xEF4B12DA, 0xE1D846DD, 0xBA3B1CDA, 0xE154C801, 0xD6092CD2, 0xD0F132CB, 0x3A40D027, 0x123530BB, +0x1EC5B4DC, 0xD11825CB, 0xC603DECC, 0x28BCC5ED, 0x2C03E1CD, 0xE9AA12A5, 0xDB2DD53A, 0xCA32D01E, +0x2F4A09C5, 0xFE411E26, 0x1354CEC8, 0x3736C5F4, 0xC62F3C3B, 0x252DDFB9, 0xD0353723, 0xD8DDC21E, +0xE3DF21ED, 0xDAE33AC0, 0x2ADA323E, 0xDC43E1D9, 0x3A23463E, 0xD71934F8, 0xDB1037C8, 0x38B7BCCC, +0xDA2C1CC1, 0x3EEEE349, 0x2ACB2A26, 0xD4BCBCD8, 0x4AB50000, 0x9EC941D8, 0x3824C9D0, 0x203CCD36, +0x0DCDEDD8, 0x5B333036, 0x3125C7DD, 0xD7383F30, 0xD6C72617, 0xE1C245DD, 0xBDD3E818, 0x3210422F, +0xD00ECF2E, 0x44411A3A, 0x02E72E22, 0xF106D24C, 0xCFC847CF, 0x0421AFCF, 0xD2C525A9, 0x3CBCE63A, +0xE02A1E36, 0xE036E3E4, 0xDDC22B25, 0x2EC7452E, 0xE1E3EFE3, 0xD82ED6CE, 0x36C1E115, 0xD129E026, +0x251E1C44, 0xE8E5D7CA, 0xDDE7D40F, 0xC5D8D9D8, 0xA1C52550, 0x3ACDDA25, 0x3548C4E1, 0x2913D9C8, +0x1E28243A, 0xCA3324E2, 0xD0D7D1C0, 0xBDE9D0C4, 0x35BF4FE0, 0xCA42E33C, 0xBBFA33DA, 0xD753C9CD, +0xDA2CE11D, 0xD9BEE407, 0xE31DD4C1, 0xCBCCD51B, 0x3BB8D6C9, 0x0000DDDD, 0x28D3CDDC, 0xD62445B5, +0xB3211F13, 0xD7B9BF29, 0xD429D5E5, 0xB9C23CCE, 0xC219EDC1, 0x211E0B34, 0xB8F0E1C1, 0x33C0EF4C, +0xD8D6C7D0, 0x30E8D3D8, 0xB06514D1, 0x21B9D915, 0xE2323DCD, 0xDA41311F, 0xE24AB11F, 0xE9B92BE7, +0x33EA23C3, 0x04C430C0, 0xE849EF0D, 0xD1232C2C, 0x260FD13E, 0x002DCBDD, 0xEB3129E2, 0x20B4C7C6, +0x25D32335, 0x2CCB39D0, 0xEA13DAC6, 0xCDEE2E14, 0xCDAA2CD0, 0xE1D339E6, 0x36DCD03B, 0xF5453114, +0x34D428ED, 0xEAEDF458, 0xD226CC2D, 0xEDD03AD8, 0xEE1CC83D, 0x10DBB5E5, 0xF018CBDB, 0x5E33E31E, +0xC6D9232B, 0x1A373130, 0xDFE7DDCB, 0xB21FC5C6, 0xC1CDB1B7, 0x3F2DF128, 0x482BB426, 0x323E0000, +0xF5D43741, 0xCC1C1FB9, 0x00004E2E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x13000000, 0xC9112F26, 0xE3DF1FED, 0x284020CF, 0xC6ED13CA, 0x38B1E217, 0xD90FE8DB, +0xD7C0C3B7, 0x341336CE, 0x45E2D239, 0x3E22F0D8, 0x461D2C29, 0x3825D628, 0xD733E5F1, 0xD7B6C532, +0xC62B28C7, 0xD0BF3024, 0xF0BA3A26, 0x2D1D2836, 0x1BCF2DDE, 0xD02B251A, 0xB938CD2C, 0x34E220D9, +0xC2BEC23B, 0xCC2908CD, 0x1D080039, 0x42BB25C5, 0x32EB3206, 0x36D3D1E0, 0xC32E10C3, 0x243B13E6, +0xC02A1FF0, 0xC325D6DD, 0x23E33D2C, 0xDCD7DED5, 0x9C23C21D, 0x32E62056, 0x06314421, 0xE9E4ED24, +0xC0C9C233, 0x44BE3FDD, 0xD25134E5, 0x292EE2E7, 0x4EE0D1DF, 0x29C82E3A, 0xF5392A22, 0x4BE2D133, +0x58C2C4C8, 0xD2212920, 0x2935D4DB, 0xDECF4400, 0x31D7CCC9, 0x12DCFB2D, 0x2B2F0723, 0x2DE7B54E, +0x2AD24ADA, 0x261E2931, 0xD538CEDF, 0xE12A1C2C, 0x33DDEDE9, 0x2FFC36C4, 0xE61823CE, 0xC225E1DF, +0xC2102FEF, 0xCFD22617, 0x3EFBDA37, 0xDAD6BCD8, 0x110623E7, 0x3320E2D1, 0x1932B8D5, 0xCCEBBFC9, +0x47234011, 0x0A2B45D8, 0x25C11244, 0x00EF3221, 0xC634CC38, 0x0F34D0E6, 0x1A3F40EA, 0x2DEDC82D, +0xFFD2C8BD, 0x40E321CD, 0x24441BD3, 0xD932EDDD, 0x48362127, 0x1230EADE, 0x39E632F9, 0xD0D451DC, +0x18DD2DC0, 0xC22AC72E, 0xD71E172E, 0xAFDED12B, 0x1DBAA948, 0xF927B647, 0xD7C31C23, 0x3328E73B, +0xDBD72DD9, 0x3E23D424, 0xCB09E025, 0x16BA21CF, 0xDD00B9CB, 0x29E3CAD7, 0x3A19D011, 0xEA0EC4E0, +0xD51CDDBF, 0x2AD1C640, 0x2CD6CBD3, 0x36D6D43C, 0xA72851C2, 0xE7EE3C3F, 0x432A4721, 0x46C726E5, +0x253FD426, 0xD03B2C27, 0x1AEC343B, 0x5A2BC735, 0xCE31D2D2, 0xCFEDF30D, 0x492B4141, 0x20DC1BAD, +0xDAF81CD9, 0xD6EAD21D, 0x2339CD25, 0xA31546BB, 0xD0C0F63F, 0x382BE719, 0xDA34BFDB, 0x1EDBBADF, +0x5151E646, 0xC164D5E2, 0xD8292FCA, 0x33CF1F3B, 0x3FF531F4, 0xD3EB3453, 0xD433E236, 0x1B102B1C, +0xC6C320E8, 0xD8393C2E, 0x17EAB7EB, 0xE0C42CE5, 0x313720CC, 0x3F39C42D, 0x33374AD5, 0xCE1ED41E, +0xF7D2BE0D, 0xF114311E, 0xE22CD6EB, 0x4E23CA2A, 0xCDCF36D3, 0x463B1EC4, 0x312DE400, 0x1A1219CE, +0x0CD1E2C3, 0x3850CBEA, 0x31D7CCCD, 0x3F2D3936, 0xC518E33E, 0xC31F31B7, 0xC0AED6C9, 0x2AC6E5D9, +0xC334B8BF, 0x35EFD72E, 0xD72B3AD8, 0x31073AD7, 0x25E22D38, 0xF0CD4A64, 0x28DFDB2D, 0x22D9E714, +0xB8E43935, 0x35FCBFEC, 0x1DC1391E, 0xD6E731A5, 0xBBC8B837, 0xBD3FBE3A, 0x00BADC53, 0x28C43F33, +0x1CE0BAC2, 
0xE0E125D9, 0x3BD6CB3D, 0x313230BE, 0xC4243D34, 0x1F24D2B1, 0x2058384C, 0x2AE9192D, +0x302CCCEE, 0xD435C4C7, 0xB0D53217, 0xE52D32D9, 0xE0AF2127, 0x3E47D0CA, 0x4112BFCA, 0x27D0D0BD, +0xE81E5CF9, 0xD50CBCE5, 0x2DC9B4F7, 0x3FBA33BC, 0xCBCC2821, 0xDFD51326, 0xD20ADA3A, 0x2000C3BB, +0xC4DC4BE5, 0xCD39D7A7, 0xC40FF54F, 0xCC3BD1CC, 0x2C19EACC, 0xCD2E2129, 0x44D9F0DB, 0x2B40E11C, +0x31343F4C, 0x0E142817, 0xD0104135, 0xE304CAC7, 0x259A1FD4, 0xC3EAED3C, 0x1A311AD7, 0xD8242BEE, +0x20BF4EFD, 0x3FCA16C5, 0xDDEED03A, 0x0A22C52E, 0xD53B2D3A, 0xE22A2CB5, 0x291D3228, 0x2CCC2E45, +0x48BA00D5, 0x4FE12D1D, 0xD8D91DD1, 0xCEDBEC31, 0xFBD526B9, 0x3C1DE845, 0xAFDCE91C, 0xD622D31E, +0xCF2620D9, 0x28D63427, 0x362C3DDF, 0xD72C2530, 0xD6A7C203, 0x442C3233, 0x29B24E3F, 0xDD21412C, +0x400BD6D0, 0xC8D4B831, 0x171EF12A, 0x34FFC9D0, 0x3B3ED02D, 0x0BC7E2CB, 0xC43D28C6, 0x42F6C948, +0xD24AD429, 0xCB2C2A00, 0x43E2A236, 0xBC353312, 0xD91324B5, 0x30B4D013, 0xC92FDE2F, 0xBFD6D432, +0xC82DCABC, 0xC032DC1D, 0x30C9D2CB, 0xE0341DC5, 0xDDCEBE39, 0x2CC41D58, 0xE213BF14, 0xDFCDCBD9, +0x2E3C36DD, 0xE5262833, 0xF841291D, 0x15E324F5, 0x0315D732, 0xD1C5C5CA, 0xD73144D3, 0x2ABEBA1A, +0xBCF42E2C, 0x00E1E53B, 0x4423AD2C, 0xB021CF20, 0xD8D5EEB6, 0xBD35BCCD, 0xE6D85334, 0xD549DF2D, +0x35C74D19, 0x3F4BBE3D, 0x1F3FA3EB, 0xCEB2B335, 0x35D0BCBA, 0xD5D8CF27, 0xB11DCFC4, 0x24D3D408, +0x183B35D0, 0x1C14D619, 0x35D445BB, 0xDE452444, 0xC83315C5, 0x3F2122D0, 0xB623C737, 0x33321DD0, +0xD6DF33DE, 0xDE39AA20, 0xCF00F239, 0x412410D5, 0xC5DC2339, 0x1D3BD1B6, 0xC0E3481E, 0x1030451A, +0x30EA30D1, 0xC6DCB9C7, 0x2C36DC17, 0xCBB0ED30, 0x26C20FBE, 0x35D6D528, 0xD6CC2242, 0xEDE4241E, +0xC036DED9, 0x26303627, 0xB6B6371B, 0x26E3D6DF, 0x0D36C729, 0xBEEE4FD7, 0xE3C839D1, 0xC0313031, +0xAEAD2828, 0x36323D23, 0x13342728, 0x2904EE20, 0x261DDCD6, 0x0CBCE1E1, 0xD2CBD5C3, 0xFD1C4B2C, +0x1CD8CDDB, 0xD4BE53B1, 0x2BD6CDDE, 0xC61A01E1, 0x3D41BED3, 0xE0333BD0, 0x4040E821, 0xC4A32449, +0xD3D3A8BF, 0x36E11C2C, 0x3CC1321B, 0xC5D03E1C, 0xB813D9DD, 0xD6C51838, 0xCEBED141, 0x1FEA3813, +0x3611CAC0, 0xD12EE2C6, 0xE1BFD22D, 0xD0DAE9CC, 0x0E03DB00, 0xBC37393F, 0x422BC916, 0x49C4D72A, +0xBFC0243B, 0xC1C2F1C3, 0xDF36CC32, 0x49E53EE4, 0xBFC02CDF, 0x2BEE2BE1, 0xC23224C5, 0xD63F16DA, +0x15DBC21F, 0x49E4CC57, 0xBE272CC7, 0x274E0643, 0x5C1DDBE7, 0x34B73DF0, 0xCD2D2EB9, 0x1AD43BCD, +0xFD3047D3, 0xDAC53936, 0x1328DCC8, 0xBC32331D, 0x001F2634, 0xC845410F, 0xDBDB1C2B, 0xD1D819D0, +0xEBDDE12D, 0x37E335D7, 0xCDA84635, 0x0F362FBE, 0x30C229C4, 0xBC1DC43E, 0x3CD02F3D, 0x2EF5F5F4, +0x462D15E6, 0x362F1FC5, 0xC0C9CD49, 0x14C92639, 0x2BD135D0, 0x49C6B8DA, 0xDBC7E1D9, 0xC6382328, +0xEDC217BC, 0xE4D1331C, 0xDCC741CF, 0x283A341C, 0x38B93420, 0x46001BE7, 0x3E1030C4, 0x42A8C1CD, +0xBB3339C5, 0xE538DD20, 0xDF3F2602, 0xE52BE5C1, 0xBE2B41BA, 0x3BD4D337, 0x2D39C3DB, 0xC3BB36F0, +0xD12CCF17, 0x4130B93A, 0xE31DEAEC, 0xF9DEF52D, 0xADD35231, 0xD62ED0EF, 0xBE303521, 0x1E2ADB41, +0x310AC62F, 0x22DA2DC5, 0x26E1D9D0, 0x1F4D2AC1, 0xC8E6DCE1, 0xC6B3D62E, 0xD6D20019, 0x4EDC2D39, +0x342AE224, 0x07CBD4CB, 0xE43FB3BE, 0x1BE53748, 0xDD44C8A2, 0xBC34B7BA, 0xA1DD1D43, 0x23D31827, +0x3519E93F, 0xC83B3720, 0x1BF0B72A, 0xEAC7DCC0, 0xCA4B28EC, 0xD54D242F, 0xF35435D1, 0x3DDEC824, +0xC31128CE, 0x0B0FD136, 0x442D1E2B, 0xCBD1C3EC, 0xD635341B, 0xDD1F2931, 0x3442CDD0, 0xDADD3600, +0xC31E40E3, 0xB0BD1B22, 0x38CA1F0D, 0xD42D3043, 0xE65127E4, 0x2F15C9D6, 0x3E360B30, 0xB2CB1524, +0xC7BBC4DF, 0x38D244C7, 0x483B31C0, 0xDB43C8D1, 0x43DCDBBC, 0xDCC7571D, 0x24BAC2C6, 0x3B4ED6DC, +0x3AE03245, 0x0C40E1E2, 0x493724C0, 0xD4ECC537, 0x36CC30BA, 0x1C303FC6, 
0xD12F27D5, 0x00C4272F, +0xFFFD061D, 0xC2D9251D, 0xC1D7D625, 0xBFCD3AD3, 0xB42E11D4, 0xC7230E1E, 0x24CD2C21, 0x0BD436F9, +0xCECCD331, 0xE6BDE0D5, 0xD2C6C941, 0xE1D1D3EF, 0xEB4123CD, 0xDA1755E5, 0xCEE4C529, 0xDC43C5D1, +0xD8FDDFEA, 0x1AC9C317, 0x22E13DCE, 0x01D7BA29, 0x1551E449, 0x43CD363E, 0xC6281B24, 0xCBD23725, +0x3200D61C, 0x35D6CCBC, 0x262CB22C, 0xE2E43DB1, 0x26493335, 0x3AD72ACA, 0xC4244430, 0x3AD7C9C3, +0xC91ED8D1, 0x40EAD2AC, 0x38C9CDAC, 0x413BC137, 0xCD304B1F, 0xEFCA2323, 0xAC3FE4CD, 0x3616F6E0, +0x41C2272A, 0xDCD7B2E2, 0x27D9E018, 0xDEE2D943, 0xD5C01F22, 0xB0D93540, 0xB411C13D, 0xDE39CBE0, +0xE2C6F126, 0x31450828, 0xD2BC2927, 0xBACA1DD1, 0x34DDCDC3, 0xD8302AD5, 0xE2D02824, 0x0B2BE03E, +0x3BCAFD2C, 0x2E461D45, 0xBBF6F32E, 0x31E5DED2, 0x2ED4E847, 0x3BDDCD29, 0x231331D8, 0xF521CF2E, +0xD024C718, 0x20D91011, 0xDB1E0F34, 0xD931C0BB, 0xBB310837, 0x29B7C5CB, 0x3B31DEE0, 0x0836B926, +0x28CB1FD4, 0x31072A30, 0xEE41F300, 0x35CB501E, 0x603A2B47, 0x2C2739CE, 0xC0CFC4D9, 0xE0CED43C, +0x22BDC9CA, 0xBAEC122D, 0x4D16F2C5, 0xB6E5C3DA, 0x19D5C9D0, 0x1D1DDFC2, 0x223A23FB, 0xCD382B09, +0x3025C2E6, 0x3918BDD5, 0x33DFEF0E, 0x291A302B, 0x14184930, 0xD414D233, 0x473CCED7, 0xD2D318B7, +0x3E2738D2, 0x2928D537, 0x002E14BB, 0xE2E03734, 0x1B1EDFD1, 0xCF483FD8, 0x3D043844, 0xD52E1C4C, +0x2823EAD6, 0x1F2D241A, 0xBC4B22E6, 0x2EE7DFB8, 0x3424E2F0, 0xD41BD7CB, 0x2BBCDF1E, 0x155B5033, +0xB527D543, 0xE3C5E9F4, 0xD9B5C92B, 0xD3CAE0DF, 0x31C4CED7, 0x0BD33B25, 0xCF3B111D, 0xE62F241F, +0x22D93D22, 0xCF27EF43, 0xDAD2C5DD, 0xC60019BE, 0x2935D5C3, 0x2D0BCD22, 0x224714EA, 0xE7CAB817, +0xD1282222, 0x3C25EBD0, 0x45F0A8DC, 0x3ACFB92D, 0x34B4E7E6, 0xE93523E0, 0x2C292FCA, 0x44D4C7AC, +0xEA22C3FE, 0xEF37E524, 0x313ADCDA, 0x2212D846, 0x1BE0FF4B, 0x1CBC2E34, 0x174AF041, 0xD245223E, +0xE1DB2BDC, 0xF2E9A515, 0xE3C33BCC, 0x43DBD5CE, 0xF11700DB, 0xDE44DD36, 0x26E1D604, 0xC2ACE419, +0xB9D8C7D4, 0x37EFE21E, 0xE2BAC2D1, 0x3926E91A, 0xD013E8BC, 0x2C43ECD9, 0x232B30F8, 0xD6C9E0BD, +0x26A718BC, 0xEFF6C840, 0xC13332C2, 0xC4B9BD36, 0x1B4921BD, 0xD0EBECE0, 0xBCE4D647, 0x35D4DCD5, +0x1BBE4C27, 0xECC1B7CB, 0xD820284D, 0x0BC33DBF, 0x34D03140, 0x38EC3900, 0x29EBDBC9, 0xD43A3041, +0x3AE0DD0C, 0x09CFCFDB, 0x44E0C7CE, 0x20DBC4E3, 0x26DC41DE, 0xBA3AD2E5, 0x2ABA34E1, 0xD70FBEB7, +0x40E03FE2, 0xF8D62041, 0xBD47B7EA, 0xD2E03E34, 0x1FC6D2DE, 0xCF1CD155, 0x32D52630, 0x3C463D2C, +0x253130E3, 0x1ED8BB4A, 0x411BDFDD, 0x3FD8EE35, 0x33D02BBB, 0x002ECAAE, 0xDF1FDFEB, 0x2CD8DA3C, +0xB6352D59, 0xD5BDD3B9, 0x1946630D, 0xD9181731, 0xD419D31F, 0xCE2FB7D0, 0x32EFE3C3, 0xCDDB2FD3, +0xD247BB3E, 0xBA41DECA, 0x23244AD2, 0xDEAAF2B2, 0xB8D525F2, 0xD327A4D0, 0x3718C0E2, 0x2D3E20D9, +0x24CFDF12, 0x26D0BE1C, 0xC8D9282E, 0xEB54CADF, 0x44DB1AED, 0xC927BF29, 0x2E002B39, 0xBB2625E4, +0xBB4421D2, 0x18CBAA3A, 0x213ECD37, 0x0F33163C, 0x3622E432, 0xE5C7C62B, 0xDE39E7C7, 0xF3151CCA, +0x2ACED6D2, 0x2ADADFC4, 0xD6241ACC, 0xB92F0622, 0x1BC639E4, 0x33D92BC4, 0xAF14E0DD, 0xE6EEC2C9, +0x16BDCAE3, 0x272E1AC3, 0x292E1EBE, 0xDCE439B1, 0x4225E1C6, 0xCD372420, 0x27D716B1, 0xD83EE339, +0xDBCAD4B3, 0x26385033, 0xC5373528, 0x2DD2C936, 0xF9D5BAE6, 0x2E2DC7C2, 0xCD4BD045, 0x1DDF2DE8, +0x0E3740CE, 0xE5D82C36, 0x29D1360A, 0xCD03C8C6, 0x37CCCCD8, 0xD83AD330, 0xBFD619CD, 0xDEDDC432, +0xD0DADAEC, 0xD3E14F26, 0xA8CD35D9, 0xA3F325BD, 0xBC1626D7, 0x3AB81BD5, 0xBBFF1832, 0xC9DB2126, +0x16250000, 0xE003BB39, 0x35D533CE, 0x1801C610, 0xD9D44644, 0x474BD3C3, 0x1C30C51E, 0x2E3342C6, +0xB7D6B3E9, 0x5AE44BD2, 0x46D62830, 0xDBDD4B0C, 0x38CF2F1D, 0x2D331ACB, 0x36CCDED6, 0x10D7DA10, +0xD42246E0, 0x0FC73FC0, 0x2C3E1E37, 
0x2CF0D1B8, 0x17412EDD, 0x2630D0DD, 0xDBD535E4, 0xD927DA43, +0xB636C11E, 0x25CBE2D8, 0x4711F64B, 0xBC2C2029, 0xD0E3F538, 0xB92C27DF, 0x272DE2D2, 0x0CC5BCEB, +0xC0BC2EDF, 0x26D131C9, 0x1F092FEE, 0x2D342B34, 0x31B825D5, 0xC6BBC9C1, 0x38F1D0E4, 0xCBF7FDC7, +0xC3CAC9B6, 0xDBC436C6, 0x19DC1FD8, 0x28CCAED1, 0x1921BDB4, 0x24BCBF33, 0x301EE244, 0x36BDBDE1, +0x31C5B724, 0x00003ECD, 0x220B1EE7, 0xDEBAD3DC, 0x21CAB942, 0xC9193B33, 0xE3CD3A0A, 0x0DC22CC3, +0x3EC0C7CB, 0x43F2BFE4, 0x0ECFE9C6, 0x3ECA2FD5, 0xCB492DD9, 0x23E7BDC9, 0xD33DC151, 0xFCBE2933, +0x5ACEC51E, 0xF2172E1C, 0x3C24DE23, 0x3320D4D5, 0x2AB53C20, 0xC328323A, 0x45EA4133, 0x24391A32, +0xC7D8DA2B, 0xF4EAAF39, 0xDFCA36E3, 0xDEBED5C8, 0x36252C36, 0x04D7DBC7, 0x30CAC23D, 0xB60BF73D, +0xEACA2A30, 0x34C12ABE, 0xE2D747D8, 0x19D21FE3, 0x1DFD24EC, 0xB6B5D9C4, 0xBF2C1DD2, 0xD2B442E0, +0x37CCE1BD, 0x2AD1E1CC, 0x18DAC6BD, 0x213AD93E, 0x2031D8CB, 0xD82F2D12, 0xD112D2F2, 0x19CD43F1, +0x36BCB9CA, 0xD7C5DDFC, 0x34011D28, 0x3BDB0000, 0xCD00E1AA, 0x5228252C, 0x3512CA34, 0x0EE53A41, +0x242CD2C2, 0xF425DD2A, 0xF02FDDF8, 0xD82026D3, 0x4ABEE7B8, 0x2DC9DDDC, 0x3BEECD3C, 0x2ED1442A, +0xC713E92A, 0x310CC12B, 0xE128D520, 0x48E5C1CF, 0xC91CE3ED, 0xCCCBC72F, 0x0B23E3E9, 0xC1D2C8B6, +0x511ED534, 0x2AC7D4C3, 0xCFDCE335, 0xD4C9F643, 0xD433EBF1, 0x1BF8DA31, 0x223FC515, 0xCC2F13C2, +0xF825FFC8, 0x35E51633, 0x36293C4A, 0xC72ACA36, 0x4544E736, 0x1BD9315C, 0xCF16DFD4, 0xD0AFEFC3, +0xCECBBDE2, 0x05D6AA32, 0xD851F045, 0x240FD948, 0x3D35B2E6, 0x17D7C7EE, 0x0B22DACE, 0xF53AD637, +0xB2372DDB, 0xF6DF14CB, 0xBED95B24, 0xD4D5190B, 0x0000CB29, 0x3DC1ECF5, 0x30C6B34F, 0x1D2629B6, +0xD429D1CC, 0xD50716E3, 0xD0C3C2CC, 0xC82CC7D4, 0x5F27C718, 0xE6201BE4, 0xDACFD42F, 0x2E2EE9D4, +0x3AD036C0, 0x3828DB20, 0x27D63439, 0xD21026DE, 0xD60710BC, 0x13AEB9E9, 0x28C41944, 0x49D5E3D7, +0x27163429, 0x1BBAD528, 0xB12A14BC, 0xC72FD9E2, 0x1F43213A, 0x133B3EE5, 0x3622FE21, 0xD438DCBE, +0x41CB25E8, 0xD0D83E2C, 0x32BFC2CE, 0x362BE1DA, 0xC3CBCE24, 0x22DECDC2, 0x30CBD5CD, 0xD42DBC26, +0xDDF6AAD0, 0xCADC0EC8, 0xDC333434, 0x333114F5, 0xCE24361A, 0xB1C5D123, 0x4B25380B, 0x2FEC3634, +0x3EE2A610, 0xB4CEE72F, 0x29BCC721, 0xB23850CD, 0xA51AE4DD, 0xC81BBC2F, 0xD4DF0000, 0x2DE5D1E8, +0x2E0A28D0, 0x29E3C1E1, 0x56F62DD6, 0x3828DD4B, 0xC41ACFDF, 0xD2390A3A, 0xDF3028CC, 0xE8C5E22B, +0xD11CDCC1, 0x44D1CBEC, 0x33C73DDC, 0x1D25B7DC, 0xE7DD0525, 0x2743C41A, 0x2AE2D3DC, 0x21D6E82A, +0x133CBAC7, 0x593F34B0, 0xDA5FE339, 0x5206252C, 0x394FD5BD, 0x21D2C92C, 0x292426E0, 0x48E0362B, +0xCC272DE4, 0xC815C4DE, 0x49BD5D37, 0x1912342F, 0x2CE9E01B, 0x30D8D2D9, 0xDEBF46E8, 0x392F473A, +0x3A18D324, 0x2FE31FD0, 0x2B2B2710, 0xB53B2AC7, 0xB3481CFF, 0x2DD7CFBC, 0x2232351A, 0xD7C91C16, +0x1CB4DA16, 0x1BCBDF3C, 0xD2213822, 0x2121DB1F, 0xDBD1C02A, 0x27DA33C7, 0x351FC4CD, 0x0000B9D8, +0xC2142C29, 0x38B9DD31, 0x54E60CDF, 0x2A4A42DB, 0x35CF2BB5, 0x0CEA3323, 0xC2D8BEE2, 0xC61F3B33, +0x10CE11D0, 0x3336CE2E, 0x303ADD25, 0x3347B7D9, 0x33222C1A, 0x1A43E4C1, 0x4326E019, 0x2AE2E92E, +0xD2C5DC40, 0xBA45D7D6, 0xDF341BDC, 0x3A361925, 0xD4FEF4D2, 0x3018DA31, 0xDDC12817, 0x3038DE24, +0xEDC0292B, 0x3E3E3336, 0xCB1BEF17, 0xE10B2EBF, 0x2B20BABF, 0xC80C421B, 0xC2411D21, 0xECD8C7D6, +0xC824E4AD, 0x65B53DD7, 0x304325D4, 0xDD1DEAE6, 0xD33131CF, 0x2F36452B, 0x23B219E9, 0xC1C23529, +0x21DC3836, 0xD618E933, 0x393E2C14, 0x362ACA3A, 0xD6E2C237, 0xD6085111, 0xD721FCB1, 0xCCE6DEBC, +0xD1DCD920, 0xC3320000, 0xEA26CB19, 0x18BB26B7, 0x233BDF4F, 0xDE23FD3F, 0x2ADA1ECB, 0x3122E839, +0xEF42D7D7, 0xCAC7202D, 0xCD4CDED1, 0x2633C824, 0xD0123E4E, 0x104429B6, 0xC8CD4B00, 0x5136B738, 
+0xCFC8202B, 0x163135DD, 0x2C3821E8, 0x29314CE0, 0x0F3CE4EC, 0xE5432DAF, 0xC63AE340, 0x4639DB18, +0xDF282511, 0x25B5CBD3, 0xCAF50F37, 0x1E443B33, 0xE32CD32D, 0x10DA47B9, 0xDA410526, 0x34F0E4EA, +0xCEED3134, 0xC0B31AA7, 0xD12B1AD3, 0xC722C743, 0xBFBEDD1F, 0xD328D52B, 0xCC384024, 0x18B43FB8, +0x281742BB, 0xD8322FD2, 0xB5D628C6, 0xE420C0CB, 0xC9EB391D, 0x41E6B419, 0xBCC543E1, 0x2C37DDEF, +0x1F2D41E9, 0xD0F0D205, 0x1FEAE5C5, 0x15DF14B8, 0x47E327D9, 0xD328CE25, 0x36DB2E24, 0xD92E1EBF, +0x2B3230F4, 0xD1EAD5D9, 0xC7212ADA, 0xCD1235DC, 0xC519D5DB, 0xBFC631DF, 0xD5222B35, 0x35B6DEC6, +0x153C4428, 0xC5C3E42F, 0x1B2E2D2A, 0x3CD422A6, 0xC22842CC, 0xCADAC43D, 0x2C10DAD2, 0x18C708B5, +0x36D636BD, 0xCD29C5D3, 0xBFED433A, 0xE0384939, 0x005629F2, 0x36E2C425, 0x2830DF1C, 0xC9CB351C, +0xB0D7D3E6, 0xF3CEC214, 0xDC25D1C7, 0x1BD5CBCC, 0x19DD21C9, 0xC854EE30, 0x4FE14EE3, 0x38D5D3E8, +0xC9D126E4, 0x2FDD2130, 0xCE2130D1, 0xCFCAB0C9, 0xC71F0E1A, 0x2627D4DE, 0xCBE9DACD, 0xE7121F22, +0x2CC8E435, 0x3838391D, 0xDD37242E, 0xEBBE2F10, 0x233C0000, 0xAFC91CE7, 0xCE3536F6, 0x0F25D248, +0x2D04EBE4, 0x3FF634E5, 0xE929CE26, 0x24E0BABE, 0x302CD52B, 0xD1E2073F, 0x4333D11C, 0xDA48C2D1, +0x2D33F315, 0x29B2C8C3, 0x3B26E3EB, 0x3821C6DA, 0x32163DD6, 0x1DC737BC, 0x463CCD24, 0x19D63DDD, +0x200BDDB7, 0xBDD41E22, 0xC40A444F, 0x34BF49ED, 0xD4C0C0F4, 0xA41FB721, 0xBEDF51E8, 0x15D82A17, +0x383FDF21, 0x2B2918D0, 0xE6EB4718, 0x244530DB, 0x25DD3919, 0x3BA819BE, 0xDAEFBE38, 0xEB3FCCD9, +0xD1DF2E1C, 0x2520C3E3, 0x32D0BFD5, 0x37312BD5, 0xDC29D0C9, 0xBED02432, 0x310C3A18, 0x411ED8D7, +0xA4E5CE22, 0x163CBD30, 0xC3E2BE45, 0xCFBBAAC3, 0xC8C0E4CF, 0x0000CEFC, 0xE8CC2433, 0x39ED223F, +0x243AD83B, 0xD44D39C6, 0xCABF24DE, 0x252050EB, 0x29C73B40, 0xD4F434CF, 0xCAE4D5DF, 0xC5F2EEF1, +0x2919C8D2, 0x2FC4D20B, 0x1CBF5528, 0x3DBBD52D, 0xE03AD031, 0x3D2CCA17, 0xBE23BDB8, 0xE7DA3B30, +0xD1D2D331, 0xCC1825EA, 0x2F2D39D2, 0xF9313526, 0xCFBCC2E1, 0xE5BE3AD4, 0xBF59BE1C, 0x22FFC8B5, +0x32B14015, 0x31D6BEE6, 0x46DF3843, 0xDCD1D8B2, 0xE728C8BE, 0xEA1E2A28, 0xD1D8C7D2, 0x30CAF134, +0xEDD0EADA, 0x13C1E3D0, 0xB9D7DC29, 0xEAB93624, 0x24E49F38, 0x2E3FDEDC, 0xE6D0D23D, 0x1FBB3ED2, +0x23D540DD, 0xD727D92B, 0x251E33E6, 0x4BA7EECE, 0xC1CD203C, 0xC5CFED32, 0xB9CF35C3, 0x29710000, +0x29D7C636, 0x3A3ED1D0, 0xCC1B271F, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 + +hard_output0 = +0x42441F44, 0x006DFAE9, 0xDBB63AC2, 0xA3365CB3, 0xFA44372B, 0xC2603862, 0xA6C86E09, 0x416F0BF7, +0x8E58011D, 0x47C54B99, 0x269AD14C, 0x0B286902, 0x093E4C64, 0x8D84D8E1, 0xB8AE0993, 0x49567118, +0x8CEAABD2, 0x12D33409, 0xE3358237, 0x130176F6, 0x0986718F, 0xA18C83B5, 0x04EEF058, 0x59704040, +0x7478FE81, 0x01D27193, 0xEA1AD7F3, 0x21E6C2B9, 0x6478A1FF, 0xED9959AD, 0xE39E57BE, 0x4D859105, +0x056EF72D, 0xBD170BB7, 0xF01F9ADD, 0x99BF0C05, 0x44BAD09F, 0xF6ABDD61, 0xC0F8F116, 0x2972B53C, +0x0FEC944B, 0xD1675432, 0x0025F563, 0xF42B2EA8, 0x808E5C37, 0xB6E79AD5, 0x3706284D, 0xB9C0AFA5, +0xFF0E7E28, 0x5FA45C6F, 0x9CD5244E, 0xA013DDEA, 0x0D27D1A2, 0x2AE414AD, 0x41614379, 0xE6B68872, +0x5585D926, 0x5098D45B, 0xF8980ABD, 0x65821418, 0xEF8968A3, 0x301DEC3A, 0x57EF2A7F, 0xC17BE446, +0x94B65D62, 0xC9E6F350, 0x2E6130BC, 0x235F2E5E, 0xF1C13241, 0x94B291D6, 0x8C342458, 0x175B1FCA, +0xE07310FD, 0xD24934DD, 0x3CE8D053, 0x5C8F243D, 0x945B0AB3, 0x50EB8CC1, 0x8EF499D4, 0xA67801BF, +0x1680F061, 0x283FE705, 0xB8D7E773, 0x13AD3D2F, 0x4A6C305B, 0x1C1E5B12, 0x6F57D880, 0x8A666E5E, +0x26511296, 0xFFC09750, 0xFF91760A, 0xAC26795C, 0x75F321FD, 0x4221B9CB, 0x2E119188, 0x0772A832, +0x74D6036B, 0xA001B764, 0xF0D72816, 0xCBE119F5, 0xC5D6B248, 0x1BBAB091, 0x1BBE518E, 0xC647DCB1, +0x240CE252, 0x3F8AAE65, 0xE0CDEAAB, 0xBE724062, 0x5720636C, 0xC816E67F, 0x8E5A6C7F, 0x9E2738B3, +0x2419CBDF, 0xA6BF9B75, 0x48B21FC3, 0x8C4AED8D, 0xC1D8BCA8, 0x01353DE3, 0xA99D7D85, 0x9C6DDD20, +0xA650398D, 0x4E7E5777, 0x623FA183, 0xF02B261F, 0xF0BBA66C, 0x35AC701F, 0xD818E590, 0x7CA713BE, +0x01153743, 0xD1302B91, 0x55459E3C, 0x63FDA677, 0xECF93759, 0xBBE0E349, 0x73FD4FDE, 0x5C48C9E8, +0x70F46F73, 0x0B15437C, 0x9F4159A0, 0x7C343E78, 0xB3D085E1, 0x04266351, 0x1933DAD9, 0x83AF8C77, +0x2AC11BE1, 0x76C25CF6, 0x083E6CF7, 0x00DDBA22, 0x6AD2F284, 0xF87DBC6D, 0x7D40B45D, 0x6EBF21BD, +0x972E8535, 0x632B51D8, 0x5BFBA23A, 0xCDF1D44D, 0x11749158, 0x06DC45D1, 0x93D63A59, 0x9D0552F7, +0x61C8048F, 0xBFDE9FD2, 0x77827B70, 0xFFB130DC, 0xF18BBCE9, 0xD50C324F, 0x15521E17, 0x7D2470C3, +0x5EE34C4C, 0x2AC0C8AB, 0xB2283269, 0x4107FE15, 0xE6DA1ED3, 0x66A2A3A7, 0xB204B429, 0x0E02F9D0, +0xE5A45550, 0xEAF94102, 0x18F57A81, 0x8EEC9C68, 0xD575F397, 0xFC1BE8BF, 0x104E073F, 0xDF219F97, +0x2EC93D4A, 0x6033EE18, 0xC1351A38, 0x420C6A87, 0x0B5DC831, 0xD90D23E2, 0x13B9E883, 
0xCD65BE3C, +0xDB17A9CC, 0xC73C2A7E, 0x3281 + +c = +2 + +cab = +1 + +ea = +4918 + +eb = +4920 + +c_neg = +0 + +k_neg = +3072 + +k_pos = +3136 + +rv_index = +0 + +iter_max = +8 + +iter_min = +4 + +expected_iter_count = +8 + +ext_scale = +15 + +num_maps = +0 + +code_block_mode = +0 + +op_flags = +RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN + +expected_status = +OK \ No newline at end of file diff --git a/app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr_crc24b.data b/app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr_crc24b.data new file mode 100644 index 00000000..a866878a --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr_crc24b.data @@ -0,0 +1,677 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_DEC + +input0 = +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0xD0000000, 0x33CDC8CE, 0x4FAEE4CC, 0xC7DC23C3, 0xC306D6CA, 0x2B360A24, 0xE91C423D, +0x1F323427, 0x4B1C33B6, 0x3EC9D0E7, 0x39204DD1, 0xCCD73C37, 0xC6F6D8E1, 0x1DF828F3, 0xDEE34025, +0xC41EC235, 0xD035E1D8, 0x3CC32843, 0x29B60C3C, 0xE92E122E, 0x454BD4C8, 0x35D02543, 0x33E4D0AC, +0x07AF2228, 0x38C62DD1, 0x233800C5, 0x3CC02DD1, 0x35E73B19, 0xDEBED026, 0x33C5EDD9, 0x33E51527, +0x1F21EA18, 0xCC3E3BD2, 0x013CC14C, 0x3724D23B, 0x23CDED2D, 0x21424630, 0xC5B0E64D, 0xCFC03BEF, +0x24294241, 0x46C526BC, 0xD82F334C, 0x1E283DCB, 0x3F3F33C7, 0x4A14D2A1, 0xD92F2AF3, 0xCFA820CC, +0xDD30C6CB, 0x2B3ACAB7, 0x4AFE29CD, 0x25BD3200, 0x2D293323, 0xD32A3B29, 0x29E64D0C, 0xBE4BFB3C, +0xB1DD242D, 0x0427F11D, 0x3046CFDA, 0xD633C0D5, 0xC0DDBE01, 0xDA3DD9B4, 0x3BCE3638, 0x23262CC6, +0x432BD2AE, 0x23A9E1C7, 0x1BE45609, 0x22CD35AA, 0x32D6371F, 0xCE27352A, 0xE2B73F40, 0xC2D0CE2C, +0xE72E3A3E, 0x2FF52147, 0xEFCF2E33, 0x003103CA, 0xC6C14A33, 0x45CAC0CF, 0x011E1FD3, 0x23D3C72B, +0x2EB644C4, 0x484BD930, 0x3AC22E1A, 0x1AB6DD42, 0xCEDDE232, 0x3ACCCC31, 0xB830CA2D, 0xC930D2D7, +0xD0E049BB, 0xE6243524, 0x3EF53914, 0xCB083BE3, 0x411EDBE9, 0x30C3C3E8, 0x3721F5E7, 0xBBDF3FD9, +0x2AD21F33, 0xEE402858, 0xD3AFD237, 0x3D39401D, 0x30003623, 0x4734DC46, 0x192ACD32, 0x3324CAC2, +0xD714D4B5, 0xDC2E21D0, 0x2327CDBA, 0x15B739C3, 0x31C9EEB8, 0xFAB5372E, 0xC239B5DA, 0xE628440F, +0x34D425C6, 0x293BE6FD, 0xBDC04BBC, 0xE0DDB7CF, 0xD627BC34, 0xA5422FCB, 0x13211ABC, 0x1708E51C, +0xCE33DAD1, 0x51AEC731, 0x2E383824, 0xC722D0D3, 0xCB32292B, 0xCECC402B, 0x1B312EF0, 0x34CF46CA, +0xE3BD252C, 0x0C21EC36, 0x4ADFECBE, 0xC4D2E62C, 0xADE5BAD3, 0xDDCC1D18, 0x521AC135, 0x3F302D37, +0x22CE2DCC, 0x262A1BCF, 0x4EE7D7CA, 0xD43D3526, 0x1840E020, 0xC3201F2F, 0x3ACB453E, 0x41EAD5B6, +0xB6C4CF17, 0x243A55D4, 0xC7501E28, 0x351DCD1C, 0xE3A8D502, 0x3235271C, 0xAC342F00, 0x34B4E3D8, +0x36473AE4, 0xAE403CEB, 0xBC3FDA3F, 0x552CDBEC, 0xE7C42B5C, 0xE743E13D, 0x18D44650, 0xBA2EDF31, +0xB5C3EC42, 0x432EBE2E, 0xADDE4F2A, 0x401BCAD6, 0x03D6EAEB, 0xD0DEB133, 0x153FC4D4, 0x30DA2FD3, +0xEE282FD9, 0x242ADC39, 0xD72B42D9, 0x44C7E0D5, 
0xC72CB3C7, 0x27C9472B, 0x0036DFB6, 0xD9C53112, +0xC0360E0E, 0x4EEA0E27, 0xEDCF27C1, 0x1739D5DC, 0x22DE2CE1, 0xDCCAF633, 0x31D02417, 0xD7DCD0DA, +0xDE1DE0DC, 0xD827DD22, 0x1CDBD7EC, 0xCE171BCB, 0xEE2347CF, 0xC22C312D, 0xDBCC2A38, 0xBE0ADED4, +0xDC20CC2D, 0xD5380DB2, 0x2A32C701, 0x3EC7D41E, 0xCDD7D74C, 0xE83E2A44, 0xC12B26E6, 0xE70046D7, +0xD626D1EC, 0x35DDE41D, 0x303F27BD, 0xC94D4517, 0x31D1E229, 0xAD3529BF, 0x3AC5D21C, 0xCF1DB731, +0x2738DBEB, 0x1F382B33, 0x1ED52213, 0xFCE22CD2, 0x1330331E, 0x4A30BA0F, 0x1F38B42A, 0x2E34CB1F, +0xE3E125D5, 0x3AC72D17, 0xBD3C342C, 0x37272810, 0x1D34E4C7, 0xE5CC2EF8, 0x1E2FF1DF, 0xB9294236, +0x26F10017, 0xBDE3FBD1, 0xB7DB52D0, 0x2E3CB8E9, 0xB3343D3C, 0x3E3BDFDD, 0x1F46393A, 0x25FA20C1, +0x2D194CD1, 0xE9471D26, 0x34C3242E, 0xAE012034, 0xC8F3FED7, 0xB825362B, 0x322DC136, 0x42F41511, +0x181855C2, 0x4937D5EB, 0xE13CDC23, 0x08242A28, 0x2DCCDFC0, 0x13CEC64F, 0xB3E6F7ED, 0xD93401EE, +0x3D02DEE0, 0xCFC52300, 0xD2F1BE38, 0x2D092924, 0x253E2AF0, 0x37DACAD0, 0xDB1ECFE7, 0x40CED84D, +0x3706C42A, 0x4231D91A, 0xDD30471D, 0xBD272DC8, 0xCB1941D9, 0xC22FC7C5, 0x333F3C0C, 0x3F3D2518, +0xDDCCD143, 0xB3D834D5, 0xD231BEE4, 0x2BDB3B3D, 0x2F2754E7, 0xEAF3CE64, 0x3632E4C0, 0xD83324C2, +0x11A4D5CC, 0x00D627D3, 0xD51F2731, 0x17C92BE2, 0x1AD62E1E, 0x16DB2418, 0x16B83823, 0x2711D54B, +0xDFB02BDF, 0x28CDA600, 0x2AE9ED3E, 0xD314CD20, 0xD04F272D, 0xD0F9CD37, 0xCC1735E5, 0xD328EC37, +0xA7D31A26, 0x214BD9A5, 0xCFCB4226, 0x233C4AD5, 0x26EF37D8, 0xD83E3230, 0x3EC8C93C, 0x07B9282B, +0xD1E6DAF2, 0x31C52EDD, 0x3A0038A5, 0x56C1E0DF, 0x22E437C0, 0xDBC8143B, 0xCF1DB03F, 0x1D4BE636, +0xB8D3C23A, 0xBCC3372E, 0xAF284228, 0xDCE02F1E, 0xB9D4E3D4, 0x56D3D0D8, 0x494816C1, 0xFB1240C3, +0x23B1BDD2, 0x1236322E, 0xC2CBCB47, 0x102FE7C8, 0xBB0AE1D9, 0x3831BC29, 0x39AECBD9, 0xC7CF1ED2, +0x0AE43913, 0xDDD62F2C, 0x4FD522E6, 0x3CB90A55, 0x2101C8EC, 0x432604BD, 0x29E3E31E, 0xC25B3C29, +0x20E927D4, 0xD4300C41, 0xD9AECBD5, 0x48D8E143, 0x2CEE272F, 0xD2C11FBA, 0x2F2DB8D7, 0x1CF4DFE6, +0xCCDCCEBF, 0xE052D0EA, 0xDADDEB1E, 0xC72ACB3A, 0x332E2B1B, 0x2D26E827, 0x2A29E6A6, 0xE614D6D5, +0x221D1CC5, 0x51102031, 0x1735C248, 0x31D4DCD5, 0x3C20DC00, 0x2D231732, 0xE54AD9CD, 0x1D24D826, +0xE936373A, 0x20C0D3C9, 0x2B231520, 0xD6CED133, 0xDFC8C7B5, 0xDC2FE8B8, 0x3332249B, 0xC6E6E11F, +0xB7DA17AF, 0x1FD91321, 0xEE29CD41, 0xE1262FEF, 0xB6181DC6, 0x24F618C6, 0x3BE5DA2D, 0xFC2C35C8, +0x51C232CC, 0x36183EDF, 0x2B2CD6EF, 0x2F2DBDB9, 0x00D03413, 0xCDC9CBC4, 0xDB3309CC, 0x264EB22C, +0xCED1EAC9, 0x38D53F27, 0x2EE7D6F3, 0xC7232C23, 0xCCD42515, 0xC9C8C8E3, 0x231E3930, 0x5BBEDFCA, +0x34D7E5D7, 0x25CCE9A7, 0xCD4BF1E8, 0x5E2542E6, 0x38DCE43E, 0xDA34F0CA, 0x2EBFF841, 0xDA422D3E, +0x13CA2231, 0xD5C6DA27, 0xBAC0C6B6, 0xC82C49FF, 0x31312123, 0xBF00DFBF, 0xE8F23819, 0x2C34E81E, +0xDC4C2B0C, 0x3A5650F2, 0x2CCB0FD0, 0xC42D4C28, 0x4F19DA4B, 0xF63DE630, 0x29F51C1F, 0xD6BE14C4, +0xD2DDCDD6, 0xE0D02825, 0x2FE0E3D1, 0x41342FE8, 0x25D547ED, 0xC5A732B5, 0x19BD503F, 0x4CD9DA3D, +0x24D013BF, 0x4FE93FCA, 0x12DBE8B9, 0x33DF151F, 0x1E24AECD, 0x2E2E3656, 0x133500AD, 0x3641403C, +0xBDDF2BD4, 0xE33130E0, 0x3DC02726, 0x18390C3C, 0x26C04DC9, 0xDBF22D35, 0xC3E1EC45, 0xCED12623, +0x1CE92E39, 0xCF2FD222, 0xD8DE31E6, 0x1EE916D1, 0x35DCB621, 0x19C2EB54, 0x41B34ED4, 0xD9373BDF, +0x203ED6D8, 0x2CD4A8C8, 0x14E0C6C6, 0xDF242131, 0xCDD921CF, 0x33AACBCC, 0x2AD1A5C6, 0x3ED42B00, +0xBF181FD0, 0xC0CC2329, 0x1BD1EDC9, 0x3A2D47CB, 0xDD27D8CB, 0xD8E8BAE6, 0xD5DE3FF5, 0x2C30D641, +0xDCDCDF26, 0xC2C34433, 0x2837F53D, 0x29D8E12D, 0xBF3FEBD0, 0x34CACC45, 0x30E1DA21, 0x39DDDCB3, +0x30193518, 
0xC0132E19, 0x313D3736, 0xB2D4FF34, 0xF4D92945, 0xC70D3AEA, 0x3E3034F0, 0x00DA2F07, +0x1DD415EB, 0xCC1AC4B2, 0x232EB21C, 0xD7431E16, 0x2FCE0835, 0xCF39242C, 0x1BCA2CD4, 0xD8B6D0FA, +0x18C3262F, 0x2CE3BDCB, 0xD839DC38, 0x26161D24, 0x1223B3DC, 0xD6D63515, 0xCFBABFC6, 0xC1BF18C7, +0x2637C715, 0x264020EA, 0xD024D3C4, 0x4126A11D, 0x2E34442D, 0x32BB1CC9, 0x36E7DD40, 0x3113C3C5, +0xD100C8CE, 0xD21651E8, 0xC3CFCDD9, 0x403139F1, 0xD3D02119, 0x32171AC8, 0xD2F9B62E, 0xB5B335D4, +0xC1462835, 0x1F1C2C35, 0xD8DDBF35, 0x422D2DDA, 0x38ED19D7, 0x3326BD2F, 0x4534DFCE, 0x46403132, +0x19CDD51E, 0xCBC3CBC0, 0xE3CF2139, 0xC646ECD8, 0x29E7C7DD, 0xE94CCAF8, 0xB8C6EA37, 0xEC2B09DA, +0xC8C82738, 0xC6CCDDFB, 0x3FCA3B4D, 0xC6482BD1, 0x2DE1D333, 0x3530BB3A, 0xBF4838EA, 0xE4411ACB, +0x3514D939, 0xCD372B27, 0x4BEC1C39, 0xAB21C221, 0xD8D83F37, 0xCCE0512D, 0xDA4640C2, 0x36AE1CD6, +0xBC2F303B, 0x20C3B8CD, 0xBFE6D4E0, 0xBECCC4EB, 0xC7CC1E4E, 0xCC2BD03C, 0x3738C518, 0x23B824CC, +0x2BECD0BC, 0x2107B945, 0x2AC21B00, 0xBE49F2EB, 0x31D2E3DC, 0xDAD23F11, 0xCDED3137, 0x3ABADA3A, +0xD9C0C12C, 0x36CEE8CF, 0xD0D5BDD3, 0x2AC9CFCC, 0xE61FD747, 0xC7B7B12A, 0x2BD520DF, 0xD5BC1D31, +0xE6D8423B, 0xCC21EB43, 0x1F46292D, 0x171EE038, 0xDFA5E244, 0x1BD5C4AD, 0xC622C2B5, 0x4E33CFD3, +0x3F263F30, 0x34E4CF15, 0x0041CC36, 0x4021DB29, 0xD62E2231, 0x2C22AE21, 0x1A201F44, 0xC6D7C100, +0x3A335125, 0x42473A2A, 0xDF32DA1F, 0xE5D8B51F, 0x3049E832, 0xE6DED531, 0x315DD01E, 0xDDD92034, +0x2EBF0AEB, 0x4B4639DD, 0xBED6C8DA, 0xDABA3714, 0xDD133BD7, 0xC934EACF, 0x0D35C3E2, 0xB4CCC213, +0x37EE202B, 0x0AD21AD9, 0x28D2C9CD, 0xD800DB2D, 0xD34B274A, 0xBDD33644, 0x38EFF1CC, 0xBDABD734, +0xB11E3E32, 0x2F1C23DC, 0xE80BD7DE, 0xDBE7D6F7, 0x240248BF, 0x23CEF1F6, 0x1F56E634, 0x33CD2230, +0xB4DB182F, 0xD338BA36, 0x33392B24, 0xBB3B3649, 0x282F30E7, 0x45CC35B1, 0x46261B30, 0x1FD9A722, +0x3BE3E1C3, 0x25334FF0, 0xB8BB28C0, 0xD751E2CC, 0x403600E1, 0x461FC3ED, 0xCB30C202, 0xD8D2B92A, +0x2923D71B, 0xB9DDE6CC, 0xB935CB0A, 0xEF37D723, 0xD1DA06CD, 0x303CE928, 0xF8B9BA27, 0x37370A1F, +0xD6C1DDD9, 0x18B6463F, 0x28C71FC3, 0x38D8B53A, 0xD22DE9A1, 0x18DDB236, 0x0A28D515, 0x363B263D, +0x1F3A122D, 0x202AE323, 0xC72337E8, 0x32ED08CC, 0x2658C347, 0xC6F4E200, 0x45D1C2C3, 0xF92D343A, +0x3EBE1E65, 0x10EB2FD3, 0xC64FD5E5, 0xC5D3F521, 0xE9CA1A37, 0xD736CDD6, 0xDCC7C233, 0xC9C8E2D4, +0x2B99D60A, 0x1EE3DA0D, 0x30BDD8E1, 0x16D3BE1F, 0x1C4524D4, 0xC8D5D432, 0x19AEDA3D, 0xCDD4CAD5, +0xD835332E, 0x2412DA1B, 0xD0CBEBD7, 0xC3CEF425, 0xD1D743E3, 0x000A163B, 0x23DC3129, 0x202332D1, +0x322F2216, 0x28F5131A, 0xD316CF0D, 0xD0344C30, 0x423325D7, 0x37204237, 0xEEDDC721, 0x2038E805, +0xD525C522, 0xBEDDE327, 0x3AC8BCC5, 0x45402E27, 0x34DFC1D9, 0x2DB0D049, 0x322F2ACF, 0xDB3AC3C5, +0xD8D0EB4F, 0x2A1DCCCC, 0xD0D33DC2, 0x1349DC45, 0x39CBBC32, 0x30301EBF, 0x2C00DFC0, 0x5DD0CA11, +0xD0220C3A, 0xC527CD1A, 0x31CD372C, 0x2E0ED7DC, 0xC6F92338, 0xF537E8D0, 0x3626DBBF, 0xC9C3C6C3, +0x30DDBE17, 0x50394C39, 0x3A11343B, 0x27E8DC2E, 0x0DDB303F, 0x1818BCF0, 0xC5DC17DB, 0x24C53A26, +0xECB0E7E2, 0xC21B1928, 0x15D7C920, 0x4126AACE, 0xE214D8C7, 0xD40BCD42, 0xB2CBD018, 0x554436DB, +0x3C2D25D9, 0x21CEC0DC, 0xE9251919, 0xE41ACDC3, 0xDECF67C3, 0xEDDE3225, 0xCA30EEDD, 0xCFE01E37, +0xD231BF41, 0x34DF1EBD, 0x1DD2CC2C, 0x21142E29, 0xEA322B37, 0xCEC311DE, 0xF0F738DD, 0xEBED1A40, +0xDCE640E8, 0xD0D1BA2B, 0xE906D6DB, 0xC71938B1, 0x13C59F39, 0x1ED6DBB6, 0x231BD3A9, 0x2C2B30DC, +0x0ED50000, 0x2DD4EED6, 0xD9D21BEE, 0xE1442E36, 0x38CEC4AE, 0xC626CCF4, 0xC2CD3F36, 0xD71F2BD9, +0x2F421837, 0x31B11A23, 0xD4D5282D, 0xD0E6BED7, 0x30D737CB, 0xC8142A09, 
0xD8D2CFDC, 0xC22D3C44, +0xF62DF1C9, 0x3D38DBC9, 0xBFC2E5D3, 0xDCE42128, 0x3A304038, 0xDBE6D626, 0x28250FE0, 0x27B52BCF, +0xE0D8DC3C, 0xC8DA09DA, 0x2DF02FD4, 0xE539D7D1, 0xCDE729C2, 0x4816E313, 0x4E392AC5, 0x112CF50F, +0x3AD94924, 0x4B22273A, 0x43D2DE9E, 0xDA22DE30, 0x3FE324DB, 0xD9CC2811, 0x29D253D4, 0xE2BC353C, +0x0CC92147, 0xB8F0C6F5, 0x3D35DEBF, 0xED4ACBEB, 0xAC34E532, 0xEA2AC526, 0xEAC0BFC8, 0x4431D9DD, +0xC0CE4E21, 0x0000F2DD, 0xE0D8C7BA, 0xDDDC2E36, 0xDA42C22B, 0x2B312614, 0xDC391EF4, 0xCC2E31D8, +0x3DF4D630, 0xC72C21D9, 0xDB2BD531, 0xD916DB3C, 0xBEC11DD2, 0xD1D5CEA4, 0xBA072ED7, 0x402F2829, +0xE3C0F627, 0xC7CC3CBD, 0xF41D3345, 0xD706DB14, 0xB2DF36AC, 0xEBC4551F, 0x32B01929, 0xC5B6DDEC, +0xD91DE130, 0xE1213222, 0x4126D927, 0xAF52300D, 0x291B301C, 0x241A1843, 0xE43C1FDC, 0xBBCC191C, +0x2C25E011, 0xBD3EE9DB, 0xC9BF1345, 0x2F333AD2, 0x1CD822DB, 0x23BE43EA, 0x2527EADC, 0xB52E1C1E, +0x391CCF17, 0xD2C82C0C, 0x242E3830, 0xD328D808, 0x35CEF4CD, 0xD81BE035, 0xD0DF38DA, 0xD1D3C42E, +0x3A2C1EEA, 0xC6D03AD2, 0x1E362E4E, 0xE6080000, 0xD018C312, 0xEFAA323B, 0xD1294BD2, 0xBAD52C3B, +0xE0373E10, 0x260CDDE2, 0xC902E040, 0x30472BD5, 0x534BE7D2, 0x20CCB523, 0x21CCD843, 0x2259E24E, +0xDE35FDD1, 0xD4D91D3E, 0xCFCA11D5, 0x35D724BD, 0x3C3F0CB8, 0x3EC447CB, 0xE835DDC4, 0xC3C73E31, +0xDD432B2F, 0x2FDCBDC2, 0x2D3CCE17, 0x25B8AB31, 0x2ACCFFE5, 0x40E03C2B, 0xE7DA1841, 0x17E53020, +0xD44E0B2F, 0x46DA22EC, 0x33422425, 0x211840C5, 0x23C1CFD5, 0xC8350BE5, 0xD833CDD0, 0xE63DC528, +0x292ADC17, 0xD8BCD3C1, 0xCD39F02E, 0xA71EC343, 0xD6CC3A3E, 0x2E12D2CF, 0xC40B1FB1, 0x37DCDC33, +0x20E52EEA, 0x271629E5, 0xC4BEE0C2, 0x1B3CDBD6, 0x00004934, 0x18E145C7, 0x4BE93522, 0x3029C309, +0x31D1D50F, 0x3DB42C1A, 0x21A643B2, 0x3E26D8C2, 0xCDDE28D1, 0x44E835E1, 0xEDC4DED1, 0xC4F9C9ED, +0xC5E1BBCC, 0x4426C1D4, 0xD9BB372B, 0x37232E0D, 0xE5D0D0AE, 0xC9D23CC5, 0xF638E2C7, 0x2DB6E6D9, +0xC0C9B72D, 0xDF28E6D0, 0x11E2DCC8, 0xD8E2F31C, 0xEA3937DE, 0x2741B953, 0x19D5CEE6, 0x33DDC22D, +0x2BF72326, 0x2143363C, 0x13CA1A1F, 0xB9D7DAC9, 0x22C6B72E, 0xC5DA2631, 0xCB32ECCF, 0x032D15D4, +0xBB1EE613, 0x38C42452, 0xDBCD2726, 0x1AF44039, 0x22CF2EC6, 0xCAEDE6B4, 0x2823D423, 0xDCC61C39, +0xC12D19BF, 0xC612C31B, 0x31F1D32D, 0x3E1C09E4, 0xD824C5E7, 0xDFFC2839, 0x2AD40000, 0xDFCCBBBC, +0x3FC9CC12, 0xEF2626E1, 0x0FCDEA2A, 0xEBD735CF, 0x4E1A2834, 0xCDEDE1D5, 0x31CCCD29, 0x2F4618DF, +0x48C11936, 0x29DADE41, 0xD71C1AE0, 0xDDD2285A, 0xB13DCDC8, 0xF020B8DB, 0xD0B837C2, 0xDC3B48E8, +0x18C3C530, 0x192C2940, 0x2ADA3AE4, 0xEA34C55A, 0xDAB2BED4, 0x1A51E838, 0xC832AEBE, 0x10DE2DE2, +0x3B2408CC, 0x22CEBD1E, 0xC919D6BE, 0x21D2D128, 0xCFE03A2A, 0xB7DEDC3B, 0x33C630ED, 0x5A0EEAD1, +0x3CB03FCE, 0x24EC15E4, 0xD0385BBF, 0x20CB4E4B, 0x30E0C535, 0x301BCD3C, 0x3642DDD3, 0xD90FDC50, +0xCA35BBCC, 0x393B3B2F, 0xE2BE352D, 0x362BE3BF, 0x1922B73D, 0xDE3448BD, 0xF7C0C02D, 0x00002F43, +0x2FCAD0E4, 0xDE412A34, 0xFBF339DA, 0x2BB94330, 0x39B521D5, 0x2DE31F43, 0x382C4F23, 0x35314C2D, +0x2DE3DBDC, 0x3A18EAB9, 0xD0DF131C, 0x45EA41E0, 0x48BBC1BF, 0x2220D7E4, 0x29C8D0C5, 0x3C0527CA, +0xC1ED061C, 0xCB48CA3E, 0xE846E633, 0x0EB2D7CF, 0x44C42335, 0x27F0D440, 0x332FD847, 0xD72F2CE3, +0xD7ECD6C9, 0x4026D51F, 0xD334BEE6, 0xE01B40C5, 0xD62BDA2F, 0xDEEA36AF, 0xABD039C7, 0x2E123FCE, +0xFD3924A8, 0xD0C6D529, 0xD52AEED4, 0x211D2A35, 0x1EE9C5F1, 0xCECAF921, 0x38C92740, 0x3E4E2A1A, +0xB8FAC7C3, 0xF11F1C36, 0xD4DA163A, 0x2701DC38, 0x0E3019D5, 0x57DB23E1, 0x43CD26D0, 0xADCC4934, +0xE0D3D706, 0x403A0000, 0xDECED5E1, 0xDAE6CF1D, 0xD5E33C40, 0x35B3D038, 0x24D73231, 0x2EC01D40, +0xC2D23CDC, 0x44C20BC6, 0x44C1D7A8, 
0xE9D1C745, 0x36E5C2C6, 0xB81B2D42, 0x413917CC, 0xD8E031BA, +0xCE3CC1C4, 0x3EC91AC5, 0x0DE406CE, 0x35D1DA27, 0xC8E5CAE3, 0x313EC127, 0xB61631CA, 0xD73B3444, +0xD834C850, 0x37D31DE2, 0xCCC9153D, 0xBF41E9CA, 0xD649F9DC, 0x3A31D501, 0x50CD1DE3, 0xC8E54012, +0x4432D4D5, 0xCBB93023, 0xE8FCD6C2, 0xCEE0CAD1, 0xE41B2E39, 0x28CC0E40, 0xDFEAEFFD, 0xB3EA2723, +0x3534B935, 0x42D12A09, 0xED3DCA0A, 0x263C352B, 0xAD224EDF, 0x2CDF2A10, 0xF0BE4CAD, 0xBD45D8D5, +0x263F27D4, 0xE2372FB4, 0xC35240DF, 0x1EF3DCDA, 0x3AD0D224, 0x27DAD527, 0x303A2CBE, 0xCDDCC829, +0xE22B35C9, 0xD5DC222F, 0xEEBDCB63, 0x2042DAD8, 0x06CC3524, 0xD11BBB3E, 0xAE2229E6, 0x1FC81E1F, +0xEF4B12DA, 0xE1D846DD, 0xBA3B1CDA, 0xE154C801, 0xD6092CD2, 0xD0F132CB, 0x3A40D027, 0x123530BB, +0x1EC5B4DC, 0xD11825CB, 0xC603DECC, 0x28BCC5ED, 0x2C03E1CD, 0xE9AA12A5, 0xDB2DD53A, 0xCA32D01E, +0x2F4A09C5, 0xFE411E26, 0x1354CEC8, 0x3736C5F4, 0xC62F3C3B, 0x252DDFB9, 0xD0353723, 0xD8DDC21E, +0xE3DF21ED, 0xDAE33AC0, 0x2ADA323E, 0xDC43E1D9, 0x3A23463E, 0xD71934F8, 0xDB1037C8, 0x38B7BCCC, +0xDA2C1CC1, 0x3EEEE349, 0x2ACB2A26, 0xD4BCBCD8, 0x4AB50000, 0x9EC941D8, 0x3824C9D0, 0x203CCD36, +0x0DCDEDD8, 0x5B333036, 0x3125C7DD, 0xD7383F30, 0xD6C72617, 0xE1C245DD, 0xBDD3E818, 0x3210422F, +0xD00ECF2E, 0x44411A3A, 0x02E72E22, 0xF106D24C, 0xCFC847CF, 0x0421AFCF, 0xD2C525A9, 0x3CBCE63A, +0xE02A1E36, 0xE036E3E4, 0xDDC22B25, 0x2EC7452E, 0xE1E3EFE3, 0xD82ED6CE, 0x36C1E115, 0xD129E026, +0x251E1C44, 0xE8E5D7CA, 0xDDE7D40F, 0xC5D8D9D8, 0xA1C52550, 0x3ACDDA25, 0x3548C4E1, 0x2913D9C8, +0x1E28243A, 0xCA3324E2, 0xD0D7D1C0, 0xBDE9D0C4, 0x35BF4FE0, 0xCA42E33C, 0xBBFA33DA, 0xD753C9CD, +0xDA2CE11D, 0xD9BEE407, 0xE31DD4C1, 0xCBCCD51B, 0x3BB8D6C9, 0x0000DDDD, 0x28D3CDDC, 0xD62445B5, +0xB3211F13, 0xD7B9BF29, 0xD429D5E5, 0xB9C23CCE, 0xC219EDC1, 0x211E0B34, 0xB8F0E1C1, 0x33C0EF4C, +0xD8D6C7D0, 0x30E8D3D8, 0xB06514D1, 0x21B9D915, 0xE2323DCD, 0xDA41311F, 0xE24AB11F, 0xE9B92BE7, +0x33EA23C3, 0x04C430C0, 0xE849EF0D, 0xD1232C2C, 0x260FD13E, 0x002DCBDD, 0xEB3129E2, 0x20B4C7C6, +0x25D32335, 0x2CCB39D0, 0xEA13DAC6, 0xCDEE2E14, 0xCDAA2CD0, 0xE1D339E6, 0x36DCD03B, 0xF5453114, +0x34D428ED, 0xEAEDF458, 0xD226CC2D, 0xEDD03AD8, 0xEE1CC83D, 0x10DBB5E5, 0xF018CBDB, 0x5E33E31E, +0xC6D9232B, 0x1A373130, 0xDFE7DDCB, 0xB21FC5C6, 0xC1CDB1B7, 0x3F2DF128, 0x482BB426, 0x323E0000, +0xF5D43741, 0xCC1C1FB9, 0x00004E2E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x13000000, 0xC9112F26, 0xE3DF1FED, 0x284020CF, 0xC6ED13CA, 0x38B1E217, 0xD90FE8DB, +0xD7C0C3B7, 0x341336CE, 0x45E2D239, 0x3E22F0D8, 0x461D2C29, 0x3825D628, 0xD733E5F1, 0xD7B6C532, +0xC62B28C7, 0xD0BF3024, 0xF0BA3A26, 0x2D1D2836, 0x1BCF2DDE, 0xD02B251A, 0xB938CD2C, 0x34E220D9, +0xC2BEC23B, 0xCC2908CD, 0x1D080039, 0x42BB25C5, 0x32EB3206, 0x36D3D1E0, 0xC32E10C3, 0x243B13E6, +0xC02A1FF0, 0xC325D6DD, 0x23E33D2C, 0xDCD7DED5, 0x9C23C21D, 0x32E62056, 0x06314421, 0xE9E4ED24, +0xC0C9C233, 0x44BE3FDD, 0xD25134E5, 0x292EE2E7, 0x4EE0D1DF, 0x29C82E3A, 0xF5392A22, 0x4BE2D133, +0x58C2C4C8, 0xD2212920, 0x2935D4DB, 0xDECF4400, 0x31D7CCC9, 0x12DCFB2D, 0x2B2F0723, 0x2DE7B54E, +0x2AD24ADA, 0x261E2931, 0xD538CEDF, 0xE12A1C2C, 0x33DDEDE9, 0x2FFC36C4, 0xE61823CE, 0xC225E1DF, +0xC2102FEF, 0xCFD22617, 0x3EFBDA37, 0xDAD6BCD8, 0x110623E7, 0x3320E2D1, 0x1932B8D5, 0xCCEBBFC9, +0x47234011, 0x0A2B45D8, 0x25C11244, 0x00EF3221, 0xC634CC38, 0x0F34D0E6, 0x1A3F40EA, 0x2DEDC82D, +0xFFD2C8BD, 0x40E321CD, 0x24441BD3, 0xD932EDDD, 0x48362127, 0x1230EADE, 0x39E632F9, 0xD0D451DC, +0x18DD2DC0, 0xC22AC72E, 0xD71E172E, 0xAFDED12B, 0x1DBAA948, 0xF927B647, 0xD7C31C23, 0x3328E73B, +0xDBD72DD9, 0x3E23D424, 0xCB09E025, 0x16BA21CF, 0xDD00B9CB, 0x29E3CAD7, 0x3A19D011, 0xEA0EC4E0, +0xD51CDDBF, 0x2AD1C640, 0x2CD6CBD3, 0x36D6D43C, 0xA72851C2, 0xE7EE3C3F, 0x432A4721, 0x46C726E5, +0x253FD426, 0xD03B2C27, 0x1AEC343B, 0x5A2BC735, 0xCE31D2D2, 0xCFEDF30D, 0x492B4141, 0x20DC1BAD, +0xDAF81CD9, 0xD6EAD21D, 0x2339CD25, 0xA31546BB, 0xD0C0F63F, 0x382BE719, 0xDA34BFDB, 0x1EDBBADF, +0x5151E646, 0xC164D5E2, 0xD8292FCA, 0x33CF1F3B, 0x3FF531F4, 0xD3EB3453, 0xD433E236, 0x1B102B1C, +0xC6C320E8, 0xD8393C2E, 0x17EAB7EB, 0xE0C42CE5, 
0x313720CC, 0x3F39C42D, 0x33374AD5, 0xCE1ED41E, +0xF7D2BE0D, 0xF114311E, 0xE22CD6EB, 0x4E23CA2A, 0xCDCF36D3, 0x463B1EC4, 0x312DE400, 0x1A1219CE, +0x0CD1E2C3, 0x3850CBEA, 0x31D7CCCD, 0x3F2D3936, 0xC518E33E, 0xC31F31B7, 0xC0AED6C9, 0x2AC6E5D9, +0xC334B8BF, 0x35EFD72E, 0xD72B3AD8, 0x31073AD7, 0x25E22D38, 0xF0CD4A64, 0x28DFDB2D, 0x22D9E714, +0xB8E43935, 0x35FCBFEC, 0x1DC1391E, 0xD6E731A5, 0xBBC8B837, 0xBD3FBE3A, 0x00BADC53, 0x28C43F33, +0x1CE0BAC2, 0xE0E125D9, 0x3BD6CB3D, 0x313230BE, 0xC4243D34, 0x1F24D2B1, 0x2058384C, 0x2AE9192D, +0x302CCCEE, 0xD435C4C7, 0xB0D53217, 0xE52D32D9, 0xE0AF2127, 0x3E47D0CA, 0x4112BFCA, 0x27D0D0BD, +0xE81E5CF9, 0xD50CBCE5, 0x2DC9B4F7, 0x3FBA33BC, 0xCBCC2821, 0xDFD51326, 0xD20ADA3A, 0x2000C3BB, +0xC4DC4BE5, 0xCD39D7A7, 0xC40FF54F, 0xCC3BD1CC, 0x2C19EACC, 0xCD2E2129, 0x44D9F0DB, 0x2B40E11C, +0x31343F4C, 0x0E142817, 0xD0104135, 0xE304CAC7, 0x259A1FD4, 0xC3EAED3C, 0x1A311AD7, 0xD8242BEE, +0x20BF4EFD, 0x3FCA16C5, 0xDDEED03A, 0x0A22C52E, 0xD53B2D3A, 0xE22A2CB5, 0x291D3228, 0x2CCC2E45, +0x48BA00D5, 0x4FE12D1D, 0xD8D91DD1, 0xCEDBEC31, 0xFBD526B9, 0x3C1DE845, 0xAFDCE91C, 0xD622D31E, +0xCF2620D9, 0x28D63427, 0x362C3DDF, 0xD72C2530, 0xD6A7C203, 0x442C3233, 0x29B24E3F, 0xDD21412C, +0x400BD6D0, 0xC8D4B831, 0x171EF12A, 0x34FFC9D0, 0x3B3ED02D, 0x0BC7E2CB, 0xC43D28C6, 0x42F6C948, +0xD24AD429, 0xCB2C2A00, 0x43E2A236, 0xBC353312, 0xD91324B5, 0x30B4D013, 0xC92FDE2F, 0xBFD6D432, +0xC82DCABC, 0xC032DC1D, 0x30C9D2CB, 0xE0341DC5, 0xDDCEBE39, 0x2CC41D58, 0xE213BF14, 0xDFCDCBD9, +0x2E3C36DD, 0xE5262833, 0xF841291D, 0x15E324F5, 0x0315D732, 0xD1C5C5CA, 0xD73144D3, 0x2ABEBA1A, +0xBCF42E2C, 0x00E1E53B, 0x4423AD2C, 0xB021CF20, 0xD8D5EEB6, 0xBD35BCCD, 0xE6D85334, 0xD549DF2D, +0x35C74D19, 0x3F4BBE3D, 0x1F3FA3EB, 0xCEB2B335, 0x35D0BCBA, 0xD5D8CF27, 0xB11DCFC4, 0x24D3D408, +0x183B35D0, 0x1C14D619, 0x35D445BB, 0xDE452444, 0xC83315C5, 0x3F2122D0, 0xB623C737, 0x33321DD0, +0xD6DF33DE, 0xDE39AA20, 0xCF00F239, 0x412410D5, 0xC5DC2339, 0x1D3BD1B6, 0xC0E3481E, 0x1030451A, +0x30EA30D1, 0xC6DCB9C7, 0x2C36DC17, 0xCBB0ED30, 0x26C20FBE, 0x35D6D528, 0xD6CC2242, 0xEDE4241E, +0xC036DED9, 0x26303627, 0xB6B6371B, 0x26E3D6DF, 0x0D36C729, 0xBEEE4FD7, 0xE3C839D1, 0xC0313031, +0xAEAD2828, 0x36323D23, 0x13342728, 0x2904EE20, 0x261DDCD6, 0x0CBCE1E1, 0xD2CBD5C3, 0xFD1C4B2C, +0x1CD8CDDB, 0xD4BE53B1, 0x2BD6CDDE, 0xC61A01E1, 0x3D41BED3, 0xE0333BD0, 0x4040E821, 0xC4A32449, +0xD3D3A8BF, 0x36E11C2C, 0x3CC1321B, 0xC5D03E1C, 0xB813D9DD, 0xD6C51838, 0xCEBED141, 0x1FEA3813, +0x3611CAC0, 0xD12EE2C6, 0xE1BFD22D, 0xD0DAE9CC, 0x0E03DB00, 0xBC37393F, 0x422BC916, 0x49C4D72A, +0xBFC0243B, 0xC1C2F1C3, 0xDF36CC32, 0x49E53EE4, 0xBFC02CDF, 0x2BEE2BE1, 0xC23224C5, 0xD63F16DA, +0x15DBC21F, 0x49E4CC57, 0xBE272CC7, 0x274E0643, 0x5C1DDBE7, 0x34B73DF0, 0xCD2D2EB9, 0x1AD43BCD, +0xFD3047D3, 0xDAC53936, 0x1328DCC8, 0xBC32331D, 0x001F2634, 0xC845410F, 0xDBDB1C2B, 0xD1D819D0, +0xEBDDE12D, 0x37E335D7, 0xCDA84635, 0x0F362FBE, 0x30C229C4, 0xBC1DC43E, 0x3CD02F3D, 0x2EF5F5F4, +0x462D15E6, 0x362F1FC5, 0xC0C9CD49, 0x14C92639, 0x2BD135D0, 0x49C6B8DA, 0xDBC7E1D9, 0xC6382328, +0xEDC217BC, 0xE4D1331C, 0xDCC741CF, 0x283A341C, 0x38B93420, 0x46001BE7, 0x3E1030C4, 0x42A8C1CD, +0xBB3339C5, 0xE538DD20, 0xDF3F2602, 0xE52BE5C1, 0xBE2B41BA, 0x3BD4D337, 0x2D39C3DB, 0xC3BB36F0, +0xD12CCF17, 0x4130B93A, 0xE31DEAEC, 0xF9DEF52D, 0xADD35231, 0xD62ED0EF, 0xBE303521, 0x1E2ADB41, +0x310AC62F, 0x22DA2DC5, 0x26E1D9D0, 0x1F4D2AC1, 0xC8E6DCE1, 0xC6B3D62E, 0xD6D20019, 0x4EDC2D39, +0x342AE224, 0x07CBD4CB, 0xE43FB3BE, 0x1BE53748, 0xDD44C8A2, 0xBC34B7BA, 0xA1DD1D43, 0x23D31827, +0x3519E93F, 
0xC83B3720, 0x1BF0B72A, 0xEAC7DCC0, 0xCA4B28EC, 0xD54D242F, 0xF35435D1, 0x3DDEC824, +0xC31128CE, 0x0B0FD136, 0x442D1E2B, 0xCBD1C3EC, 0xD635341B, 0xDD1F2931, 0x3442CDD0, 0xDADD3600, +0xC31E40E3, 0xB0BD1B22, 0x38CA1F0D, 0xD42D3043, 0xE65127E4, 0x2F15C9D6, 0x3E360B30, 0xB2CB1524, +0xC7BBC4DF, 0x38D244C7, 0x483B31C0, 0xDB43C8D1, 0x43DCDBBC, 0xDCC7571D, 0x24BAC2C6, 0x3B4ED6DC, +0x3AE03245, 0x0C40E1E2, 0x493724C0, 0xD4ECC537, 0x36CC30BA, 0x1C303FC6, 0xD12F27D5, 0x00C4272F, +0xFFFD061D, 0xC2D9251D, 0xC1D7D625, 0xBFCD3AD3, 0xB42E11D4, 0xC7230E1E, 0x24CD2C21, 0x0BD436F9, +0xCECCD331, 0xE6BDE0D5, 0xD2C6C941, 0xE1D1D3EF, 0xEB4123CD, 0xDA1755E5, 0xCEE4C529, 0xDC43C5D1, +0xD8FDDFEA, 0x1AC9C317, 0x22E13DCE, 0x01D7BA29, 0x1551E449, 0x43CD363E, 0xC6281B24, 0xCBD23725, +0x3200D61C, 0x35D6CCBC, 0x262CB22C, 0xE2E43DB1, 0x26493335, 0x3AD72ACA, 0xC4244430, 0x3AD7C9C3, +0xC91ED8D1, 0x40EAD2AC, 0x38C9CDAC, 0x413BC137, 0xCD304B1F, 0xEFCA2323, 0xAC3FE4CD, 0x3616F6E0, +0x41C2272A, 0xDCD7B2E2, 0x27D9E018, 0xDEE2D943, 0xD5C01F22, 0xB0D93540, 0xB411C13D, 0xDE39CBE0, +0xE2C6F126, 0x31450828, 0xD2BC2927, 0xBACA1DD1, 0x34DDCDC3, 0xD8302AD5, 0xE2D02824, 0x0B2BE03E, +0x3BCAFD2C, 0x2E461D45, 0xBBF6F32E, 0x31E5DED2, 0x2ED4E847, 0x3BDDCD29, 0x231331D8, 0xF521CF2E, +0xD024C718, 0x20D91011, 0xDB1E0F34, 0xD931C0BB, 0xBB310837, 0x29B7C5CB, 0x3B31DEE0, 0x0836B926, +0x28CB1FD4, 0x31072A30, 0xEE41F300, 0x35CB501E, 0x603A2B47, 0x2C2739CE, 0xC0CFC4D9, 0xE0CED43C, +0x22BDC9CA, 0xBAEC122D, 0x4D16F2C5, 0xB6E5C3DA, 0x19D5C9D0, 0x1D1DDFC2, 0x223A23FB, 0xCD382B09, +0x3025C2E6, 0x3918BDD5, 0x33DFEF0E, 0x291A302B, 0x14184930, 0xD414D233, 0x473CCED7, 0xD2D318B7, +0x3E2738D2, 0x2928D537, 0x002E14BB, 0xE2E03734, 0x1B1EDFD1, 0xCF483FD8, 0x3D043844, 0xD52E1C4C, +0x2823EAD6, 0x1F2D241A, 0xBC4B22E6, 0x2EE7DFB8, 0x3424E2F0, 0xD41BD7CB, 0x2BBCDF1E, 0x155B5033, +0xB527D543, 0xE3C5E9F4, 0xD9B5C92B, 0xD3CAE0DF, 0x31C4CED7, 0x0BD33B25, 0xCF3B111D, 0xE62F241F, +0x22D93D22, 0xCF27EF43, 0xDAD2C5DD, 0xC60019BE, 0x2935D5C3, 0x2D0BCD22, 0x224714EA, 0xE7CAB817, +0xD1282222, 0x3C25EBD0, 0x45F0A8DC, 0x3ACFB92D, 0x34B4E7E6, 0xE93523E0, 0x2C292FCA, 0x44D4C7AC, +0xEA22C3FE, 0xEF37E524, 0x313ADCDA, 0x2212D846, 0x1BE0FF4B, 0x1CBC2E34, 0x174AF041, 0xD245223E, +0xE1DB2BDC, 0xF2E9A515, 0xE3C33BCC, 0x43DBD5CE, 0xF11700DB, 0xDE44DD36, 0x26E1D604, 0xC2ACE419, +0xB9D8C7D4, 0x37EFE21E, 0xE2BAC2D1, 0x3926E91A, 0xD013E8BC, 0x2C43ECD9, 0x232B30F8, 0xD6C9E0BD, +0x26A718BC, 0xEFF6C840, 0xC13332C2, 0xC4B9BD36, 0x1B4921BD, 0xD0EBECE0, 0xBCE4D647, 0x35D4DCD5, +0x1BBE4C27, 0xECC1B7CB, 0xD820284D, 0x0BC33DBF, 0x34D03140, 0x38EC3900, 0x29EBDBC9, 0xD43A3041, +0x3AE0DD0C, 0x09CFCFDB, 0x44E0C7CE, 0x20DBC4E3, 0x26DC41DE, 0xBA3AD2E5, 0x2ABA34E1, 0xD70FBEB7, +0x40E03FE2, 0xF8D62041, 0xBD47B7EA, 0xD2E03E34, 0x1FC6D2DE, 0xCF1CD155, 0x32D52630, 0x3C463D2C, +0x253130E3, 0x1ED8BB4A, 0x411BDFDD, 0x3FD8EE35, 0x33D02BBB, 0x002ECAAE, 0xDF1FDFEB, 0x2CD8DA3C, +0xB6352D59, 0xD5BDD3B9, 0x1946630D, 0xD9181731, 0xD419D31F, 0xCE2FB7D0, 0x32EFE3C3, 0xCDDB2FD3, +0xD247BB3E, 0xBA41DECA, 0x23244AD2, 0xDEAAF2B2, 0xB8D525F2, 0xD327A4D0, 0x3718C0E2, 0x2D3E20D9, +0x24CFDF12, 0x26D0BE1C, 0xC8D9282E, 0xEB54CADF, 0x44DB1AED, 0xC927BF29, 0x2E002B39, 0xBB2625E4, +0xBB4421D2, 0x18CBAA3A, 0x213ECD37, 0x0F33163C, 0x3622E432, 0xE5C7C62B, 0xDE39E7C7, 0xF3151CCA, +0x2ACED6D2, 0x2ADADFC4, 0xD6241ACC, 0xB92F0622, 0x1BC639E4, 0x33D92BC4, 0xAF14E0DD, 0xE6EEC2C9, +0x16BDCAE3, 0x272E1AC3, 0x292E1EBE, 0xDCE439B1, 0x4225E1C6, 0xCD372420, 0x27D716B1, 0xD83EE339, +0xDBCAD4B3, 0x26385033, 0xC5373528, 0x2DD2C936, 0xF9D5BAE6, 0x2E2DC7C2, 
0xCD4BD045, 0x1DDF2DE8, +0x0E3740CE, 0xE5D82C36, 0x29D1360A, 0xCD03C8C6, 0x37CCCCD8, 0xD83AD330, 0xBFD619CD, 0xDEDDC432, +0xD0DADAEC, 0xD3E14F26, 0xA8CD35D9, 0xA3F325BD, 0xBC1626D7, 0x3AB81BD5, 0xBBFF1832, 0xC9DB2126, +0x16250000, 0xE003BB39, 0x35D533CE, 0x1801C610, 0xD9D44644, 0x474BD3C3, 0x1C30C51E, 0x2E3342C6, +0xB7D6B3E9, 0x5AE44BD2, 0x46D62830, 0xDBDD4B0C, 0x38CF2F1D, 0x2D331ACB, 0x36CCDED6, 0x10D7DA10, +0xD42246E0, 0x0FC73FC0, 0x2C3E1E37, 0x2CF0D1B8, 0x17412EDD, 0x2630D0DD, 0xDBD535E4, 0xD927DA43, +0xB636C11E, 0x25CBE2D8, 0x4711F64B, 0xBC2C2029, 0xD0E3F538, 0xB92C27DF, 0x272DE2D2, 0x0CC5BCEB, +0xC0BC2EDF, 0x26D131C9, 0x1F092FEE, 0x2D342B34, 0x31B825D5, 0xC6BBC9C1, 0x38F1D0E4, 0xCBF7FDC7, +0xC3CAC9B6, 0xDBC436C6, 0x19DC1FD8, 0x28CCAED1, 0x1921BDB4, 0x24BCBF33, 0x301EE244, 0x36BDBDE1, +0x31C5B724, 0x00003ECD, 0x220B1EE7, 0xDEBAD3DC, 0x21CAB942, 0xC9193B33, 0xE3CD3A0A, 0x0DC22CC3, +0x3EC0C7CB, 0x43F2BFE4, 0x0ECFE9C6, 0x3ECA2FD5, 0xCB492DD9, 0x23E7BDC9, 0xD33DC151, 0xFCBE2933, +0x5ACEC51E, 0xF2172E1C, 0x3C24DE23, 0x3320D4D5, 0x2AB53C20, 0xC328323A, 0x45EA4133, 0x24391A32, +0xC7D8DA2B, 0xF4EAAF39, 0xDFCA36E3, 0xDEBED5C8, 0x36252C36, 0x04D7DBC7, 0x30CAC23D, 0xB60BF73D, +0xEACA2A30, 0x34C12ABE, 0xE2D747D8, 0x19D21FE3, 0x1DFD24EC, 0xB6B5D9C4, 0xBF2C1DD2, 0xD2B442E0, +0x37CCE1BD, 0x2AD1E1CC, 0x18DAC6BD, 0x213AD93E, 0x2031D8CB, 0xD82F2D12, 0xD112D2F2, 0x19CD43F1, +0x36BCB9CA, 0xD7C5DDFC, 0x34011D28, 0x3BDB0000, 0xCD00E1AA, 0x5228252C, 0x3512CA34, 0x0EE53A41, +0x242CD2C2, 0xF425DD2A, 0xF02FDDF8, 0xD82026D3, 0x4ABEE7B8, 0x2DC9DDDC, 0x3BEECD3C, 0x2ED1442A, +0xC713E92A, 0x310CC12B, 0xE128D520, 0x48E5C1CF, 0xC91CE3ED, 0xCCCBC72F, 0x0B23E3E9, 0xC1D2C8B6, +0x511ED534, 0x2AC7D4C3, 0xCFDCE335, 0xD4C9F643, 0xD433EBF1, 0x1BF8DA31, 0x223FC515, 0xCC2F13C2, +0xF825FFC8, 0x35E51633, 0x36293C4A, 0xC72ACA36, 0x4544E736, 0x1BD9315C, 0xCF16DFD4, 0xD0AFEFC3, +0xCECBBDE2, 0x05D6AA32, 0xD851F045, 0x240FD948, 0x3D35B2E6, 0x17D7C7EE, 0x0B22DACE, 0xF53AD637, +0xB2372DDB, 0xF6DF14CB, 0xBED95B24, 0xD4D5190B, 0x0000CB29, 0x3DC1ECF5, 0x30C6B34F, 0x1D2629B6, +0xD429D1CC, 0xD50716E3, 0xD0C3C2CC, 0xC82CC7D4, 0x5F27C718, 0xE6201BE4, 0xDACFD42F, 0x2E2EE9D4, +0x3AD036C0, 0x3828DB20, 0x27D63439, 0xD21026DE, 0xD60710BC, 0x13AEB9E9, 0x28C41944, 0x49D5E3D7, +0x27163429, 0x1BBAD528, 0xB12A14BC, 0xC72FD9E2, 0x1F43213A, 0x133B3EE5, 0x3622FE21, 0xD438DCBE, +0x41CB25E8, 0xD0D83E2C, 0x32BFC2CE, 0x362BE1DA, 0xC3CBCE24, 0x22DECDC2, 0x30CBD5CD, 0xD42DBC26, +0xDDF6AAD0, 0xCADC0EC8, 0xDC333434, 0x333114F5, 0xCE24361A, 0xB1C5D123, 0x4B25380B, 0x2FEC3634, +0x3EE2A610, 0xB4CEE72F, 0x29BCC721, 0xB23850CD, 0xA51AE4DD, 0xC81BBC2F, 0xD4DF0000, 0x2DE5D1E8, +0x2E0A28D0, 0x29E3C1E1, 0x56F62DD6, 0x3828DD4B, 0xC41ACFDF, 0xD2390A3A, 0xDF3028CC, 0xE8C5E22B, +0xD11CDCC1, 0x44D1CBEC, 0x33C73DDC, 0x1D25B7DC, 0xE7DD0525, 0x2743C41A, 0x2AE2D3DC, 0x21D6E82A, +0x133CBAC7, 0x593F34B0, 0xDA5FE339, 0x5206252C, 0x394FD5BD, 0x21D2C92C, 0x292426E0, 0x48E0362B, +0xCC272DE4, 0xC815C4DE, 0x49BD5D37, 0x1912342F, 0x2CE9E01B, 0x30D8D2D9, 0xDEBF46E8, 0x392F473A, +0x3A18D324, 0x2FE31FD0, 0x2B2B2710, 0xB53B2AC7, 0xB3481CFF, 0x2DD7CFBC, 0x2232351A, 0xD7C91C16, +0x1CB4DA16, 0x1BCBDF3C, 0xD2213822, 0x2121DB1F, 0xDBD1C02A, 0x27DA33C7, 0x351FC4CD, 0x0000B9D8, +0xC2142C29, 0x38B9DD31, 0x54E60CDF, 0x2A4A42DB, 0x35CF2BB5, 0x0CEA3323, 0xC2D8BEE2, 0xC61F3B33, +0x10CE11D0, 0x3336CE2E, 0x303ADD25, 0x3347B7D9, 0x33222C1A, 0x1A43E4C1, 0x4326E019, 0x2AE2E92E, +0xD2C5DC40, 0xBA45D7D6, 0xDF341BDC, 0x3A361925, 0xD4FEF4D2, 0x3018DA31, 0xDDC12817, 0x3038DE24, +0xEDC0292B, 0x3E3E3336, 0xCB1BEF17, 
0xE10B2EBF, 0x2B20BABF, 0xC80C421B, 0xC2411D21, 0xECD8C7D6, +0xC824E4AD, 0x65B53DD7, 0x304325D4, 0xDD1DEAE6, 0xD33131CF, 0x2F36452B, 0x23B219E9, 0xC1C23529, +0x21DC3836, 0xD618E933, 0x393E2C14, 0x362ACA3A, 0xD6E2C237, 0xD6085111, 0xD721FCB1, 0xCCE6DEBC, +0xD1DCD920, 0xC3320000, 0xEA26CB19, 0x18BB26B7, 0x233BDF4F, 0xDE23FD3F, 0x2ADA1ECB, 0x3122E839, +0xEF42D7D7, 0xCAC7202D, 0xCD4CDED1, 0x2633C824, 0xD0123E4E, 0x104429B6, 0xC8CD4B00, 0x5136B738, +0xCFC8202B, 0x163135DD, 0x2C3821E8, 0x29314CE0, 0x0F3CE4EC, 0xE5432DAF, 0xC63AE340, 0x4639DB18, +0xDF282511, 0x25B5CBD3, 0xCAF50F37, 0x1E443B33, 0xE32CD32D, 0x10DA47B9, 0xDA410526, 0x34F0E4EA, +0xCEED3134, 0xC0B31AA7, 0xD12B1AD3, 0xC722C743, 0xBFBEDD1F, 0xD328D52B, 0xCC384024, 0x18B43FB8, +0x281742BB, 0xD8322FD2, 0xB5D628C6, 0xE420C0CB, 0xC9EB391D, 0x41E6B419, 0xBCC543E1, 0x2C37DDEF, +0x1F2D41E9, 0xD0F0D205, 0x1FEAE5C5, 0x15DF14B8, 0x47E327D9, 0xD328CE25, 0x36DB2E24, 0xD92E1EBF, +0x2B3230F4, 0xD1EAD5D9, 0xC7212ADA, 0xCD1235DC, 0xC519D5DB, 0xBFC631DF, 0xD5222B35, 0x35B6DEC6, +0x153C4428, 0xC5C3E42F, 0x1B2E2D2A, 0x3CD422A6, 0xC22842CC, 0xCADAC43D, 0x2C10DAD2, 0x18C708B5, +0x36D636BD, 0xCD29C5D3, 0xBFED433A, 0xE0384939, 0x005629F2, 0x36E2C425, 0x2830DF1C, 0xC9CB351C, +0xB0D7D3E6, 0xF3CEC214, 0xDC25D1C7, 0x1BD5CBCC, 0x19DD21C9, 0xC854EE30, 0x4FE14EE3, 0x38D5D3E8, +0xC9D126E4, 0x2FDD2130, 0xCE2130D1, 0xCFCAB0C9, 0xC71F0E1A, 0x2627D4DE, 0xCBE9DACD, 0xE7121F22, +0x2CC8E435, 0x3838391D, 0xDD37242E, 0xEBBE2F10, 0x233C0000, 0xAFC91CE7, 0xCE3536F6, 0x0F25D248, +0x2D04EBE4, 0x3FF634E5, 0xE929CE26, 0x24E0BABE, 0x302CD52B, 0xD1E2073F, 0x4333D11C, 0xDA48C2D1, +0x2D33F315, 0x29B2C8C3, 0x3B26E3EB, 0x3821C6DA, 0x32163DD6, 0x1DC737BC, 0x463CCD24, 0x19D63DDD, +0x200BDDB7, 0xBDD41E22, 0xC40A444F, 0x34BF49ED, 0xD4C0C0F4, 0xA41FB721, 0xBEDF51E8, 0x15D82A17, +0x383FDF21, 0x2B2918D0, 0xE6EB4718, 0x244530DB, 0x25DD3919, 0x3BA819BE, 0xDAEFBE38, 0xEB3FCCD9, +0xD1DF2E1C, 0x2520C3E3, 0x32D0BFD5, 0x37312BD5, 0xDC29D0C9, 0xBED02432, 0x310C3A18, 0x411ED8D7, +0xA4E5CE22, 0x163CBD30, 0xC3E2BE45, 0xCFBBAAC3, 0xC8C0E4CF, 0x0000CEFC, 0xE8CC2433, 0x39ED223F, +0x243AD83B, 0xD44D39C6, 0xCABF24DE, 0x252050EB, 0x29C73B40, 0xD4F434CF, 0xCAE4D5DF, 0xC5F2EEF1, +0x2919C8D2, 0x2FC4D20B, 0x1CBF5528, 0x3DBBD52D, 0xE03AD031, 0x3D2CCA17, 0xBE23BDB8, 0xE7DA3B30, +0xD1D2D331, 0xCC1825EA, 0x2F2D39D2, 0xF9313526, 0xCFBCC2E1, 0xE5BE3AD4, 0xBF59BE1C, 0x22FFC8B5, +0x32B14015, 0x31D6BEE6, 0x46DF3843, 0xDCD1D8B2, 0xE728C8BE, 0xEA1E2A28, 0xD1D8C7D2, 0x30CAF134, +0xEDD0EADA, 0x13C1E3D0, 0xB9D7DC29, 0xEAB93624, 0x24E49F38, 0x2E3FDEDC, 0xE6D0D23D, 0x1FBB3ED2, +0x23D540DD, 0xD727D92B, 0x251E33E6, 0x4BA7EECE, 0xC1CD203C, 0xC5CFED32, 0xB9CF35C3, 0x29710000, +0x29D7C636, 0x3A3ED1D0, 0xCC1B271F, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 + +hard_output0 = +0x42441F44, 0x006DFAE9, 0xDBB63AC2, 0xA3365CB3, 0xFA44372B, 0xC2603862, 0xA6C86E09, 0x416F0BF7, +0x8E58011D, 0x47C54B99, 0x269AD14C, 0x0B286902, 0x093E4C64, 0x8D84D8E1, 0xB8AE0993, 0x49567118, +0x8CEAABD2, 0x12D33409, 0xE3358237, 0x130176F6, 0x0986718F, 0xA18C83B5, 0x04EEF058, 0x59704040, +0x7478FE81, 0x01D27193, 0xEA1AD7F3, 0x21E6C2B9, 0x6478A1FF, 0xED9959AD, 0xE39E57BE, 0x4D859105, +0x056EF72D, 0xBD170BB7, 0xF01F9ADD, 0x99BF0C05, 0x44BAD09F, 0xF6ABDD61, 0xC0F8F116, 0x2972B53C, +0x0FEC944B, 0xD1675432, 0x0025F563, 0xF42B2EA8, 0x808E5C37, 0xB6E79AD5, 0x3706284D, 0xB9C0AFA5, +0xFF0E7E28, 0x5FA45C6F, 0x9CD5244E, 0xA013DDEA, 0x0D27D1A2, 0x2AE414AD, 0x41614379, 0xE6B68872, +0x5585D926, 0x5098D45B, 0xF8980ABD, 0x65821418, 0xEF8968A3, 0x301DEC3A, 0x57EF2A7F, 0xC17BE446, +0x94B65D62, 0xC9E6F350, 0x2E6130BC, 0x235F2E5E, 0xF1C13241, 0x94B291D6, 0x8C342458, 0x175B1FCA, +0xE07310FD, 0xD24934DD, 0x3CE8D053, 0x5C8F243D, 0x945B0AB3, 0x50EB8CC1, 0x8EF499D4, 0xA67801BF, +0x1680F061, 0x283FE705, 0xB8D7E773, 0x13AD3D2F, 0x4A6C305B, 0x1C1E5B12, 0x6F57D880, 0x8A666E5E, +0x26511296, 0xFFC09750, 0xFF91760A, 0xAC26795C, 0x75F321FD, 0x4221B9CB, 0x2E119188, 0x0772A832, +0x74D6036B, 0x505D6D64, 0x16A001B7, 0xF5F0D728, 0x48CBE119, 0x91C5D6B2, 0x8E1BBAB0, 0xB11BBE51, +0x52C647DC, 0x65240CE2, 0xAB3F8AAE, 0x62E0CDEA, 0x6CBE7240, 0x7F572063, 0x7FC816E6, 0xB38E5A6C, +0xDF9E2738, 0x752419CB, 0xC3A6BF9B, 0x8D48B21F, 0xA88C4AED, 0xE3C1D8BC, 0x8501353D, 0x20A99D7D, +0x8D9C6DDD, 0x77A65039, 0x834E7E57, 0x1F623FA1, 0x6CF02B26, 0x1FF0BBA6, 0x9035AC70, 0xBED818E5, +0x437CA713, 0x91011537, 0x3CD1302B, 0x7755459E, 0x5963FDA6, 0x49ECF937, 0xDEBBE0E3, 0xE873FD4F, +0x735C48C9, 0x7C70F46F, 0xA00B1543, 0x789F4159, 0xE17C343E, 0x51B3D085, 0xD9042663, 0x771933DA, +0xE183AF8C, 0xF62AC11B, 0xF776C25C, 0x22083E6C, 0x8400DDBA, 0x6D6AD2F2, 0x5DF87DBC, 0xBD7D40B4, +0x356EBF21, 0xD8972E85, 
0x3A632B51, 0x4D5BFBA2, 0x58CDF1D4, 0xD1117491, 0x5906DC45, 0xF793D63A, +0x8F9D0552, 0xD261C804, 0x70BFDE9F, 0xDC77827B, 0xE9FFB130, 0x4FF18BBC, 0x17D50C32, 0xC315521E, +0x4C7D2470, 0xAB5EE34C, 0x692AC0C8, 0x15B22832, 0xD34107FE, 0xA7E6DA1E, 0x2966A2A3, 0xD0B204B4, +0x500E02F9, 0x02E5A455, 0x81EAF941, 0x6818F57A, 0x978EEC9C, 0xBFD575F3, 0x3FFC1BE8, 0x97104E07, +0x4ADF219F, 0x182EC93D, 0x386033EE, 0x87C1351A, 0x31420C6A, 0xE20B5DC8, 0x83D90D23, 0x3C13B9E8, +0xCCCD65BE, 0x7EDB17A9, 0x81C73C2A, 0xE202E332 + +c = +2 + +cab = +1 + +ea = +4918 + +eb = +4920 + +c_neg = +0 + +k_neg = +3072 + +k_pos = +3136 + +rv_index = +0 + +iter_max = +8 + +iter_min = +4 + +expected_iter_count = +8 + +ext_scale = +15 + +num_maps = +0 + +code_block_mode = +0 + +op_flags = +RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN, +RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP + +expected_status = +OK \ No newline at end of file diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1190_rm.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1190_rm.data new file mode 100644 index 00000000..6221756a --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1190_rm.data @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_ENC + +input0 = +0x11D2BCAC, 0x4D + +output0 = +0xD2399179, 0x640EB999, 0x2CBAF577, 0xAF224AE2, 0x9D139927, 0xE6909B29, 0xA25B7F47, 0x2AA224CE, +0x399179F2, 0x0EB999D2, 0xBAF57764, 0x224AE22C, 0x139927AF, 0x909B299D, 0x5B7F47E6, 0xA224CEA2, +0x9179F22A, 0xB999D239, 0xF577640E, 0x4AE22CBA, 0x9927AF22, 0x9B299D13, 0x7F47E690, 0x24CEA25B, +0x79F22AA2, 0x99D23991, 0x77640EB9, 0xE22CBAF5, 0x27AF224A, 0x299D1399, 0x47E6909B, 0xCEA25B7F, +0xF22AA224, 0xD2399179, 0x640EB999, 0x2CBAF577, 0xAF224AE2, 0x24 + +e = +1190 + +k = +40 + +ncb = +192 + +rv_index = +0 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_RATE_MATCH + +expected_status = +OK diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1194_rm.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1194_rm.data new file mode 100644 index 00000000..c569abd7 --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1194_rm.data @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_ENC + +input0 = +0x11D2BCAC, 0x4D + +output0 = +0xB3E8D6DF, 0xBC8A2889, 0x744E649E, 0x99436EA6, 0x8B6EFD1D, 0xAB889238, 0xE744E6C9, 0x39E4664A, +0xE8D6DF91, 0x8A2889B3, 0x4E649EBC, 0x436EA674, 0x6EFD1D99, 0x8892388B, 0x44E6C9AB, 0xE4664AE7, +0xD6DF9139, 0x2889B3E8, 0x649EBC8A, 0x6EA6744E, 0xFD1D9943, 0x92388B6E, 0xE6C9AB88, 0x664AE744, +0xDF9139E4, 0x89B3E8D6, 0x9EBC8A28, 0xA6744E64, 0x1D99436E, 0x388B6EFD, 0xC9AB8892, 0x4AE744E6, +0x9139E466, 0xB3E8D6DF, 0xBC8A2889, 0x744E649E, 0x99436EA6, 0xC01D + +e = +1194 + +k = +40 + +ncb = +192 + +rv_index = +2 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_RATE_MATCH + +expected_status = +OK diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1196_rm.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1196_rm.data new file mode 100644 index 00000000..72be6f5d --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1196_rm.data @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_ENC + +input0 = +0x11D2BCAC, 0x4D + +output0 = +0xBC8A2889, 0x744E649E, 0x99436EA6, 0x8B6EFD1D, 0xAB889238, 0xE744E6C9, 0x39E4664A, 0xE8D6DF91, 
+0x8A2889B3, 0x4E649EBC, 0x436EA674, 0x6EFD1D99, 0x8892388B, 0x44E6C9AB, 0xE4664AE7, 0xD6DF9139, +0x2889B3E8, 0x649EBC8A, 0x6EA6744E, 0xFD1D9943, 0x92388B6E, 0xE6C9AB88, 0x664AE744, 0xDF9139E4, +0x89B3E8D6, 0x9EBC8A28, 0xA6744E64, 0x1D99436E, 0x388B6EFD, 0xC9AB8892, 0x4AE744E6, 0x9139E466, +0xB3E8D6DF, 0xBC8A2889, 0x744E649E, 0x99436EA6, 0x8B6EFD1D, 0x9038 + +e = +1196 + +k = +40 + +ncb = +192 + +rv_index = +3 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_RATE_MATCH + +expected_status = +OK diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e272_rm.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e272_rm.data new file mode 100644 index 00000000..883a76cf --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e272_rm.data @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_ENC + +input0 = +0x11d2bcac, 0x4d + +output0 = +0xd2399179, 0x640eb999, 0x2cbaf577, 0xaf224ae2, 0x9d139927, 0xe6909b29, 0xa25b7f47, 0x2aa224ce, +0x79f2 + +e = +272 + +k = +40 + +ncb = +192 + +rv_index = +0 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_RATE_MATCH + +expected_status = +OK diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e120_rm_rvidx.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e120_rm_rvidx.data new file mode 100644 index 00000000..b3867f35 --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e120_rm_rvidx.data @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_ENC + +input0 = +0x11d2bcac, 0x7715ae4d, 0xc9f4dbc6, 0x2fda3c1a, 0x09349b16, 0x2cd3c189, +0xe5650429, 0xc73c42c1, 0xe7336bb7, 0xbb7de593, 0x83f986aa, 0xc0ade12e, +0x6730b095, 0x78f7c059, 0xaa907199, 0x75c323d1, 0x7061f9ba, 0x97e067bf, +0xc155cd55, 0x6a4cb08d, 0x4260e2fa, 0xff35e496, 0x37f251d3, 0x02fd9f9e, +0x6f53345c, 0x790cda6d, 0x3b8549e4, 0x56d0c6ea, 0x70a38006, 0xfff18223, +0x6f2035b5, 0x6cf508d3, 0x98a0a308, 0x432353fc, 0x0eb818eb, 0xdd9a128b, +0xf92431b2, 0xad788286, 0xda07d5de, 0x44b4b3a8, 0xbe62fd67, 0x61a4eb54, +0x90d7ac9f, 0xc4d9a930, 0xfddc3c24, 0xf5e3b1c8, 0x38143538, 0xcb1d3062, +0xcae36df3, 0x50a73437, 0x542dcab7, 0x875973a0, 0x34690309, 0x49cb1ddb, +0xf8a62b92, 0x82af6103, 0xc9f8c928, 0x3835b822, 0x16b044a6, 0xdae89096, +0xa5be47d4, 0xaf61189e, 0x5cd70faf, 0x037331cf, 0xef7fa5f6, 0xb2f2b41a, +0xa6f222c6, 0xdb60fe4c, 0x2a857a9a, 0x0b821f9d, 0x348afd17, 0x7eecbaeb, +0x92bb9509, 0x8a3cec24, 0xd02549a2, 0x155ffa81, 0x2b7feac6, 0x3ee461e7, +0xc981f936, 0x89b544c7, 0x9a431e36, 0x62511734, 0x769f9647, 0x211a747e, +0x567abef4, 0xad87e2b4, 0xa3e0c3bf, 0x6d325dd5, 0xf561cc46, 0x39925735, +0x3d8abbfd, 0xc3724c88, 0x8bdf03c9, 0x1b02a12a, 0x4f233a0c, 0x9ca9444a, +0xc5d1f7e6, 0x4d995f37, 0xd9aefb32, 0xd0465248, 0x0f3a3b21, 0x62ea8c0c, +0x91f8d54e, 0x5cf75514, 0x14618a01, 0x8fe9b87e, 0xf2b424f9, 0x49724ce2, +0xa1464587, 0x5e00dc83, 0x59475455, 0x444119b1, 0x4fb9f036, 0x65fcbc1a, +0x1c63a990, 0x767a6114, 0xb0ede06a, 0xcfb91ae3, 0x7874af5f, 0xf78772a3, +0xa2932c81, 0x77f2759d, 0x930dc8f1, 0x95ce14a5, 0x134363ee, 0x61ee143f, +0xf0034b35, 0xfdc75fce, 0x3be2dcf3, 0xff3a07eb, 0xdc43f0eb, 0x23ba73bb, +0x45f7649e, 0xcacc297c, 0xa3dd98db, 0x058d46a9, 0x6bcfc154, 0x7be8e1e6, +0x618a4754, 0x8d193c46, 0xba39e1ce, 0xc3b85cfc, 0xd80d853b, 0x38d6440d, +0x9c1a6185, 0x90c9dfcb, 0x01c6e841, 0xeedfe6ac, 0x7b61a3ae, 0xad3924dd, +0x514bc1a8, 0x6ed60d7e, 0x1b74b79b, 0xea295947, 0x5f9a5d33, 0xcd24311f, +0x0bd3d10c, 0xd214ecfe, 
0xcb37035d, 0x8ce95168, 0x7020cb52, 0xe432107d, +0x042d63ac, 0x6201c8dd, 0x4bea65a2, 0x1a3cf453, 0x5b8e868d, 0x3d653bb0, +0x24c1967e, 0x37f183a9, 0x3700e703, 0xb168a02b, 0x2592cd82, 0x5c0cdb66, +0x70b64ebd, 0xf8dcb9a7, 0x634a8335, 0x06642398, 0xfe2f4497, 0x2e775256, +0x30f85cbe, 0xf4d2fdaf, 0x46ca6c9d, 0x022097d6, 0xcedeb03e, 0x3d8353bc, +0x0d005559, 0xd457eb38, 0xb1931313, 0xd4f12824, 0xcc444f1b, 0xfaa42c5f + +output0 = +0xd0d936d6, 0x870c26f3, 0x3f12805d, 0x4a6016 + +e = +120 + +k = +6144 + +ncb = +18528 + +rv_index = +1 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_RATE_MATCH, RTE_BBDEV_TURBO_RV_INDEX_BYPASS + +expected_status = +OK diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18444.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18444.data new file mode 100644 index 00000000..dcad5587 --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18444.data @@ -0,0 +1,156 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_ENC + +input0 = +0x11d2bcac, 0x7715ae4d, 0xc9f4dbc6, 0x2fda3c1a, 0x09349b16, 0x2cd3c189, +0xe5650429, 0xc73c42c1, 0xe7336bb7, 0xbb7de593, 0x83f986aa, 0xc0ade12e, +0x6730b095, 0x78f7c059, 0xaa907199, 0x75c323d1, 0x7061f9ba, 0x97e067bf, +0xc155cd55, 0x6a4cb08d, 0x4260e2fa, 0xff35e496, 0x37f251d3, 0x02fd9f9e, +0x6f53345c, 0x790cda6d, 0x3b8549e4, 0x56d0c6ea, 0x70a38006, 0xfff18223, +0x6f2035b5, 0x6cf508d3, 0x98a0a308, 0x432353fc, 0x0eb818eb, 0xdd9a128b, +0xf92431b2, 0xad788286, 0xda07d5de, 0x44b4b3a8, 0xbe62fd67, 0x61a4eb54, +0x90d7ac9f, 0xc4d9a930, 0xfddc3c24, 0xf5e3b1c8, 0x38143538, 0xcb1d3062, +0xcae36df3, 0x50a73437, 0x542dcab7, 0x875973a0, 0x34690309, 0x49cb1ddb, +0xf8a62b92, 0x82af6103, 0xc9f8c928, 0x3835b822, 0x16b044a6, 0xdae89096, +0xa5be47d4, 0xaf61189e, 0x5cd70faf, 0x037331cf, 0xef7fa5f6, 0xb2f2b41a, +0xa6f222c6, 0xdb60fe4c, 0x2a857a9a, 0x0b821f9d, 0x348afd17, 0x7eecbaeb, +0x92bb9509, 0x8a3cec24, 0xd02549a2, 0x155ffa81, 0x2b7feac6, 0x3ee461e7, +0xc981f936, 0x89b544c7, 0x9a431e36, 0x62511734, 0x769f9647, 0x211a747e, +0x567abef4, 0xad87e2b4, 0xa3e0c3bf, 0x6d325dd5, 0xf561cc46, 0x39925735, +0x3d8abbfd, 0xc3724c88, 0x8bdf03c9, 0x1b02a12a, 0x4f233a0c, 0x9ca9444a, +0xc5d1f7e6, 0x4d995f37, 0xd9aefb32, 0xd0465248, 0x0f3a3b21, 0x62ea8c0c, +0x91f8d54e, 0x5cf75514, 0x14618a01, 0x8fe9b87e, 0xf2b424f9, 0x49724ce2, +0xa1464587, 0x5e00dc83, 0x59475455, 0x444119b1, 0x4fb9f036, 0x65fcbc1a, +0x1c63a990, 0x767a6114, 0xb0ede06a, 0xcfb91ae3, 0x7874af5f, 0xf78772a3, +0xa2932c81, 0x77f2759d, 0x930dc8f1, 0x95ce14a5, 0x134363ee, 0x61ee143f, +0xf0034b35, 0xfdc75fce, 0x3be2dcf3, 0xff3a07eb, 0xdc43f0eb, 0x23ba73bb, +0x45f7649e, 0xcacc297c, 0xa3dd98db, 0x058d46a9, 0x6bcfc154, 0x7be8e1e6, +0x618a4754, 0x8d193c46, 0xba39e1ce, 0xc3b85cfc, 0xd80d853b, 0x38d6440d, +0x9c1a6185, 0x90c9dfcb, 0x01c6e841, 0xeedfe6ac, 0x7b61a3ae, 0xad3924dd, +0x514bc1a8, 0x6ed60d7e, 0x1b74b79b, 0xea295947, 0x5f9a5d33, 0xcd24311f, +0x0bd3d10c, 0xd214ecfe, 0xcb37035d, 0x8ce95168, 0x7020cb52, 0xe432107d, +0x042d63ac, 0x6201c8dd, 0x4bea65a2, 0x1a3cf453, 0x5b8e868d, 0x3d653bb0, +0x24c1967e, 0x37f183a9, 0x3700e703, 0xb168a02b, 0x2592cd82, 0x5c0cdb66, +0x70b64ebd, 0xf8dcb9a7, 0x634a8335, 0x06642398, 0xfe2f4497, 0x2e775256, +0x30f85cbe, 0xf4d2fdaf, 0x46ca6c9d, 0x022097d6, 0xcedeb03e, 0x3d8353bc, +0x0d005559, 0xd457eb38, 0xb1931313, 0xd4f12824, 0xcc444f1b, 0xfaa42c5f + +output0 = +0x11d2bcac, 0x7715ae4d, 0xc9f4dbc6, 0x2fda3c1a, 0x09349b16, 0x2cd3c189, +0xe5650429, 0xc73c42c1, 0xe7336bb7, 0xbb7de593, 0x83f986aa, 
0xc0ade12e, +0x6730b095, 0x78f7c059, 0xaa907199, 0x75c323d1, 0x7061f9ba, 0x97e067bf, +0xc155cd55, 0x6a4cb08d, 0x4260e2fa, 0xff35e496, 0x37f251d3, 0x02fd9f9e, +0x6f53345c, 0x790cda6d, 0x3b8549e4, 0x56d0c6ea, 0x70a38006, 0xfff18223, +0x6f2035b5, 0x6cf508d3, 0x98a0a308, 0x432353fc, 0x0eb818eb, 0xdd9a128b, +0xf92431b2, 0xad788286, 0xda07d5de, 0x44b4b3a8, 0xbe62fd67, 0x61a4eb54, +0x90d7ac9f, 0xc4d9a930, 0xfddc3c24, 0xf5e3b1c8, 0x38143538, 0xcb1d3062, +0xcae36df3, 0x50a73437, 0x542dcab7, 0x875973a0, 0x34690309, 0x49cb1ddb, +0xf8a62b92, 0x82af6103, 0xc9f8c928, 0x3835b822, 0x16b044a6, 0xdae89096, +0xa5be47d4, 0xaf61189e, 0x5cd70faf, 0x037331cf, 0xef7fa5f6, 0xb2f2b41a, +0xa6f222c6, 0xdb60fe4c, 0x2a857a9a, 0x0b821f9d, 0x348afd17, 0x7eecbaeb, +0x92bb9509, 0x8a3cec24, 0xd02549a2, 0x155ffa81, 0x2b7feac6, 0x3ee461e7, +0xc981f936, 0x89b544c7, 0x9a431e36, 0x62511734, 0x769f9647, 0x211a747e, +0x567abef4, 0xad87e2b4, 0xa3e0c3bf, 0x6d325dd5, 0xf561cc46, 0x39925735, +0x3d8abbfd, 0xc3724c88, 0x8bdf03c9, 0x1b02a12a, 0x4f233a0c, 0x9ca9444a, +0xc5d1f7e6, 0x4d995f37, 0xd9aefb32, 0xd0465248, 0x0f3a3b21, 0x62ea8c0c, +0x91f8d54e, 0x5cf75514, 0x14618a01, 0x8fe9b87e, 0xf2b424f9, 0x49724ce2, +0xa1464587, 0x5e00dc83, 0x59475455, 0x444119b1, 0x4fb9f036, 0x65fcbc1a, +0x1c63a990, 0x767a6114, 0xb0ede06a, 0xcfb91ae3, 0x7874af5f, 0xf78772a3, +0xa2932c81, 0x77f2759d, 0x930dc8f1, 0x95ce14a5, 0x134363ee, 0x61ee143f, +0xf0034b35, 0xfdc75fce, 0x3be2dcf3, 0xff3a07eb, 0xdc43f0eb, 0x23ba73bb, +0x45f7649e, 0xcacc297c, 0xa3dd98db, 0x058d46a9, 0x6bcfc154, 0x7be8e1e6, +0x618a4754, 0x8d193c46, 0xba39e1ce, 0xc3b85cfc, 0xd80d853b, 0x38d6440d, +0x9c1a6185, 0x90c9dfcb, 0x01c6e841, 0xeedfe6ac, 0x7b61a3ae, 0xad3924dd, +0x514bc1a8, 0x6ed60d7e, 0x1b74b79b, 0xea295947, 0x5f9a5d33, 0xcd24311f, +0x0bd3d10c, 0xd214ecfe, 0xcb37035d, 0x8ce95168, 0x7020cb52, 0xe432107d, +0x042d63ac, 0x6201c8dd, 0x4bea65a2, 0x1a3cf453, 0x5b8e868d, 0x3d653bb0, +0x24c1967e, 0x37f183a9, 0x3700e703, 0xb168a02b, 0x2592cd82, 0x5c0cdb66, +0x70b64ebd, 0xf8dcb9a7, 0x634a8335, 0x06642398, 0xfe2f4497, 0x2e775256, +0x30f85cbe, 0xf4d2fdaf, 0x46ca6c9d, 0x022097d6, 0xcedeb03e, 0x3d8353bc, +0x0d005559, 0xd457eb38, 0xb1931313, 0xd4f12824, 0xcc444f1b, 0xfaa42c5f, +0x4fde636c, 0x10ea20a0, 0xe6da8721, 0xbfde5b08, 0x9c3739da, 0xb6dc015a, +0x427db088, 0xdfdb8e6f, 0x756be6c1, 0x21f5297b, 0x06135665, 0xc1602b7d, +0x049536c5, 0xbbb3b801, 0x0cdb0c19, 0x7b2ad622, 0xfee8218f, 0xc5c7f123, +0x8abd3301, 0xa15b534d, 0x29dd2053, 0xd409abf9, 0x3ef19d6b, 0x70a3cbc2, +0x7a51423a, 0x4505b2ad, 0xdc74c75e, 0x068751a9, 0xb0b56437, 0x14a10371, +0x76af806f, 0xa8a47e19, 0x7c97a26e, 0x7998a3d6, 0xdc1ad1e2, 0xb532a301, +0xca8a3e7d, 0xd0aef374, 0x204990c0, 0xc7011aec, 0xa69151ea, 0x53390026, +0x7bf0d762, 0x735c2202, 0x64159e54, 0x5a3b1a56, 0x9ef1def2, 0x0ab8a961, +0x587b0886, 0xb8cc5975, 0x2a5a0f23, 0x069d05be, 0x9cc3c207, 0x40ef1a02, +0x4fae3f5b, 0x1f127aae, 0xd4e6d411, 0x17ac43ef, 0xe4bf891b, 0xfbb21765, +0x2c560c7e, 0x8561988c, 0x73a01032, 0x0cfef73a, 0x694c4991, 0x885d7a3f, +0x4218d1ff, 0xc2efaffb, 0xaf9d9715, 0xf76de6b2, 0xcce8e8ff, 0x370e3800, +0x493675eb, 0xd8fbcbda, 0xa5b382c2, 0x86c8f1ea, 0x3d724ea4, 0xb067034c, +0x6491d87e, 0x1a745ce4, 0xbb27180b, 0x1a2f0acc, 0xac4b7b3b, 0xe324578b, +0xc87928df, 0x9c1de566, 0x0ce2a17d, 0xaf2e13ce, 0x146a8659, 0x8727f6ae, +0xe2df7d03, 0x1a8e4cb4, 0xfa590976, 0x13a7c236, 0xc07489d0, 0xbe905e17, +0xafeb3d4b, 0x201e73f2, 0x5bdca12e, 0x3e15a852, 0xbcfc3271, 0x5d398410, +0x6bfacc15, 0x011fc61f, 0x43e43fd7, 0x0640717c, 0x96bfb3ff, 0x158eac19, +0x3b852e91, 0x74f9ceda, 0xcac71326, 
0xfc0e312a, 0x20e8137b, 0xa1162611, +0x239ac7fe, 0xb9d00f8a, 0xea0b5241, 0x019f0d25, 0xc5153264, 0xb48a5547, +0xe54e339f, 0x17a6cca5, 0x5065af0d, 0x5ce648b9, 0xb457b342, 0xc1cb3f0e, +0x28d03c8b, 0x5144ed7a, 0xdb80779f, 0x53ce1b87, 0xbc071147, 0xbfe46c11, +0x7296785e, 0x83e4a63e, 0xc58982e9, 0x9538c5b9, 0xf14abaaa, 0xd915124c, +0x73540cd6, 0xe333696b, 0x58f9e00a, 0xd4dad10f, 0xc0de1420, 0x355e2bdc, +0xb2faa8fd, 0xbe6a12f1, 0x45d415cc, 0x47f5aed9, 0x4754e770, 0x2bb07385, +0x41374352, 0xf80beb47, 0xef02f35c, 0xc9c1b86e, 0x94b5785b, 0xba33123f, +0x7e39f0c9, 0x028a9286, 0x7d52c9f1, 0x06f04da6, 0xbc6a68d1, 0xfc70bace, +0x95b6a129, 0xfff224bb, 0x701ef3fb, 0x3309286f, 0x544ae8c1, 0xdca62c4a, +0xf8862ee2, 0xf9e3cd29, 0x2c07cce2, 0x8d93652a, 0xf47e4611, 0x4635f586, +0x1c03e0f4, 0x819724c7, 0x96b2a3f0, 0xeeb1ad95, 0xff08e517, 0xbd4ba6ed, +0x49ddb12d, 0x365734b5, 0x5edf7459, 0x2ee117a9, 0x067b9462, 0xb703685f, +0x72499876, 0xb71dd772, 0x5d02b478, 0x4b6fed1d, 0x2df232c1, 0x733eaaff, +0x1618c029, 0x0adda94b, 0xd271e560, 0xd120ba30, 0x6dfecc73, 0x4e751af7, +0x0e7a6b63, 0xa1ad5dad, 0xb41201b1, 0x245f3e86, 0x3a5114ac, 0x5cd532c9, +0x27ff9c3a, 0x7a847392, 0x6b2d0514, 0x82d0cc45, 0x0c779ba2, 0x8690f3ef, +0x0c058474, 0x47a8ee9a, 0xac4c2475, 0x496b67a5, 0x033f43f5, 0x6e6ce3c9, +0x644f5163, 0x8f4b92d9, 0x217798ee, 0x4b8e5368, 0xce751989, 0x7ab05365, +0xe227cee3, 0xf35e2851, 0x6f304c87, 0x1389c81d, 0x6ebc3989, 0x2b32ac3e, +0x59c5d111, 0xa8e8699c, 0xa5b15150, 0x8cacebe8, 0x40c2ede6, 0x71ea78c9, +0xa9e40f49, 0x58309eab, 0xd2eb879c, 0x54b9086f, 0xce206d93, 0x47e7087a, +0xac1e1dd9, 0x1d3a7bf1, 0x07d21fd4, 0x3a84a2a7, 0xe3ce33de, 0x55c94ddf, +0xd827c1ea, 0x3b4dcf5a, 0x7d5fbeb6, 0xd71ccdae, 0x516a9035, 0x33b3bee0, +0x61201364, 0xcf344f8d, 0x8c887934, 0x1998c127, 0xe24f1190, 0x75e8ea20, +0x8a379eda, 0x8a894b14, 0xa3d7c264, 0xa62b0119, 0x87f4d316, 0xecdc5f2b, +0xbc2424ec, 0x71169a71, 0x61aa2d5d, 0xb5f5f160, 0xc15a4969, 0xc5419315, +0xbf84483e, 0x2b1687c1, 0xd1aa06df, 0x22d5befd, 0x8b09b15f, 0xa88ffc01, +0xbb33617f, 0xb01a3e2e, 0x912a939c, 0xc649d802, 0xeba14b11, 0x3c902b57, +0xbcf35a8d, 0x45a964a1, 0x0c9416ef, 0xec9ae2d1, 0xc5e56fee, 0x88bfa336, +0x653cfe85, 0x92d21037, 0x96bc60e4, 0xf5317c1e, 0x5e7118f4, 0x91a9e04b, +0x43f0cfec, 0xb763dc3c, 0xad6bac40, 0x21685f69, 0x5e4d6f70, 0xd8d5a4c4, +0x15ed5efe, 0x3bbefa38, 0xbf7cf912, 0xb1427ffd, 0x652235ff, 0x927a7046, +0x8806625f, 0x56967b59, 0x6ec3dba6, 0x0ee31e0f, 0x25111a51, 0x091c763e, +0x1867732b, 0x28365594, 0xa589c975, 0x585b906f, 0xb85c4f20, 0xced98475, +0x52f29d16, 0x1ca0de39, 0xede3c4c8, 0xda808e91, 0x449784ab, 0xe3caf590, +0xabf23939, 0xe97f259a, 0x0a879f81, 0x5c6268ac, 0x0a4f1b62, 0xfe15825e, +0xbea5e6f9, 0x0d2db98f, 0xeec8cd64, 0x1747ddf5, 0x8b4f161c, 0xd236c9f6, +0x1a5b5a2f, 0x08918e6d, 0xdde43ad9, 0x8c316d0f, 0x7f1b3342, 0x3e385cee, +0x55ff6f87, 0x89bbe534, 0x67c23afe, 0x250dc97a, 0x06b3b332, 0x61a03930, +0x2ca17159, 0x3260c4f6, 0x3c810bde, 0x28d94372, 0xa9701f5a, 0x3880475b, +0x352ff389, 0x2bf653e0, 0xa4a6cc35, 0x4d211cfc, 0xeda0b37d, 0x14c0e6d8, +0xe457aaea, 0x1c6ed15f, 0x155d3b90, 0x7f43aed4, 0x289a98f9, 0x42246910, +0x62d047f5, 0x75714462, 0xe2c2a4df, 0x9dfc0f13, 0x03361f4c, 0xc55b5ca7, +0x9dd89720, 0x17376073, 0xfb6ee849, 0x280e697e, 0x57b24f3a, 0x2089b956, +0xa8baa602, 0x5b7ab88c, 0xd69f8b56, 0x64f80d52, 0x223427b9, 0xfd393c95, +0xb074 + +e = +18444 + +k = +6144 + +ncb = +18528 + +rv_index = +0 + +code_block_mode = +1 + +expected_status = +OK diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18448_crc24a.data 
b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18448_crc24a.data new file mode 100644 index 00000000..345d1410 --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18448_crc24a.data @@ -0,0 +1,159 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_ENC + +input0 = +0x11d2bcac, 0x7715ae4d, 0xc9f4dbc6, 0x2fda3c1a, 0x09349b16, 0x2cd3c189, +0xe5650429, 0xc73c42c1, 0xe7336bb7, 0xbb7de593, 0x83f986aa, 0xc0ade12e, +0x6730b095, 0x78f7c059, 0xaa907199, 0x75c323d1, 0x7061f9ba, 0x97e067bf, +0xc155cd55, 0x6a4cb08d, 0x4260e2fa, 0xff35e496, 0x37f251d3, 0x02fd9f9e, +0x6f53345c, 0x790cda6d, 0x3b8549e4, 0x56d0c6ea, 0x70a38006, 0xfff18223, +0x6f2035b5, 0x6cf508d3, 0x98a0a308, 0x432353fc, 0x0eb818eb, 0xdd9a128b, +0xf92431b2, 0xad788286, 0xda07d5de, 0x44b4b3a8, 0xbe62fd67, 0x61a4eb54, +0x90d7ac9f, 0xc4d9a930, 0xfddc3c24, 0xf5e3b1c8, 0x38143538, 0xcb1d3062, +0xcae36df3, 0x50a73437, 0x542dcab7, 0x875973a0, 0x34690309, 0x49cb1ddb, +0xf8a62b92, 0x82af6103, 0xc9f8c928, 0x3835b822, 0x16b044a6, 0xdae89096, +0xa5be47d4, 0xaf61189e, 0x5cd70faf, 0x037331cf, 0xef7fa5f6, 0xb2f2b41a, +0xa6f222c6, 0xdb60fe4c, 0x2a857a9a, 0x0b821f9d, 0x348afd17, 0x7eecbaeb, +0x92bb9509, 0x8a3cec24, 0xd02549a2, 0x155ffa81, 0x2b7feac6, 0x3ee461e7, +0xc981f936, 0x89b544c7, 0x9a431e36, 0x62511734, 0x769f9647, 0x211a747e, +0x567abef4, 0xad87e2b4, 0xa3e0c3bf, 0x6d325dd5, 0xf561cc46, 0x39925735, +0x3d8abbfd, 0xc3724c88, 0x8bdf03c9, 0x1b02a12a, 0x4f233a0c, 0x9ca9444a, +0xc5d1f7e6, 0x4d995f37, 0xd9aefb32, 0xd0465248, 0x0f3a3b21, 0x62ea8c0c, +0x91f8d54e, 0x5cf75514, 0x14618a01, 0x8fe9b87e, 0xf2b424f9, 0x49724ce2, +0xa1464587, 0x5e00dc83, 0x59475455, 0x444119b1, 0x4fb9f036, 0x65fcbc1a, +0x1c63a990, 0x767a6114, 0xb0ede06a, 0xcfb91ae3, 0x7874af5f, 0xf78772a3, +0xa2932c81, 0x77f2759d, 0x930dc8f1, 0x95ce14a5, 0x134363ee, 0x61ee143f, +0xf0034b35, 0xfdc75fce, 0x3be2dcf3, 0xff3a07eb, 0xdc43f0eb, 0x23ba73bb, +0x45f7649e, 0xcacc297c, 0xa3dd98db, 0x058d46a9, 0x6bcfc154, 0x7be8e1e6, +0x618a4754, 0x8d193c46, 0xba39e1ce, 0xc3b85cfc, 0xd80d853b, 0x38d6440d, +0x9c1a6185, 0x90c9dfcb, 0x01c6e841, 0xeedfe6ac, 0x7b61a3ae, 0xad3924dd, +0x514bc1a8, 0x6ed60d7e, 0x1b74b79b, 0xea295947, 0x5f9a5d33, 0xcd24311f, +0x0bd3d10c, 0xd214ecfe, 0xcb37035d, 0x8ce95168, 0x7020cb52, 0xe432107d, +0x042d63ac, 0x6201c8dd, 0x4bea65a2, 0x1a3cf453, 0x5b8e868d, 0x3d653bb0, +0x24c1967e, 0x37f183a9, 0x3700e703, 0xb168a02b, 0x2592cd82, 0x5c0cdb66, +0x70b64ebd, 0xf8dcb9a7, 0x634a8335, 0x06642398, 0xfe2f4497, 0x2e775256, +0x30f85cbe, 0xf4d2fdaf, 0x46ca6c9d, 0x022097d6, 0xcedeb03e, 0x3d8353bc, +0x0d005559, 0xd457eb38, 0xb1931313, 0xd4f12824, 0xcc444f1b, 0x5f + +output0 = +0x11d2bcac, 0x7715ae4d, 0xc9f4dbc6, 0x2fda3c1a, 0x09349b16, 0x2cd3c189, +0xe5650429, 0xc73c42c1, 0xe7336bb7, 0xbb7de593, 0x83f986aa, 0xc0ade12e, +0x6730b095, 0x78f7c059, 0xaa907199, 0x75c323d1, 0x7061f9ba, 0x97e067bf, +0xc155cd55, 0x6a4cb08d, 0x4260e2fa, 0xff35e496, 0x37f251d3, 0x02fd9f9e, +0x6f53345c, 0x790cda6d, 0x3b8549e4, 0x56d0c6ea, 0x70a38006, 0xfff18223, +0x6f2035b5, 0x6cf508d3, 0x98a0a308, 0x432353fc, 0x0eb818eb, 0xdd9a128b, +0xf92431b2, 0xad788286, 0xda07d5de, 0x44b4b3a8, 0xbe62fd67, 0x61a4eb54, +0x90d7ac9f, 0xc4d9a930, 0xfddc3c24, 0xf5e3b1c8, 0x38143538, 0xcb1d3062, +0xcae36df3, 0x50a73437, 0x542dcab7, 0x875973a0, 0x34690309, 0x49cb1ddb, +0xf8a62b92, 0x82af6103, 0xc9f8c928, 0x3835b822, 0x16b044a6, 0xdae89096, +0xa5be47d4, 0xaf61189e, 0x5cd70faf, 0x037331cf, 0xef7fa5f6, 0xb2f2b41a, +0xa6f222c6, 0xdb60fe4c, 0x2a857a9a, 0x0b821f9d, 
0x348afd17, 0x7eecbaeb, +0x92bb9509, 0x8a3cec24, 0xd02549a2, 0x155ffa81, 0x2b7feac6, 0x3ee461e7, +0xc981f936, 0x89b544c7, 0x9a431e36, 0x62511734, 0x769f9647, 0x211a747e, +0x567abef4, 0xad87e2b4, 0xa3e0c3bf, 0x6d325dd5, 0xf561cc46, 0x39925735, +0x3d8abbfd, 0xc3724c88, 0x8bdf03c9, 0x1b02a12a, 0x4f233a0c, 0x9ca9444a, +0xc5d1f7e6, 0x4d995f37, 0xd9aefb32, 0xd0465248, 0x0f3a3b21, 0x62ea8c0c, +0x91f8d54e, 0x5cf75514, 0x14618a01, 0x8fe9b87e, 0xf2b424f9, 0x49724ce2, +0xa1464587, 0x5e00dc83, 0x59475455, 0x444119b1, 0x4fb9f036, 0x65fcbc1a, +0x1c63a990, 0x767a6114, 0xb0ede06a, 0xcfb91ae3, 0x7874af5f, 0xf78772a3, +0xa2932c81, 0x77f2759d, 0x930dc8f1, 0x95ce14a5, 0x134363ee, 0x61ee143f, +0xf0034b35, 0xfdc75fce, 0x3be2dcf3, 0xff3a07eb, 0xdc43f0eb, 0x23ba73bb, +0x45f7649e, 0xcacc297c, 0xa3dd98db, 0x058d46a9, 0x6bcfc154, 0x7be8e1e6, +0x618a4754, 0x8d193c46, 0xba39e1ce, 0xc3b85cfc, 0xd80d853b, 0x38d6440d, +0x9c1a6185, 0x90c9dfcb, 0x01c6e841, 0xeedfe6ac, 0x7b61a3ae, 0xad3924dd, +0x514bc1a8, 0x6ed60d7e, 0x1b74b79b, 0xea295947, 0x5f9a5d33, 0xcd24311f, +0x0bd3d10c, 0xd214ecfe, 0xcb37035d, 0x8ce95168, 0x7020cb52, 0xe432107d, +0x042d63ac, 0x6201c8dd, 0x4bea65a2, 0x1a3cf453, 0x5b8e868d, 0x3d653bb0, +0x24c1967e, 0x37f183a9, 0x3700e703, 0xb168a02b, 0x2592cd82, 0x5c0cdb66, +0x70b64ebd, 0xf8dcb9a7, 0x634a8335, 0x06642398, 0xfe2f4497, 0x2e775256, +0x30f85cbe, 0xf4d2fdaf, 0x46ca6c9d, 0x022097d6, 0xcedeb03e, 0x3d8353bc, +0x0d005559, 0xd457eb38, 0xb1931313, 0xd4f12824, 0xcc444f1b, 0xa533d85f, +0x4fde63ac, 0x10ea20a0, 0xe6da8721, 0xbfde5b08, 0x9c3739da, 0xb6dc015a, +0x427db088, 0xdfdb8e6f, 0x756be6c1, 0x21f5297b, 0x06135665, 0xc1602b7d, +0x049536c5, 0xbbb3b801, 0x0cdb0c19, 0x7b2ad622, 0xfee8218f, 0xc5c7f123, +0x8abd3301, 0xa15b534d, 0x29dd2053, 0xd409abf9, 0x3ef19d6b, 0x70a3cbc2, +0x7a51423a, 0x4505b2ad, 0xdc74c75e, 0x068751a9, 0xb0b56437, 0x14a10371, +0x76af806f, 0xa8a47e19, 0x7c97a26e, 0x7998a3d6, 0xdc1ad1e2, 0xb532a301, +0xca8a3e7d, 0xd0aef374, 0x204990c0, 0xc7011aec, 0xa69151ea, 0x53390026, +0x7bf0d762, 0x735c2202, 0x64159e54, 0x5a3b1a56, 0x9ef1def2, 0x0ab8a961, +0x587b0886, 0xb8cc5975, 0x2a5a0f23, 0x069d05be, 0x9cc3c207, 0x40ef1a02, +0x4fae3f5b, 0x1f127aae, 0xd4e6d411, 0x17ac43ef, 0xe4bf891b, 0xfbb21765, +0x2c560c7e, 0x8561988c, 0x73a01032, 0x0cfef73a, 0x694c4991, 0x885d7a3f, +0x4218d1ff, 0xc2efaffb, 0xaf9d9715, 0xf76de6b2, 0xcce8e8ff, 0x370e3800, +0x493675eb, 0xd8fbcbda, 0xa5b382c2, 0x86c8f1ea, 0x3d724ea4, 0xb067034c, +0x6491d87e, 0x1a745ce4, 0xbb27180b, 0x1a2f0acc, 0xac4b7b3b, 0xe324578b, +0xc87928df, 0x9c1de566, 0x0ce2a17d, 0xaf2e13ce, 0x146a8659, 0x8727f6ae, +0xe2df7d03, 0x1a8e4cb4, 0xfa590976, 0x13a7c236, 0xc07489d0, 0xbe905e17, +0xafeb3d4b, 0x201e73f2, 0x5bdca12e, 0x3e15a852, 0xbcfc3271, 0x5d398410, +0x6bfacc15, 0x011fc61f, 0x43e43fd7, 0x0640717c, 0x96bfb3ff, 0x158eac19, +0x3b852e91, 0x74f9ceda, 0xcac71326, 0xfc0e312a, 0x20e8137b, 0xa1162611, +0x239ac7fe, 0xb9d00f8a, 0xea0b5241, 0x019f0d25, 0xc5153264, 0xb48a5547, +0xe54e339f, 0x17a6cca5, 0x5065af0d, 0x5ce648b9, 0xb457b342, 0xc1cb3f0e, +0x28d03c8b, 0x5144ed7a, 0xdb80779f, 0x53ce1b87, 0xbc071147, 0xbfe46c11, +0x7296785e, 0x83e4a63e, 0xc58982e9, 0x9538c5b9, 0xf14abaaa, 0xd915124c, +0x73540cd6, 0xe333696b, 0x58f9e00a, 0xd4dad10f, 0xc0de1420, 0x355e2bdc, +0xb2faa8fd, 0xbe6a12f1, 0x45d415cc, 0x47f5aed9, 0x4754e770, 0x2bb07385, +0x41374352, 0xf80beb47, 0xef02f35c, 0xc9c1b86e, 0x94b5785b, 0xba33123f, +0x7e39f0c9, 0x028a9286, 0x7d52c9f1, 0x06f04da6, 0xbc6a68d1, 0xfc70bace, +0x95b6a129, 0xfff224bb, 0x701ef3fb, 0x3309286f, 0x544ae8c1, 0xdca62c4a, +0xf8862ee2, 0xf9e3cd29, 
0x2c07cce2, 0x8d93652a, 0xf47e4611, 0x4635f586, +0x1c03e0f4, 0x819724c7, 0x96b2a3f0, 0xeeb1ad95, 0xff08e517, 0xbd4ba6ed, +0x49ddb12d, 0x365734b5, 0x5edf7459, 0x2ee117a9, 0x067b9462, 0xb703685f, +0x72499876, 0xb71dd772, 0x5d02b478, 0x4b6fed1d, 0x2df232c1, 0xb9dea0ff, +0x1618c046, 0x0adda94b, 0xd271e560, 0xd120ba30, 0x6dfecc73, 0x4e751af7, +0x0e7a6b63, 0xa1ad5dad, 0xb41201b1, 0x245f3e86, 0x3a5114ac, 0x5cd532c9, +0x27ff9c3a, 0x7a847392, 0x6b2d0514, 0x82d0cc45, 0x30779ba2, 0x3f0c8156, +0x9bce6106, 0x3511b2b4, 0x82dbef90, 0xac19def9, 0x5f11d43e, 0xa5899170, +0xdd137ff4, 0x188077ab, 0x53cec4c0, 0x6519988d, 0x2b07a0d5, 0x269ec4ae, +0x29c2bc5a, 0x4a0206c6, 0xf8fba9f5, 0x61309433, 0x402bf26c, 0xce401562, +0xf7eb46da, 0x4d9ad0c0, 0xf99fc69b, 0x1b670e56, 0x327bb1c8, 0x5f7db32c, +0x4c96b615, 0x041e0960, 0x190ef525, 0xede526f8, 0x59eb88e1, 0x355e5454, +0x8289d63c, 0xf848c2ad, 0x5bfc881f, 0xf161d01e, 0x5a921d49, 0xc202a8ad, +0xaa9e9dc4, 0x8211e14d, 0xea945bc4, 0xa5a59180, 0x7ffd5bd0, 0xd6c107bc, +0x3d0e84af, 0x04d13d34, 0xa21f54a3, 0xfcea787b, 0xbe61865b, 0xbe0d9899, +0x336bb04d, 0xf843ae66, 0x8d400981, 0x4359b845, 0xdbda44dd, 0x27392d92, +0x05780a7b, 0xe6dd7f03, 0x13137173, 0x9b623a85, 0x2428f035, 0x996f04de, +0x74613a87, 0x924aa956, 0x4661e3ad, 0x506ce2d3, 0xa59e7aba, 0x34fd455d, +0x70d613c6, 0x094610b9, 0x06e176ee, 0xb4f0842c, 0xc53680f4, 0xd9e2920b, +0xe0ddcd46, 0x8e4c1618, 0xb5c83878, 0x7b5107a3, 0xb75c33c0, 0xa62868d3, +0x804e47d9, 0xcefc87fc, 0xca5e125d, 0x3ed40ea7, 0xe72d3663, 0x06620539, +0x314993c2, 0x99f417d9, 0x4819151c, 0x7d46c8a2, 0x95a81dc9, 0x61898a53, +0x8226bb8c, 0x4907a616, 0x91eb32f7, 0x5430c6a1, 0x390ca234, 0x599f02ff, +0x315a4cc8, 0xc15d9e2b, 0x1c7a8788, 0x2074d5ea, 0xc063a30d, 0x5532e1f5, +0xd3820192, 0x916a7b03, 0x32422c07, 0x2ae2cc41, 0x96cb84c5, 0x2bab3d29, +0x0edc0add, 0x408e4981, 0x2606b671, 0x63dca006, 0x8f4261d9, 0xbfe4623b, +0x60174b80, 0x50230b0d, 0x9d4c7af3, 0x2edb3482, 0x24d8d087, 0x1b673b02, +0xe28b7132, 0xc6c8cb36, 0x5794e3f3, 0x808c3887, 0xf9f64a32, 0xfca10213, +0xff29e373, 0x54bf19a6, 0x16014860, 0x356d4398, 0xe8d0d630, 0x4c8100c0, +0x7b68a462, 0x6cc95c68, 0x3becad35, 0xb2c6b4c3, 0x740aef1c, 0x4f37f2d5, +0xc9d3c805, 0x6e4e533d, 0xf7647967, 0x91856de5, 0x87e7e428, 0xddf2fe07, +0x69016442, 0xe0132159, 0x1afae2a2, 0x63b6d719, 0x08d20a21, 0x48ee7113, +0x2fb2d853, 0xa532ffc8, 0x8296dee2, 0x0dfaf2fa, 0x060d531c, 0xa756d04c, +0x3efed03e, 0xbe9436db, 0x5b9e8a48, 0x0a37ea61, 0x718f4362, 0xebcc9742, +0x78aa2e7c, 0x4b19f7b8, 0x308b9af0, 0x915247e9, 0xc079aa48, 0x5230e578, +0x862d6de7, 0xbe0801d0, 0x8ab11c9d, 0xaf1d7feb, 0xe9d15530, 0x44651202, +0xc006 + +e = +18448 + +k = +6144 + +ncb = +18528 + +rv_index = +0 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_CRC_24A_ATTACH + +expected_status = +OK \ No newline at end of file diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data new file mode 100644 index 00000000..9ce68b72 --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data @@ -0,0 +1,180 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_ENC + +input0 = +0xe5e0a392, 0x6f5468f1, 0xc8f1c505, 0x4502caf2, 0x23df5578, 0xc2f29afe, 0x06838d34, 0x264eab9b, +0x1e97c133, 0xabad27aa, 0x684fa9b4, 0x0fed735e, 0x6ab6bcad, 0x9c3e401a, 0x66a63035, 0xa72f7f6b, +0x509d2953, 0xf03c574a, 0x48f12185, 0x27318395, 0x008fc1ce, 0x7efcf19d, 0x0120de12, 0x6ed28bff, +0x2044547a, 0x04d95124, 
0x6baf2e1e, 0x60d93509, 0x299aa27f, 0x8b67d93a, 0xc33d9631, 0xe468dbe1, +0x1a82d0b6, 0x8ee2ff7b, 0x39298170, 0x33a90b6a, 0x273d17f7, 0xb4368615, 0x60c390b8, 0x24696570, +0xa1f4abcc, 0x08b68dd0, 0x91f8c883, 0xc9ceed85, 0x6467da9b, 0x48dfae24, 0xf4ff4992, 0x8c8200cf, +0xaac02792, 0xf506f71d, 0x096df98a, 0x3074be38, 0x9b675626, 0x4ccace33, 0x50f0f60d, 0xf4b59cc4, +0x748a8799, 0x9032c151, 0x1fb945f6, 0x01bd098a, 0x90a7907b, 0xe086172c, 0x7ed3dc8d, 0xea0dd126, +0xf9b35bc1, 0x8ad4e233, 0x37b5e1a1, 0x95bf8c3a, 0x9d288b43, 0x23a860f1, 0x4e993fc3, 0x3c774bd4, +0xe411be48, 0x22ab80c7, 0x9dd09a47, 0xc9e6988e, 0xb4cbf0b2, 0xb9e2a389, 0xf1494438, 0x2c87638b, +0x892468ec, 0x7915d1aa, 0x4d9cb9c4, 0x643a388e, 0x48c4358f, 0x937ad5a1, 0xdae53508, 0xa1305607, +0xbfb63a5c, 0xb4bb9324, 0x71daab0b, 0xa582346b, 0x0e5402e1, 0x75af83e7, 0x2b241b17, 0xfa17aca1, +0x9637cb2e, 0xc61fe511, 0x093934f7, 0x4b703b44, 0x992ab229, 0x4ddc99b5, 0xe2be6b60, 0xa2217861, +0x8f621931, 0x272c0ec6, 0x23a4d5b2, 0xc6f4e002, 0xb76397c0, 0xc00e3c01, 0xb89d29b3, 0x5e9bc984, +0x00391d56, 0x55fae02f, 0x4bc4bc9d, 0x9a147b22, 0x0fb492ed, 0x753dc9a6, 0x9f493eec, 0x6e135dc2, +0x0eeca515, 0x1c1512f9, 0xb88075e3, 0x326a97f0, 0xc755faf5, 0x06b7961b, 0xbd50fd69, 0x5f071c92, +0x2806c099, 0xe364f4ab, 0x2c4a114a, 0x7655b100, 0xf5e66750, 0xf9212705, 0x245e3b8d, 0xb0f1fb0d, +0xd7be9558, 0x27030e24, 0x9ea9b62e, 0xbde9be45, 0xce0adae6, 0xc28780a4, 0x1664ccde, 0xa7a62e3f, +0xa4f8ec00, 0x964e813c, 0x108c95e0, 0x097fe10b, 0x08da3505, 0xde413158, 0x046a31c0, 0xd9cdd804, +0xdfbdff9a, 0xfd77493f, 0xdffb2388, 0xe2ffa111, 0x29eb6ac7, 0xf9e1ac68, 0xc2ed68c7, 0x34d65405, +0x25f187ba, 0x3df23a2c, 0x96a2a5a9, 0x2bef4b26, 0x28d1c41d, 0xf380c0f1, 0xc4d5456e, 0x6b699c28, +0x0aafd6b3, 0xf88f3078, 0x114438d0, 0x93836544, 0xebd5767d, 0x9185b5c2, 0x593e4fe7, 0x29d3348c, +0x881f16c4, 0x77e33d03, 0x6794bbf5, 0x6b6c38f3, 0xa860673a, 0xb1d2d152, 0x08f9f310, 0x82c37500, +0xce0ae34c, 0xd5478e86, 0xcfe1adfc, 0xfaf06d37, 0xf7e21bfc, 0x3b077287, 0x45b33c08, 0x41 + +output0 = +0x25832957, 0x1235fe50, 0xa3b0b09b, 0x0bb7c797, 0xce815b03, 0x7b702251, 0x2c25fc60, 0x23ac55e0, +0x8bea35cb, 0xe33b833c, 0xec1d38a4, 0xb59acb59, 0x0f1fa8b7, 0xcb77fce6, 0x6564fc78, 0x9e6c39cc, +0x94f1bed1, 0xe02684ad, 0x074045a4, 0x4dc9b344, 0xd187bed8, 0x2a129ecb, 0xf6c5bd4d, 0xf98d5c26, +0x78addaae, 0xbe87f7d2, 0xf42a7df6, 0xa4683306, 0xbfa9b509, 0x3a67ddfe, 0x013d68d5, 0xb2a1fb8a, +0x0dfc5da7, 0x792f6b24, 0xaa220ec2, 0x6cb0a7a4, 0xd6e5ea4a, 0x1cd6156c, 0x9bac3cad, 0x926a3950, +0x658815b0, 0x2a2e4349, 0x9684d70f, 0x67a62f63, 0x7a8aae48, 0x8aa046e5, 0x3cad66d5, 0xe7e269b8, +0x95a9a8ca, 0x426f46ae, 0xf780d265, 0x2e0d244d, 0x5480bc07, 0x66554568, 0x938aeda7, 0x1144006c, +0x450320b3, 0xce9dfce9, 0x59a277a8, 0xde38cc1a, 0x1cd2bd60, 0x0079ca20, 0x58dd4633, 0x55398bbd, +0x5fcf982f, 0xdbbd3e30, 0xdcaab5e4, 0xc368c59f, 0xfba8e7d2, 0x9902b581, 0x2ec0a87f, 0x0ebea4ee, +0x13bb2fd2, 0xbc1df5be, 0xe34baa5e, 0x4a66fc5d, 0x7fc9d926, 0x1d2dc178, 0xe780752f, 0x160e82fd, +0x2e6205ba, 0x3d2fd1d7, 0x22b03620, 0x1c1ce57b, 0x5a8f8e09, 0xc9522523, 0x712e5cc0, 0xa27b10ad, +0x52d2df01, 0x3fe886d6, 0x29f37f12, 0x479e3783, 0xa37a445c, 0x338f9df1, 0x74dca176, 0x11acbbc4, +0xac50920a, 0xe9375593, 0xc483a084, 0xed281b58, 0x21ea3cd3, 0x5cf2e76a, 0x50c5e5f4, 0x68d14253, +0x374cea8e, 0xf3533631, 0xb8f1bece, 0x3a188078, 0xb15325de, 0x989b4e70, 0x40f142f9, 0x4a55cc30, +0x72a442b2, 0xe583a6c7, 0x4cb90a7d, 0xe19187c0, 0x899ea4e0, 0x42598555, 0x7d012357, 0x3e9cd281, +0x75094cbb, 0x80afef50, 0x8a834f90, 0xa6182605, 0xdfb7c49e, 0xde57caf6, 
0x62720457, 0x5a40705e, +0x2728f823, 0xde22d20c, 0x8389268a, 0x2aad9f81, 0xa376314c, 0xd9288769, 0xa7a1cc5d, 0x1dcdb0ed, +0x0b23e2d8, 0x024e9165, 0x0e375ea5, 0x27c561c7, 0x48687976, 0xe97021bc, 0xbf82aea2, 0x6cb87bd3, +0xaa3f7ed5, 0x1e5c3aaf, 0xccad60b5, 0xb672d5da, 0xde0ff047, 0xf7cd4216, 0x45220a3b, 0x060d16c7, +0xb6dc8617, 0xc10d9dc7, 0x3d229a69, 0x8deea79a, 0xda6883e1, 0x610f41f9, 0xfa0f57f0, 0x6f97b547, +0xf00577ee, 0xe1be3d28, 0x809810e0, 0x7d4b8416, 0xdcc82557, 0xce6a01b1, 0xbc4cb663, 0x8bc88eac, +0xab9e918d, 0x6ec29c41, 0xbd0e745e, 0xa97bc23e, 0x1e1370c2, 0xc2744139, 0xf5d2f67b, 0x9d5ae9dd, +0x1040a10a, 0x1b54944c, 0x9a6810b1, 0xc7079d11, 0x8c89d60d, 0xc9523c76, 0x304b7adc, 0x2fd915eb, +0x076192e8, 0x8cbd3bc1, 0x90440a9c, 0x81961a26, 0x6a53f026, 0xd2b07ce5, 0xc72e767a, 0x9d47c3d9, +0xab2b7cb3, 0xf4b3aeec, 0x18441c12, 0x56b3007f, 0x81c3cfff, 0x128b72ed, 0x214dee41, 0xc735baeb, +0x89cf54e7, 0xcdfce851, 0x77cc0a00, 0x57cc6e0a, 0x7ffc8dad, 0xb036445b, 0x0699a9f7, 0x8781d7b5, +0xdd0f9d01, 0x43f01de8, 0x6a2e7dee, 0xf802d6b3, 0x10094fc1, 0x954cf6ad, 0xd7aee91a, 0x1e3369c8, +0x2acd8c15, 0x3303a61d, 0x79344e4d, 0x1253cd91, 0x34c34c77, 0xbd075dce, 0xe0fdd1cc, 0xf9015c56, +0x931b9096, 0x604f4bbf, 0x3b1d991a, 0x24007891, 0x7c0c1709, 0xe0d43230, 0x11821166, 0xf21e2558, +0x2c37009d, 0x98718786, 0xa9a112e6, 0xd7e20b10, 0xfe917f72, 0xb76101ff, 0x42a8d327, 0x9039f71b, +0x6d1628cb, 0xfd01251c, 0x6f0572a2, 0xb36a7e7a, 0x2b1a4556, 0xe679b027, 0x313573d0, 0xb5f7c20c, +0xc8d1530a, 0x338e756e, 0xe6befc47, 0xfe12dccd, 0x6ad1e5b7, 0x40b078ff, 0x96aaacba, 0xedfdeb8f, +0x8da3673a, 0xc965882e, 0xa4dec6e9, 0x7bace08f, 0xf144852a, 0xd5d787d0, 0x204d32e6, 0x733098aa, +0x12f3a53a, 0xe060086d, 0x3ab1739a, 0xd391e860, 0xc23d9687, 0x0d3a84e0, 0x855ea4cf, 0x0d0ea5b7, +0xfd082430, 0x70df7cb4, 0xe712f026, 0x84ce32ac, 0x67b04602, 0xac0b1faa, 0xed119dcd, 0x184afb0b, +0x9abbefd2, 0x41b3e2f9, 0xe18d5703, 0x6e5f4de7, 0xb47380a2, 0x239a3714, 0x810bb522, 0x5ad95c77, +0xe4b37e0c, 0x1cc200a6, 0x404c4390, 0x90533af1, 0xb49f2403, 0x1dada65f, 0x96f86d1b, 0x58f679a9, +0x1fe3c66d, 0xcc5eb28d, 0x504b7aff, 0x7c18a397, 0x47ad4d5f, 0x348f12c5, 0x80bac6e0, 0x47bb3c74, +0xccf16f02, 0xa932a5af, 0x51ebb452, 0x25808fcc, 0x47d14a43, 0xf0c2a0af, 0x30f938b5, 0x428420c7, +0x61a66d9c, 0x082caa4f, 0xd9c74e3d, 0xb4d161b9, 0xb409f331, 0x14245a44, 0xddb67f1e, 0x1018c62c, +0xc1e58e6d, 0x4ba86bd6, 0xf4a282aa, 0x4a85f90d, 0xbf0efb7b, 0x03706f08, 0xbe06a5f2, 0xe37f45f1, +0xcbc4001c, 0x14decb05, 0xf3c6050f, 0xd1df33d7, 0xdab729c7, 0x8898dad3, 0x4a44a313, 0x30bee51e, +0xd67b07ef, 0xf233eb19, 0x4c52bca3, 0x5d824a4b, 0x275faf2b, 0x428f8a95, 0x03f70c8b, 0x609e4036, +0x59ce2147, 0xc7eecc6b, 0xa22d21c2, 0xc5764d9a, 0xb4524bc6, 0x6d9bdfc9, 0x0918b4f7, 0x62bb8022, +0x7a98bdcc, 0xc963131d, 0x949036b8, 0x649ae51d, 0x93038d33, 0xe26cb45e, 0x4813516a, 0xc06c759c, +0xfa6a9793, 0xdd59c686, 0x37b4b5b0, 0x66a02fac, 0xde159ecf, 0x90cdb66e, 0x1c9f7f94, 0x3a31f69e, +0xcd93bc45, 0x32744ee0, 0x0f6ae2b1, 0x3bc9e963, 0x8cddbae4, 0x8f9517ff, 0x7d304ec1, 0x344e0357, +0x7cd2ed30, 0x1f9fa3a4, 0x9dbeb67b, 0x59253b51, 0x41b6911a, 0x12b574af, 0x62a777da, 0x9a6c834a, +0xaa16081d, 0x3f105c37, 0x6eac5dc0, 0x76e56fd1, 0x67bffbfb, 0xce069b80, 0x8fe970b1, 0x61945ee0, +0xaf9d44c2, 0xf2cd16cc, 0x07a768c5, 0xd6eecaa6, 0x6e8ce063, 0x4b30b443, 0x360822c9, 0xa6442b33, +0xf02d09a0, 0x22056421, 0xc552aa94, 0x1346ea1a, 0xab6c54a1, 0x35a69bdf, 0xfc6095ea, 0xbde0d55a, +0x1b433ec0, 0x137483a0, 0x8c1cb29d, 0xf2bc4e17, 0x45df6882, 0xbd702bc9, 0xabbead61, 0xc97ef61d, +0x01b68438, 0xbdd10231, 0x2e046323, 
0x80e619bd, 0xa5126a2a, 0x74106e0d, 0x9e57b499, 0xcb275127, +0x9e01f21e, 0xbaf4a744, 0x38604f7e, 0xa85493d6, 0x9a4b4541, 0x3f397269, 0xdde71734, 0x157dfe98, +0x40334ad9, 0xc889f52f, 0xbddf4d23, 0xd2200018, 0x2a4b7180, 0x8a1a357f, 0xcd6af7e4, 0x1426b52b, +0x2a45481f, 0xfc38394c, 0x05123e4e, 0xa0215834, 0xe2c2306e, 0x4ee9fe1a, 0x6bbbf30c, 0x19a39072, +0x846b0bdf, 0xde4ef7f9, 0x47f1d67c, 0xf507e9e8, 0x14c1f617, 0xaa756ccf, 0xececc02a, 0x183cd616, +0x9d4bac81, 0xd959d1ea, 0xefb81e2b, 0xd8ca27fc, 0x5c7e9eb4, 0x64d5d550, 0x32ed3c41, 0x8f62e8a8, +0x5e2b65c5, 0x0a7e5e1b, 0x88abb657, 0x781ee74a, 0x840c27fd, 0xec83df4c, 0x163acaf0, 0xb2649960, +0x2a706f9a, 0x5226c811, 0x5bcb501c, 0xb87caa99, 0xecb4a37e, 0x8927c424, 0xfea9ac62, 0x17da41d1, +0x571c5987, 0x4217ea55, 0x5043b664, 0x18fb3ece, 0xfb2d13b9, 0xb6599d4b, 0xbb8a536d, 0xa719683f, +0x81cc2d0f, 0xc888f1d3, 0xccb0a428, 0xd2cdff48, 0x0b3440e6, 0x45700619, 0xcea80987, 0xa5ad1cc3, +0xd29deaf7, 0x4a8c95af, 0x9ac63a98, 0xf46d26b0, 0xf9a09352, 0x5f8b3fb7, 0x367c8da1, 0x3244b6a2, +0x9634b973, 0x27292ae5, 0xc4f7ae33, 0x9b73a4f7, 0x086a8bb4, 0x616cd17b, 0x74fdb210, 0x5f483d60, +0xf7666630, 0x107cd026, 0xfa2a4a07, 0x171b319e, 0x509d50c8, 0xf2d15061, 0x981d5759, 0xf23bc5c6, +0xe9befe22, 0x1e784009, 0x7cda3751, 0x1c5b2b69, 0x3ba09115, 0x756f1ef5, 0x2b47353b, 0x553a33d9, +0x1f029262, 0x6393d0ee, 0x97e01180, 0x0c53980c, 0x5da79e34, 0x1e0b53c4, 0x06a9f2ba, 0x622181b0, +0x4898ac50, 0x874c780e, 0xf8cc5072, 0x4a5a63ca, 0xf6dea6a2, 0xc3134004, 0x4505bae6, 0xc3f4b638, +0x0dda8e07, 0x0c4135bf, 0x61e80f25, 0x4b31a76c, 0xd91f0d09, 0x4ba0d577, 0xc95d0086, 0x21a61725, +0x16862c1f, 0x9588795d, 0x6ecde7c4, 0x353288cd, 0x94e4647e, 0xeceaba51, 0x7c602ff8, 0xbba48cef, +0x6556c6bf, 0xde912190, 0x86602d55, 0xf549554a, 0xd4d52081, 0x9728c5db, 0x7d09e129, 0x068008ab, +0x353a6ea4, 0x90b44e4e, 0x5c97a08a, 0x6293b3c7, 0xa3e4114b, 0x7ce44646, 0x8f2b0269, 0x291ca1bb, +0x98721528, 0xe30f5532, 0x0bbb2951, 0x7b7c390a, 0xb835b070, 0x2712e51c, 0xc20fb607, 0x5a05ce52, +0x5eb33cc2, 0x33c8b3a8, 0x81433abe, 0xb99cc5de, 0x817a5bab, 0xc76ffef0, 0xc68fb77c, 0x96c35c46, +0xef1bedc9, 0x42d84a19, 0x54440a6e, 0x3c4b7400, 0xe88bdd94, 0xe1b91c7d, 0xdcdba422, 0xc865625f, +0xaaed9adf, 0x782f8dd7, 0xd267ef7b, 0x366340af, 0x5a9b408a, 0xd6edff9b, 0x8356ad73, 0xbaaf18d0, +0xdf752a1b, 0xb246d2c0, 0xe2209cf7, 0x7b4aaa2a, 0xaeaec406, 0x5dc1665d, 0xcad3ca61, 0x9603b5c9, +0x58012ba9, 0x32945486, 0x78fda0e2, 0xfa326649, 0xe88a7466, 0x6a54aea7, 0x6a56ad08, 0x9e86cbd3, +0x8aaa7c2e, 0x66e45a99, 0x285d26f4, 0x40d2740f, 0xc87be0d2, 0x55844605, 0xd87e6a56, 0x04c036a9, +0x00321b41, 0xc99f5e34, 0x7a87eadc, 0xc3ac9125, 0xdd0be68d, 0xa70cc221, 0x6d340390, 0xb3d88bd5, +0x8cf95295, 0xeb03f3f5, 0x5a4bbedd, 0x56fcc9ad, 0x7a2e3d8c, 0x501bb88f, 0x8cfa9729, 0x4beaee02, +0xfb22ede0, 0x51ef3bb1, 0xa4eac5db, 0xc6df35be, 0x9c6da264, 0x128cf797, 0x58f7d2d1, 0x20d87f0e, +0x56a06be1, 0x127ded22, 0x6b03d2f3, 0x51be2702, 0xe898c0c1, 0x5532a2f5, 0xc2059c2c, 0x07d11ae7, +0xfd1d20ba, 0x6e682d25, 0xff27f183, 0x79339832, 0x47c475e4, 0xd8193faa, 0x1d6a37f3, 0xba4b4cc7, +0x25a910c1, 0x5335c90a, 0x084a987e, 0xb281453c, 0xce33dd8e, 0x7fae16a2, 0x5c4ecf25, 0x2d340555, +0xa4ee8816, 0x651373c3, 0xefeb3c3f, 0x0188871b, 0x55e2ad83, 0xe904173b, 0x2f948fb9, 0xc50c0314, +0x2a24ab54, 0x687a2c47, 0xabd0573e, 0x7908cc94, 0x490a1e1e, 0x555895e8, 0x30722594, 0x291dd817, +0xc0b4ebc3, 0xfa0e5597, 0xf80409f8, 0x6152a038, 0x4bec698a, 0xa56cff7d, 0x4770e57d, 0x04e72526, +0x823fa205, 0x22cd7082, 0x68a2e82d, 0xfa193898, 0x17c3a4d2, 0x7298366a, 0xcadc958d, 0x0cdb7e1a, 
+0x228eddd1, 0x1459b630, 0xe3552ae0, 0x1c76ec70, 0x96677752, 0x17c28b84, 0xe82a9a0e, 0xbb37fd2b, +0xe357cd86, 0xa5f3aafa, 0x0a56ebc1, 0x57adcddc, 0x007f642b, 0x2c64e1fd, 0xa2b073df, 0x60715c24, +0x6d7861d0, 0xd0796ccb, 0xa29916dc, 0x7eaad923, 0x3618dee8, 0x1094af8d, 0x70051ff6, 0x597ba4ff, +0x70e7fe76, 0xdb83025f, 0x09011eee, 0x44680188, 0x5c72d5b7, 0x1610cb8d, 0x643be6ac, 0xecc8cacb, +0x19d9b888, 0xcc19b4ea, 0x40e7e526, 0x27ecd3eb, 0x01279cba, 0x1794e331, 0x6dbf274c, 0x95de5d2f, +0x14aad0a9, 0x45c90401, 0x0611bb41, 0xd019a189, 0x68dd707c, 0xc563c798, 0xa4c79d2c, 0x5db10eb3, +0x2689fe92, 0xbb137c10, 0xa4c0c9d8, 0xa9610249, 0x056f1268, 0xcb57ae36, 0x62a7270d, 0x349c7dec, +0xc237db79, 0xebcabeba, 0xc421413f, 0x0bf08741, 0xfcfc6f35, 0x28d71e38, 0xe41e24b1, 0xa3bb1ed2, +0x4c757e5c, 0x8f1e95f8, 0xac00d0cc, 0xeca670c7, 0xdfd87ac5, 0x43b4f5c7, 0x997a0f6b, 0x785d6b90, +0xd0197018, 0xdf81defd, 0xd2e73e04, 0x603dabe6, 0xf0148c2f, 0x64df0a91, 0x9aae51c9, 0x93867ced, +0xcc58e131, 0x60daa1d2, 0xe3d43433, 0xd51c9947, 0xcc742731, 0xd0e54c33, 0x1fcddc7b, 0xc06505de, +0x0169991f, 0xb4f43bb9, 0x91a901f6, 0x8017b9d3, 0x70914002, 0x2d03c3c7, 0x1861064e, 0x51821521, +0x03d029ef, 0x7768c872, 0x2a618e19, 0xbe00911a, 0xf927772d, 0x16f0ef1f, 0x3a7d721b, 0x73bf2184, +0x81b20c99, 0x50c2d166, 0x2027da1f, 0xe6a7f756, 0x516435ab, 0x077bb2a2, 0x33076d9e, 0x2fcc1053, +0x3da5507b, 0x58e7861c, 0xcb7f34e3, 0xc1dd6cee, 0x5d7eeb2f, 0x8bf7af16, 0xcaaa0b04, 0xbffe68a9, +0x7aa6d3de, 0x86e8d238, 0x6d9c9e5c, 0x0afe48ea, 0x54a8b2c7, 0x7d081d4f, 0x24635e7d, 0x83a90ad2, +0x5faa3307, 0x86d02631, 0x3ba7090e, 0x890ea613, 0x6379381d, 0x43082edc, 0x45fadca0, 0x507a5be8, +0x4002d3e0, 0xcd47db8f, 0x016f02f7, 0x2cc37a2e, 0x6b2440e8, 0xf0a17a06, 0xd1d9ccba, 0xb4bfd01e, +0xfb2e8da1, 0x2b9eafb9, 0x78351034, 0xd5741ede, 0x0728eaf6, 0x7943413b, 0x502b32a2, 0xcd7517b8, +0xebc7a095, 0x0c604a3e, 0x3404c921, 0xa5130fc4, 0x49320039, 0x6afa45fb, 0xdfb6d1d1, 0x9f976a89, +0x6edc8665, 0x25dbf831, 0xa4f7cfec, 0x317a09b5, 0xdaf4c587, 0x28517cd4, 0x6b0c4ef3, 0xcb4307a8, +0xff2670b4, 0x53faca1c, 0x4e2b952a, 0xf8c81cb5, 0xad345402, 0x0cfa7a14, 0x8f530b2f, 0x08720c93, +0xdac62944, 0xa2fa1466, 0xecd483c0, 0x1d969b7d, 0x301f431b, 0xa245449b, 0xfbe74141, 0x61ccd26d, +0xeed80681, 0xba661d5c, 0x2aa8ba84, 0x98df402f, 0xb0bfa754, 0xf786f0eb, 0x502a3f00, 0x5714ef6b, +0x0cc031fe, 0xbd5cb04c, 0x5cf040e1, 0x3d733d6f, 0x9b721cfd, 0xa93dad7d, 0x343a8188, 0x5beea144, +0x77f00ee3, 0xb39e61bd, 0xc53b2a3f, 0xa8b4c424, 0xf5bad225, 0xa85879f2, 0xcfb028f4, 0x09643370, +0x1c7204e6, 0xcebc96e5, 0x12227cec, 0xd7a429da, 0xb5645c6c, 0xf99d4c2b, 0x417bdfb6, 0x0b289280, +0xd9cb2cb6, 0x36d1a187, 0x69839b3c, 0x59de4109, 0xd03843a6, 0x46eb3539, 0x11a526ce, 0x56c78934, +0x763909cc, 0x656ca8af, 0x5b0bdb9d, 0xfac27a43, 0xe1f96c06, 0x6cebe65d, 0xf94709d9, 0x63efc9f1, +0xc95ba413, 0xe704de3c, 0x261e2b43, 0x9c3ef6a0, 0xad4bbe93, 0x79f1cfd8, 0xe314fc58, 0x3470d507, +0xdd0e43e3, 0x394aca27, 0x6bbbf7f1, 0xb213d5e9, 0x1ba99155, 0x4bf71a64, 0x7aa72d51, 0x36a82476, +0x81d0a1c9, 0xc175a36a, 0xda05fc03, 0xfe16edc6, 0xbbbf6f57, 0xb00978f6, 0x0e17eb6c, 0xe905fe98, +0x49241c46, 0x6cc1fcda, 0x8a562cdf, 0xae6c7a70, 0x083e66ed, 0x433be4c6, 0x2092bc04, 0xb4326383, +0x92006a4a, 0x401602df, 0xa54a2952, 0xa4ae512c, 0x46153a61, 0xbaf9bdca, 0x56a95e63, 0x5eadc50f, +0xe403dc0b, 0x3708ba31, 0x21db3941, 0xeb74c1c8, 0x8d2628cf, 0xb7925cf4, 0xdb1ad60b, 0x67dfb1ea, +0x4b8893ec, 0x2d101360, 0x3036d21b, 0x9ed1eb42, 0xa1a60268, 0xe1d6502a, 0x459b4907, 0x1275e279, +0x20efb17c, 0x7f4ae419, 0xf6e4a74b, 0x35698d03, 0x5414844a, 
0x2397a6b9, 0x7e41f393, 0xe78fd97d + +e = +32256 + +k = +6144 + +ncb = +18528 + +rv_index = +0 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_RATE_MATCH, RTE_BBDEV_TURBO_CRC_24B_ATTACH + +expected_status = +OK diff --git a/app/test-bbdev/test_vectors/turbo_enc_c2_k5952_r0_e17868_crc24b.data b/app/test-bbdev/test_vectors/turbo_enc_c2_k5952_r0_e17868_crc24b.data new file mode 100644 index 00000000..13006501 --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_enc_c2_k5952_r0_e17868_crc24b.data @@ -0,0 +1,300 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_ENC + +input0 = +0x11d2bcac, 0x7715ae4d, 0xc9f4dbc6, 0x2fda3c1a, 0x09349b16, 0x2cd3c189, +0xe5650429, 0xc73c42c1, 0xe7336bb7, 0xbb7de593, 0x83f986aa, 0xc0ade12e, +0x6730b095, 0x78f7c059, 0xaa907199, 0x75c323d1, 0x7061f9ba, 0x97e067bf, +0xc155cd55, 0x6a4cb08d, 0x4260e2fa, 0xff35e496, 0x37f251d3, 0x02fd9f9e, +0x6f53345c, 0x790cda6d, 0x3b8549e4, 0x56d0c6ea, 0x70a38006, 0xfff18223, +0x6f2035b5, 0x6cf508d3, 0x98a0a308, 0x432353fc, 0x0eb818eb, 0xdd9a128b, +0xf92431b2, 0xad788286, 0xda07d5de, 0x44b4b3a8, 0xbe62fd67, 0x61a4eb54, +0x90d7ac9f, 0xc4d9a930, 0xfddc3c24, 0xf5e3b1c8, 0x38143538, 0xcb1d3062, +0xcae36df3, 0x50a73437, 0x542dcab7, 0x875973a0, 0x34690309, 0x49cb1ddb, +0xf8a62b92, 0x82af6103, 0xc9f8c928, 0x3835b822, 0x16b044a6, 0xdae89096, +0xa5be47d4, 0xaf61189e, 0x5cd70faf, 0x037331cf, 0xef7fa5f6, 0xb2f2b41a, +0xa6f222c6, 0xdb60fe4c, 0x2a857a9a, 0x0b821f9d, 0x348afd17, 0x7eecbaeb, +0x92bb9509, 0x8a3cec24, 0xd02549a2, 0x155ffa81, 0x2b7feac6, 0x3ee461e7, +0xc981f936, 0x89b544c7, 0x9a431e36, 0x62511734, 0x769f9647, 0x211a747e, +0x567abef4, 0xad87e2b4, 0xa3e0c3bf, 0x6d325dd5, 0xf561cc46, 0x39925735, +0x3d8abbfd, 0xc3724c88, 0x8bdf03c9, 0x1b02a12a, 0x4f233a0c, 0x9ca9444a, +0xc5d1f7e6, 0x4d995f37, 0xd9aefb32, 0xd0465248, 0x0f3a3b21, 0x62ea8c0c, +0x91f8d54e, 0x5cf75514, 0x14618a01, 0x8fe9b87e, 0xf2b424f9, 0x49724ce2, +0xa1464587, 0x5e00dc83, 0x59475455, 0x444119b1, 0x4fb9f036, 0x65fcbc1a, +0x1c63a990, 0x767a6114, 0xb0ede06a, 0xcfb91ae3, 0x7874af5f, 0xf78772a3, +0xa2932c81, 0x77f2759d, 0x930dc8f1, 0x95ce14a5, 0x134363ee, 0x61ee143f, +0xf0034b35, 0xfdc75fce, 0x3be2dcf3, 0xff3a07eb, 0xdc43f0eb, 0x23ba73bb, +0x45f7649e, 0xcacc297c, 0xa3dd98db, 0x058d46a9, 0x6bcfc154, 0x7be8e1e6, +0x618a4754, 0x8d193c46, 0xba39e1ce, 0xc3b85cfc, 0xd80d853b, 0x38d6440d, +0x9c1a6185, 0x90c9dfcb, 0x01c6e841, 0xeedfe6ac, 0x7b61a3ae, 0xad3924dd, +0x514bc1a8, 0x6ed60d7e, 0x1b74b79b, 0xea295947, 0x5f9a5d33, 0xcd24311f, +0x0bd3d10c, 0xd214ecfe, 0xcb37035d, 0x8ce95168, 0x7020cb52, 0xe432107d, +0x042d63ac, 0x6201c8dd, 0x4bea65a2, 0x1a3cf453, 0x5b8e868d, 0x3d653bb0, +0x24c1967e, 0x37f183a9, 0x3700e703, 0xb168a02b, 0x2592cd82, 0x5c0cdb66, +0x70b64ebd, 0xf8dcb9a7, 0x634a8335, 0x06642398, 0xfe2f4497, 0x2e775256, +0x30f85cbe, 0xf4d2fdaf, 0x46ca6c9d, 0x022097d6, 0xcedeb03e, 0xaf9437bc, +0x341f8045, 0x93f6ff86, 0x2e019ec9, 0xc80116e8, 0xf984ff34, 0xe4f1be20, +0xfe455510, 0xc561cdba, 0x2d6f560f, 0xb239fd9d, 0xd254e343, 0x8090448a, +0xa724c4f1, 0xa00fe4f0, 0xf0379ac6, 0x0c61b383, 0x44b2228e, 0x9d223bc0, +0xe7cfbc38, 0x62ae6a40, 0x6f8d445c, 0xc6045eaf, 0x8507df18, 0xe64c057e, +0x4e589113, 0x3ba3bf55, 0x6a702b19, 0xea23af7d, 0xa1b98022, 0x7096c45c, +0xace3b8c1, 0x47ed7bb4, 0x198a2b8f, 0x586b57db, 0xbf9d24d4, 0x96054c66, +0x13f26306, 0x2a43524b, 0x1ecd419b, 0xb3a24852, 0x4ed7765f, 0x4c99fbe3, +0x6c3b9ebb, 0x1ef7bfeb, 0x0daf1256, 0xe9c90c7f, 0xbe067ed7, 0x469102ba, +0xedccc098, 0x96691fb1, 0x2a9b850f, 0x58aba5f2, 
0x0bcad31d, 0x90b1f25b, +0x78a55fe4, 0xdd41e626, 0xdcba3e83, 0x2209d8a9, 0x6ee2f76f, 0x55643570, +0xd181a0ef, 0xd1b2790a, 0xc7793587, 0x3aebe10c, 0xbbde5fe0, 0x5c308948, +0x01439ece, 0x5319fbef, 0x10b0de1b, 0x6595f4f3, 0xa3e6a7c9, 0xf97b2e10, +0x4513cf3d, 0x8ded3394, 0x0949a772, 0x934f2269, 0x2b636628, 0x92101c68, +0x804dfd94, 0x178315be, 0x384adf65, 0xe64d11d3, 0xc44881c0, 0x7a8e7886, +0xb721c0c8, 0x6d0d0fc0, 0x97f0cc5a, 0x3c67c31f, 0x4906dd09, 0x6202c01a, +0xad8d6c5b, 0x53158056, 0x13704ed5, 0xa1dfeaaa, 0xb35e4b40, 0xd9c3e6fa, +0x0c9650ce, 0xbdfe5c22, 0x874bedb3, 0xbd07d0bd, 0x4eef4a4f, 0x2970d932, +0xfdd003ac, 0x609fbea4, 0xad817794, 0xb4ee9f96, 0x559b3faa, 0x1edcf35f, +0x8342e9fb, 0x69a73981, 0x5fc86a07, 0x929b45aa, 0x5c32e847, 0x43421484, +0x9962abf5, 0x8cf07ae4, 0xba7b556d, 0x2da42ac1, 0xac0eef6f, 0xdc118ef8, +0x6061597a, 0xc5f7a007, 0xb9b92c51, 0x87049008, 0x72c9da8a, 0x54dc48c6, +0xb4c4b392, 0x50204c8d, 0x00b2ef59, 0xcdac687c, 0xb2158dc3, 0x4f1c65e3, +0x8af22286, 0x4444ee49, 0x2d8921ce, 0x3c0a8d11, 0xb97325da, 0x056aec7b, +0xa96ca31b, 0x71a5febb, 0xa3166c47, 0x5e920dcd, 0xa58b2e61, 0x4d3b392f, +0x1b9b177b, 0x058b0dfa, 0x4bdc16ab, 0x8a4eeac3, 0xfbf2d2fc, 0x636f04a6, +0x75a14bda, 0xa28f1947, 0xf2f248e4, 0x934e7dd6, 0xeb4401cb, 0x37f693a2, +0xd1cb7e16, 0xbd5d9abb, 0x6f613f63, 0x9bfb5fd5, 0x6b70c5ea, 0xbdcc6c7a, +0x41efaee1, 0xf45d3965, 0x0499b12b, 0x7820873a, 0xed53961f, 0x303c4f04, +0x2e4bd363, 0x1bcdd589, 0xf7317de0, 0x7ba3d53a, 0x61fb36a8, 0x23e55b43, +0x0ef838e9, 0x7b9d91a3, 0x3cd7d835, 0x5f28e517, 0xa100fad7, 0xff39800d, +0xd7894433, 0xb61a62cd, 0xde618b70, 0x8560d770, 0xed02 + +output0 = +0x11d2bcac, 0x7715ae4d, 0xc9f4dbc6, 0x2fda3c1a, 0x09349b16, 0x2cd3c189, +0xe5650429, 0xc73c42c1, 0xe7336bb7, 0xbb7de593, 0x83f986aa, 0xc0ade12e, +0x6730b095, 0x78f7c059, 0xaa907199, 0x75c323d1, 0x7061f9ba, 0x97e067bf, +0xc155cd55, 0x6a4cb08d, 0x4260e2fa, 0xff35e496, 0x37f251d3, 0x02fd9f9e, +0x6f53345c, 0x790cda6d, 0x3b8549e4, 0x56d0c6ea, 0x70a38006, 0xfff18223, +0x6f2035b5, 0x6cf508d3, 0x98a0a308, 0x432353fc, 0x0eb818eb, 0xdd9a128b, +0xf92431b2, 0xad788286, 0xda07d5de, 0x44b4b3a8, 0xbe62fd67, 0x61a4eb54, +0x90d7ac9f, 0xc4d9a930, 0xfddc3c24, 0xf5e3b1c8, 0x38143538, 0xcb1d3062, +0xcae36df3, 0x50a73437, 0x542dcab7, 0x875973a0, 0x34690309, 0x49cb1ddb, +0xf8a62b92, 0x82af6103, 0xc9f8c928, 0x3835b822, 0x16b044a6, 0xdae89096, +0xa5be47d4, 0xaf61189e, 0x5cd70faf, 0x037331cf, 0xef7fa5f6, 0xb2f2b41a, +0xa6f222c6, 0xdb60fe4c, 0x2a857a9a, 0x0b821f9d, 0x348afd17, 0x7eecbaeb, +0x92bb9509, 0x8a3cec24, 0xd02549a2, 0x155ffa81, 0x2b7feac6, 0x3ee461e7, +0xc981f936, 0x89b544c7, 0x9a431e36, 0x62511734, 0x769f9647, 0x211a747e, +0x567abef4, 0xad87e2b4, 0xa3e0c3bf, 0x6d325dd5, 0xf561cc46, 0x39925735, +0x3d8abbfd, 0xc3724c88, 0x8bdf03c9, 0x1b02a12a, 0x4f233a0c, 0x9ca9444a, +0xc5d1f7e6, 0x4d995f37, 0xd9aefb32, 0xd0465248, 0x0f3a3b21, 0x62ea8c0c, +0x91f8d54e, 0x5cf75514, 0x14618a01, 0x8fe9b87e, 0xf2b424f9, 0x49724ce2, +0xa1464587, 0x5e00dc83, 0x59475455, 0x444119b1, 0x4fb9f036, 0x65fcbc1a, +0x1c63a990, 0x767a6114, 0xb0ede06a, 0xcfb91ae3, 0x7874af5f, 0xf78772a3, +0xa2932c81, 0x77f2759d, 0x930dc8f1, 0x95ce14a5, 0x134363ee, 0x61ee143f, +0xf0034b35, 0xfdc75fce, 0x3be2dcf3, 0xff3a07eb, 0xdc43f0eb, 0x23ba73bb, +0x45f7649e, 0xcacc297c, 0xa3dd98db, 0x058d46a9, 0x6bcfc154, 0x7be8e1e6, +0x618a4754, 0x8d193c46, 0xba39e1ce, 0xc3b85cfc, 0xd80d853b, 0x38d6440d, +0x9c1a6185, 0x90c9dfcb, 0x01c6e841, 0xeedfe6ac, 0x7b61a3ae, 0xad3924dd, +0x514bc1a8, 0x6ed60d7e, 0x1b74b79b, 0xea295947, 0x5f9a5d33, 0xcd24311f, +0x0bd3d10c, 0xd214ecfe, 
0xcb37035d, 0x8ce95168, 0x7020cb52, 0xe432107d, +0x042d63ac, 0x6201c8dd, 0x4bea65a2, 0x1a3cf453, 0x5b8e868d, 0x3d653bb0, +0x24c1967e, 0x37f183a9, 0x3700e703, 0xb168a02b, 0x2592cd82, 0x5c0cdb66, +0x70b64ebd, 0xf8dcb9a7, 0x634a8335, 0x06642398, 0xfe2f4497, 0x2e775256, +0x30f85cbe, 0xf4d2fdaf, 0x46ca6c9d, 0x022097d6, 0xcedeb03e, 0xb4ca20bc, +0x4fde636c, 0x10ea20a0, 0xe6da8721, 0xbfde5b08, 0x9c3739da, 0xb6dc015a, +0x427db088, 0xdfdb8e6f, 0x756be6c1, 0x21f5297b, 0x06135665, 0xc1602b7d, +0x049536c5, 0xbbb3b801, 0x0cdb0c19, 0x7b2ad622, 0xfee8218f, 0xc5c7f123, +0x8abd3301, 0xa15b534d, 0x29dd2053, 0xd409abf9, 0x3ef19d6b, 0x70a3cbc2, +0x7a51423a, 0x4505b2ad, 0xdc74c75e, 0x068751a9, 0xb0b56437, 0x14a10371, +0x76af806f, 0xa8a47e19, 0x7c97a26e, 0x7998a3d6, 0xdc1ad1e2, 0xb532a301, +0xca8a3e7d, 0xd0aef374, 0x204990c0, 0xc7011aec, 0xa69151ea, 0x53390026, +0x7bf0d762, 0x735c2202, 0x64159e54, 0x5a3b1a56, 0x9ef1def2, 0x0ab8a961, +0x587b0886, 0xb8cc5975, 0x2a5a0f23, 0x069d05be, 0x9cc3c207, 0x40ef1a02, +0x4fae3f5b, 0x1f127aae, 0xd4e6d411, 0x17ac43ef, 0xe4bf891b, 0xfbb21765, +0x2c560c7e, 0x8561988c, 0x73a01032, 0x0cfef73a, 0x694c4991, 0x885d7a3f, +0x4218d1ff, 0xc2efaffb, 0xaf9d9715, 0xf76de6b2, 0xcce8e8ff, 0x370e3800, +0x493675eb, 0xd8fbcbda, 0xa5b382c2, 0x86c8f1ea, 0x3d724ea4, 0xb067034c, +0x6491d87e, 0x1a745ce4, 0xbb27180b, 0x1a2f0acc, 0xac4b7b3b, 0xe324578b, +0xc87928df, 0x9c1de566, 0x0ce2a17d, 0xaf2e13ce, 0x146a8659, 0x8727f6ae, +0xe2df7d03, 0x1a8e4cb4, 0xfa590976, 0x13a7c236, 0xc07489d0, 0xbe905e17, +0xafeb3d4b, 0x201e73f2, 0x5bdca12e, 0x3e15a852, 0xbcfc3271, 0x5d398410, +0x6bfacc15, 0x011fc61f, 0x43e43fd7, 0x0640717c, 0x96bfb3ff, 0x158eac19, +0x3b852e91, 0x74f9ceda, 0xcac71326, 0xfc0e312a, 0x20e8137b, 0xa1162611, +0x239ac7fe, 0xb9d00f8a, 0xea0b5241, 0x019f0d25, 0xc5153264, 0xb48a5547, +0xe54e339f, 0x17a6cca5, 0x5065af0d, 0x5ce648b9, 0xb457b342, 0xc1cb3f0e, +0x28d03c8b, 0x5144ed7a, 0xdb80779f, 0x53ce1b87, 0xbc071147, 0xbfe46c11, +0x7296785e, 0x83e4a63e, 0xc58982e9, 0x9538c5b9, 0xf14abaaa, 0xd915124c, +0x73540cd6, 0xe333696b, 0x58f9e00a, 0xd4dad10f, 0xc0de1420, 0x355e2bdc, +0xb2faa8fd, 0xbe6a12f1, 0x45d415cc, 0x47f5aed9, 0x4754e770, 0x2bb07385, +0x41374352, 0xf80beb47, 0xef02f35c, 0xc9c1b86e, 0x94b5785b, 0xba33123f, +0x7e39f0c9, 0x028a9286, 0x7d52c9f1, 0x06f04da6, 0xbc6a68d1, 0xfc70bace, +0x95b6a129, 0xfff224bb, 0x701ef3fb, 0x3309286f, 0x544ae8c1, 0xdca62c4a, +0xf8862ee2, 0xf9e3cd29, 0x2c07cce2, 0x8d93652a, 0xf47e4611, 0x4635f586, +0x1c03e0f4, 0x819724c7, 0x96b2a3f0, 0xeeb1ad95, 0xff08e517, 0xbd4ba6ed, +0x49ddb12d, 0x365734b5, 0x5edf7459, 0x2ee117a9, 0x067b9462, 0x549f6d5f, +0xe1dd8309, 0x12bb8e0a, 0x4382fe0f, 0x57fce11b, 0x1bc1c809, 0xd2741876, +0xcffd36b9, 0x45a64a67, 0xb77955d5, 0xb5825f24, 0x86eef2a8, 0xb66cac3b, +0xfec661f8, 0x531d5963, 0xa0a2f109, 0xe1795b68, 0x6bfd44e0, 0x0849af41, +0xf56d7d73, 0x3eab025f, 0xd109b015, 0xec24d23b, 0x8b3f603c, 0x648f421f, +0xc833f32a, 0x53cbda14, 0xb9b3fee4, 0xcf9ac8ab, 0x300f0548, 0xfe0bc595, +0x4f437a1a, 0xdacfae1f, 0x284e4a6d, 0x57815e28, 0x15d32b39, 0x68f85b97, +0x18b21602, 0x941259ef, 0x68598d90, 0x6dfd81db, 0xa8fc9a55, 0x60fef2b3, +0xcf07961b, 0x11b96588, 0xd9928a88, 0x2bbc72fd, 0x025f81b4, 0x44794d6d, +0x6a7420aa, 0xa73d4d89, 0x25b523ff, 0x521dbea3, 0xdacde374, 0xbf41776f, +0x9c1e3cb6, 0xb7b3499b, 0x8e154ed3, 0x1ac8f8c8, 0x4f2cf203, 0xa293c979, +0x2792a2e1, 0x329ca1dd, 0x01e1aa4a, 0x01a78d3e, 0x100c74ed, 0xb0db13c7, +0x99d53541, 0xd435a3b9, 0x898bd713, 0x069287bd, 0xe3175d24, 0xb09dc7fb, +0x7340d7d3, 0x81369cc3, 0x8f16a7a8, 0x43963d57, 0x58e0f6a3, 0x39803b6c, 
+0xa419a5ac, 0xf45fd6fa, 0xec389d5b, 0x636ce34f, 0x71405df6, 0xfc504724, +0xa3a3b192, 0x19df1379, 0xaf028f42, 0x3e1838e8, 0x6b7a64a9, 0xaa90ffee, +0x238ee398, 0x67eae4bd, 0x0a549a15, 0x125873d3, 0x00ece005, 0x9f7b2b8f, +0x7571f73c, 0x98940ab4, 0x192ed328, 0x8fae66da, 0x9063b323, 0x2542f666, +0x07ac7c8e, 0xce92857a, 0x3ded4b69, 0x50204cb4, 0x2a81cd17, 0xfd6ac536, +0xafb46424, 0xe802147b, 0x30a37858, 0x90697235, 0x1606e767, 0x26c1048e, +0x3a20df7b, 0xe127de97, 0x481abd6c, 0xc3a61d15, 0xbb2e929c, 0xd2ec0d65, +0xc0e53693, 0x500c2e9a, 0x316d0f4b, 0x18354452, 0xb2dd7728, 0x5a874b3e, +0x6dcf6f9c, 0x9d7c15ce, 0x1b25566d, 0xe13d6e7a, 0x9d9d06ae, 0x4d0e7d23, +0x33100e24, 0x7c20e0f8, 0x2cfc95bf, 0xb8e702a0, 0x5e60215b, 0xa516093e, +0x4cf3bed7, 0x4a0d6f56, 0xaf4901cc, 0xaeb7137e, 0x07d3c34d, 0x38641bc6, +0xf7366ca3, 0x6e01bd0d, 0x86818282, 0xf0f8712a, 0x431f57ad, 0x46b832c3, +0xec1893b8, 0xfa43c94f, 0x8b4c382a, 0x84cc0cec, 0x3f1016f3, 0xdbc7b88d, +0xa3c34e52, 0x50691492, 0x8b10e709, 0x5c5db670, 0xcda5888b, 0xa1ef2917, +0x1d4689ba, 0xc6470dcc, 0x7010373d, 0x4f27b426, 0x06b0de9d, 0xba83e005, +0x97cd5aad, 0xc1d0f41a, 0x01bae35a, 0xb30c805e, 0x8d7982eb, 0xe1a7b51d, +0x88a9f4d2, 0x7a6742a8, 0x0d9cb569, 0x06e9bc87, 0xf8c93b93, 0x89f3e80a, +0x96b971de, 0xcd548ef1, 0x5fbeb12a, 0x39c798e3, 0x90183352, 0x9b2476a7, +0x9437b0ae, 0x1f8045af, 0xf6ff8634, 0x019ec993, 0x0116e82e, 0x84ff34c8, +0xf1be20f9, 0x455510e4, 0x61cdbafe, 0x6f560fc5, 0x39fd9d2d, 0x54e343b2, +0x90448ad2, 0x24c4f180, 0x0fe4f0a7, 0x379ac6a0, 0x61b383f0, 0xb2228e0c, +0x223bc044, 0xcfbc389d, 0xae6a40e7, 0x8d445c62, 0x045eaf6f, 0x07df18c6, +0x4c057e85, 0x589113e6, 0xa3bf554e, 0x702b193b, 0x23af7d6a, 0xb98022ea, +0x96c45ca1, 0xe3b8c170, 0xed7bb4ac, 0x8a2b8f47, 0x6b57db19, 0x9d24d458, +0x054c66bf, 0xf2630696, 0x43524b13, 0xcd419b2a, 0xa248521e, 0xd7765fb3, +0x99fbe34e, 0x3b9ebb4c, 0xf7bfeb6c, 0xaf12561e, 0xc90c7f0d, 0x067ed7e9, +0x9102babe, 0xccc09846, 0x691fb1ed, 0x9b850f96, 0xaba5f22a, 0xcad31d58, +0xb1f25b0b, 0xa55fe490, 0x41e62678, 0xba3e83dd, 0x09d8a9dc, 0xe2f76f22, +0x6435706e, 0x81a0ef55, 0xb2790ad1, 0x793587d1, 0xebe10cc7, 0xde5fe03a, +0x308948bb, 0x439ece5c, 0x19fbef01, 0xb0de1b53, 0x95f4f310, 0xe6a7c965, +0x7b2e10a3, 0x13cf3df9, 0xed339445, 0x49a7728d, 0x4f226909, 0x63662893, +0x101c682b, 0x4dfd9492, 0x8315be80, 0x4adf6517, 0x4d11d338, 0x4881c0e6, +0x8e7886c4, 0x21c0c87a, 0x0d0fc0b7, 0xf0cc5a6d, 0x67c31f97, 0x06dd093c, +0x02c01a49, 0x8d6c5b62, 0x158056ad, 0x704ed553, 0xdfeaaa13, 0x5e4b40a1, +0xc3e6fab3, 0x9650ced9, 0xfe5c220c, 0x4bedb3bd, 0x07d0bd87, 0xef4a4fbd, +0x70d9324e, 0xd003ac29, 0x9fbea4fd, 0x81779460, 0xee9f96ad, 0x9b3faab4, +0xdcf35f55, 0x42e9fb1e, 0xa7398183, 0xc86a0769, 0x9b45aa5f, 0x32e84792, +0x4214845c, 0x62abf543, 0xf07ae499, 0x7b556d8c, 0xa42ac1ba, 0x0eef6f2d, +0x118ef8ac, 0x61597adc, 0xf7a00760, 0xb92c51c5, 0x049008b9, 0xc9da8a87, +0xdc48c672, 0xc4b39254, 0x204c8db4, 0xb2ef5950, 0xac687c00, 0x158dc3cd, +0x1c65e3b2, 0xf222864f, 0x44ee498a, 0x8921ce44, 0x0a8d112d, 0x7325da3c, +0x6aec7bb9, 0x6ca31b05, 0xa5febba9, 0x166c4771, 0x920dcda3, 0x8b2e615e, +0x3b392fa5, 0x9b177b4d, 0x8b0dfa1b, 0xdc16ab05, 0x4eeac34b, 0xf2d2fc8a, +0x6f04a6fb, 0xa14bda63, 0x8f194775, 0xf248e4a2, 0x4e7dd6f2, 0x4401cb93, +0xf693a2eb, 0xcb7e1637, 0x5d9abbd1, 0x613f63bd, 0xfb5fd56f, 0x70c5ea9b, +0xcc6c7a6b, 0xefaee1bd, 0x5d396541, 0x99b12bf4, 0x20873a04, 0x53961f78, +0x3c4f04ed, 0x4bd36330, 0xcdd5892e, 0x317de01b, 0xa3d53af7, 0xfb36a87b, +0xe55b4361, 0xf838e923, 0x9d91a30e, 0xd7d8357b, 0x28e5173c, 0x00fad75f, +0x39800da1, 0x894433ff, 0x1a62cdd7, 0x618b70b6, 
0x60d770de, 0xbaed0285, +0x7bd24f54, 0x96a33975, 0x6884bd60, 0xeb75cf39, 0x19dd058a, 0xd4b8f064, +0x045178ca, 0xd9e3c1de, 0x91b494a6, 0x3316ac81, 0xb926c7d8, 0xc190f058, +0xce70a42c, 0x54f3b3c4, 0x79b20d90, 0x75177ad7, 0x9f010f51, 0x889a4fd5, +0x8fbbc8f0, 0xf1340040, 0xc22acb69, 0xe1473a0a, 0x52ddd7e3, 0x4cd54494, +0x4eb7a9ed, 0xd35e6a65, 0x3236b14c, 0x89786899, 0x3dc59704, 0x51d674be, +0xbcf13659, 0x6ef6266e, 0xf50ef497, 0x141354bc, 0xc8a42e76, 0x59b14233, +0x8b6cad7d, 0x061d8efe, 0xaca42513, 0x161cee21, 0xfeac5471, 0x4c499d31, +0xac2892cb, 0x9bacf4c5, 0xfff36588, 0x898f7a06, 0xad87be87, 0xebe7cb95, +0xa52b5fdf, 0x9f1f3945, 0xad435317, 0x1b316bcc, 0x8e6c0fd3, 0x767981d6, +0x1f86412b, 0x32b457f5, 0x30ab14db, 0x7120e8ae, 0x3972f57e, 0x2268af9d, +0xf45e5738, 0xc48bd523, 0x76726cae, 0x10bcdaa2, 0x49dcbe96, 0x8e28c545, +0x25fd1cc8, 0xc272f106, 0x1a34497e, 0x5f850d98, 0x92e324e4, 0x7e59f811, +0x0bba7ff0, 0x86b3b23f, 0x29558cee, 0xcbd55b0d, 0x15c10dc2, 0xd6617dd5, +0x41168898, 0x920132eb, 0xd10f51c6, 0xdc6eba51, 0xa07f89cc, 0xfcea8627, +0x44a52afa, 0x03d4230b, 0xdbb998d8, 0xf30d2a06, 0x08d35aeb, 0x9597f9bc, +0x720d2689, 0xf64314dd, 0xcadd04c5, 0x9cab9213, 0x22205cca, 0xd6cc196e, +0xa820e3c8, 0x7c262f92, 0xaf481f6b, 0xcecc0fe6, 0x2ec0b33a, 0x20aebc0a, +0xb5699b4b, 0xe0a559ba, 0xb9cd5224, 0x9f7ceb48, 0x35807e3c, 0x30be0794, +0x05463d41, 0x0e10046f, 0x776e71d6, 0xc6bac22d, 0xfee7f911, 0x3bc76715, +0x5024f666, 0x88a07482, 0x7dc55ed7, 0x3ee1d3b6, 0xec419af3, 0x4cd0e39f, +0x988f3802, 0x23baa080, 0xef9ce568, 0xf46ff60f, 0x2efcb093, 0x23b5b12a, +0xd5fb68c7, 0x58d45968, 0x6dce76cd, 0xf82c5a91, 0xfe6a2bf7, 0x0303b383, +0xf1b1196a, 0xa18d6d69, 0xb0025e33, 0x3f51e6c7, 0x0263bac1, 0x87743b29, +0x8f62b573, 0x7a80ad05, 0x95869387, 0x33e6a0f5, 0x50950449, 0x5da456ad, +0xb2ab1617, 0x7b345bf9, 0x7f923fd1, 0x4ca337fe, 0x95ccf61b, 0xfd8221ca, +0x0f19808f, 0x80029266, 0x915a3c47, 0x415045d7, 0x92f91b36, 0xd23576e5, +0x74a5652e, 0xe3d7288c, 0xba8213f7, 0x3a0ecd53, 0xa34fa01a, 0x62b4db3b, +0x3a96a793, 0xcef7b0da, 0xb64b8d17, 0x8716c3ed, 0xbd0ab055, 0xa6896492, +0x1250e75b, 0xb2bce1a7, 0x4100ca8f, 0x2e05625f, 0x8eb5bc83, 0x3dac8027, +0x62b3376d, 0xd6b2cc64, 0x4c9502dc, 0x6c1cfcbe, 0x380c1dce, 0x2b238ea8, +0x7051d59b, 0x580b122d, 0x6174b6e5, 0x4353d726, 0xba555765, 0xb645490f, +0x58427e5f, 0x06fc96e1, 0xae8f4528, 0xab0fee5e, 0x5de960b3, 0x63e79c19, +0x8552eeb8, 0x9344db26, 0x3326d17c, 0xa4174885, 0x6fe45472, 0x7bd20a9e, +0xa5e4aba0, 0x77b509af, 0x22f64240, 0x40bc5144, 0x57ca1b2d, 0xc1260090, +0x008997aa, 0xe8029716, 0x6efb01f3, 0xc73e1366, 0x9de737ff, 0x1b5fabe8, +0x957bb682, 0x5e27f5a5, 0x7777491a, 0x9b6fc6c1, 0xb0802205, 0x16e94d46, +0x58079aee, 0x2b24c4c4, 0xfffb2e7b, 0x7c5560e1, 0xe1ad3009, 0x6a0c0027, +0x09aea741, 0x9af375a6, 0x7106b7bf, 0x1cdcff69, 0xa007f9c2, 0x6f7729c0, +0xeac4297f, 0x8622ede7, 0x89346bfa, 0xc24674b5, 0xd1eabf00, 0xfa6b2d39, +0x04b0c153, 0x6adf49dc, 0x48ea0937, 0xae771ea6, 0xd4208255, 0x4052586d, +0x9c6aa640, 0x9adbb652, 0x017d8661, 0xb20855f0, 0x1e2ed7b6, 0x70af7f25, +0x354df0de, 0xf2fdfde7, 0x0844478c, 0x63993fb2, 0xc49e71ae, 0x1e15ae81, +0x932ad241, 0xba83cf8f, 0xc7e227f3, 0xd1171a85, 0x63ea34a0, 0xe74f042b, +0xd88173d9, 0xba7d8443, 0x9d47c6c4, 0xbe282a93, 0x6142fbac, 0x69307e44, +0xbb86a7f1, 0x9fa3418f, 0x3a3eadce, 0x9804167b, 0x2d5eb47a, 0x5aa2372d, +0x66eb8e1c, 0xf56da1fd, 0x3753eee9, 0xac78c709, 0x787ae648, 0x42d4fa7f, +0x5a036af5, 0xfc6dfa54, 0x269f1195, 0xef1c698d, 0x2932b2d6, 0xba10b26c, +0xf9b58396, 0x8f04a38a, 0x31c1264d, 0xf11c5ac6, 0x693d8ef6, 0x25b1d015, +0x01c06e8f, 0x9c250a6f, 
0x4b8fffcd, 0x1eff0743, 0x5a4974ec, 0x9d2d24c1, +0x5562b2ae, 0x2a5b2deb, 0xd4a32596, 0x8ec43665, 0xde3ae654, 0x99e979d0, +0xb6250c7c, 0x478a2891, 0xe1d6c035, 0x1dc453e8, 0xc7490c8d, 0xd4e85297, +0x8fa94952, 0xc863cde8, 0x724dbbc8, 0x89ba9607, 0x0144ba1b, 0x3fed833c, +0xd16278d4, 0x0080f227, 0xfbe241c5, 0x61892e7f, 0x503dced0, 0xa54d6ea5, +0xcb5024a3, 0x591e11d1, 0xabeb85db, 0xc8314e58, 0xfb7792cb, 0xc70e5ada, +0xcc07e739, 0xdb3ad4d6, 0x7a2d24e8, 0x2a0e216a, 0xa04aa774, 0x1762ac59, +0xed53f5f9, 0x261a791e, 0x7a4102ae, 0x4650dcc9, 0xffa48ffa, 0x35a8f4e1, +0x98e0f041, 0x3e3a9a83, 0xfb43c8e9, 0x7e0e6a1e, 0x06f718e5, 0x6812c114, +0xaf21e4a1, 0x10ce04ca, 0x6e465a05, 0x250758f2, 0xfdf02f68, 0x426ec74c, +0x51e44567, 0x17e637cc, 0x9da657e4, 0x39ecef6e, 0xd31c4423, 0x12bc06b4, +0x35af82e0, 0x76379d09, 0x7fb7aa1e, 0x0b73e469, 0x7b3697a4, 0x6bbbca38, +0x018d2e07, 0x8d74a290, 0x02e00314, 0xdc9d1f4f, 0x62cbdd73, 0xd1173c86, +0x62c1d5f2, 0xdfb16100, 0x37cc28e8, 0x43954e64, 0xeaa699d9, 0x0cd0a997, +0xa0f26045 + +ea = +17868 + +eb = +17868 + +cab = +2 + +c = +2 + +c_neg = +0 + +k_pos = +5952 + +k_neg = +5888 + +ncb_pos = +17952 + +ncb_neg = +17760 + +r = +0 + +rv_index = +0 + +code_block_mode = +0 + +op_flags = +RTE_BBDEV_TURBO_CRC_24B_ATTACH + +expected_status = +OK diff --git a/app/test-bbdev/test_vectors/turbo_enc_c3_k4800_r2_e14412_crc24b.data b/app/test-bbdev/test_vectors/turbo_enc_c3_k4800_r2_e14412_crc24b.data new file mode 100644 index 00000000..9e17429d --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_enc_c3_k4800_r2_e14412_crc24b.data @@ -0,0 +1,153 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_ENC + +input0 = +0xb0fe86aa, 0xffbf2968, 0x4f55a631, 0xf2152dbc, 0x195a49a5, 0xa4aa7c6c, +0x48a7be38, 0xd32c84f2, 0xb040da60, 0xb39eb07c, 0x3443aad6, 0x19f52e13, +0xb2914341, 0x0d5dfdb3, 0x1ba8aec0, 0x62d0c7be, 0x1d55475c, 0x10f7dd79, +0x967e5333, 0xc0d524c1, 0xdca8da87, 0x1146ecf7, 0xfa09a85c, 0x83be800f, +0x11b814cc, 0x362a4704, 0x4f22e7ce, 0x9985a0e2, 0x9de4be72, 0x488a45b3, +0x6996ade2, 0xdb18c09a, 0x487d62a3, 0x94f82d13, 0xa0743f83, 0x75d2f5dd, +0x0e470346, 0x6e2ec2e5, 0x1a67fe5f, 0xc369330d, 0x860e8766, 0x42c110f6, +0x804a9985, 0x55db02b4, 0xb899fc5c, 0x57811d20, 0xda4f1f9d, 0x2a2efc30, +0x953e7487, 0x4b0c3065, 0x247d5d47, 0xdde382e7, 0x1d6d2ad7, 0x4358ac24, +0x2d0fd97f, 0x1483821e, 0xf6c74ee5, 0x23b5c2f4, 0x95d4a5fc, 0x486f370b, +0x8dbae43d, 0x248f17d5, 0x2df76099, 0xd19ec872, 0xf592aa0e, 0x8de04c3c, +0x3ada94fb, 0x9dd001a3, 0x2b4db6d1, 0x791cf495, 0x57dc764b, 0x21789f1f, +0x0d38adf5, 0xd8cefdc2, 0xed98f8bf, 0x024bc4ed, 0x4f9d3062, 0x332d27c5, +0xde70ad4e, 0xd85872d2, 0x5d0e29c0, 0xa4fd1887, 0x3fa67ba1, 0x46b9b008, +0x055bbf61, 0xf78e59f0, 0x287d2e3b, 0x5b473fe7, 0xd7ad6e04, 0x9cdf4986, +0x830432ae, 0xffded1ba, 0x35c00f77, 0xcf0a6648, 0x196c03aa, 0xba4e2dd3, +0xdaf70fe4, 0x883de2cd, 0xedc3b52e, 0x14e638c1, 0xe607103a, 0xc4cfb700, +0x4ed9843d, 0xc442a0cb, 0xc0d3bba2, 0x36789ba8, 0x9c9b7a24, 0xb943b6a4, +0x8f2c6879, 0x0fb16a0c, 0x2677bada, 0xd14f2f7e, 0x662ec398, 0x1e37b7ae, +0x8ad0312d, 0x21adb6a1, 0x072a11fd, 0x7ae828d1, 0xb510655f, 0x09f7b477, +0x10a175fc, 0xb7086f51, 0x24546af3, 0xfae81b50, 0xcb328832, 0x63693615, +0xa37c3678, 0x15fb6364, 0xb5f75a78, 0xfa9aa5f6, 0x03804a0e, 0x1d245890, +0x06d6cdcd, 0x7485a3c7, 0x35a008ae, 0x1abeaf1c, 0xd9c55966, 0x2b694666, +0x5341f2df, 0xcb647280, 0xfb88d81e, 0xf99a44d0, 0xb0585534, 0x0877b530, +0x752ab39a, 0xd493d8bc, 0xfaa6a57a, 0xccb7c008, 0x9c35994b, 0xb8 + +output0 = +0xb0fe86aa, 
0xffbf2968, 0x4f55a631, 0xf2152dbc, 0x195a49a5, 0xa4aa7c6c, +0x48a7be38, 0xd32c84f2, 0xb040da60, 0xb39eb07c, 0x3443aad6, 0x19f52e13, +0xb2914341, 0x0d5dfdb3, 0x1ba8aec0, 0x62d0c7be, 0x1d55475c, 0x10f7dd79, +0x967e5333, 0xc0d524c1, 0xdca8da87, 0x1146ecf7, 0xfa09a85c, 0x83be800f, +0x11b814cc, 0x362a4704, 0x4f22e7ce, 0x9985a0e2, 0x9de4be72, 0x488a45b3, +0x6996ade2, 0xdb18c09a, 0x487d62a3, 0x94f82d13, 0xa0743f83, 0x75d2f5dd, +0x0e470346, 0x6e2ec2e5, 0x1a67fe5f, 0xc369330d, 0x860e8766, 0x42c110f6, +0x804a9985, 0x55db02b4, 0xb899fc5c, 0x57811d20, 0xda4f1f9d, 0x2a2efc30, +0x953e7487, 0x4b0c3065, 0x247d5d47, 0xdde382e7, 0x1d6d2ad7, 0x4358ac24, +0x2d0fd97f, 0x1483821e, 0xf6c74ee5, 0x23b5c2f4, 0x95d4a5fc, 0x486f370b, +0x8dbae43d, 0x248f17d5, 0x2df76099, 0xd19ec872, 0xf592aa0e, 0x8de04c3c, +0x3ada94fb, 0x9dd001a3, 0x2b4db6d1, 0x791cf495, 0x57dc764b, 0x21789f1f, +0x0d38adf5, 0xd8cefdc2, 0xed98f8bf, 0x024bc4ed, 0x4f9d3062, 0x332d27c5, +0xde70ad4e, 0xd85872d2, 0x5d0e29c0, 0xa4fd1887, 0x3fa67ba1, 0x46b9b008, +0x055bbf61, 0xf78e59f0, 0x287d2e3b, 0x5b473fe7, 0xd7ad6e04, 0x9cdf4986, +0x830432ae, 0xffded1ba, 0x35c00f77, 0xcf0a6648, 0x196c03aa, 0xba4e2dd3, +0xdaf70fe4, 0x883de2cd, 0xedc3b52e, 0x14e638c1, 0xe607103a, 0xc4cfb700, +0x4ed9843d, 0xc442a0cb, 0xc0d3bba2, 0x36789ba8, 0x9c9b7a24, 0xb943b6a4, +0x8f2c6879, 0x0fb16a0c, 0x2677bada, 0xd14f2f7e, 0x662ec398, 0x1e37b7ae, +0x8ad0312d, 0x21adb6a1, 0x072a11fd, 0x7ae828d1, 0xb510655f, 0x09f7b477, +0x10a175fc, 0xb7086f51, 0x24546af3, 0xfae81b50, 0xcb328832, 0x63693615, +0xa37c3678, 0x15fb6364, 0xb5f75a78, 0xfa9aa5f6, 0x03804a0e, 0x1d245890, +0x06d6cdcd, 0x7485a3c7, 0x35a008ae, 0x1abeaf1c, 0xd9c55966, 0x2b694666, +0x5341f2df, 0xcb647280, 0xfb88d81e, 0xf99a44d0, 0xb0585534, 0x0877b530, +0x752ab39a, 0xd493d8bc, 0xfaa6a57a, 0xccb7c008, 0x9c35994b, 0x47ee26b8, +0x94f624ac, 0xa3bd4876, 0xf7d4854e, 0x8871d433, 0x9321d942, 0x7b626be8, +0x72c934b0, 0x3b7af8a4, 0x5102c29f, 0x710e4dbc, 0x99708292, 0x1458c4c1, +0x61026bc5, 0xe776e388, 0x4a0222b3, 0x760e5aaf, 0x662f3583, 0x5ab1005b, +0xe527ef70, 0x4170d611, 0x307bebc4, 0xfdd00caf, 0xbaae1044, 0xcab4d459, +0x38281dcf, 0x90580c89, 0x49cf5986, 0xa27da769, 0xceced49b, 0x5ea37953, +0x8a7e6c1c, 0x1e01b4e2, 0xe08026ae, 0x3754534a, 0x903b0ecf, 0x65f97a55, +0x90798ed9, 0x9d1133bc, 0xe356a39f, 0xe47acbce, 0x01ccf326, 0x1954fd3d, +0x240e69f8, 0x1da20bb4, 0xe1ab1684, 0x44c65d48, 0xd265e6c2, 0x51d4ef07, +0x000970ef, 0xfeb939f4, 0x5dcc0132, 0x2bd27ae5, 0xba5dbd25, 0xa9d190e4, +0x61556bec, 0x7fd6caba, 0x7fe312cb, 0xdd319413, 0x92a5dbbf, 0x17e61915, +0x56067b67, 0x3dc348c0, 0x58c17fe0, 0xbe6bffcc, 0xd379026c, 0xc4174780, +0xcce7f026, 0xb74b7eb0, 0xe4d5f625, 0x6bc16d3b, 0xff3e300e, 0x83f0d55a, +0xf2e537df, 0x75c18f78, 0xa5458d1a, 0x47c778b0, 0x92eb8716, 0xcb5816fd, +0xc2cc7079, 0xa7f1dc75, 0xed9e5ffa, 0xb7d6747f, 0xa2dc6907, 0x99b4d187, +0x69f3138a, 0xbd285a1f, 0xb8ee18e0, 0x734b77a5, 0xc0700f69, 0x6c72a77a, +0x76609ba9, 0xcfae9b73, 0x2cd329f0, 0x0d45aa12, 0x419fbcd7, 0x03e00f00, +0x0effee99, 0x7f879eb4, 0x29a4c8df, 0x4432400a, 0x22b9cf55, 0xa1c4c645, +0x200251b0, 0xff293906, 0xe11288e2, 0xde1e8ec7, 0x675752d1, 0x9630743e, +0xd848c67e, 0x1eedfda9, 0x58b954fc, 0xa0dc7f1c, 0x0aea313e, 0x062b9449, +0x0f17e57f, 0x96def6ec, 0x2ba45f14, 0xd9a5f8c5, 0x231483c8, 0x29b8783e, +0xa6d24594, 0x1a62ffcd, 0x54c87a7f, 0x6fa9ca9a, 0xfff0f890, 0x51d6ae46, +0xa96e82b0, 0x68cd9808, 0x56214df5, 0x2169defa, 0x72c1ecce, 0x2448e3c9, +0x8eb2b3b3, 0x62c89c11, 0xbba20ee2, 0xea33f53a, 0x8b96a6b8, 0x9d33d551, +0x363bd14e, 0x5fdd2b5f, 0xf8187546, 0xb692beab, 0x8df6b4c0, 
0x753e2302, +0xb90f7a37, 0x2b6bac6a, 0x13a07bc5, 0xb67f6d2b, 0x47b21569, 0xc2ab143f, +0xf86ce30a, 0xde2dab70, 0x6f258860, 0x2878735c, 0x2aaeac20, 0xda80fb3e, +0xe7a8ccf6, 0xbf011844, 0x40a610f8, 0x82f3fdf7, 0xadc4ec2c, 0xb8551030, +0x76004380, 0xa384ff18, 0x080cf587, 0x6c4991d3, 0x2daea9aa, 0xa92d0c4f, +0x4a9d117e, 0x2761f025, 0x96b40443, 0x57a82be4, 0x62374c44, 0x55dc64ae, +0x28aa9f0c, 0x03a3b963, 0x41dbaa26, 0xb3c23735, 0x971cbd31, 0x939a9f80, +0x76439fdc, 0xa9df79d6, 0x926ae3e2, 0x5ee75745, 0xf2396e52, 0xe18bd816, +0x3f9834b9, 0x816a07bc, 0x8f873310, 0x45cf9b96, 0x0ce634dd, 0xe64a16d9, +0x2733775a, 0x2b648c7e, 0xe600ee8e, 0xd99d8ae3, 0x10dadf2a, 0x904d3f87, +0x3963e9e7, 0x47fcce89, 0xc256c898, 0x7db6cb66, 0xe1611a8b, 0xed81b10d, +0x75eff974, 0x8a0a3d67, 0xa44311ff, 0x6f876783, 0x43dc93ce, 0x88616a33, +0xa8706e8f, 0x33a2cbed, 0x3a6d20c3, 0x55175086, 0x39680945, 0x2d779a7b, +0x0818a4ce, 0x55558918, 0xf7c08d35, 0x980a3038, 0x9cf068db, 0x3385d333, +0xd81b33fb, 0x188018d5, 0x47c57bd3, 0x9ec2324f, 0x6901cd77, 0xc3bac44f, +0x4d96aba8, 0x9094da5b, 0xa67a1353, 0x1fdfc4db, 0xa2fbeefa, 0xab4828e3, +0x37d1db45, 0x0d33b3e9, 0x1ad72bb9, 0x7257bf9c, 0x2ec35167, 0xa4488b7f, +0xf9dae588, 0x1038905a, 0x88ddf410, 0xaac11693, 0x24ac025d, 0x56cefbb5, +0x6afe7f59, 0xc7f989e8, 0x15872570, 0x1bf16cdb, 0xfe9c93ce, 0x1fc9a076, +0x85d37185, 0x1078cd31, 0xe1cd0327, 0x6d5315bc, 0x298cd836, 0xc8e21f06, +0xe561c32d, 0x8ec404b4, 0x4d39bfbb, 0x24ede8c8, 0x451d6034, 0x3bafeea2, +0x202f0ccf, 0x1fad37ce, 0xf04b5693, 0xeee57cd9, 0x5ef70007, 0x018e8a4f, +0xfa61c9a9, 0x09989fcf, 0xe66b558b, 0x966efd48, 0x7525021d, 0xe7978b5e, +0x7eb1d6dc, 0xa10c5e5b, 0xb7815e69, 0x7d486cfb, 0xcffdeb2a, 0x7375cb32, +0x599008dc, 0xff91c796, 0x560ed4ad, 0x14e9a876, 0xfccf6a66, 0xa58be028, +0xea9d408b, 0x3afc373b, 0xee008458, 0x19b6042e, 0x84806314, 0x431a4ba4, +0x009ad6a1, 0xd7c62bf4, 0x1bebecba, 0x5c662f69, 0x83bfdea1, 0x45872a9a, +0x00c9 + + +ea = +14412 + +eb = +14412 + +cab = +4 + +c = +4 + +c_neg = +0 + +k_pos = +4800 + +k_neg = +4736 + +ncb_pos = +14496 + +ncb_neg = +14304 + +r = +2 + +rv_index = +0 + +code_block_mode = +0 + +op_flags = +RTE_BBDEV_TURBO_CRC_24B_ATTACH + +expected_status = +OK \ No newline at end of file diff --git a/app/test-bbdev/test_vectors/turbo_enc_c4_k4800_r2_e14412_crc24b.data b/app/test-bbdev/test_vectors/turbo_enc_c4_k4800_r2_e14412_crc24b.data new file mode 100644 index 00000000..ff95a3f9 --- /dev/null +++ b/app/test-bbdev/test_vectors/turbo_enc_c4_k4800_r2_e14412_crc24b.data @@ -0,0 +1,252 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_ENC + +input0 = +0xb0fe86aa, 0xffbf2968, 0x4f55a631, 0xf2152dbc, 0x195a49a5, 0xa4aa7c6c, +0x48a7be38, 0xd32c84f2, 0xb040da60, 0xb39eb07c, 0x3443aad6, 0x19f52e13, +0xb2914341, 0x0d5dfdb3, 0x1ba8aec0, 0x62d0c7be, 0x1d55475c, 0x10f7dd79, +0x967e5333, 0xc0d524c1, 0xdca8da87, 0x1146ecf7, 0xfa09a85c, 0x83be800f, +0x11b814cc, 0x362a4704, 0x4f22e7ce, 0x9985a0e2, 0x9de4be72, 0x488a45b3, +0x6996ade2, 0xdb18c09a, 0x487d62a3, 0x94f82d13, 0xa0743f83, 0x75d2f5dd, +0x0e470346, 0x6e2ec2e5, 0x1a67fe5f, 0xc369330d, 0x860e8766, 0x42c110f6, +0x804a9985, 0x55db02b4, 0xb899fc5c, 0x57811d20, 0xda4f1f9d, 0x2a2efc30, +0x953e7487, 0x4b0c3065, 0x247d5d47, 0xdde382e7, 0x1d6d2ad7, 0x4358ac24, +0x2d0fd97f, 0x1483821e, 0xf6c74ee5, 0x23b5c2f4, 0x95d4a5fc, 0x486f370b, +0x8dbae43d, 0x248f17d5, 0x2df76099, 0xd19ec872, 0xf592aa0e, 0x8de04c3c, +0x3ada94fb, 0x9dd001a3, 0x2b4db6d1, 0x791cf495, 0x57dc764b, 0x21789f1f, +0x0d38adf5, 0xd8cefdc2, 
0xed98f8bf, 0x024bc4ed, 0x4f9d3062, 0x332d27c5, +0xde70ad4e, 0xd85872d2, 0x5d0e29c0, 0xa4fd1887, 0x3fa67ba1, 0x46b9b008, +0x055bbf61, 0xf78e59f0, 0x287d2e3b, 0x5b473fe7, 0xd7ad6e04, 0x9cdf4986, +0x830432ae, 0xffded1ba, 0x35c00f77, 0xcf0a6648, 0x196c03aa, 0xba4e2dd3, +0xdaf70fe4, 0x883de2cd, 0xedc3b52e, 0x14e638c1, 0xe607103a, 0xc4cfb700, +0x4ed9843d, 0xc442a0cb, 0xc0d3bba2, 0x36789ba8, 0x9c9b7a24, 0xb943b6a4, +0x8f2c6879, 0x0fb16a0c, 0x2677bada, 0xd14f2f7e, 0x662ec398, 0x1e37b7ae, +0x8ad0312d, 0x21adb6a1, 0x072a11fd, 0x7ae828d1, 0xb510655f, 0x09f7b477, +0x10a175fc, 0xb7086f51, 0x24546af3, 0xfae81b50, 0xcb328832, 0x63693615, +0xa37c3678, 0x15fb6364, 0xb5f75a78, 0xfa9aa5f6, 0x03804a0e, 0x1d245890, +0x06d6cdcd, 0x7485a3c7, 0x35a008ae, 0x1abeaf1c, 0xd9c55966, 0x2b694666, +0x5341f2df, 0xcb647280, 0xfb88d81e, 0xf99a44d0, 0xb0585534, 0x0877b530, +0x752ab39a, 0xd493d8bc, 0xfaa6a57a, 0xccb7c008, 0x9c35994b, 0x9cca47b8, +0xff47dda2, 0xa557841a, 0x7459fb6a, 0x93ec6495, 0x0ef7e6d2, 0xa8a6e6d4, +0x46364f91, 0xd52c5776, 0x4dfd0d25, 0xd458c769, 0x24864930, 0xf7789991, +0xfcc231f2, 0xc8e4cfd0, 0x50975038, 0x989398d1, 0x04504b14, 0x5e529570, +0xf8c17202, 0xaf459a8a, 0xac2fecb2, 0x0eb3a176, 0xf6bac24f, 0x6fbddade, +0x42456542, 0x83c10198, 0xbea4efaf, 0xff2f2966, 0x5edae82b, 0x09c27f13, +0xa094c3b4, 0xc53b810e, 0x7f1cc39d, 0x25731c8a, 0x8d34b6ee, 0x5fb78b80, +0x2980eef8, 0x7b76a9fb, 0x37bfacb9, 0x80f7a0ea, 0xd1e3e69d, 0xcda19b8b, +0x9af26246, 0xf55495cf, 0x6f45ad1b, 0xfe2cb329, 0xc33cb7e6, 0x67bbde4a, +0x32310c7b, 0x80e6b00a, 0xdd4004d2, 0x0e0eb8b6, 0x6f8d951f, 0xb196cb40, +0x6dbe29dd, 0xa138a375, 0xd4f5b058, 0x69ea638f, 0x50438a7a, 0xe5530eb3, +0x527cb218, 0xbfa67b9d, 0x7efb86df, 0x724ed65b, 0x38006782, 0x6a9924a4, +0x749955c2, 0x5cb82627, 0xa74ceecb, 0x89eb103d, 0x89a12973, 0x660613c4, +0x8bdf96ae, 0xf08afcce, 0xa4023081, 0x79c6786c, 0x8026bdf0, 0x621f29a1, +0xa8d8d3ec, 0x4ee5d537, 0xda83c99f, 0x9a69d0a8, 0xae027f9a, 0x639d9cc9, +0xe80e2be1, 0x89d78cfb, 0x862fabb7, 0xb248c138, 0x3fe35f29, 0xf8b74cc6, +0x27741dcf, 0xf9236485, 0x05225155, 0x4ed40ee6, 0xb30e4a95, 0x7d890ee0, +0x0a97eb0d, 0xe7877652, 0x3de6fa67, 0xa3a5961e, 0x1bdf2af5, 0x5a7dcbbe, +0x1641f1af, 0x4db5845b, 0x735761f2, 0x32a1431e, 0xab1e00b1, 0x495ddeb7, +0xb480f29c, 0x47d83714, 0x8ba04129, 0x8c96062e, 0x657e7f7f, 0x30dfba23, +0xfd4ee1bc, 0x9ffe10b1, 0xc917d2db, 0x18f0bd8d, 0x4b6c35d7, 0x54896222, +0x7ce0f6c6, 0xb7813f4a, 0x71c52b4b, 0xac65da3a, 0x9aef9b4e, 0x85f9657e, +0x62ba1965, 0xa1e39267, 0x40f66fb2, 0x10433b94, 0x851df333, 0xf0c6aa26, +0xe0e19368, 0x5a706636, 0xf5de1e8c, 0x5ebc735a, 0x0967ef39, 0xf76c1fb7, +0xe53f7f16, 0x51f2c4eb, 0x047dda3d, 0x73518b4a, 0x3ad90dd3, 0xaee51fc1, +0x78f01060, 0x98d1502c, 0x990d5499, 0x7a19453c, 0x104c + +output0 = +0xb0fe86aa, 0xffbf2968, 0x4f55a631, 0xf2152dbc, 0x195a49a5, 0xa4aa7c6c, +0x48a7be38, 0xd32c84f2, 0xb040da60, 0xb39eb07c, 0x3443aad6, 0x19f52e13, +0xb2914341, 0x0d5dfdb3, 0x1ba8aec0, 0x62d0c7be, 0x1d55475c, 0x10f7dd79, +0x967e5333, 0xc0d524c1, 0xdca8da87, 0x1146ecf7, 0xfa09a85c, 0x83be800f, +0x11b814cc, 0x362a4704, 0x4f22e7ce, 0x9985a0e2, 0x9de4be72, 0x488a45b3, +0x6996ade2, 0xdb18c09a, 0x487d62a3, 0x94f82d13, 0xa0743f83, 0x75d2f5dd, +0x0e470346, 0x6e2ec2e5, 0x1a67fe5f, 0xc369330d, 0x860e8766, 0x42c110f6, +0x804a9985, 0x55db02b4, 0xb899fc5c, 0x57811d20, 0xda4f1f9d, 0x2a2efc30, +0x953e7487, 0x4b0c3065, 0x247d5d47, 0xdde382e7, 0x1d6d2ad7, 0x4358ac24, +0x2d0fd97f, 0x1483821e, 0xf6c74ee5, 0x23b5c2f4, 0x95d4a5fc, 0x486f370b, +0x8dbae43d, 0x248f17d5, 0x2df76099, 0xd19ec872, 0xf592aa0e, 0x8de04c3c, 
+0x3ada94fb, 0x9dd001a3, 0x2b4db6d1, 0x791cf495, 0x57dc764b, 0x21789f1f, +0x0d38adf5, 0xd8cefdc2, 0xed98f8bf, 0x024bc4ed, 0x4f9d3062, 0x332d27c5, +0xde70ad4e, 0xd85872d2, 0x5d0e29c0, 0xa4fd1887, 0x3fa67ba1, 0x46b9b008, +0x055bbf61, 0xf78e59f0, 0x287d2e3b, 0x5b473fe7, 0xd7ad6e04, 0x9cdf4986, +0x830432ae, 0xffded1ba, 0x35c00f77, 0xcf0a6648, 0x196c03aa, 0xba4e2dd3, +0xdaf70fe4, 0x883de2cd, 0xedc3b52e, 0x14e638c1, 0xe607103a, 0xc4cfb700, +0x4ed9843d, 0xc442a0cb, 0xc0d3bba2, 0x36789ba8, 0x9c9b7a24, 0xb943b6a4, +0x8f2c6879, 0x0fb16a0c, 0x2677bada, 0xd14f2f7e, 0x662ec398, 0x1e37b7ae, +0x8ad0312d, 0x21adb6a1, 0x072a11fd, 0x7ae828d1, 0xb510655f, 0x09f7b477, +0x10a175fc, 0xb7086f51, 0x24546af3, 0xfae81b50, 0xcb328832, 0x63693615, +0xa37c3678, 0x15fb6364, 0xb5f75a78, 0xfa9aa5f6, 0x03804a0e, 0x1d245890, +0x06d6cdcd, 0x7485a3c7, 0x35a008ae, 0x1abeaf1c, 0xd9c55966, 0x2b694666, +0x5341f2df, 0xcb647280, 0xfb88d81e, 0xf99a44d0, 0xb0585534, 0x0877b530, +0x752ab39a, 0xd493d8bc, 0xfaa6a57a, 0xccb7c008, 0x9c35994b, 0x47ee26b8, +0x94f624ac, 0xa3bd4876, 0xf7d4854e, 0x8871d433, 0x9321d942, 0x7b626be8, +0x72c934b0, 0x3b7af8a4, 0x5102c29f, 0x710e4dbc, 0x99708292, 0x1458c4c1, +0x61026bc5, 0xe776e388, 0x4a0222b3, 0x760e5aaf, 0x662f3583, 0x5ab1005b, +0xe527ef70, 0x4170d611, 0x307bebc4, 0xfdd00caf, 0xbaae1044, 0xcab4d459, +0x38281dcf, 0x90580c89, 0x49cf5986, 0xa27da769, 0xceced49b, 0x5ea37953, +0x8a7e6c1c, 0x1e01b4e2, 0xe08026ae, 0x3754534a, 0x903b0ecf, 0x65f97a55, +0x90798ed9, 0x9d1133bc, 0xe356a39f, 0xe47acbce, 0x01ccf326, 0x1954fd3d, +0x240e69f8, 0x1da20bb4, 0xe1ab1684, 0x44c65d48, 0xd265e6c2, 0x51d4ef07, +0x000970ef, 0xfeb939f4, 0x5dcc0132, 0x2bd27ae5, 0xba5dbd25, 0xa9d190e4, +0x61556bec, 0x7fd6caba, 0x7fe312cb, 0xdd319413, 0x92a5dbbf, 0x17e61915, +0x56067b67, 0x3dc348c0, 0x58c17fe0, 0xbe6bffcc, 0xd379026c, 0xc4174780, +0xcce7f026, 0xb74b7eb0, 0xe4d5f625, 0x6bc16d3b, 0xff3e300e, 0x83f0d55a, +0xf2e537df, 0x75c18f78, 0xa5458d1a, 0x47c778b0, 0x92eb8716, 0xcb5816fd, +0xc2cc7079, 0xa7f1dc75, 0xed9e5ffa, 0xb7d6747f, 0xa2dc6907, 0x99b4d187, +0x69f3138a, 0xbd285a1f, 0xb8ee18e0, 0x734b77a5, 0xc0700f69, 0x6c72a77a, +0x76609ba9, 0xcfae9b73, 0x2cd329f0, 0x0d45aa12, 0x419fbcd7, 0x03e00f00, +0x0effee99, 0x7f879eb4, 0x29a4c8df, 0x4432400a, 0x22b9cf55, 0xa1c4c645, +0x200251b0, 0xff293906, 0xe11288e2, 0xde1e8ec7, 0x675752d1, 0x9630743e, +0xd848c67e, 0x1eedfda9, 0x58b954fc, 0xa0dc7f1c, 0x0aea313e, 0x062b9449, +0x0f17e57f, 0x96def6ec, 0x2ba45f14, 0xd9a5f8c5, 0x231483c8, 0x29b8783e, +0xa6d24594, 0x1a62ffcd, 0x54c87a7f, 0x6fa9ca9a, 0xfff0f890, 0x51d6ae46, +0xa96e82b0, 0x68cd9808, 0x56214df5, 0x2169defa, 0x72c1ecce, 0x2448e3c9, +0x8eb2b3b3, 0x62c89c11, 0xbba20ee2, 0xea33f53a, 0x8b96a6b8, 0x9d33d551, +0x363bd14e, 0x5fdd2b5f, 0xf8187546, 0xb692beab, 0x8df6b4c0, 0x753e2302, +0xb90f7a37, 0x2b6bac6a, 0x13a07bc5, 0xb67f6d2b, 0x47b21569, 0xc2ab143f, +0xf86ce30a, 0xde2dab70, 0x6f258860, 0x2878735c, 0x2aaeac20, 0xda80fb3e, +0xe7a8ccf6, 0xbf011844, 0x40a610f8, 0x82f3fdf7, 0xadc4ec2c, 0xb8551030, +0x76004380, 0xa384ff18, 0x080cf587, 0x6c4991d3, 0x2daea9aa, 0xa92d0c4f, +0x4a9d117e, 0x2761f025, 0x96b40443, 0x57a82be4, 0x62374c44, 0x55dc64ae, +0x28aa9f0c, 0x03a3b963, 0x41dbaa26, 0xb3c23735, 0x971cbd31, 0x939a9f80, +0x76439fdc, 0xa9df79d6, 0x926ae3e2, 0x5ee75745, 0xf2396e52, 0xe18bd816, +0x3f9834b9, 0x816a07bc, 0x8f873310, 0x45cf9b96, 0x0ce634dd, 0xe64a16d9, +0x2733775a, 0x2b648c7e, 0xe600ee8e, 0xd99d8ae3, 0x10dadf2a, 0x904d3f87, +0x3963e9e7, 0x47fcce89, 0xc256c898, 0x7db6cb66, 0xe1611a8b, 0xed81b10d, +0x75eff974, 0x8a0a3d67, 0xa44311ff, 0x6f876783, 
0x43dc93ce, 0x88616a33, +0xa8706e8f, 0x33a2cbed, 0x3a6d20c3, 0x55175086, 0x39680945, 0x2d779a7b, +0x0818a4ce, 0x55558918, 0xf7c08d35, 0x980a3038, 0x9cf068db, 0x3385d333, +0xd81b33fb, 0x188018d5, 0x47c57bd3, 0x9ec2324f, 0x6901cd77, 0xc3bac44f, +0x4d96aba8, 0x9094da5b, 0xa67a1353, 0x1fdfc4db, 0xa2fbeefa, 0xab4828e3, +0x37d1db45, 0x0d33b3e9, 0x1ad72bb9, 0x7257bf9c, 0x2ec35167, 0xa4488b7f, +0xf9dae588, 0x1038905a, 0x88ddf410, 0xaac11693, 0x24ac025d, 0x56cefbb5, +0x6afe7f59, 0xc7f989e8, 0x15872570, 0x1bf16cdb, 0xfe9c93ce, 0x1fc9a076, +0x85d37185, 0x1078cd31, 0xe1cd0327, 0x6d5315bc, 0x298cd836, 0xc8e21f06, +0xe561c32d, 0x8ec404b4, 0x4d39bfbb, 0x24ede8c8, 0x451d6034, 0x3bafeea2, +0x202f0ccf, 0x1fad37ce, 0xf04b5693, 0xeee57cd9, 0x5ef70007, 0x018e8a4f, +0xfa61c9a9, 0x09989fcf, 0xe66b558b, 0x966efd48, 0x7525021d, 0xe7978b5e, +0x7eb1d6dc, 0xa10c5e5b, 0xb7815e69, 0x7d486cfb, 0xcffdeb2a, 0x7375cb32, +0x599008dc, 0xff91c796, 0x560ed4ad, 0x14e9a876, 0xfccf6a66, 0xa58be028, +0xea9d408b, 0x3afc373b, 0xee008458, 0x19b6042e, 0x84806314, 0x431a4ba4, +0x009ad6a1, 0xd7c62bf4, 0x1bebecba, 0x5c662f69, 0x83bfdea1, 0x45872a9a, +0xca4700c9, 0x47dda29c, 0x57841aff, 0x59fb6aa5, 0xec649574, 0xf7e6d293, +0xa6e6d40e, 0x364f91a8, 0x2c577646, 0xfd0d25d5, 0x58c7694d, 0x864930d4, +0x78999124, 0xc231f2f7, 0xe4cfd0fc, 0x975038c8, 0x9398d150, 0x504b1498, +0x52957004, 0xc172025e, 0x459a8af8, 0x2fecb2af, 0xb3a176ac, 0xbac24f0e, +0xbddadef6, 0x4565426f, 0xc1019842, 0xa4efaf83, 0x2f2966be, 0xdae82bff, +0xc27f135e, 0x94c3b409, 0x3b810ea0, 0x1cc39dc5, 0x731c8a7f, 0x34b6ee25, +0xb78b808d, 0x80eef85f, 0x76a9fb29, 0xbfacb97b, 0xf7a0ea37, 0xe3e69d80, +0xa19b8bd1, 0xf26246cd, 0x5495cf9a, 0x45ad1bf5, 0x2cb3296f, 0x3cb7e6fe, +0xbbde4ac3, 0x310c7b67, 0xe6b00a32, 0x4004d280, 0x0eb8b6dd, 0x8d951f0e, +0x96cb406f, 0xbe29ddb1, 0x38a3756d, 0xf5b058a1, 0xea638fd4, 0x438a7a69, +0x530eb350, 0x7cb218e5, 0xa67b9d52, 0xfb86dfbf, 0x4ed65b7e, 0x00678272, +0x9924a438, 0x9955c26a, 0xb8262774, 0x4ceecb5c, 0xeb103da7, 0xa1297389, +0x0613c489, 0xdf96ae66, 0x8afcce8b, 0x023081f0, 0xc6786ca4, 0x26bdf079, +0x1f29a180, 0xd8d3ec62, 0xe5d537a8, 0x83c99f4e, 0x69d0a8da, 0x027f9a9a, +0x9d9cc9ae, 0x0e2be163, 0xd78cfbe8, 0x2fabb789, 0x48c13886, 0xe35f29b2, +0xb74cc63f, 0x741dcff8, 0x23648527, 0x225155f9, 0xd40ee605, 0x0e4a954e, +0x890ee0b3, 0x97eb0d7d, 0x8776520a, 0xe6fa67e7, 0xa5961e3d, 0xdf2af5a3, +0x7dcbbe1b, 0x41f1af5a, 0xb5845b16, 0x5761f24d, 0xa1431e73, 0x1e00b132, +0x5ddeb7ab, 0x80f29c49, 0xd83714b4, 0xa0412947, 0x96062e8b, 0x7e7f7f8c, +0xdfba2365, 0x4ee1bc30, 0xfe10b1fd, 0x17d2db9f, 0xf0bd8dc9, 0x6c35d718, +0x8962224b, 0xe0f6c654, 0x813f4a7c, 0xc52b4bb7, 0x65da3a71, 0xef9b4eac, +0xf9657e9a, 0xba196585, 0xe3926762, 0xf66fb2a1, 0x433b9440, 0x1df33310, +0xc6aa2685, 0xe19368f0, 0x706636e0, 0xde1e8c5a, 0xbc735af5, 0x67ef395e, +0x6c1fb709, 0x3f7f16f7, 0xf2c4ebe5, 0x7dda3d51, 0x518b4a04, 0xd90dd373, +0xe51fc13a, 0xf01060ae, 0xd1502c78, 0x0d549998, 0x19453c99, 0x52104c7a, +0xc477d443, 0xce6910c0, 0x0dc163b4, 0x1dc4dab2, 0x5c4dbc0c, 0x6145ced9, +0xf02e295b, 0x60196b15, 0x2aa68b89, 0x9ff0dd3b, 0xe4b874ce, 0x9175ec6c, +0x9b629234, 0x644ce81d, 0x359fac36, 0x5211675f, 0xd24c092f, 0x1de9385e, +0x1352b7e5, 0x8d650e83, 0x7057d366, 0x43371dd2, 0x61678e6e, 0xd436279b, +0x267562b6, 0xabaf1706, 0x814bd74e, 0x3269cbda, 0x0a34b3cd, 0xa74c3d1a, +0xd3b098c8, 0x02030412, 0xd75d7207, 0x519d1b3d, 0x1958436b, 0x69ba4221, +0x81b6b4cf, 0xb83234e4, 0x7e652d03, 0x63bcf36e, 0xefecb5f7, 0x60550e08, +0x394963ce, 0xfd6f2b38, 0x1342c68b, 0xbc39f1ca, 0x21bdd863, 0x59ade0af, +0x2d0c793a, 0xa74702d4, 
0xc00885f9, 0xc73c27af, 0x6566ea9e, 0xa31e0f7c, +0x499f1706, 0xe19617ad, 0x0e19900a, 0x0a8d3669, 0xcc482af9, 0x5eb35096, +0x269b51c1, 0x80d1145b, 0x4be232ff, 0x3d31fc83, 0x89127a6c, 0x90af3379, +0x7726d002, 0x35f15151, 0x8393c3b5, 0x27a9ada8, 0x25940510, 0x05c49bf1, +0xc7c1b886, 0xdb00826f, 0xf658f61f, 0xe5d77d98, 0xa637b6fb, 0x2f515fa5, +0xb1f80c38, 0xe082d248, 0x4220acd0, 0x06360060, 0xcf42c277, 0xf5972529, +0xf7e274fb, 0xfe41cc28, 0xde661de0, 0xa157bd26, 0x8e1f4788, 0x35c4111a, +0x11a7360e, 0x751188e8, 0x544d9fc3, 0x33d853cd, 0x754542f7, 0x05d9979b, +0x73e59071, 0x4909bb7e, 0x64ae94be, 0x0eb4c8e9, 0xacb903fe, 0xc716288e, +0x1e914aa8, 0x19d127bd, 0x913a6dd4, 0x6af354d9, 0x72c29a95, 0x2fa731a5, +0x9f206402, 0xa44abe92, 0xeb090ab4, 0x85b7584c, 0xf27cd398, 0x3b828e38, +0x0cb8dd56, 0x37657f3c, 0x78fa0f2a, 0x3df7a0cb, 0x2621740f, 0x4d92422f, +0x334a8fa2, 0x124f947d, 0x31be3505, 0xf6a1e561, 0x5d0c4087, 0x62971b45, +0x312472a7, 0x933af4e3, 0x45c28196, 0x8ddd4f00, 0xa5ec20fb, 0x9acde751, +0x23ac64e8, 0xbea00461, 0xf9d65eac, 0xe21db306, 0xcacb4f76, 0x83950ca3, +0x66069329, 0xf7d72838, 0x5d13a747, 0x5c9ca583, 0x1d9d225e, 0xb2c705fe, +0xd0fac625, 0x0a73b38e, 0xec8d6692, 0x0bb64587, 0xf147c00c, 0xcdd6020c, +0x1608cede, 0xb531a423, 0x1c8b1b08, 0xdd74f03e, 0x6d3076fb, 0x2dcaaa44, +0xe869582b, 0x0f19cde9, 0x44d15927, 0xee751f17, 0x655f24c4, 0x7508164e, +0xab414b0d, 0x381d3525, 0x7a18cb74, 0xc91e435a, 0xb3397aa0, 0xa5567595, +0x1da36e52, 0xb43ba598, 0x3ef0d4fd, 0x11d348c4, 0xfcb4cbfa, 0xa3dee1a7, +0x8c6be341, 0xefafff59, 0xeaa95a58, 0x01ba74bb, 0x5b1d3193, 0x2ac942bb, +0xbb5816ca, 0xe79720dd, 0x1683bbad, 0xea2f3992, 0xa89cdd39, 0x3a389386, +0xc54d671f, 0xe8564365, 0x373196e0, 0xc08acfd2, 0x32f556b2, 0x15340220, +0x817d0d1b, 0x4b9afaa3, 0xd3bbf932, 0x6c9ddf75, 0x0fca9bea, 0x2cd2d913, +0x026e3647, 0x6e769b35, 0xdab34e6e, 0xc9b75a74, 0xfdc90304, 0xc6442959, +0xa8f44fb4, 0x73fb26de, 0x833d2d60, 0x8cf7a461, 0xdc5c4bf9, 0x1bce371f, +0xd1d6b743, 0xc1124cae, 0xf4d33161, 0x696956a5, 0xa5ce9c72, 0x5f84e109, +0xa0dae0ff, 0xdfdf169e, 0xb734307c, 0x25843b5d, 0x0b710ae4, 0xafd25b2b, +0xc013b89b, 0x5246e064, 0xeb28ab92, 0x4f92747c, 0x2f3c0c8b, 0x3268720d, +0xef638533, 0xd0fc40ab, 0x5d29c943, 0x3fd9b311, 0x6833bf43, 0xa188ad03, +0xacba3ad2, 0x9696f4da, 0x07b1b2fa, 0xb8f925f6, 0x082573f4, 0x506b5c0e, +0xe290e707, 0x80933f06, 0x38dcad25, 0x276ab82f, 0x788f3a2c, 0x4b0e14bc, +0xc1b38b43, 0x962a0efe, 0x77f19522, 0xd5bd951b, 0x90415ed2, 0x67a6a806, +0x82d0503f, 0x814e505f, 0x6448341a, 0x2c88ba72, 0x1f783411, 0x5dcfc5d9, +0x15cea1e2, 0x351ebdaa, 0xff083e5e, 0x173297d6, 0xbdadb9f2, 0xe82ebe50, +0x33fff936, 0xaed0e402, 0xcd08e297, 0xeefee410, 0x023aedc0, 0x493fdd4c, +0xd4454937, 0xa4c21893, 0xf9740292, 0x14308130, 0x2aa05568, 0xbd88a714, +0x3f264976, 0x62203300, 0x5d9aa0bf, 0x3b9be4b0, 0xcb1c6dff, 0xe7f5ded8, +0xb6ce0ec7, 0xdc1f094b, 0x0f93747c, 0x1768e49a, 0x1d7ebe93, 0x25d53887, +0x144a1fe2, 0x32bc280c, 0x1d0b7884, 0x106b8928, 0x5aa38780, 0x87ca0d93, +0x11a81f71, 0xe0957877, 0xb21fb0f7, 0x1e5a7ac0, 0x09db53a1, 0x5210dd59, +0xa0566364 + +ea = +14412 + +eb = +14412 + +cab = +4 + +c = +4 + +c_neg = +0 + +k_pos = +4800 + +k_neg = +4736 + +ncb_pos = +14496 + +ncb_neg = +14304 + +r = +2 + +rv_index = +0 + +code_block_mode = +0 + +op_flags = +RTE_BBDEV_TURBO_CRC_24B_ATTACH + +expected_status = +OK \ No newline at end of file diff --git a/app/test-bbdev/turbo_dec_default.data b/app/test-bbdev/turbo_dec_default.data new file mode 120000 index 00000000..371cbc69 --- /dev/null +++ b/app/test-bbdev/turbo_dec_default.data @@ -0,0 +1 @@ 
+test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data \ No newline at end of file diff --git a/app/test-bbdev/turbo_enc_default.data b/app/test-bbdev/turbo_enc_default.data new file mode 120000 index 00000000..5587f9cc --- /dev/null +++ b/app/test-bbdev/turbo_enc_default.data @@ -0,0 +1 @@ +test_vectors/turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data \ No newline at end of file diff --git a/app/test-crypto-perf/Makefile b/app/test-crypto-perf/Makefile index 3935aec4..78135f38 100644 --- a/app/test-crypto-perf/Makefile +++ b/app/test-crypto-perf/Makefile @@ -7,6 +7,8 @@ include $(RTE_SDK)/mk/rte.vars.mk APP = dpdk-test-crypto-perf CFLAGS += $(WERROR_FLAGS) +CFLAGS += -DALLOW_EXPERIMENTAL_API +CFLAGS += -O3 # all source are stored in SRCS-y SRCS-y := main.c diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c index 8f320099..44808f50 100644 --- a/app/test-crypto-perf/cperf_ops.c +++ b/app/test-crypto-perf/cperf_ops.c @@ -514,6 +514,7 @@ cperf_create_session(struct rte_mempool *sess_mp, auth_xform.next = NULL; auth_xform.auth.algo = options->auth_algo; auth_xform.auth.op = options->auth_op; + auth_xform.auth.iv.offset = iv_offset; /* auth different than null */ if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) { @@ -568,6 +569,8 @@ cperf_create_session(struct rte_mempool *sess_mp, auth_xform.next = NULL; auth_xform.auth.algo = options->auth_algo; auth_xform.auth.op = options->auth_op; + auth_xform.auth.iv.offset = iv_offset + + cipher_xform.cipher.iv.length; /* auth different than null */ if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) { diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h index 54a3ad5c..f5bf03c8 100644 --- a/app/test-crypto-perf/cperf_options.h +++ b/app/test-crypto-perf/cperf_options.h @@ -1,3 +1,6 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ #ifndef _CPERF_OPTIONS_ #define _CPERF_OPTIONS_ @@ -73,6 +76,8 @@ struct cperf_options { uint32_t pool_sz; uint32_t total_ops; + uint32_t headroom_sz; + uint32_t tailroom_sz; uint32_t segment_sz; uint32_t test_buffer_size; uint32_t *imix_buffer_sizes; diff --git a/app/test-crypto-perf/cperf_test_common.c b/app/test-crypto-perf/cperf_test_common.c index 21cb1c22..e803dc10 100644 --- a/app/test-crypto-perf/cperf_test_common.c +++ b/app/test-crypto-perf/cperf_test_common.c @@ -3,6 +3,7 @@ */ #include +#include #include "cperf_test_common.h" @@ -10,12 +11,15 @@ struct obj_params { uint32_t src_buf_offset; uint32_t dst_buf_offset; uint16_t segment_sz; + uint16_t headroom_sz; + uint16_t data_len; uint16_t segments_nb; }; static void fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp, - void *obj, uint32_t mbuf_offset, uint16_t segment_sz) + void *obj, uint32_t mbuf_offset, uint16_t segment_sz, + uint16_t headroom, uint16_t data_len) { uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf); @@ -25,10 +29,10 @@ fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp, m->buf_iova = rte_mempool_virt2iova(obj) + mbuf_offset + mbuf_hdr_size; m->buf_len = segment_sz; - m->data_len = segment_sz; + m->data_len = data_len; - /* No headroom needed for the buffer */ - m->data_off = 0; + /* Use headroom specified for the buffer */ + m->data_off = headroom; /* init some constant fields */ m->pool = mp; @@ -41,7 +45,7 @@ fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp, static void fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp, void *obj, uint32_t mbuf_offset, uint16_t segment_sz, - 
uint16_t segments_nb) + uint16_t headroom, uint16_t data_len, uint16_t segments_nb) { uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf); uint16_t remaining_segments = segments_nb; @@ -56,10 +60,10 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp, m->buf_iova = next_seg_phys_addr; next_seg_phys_addr += mbuf_hdr_size + segment_sz; m->buf_len = segment_sz; - m->data_len = segment_sz; + m->data_len = data_len; - /* No headroom needed for the buffer */ - m->data_off = 0; + /* Use headroom specified for the buffer */ + m->data_off = headroom; /* init some constant fields */ m->pool = mp; @@ -91,17 +95,19 @@ mempool_obj_init(struct rte_mempool *mp, op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; op->sess_type = RTE_CRYPTO_OP_WITH_SESSION; - op->phys_addr = rte_mem_virt2phy(obj); + op->phys_addr = rte_mem_virt2iova(obj); op->mempool = mp; /* Set source buffer */ op->sym->m_src = m; if (params->segments_nb == 1) fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset, - params->segment_sz); + params->segment_sz, params->headroom_sz, + params->data_len); else fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset, - params->segment_sz, params->segments_nb); + params->segment_sz, params->headroom_sz, + params->data_len, params->segments_nb); /* Set destination buffer */ @@ -109,7 +115,8 @@ mempool_obj_init(struct rte_mempool *mp, m = (struct rte_mbuf *) ((uint8_t *) obj + params->dst_buf_offset); fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset, - params->segment_sz); + params->segment_sz, params->headroom_sz, + params->data_len); op->sym->m_dst = m; } else op->sym->m_dst = NULL; @@ -124,6 +131,7 @@ cperf_alloc_common_memory(const struct cperf_options *options, uint32_t *dst_buf_offset, struct rte_mempool **pool) { + const char *mp_ops_name; char pool_name[32] = ""; int ret; @@ -170,6 +178,11 @@ cperf_alloc_common_memory(const struct cperf_options *options, struct obj_params params = { .segment_sz = options->segment_sz, + .headroom_sz = options->headroom_sz, + /* Data len = segment size - (headroom + tailroom) */ + .data_len = options->segment_sz - + options->headroom_sz - + options->tailroom_sz, .segments_nb = segments_nb, .src_buf_offset = crypto_op_total_size_padded, .dst_buf_offset = 0 @@ -193,8 +206,10 @@ cperf_alloc_common_memory(const struct cperf_options *options, return -1; } + mp_ops_name = rte_mbuf_best_mempool_ops(); + ret = rte_mempool_set_ops_byname(*pool, - RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL); + mp_ops_name, NULL); if (ret != 0) { RTE_LOG(ERR, USER1, "Error setting mempool handler for device %u\n", diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c index 8f761608..c8d16db6 100644 --- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c +++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c @@ -145,7 +145,7 @@ pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op, for (cur_iter_op = 0; cur_iter_op < iter_ops_needed; cur_iter_op += test_burst_size) { - uint32_t burst_size = RTE_MIN(state->opts->total_ops - cur_op, + uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op, test_burst_size); struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op]; diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c index 26321d00..92932a23 100644 --- a/app/test-crypto-perf/cperf_test_vector_parsing.c +++ b/app/test-crypto-perf/cperf_test_vector_parsing.c @@ -506,8 +506,7 @@ parse_file(struct 
cperf_test_vector *vector, struct cperf_options *opts) if (entry == NULL) return -1; - memset(entry, 0, strlen(line) + 1); - strncpy(entry, line, strlen(line)); + strcpy(entry, line); /* check if entry ends with , or = */ if (entry[strlen(entry) - 1] == ',' @@ -524,8 +523,8 @@ parse_file(struct cperf_test_vector *vector, struct cperf_options *opts) if (entry_extended == NULL) goto err; entry = entry_extended; - - strncat(entry, line, strlen(line)); + /* entry has been allocated accordingly */ + strcpy(&entry[strlen(entry)], line); if (entry[strlen(entry) - 1] != ',') break; diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c index 019d8359..5c7dadb6 100644 --- a/app/test-crypto-perf/main.c +++ b/app/test-crypto-perf/main.c @@ -21,8 +21,6 @@ #include "cperf_test_verify.h" #include "cperf_test_pmd_cyclecount.h" -#define NUM_SESSIONS 2048 -#define SESS_MEMPOOL_CACHE_SIZE 64 const char *cperf_test_type_strs[] = { [CPERF_TEST_TYPE_THROUGHPUT] = "throughput", @@ -67,6 +65,7 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs, struct rte_mempool *session_pool_socket[]) { uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id; + uint32_t sessions_needed = 0; unsigned int i, j; int ret; @@ -80,18 +79,24 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs, nb_lcores = rte_lcore_count() - 1; - if (enabled_cdev_count > nb_lcores) { - printf("Number of capable crypto devices (%d) " - "has to be less or equal to number of slave " - "cores (%d)\n", enabled_cdev_count, nb_lcores); + if (nb_lcores < 1) { + RTE_LOG(ERR, USER1, + "Number of enabled cores needs to be higher than 1\n"); return -EINVAL; } + /* + * Use fewer devices, + * if there are more available than cores. + */ + if (enabled_cdev_count > nb_lcores) + enabled_cdev_count = nb_lcores; + /* Create a mempool shared by all the devices */ uint32_t max_sess_size = 0, sess_size; for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) { - sess_size = rte_cryptodev_get_private_session_size(cdev_id); + sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id); if (sess_size > max_sess_size) max_sess_size = sess_size; } @@ -143,17 +148,62 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs, .nb_descriptors = opts->nb_descriptors }; + /** + * Device info specifies the min headroom and tailroom + requirement for the crypto PMD. This needs to be honoured + by the application while creating mbufs. 
+ */ + if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) { + /* Update headroom */ + opts->headroom_sz = cdev_info.min_mbuf_headroom_req; + } + if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) { + /* Update tailroom */ + opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req; + } + + /* Update segment size to include headroom & tailroom */ + opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz); + + uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions; + /* + * Two session objects are required for each session + * (one for the header, one for the private data) + */ + if (!strcmp((const char *)opts->device_type, + "crypto_scheduler")) { +#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER + uint32_t nb_slaves = + rte_cryptodev_scheduler_slaves_get(cdev_id, + NULL); + + sessions_needed = 2 * enabled_cdev_count * + opts->nb_qps * nb_slaves; +#endif + } else + sessions_needed = 2 * enabled_cdev_count * + opts->nb_qps; + + /* + * A single session is required per queue pair + * in each device + */ + if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) { + RTE_LOG(ERR, USER1, + "Device does not support at least " + "%u sessions\n", opts->nb_qps); + return -ENOTSUP; + } if (session_pool_socket[socket_id] == NULL) { char mp_name[RTE_MEMPOOL_NAMESIZE]; struct rte_mempool *sess_mp; snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "sess_mp_%u", socket_id); - sess_mp = rte_mempool_create(mp_name, - NUM_SESSIONS, + sessions_needed, max_sess_size, - SESS_MEMPOOL_CACHE_SIZE, + 0, 0, NULL, NULL, NULL, NULL, socket_id, 0); diff --git a/app/test-crypto-perf/meson.build b/app/test-crypto-perf/meson.build new file mode 100644 index 00000000..eacd7a0f --- /dev/null +++ b/app/test-crypto-perf/meson.build @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +allow_experimental_apis = true +sources = files('cperf_ops.c', + 'cperf_options_parsing.c', + 'cperf_test_common.c', + 'cperf_test_latency.c', + 'cperf_test_pmd_cyclecount.c', + 'cperf_test_throughput.c', + 'cperf_test_vector_parsing.c', + 'cperf_test_vectors.c', + 'cperf_test_verify.c', + 'main.c') +deps = ['cryptodev'] diff --git a/app/test-eventdev/evt_main.c b/app/test-eventdev/evt_main.c index 57bb9457..a8d304ba 100644 --- a/app/test-eventdev/evt_main.c +++ b/app/test-eventdev/evt_main.c @@ -20,29 +20,41 @@ struct evt_test *test; static void signal_handler(int signum) { - if (signum == SIGINT || signum == SIGTERM) { + int i; + static uint8_t once; + + if ((signum == SIGINT || signum == SIGTERM) && !once) { + once = true; printf("\nSignal %d received, preparing to exit...\n", signum); - /* request all lcores to exit from the main loop */ - *(int *)test->test_priv = true; - rte_wmb(); - rte_eal_mp_wait_lcore(); + if (test != NULL) { + /* request all lcores to exit from the main loop */ + *(int *)test->test_priv = true; + rte_wmb(); + + if (test->ops.ethdev_destroy) + test->ops.ethdev_destroy(test, &opt); - if (test->ops.test_result) - test->ops.test_result(test, &opt); + rte_eal_mp_wait_lcore(); - if (test->ops.eventdev_destroy) - test->ops.eventdev_destroy(test, &opt); + if (test->ops.test_result) + test->ops.test_result(test, &opt); - if (test->ops.ethdev_destroy) - test->ops.ethdev_destroy(test, &opt); + if (opt.prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) { + RTE_ETH_FOREACH_DEV(i) + rte_eth_dev_close(i); + } - if (test->ops.mempool_destroy) - test->ops.mempool_destroy(test, &opt); + if (test->ops.eventdev_destroy) + test->ops.eventdev_destroy(test, &opt); - if (test->ops.test_destroy) -
test->ops.test_destroy(test, &opt); + if (test->ops.mempool_destroy) + test->ops.mempool_destroy(test, &opt); + + if (test->ops.test_destroy) + test->ops.test_destroy(test, &opt); + } /* exit with the expected status */ signal(signum, SIG_DFL); diff --git a/app/test-eventdev/evt_options.c b/app/test-eventdev/evt_options.c index 9683b222..cfa43a16 100644 --- a/app/test-eventdev/evt_options.c +++ b/app/test-eventdev/evt_options.c @@ -27,6 +27,11 @@ evt_options_default(struct evt_options *opt) opt->pool_sz = 16 * 1024; opt->wkr_deq_dep = 16; opt->nb_pkts = (1ULL << 26); /* do ~64M packets */ + opt->nb_timers = 1E8; + opt->nb_timer_adptrs = 1; + opt->timer_tick_nsec = 1E3; /* 1000ns ~ 1us */ + opt->max_tmo_nsec = 1E5; /* 100000ns ~100us */ + opt->expiry_nsec = 1E4; /* 10000ns ~10us */ opt->prod_type = EVT_PROD_TYPE_SYNT; } @@ -86,6 +91,22 @@ evt_parse_eth_prod_type(struct evt_options *opt, const char *arg __rte_unused) return 0; } +static int +evt_parse_timer_prod_type(struct evt_options *opt, const char *arg __rte_unused) +{ + opt->prod_type = EVT_PROD_TYPE_EVENT_TIMER_ADPTR; + return 0; +} + +static int +evt_parse_timer_prod_type_burst(struct evt_options *opt, + const char *arg __rte_unused) +{ + opt->prod_type = EVT_PROD_TYPE_EVENT_TIMER_ADPTR; + opt->timdev_use_burst = 1; + return 0; +} + static int evt_parse_test_name(struct evt_options *opt, const char *arg) { @@ -119,6 +140,56 @@ evt_parse_nb_pkts(struct evt_options *opt, const char *arg) return ret; } +static int +evt_parse_nb_timers(struct evt_options *opt, const char *arg) +{ + int ret; + + ret = parser_read_uint64(&(opt->nb_timers), arg); + + return ret; +} + +static int +evt_parse_timer_tick_nsec(struct evt_options *opt, const char *arg) +{ + int ret; + + ret = parser_read_uint64(&(opt->timer_tick_nsec), arg); + + return ret; +} + +static int +evt_parse_max_tmo_nsec(struct evt_options *opt, const char *arg) +{ + int ret; + + ret = parser_read_uint64(&(opt->max_tmo_nsec), arg); + + return ret; +} + +static int +evt_parse_expiry_nsec(struct evt_options *opt, const char *arg) +{ + int ret; + + ret = parser_read_uint64(&(opt->expiry_nsec), arg); + + return ret; +} + +static int +evt_parse_nb_timer_adptrs(struct evt_options *opt, const char *arg) +{ + int ret; + + ret = parser_read_uint8(&(opt->nb_timer_adptrs), arg); + + return ret; +} + static int evt_parse_pool_sz(struct evt_options *opt, const char *arg) { @@ -169,7 +240,17 @@ usage(char *program) "\t--worker_deq_depth : dequeue depth of the worker\n" "\t--fwd_latency : perform fwd_latency measurement\n" "\t--queue_priority : enable queue priority\n" - "\t--prod_type_ethdev : use ethernet device as producer\n." 
+ "\t--prod_type_ethdev : use ethernet device as producer.\n" + "\t--prod_type_timerdev : use event timer device as producer.\n" + "\t expity_nsec would be the timeout\n" + "\t in ns.\n" + "\t--prod_type_timerdev_burst : use timer device as producer\n" + "\t burst mode.\n" + "\t--nb_timers : number of timers to arm.\n" + "\t--nb_timer_adptrs : number of timer adapters to use.\n" + "\t--timer_tick_nsec : timer tick interval in ns.\n" + "\t--max_tmo_nsec : max timeout interval in ns.\n" + "\t--expiry_nsec : event timer expiry ns.\n" ); printf("available tests:\n"); evt_test_dump_names(); @@ -217,22 +298,29 @@ evt_parse_sched_type_list(struct evt_options *opt, const char *arg) } static struct option lgopts[] = { - { EVT_NB_FLOWS, 1, 0, 0 }, - { EVT_DEVICE, 1, 0, 0 }, - { EVT_VERBOSE, 1, 0, 0 }, - { EVT_TEST, 1, 0, 0 }, - { EVT_PROD_LCORES, 1, 0, 0 }, - { EVT_WORK_LCORES, 1, 0, 0 }, - { EVT_SOCKET_ID, 1, 0, 0 }, - { EVT_POOL_SZ, 1, 0, 0 }, - { EVT_NB_PKTS, 1, 0, 0 }, - { EVT_WKR_DEQ_DEP, 1, 0, 0 }, - { EVT_SCHED_TYPE_LIST, 1, 0, 0 }, - { EVT_FWD_LATENCY, 0, 0, 0 }, - { EVT_QUEUE_PRIORITY, 0, 0, 0 }, - { EVT_PROD_ETHDEV, 0, 0, 0 }, - { EVT_HELP, 0, 0, 0 }, - { NULL, 0, 0, 0 } + { EVT_NB_FLOWS, 1, 0, 0 }, + { EVT_DEVICE, 1, 0, 0 }, + { EVT_VERBOSE, 1, 0, 0 }, + { EVT_TEST, 1, 0, 0 }, + { EVT_PROD_LCORES, 1, 0, 0 }, + { EVT_WORK_LCORES, 1, 0, 0 }, + { EVT_SOCKET_ID, 1, 0, 0 }, + { EVT_POOL_SZ, 1, 0, 0 }, + { EVT_NB_PKTS, 1, 0, 0 }, + { EVT_WKR_DEQ_DEP, 1, 0, 0 }, + { EVT_SCHED_TYPE_LIST, 1, 0, 0 }, + { EVT_FWD_LATENCY, 0, 0, 0 }, + { EVT_QUEUE_PRIORITY, 0, 0, 0 }, + { EVT_PROD_ETHDEV, 0, 0, 0 }, + { EVT_PROD_TIMERDEV, 0, 0, 0 }, + { EVT_PROD_TIMERDEV_BURST, 0, 0, 0 }, + { EVT_NB_TIMERS, 1, 0, 0 }, + { EVT_NB_TIMER_ADPTRS, 1, 0, 0 }, + { EVT_TIMER_TICK_NSEC, 1, 0, 0 }, + { EVT_MAX_TMO_NSEC, 1, 0, 0 }, + { EVT_EXPIRY_NSEC, 1, 0, 0 }, + { EVT_HELP, 0, 0, 0 }, + { NULL, 0, 0, 0 } }; static int @@ -255,11 +343,18 @@ evt_opts_parse_long(int opt_idx, struct evt_options *opt) { EVT_FWD_LATENCY, evt_parse_fwd_latency}, { EVT_QUEUE_PRIORITY, evt_parse_queue_priority}, { EVT_PROD_ETHDEV, evt_parse_eth_prod_type}, + { EVT_PROD_TIMERDEV, evt_parse_timer_prod_type}, + { EVT_PROD_TIMERDEV_BURST, evt_parse_timer_prod_type_burst}, + { EVT_NB_TIMERS, evt_parse_nb_timers}, + { EVT_NB_TIMER_ADPTRS, evt_parse_nb_timer_adptrs}, + { EVT_TIMER_TICK_NSEC, evt_parse_timer_tick_nsec}, + { EVT_MAX_TMO_NSEC, evt_parse_max_tmo_nsec}, + { EVT_EXPIRY_NSEC, evt_parse_expiry_nsec}, }; for (i = 0; i < RTE_DIM(parsermap); i++) { if (strncmp(lgopts[opt_idx].name, parsermap[i].lgopt_name, - strlen(parsermap[i].lgopt_name)) == 0) + strlen(lgopts[opt_idx].name)) == 0) return parsermap[i].parser_fn(opt, optarg); } @@ -305,6 +400,7 @@ evt_options_dump(struct evt_options *opt) evt_dump("pool_sz", "%d", opt->pool_sz); evt_dump("master lcore", "%d", rte_get_master_lcore()); evt_dump("nb_pkts", "%"PRIu64, opt->nb_pkts); + evt_dump("nb_timers", "%"PRIu64, opt->nb_timers); evt_dump_begin("available lcores"); RTE_LCORE_FOREACH(lcore_id) printf("%d ", lcore_id); diff --git a/app/test-eventdev/evt_options.h b/app/test-eventdev/evt_options.h index 46d12222..f3de48a1 100644 --- a/app/test-eventdev/evt_options.h +++ b/app/test-eventdev/evt_options.h @@ -9,6 +9,7 @@ #include #include +#include #include #include @@ -31,12 +32,20 @@ #define EVT_FWD_LATENCY ("fwd_latency") #define EVT_QUEUE_PRIORITY ("queue_priority") #define EVT_PROD_ETHDEV ("prod_type_ethdev") +#define EVT_PROD_TIMERDEV ("prod_type_timerdev") +#define EVT_PROD_TIMERDEV_BURST 
("prod_type_timerdev_burst") +#define EVT_NB_TIMERS ("nb_timers") +#define EVT_NB_TIMER_ADPTRS ("nb_timer_adptrs") +#define EVT_TIMER_TICK_NSEC ("timer_tick_nsec") +#define EVT_MAX_TMO_NSEC ("max_tmo_nsec") +#define EVT_EXPIRY_NSEC ("expiry_nsec") #define EVT_HELP ("help") enum evt_prod_type { EVT_PROD_TYPE_NONE, EVT_PROD_TYPE_SYNT, /* Producer type Synthetic i.e. CPU. */ EVT_PROD_TYPE_ETH_RX_ADPTR, /* Producer type Eth Rx Adapter. */ + EVT_PROD_TYPE_EVENT_TIMER_ADPTR, /* Producer type Timer Adapter. */ EVT_PROD_TYPE_MAX, }; @@ -52,11 +61,19 @@ struct evt_options { int nb_stages; int verbose_level; uint64_t nb_pkts; + uint8_t nb_timer_adptrs; + uint64_t nb_timers; + uint64_t timer_tick_nsec; + uint64_t optm_timer_tick_nsec; + uint64_t max_tmo_nsec; + uint64_t expiry_nsec; uint16_t wkr_deq_dep; uint8_t dev_id; uint32_t fwd_latency:1; uint32_t q_priority:1; enum evt_prod_type prod_type; + uint8_t timdev_use_burst; + uint8_t timdev_cnt; }; void evt_options_default(struct evt_options *opt); @@ -262,6 +279,24 @@ evt_dump_producer_type(struct evt_options *opt) case EVT_PROD_TYPE_ETH_RX_ADPTR: snprintf(name, EVT_PROD_MAX_NAME_LEN, "Ethdev Rx Adapter producers"); + evt_dump("nb_ethdev", "%d", rte_eth_dev_count_avail()); + break; + case EVT_PROD_TYPE_EVENT_TIMER_ADPTR: + if (opt->timdev_use_burst) + snprintf(name, EVT_PROD_MAX_NAME_LEN, + "Event timer adapter burst mode producer"); + else + snprintf(name, EVT_PROD_MAX_NAME_LEN, + "Event timer adapter producer"); + evt_dump("nb_timer_adapters", "%d", opt->nb_timer_adptrs); + evt_dump("max_tmo_nsec", "%"PRIu64"", opt->max_tmo_nsec); + evt_dump("expiry_nsec", "%"PRIu64"", opt->expiry_nsec); + if (opt->optm_timer_tick_nsec) + evt_dump("optm_timer_tick_nsec", "%"PRIu64"", + opt->optm_timer_tick_nsec); + else + evt_dump("timer_tick_nsec", "%"PRIu64"", + opt->timer_tick_nsec); break; } evt_dump("prod_type", "%s", name); diff --git a/app/test-eventdev/evt_test.h b/app/test-eventdev/evt_test.h index 7477a325..f07d2c33 100644 --- a/app/test-eventdev/evt_test.h +++ b/app/test-eventdev/evt_test.h @@ -77,8 +77,7 @@ void evt_test_dump_names(void); #define EVT_TEST_REGISTER(nm) \ static struct evt_test_entry _evt_test_entry_ ##nm; \ -RTE_INIT(evt_test_ ##nm); \ -static void evt_test_ ##nm(void) \ +RTE_INIT(evt_test_ ##nm) \ { \ _evt_test_entry_ ##nm.test.name = RTE_STR(nm);\ memcpy(&_evt_test_entry_ ##nm.test.ops, &nm, \ diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build index 7c373c87..a81dcd13 100644 --- a/app/test-eventdev/meson.build +++ b/app/test-eventdev/meson.build @@ -1,6 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Cavium, Inc +allow_experimental_apis = true sources = files('evt_main.c', 'evt_options.c', 'evt_test.c', @@ -11,19 +12,4 @@ sources = files('evt_main.c', 'test_perf_common.c', 'test_perf_atq.c', 'test_perf_queue.c') - -dep_objs = [get_variable(get_option('default_library') + '_rte_eventdev')] -dep_objs += cc.find_library('execinfo', required: false) # BSD only - -link_libs = [] -if get_option('default_library') == 'static' - link_libs = dpdk_drivers -endif - -executable('dpdk-test-eventdev', - sources, - c_args: [machine_args, '-DALLOW_EXPERIMENTAL_API'], - link_whole: link_libs, - dependencies: dep_objs, - install_rpath: join_paths(get_option('prefix'), driver_install_path), - install: true) +deps += 'eventdev' diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c index c57fbbfa..35debcfd 100644 --- a/app/test-eventdev/test_order_atq.c +++ 
b/app/test-eventdev/test_order_atq.c @@ -151,10 +151,14 @@ order_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt) if (ret) return ret; - ret = evt_service_setup(opt->dev_id); - if (ret) { - evt_err("No service lcore found to run event dev."); - return ret; + if (!evt_has_distributed_sched(opt->dev_id)) { + uint32_t service_id; + rte_event_dev_service_id_get(opt->dev_id, &service_id); + ret = evt_service_setup(service_id); + if (ret) { + evt_err("No service lcore found to run event dev."); + return ret; + } } ret = rte_event_dev_start(opt->dev_id); diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c index f603a023..17f7b984 100644 --- a/app/test-eventdev/test_order_queue.c +++ b/app/test-eventdev/test_order_queue.c @@ -164,10 +164,14 @@ order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt) if (ret) return ret; - ret = evt_service_setup(opt->dev_id); - if (ret) { - evt_err("No service lcore found to run event dev."); - return ret; + if (!evt_has_distributed_sched(opt->dev_id)) { + uint32_t service_id; + rte_event_dev_service_id_get(opt->dev_id, &service_id); + ret = evt_service_setup(service_id); + if (ret) { + evt_err("No service lcore found to run event dev."); + return ret; + } } ret = rte_event_dev_start(opt->dev_id); diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c index b36b22a7..9715a2ce 100644 --- a/app/test-eventdev/test_perf_atq.c +++ b/app/test-eventdev/test_perf_atq.c @@ -11,7 +11,7 @@ atq_nb_event_queues(struct evt_options *opt) { /* nb_queues = number of producers */ return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? - rte_eth_dev_count() : evt_nr_active_lcores(opt->plcores); + rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores); } static inline __attribute__((always_inline)) void @@ -43,15 +43,12 @@ perf_atq_worker(void *arg, const int enable_fwd_latency) while (t->done == false) { uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0); - if (enable_fwd_latency) - rte_prefetch0(ev.event_ptr); - if (!event) { rte_pause(); continue; } - if (enable_fwd_latency) + if (enable_fwd_latency && !prod_timer_type) /* first stage in pipeline, mark ts to compute fwd latency */ atq_mark_fwd_latency(&ev); @@ -90,7 +87,7 @@ perf_atq_worker_burst(void *arg, const int enable_fwd_latency) } for (i = 0; i < nb_rx; i++) { - if (enable_fwd_latency) { + if (enable_fwd_latency && !prod_timer_type) { rte_prefetch0(ev[i+1].event_ptr); /* first stage in pipeline. * mark time stamp to compute fwd latency @@ -163,7 +160,8 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt) struct rte_event_dev_info dev_info; nb_ports = evt_nr_active_lcores(opt->wlcores); - nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 : + nb_ports += (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR || + opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) ? 
0 : evt_nr_active_lcores(opt->plcores); nb_queues = atq_nb_event_queues(opt); diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c index 59fa0a49..d33cb2cd 100644 --- a/app/test-eventdev/test_perf_common.c +++ b/app/test-eventdev/test_perf_common.c @@ -72,6 +72,128 @@ perf_producer(void *arg) return 0; } +static inline int +perf_event_timer_producer(void *arg) +{ + struct prod_data *p = arg; + struct test_perf *t = p->t; + struct evt_options *opt = t->opt; + uint32_t flow_counter = 0; + uint64_t count = 0; + uint64_t arm_latency = 0; + const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs; + const uint32_t nb_flows = t->nb_flows; + const uint64_t nb_timers = opt->nb_timers; + struct rte_mempool *pool = t->pool; + struct perf_elt *m; + struct rte_event_timer_adapter **adptr = t->timer_adptr; + struct rte_event_timer tim; + uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec; + + memset(&tim, 0, sizeof(struct rte_event_timer)); + timeout_ticks = opt->optm_timer_tick_nsec ? + (timeout_ticks * opt->timer_tick_nsec) + / opt->optm_timer_tick_nsec : timeout_ticks; + timeout_ticks += timeout_ticks ? 0 : 1; + tim.ev.event_type = RTE_EVENT_TYPE_TIMER; + tim.ev.op = RTE_EVENT_OP_NEW; + tim.ev.sched_type = t->opt->sched_type_list[0]; + tim.ev.queue_id = p->queue_id; + tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL; + tim.state = RTE_EVENT_TIMER_NOT_ARMED; + tim.timeout_ticks = timeout_ticks; + + if (opt->verbose_level > 1) + printf("%s(): lcore %d\n", __func__, rte_lcore_id()); + + while (count < nb_timers && t->done == false) { + if (rte_mempool_get(pool, (void **)&m) < 0) + continue; + + m->tim = tim; + m->tim.ev.flow_id = flow_counter++ % nb_flows; + m->tim.ev.event_ptr = m; + m->timestamp = rte_get_timer_cycles(); + while (rte_event_timer_arm_burst( + adptr[flow_counter % nb_timer_adptrs], + (struct rte_event_timer **)&m, 1) != 1) { + if (t->done) + break; + rte_pause(); + m->timestamp = rte_get_timer_cycles(); + } + arm_latency += rte_get_timer_cycles() - m->timestamp; + count++; + } + fflush(stdout); + rte_delay_ms(1000); + printf("%s(): lcore %d Average event timer arm latency = %.3f us\n", + __func__, rte_lcore_id(), (float)(arm_latency / count) / + (rte_get_timer_hz() / 1000000)); + return 0; +} + +static inline int +perf_event_timer_producer_burst(void *arg) +{ + int i; + struct prod_data *p = arg; + struct test_perf *t = p->t; + struct evt_options *opt = t->opt; + uint32_t flow_counter = 0; + uint64_t count = 0; + uint64_t arm_latency = 0; + const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs; + const uint32_t nb_flows = t->nb_flows; + const uint64_t nb_timers = opt->nb_timers; + struct rte_mempool *pool = t->pool; + struct perf_elt *m[BURST_SIZE + 1] = {NULL}; + struct rte_event_timer_adapter **adptr = t->timer_adptr; + struct rte_event_timer tim; + uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec; + + memset(&tim, 0, sizeof(struct rte_event_timer)); + timeout_ticks = opt->optm_timer_tick_nsec ? + (timeout_ticks * opt->timer_tick_nsec) + / opt->optm_timer_tick_nsec : timeout_ticks; + timeout_ticks += timeout_ticks ? 
0 : 1; + tim.ev.event_type = RTE_EVENT_TYPE_TIMER; + tim.ev.op = RTE_EVENT_OP_NEW; + tim.ev.sched_type = t->opt->sched_type_list[0]; + tim.ev.queue_id = p->queue_id; + tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL; + tim.state = RTE_EVENT_TIMER_NOT_ARMED; + tim.timeout_ticks = timeout_ticks; + + if (opt->verbose_level > 1) + printf("%s(): lcore %d\n", __func__, rte_lcore_id()); + + while (count < nb_timers && t->done == false) { + if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0) + continue; + for (i = 0; i < BURST_SIZE; i++) { + rte_prefetch0(m[i + 1]); + m[i]->tim = tim; + m[i]->tim.ev.flow_id = flow_counter++ % nb_flows; + m[i]->tim.ev.event_ptr = m[i]; + m[i]->timestamp = rte_get_timer_cycles(); + } + rte_event_timer_arm_tmo_tick_burst( + adptr[flow_counter % nb_timer_adptrs], + (struct rte_event_timer **)m, + tim.timeout_ticks, + BURST_SIZE); + arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp; + count += BURST_SIZE; + } + fflush(stdout); + rte_delay_ms(1000); + printf("%s(): lcore %d Average event timer arm latency = %.3f us\n", + __func__, rte_lcore_id(), (float)(arm_latency / count) / + (rte_get_timer_hz() / 1000000)); + return 0; +} + static int perf_producer_wrapper(void *arg) { @@ -80,6 +202,12 @@ perf_producer_wrapper(void *arg) /* Launch the producer function only in case of synthetic producer. */ if (t->opt->prod_type == EVT_PROD_TYPE_SYNT) return perf_producer(arg); + else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR && + !t->opt->timdev_use_burst) + return perf_event_timer_producer(arg); + else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR && + t->opt->timdev_use_burst) + return perf_event_timer_producer_burst(arg); return 0; } @@ -146,8 +274,7 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt, port_idx++; } - const uint64_t total_pkts = opt->nb_pkts * - evt_nr_active_lcores(opt->plcores); + const uint64_t total_pkts = t->outstand_pkts; uint64_t dead_lock_cycles = rte_get_timer_cycles(); int64_t dead_lock_remaining = total_pkts; @@ -189,7 +316,9 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt, if (remaining <= 0) { t->result = EVT_TEST_SUCCESS; - if (opt->prod_type == EVT_PROD_TYPE_SYNT) { + if (opt->prod_type == EVT_PROD_TYPE_SYNT || + opt->prod_type == + EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { t->done = true; rte_smp_wmb(); break; @@ -226,7 +355,7 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride, memset(&queue_conf, 0, sizeof(struct rte_event_eth_rx_adapter_queue_conf)); queue_conf.ev.sched_type = opt->sched_type_list[0]; - for (prod = 0; prod < rte_eth_dev_count(); prod++) { + RTE_ETH_FOREACH_DEV(prod) { uint32_t cap; ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id, @@ -283,6 +412,65 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride, return ret; } +static int +perf_event_timer_adapter_setup(struct test_perf *t) +{ + int i; + int ret; + struct rte_event_timer_adapter_info adapter_info; + struct rte_event_timer_adapter *wl; + uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores); + uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES; + + if (nb_producers == 1) + flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT; + + for (i = 0; i < t->opt->nb_timer_adptrs; i++) { + struct rte_event_timer_adapter_conf config = { + .event_dev_id = t->opt->dev_id, + .timer_adapter_id = i, + .timer_tick_ns = t->opt->timer_tick_nsec, + .max_tmo_ns = t->opt->max_tmo_nsec, + .nb_timers = 2 * 1024 * 1024, + .flags = flags, + }; + + wl = 
rte_event_timer_adapter_create(&config); + if (wl == NULL) { + evt_err("failed to create event timer adapter %d", i); + return rte_errno; + } + + memset(&adapter_info, 0, + sizeof(struct rte_event_timer_adapter_info)); + rte_event_timer_adapter_get_info(wl, &adapter_info); + t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns; + + if (!(adapter_info.caps & + RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) { + uint32_t service_id; + + rte_event_timer_adapter_service_id_get(wl, + &service_id); + ret = evt_service_setup(service_id); + if (ret) { + evt_err("Failed to setup service core" + " for timer adapter\n"); + return ret; + } + rte_service_runstate_set(service_id, 1); + } + + ret = rte_event_timer_adapter_start(wl); + if (ret) { + evt_err("failed to start event timer adapter %d", i); + return ret; + } + t->timer_adptr[i] = wl; + } + return 0; +} + int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt, uint8_t stride, uint8_t nb_queues, @@ -326,6 +514,18 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt, ret = perf_event_rx_adapter_setup(opt, stride, *port_conf); if (ret) return ret; + } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { + prod = 0; + for ( ; port < perf_nb_event_ports(opt); port++) { + struct prod_data *p = &t->prod[port]; + p->queue_id = prod * stride; + p->t = t; + prod++; + } + + ret = perf_event_timer_adapter_setup(t); + if (ret) + return ret; } else { prod = 0; for ( ; port < perf_nb_event_ports(opt); port++) { @@ -415,10 +615,13 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues) } /* Fixups */ - if (opt->nb_stages == 1 && opt->fwd_latency) { + if ((opt->nb_stages == 1 && + opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) && + opt->fwd_latency) { evt_info("fwd_latency is valid when nb_stages > 1, disabling"); opt->fwd_latency = 0; } + if (opt->fwd_latency && !opt->q_priority) { evt_info("enabled queue priority for latency measurement"); opt->q_priority = 1; @@ -447,8 +650,13 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues) void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt) { - RTE_SET_USED(test); + int i; + struct test_perf *t = evt_test_priv(test); + if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { + for (i = 0; i < opt->nb_timer_adptrs; i++) + rte_event_timer_adapter_stop(t->timer_adptr[i]); + } rte_event_dev_stop(opt->dev_id); rte_event_dev_close(opt->dev_id); } @@ -465,20 +673,14 @@ perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused, int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt) { - int i; + uint16_t i; struct test_perf *t = evt_test_priv(test); struct rte_eth_conf port_conf = { .rxmode = { .mq_mode = ETH_MQ_RX_RSS, .max_rx_pkt_len = ETHER_MAX_LEN, .split_hdr_size = 0, - .header_split = 0, - .hw_ip_checksum = 0, - .hw_vlan_filter = 0, - .hw_vlan_strip = 0, - .hw_vlan_extend = 0, - .jumbo_frame = 0, - .hw_strip_crc = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .rx_adv_conf = { .rss_conf = { @@ -488,19 +690,33 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt) }, }; - if (opt->prod_type == EVT_PROD_TYPE_SYNT) + if (opt->prod_type == EVT_PROD_TYPE_SYNT || + opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) return 0; - if (!rte_eth_dev_count()) { + if (!rte_eth_dev_count_avail()) { evt_err("No ethernet ports found."); return -ENODEV; } - for (i = 0; i < rte_eth_dev_count(); i++) { + RTE_ETH_FOREACH_DEV(i) { + struct rte_eth_dev_info dev_info; + struct rte_eth_conf local_port_conf = port_conf; +
+ rte_eth_dev_info_get(i, &dev_info); + + local_port_conf.rx_adv_conf.rss_conf.rss_hf &= + dev_info.flow_type_rss_offloads; + if (local_port_conf.rx_adv_conf.rss_conf.rss_hf != + port_conf.rx_adv_conf.rss_conf.rss_hf) { + evt_info("Port %u modified RSS hash function based on hardware support," + "requested:%#"PRIx64" configured:%#"PRIx64"\n", + i, + port_conf.rx_adv_conf.rss_conf.rss_hf, + local_port_conf.rx_adv_conf.rss_conf.rss_hf); + } - if (rte_eth_dev_configure(i, 1, 1, - &port_conf) - < 0) { + if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) { evt_err("Failed to configure eth port [%d]", i); return -EINVAL; } @@ -527,14 +743,13 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt) void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt) { - int i; + uint16_t i; RTE_SET_USED(test); if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) { - for (i = 0; i < rte_eth_dev_count(); i++) { + RTE_ETH_FOREACH_DEV(i) { rte_event_eth_rx_adapter_stop(i); rte_eth_dev_stop(i); - rte_eth_dev_close(i); } } } @@ -544,7 +759,8 @@ perf_mempool_setup(struct evt_test *test, struct evt_options *opt) { struct test_perf *t = evt_test_priv(test); - if (opt->prod_type == EVT_PROD_TYPE_SYNT) { + if (opt->prod_type == EVT_PROD_TYPE_SYNT || + opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { t->pool = rte_mempool_create(test->name, /* mempool name */ opt->pool_sz, /* number of elements*/ sizeof(struct perf_elt), /* element size*/ @@ -594,10 +810,18 @@ perf_test_setup(struct evt_test *test, struct evt_options *opt) struct test_perf *t = evt_test_priv(test); - t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->plcores); + if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { + t->outstand_pkts = opt->nb_timers * + evt_nr_active_lcores(opt->plcores); + t->nb_pkts = opt->nb_timers; + } else { + t->outstand_pkts = opt->nb_pkts * + evt_nr_active_lcores(opt->plcores); + t->nb_pkts = opt->nb_pkts; + } + t->nb_workers = evt_nr_active_lcores(opt->wlcores); t->done = false; - t->nb_pkts = opt->nb_pkts; t->nb_flows = opt->nb_flows; t->result = EVT_TEST_FAILED; t->opt = opt; diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h index 9ad99733..d8fbee6d 100644 --- a/app/test-eventdev/test_perf_common.h +++ b/app/test-eventdev/test_perf_common.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -39,6 +40,7 @@ struct prod_data { struct test_perf *t; } __rte_cache_aligned; + struct test_perf { /* Don't change the offset of "done". Signal handler use this memory * to terminate all lcores work. 
@@ -54,10 +56,18 @@ struct test_perf { struct worker_data worker[EVT_MAX_PORTS]; struct evt_options *opt; uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned; + struct rte_event_timer_adapter *timer_adptr[ + RTE_EVENT_TIMER_ADAPTER_NUM_MAX] __rte_cache_aligned; } __rte_cache_aligned; struct perf_elt { - uint64_t timestamp; + union { + struct rte_event_timer tim; + struct { + char pad[offsetof(struct rte_event_timer, user_meta)]; + uint64_t timestamp; + }; + }; } __rte_cache_aligned; #define BURST_SIZE 16 @@ -68,6 +78,8 @@ struct perf_elt { struct evt_options *opt = t->opt;\ const uint8_t dev = w->dev_id;\ const uint8_t port = w->port_id;\ + const uint8_t prod_timer_type = \ + opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR;\ uint8_t *const sched_type_list = &t->sched_type_list[0];\ struct rte_mempool *const pool = t->pool;\ const uint8_t nb_stages = t->opt->nb_stages;\ diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c index db8f2f3e..04ce9419 100644 --- a/app/test-eventdev/test_perf_queue.c +++ b/app/test-eventdev/test_perf_queue.c @@ -11,7 +11,7 @@ perf_queue_nb_event_queues(struct evt_options *opt) { /* nb_queues = number of producers * number of stages */ uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? - rte_eth_dev_count() : evt_nr_active_lcores(opt->plcores); + rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores); return nb_prod * opt->nb_stages; } @@ -49,7 +49,7 @@ perf_queue_worker(void *arg, const int enable_fwd_latency) rte_pause(); continue; } - if (enable_fwd_latency) + if (enable_fwd_latency && !prod_timer_type) /* first q in pipeline, mark timestamp to compute fwd latency */ mark_fwd_latency(&ev, nb_stages); @@ -88,7 +88,7 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency) } for (i = 0; i < nb_rx; i++) { - if (enable_fwd_latency) { + if (enable_fwd_latency && !prod_timer_type) { rte_prefetch0(ev[i+1].event_ptr); /* first queue in pipeline. * mark time stamp to compute fwd latency @@ -161,7 +161,8 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt) struct rte_event_dev_info dev_info; nb_ports = evt_nr_active_lcores(opt->wlcores); - nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 : + nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR || + opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 
0 : evt_nr_active_lcores(opt->plcores); nb_queues = perf_queue_nb_event_queues(opt); diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c index dd718977..26dc79f9 100644 --- a/app/test-eventdev/test_pipeline_atq.c +++ b/app/test-eventdev/test_pipeline_atq.c @@ -12,7 +12,7 @@ pipeline_atq_nb_event_queues(struct evt_options *opt) { RTE_SET_USED(opt); - return rte_eth_dev_count(); + return rte_eth_dev_count_avail(); } static int @@ -324,7 +324,7 @@ pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt) uint8_t nb_worker_queues = 0; nb_ports = evt_nr_active_lcores(opt->wlcores); - nb_queues = rte_eth_dev_count(); + nb_queues = rte_eth_dev_count_avail(); /* One extra port and queueu for Tx service */ if (t->mt_unsafe) { diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c index 6cad9357..a54068df 100644 --- a/app/test-eventdev/test_pipeline_common.c +++ b/app/test-eventdev/test_pipeline_common.c @@ -166,7 +166,7 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues) */ lcores = 2; - if (!rte_eth_dev_count()) { + if (!rte_eth_dev_count_avail()) { evt_err("test needs minimum 1 ethernet dev"); return -1; } @@ -213,7 +213,7 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues) int pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt) { - int i; + uint16_t i; uint8_t nb_queues = 1; uint8_t mt_state = 0; struct test_pipeline *t = evt_test_priv(test); @@ -223,7 +223,6 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt) .mq_mode = ETH_MQ_RX_RSS, .max_rx_pkt_len = ETHER_MAX_LEN, .offloads = DEV_RX_OFFLOAD_CRC_STRIP, - .ignore_offload_bitfield = 1, }, .rx_adv_conf = { .rss_conf = { @@ -234,23 +233,34 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt) }; RTE_SET_USED(opt); - if (!rte_eth_dev_count()) { + if (!rte_eth_dev_count_avail()) { evt_err("No ethernet ports found.\n"); return -ENODEV; } - for (i = 0; i < rte_eth_dev_count(); i++) { + RTE_ETH_FOREACH_DEV(i) { struct rte_eth_dev_info dev_info; + struct rte_eth_conf local_port_conf = port_conf; - memset(&dev_info, 0, sizeof(struct rte_eth_dev_info)); rte_eth_dev_info_get(i, &dev_info); mt_state = !(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MT_LOCKFREE); rx_conf = dev_info.default_rxconf; rx_conf.offloads = port_conf.rxmode.offloads; + local_port_conf.rx_adv_conf.rss_conf.rss_hf &= + dev_info.flow_type_rss_offloads; + if (local_port_conf.rx_adv_conf.rss_conf.rss_hf != + port_conf.rx_adv_conf.rss_conf.rss_hf) { + evt_info("Port %u modified RSS hash function based on hardware support," + "requested:%#"PRIx64" configured:%#"PRIx64"\n", + i, + port_conf.rx_adv_conf.rss_conf.rss_hf, + local_port_conf.rx_adv_conf.rss_conf.rss_hf); + } + if (rte_eth_dev_configure(i, nb_queues, nb_queues, - &port_conf) + &local_port_conf) < 0) { evt_err("Failed to configure eth port [%d]\n", i); return -EINVAL; @@ -337,7 +347,7 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride, memset(&queue_conf, 0, sizeof(struct rte_event_eth_rx_adapter_queue_conf)); queue_conf.ev.sched_type = opt->sched_type_list[0]; - for (prod = 0; prod < rte_eth_dev_count(); prod++) { + RTE_ETH_FOREACH_DEV(prod) { uint32_t cap; ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id, @@ -419,7 +429,7 @@ pipeline_event_tx_service_setup(struct evt_test *test, struct evt_options *opt, tx->dev_id = opt->dev_id; tx->queue_id = tx_queue_id; tx->port_id = tx_port_id; - tx->nb_ethports = rte_eth_dev_count(); 
+ tx->nb_ethports = rte_eth_dev_count_avail(); tx->t = t; /* Register Tx service */ @@ -453,7 +463,7 @@ pipeline_event_tx_service_setup(struct evt_test *test, struct evt_options *opt, void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt) { - int i; + uint16_t i; RTE_SET_USED(test); RTE_SET_USED(opt); struct test_pipeline *t = evt_test_priv(test); @@ -464,10 +474,9 @@ pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt) rte_service_component_unregister(t->tx_service.service_id); } - for (i = 0; i < rte_eth_dev_count(); i++) { + RTE_ETH_FOREACH_DEV(i) { rte_event_eth_rx_adapter_stop(i); rte_eth_dev_stop(i); - rte_eth_dev_close(i); } } diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c index 02fc27cf..ca5f4578 100644 --- a/app/test-eventdev/test_pipeline_queue.c +++ b/app/test-eventdev/test_pipeline_queue.c @@ -10,7 +10,7 @@ static __rte_always_inline int pipeline_queue_nb_event_queues(struct evt_options *opt) { - uint16_t eth_count = rte_eth_dev_count(); + uint16_t eth_count = rte_eth_dev_count_avail(); return (eth_count * opt->nb_stages) + eth_count; } @@ -333,7 +333,7 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt) uint8_t nb_worker_queues = 0; nb_ports = evt_nr_active_lcores(opt->wlcores); - nb_queues = rte_eth_dev_count() * (nb_stages); + nb_queues = rte_eth_dev_count_avail() * (nb_stages); /* Extra port for Tx service. */ if (t->mt_unsafe) { @@ -341,7 +341,7 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt) nb_ports++; nb_queues++; } else - nb_queues += rte_eth_dev_count(); + nb_queues += rte_eth_dev_count_avail(); rte_event_dev_info_get(opt->dev_id, &info); diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile index ed588ab6..2b4d604b 100644 --- a/app/test-pmd/Makefile +++ b/app/test-pmd/Makefile @@ -13,6 +13,7 @@ APP = testpmd CFLAGS += -DALLOW_EXPERIMENTAL_API CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) +CFLAGS += -Wno-deprecated-declarations # # all source are stored in SRCS-y @@ -33,9 +34,10 @@ SRCS-y += txonly.c SRCS-y += csumonly.c SRCS-y += icmpecho.c SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ieee1588fwd.c +SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_cmd.c -ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC)$(CONFIG_RTE_LIBRTE_SCHED),yy) -SRCS-y += tm.c +ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC), y) +SRCS-y += softnicfwd.c endif ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y) @@ -44,8 +46,10 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y) LDLIBS += -lrte_pmd_bond endif -ifeq ($(CONFIG_RTE_LIBRTE_DPAA_PMD),y) +ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS)$(CONFIG_RTE_LIBRTE_DPAA_PMD),yy) LDLIBS += -lrte_pmd_dpaa +LDLIBS += -lrte_bus_dpaa +LDLIBS += -lrte_mempool_dpaa endif ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y) diff --git a/app/test-pmd/bpf_cmd.c b/app/test-pmd/bpf_cmd.c new file mode 100644 index 00000000..830bfc13 --- /dev/null +++ b/app/test-pmd/bpf_cmd.c @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "testpmd.h" + +static const struct rte_bpf_xsym bpf_xsym[] = { + { + .name = RTE_STR(stdout), + .type = RTE_BPF_XTYPE_VAR, + .var = { + .val = &stdout, + .desc = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(stdout), + }, + }, + }, + { + .name = RTE_STR(rte_pktmbuf_dump), + .type = RTE_BPF_XTYPE_FUNC, + .func = { + .val = (void *)rte_pktmbuf_dump, + .nb_args = 3, + .args = { + [0] = { + .type = RTE_BPF_ARG_RAW, + .size 
= sizeof(uintptr_t), + }, + [1] = { + .type = RTE_BPF_ARG_PTR_MBUF, + .size = sizeof(struct rte_mbuf), + }, + [2] = { + .type = RTE_BPF_ARG_RAW, + .size = sizeof(uint32_t), + }, + }, + }, + }, +}; + +/* *** load BPF program *** */ +struct cmd_bpf_ld_result { + cmdline_fixed_string_t bpf; + cmdline_fixed_string_t dir; + uint8_t port; + uint16_t queue; + cmdline_fixed_string_t op; + cmdline_fixed_string_t flags; + cmdline_fixed_string_t prm; +}; + +static void +bpf_parse_flags(const char *str, struct rte_bpf_arg *arg, uint32_t *flags) +{ + uint32_t i, v; + + *flags = RTE_BPF_ETH_F_NONE; + arg->type = RTE_BPF_ARG_PTR; + arg->size = mbuf_data_size; + + for (i = 0; str[i] != 0; i++) { + v = toupper(str[i]); + if (v == 'J') + *flags |= RTE_BPF_ETH_F_JIT; + else if (v == 'M') { + arg->type = RTE_BPF_ARG_PTR_MBUF; + arg->size = sizeof(struct rte_mbuf); + arg->buf_size = mbuf_data_size; + } else if (v == '-') + continue; + else + printf("unknown flag: \'%c\'", v); + } +} + +static void cmd_operate_bpf_ld_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + int32_t rc; + uint32_t flags; + struct cmd_bpf_ld_result *res; + struct rte_bpf_prm prm; + const char *fname, *sname; + + res = parsed_result; + memset(&prm, 0, sizeof(prm)); + prm.xsym = bpf_xsym; + prm.nb_xsym = RTE_DIM(bpf_xsym); + + bpf_parse_flags(res->flags, &prm.prog_arg, &flags); + fname = res->prm; + sname = ".text"; + + if (strcmp(res->dir, "rx") == 0) { + rc = rte_bpf_eth_rx_elf_load(res->port, res->queue, &prm, + fname, sname, flags); + printf("%d:%s\n", rc, strerror(-rc)); + } else if (strcmp(res->dir, "tx") == 0) { + rc = rte_bpf_eth_tx_elf_load(res->port, res->queue, &prm, + fname, sname, flags); + printf("%d:%s\n", rc, strerror(-rc)); + } else + printf("invalid value: %s\n", res->dir); +} + +cmdline_parse_token_string_t cmd_load_bpf_start = + TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result, + bpf, "bpf-load"); +cmdline_parse_token_string_t cmd_load_bpf_dir = + TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result, + dir, "rx#tx"); +cmdline_parse_token_num_t cmd_load_bpf_port = + TOKEN_NUM_INITIALIZER(struct cmd_bpf_ld_result, port, UINT8); +cmdline_parse_token_num_t cmd_load_bpf_queue = + TOKEN_NUM_INITIALIZER(struct cmd_bpf_ld_result, queue, UINT16); +cmdline_parse_token_string_t cmd_load_bpf_flags = + TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result, + flags, NULL); +cmdline_parse_token_string_t cmd_load_bpf_prm = + TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result, + prm, NULL); + +cmdline_parse_inst_t cmd_operate_bpf_ld_parse = { + .f = cmd_operate_bpf_ld_parsed, + .data = NULL, + .help_str = "bpf-load rx|tx ", + .tokens = { + (void *)&cmd_load_bpf_start, + (void *)&cmd_load_bpf_dir, + (void *)&cmd_load_bpf_port, + (void *)&cmd_load_bpf_queue, + (void *)&cmd_load_bpf_flags, + (void *)&cmd_load_bpf_prm, + NULL, + }, +}; + +/* *** unload BPF program *** */ +struct cmd_bpf_unld_result { + cmdline_fixed_string_t bpf; + cmdline_fixed_string_t dir; + uint8_t port; + uint16_t queue; +}; + +static void cmd_operate_bpf_unld_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_bpf_unld_result *res; + + res = parsed_result; + + if (strcmp(res->dir, "rx") == 0) + rte_bpf_eth_rx_unload(res->port, res->queue); + else if (strcmp(res->dir, "tx") == 0) + rte_bpf_eth_tx_unload(res->port, res->queue); + else + printf("invalid value: %s\n", res->dir); +} + +cmdline_parse_token_string_t cmd_unload_bpf_start = + 
TOKEN_STRING_INITIALIZER(struct cmd_bpf_unld_result, + bpf, "bpf-unload"); +cmdline_parse_token_string_t cmd_unload_bpf_dir = + TOKEN_STRING_INITIALIZER(struct cmd_bpf_unld_result, + dir, "rx#tx"); +cmdline_parse_token_num_t cmd_unload_bpf_port = + TOKEN_NUM_INITIALIZER(struct cmd_bpf_unld_result, port, UINT8); +cmdline_parse_token_num_t cmd_unload_bpf_queue = + TOKEN_NUM_INITIALIZER(struct cmd_bpf_unld_result, queue, UINT16); + +cmdline_parse_inst_t cmd_operate_bpf_unld_parse = { + .f = cmd_operate_bpf_unld_parsed, + .data = NULL, + .help_str = "bpf-unload rx|tx ", + .tokens = { + (void *)&cmd_unload_bpf_start, + (void *)&cmd_unload_bpf_dir, + (void *)&cmd_unload_bpf_port, + (void *)&cmd_unload_bpf_queue, + NULL, + }, +}; diff --git a/app/test-pmd/bpf_cmd.h b/app/test-pmd/bpf_cmd.h new file mode 100644 index 00000000..5ee4c9f7 --- /dev/null +++ b/app/test-pmd/bpf_cmd.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#ifndef _BPF_CMD_H_ +#define _BPF_CMD_H_ + +#ifdef RTE_LIBRTE_BPF + + /* BPF CLI */ +extern cmdline_parse_inst_t cmd_operate_bpf_ld_parse; +extern cmdline_parse_inst_t cmd_operate_bpf_unld_parse; + +#endif /* RTE_LIBRTE_BPF */ + +#endif /* _BPF_CMD_H_ */ diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index d1dc1de6..589121d6 100644 --- a/app/test-pmd/cmdline.c +++ b/app/test-pmd/cmdline.c @@ -60,7 +60,7 @@ #include #include #endif -#ifdef RTE_LIBRTE_DPAA_PMD +#if defined RTE_LIBRTE_DPAA_BUS && defined RTE_LIBRTE_DPAA_PMD #include #endif #ifdef RTE_LIBRTE_IXGBE_PMD @@ -75,6 +75,7 @@ #include "testpmd.h" #include "cmdline_mtr.h" #include "cmdline_tm.h" +#include "bpf_cmd.h" static struct cmdline *testpmd_cl; @@ -771,9 +772,36 @@ static void cmd_help_long_parsed(void *parsed_result, " (priority) (weight)\n" " Set port tm node parent.\n\n" + "suspend port tm node (port_id) (node_id)" + " Suspend tm node.\n\n" + + "resume port tm node (port_id) (node_id)" + " Resume tm node.\n\n" + "port tm hierarchy commit (port_id) (clean_on_fail)\n" " Commit tm hierarchy.\n\n" + "vxlan ip-version (ipv4|ipv6) vni (vni) udp-src" + " (udp-src) udp-dst (udp-dst) ip-src (ip-src) ip-dst" + " (ip-dst) eth-src (eth-src) eth-dst (eth-dst)\n" + " Configure the VXLAN encapsulation for flows.\n\n" + + "vxlan-with-vlan ip-version (ipv4|ipv6) vni (vni)" + " udp-src (udp-src) udp-dst (udp-dst) ip-src (ip-src)" + " ip-dst (ip-dst) vlan-tci (vlan-tci) eth-src (eth-src)" + " eth-dst (eth-dst)\n" + " Configure the VXLAN encapsulation for flows.\n\n" + + "nvgre ip-version (ipv4|ipv6) tni (tni) ip-src" + " (ip-src) ip-dst (ip-dst) eth-src (eth-src) eth-dst" + " (eth-dst)\n" + " Configure the NVGRE encapsulation for flows.\n\n" + + "nvgre-with-vlan ip-version (ipv4|ipv6) tni (tni)" + " ip-src (ip-src) ip-dst (ip-dst) vlan-tci (vlan-tci)" + " eth-src (eth-src) eth-dst (eth-dst)\n" + " Configure the NVGRE encapsulation for flows.\n\n" + , list_pkt_forwarding_modes() ); } @@ -806,6 +834,9 @@ static void cmd_help_long_parsed(void *parsed_result, " duplex (half|full|auto)\n" " Set speed and duplex for all ports or port_id\n\n" + "port config (port_id|all) loopback (mode)\n" + " Set loopback mode for all ports or port_id\n\n" + "port config all (rxq|txq|rxd|txd) (value)\n" " Set number for rxq/txq/rxd/txd.\n\n" @@ -818,8 +849,8 @@ static void cmd_help_long_parsed(void *parsed_result, " Set crc-strip/scatter/rx-checksum/hardware-vlan/drop_en" " for ports.\n\n" - "port config all rss (all|ip|tcp|udp|sctp|ether|port|vxlan|" - "geneve|nvgre|none|)\n" + 
"port config all rss (all|default|ip|tcp|udp|sctp|" + "ether|port|vxlan|geneve|nvgre|none|)\n" " Set the RSS mode.\n\n" "port config port-id rss reta (hash,queue)[,(hash,queue)]\n" @@ -843,10 +874,18 @@ static void cmd_help_long_parsed(void *parsed_result, "port config mtu X value\n" " Set the MTU of port X to a given value\n\n" + "port config (port_id) (rxq|txq) (queue_id) ring_size (value)\n" + " Set a rx/tx queue's ring size configuration, the new" + " value will take effect after command that (re-)start the port" + " or command that setup the specific queue\n\n" + "port (port_id) (rxq|txq) (queue_id) (start|stop)\n" " Start/stop a rx/tx queue of port X. Only take effect" " when port X is started\n\n" + "port (port_id) (rxq|txq) (queue_id) setup\n" + " Setup a rx/tx queue of port X.\n\n" + "port config (port_id|all) l2-tunnel E-tag ether-type" " (value)\n" " Set the value of E-tag ether-type.\n\n" @@ -870,6 +909,9 @@ static void cmd_help_long_parsed(void *parsed_result, "port config (port_id) pctype (pctype_id) hash_inset|" "fdir_inset|fdir_flx_inset clear all" " Clear RSS|FDIR|FDIR_FLX input set completely for some pctype\n\n" + + "port config (port_id) udp_tunnel_port add|rm vxlan|geneve (udp_port)\n\n" + " Add/remove UDP tunnel port for tunneling offload\n\n" ); } @@ -1415,7 +1457,7 @@ cmdline_parse_inst_t cmd_config_speed_all = { struct cmd_config_speed_specific { cmdline_fixed_string_t port; cmdline_fixed_string_t keyword; - uint8_t id; + portid_t id; cmdline_fixed_string_t item1; cmdline_fixed_string_t item2; cmdline_fixed_string_t value1; @@ -1455,7 +1497,7 @@ cmdline_parse_token_string_t cmd_config_speed_specific_keyword = TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, keyword, "config"); cmdline_parse_token_num_t cmd_config_speed_specific_id = - TOKEN_NUM_INITIALIZER(struct cmd_config_speed_specific, id, UINT8); + TOKEN_NUM_INITIALIZER(struct cmd_config_speed_specific, id, UINT16); cmdline_parse_token_string_t cmd_config_speed_specific_item1 = TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, item1, "speed"); @@ -1487,6 +1529,122 @@ cmdline_parse_inst_t cmd_config_speed_specific = { }, }; +/* *** configure loopback for all ports *** */ +struct cmd_config_loopback_all { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t all; + cmdline_fixed_string_t item; + uint32_t mode; +}; + +static void +cmd_config_loopback_all_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_config_loopback_all *res = parsed_result; + portid_t pid; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + + RTE_ETH_FOREACH_DEV(pid) { + ports[pid].dev_conf.lpbk_mode = res->mode; + } + + cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1); +} + +cmdline_parse_token_string_t cmd_config_loopback_all_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, port, "port"); +cmdline_parse_token_string_t cmd_config_loopback_all_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, keyword, + "config"); +cmdline_parse_token_string_t cmd_config_loopback_all_all = + TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, all, "all"); +cmdline_parse_token_string_t cmd_config_loopback_all_item = + TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, item, + "loopback"); +cmdline_parse_token_num_t cmd_config_loopback_all_mode = + TOKEN_NUM_INITIALIZER(struct cmd_config_loopback_all, mode, UINT32); + +cmdline_parse_inst_t 
cmd_config_loopback_all = { + .f = cmd_config_loopback_all_parsed, + .data = NULL, + .help_str = "port config all loopback ", + .tokens = { + (void *)&cmd_config_loopback_all_port, + (void *)&cmd_config_loopback_all_keyword, + (void *)&cmd_config_loopback_all_all, + (void *)&cmd_config_loopback_all_item, + (void *)&cmd_config_loopback_all_mode, + NULL, + }, +}; + +/* *** configure loopback for specific port *** */ +struct cmd_config_loopback_specific { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + uint16_t port_id; + cmdline_fixed_string_t item; + uint32_t mode; +}; + +static void +cmd_config_loopback_specific_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_config_loopback_specific *res = parsed_result; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %u first\n", res->port_id); + return; + } + + ports[res->port_id].dev_conf.lpbk_mode = res->mode; + + cmd_reconfig_device_queue(res->port_id, 1, 1); +} + + +cmdline_parse_token_string_t cmd_config_loopback_specific_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_specific, port, + "port"); +cmdline_parse_token_string_t cmd_config_loopback_specific_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_specific, keyword, + "config"); +cmdline_parse_token_num_t cmd_config_loopback_specific_id = + TOKEN_NUM_INITIALIZER(struct cmd_config_loopback_specific, port_id, + UINT16); +cmdline_parse_token_string_t cmd_config_loopback_specific_item = + TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_specific, item, + "loopback"); +cmdline_parse_token_num_t cmd_config_loopback_specific_mode = + TOKEN_NUM_INITIALIZER(struct cmd_config_loopback_specific, mode, + UINT32); + +cmdline_parse_inst_t cmd_config_loopback_specific = { + .f = cmd_config_loopback_specific_parsed, + .data = NULL, + .help_str = "port config loopback ", + .tokens = { + (void *)&cmd_config_loopback_specific_port, + (void *)&cmd_config_loopback_specific_keyword, + (void *)&cmd_config_loopback_specific_id, + (void *)&cmd_config_loopback_specific_item, + (void *)&cmd_config_loopback_specific_mode, + NULL, + }, +}; + /* *** configure txq/rxq, txd/rxd *** */ struct cmd_config_rx_tx { cmdline_fixed_string_t port; @@ -1739,11 +1897,13 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result, port = &ports[pid]; rx_offloads = port->dev_conf.rxmode.offloads; if (!strcmp(res->name, "crc-strip")) { - if (!strcmp(res->value, "on")) + if (!strcmp(res->value, "on")) { rx_offloads |= DEV_RX_OFFLOAD_CRC_STRIP; - else if (!strcmp(res->value, "off")) + rx_offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; + } else if (!strcmp(res->value, "off")) { + rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC; rx_offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP; - else { + } else { printf("Unknown parameter\n"); return; } @@ -1879,8 +2039,11 @@ cmd_config_rss_parsed(void *parsed_result, { struct cmd_config_rss *res = parsed_result; struct rte_eth_rss_conf rss_conf = { .rss_key_len = 0, }; + struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; + int use_default = 0; + int all_updated = 1; int diag; - uint8_t i; + uint16_t i; if (!strcmp(res->value, "all")) rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | @@ -1906,6 +2069,8 @@ cmd_config_rss_parsed(void *parsed_result, rss_conf.rss_hf = ETH_RSS_NVGRE; else if (!strcmp(res->value, "none")) rss_conf.rss_hf = 0; + else if (!strcmp(res->value, "default")) + use_default = 1; else if 
(isdigit(res->value[0]) && atoi(res->value) > 0 && atoi(res->value) < 64) rss_conf.rss_hf = 1ULL << atoi(res->value); @@ -1914,13 +2079,32 @@ cmd_config_rss_parsed(void *parsed_result, return; } rss_conf.rss_key = NULL; - for (i = 0; i < rte_eth_dev_count(); i++) { - diag = rte_eth_dev_rss_hash_update(i, &rss_conf); - if (diag < 0) + /* Update global configuration for RSS types. */ + RTE_ETH_FOREACH_DEV(i) { + struct rte_eth_rss_conf local_rss_conf; + + rte_eth_dev_info_get(i, &dev_info); + if (use_default) + rss_conf.rss_hf = dev_info.flow_type_rss_offloads; + + local_rss_conf = rss_conf; + local_rss_conf.rss_hf = rss_conf.rss_hf & + dev_info.flow_type_rss_offloads; + if (local_rss_conf.rss_hf != rss_conf.rss_hf) { + printf("Port %u modified RSS hash function based on hardware support," + "requested:%#"PRIx64" configured:%#"PRIx64"\n", + i, rss_conf.rss_hf, local_rss_conf.rss_hf); + } + diag = rte_eth_dev_rss_hash_update(i, &local_rss_conf); + if (diag < 0) { + all_updated = 0; printf("Configuration of RSS hash at ethernet port %d " "failed with error (%d): %s.\n", i, -diag, strerror(-diag)); + } } + if (all_updated && !use_default) + rss_hf = rss_conf.rss_hf; } cmdline_parse_token_string_t cmd_config_rss_port = @@ -1938,7 +2122,7 @@ cmdline_parse_inst_t cmd_config_rss = { .f = cmd_config_rss_parsed, .data = NULL, .help_str = "port config all rss " - "all|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none|", + "all|default|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none|", .tokens = { (void *)&cmd_config_rss_port, (void *)&cmd_config_rss_keyword, @@ -2067,6 +2251,102 @@ cmdline_parse_inst_t cmd_config_rss_hash_key = { }, }; +/* *** configure port rxq/txq ring size *** */ +struct cmd_config_rxtx_ring_size { + cmdline_fixed_string_t port; + cmdline_fixed_string_t config; + portid_t portid; + cmdline_fixed_string_t rxtxq; + uint16_t qid; + cmdline_fixed_string_t rsize; + uint16_t size; +}; + +static void +cmd_config_rxtx_ring_size_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_config_rxtx_ring_size *res = parsed_result; + struct rte_port *port; + uint8_t isrx; + + if (port_id_is_invalid(res->portid, ENABLED_WARN)) + return; + + if (res->portid == (portid_t)RTE_PORT_ALL) { + printf("Invalid port id\n"); + return; + } + + port = &ports[res->portid]; + + if (!strcmp(res->rxtxq, "rxq")) + isrx = 1; + else if (!strcmp(res->rxtxq, "txq")) + isrx = 0; + else { + printf("Unknown parameter\n"); + return; + } + + if (isrx && rx_queue_id_is_invalid(res->qid)) + return; + else if (!isrx && tx_queue_id_is_invalid(res->qid)) + return; + + if (isrx && res->size != 0 && res->size <= rx_free_thresh) { + printf("Invalid rx ring_size, must > rx_free_thresh: %d\n", + rx_free_thresh); + return; + } + + if (isrx) + port->nb_rx_desc[res->qid] = res->size; + else + port->nb_tx_desc[res->qid] = res->size; + + cmd_reconfig_device_queue(res->portid, 0, 1); +} + +cmdline_parse_token_string_t cmd_config_rxtx_ring_size_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size, + port, "port"); +cmdline_parse_token_string_t cmd_config_rxtx_ring_size_config = + TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size, + config, "config"); +cmdline_parse_token_num_t cmd_config_rxtx_ring_size_portid = + TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_ring_size, + portid, UINT16); +cmdline_parse_token_string_t cmd_config_rxtx_ring_size_rxtxq = + TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size, + rxtxq, "rxq#txq"); 
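The cmd_config_rss handler above, like the pipeline_ethdev_setup() hunk earlier in this patch, now masks the requested hash types against dev_info.flow_type_rss_offloads and reports what the hardware cannot honour instead of failing outright. A minimal sketch of that pattern in isolation (the helper name and message wording are illustrative, not testpmd's):

	#include <stdio.h>
	#include <inttypes.h>
	#include <rte_ethdev.h>

	/* Apply an RSS hash-type request to one port: types the NIC
	 * cannot hash on are masked out and the user is told what
	 * remains before the update is committed.
	 */
	static int
	apply_rss_hf(uint16_t port_id, uint64_t requested_hf)
	{
		struct rte_eth_dev_info dev_info;
		struct rte_eth_rss_conf rss_conf = { .rss_key = NULL };

		rte_eth_dev_info_get(port_id, &dev_info);
		rss_conf.rss_hf = requested_hf & dev_info.flow_type_rss_offloads;
		if (rss_conf.rss_hf != requested_hf)
			printf("Port %u: RSS hf trimmed %#"PRIx64" -> %#"PRIx64"\n",
			       port_id, requested_hf, rss_conf.rss_hf);
		return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	}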
+cmdline_parse_token_num_t cmd_config_rxtx_ring_size_qid = + TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_ring_size, + qid, UINT16); +cmdline_parse_token_string_t cmd_config_rxtx_ring_size_rsize = + TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size, + rsize, "ring_size"); +cmdline_parse_token_num_t cmd_config_rxtx_ring_size_size = + TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_ring_size, + size, UINT16); + +cmdline_parse_inst_t cmd_config_rxtx_ring_size = { + .f = cmd_config_rxtx_ring_size_parsed, + .data = NULL, + .help_str = "port config rxq|txq ring_size ", + .tokens = { + (void *)&cmd_config_rxtx_ring_size_port, + (void *)&cmd_config_rxtx_ring_size_config, + (void *)&cmd_config_rxtx_ring_size_portid, + (void *)&cmd_config_rxtx_ring_size_rxtxq, + (void *)&cmd_config_rxtx_ring_size_qid, + (void *)&cmd_config_rxtx_ring_size_rsize, + (void *)&cmd_config_rxtx_ring_size_size, + NULL, + }, +}; + /* *** configure port rxq/txq start/stop *** */ struct cmd_config_rxtx_queue { cmdline_fixed_string_t port; @@ -2152,7 +2432,7 @@ cmdline_parse_inst_t cmd_config_rxtx_queue = { .data = NULL, .help_str = "port rxq|txq start|stop", .tokens = { - (void *)&cmd_config_speed_all_port, + (void *)&cmd_config_rxtx_queue_port, (void *)&cmd_config_rxtx_queue_portid, (void *)&cmd_config_rxtx_queue_rxtxq, (void *)&cmd_config_rxtx_queue_qid, @@ -2161,6 +2441,117 @@ cmdline_parse_inst_t cmd_config_rxtx_queue = { }, }; +/* *** configure port rxq/txq setup *** */ +struct cmd_setup_rxtx_queue { + cmdline_fixed_string_t port; + portid_t portid; + cmdline_fixed_string_t rxtxq; + uint16_t qid; + cmdline_fixed_string_t setup; +}; + +/* Common CLI fields for queue setup */ +cmdline_parse_token_string_t cmd_setup_rxtx_queue_port = + TOKEN_STRING_INITIALIZER(struct cmd_setup_rxtx_queue, port, "port"); +cmdline_parse_token_num_t cmd_setup_rxtx_queue_portid = + TOKEN_NUM_INITIALIZER(struct cmd_setup_rxtx_queue, portid, UINT16); +cmdline_parse_token_string_t cmd_setup_rxtx_queue_rxtxq = + TOKEN_STRING_INITIALIZER(struct cmd_setup_rxtx_queue, rxtxq, "rxq#txq"); +cmdline_parse_token_num_t cmd_setup_rxtx_queue_qid = + TOKEN_NUM_INITIALIZER(struct cmd_setup_rxtx_queue, qid, UINT16); +cmdline_parse_token_string_t cmd_setup_rxtx_queue_setup = + TOKEN_STRING_INITIALIZER(struct cmd_setup_rxtx_queue, setup, "setup"); + +static void +cmd_setup_rxtx_queue_parsed( + void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_setup_rxtx_queue *res = parsed_result; + struct rte_port *port; + struct rte_mempool *mp; + unsigned int socket_id; + uint8_t isrx = 0; + int ret; + + if (port_id_is_invalid(res->portid, ENABLED_WARN)) + return; + + if (res->portid == (portid_t)RTE_PORT_ALL) { + printf("Invalid port id\n"); + return; + } + + if (!strcmp(res->rxtxq, "rxq")) + isrx = 1; + else if (!strcmp(res->rxtxq, "txq")) + isrx = 0; + else { + printf("Unknown parameter\n"); + return; + } + + if (isrx && rx_queue_id_is_invalid(res->qid)) { + printf("Invalid rx queue\n"); + return; + } else if (!isrx && tx_queue_id_is_invalid(res->qid)) { + printf("Invalid tx queue\n"); + return; + } + + port = &ports[res->portid]; + if (isrx) { + socket_id = rxring_numa[res->portid]; + if (!numa_support || socket_id == NUMA_NO_CONFIG) + socket_id = port->socket_id; + + mp = mbuf_pool_find(socket_id); + if (mp == NULL) { + printf("Failed to setup RX queue: " + "No mempool allocation" + " on the socket %d\n", + rxring_numa[res->portid]); + return; + } + ret = rte_eth_rx_queue_setup(res->portid, + 
res->qid, + port->nb_rx_desc[res->qid], + socket_id, + &port->rx_conf[res->qid], + mp); + if (ret) + printf("Failed to setup RX queue\n"); + } else { + socket_id = txring_numa[res->portid]; + if (!numa_support || socket_id == NUMA_NO_CONFIG) + socket_id = port->socket_id; + + ret = rte_eth_tx_queue_setup(res->portid, + res->qid, + port->nb_tx_desc[res->qid], + socket_id, + &port->tx_conf[res->qid]); + if (ret) + printf("Failed to setup TX queue\n"); + } +} + +cmdline_parse_inst_t cmd_setup_rxtx_queue = { + .f = cmd_setup_rxtx_queue_parsed, + .data = NULL, + .help_str = "port rxq|txq setup", + .tokens = { + (void *)&cmd_setup_rxtx_queue_port, + (void *)&cmd_setup_rxtx_queue_portid, + (void *)&cmd_setup_rxtx_queue_rxtxq, + (void *)&cmd_setup_rxtx_queue_qid, + (void *)&cmd_setup_rxtx_queue_setup, + NULL, + }, +}; + + /* *** Configure RSS RETA *** */ struct cmd_config_rss_reta { cmdline_fixed_string_t port; @@ -2599,6 +2990,8 @@ cmd_config_burst_parsed(void *parsed_result, __attribute__((unused)) void *data) { struct cmd_config_burst *res = parsed_result; + struct rte_eth_dev_info dev_info; + uint16_t rec_nb_pkts; if (!all_ports_stopped()) { printf("Please stop all ports first\n"); @@ -2606,11 +2999,34 @@ cmd_config_burst_parsed(void *parsed_result, } if (!strcmp(res->name, "burst")) { - if (res->value < 1 || res->value > MAX_PKT_BURST) { + if (res->value == 0) { + /* If user gives a value of zero, query the PMD for + * its recommended Rx burst size. Testpmd uses a single + * size for all ports, so assume all ports are the same + * NIC model and use the values from Port 0. + */ + rte_eth_dev_info_get(0, &dev_info); + rec_nb_pkts = dev_info.default_rxportconf.burst_size; + + if (rec_nb_pkts == 0) { + printf("PMD does not recommend a burst size.\n" + "User provided value must be between" + " 1 and %d\n", MAX_PKT_BURST); + return; + } else if (rec_nb_pkts > MAX_PKT_BURST) { + printf("PMD recommended burst size of %d" + " exceeds maximum value of %d\n", + rec_nb_pkts, MAX_PKT_BURST); + return; + } + printf("Using PMD-provided burst value of %d\n", + rec_nb_pkts); + nb_pkt_per_burst = rec_nb_pkts; + } else if (res->value > MAX_PKT_BURST) { printf("burst must be >= 1 && <= %d\n", MAX_PKT_BURST); return; - } - nb_pkt_per_burst = res->value; + } else + nb_pkt_per_burst = res->value; } else { printf("Unknown parameter\n"); return; @@ -4013,6 +4429,12 @@ check_tunnel_tso_nic_support(portid_t port_id) if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) printf("Warning: GENEVE TUNNEL TSO not supported therefore " "not enabled for port %d\n", port_id); + if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO)) + printf("Warning: IP TUNNEL TSO not supported therefore " + "not enabled for port %d\n", port_id); + if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO)) + printf("Warning: UDP TUNNEL TSO not supported therefore " + "not enabled for port %d\n", port_id); return dev_info; } @@ -4040,13 +4462,17 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, ~(DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_IPIP_TNL_TSO | - DEV_TX_OFFLOAD_GENEVE_TNL_TSO); + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO); printf("TSO for tunneled packets is disabled\n"); } else { uint64_t tso_offloads = (DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_IPIP_TNL_TSO | - DEV_TX_OFFLOAD_GENEVE_TNL_TSO); + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO); 
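A few hunks above, `port config all burst 0` becomes a request for the PMD's own preferred burst size via dev_info.default_rxportconf. Pulled out of the command handler, the lookup reduces to the sketch below (the 32-packet fallback is an assumption for illustration; testpmd itself rejects the command when the driver publishes no recommendation):

	#include <rte_ethdev.h>

	/* Preferred Rx burst size for a port; drivers that publish no
	 * recommendation leave default_rxportconf zeroed.
	 */
	static uint16_t
	recommended_rx_burst(uint16_t port_id)
	{
		struct rte_eth_dev_info dev_info;

		rte_eth_dev_info_get(port_id, &dev_info);
		if (dev_info.default_rxportconf.burst_size != 0)
			return dev_info.default_rxportconf.burst_size;
		return 32;	/* fallback, not from testpmd */
	}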
ports[res->port_id].dev_conf.txmode.offloads |= (tso_offloads & dev_info.tx_offload_capa); @@ -4399,8 +4825,9 @@ cmd_gso_show_parsed(void *parsed_result, if (gso_ports[res->cmd_pid].enable) { printf("Max GSO'd packet size: %uB\n" "Supported GSO types: TCP/IPv4, " - "VxLAN with inner TCP/IPv4 packet, " - "GRE with inner TCP/IPv4 packet\n", + "UDP/IPv4, VxLAN with inner " + "TCP/IPv4 packet, GRE with inner " + "TCP/IPv4 packet\n", gso_max_segment_size); } else printf("GSO is not enabled on Port %u\n", res->cmd_pid); @@ -5402,7 +5829,7 @@ static void cmd_create_bonded_device_parsed(void *parsed_result, port_id); /* Update number of ports */ - nb_ports = rte_eth_dev_count(); + nb_ports = rte_eth_dev_count_avail(); reconfig(port_id, res->socket); rte_eth_promiscuous_enable(port_id); } @@ -5511,11 +5938,6 @@ static void cmd_set_bond_mon_period_parsed(void *parsed_result, struct cmd_set_bond_mon_period_result *res = parsed_result; int ret; - if (res->port_num >= nb_ports) { - printf("Port id %d must be less than %d\n", res->port_num, nb_ports); - return; - } - ret = rte_eth_bond_link_monitoring_set(res->port_num, res->period_ms); /* check the return value and print it if is < 0 */ @@ -5572,12 +5994,6 @@ cmd_set_bonding_agg_mode(void *parsed_result, struct cmd_set_bonding_agg_mode_policy_result *res = parsed_result; uint8_t policy = AGG_BANDWIDTH; - if (res->port_num >= nb_ports) { - printf("Port id %d must be less than %d\n", - res->port_num, nb_ports); - return; - } - if (!strcmp(res->policy, "bandwidth")) policy = AGG_BANDWIDTH; else if (!strcmp(res->policy, "stable")) @@ -8278,6 +8694,89 @@ cmdline_parse_inst_t cmd_tunnel_udp_config = { }, }; +struct cmd_config_tunnel_udp_port { + cmdline_fixed_string_t port; + cmdline_fixed_string_t config; + portid_t port_id; + cmdline_fixed_string_t udp_tunnel_port; + cmdline_fixed_string_t action; + cmdline_fixed_string_t tunnel_type; + uint16_t udp_port; +}; + +static void +cmd_cfg_tunnel_udp_port_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_config_tunnel_udp_port *res = parsed_result; + struct rte_eth_udp_tunnel tunnel_udp; + int ret = 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + tunnel_udp.udp_port = res->udp_port; + + if (!strcmp(res->tunnel_type, "vxlan")) { + tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN; + } else if (!strcmp(res->tunnel_type, "geneve")) { + tunnel_udp.prot_type = RTE_TUNNEL_TYPE_GENEVE; + } else { + printf("Invalid tunnel type\n"); + return; + } + + if (!strcmp(res->action, "add")) + ret = rte_eth_dev_udp_tunnel_port_add(res->port_id, + &tunnel_udp); + else + ret = rte_eth_dev_udp_tunnel_port_delete(res->port_id, + &tunnel_udp); + + if (ret < 0) + printf("udp tunneling port add error: (%s)\n", strerror(-ret)); +} + +cmdline_parse_token_string_t cmd_config_tunnel_udp_port_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, port, + "port"); +cmdline_parse_token_string_t cmd_config_tunnel_udp_port_config = + TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, config, + "config"); +cmdline_parse_token_num_t cmd_config_tunnel_udp_port_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_config_tunnel_udp_port, port_id, + UINT16); +cmdline_parse_token_string_t cmd_config_tunnel_udp_port_tunnel_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, + udp_tunnel_port, + "udp_tunnel_port"); +cmdline_parse_token_string_t cmd_config_tunnel_udp_port_action = + TOKEN_STRING_INITIALIZER(struct 
cmd_config_tunnel_udp_port, action, + "add#rm"); +cmdline_parse_token_string_t cmd_config_tunnel_udp_port_tunnel_type = + TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, tunnel_type, + "vxlan#geneve"); +cmdline_parse_token_num_t cmd_config_tunnel_udp_port_value = + TOKEN_NUM_INITIALIZER(struct cmd_config_tunnel_udp_port, udp_port, + UINT16); + +cmdline_parse_inst_t cmd_cfg_tunnel_udp_port = { + .f = cmd_cfg_tunnel_udp_port_parsed, + .data = NULL, + .help_str = "port config udp_tunnel_port add|rm vxlan|geneve ", + .tokens = { + (void *)&cmd_config_tunnel_udp_port_port, + (void *)&cmd_config_tunnel_udp_port_config, + (void *)&cmd_config_tunnel_udp_port_port_id, + (void *)&cmd_config_tunnel_udp_port_tunnel_port, + (void *)&cmd_config_tunnel_udp_port_action, + (void *)&cmd_config_tunnel_udp_port_tunnel_type, + (void *)&cmd_config_tunnel_udp_port_value, + NULL, + }, +}; + /* *** GLOBAL CONFIG *** */ struct cmd_global_config_result { cmdline_fixed_string_t cmd; @@ -8621,7 +9120,7 @@ static void cmd_dump_parsed(void *parsed_result, else if (!strcmp(res->dump, "dump_mempool")) rte_mempool_list_dump(stdout); else if (!strcmp(res->dump, "dump_devargs")) - rte_eal_devargs_dump(stdout); + rte_devargs_dump(stdout); else if (!strcmp(res->dump, "dump_log_types")) rte_log_dump(stdout); } @@ -10725,11 +11224,6 @@ cmd_flow_director_mask_parsed(void *parsed_result, struct rte_eth_fdir_masks *mask; struct rte_port *port; - if (res->port_id > nb_ports) { - printf("Invalid port, range is [0, %d]\n", nb_ports - 1); - return; - } - port = &ports[res->port_id]; /** Check if the port is not started **/ if (port->port_status != RTE_PORT_STOPPED) { @@ -10926,11 +11420,6 @@ cmd_flow_director_flex_mask_parsed(void *parsed_result, uint16_t i; int ret; - if (res->port_id > nb_ports) { - printf("Invalid port, range is [0, %d]\n", nb_ports - 1); - return; - } - port = &ports[res->port_id]; /** Check if the port is not started **/ if (port->port_status != RTE_PORT_STOPPED) { @@ -11082,11 +11571,6 @@ cmd_flow_director_flxpld_parsed(void *parsed_result, struct rte_port *port; int ret = 0; - if (res->port_id > nb_ports) { - printf("Invalid port, range is [0, %d]\n", nb_ports - 1); - return; - } - port = &ports[res->port_id]; /** Check if the port is not started **/ if (port->port_status != RTE_PORT_STOPPED) { @@ -11728,7 +12212,7 @@ struct cmd_config_l2_tunnel_eth_type_result { cmdline_fixed_string_t port; cmdline_fixed_string_t config; cmdline_fixed_string_t all; - uint8_t id; + portid_t id; cmdline_fixed_string_t l2_tunnel; cmdline_fixed_string_t l2_tunnel_type; cmdline_fixed_string_t eth_type; @@ -11750,7 +12234,7 @@ cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_all_str = cmdline_parse_token_num_t cmd_config_l2_tunnel_eth_type_id = TOKEN_NUM_INITIALIZER (struct cmd_config_l2_tunnel_eth_type_result, - id, UINT8); + id, UINT16); cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_l2_tunnel = TOKEN_STRING_INITIALIZER (struct cmd_config_l2_tunnel_eth_type_result, @@ -11863,7 +12347,7 @@ struct cmd_config_l2_tunnel_en_dis_result { cmdline_fixed_string_t port; cmdline_fixed_string_t config; cmdline_fixed_string_t all; - uint8_t id; + portid_t id; cmdline_fixed_string_t l2_tunnel; cmdline_fixed_string_t l2_tunnel_type; cmdline_fixed_string_t en_dis; @@ -11884,7 +12368,7 @@ cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_all_str = cmdline_parse_token_num_t cmd_config_l2_tunnel_en_dis_id = TOKEN_NUM_INITIALIZER (struct cmd_config_l2_tunnel_en_dis_result, - id, UINT8); + id, UINT16); 
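The new `port config ... udp_tunnel_port` command above reduces to a pair of ethdev calls. A self-contained sketch of the add path, using VXLAN's IANA-assigned port 4789 for illustration:

	#include <string.h>
	#include <rte_ethdev.h>

	/* Register UDP dst port 4789 as VXLAN so the NIC can recognise
	 * the encapsulation and apply tunnel offloads (mirrors
	 * cmd_cfg_tunnel_udp_port_parsed() above).
	 */
	static int
	add_vxlan_udp_port(uint16_t port_id)
	{
		struct rte_eth_udp_tunnel tunnel_udp;

		memset(&tunnel_udp, 0, sizeof(tunnel_udp));
		tunnel_udp.udp_port = 4789;
		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
		return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
	}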
cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_l2_tunnel = TOKEN_STRING_INITIALIZER (struct cmd_config_l2_tunnel_en_dis_result, @@ -12861,7 +13345,7 @@ cmd_set_tx_loopback_parsed( if (ret == -ENOTSUP) ret = rte_pmd_bnxt_set_tx_loopback(res->port_id, is_on); #endif -#ifdef RTE_LIBRTE_DPAA_PMD +#if defined RTE_LIBRTE_DPAA_BUS && defined RTE_LIBRTE_DPAA_PMD if (ret == -ENOTSUP) ret = rte_pmd_dpaa_set_tx_loopback(res->port_id, is_on); #endif @@ -14356,20 +14840,14 @@ static void cmd_set_port_tm_hierarchy_default_parsed(void *parsed_result, p = &ports[port_id]; - /* Port tm flag */ - if (p->softport.tm_flag == 0) { - printf(" tm not enabled on port %u (error)\n", port_id); - return; - } - /* Forward mode: tm */ - if (strcmp(cur_fwd_config.fwd_eng->fwd_mode_name, "tm")) { - printf(" tm mode not enabled(error)\n"); + if (strcmp(cur_fwd_config.fwd_eng->fwd_mode_name, "softnic")) { + printf(" softnicfwd mode not enabled(error)\n"); return; } /* Set the default tm hierarchy */ - p->softport.tm.default_hierarchy_enable = 1; + p->softport.default_tm_hierarchy_enable = 1; } cmdline_parse_inst_t cmd_set_port_tm_hierarchy_default = { @@ -14388,10 +14866,330 @@ cmdline_parse_inst_t cmd_set_port_tm_hierarchy_default = { }; #endif -/* Strict link priority scheduling mode setting */ -static void -cmd_strict_link_prio_parsed( - void *parsed_result, +/** Set VXLAN encapsulation details */ +struct cmd_set_vxlan_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t vxlan; + cmdline_fixed_string_t pos_token; + cmdline_fixed_string_t ip_version; + uint32_t vlan_present:1; + uint32_t vni; + uint16_t udp_src; + uint16_t udp_dst; + cmdline_ipaddr_t ip_src; + cmdline_ipaddr_t ip_dst; + uint16_t tci; + struct ether_addr eth_src; + struct ether_addr eth_dst; +}; + +cmdline_parse_token_string_t cmd_set_vxlan_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, set, "set"); +cmdline_parse_token_string_t cmd_set_vxlan_vxlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, vxlan, "vxlan"); +cmdline_parse_token_string_t cmd_set_vxlan_vxlan_with_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, vxlan, + "vxlan-with-vlan"); +cmdline_parse_token_string_t cmd_set_vxlan_ip_version = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "ip-version"); +cmdline_parse_token_string_t cmd_set_vxlan_ip_version_value = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, ip_version, + "ipv4#ipv6"); +cmdline_parse_token_string_t cmd_set_vxlan_vni = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "vni"); +cmdline_parse_token_num_t cmd_set_vxlan_vni_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, vni, UINT32); +cmdline_parse_token_string_t cmd_set_vxlan_udp_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "udp-src"); +cmdline_parse_token_num_t cmd_set_vxlan_udp_src_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, udp_src, UINT16); +cmdline_parse_token_string_t cmd_set_vxlan_udp_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "udp-dst"); +cmdline_parse_token_num_t cmd_set_vxlan_udp_dst_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, udp_dst, UINT16); +cmdline_parse_token_string_t cmd_set_vxlan_ip_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "ip-src"); +cmdline_parse_token_ipaddr_t cmd_set_vxlan_ip_src_value = + TOKEN_IPADDR_INITIALIZER(struct cmd_set_vxlan_result, ip_src); +cmdline_parse_token_string_t cmd_set_vxlan_ip_dst = + 
TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "ip-dst"); +cmdline_parse_token_ipaddr_t cmd_set_vxlan_ip_dst_value = + TOKEN_IPADDR_INITIALIZER(struct cmd_set_vxlan_result, ip_dst); +cmdline_parse_token_string_t cmd_set_vxlan_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "vlan-tci"); +cmdline_parse_token_num_t cmd_set_vxlan_vlan_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, tci, UINT16); +cmdline_parse_token_string_t cmd_set_vxlan_eth_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "eth-src"); +cmdline_parse_token_etheraddr_t cmd_set_vxlan_eth_src_value = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_vxlan_result, eth_src); +cmdline_parse_token_string_t cmd_set_vxlan_eth_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "eth-dst"); +cmdline_parse_token_etheraddr_t cmd_set_vxlan_eth_dst_value = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_vxlan_result, eth_dst); + +static void cmd_set_vxlan_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_set_vxlan_result *res = parsed_result; + union { + uint32_t vxlan_id; + uint8_t vni[4]; + } id = { + .vxlan_id = rte_cpu_to_be_32(res->vni) & RTE_BE32(0x00ffffff), + }; + + if (strcmp(res->vxlan, "vxlan") == 0) + vxlan_encap_conf.select_vlan = 0; + else if (strcmp(res->vxlan, "vxlan-with-vlan") == 0) + vxlan_encap_conf.select_vlan = 1; + if (strcmp(res->ip_version, "ipv4") == 0) + vxlan_encap_conf.select_ipv4 = 1; + else if (strcmp(res->ip_version, "ipv6") == 0) + vxlan_encap_conf.select_ipv4 = 0; + else + return; + rte_memcpy(vxlan_encap_conf.vni, &id.vni[1], 3); + vxlan_encap_conf.udp_src = rte_cpu_to_be_16(res->udp_src); + vxlan_encap_conf.udp_dst = rte_cpu_to_be_16(res->udp_dst); + if (vxlan_encap_conf.select_ipv4) { + IPV4_ADDR_TO_UINT(res->ip_src, vxlan_encap_conf.ipv4_src); + IPV4_ADDR_TO_UINT(res->ip_dst, vxlan_encap_conf.ipv4_dst); + } else { + IPV6_ADDR_TO_ARRAY(res->ip_src, vxlan_encap_conf.ipv6_src); + IPV6_ADDR_TO_ARRAY(res->ip_dst, vxlan_encap_conf.ipv6_dst); + } + if (vxlan_encap_conf.select_vlan) + vxlan_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci); + rte_memcpy(vxlan_encap_conf.eth_src, res->eth_src.addr_bytes, + ETHER_ADDR_LEN); + rte_memcpy(vxlan_encap_conf.eth_dst, res->eth_dst.addr_bytes, + ETHER_ADDR_LEN); +} + +cmdline_parse_inst_t cmd_set_vxlan = { + .f = cmd_set_vxlan_parsed, + .data = NULL, + .help_str = "set vxlan ip-version ipv4|ipv6 vni udp-src" + " udp-dst ip-src ip-dst " + " eth-src eth-dst ", + .tokens = { + (void *)&cmd_set_vxlan_set, + (void *)&cmd_set_vxlan_vxlan, + (void *)&cmd_set_vxlan_ip_version, + (void *)&cmd_set_vxlan_ip_version_value, + (void *)&cmd_set_vxlan_vni, + (void *)&cmd_set_vxlan_vni_value, + (void *)&cmd_set_vxlan_udp_src, + (void *)&cmd_set_vxlan_udp_src_value, + (void *)&cmd_set_vxlan_udp_dst, + (void *)&cmd_set_vxlan_udp_dst_value, + (void *)&cmd_set_vxlan_ip_src, + (void *)&cmd_set_vxlan_ip_src_value, + (void *)&cmd_set_vxlan_ip_dst, + (void *)&cmd_set_vxlan_ip_dst_value, + (void *)&cmd_set_vxlan_eth_src, + (void *)&cmd_set_vxlan_eth_src_value, + (void *)&cmd_set_vxlan_eth_dst, + (void *)&cmd_set_vxlan_eth_dst_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_vxlan_with_vlan = { + .f = cmd_set_vxlan_parsed, + .data = NULL, + .help_str = "set vxlan-with-vlan ip-version ipv4|ipv6 vni " + " udp-src udp-dst ip-src ip-dst" + " vlan-tci eth-src eth-dst" + " ", + .tokens = { + (void *)&cmd_set_vxlan_set, + 
(void *)&cmd_set_vxlan_vxlan_with_vlan, + (void *)&cmd_set_vxlan_ip_version, + (void *)&cmd_set_vxlan_ip_version_value, + (void *)&cmd_set_vxlan_vni, + (void *)&cmd_set_vxlan_vni_value, + (void *)&cmd_set_vxlan_udp_src, + (void *)&cmd_set_vxlan_udp_src_value, + (void *)&cmd_set_vxlan_udp_dst, + (void *)&cmd_set_vxlan_udp_dst_value, + (void *)&cmd_set_vxlan_ip_src, + (void *)&cmd_set_vxlan_ip_src_value, + (void *)&cmd_set_vxlan_ip_dst, + (void *)&cmd_set_vxlan_ip_dst_value, + (void *)&cmd_set_vxlan_vlan, + (void *)&cmd_set_vxlan_vlan_value, + (void *)&cmd_set_vxlan_eth_src, + (void *)&cmd_set_vxlan_eth_src_value, + (void *)&cmd_set_vxlan_eth_dst, + (void *)&cmd_set_vxlan_eth_dst_value, + NULL, + }, +}; + +/** Set NVGRE encapsulation details */ +struct cmd_set_nvgre_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t nvgre; + cmdline_fixed_string_t pos_token; + cmdline_fixed_string_t ip_version; + uint32_t tni; + cmdline_ipaddr_t ip_src; + cmdline_ipaddr_t ip_dst; + uint16_t tci; + struct ether_addr eth_src; + struct ether_addr eth_dst; +}; + +cmdline_parse_token_string_t cmd_set_nvgre_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, set, "set"); +cmdline_parse_token_string_t cmd_set_nvgre_nvgre = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, nvgre, "nvgre"); +cmdline_parse_token_string_t cmd_set_nvgre_nvgre_with_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, nvgre, + "nvgre-with-vlan"); +cmdline_parse_token_string_t cmd_set_nvgre_ip_version = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token, + "ip-version"); +cmdline_parse_token_string_t cmd_set_nvgre_ip_version_value = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, ip_version, + "ipv4#ipv6"); +cmdline_parse_token_string_t cmd_set_nvgre_tni = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token, + "tni"); +cmdline_parse_token_num_t cmd_set_nvgre_tni_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_nvgre_result, tni, UINT32); +cmdline_parse_token_string_t cmd_set_nvgre_ip_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token, + "ip-src"); +cmdline_parse_token_num_t cmd_set_nvgre_ip_src_value = + TOKEN_IPADDR_INITIALIZER(struct cmd_set_nvgre_result, ip_src); +cmdline_parse_token_string_t cmd_set_nvgre_ip_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token, + "ip-dst"); +cmdline_parse_token_ipaddr_t cmd_set_nvgre_ip_dst_value = + TOKEN_IPADDR_INITIALIZER(struct cmd_set_nvgre_result, ip_dst); +cmdline_parse_token_string_t cmd_set_nvgre_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token, + "vlan-tci"); +cmdline_parse_token_num_t cmd_set_nvgre_vlan_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_nvgre_result, tci, UINT16); +cmdline_parse_token_string_t cmd_set_nvgre_eth_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token, + "eth-src"); +cmdline_parse_token_etheraddr_t cmd_set_nvgre_eth_src_value = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_nvgre_result, eth_src); +cmdline_parse_token_string_t cmd_set_nvgre_eth_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token, + "eth-dst"); +cmdline_parse_token_etheraddr_t cmd_set_nvgre_eth_dst_value = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_nvgre_result, eth_dst); + +static void cmd_set_nvgre_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_set_nvgre_result *res = parsed_result; + union { + uint32_t nvgre_tni; + uint8_t tni[4]; + } id = { + 
.nvgre_tni = rte_cpu_to_be_32(res->tni) & RTE_BE32(0x00ffffff), + }; + + if (strcmp(res->nvgre, "nvgre") == 0) + nvgre_encap_conf.select_vlan = 0; + else if (strcmp(res->nvgre, "nvgre-with-vlan") == 0) + nvgre_encap_conf.select_vlan = 1; + if (strcmp(res->ip_version, "ipv4") == 0) + nvgre_encap_conf.select_ipv4 = 1; + else if (strcmp(res->ip_version, "ipv6") == 0) + nvgre_encap_conf.select_ipv4 = 0; + else + return; + rte_memcpy(nvgre_encap_conf.tni, &id.tni[1], 3); + if (nvgre_encap_conf.select_ipv4) { + IPV4_ADDR_TO_UINT(res->ip_src, nvgre_encap_conf.ipv4_src); + IPV4_ADDR_TO_UINT(res->ip_dst, nvgre_encap_conf.ipv4_dst); + } else { + IPV6_ADDR_TO_ARRAY(res->ip_src, nvgre_encap_conf.ipv6_src); + IPV6_ADDR_TO_ARRAY(res->ip_dst, nvgre_encap_conf.ipv6_dst); + } + if (nvgre_encap_conf.select_vlan) + nvgre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci); + rte_memcpy(nvgre_encap_conf.eth_src, res->eth_src.addr_bytes, + ETHER_ADDR_LEN); + rte_memcpy(nvgre_encap_conf.eth_dst, res->eth_dst.addr_bytes, + ETHER_ADDR_LEN); +} + +cmdline_parse_inst_t cmd_set_nvgre = { + .f = cmd_set_nvgre_parsed, + .data = NULL, + .help_str = "set nvgre ip-version tni ip-src" + " ip-dst eth-src " + " eth-dst ", + .tokens = { + (void *)&cmd_set_nvgre_set, + (void *)&cmd_set_nvgre_nvgre, + (void *)&cmd_set_nvgre_ip_version, + (void *)&cmd_set_nvgre_ip_version_value, + (void *)&cmd_set_nvgre_tni, + (void *)&cmd_set_nvgre_tni_value, + (void *)&cmd_set_nvgre_ip_src, + (void *)&cmd_set_nvgre_ip_src_value, + (void *)&cmd_set_nvgre_ip_dst, + (void *)&cmd_set_nvgre_ip_dst_value, + (void *)&cmd_set_nvgre_eth_src, + (void *)&cmd_set_nvgre_eth_src_value, + (void *)&cmd_set_nvgre_eth_dst, + (void *)&cmd_set_nvgre_eth_dst_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_nvgre_with_vlan = { + .f = cmd_set_nvgre_parsed, + .data = NULL, + .help_str = "set nvgre-with-vlan ip-version tni " + " ip-src ip-dst vlan-tci " + " eth-src eth-dst ", + .tokens = { + (void *)&cmd_set_nvgre_set, + (void *)&cmd_set_nvgre_nvgre_with_vlan, + (void *)&cmd_set_nvgre_ip_version, + (void *)&cmd_set_nvgre_ip_version_value, + (void *)&cmd_set_nvgre_tni, + (void *)&cmd_set_nvgre_tni_value, + (void *)&cmd_set_nvgre_ip_src, + (void *)&cmd_set_nvgre_ip_src_value, + (void *)&cmd_set_nvgre_ip_dst, + (void *)&cmd_set_nvgre_ip_dst_value, + (void *)&cmd_set_nvgre_vlan, + (void *)&cmd_set_nvgre_vlan_value, + (void *)&cmd_set_nvgre_eth_src, + (void *)&cmd_set_nvgre_eth_src_value, + (void *)&cmd_set_nvgre_eth_dst, + (void *)&cmd_set_nvgre_eth_dst_value, + NULL, + }, +}; + +/* Strict link priority scheduling mode setting */ +static void +cmd_strict_link_prio_parsed( + void *parsed_result, __attribute__((unused)) struct cmdline *cl, __attribute__((unused)) void *data) { @@ -14467,11 +15265,6 @@ cmd_ddp_add_parsed( int file_num; int ret = -ENOTSUP; - if (res->port_id > nb_ports) { - printf("Invalid port, range is [0, %d]\n", nb_ports - 1); - return; - } - if (!all_ports_stopped()) { printf("Please stop all ports first\n"); return; @@ -14549,11 +15342,6 @@ cmd_ddp_del_parsed( uint32_t size; int ret = -ENOTSUP; - if (res->port_id > nb_ports) { - printf("Invalid port, range is [0, %d]\n", nb_ports - 1); - return; - } - if (!all_ports_stopped()) { printf("Please stop all ports first\n"); return; @@ -14850,12 +15638,12 @@ cmdline_parse_token_num_t cmd_ddp_get_list_port_id = static void cmd_ddp_get_list_parsed( - void *parsed_result, + __attribute__((unused)) void *parsed_result, __attribute__((unused)) struct cmdline *cl, __attribute__((unused)) void *data) { - struct 
cmd_ddp_get_list_result *res = parsed_result; #ifdef RTE_LIBRTE_I40E_PMD + struct cmd_ddp_get_list_result *res = parsed_result; struct rte_pmd_i40e_profile_list *p_list; struct rte_pmd_i40e_profile_info *p_info; uint32_t p_num; @@ -14864,11 +15652,6 @@ cmd_ddp_get_list_parsed( #endif int ret = -ENOTSUP; - if (res->port_id > nb_ports) { - printf("Invalid port, range is [0, %d]\n", nb_ports - 1); - return; - } - #ifdef RTE_LIBRTE_I40E_PMD size = PROFILE_INFO_SIZE * MAX_PROFILE_NUM + 4; p_list = (struct rte_pmd_i40e_profile_list *)malloc(size); @@ -14931,22 +15714,17 @@ struct cmd_cfg_input_set_result { static void cmd_cfg_input_set_parsed( - void *parsed_result, + __attribute__((unused)) void *parsed_result, __attribute__((unused)) struct cmdline *cl, __attribute__((unused)) void *data) { - struct cmd_cfg_input_set_result *res = parsed_result; #ifdef RTE_LIBRTE_I40E_PMD + struct cmd_cfg_input_set_result *res = parsed_result; enum rte_pmd_i40e_inset_type inset_type = INSET_NONE; struct rte_pmd_i40e_inset inset; #endif int ret = -ENOTSUP; - if (res->port_id > nb_ports) { - printf("Invalid port, range is [0, %d]\n", nb_ports - 1); - return; - } - if (!all_ports_stopped()) { printf("Please stop all ports first\n"); return; @@ -15059,22 +15837,17 @@ struct cmd_clear_input_set_result { static void cmd_clear_input_set_parsed( - void *parsed_result, + __attribute__((unused)) void *parsed_result, __attribute__((unused)) struct cmdline *cl, __attribute__((unused)) void *data) { - struct cmd_clear_input_set_result *res = parsed_result; #ifdef RTE_LIBRTE_I40E_PMD + struct cmd_clear_input_set_result *res = parsed_result; enum rte_pmd_i40e_inset_type inset_type = INSET_NONE; struct rte_pmd_i40e_inset inset; #endif int ret = -ENOTSUP; - if (res->port_id > nb_ports) { - printf("Invalid port, range is [0, %d]\n", nb_ports - 1); - return; - } - if (!all_ports_stopped()) { printf("Please stop all ports first\n"); return; @@ -16030,6 +16803,805 @@ cmdline_parse_inst_t cmd_load_from_file = { }, }; +/* Get Rx offloads capabilities */ +struct cmd_rx_offload_get_capa_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t rx_offload; + cmdline_fixed_string_t capabilities; +}; + +cmdline_parse_token_string_t cmd_rx_offload_get_capa_show = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_capa_result, + show, "show"); +cmdline_parse_token_string_t cmd_rx_offload_get_capa_port = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_capa_result, + port, "port"); +cmdline_parse_token_num_t cmd_rx_offload_get_capa_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_rx_offload_get_capa_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_rx_offload_get_capa_rx_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_capa_result, + rx_offload, "rx_offload"); +cmdline_parse_token_string_t cmd_rx_offload_get_capa_capabilities = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_capa_result, + capabilities, "capabilities"); + +static void +print_rx_offloads(uint64_t offloads) +{ + uint64_t single_offload; + int begin; + int end; + int bit; + + if (offloads == 0) + return; + + begin = __builtin_ctzll(offloads); + end = sizeof(offloads) * CHAR_BIT - __builtin_clzll(offloads); + + single_offload = 1 << begin; + for (bit = begin; bit < end; bit++) { + if (offloads & single_offload) + printf(" %s", + rte_eth_dev_rx_offload_name(single_offload)); + single_offload <<= 1; + } +} + +static void +cmd_rx_offload_get_capa_parsed( + void *parsed_result, 
+ __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_rx_offload_get_capa_result *res = parsed_result; + struct rte_eth_dev_info dev_info; + portid_t port_id = res->port_id; + uint64_t queue_offloads; + uint64_t port_offloads; + + rte_eth_dev_info_get(port_id, &dev_info); + queue_offloads = dev_info.rx_queue_offload_capa; + port_offloads = dev_info.rx_offload_capa ^ queue_offloads; + + printf("Rx Offloading Capabilities of port %d :\n", port_id); + printf(" Per Queue :"); + print_rx_offloads(queue_offloads); + + printf("\n"); + printf(" Per Port :"); + print_rx_offloads(port_offloads); + printf("\n\n"); +} + +cmdline_parse_inst_t cmd_rx_offload_get_capa = { + .f = cmd_rx_offload_get_capa_parsed, + .data = NULL, + .help_str = "show port rx_offload capabilities", + .tokens = { + (void *)&cmd_rx_offload_get_capa_show, + (void *)&cmd_rx_offload_get_capa_port, + (void *)&cmd_rx_offload_get_capa_port_id, + (void *)&cmd_rx_offload_get_capa_rx_offload, + (void *)&cmd_rx_offload_get_capa_capabilities, + NULL, + } +}; + +/* Get Rx offloads configuration */ +struct cmd_rx_offload_get_configuration_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t rx_offload; + cmdline_fixed_string_t configuration; +}; + +cmdline_parse_token_string_t cmd_rx_offload_get_configuration_show = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_configuration_result, + show, "show"); +cmdline_parse_token_string_t cmd_rx_offload_get_configuration_port = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_configuration_result, + port, "port"); +cmdline_parse_token_num_t cmd_rx_offload_get_configuration_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_rx_offload_get_configuration_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_rx_offload_get_configuration_rx_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_configuration_result, + rx_offload, "rx_offload"); +cmdline_parse_token_string_t cmd_rx_offload_get_configuration_configuration = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_configuration_result, + configuration, "configuration"); + +static void +cmd_rx_offload_get_configuration_parsed( + void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_rx_offload_get_configuration_result *res = parsed_result; + struct rte_eth_dev_info dev_info; + portid_t port_id = res->port_id; + struct rte_port *port = &ports[port_id]; + uint64_t port_offloads; + uint64_t queue_offloads; + uint16_t nb_rx_queues; + int q; + + printf("Rx Offloading Configuration of port %d :\n", port_id); + + port_offloads = port->dev_conf.rxmode.offloads; + printf(" Port :"); + print_rx_offloads(port_offloads); + printf("\n"); + + rte_eth_dev_info_get(port_id, &dev_info); + nb_rx_queues = dev_info.nb_rx_queues; + for (q = 0; q < nb_rx_queues; q++) { + queue_offloads = port->rx_conf[q].offloads; + printf(" Queue[%2d] :", q); + print_rx_offloads(queue_offloads); + printf("\n"); + } + printf("\n"); +} + +cmdline_parse_inst_t cmd_rx_offload_get_configuration = { + .f = cmd_rx_offload_get_configuration_parsed, + .data = NULL, + .help_str = "show port rx_offload configuration", + .tokens = { + (void *)&cmd_rx_offload_get_configuration_show, + (void *)&cmd_rx_offload_get_configuration_port, + (void *)&cmd_rx_offload_get_configuration_port_id, + (void *)&cmd_rx_offload_get_configuration_rx_offload, + (void 
*)&cmd_rx_offload_get_configuration_configuration, + NULL, + } +}; + +/* Enable/Disable a per port offloading */ +struct cmd_config_per_port_rx_offload_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t config; + portid_t port_id; + cmdline_fixed_string_t rx_offload; + cmdline_fixed_string_t offload; + cmdline_fixed_string_t on_off; +}; + +cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_port = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_rx_offload_result, + port, "port"); +cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_config = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_rx_offload_result, + config, "config"); +cmdline_parse_token_num_t cmd_config_per_port_rx_offload_result_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_per_port_rx_offload_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_rx_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_rx_offload_result, + rx_offload, "rx_offload"); +cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_rx_offload_result, + offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#" + "qinq_strip#outer_ipv4_cksum#macsec_strip#" + "header_split#vlan_filter#vlan_extend#jumbo_frame#" + "crc_strip#scatter#timestamp#security#keep_crc"); +cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_rx_offload_result, + on_off, "on#off"); + +static uint64_t +search_rx_offload(const char *name) +{ + uint64_t single_offload; + const char *single_name; + int found = 0; + unsigned int bit; + + single_offload = 1; + for (bit = 0; bit < sizeof(single_offload) * CHAR_BIT; bit++) { + single_name = rte_eth_dev_rx_offload_name(single_offload); + if (!strcasecmp(single_name, name)) { + found = 1; + break; + } else if (!strcasecmp(single_name, "UNKNOWN")) + break; + else if (single_name == NULL) + break; + single_offload <<= 1; + } + + if (found) + return single_offload; + + return 0; +} + +static void +cmd_config_per_port_rx_offload_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_config_per_port_rx_offload_result *res = parsed_result; + portid_t port_id = res->port_id; + struct rte_eth_dev_info dev_info; + struct rte_port *port = &ports[port_id]; + uint64_t single_offload; + uint16_t nb_rx_queues; + int q; + + if (port->port_status != RTE_PORT_STOPPED) { + printf("Error: Can't config offload when Port %d " + "is not stopped\n", port_id); + return; + } + + single_offload = search_rx_offload(res->offload); + if (single_offload == 0) { + printf("Unknown offload name: %s\n", res->offload); + return; + } + + rte_eth_dev_info_get(port_id, &dev_info); + nb_rx_queues = dev_info.nb_rx_queues; + if (!strcmp(res->on_off, "on")) { + port->dev_conf.rxmode.offloads |= single_offload; + for (q = 0; q < nb_rx_queues; q++) + port->rx_conf[q].offloads |= single_offload; + } else { + port->dev_conf.rxmode.offloads &= ~single_offload; + for (q = 0; q < nb_rx_queues; q++) + port->rx_conf[q].offloads &= ~single_offload; + } + + cmd_reconfig_device_queue(port_id, 1, 1); +} + +cmdline_parse_inst_t cmd_config_per_port_rx_offload = { + .f = cmd_config_per_port_rx_offload_parsed, + .data = NULL, + .help_str = "port config rx_offload vlan_strip|ipv4_cksum|" + "udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|" + 
"macsec_strip|header_split|vlan_filter|vlan_extend|" + "jumbo_frame|crc_strip|scatter|timestamp|security|keep_crc " + "on|off", + .tokens = { + (void *)&cmd_config_per_port_rx_offload_result_port, + (void *)&cmd_config_per_port_rx_offload_result_config, + (void *)&cmd_config_per_port_rx_offload_result_port_id, + (void *)&cmd_config_per_port_rx_offload_result_rx_offload, + (void *)&cmd_config_per_port_rx_offload_result_offload, + (void *)&cmd_config_per_port_rx_offload_result_on_off, + NULL, + } +}; + +/* Enable/Disable a per queue offloading */ +struct cmd_config_per_queue_rx_offload_result { + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t rxq; + uint16_t queue_id; + cmdline_fixed_string_t rx_offload; + cmdline_fixed_string_t offload; + cmdline_fixed_string_t on_off; +}; + +cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_port = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_rx_offload_result, + port, "port"); +cmdline_parse_token_num_t cmd_config_per_queue_rx_offload_result_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_per_queue_rx_offload_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_rxq = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_rx_offload_result, + rxq, "rxq"); +cmdline_parse_token_num_t cmd_config_per_queue_rx_offload_result_queue_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_per_queue_rx_offload_result, + queue_id, UINT16); +cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_rxoffload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_rx_offload_result, + rx_offload, "rx_offload"); +cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_rx_offload_result, + offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#" + "qinq_strip#outer_ipv4_cksum#macsec_strip#" + "header_split#vlan_filter#vlan_extend#jumbo_frame#" + "crc_strip#scatter#timestamp#security#keep_crc"); +cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_rx_offload_result, + on_off, "on#off"); + +static void +cmd_config_per_queue_rx_offload_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_config_per_queue_rx_offload_result *res = parsed_result; + struct rte_eth_dev_info dev_info; + portid_t port_id = res->port_id; + uint16_t queue_id = res->queue_id; + struct rte_port *port = &ports[port_id]; + uint64_t single_offload; + + if (port->port_status != RTE_PORT_STOPPED) { + printf("Error: Can't config offload when Port %d " + "is not stopped\n", port_id); + return; + } + + rte_eth_dev_info_get(port_id, &dev_info); + if (queue_id >= dev_info.nb_rx_queues) { + printf("Error: input queue_id should be 0 ... 
" + "%d\n", dev_info.nb_rx_queues - 1); + return; + } + + single_offload = search_rx_offload(res->offload); + if (single_offload == 0) { + printf("Unknown offload name: %s\n", res->offload); + return; + } + + if (!strcmp(res->on_off, "on")) + port->rx_conf[queue_id].offloads |= single_offload; + else + port->rx_conf[queue_id].offloads &= ~single_offload; + + cmd_reconfig_device_queue(port_id, 1, 1); +} + +cmdline_parse_inst_t cmd_config_per_queue_rx_offload = { + .f = cmd_config_per_queue_rx_offload_parsed, + .data = NULL, + .help_str = "port rxq rx_offload " + "vlan_strip|ipv4_cksum|" + "udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|" + "macsec_strip|header_split|vlan_filter|vlan_extend|" + "jumbo_frame|crc_strip|scatter|timestamp|security|keep_crc " + "on|off", + .tokens = { + (void *)&cmd_config_per_queue_rx_offload_result_port, + (void *)&cmd_config_per_queue_rx_offload_result_port_id, + (void *)&cmd_config_per_queue_rx_offload_result_rxq, + (void *)&cmd_config_per_queue_rx_offload_result_queue_id, + (void *)&cmd_config_per_queue_rx_offload_result_rxoffload, + (void *)&cmd_config_per_queue_rx_offload_result_offload, + (void *)&cmd_config_per_queue_rx_offload_result_on_off, + NULL, + } +}; + +/* Get Tx offloads capabilities */ +struct cmd_tx_offload_get_capa_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t tx_offload; + cmdline_fixed_string_t capabilities; +}; + +cmdline_parse_token_string_t cmd_tx_offload_get_capa_show = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_capa_result, + show, "show"); +cmdline_parse_token_string_t cmd_tx_offload_get_capa_port = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_capa_result, + port, "port"); +cmdline_parse_token_num_t cmd_tx_offload_get_capa_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_tx_offload_get_capa_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_tx_offload_get_capa_tx_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_capa_result, + tx_offload, "tx_offload"); +cmdline_parse_token_string_t cmd_tx_offload_get_capa_capabilities = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_capa_result, + capabilities, "capabilities"); + +static void +print_tx_offloads(uint64_t offloads) +{ + uint64_t single_offload; + int begin; + int end; + int bit; + + if (offloads == 0) + return; + + begin = __builtin_ctzll(offloads); + end = sizeof(offloads) * CHAR_BIT - __builtin_clzll(offloads); + + single_offload = 1 << begin; + for (bit = begin; bit < end; bit++) { + if (offloads & single_offload) + printf(" %s", + rte_eth_dev_tx_offload_name(single_offload)); + single_offload <<= 1; + } +} + +static void +cmd_tx_offload_get_capa_parsed( + void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_tx_offload_get_capa_result *res = parsed_result; + struct rte_eth_dev_info dev_info; + portid_t port_id = res->port_id; + uint64_t queue_offloads; + uint64_t port_offloads; + + rte_eth_dev_info_get(port_id, &dev_info); + queue_offloads = dev_info.tx_queue_offload_capa; + port_offloads = dev_info.tx_offload_capa ^ queue_offloads; + + printf("Tx Offloading Capabilities of port %d :\n", port_id); + printf(" Per Queue :"); + print_tx_offloads(queue_offloads); + + printf("\n"); + printf(" Per Port :"); + print_tx_offloads(port_offloads); + printf("\n\n"); +} + +cmdline_parse_inst_t cmd_tx_offload_get_capa = { + .f = cmd_tx_offload_get_capa_parsed, + .data = NULL, + .help_str = 
"show port tx_offload capabilities", + .tokens = { + (void *)&cmd_tx_offload_get_capa_show, + (void *)&cmd_tx_offload_get_capa_port, + (void *)&cmd_tx_offload_get_capa_port_id, + (void *)&cmd_tx_offload_get_capa_tx_offload, + (void *)&cmd_tx_offload_get_capa_capabilities, + NULL, + } +}; + +/* Get Tx offloads configuration */ +struct cmd_tx_offload_get_configuration_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t tx_offload; + cmdline_fixed_string_t configuration; +}; + +cmdline_parse_token_string_t cmd_tx_offload_get_configuration_show = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_configuration_result, + show, "show"); +cmdline_parse_token_string_t cmd_tx_offload_get_configuration_port = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_configuration_result, + port, "port"); +cmdline_parse_token_num_t cmd_tx_offload_get_configuration_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_tx_offload_get_configuration_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_tx_offload_get_configuration_tx_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_configuration_result, + tx_offload, "tx_offload"); +cmdline_parse_token_string_t cmd_tx_offload_get_configuration_configuration = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_configuration_result, + configuration, "configuration"); + +static void +cmd_tx_offload_get_configuration_parsed( + void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_tx_offload_get_configuration_result *res = parsed_result; + struct rte_eth_dev_info dev_info; + portid_t port_id = res->port_id; + struct rte_port *port = &ports[port_id]; + uint64_t port_offloads; + uint64_t queue_offloads; + uint16_t nb_tx_queues; + int q; + + printf("Tx Offloading Configuration of port %d :\n", port_id); + + port_offloads = port->dev_conf.txmode.offloads; + printf(" Port :"); + print_tx_offloads(port_offloads); + printf("\n"); + + rte_eth_dev_info_get(port_id, &dev_info); + nb_tx_queues = dev_info.nb_tx_queues; + for (q = 0; q < nb_tx_queues; q++) { + queue_offloads = port->tx_conf[q].offloads; + printf(" Queue[%2d] :", q); + print_tx_offloads(queue_offloads); + printf("\n"); + } + printf("\n"); +} + +cmdline_parse_inst_t cmd_tx_offload_get_configuration = { + .f = cmd_tx_offload_get_configuration_parsed, + .data = NULL, + .help_str = "show port tx_offload configuration", + .tokens = { + (void *)&cmd_tx_offload_get_configuration_show, + (void *)&cmd_tx_offload_get_configuration_port, + (void *)&cmd_tx_offload_get_configuration_port_id, + (void *)&cmd_tx_offload_get_configuration_tx_offload, + (void *)&cmd_tx_offload_get_configuration_configuration, + NULL, + } +}; + +/* Enable/Disable a per port offloading */ +struct cmd_config_per_port_tx_offload_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t config; + portid_t port_id; + cmdline_fixed_string_t tx_offload; + cmdline_fixed_string_t offload; + cmdline_fixed_string_t on_off; +}; + +cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_port = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_tx_offload_result, + port, "port"); +cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_config = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_tx_offload_result, + config, "config"); +cmdline_parse_token_num_t cmd_config_per_port_tx_offload_result_port_id = + TOKEN_NUM_INITIALIZER + (struct 
cmd_config_per_port_tx_offload_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_tx_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_tx_offload_result, + tx_offload, "tx_offload"); +cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_tx_offload_result, + offload, "vlan_insert#ipv4_cksum#udp_cksum#tcp_cksum#" + "sctp_cksum#tcp_tso#udp_tso#outer_ipv4_cksum#" + "qinq_insert#vxlan_tnl_tso#gre_tnl_tso#" + "ipip_tnl_tso#geneve_tnl_tso#macsec_insert#" + "mt_lockfree#multi_segs#mbuf_fast_free#security"); +cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_tx_offload_result, + on_off, "on#off"); + +static uint64_t +search_tx_offload(const char *name) +{ + uint64_t single_offload; + const char *single_name; + int found = 0; + unsigned int bit; + + single_offload = 1; + for (bit = 0; bit < sizeof(single_offload) * CHAR_BIT; bit++) { + single_name = rte_eth_dev_tx_offload_name(single_offload); + if (single_name == NULL) + break; + if (!strcasecmp(single_name, name)) { + found = 1; + break; + } else if (!strcasecmp(single_name, "UNKNOWN")) + break; + single_offload <<= 1; + } + + if (found) + return single_offload; + + return 0; +} + +static void +cmd_config_per_port_tx_offload_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_config_per_port_tx_offload_result *res = parsed_result; + portid_t port_id = res->port_id; + struct rte_eth_dev_info dev_info; + struct rte_port *port = &ports[port_id]; + uint64_t single_offload; + uint16_t nb_tx_queues; + int q; + + if (port->port_status != RTE_PORT_STOPPED) { + printf("Error: Can't config offload when Port %d " + "is not stopped\n", port_id); + return; + } + + single_offload = search_tx_offload(res->offload); + if (single_offload == 0) { + printf("Unknown offload name: %s\n", res->offload); + return; + } + + rte_eth_dev_info_get(port_id, &dev_info); + nb_tx_queues = dev_info.nb_tx_queues; + if (!strcmp(res->on_off, "on")) { + port->dev_conf.txmode.offloads |= single_offload; + for (q = 0; q < nb_tx_queues; q++) + port->tx_conf[q].offloads |= single_offload; + } else { + port->dev_conf.txmode.offloads &= ~single_offload; + for (q = 0; q < nb_tx_queues; q++) + port->tx_conf[q].offloads &= ~single_offload; + } + + cmd_reconfig_device_queue(port_id, 1, 1); +} + +cmdline_parse_inst_t cmd_config_per_port_tx_offload = { + .f = cmd_config_per_port_tx_offload_parsed, + .data = NULL, + .help_str = "port config <port_id> tx_offload " + "vlan_insert|ipv4_cksum|udp_cksum|tcp_cksum|" + "sctp_cksum|tcp_tso|udp_tso|outer_ipv4_cksum|" + "qinq_insert|vxlan_tnl_tso|gre_tnl_tso|" + "ipip_tnl_tso|geneve_tnl_tso|macsec_insert|" + "mt_lockfree|multi_segs|mbuf_fast_free|security " + "on|off", + .tokens = { + (void *)&cmd_config_per_port_tx_offload_result_port, + (void *)&cmd_config_per_port_tx_offload_result_config, + (void *)&cmd_config_per_port_tx_offload_result_port_id, + (void *)&cmd_config_per_port_tx_offload_result_tx_offload, + (void *)&cmd_config_per_port_tx_offload_result_offload, + (void *)&cmd_config_per_port_tx_offload_result_on_off, + NULL, + } +}; + +/* Enable/Disable a per queue offloading */ +struct cmd_config_per_queue_tx_offload_result { + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t txq; + uint16_t queue_id; + cmdline_fixed_string_t 
tx_offload; + cmdline_fixed_string_t offload; + cmdline_fixed_string_t on_off; +}; + +cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_port = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_tx_offload_result, + port, "port"); +cmdline_parse_token_num_t cmd_config_per_queue_tx_offload_result_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_per_queue_tx_offload_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_txq = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_tx_offload_result, + txq, "txq"); +cmdline_parse_token_num_t cmd_config_per_queue_tx_offload_result_queue_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_per_queue_tx_offload_result, + queue_id, UINT16); +cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_txoffload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_tx_offload_result, + tx_offload, "tx_offload"); +cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_tx_offload_result, + offload, "vlan_insert#ipv4_cksum#udp_cksum#tcp_cksum#" + "sctp_cksum#tcp_tso#udp_tso#outer_ipv4_cksum#" + "qinq_insert#vxlan_tnl_tso#gre_tnl_tso#" + "ipip_tnl_tso#geneve_tnl_tso#macsec_insert#" + "mt_lockfree#multi_segs#mbuf_fast_free#security"); +cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_tx_offload_result, + on_off, "on#off"); + +static void +cmd_config_per_queue_tx_offload_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_config_per_queue_tx_offload_result *res = parsed_result; + struct rte_eth_dev_info dev_info; + portid_t port_id = res->port_id; + uint16_t queue_id = res->queue_id; + struct rte_port *port = &ports[port_id]; + uint64_t single_offload; + + if (port->port_status != RTE_PORT_STOPPED) { + printf("Error: Can't config offload when Port %d " + "is not stopped\n", port_id); + return; + } + + rte_eth_dev_info_get(port_id, &dev_info); + if (queue_id >= dev_info.nb_tx_queues) { + printf("Error: input queue_id should be 0 ... 
" + "%d\n", dev_info.nb_tx_queues - 1); + return; + } + + single_offload = search_tx_offload(res->offload); + if (single_offload == 0) { + printf("Unknown offload name: %s\n", res->offload); + return; + } + + if (!strcmp(res->on_off, "on")) + port->tx_conf[queue_id].offloads |= single_offload; + else + port->tx_conf[queue_id].offloads &= ~single_offload; + + cmd_reconfig_device_queue(port_id, 1, 1); +} + +cmdline_parse_inst_t cmd_config_per_queue_tx_offload = { + .f = cmd_config_per_queue_tx_offload_parsed, + .data = NULL, + .help_str = "port txq tx_offload " + "vlan_insert|ipv4_cksum|udp_cksum|tcp_cksum|" + "sctp_cksum|tcp_tso|udp_tso|outer_ipv4_cksum|" + "qinq_insert|vxlan_tnl_tso|gre_tnl_tso|" + "ipip_tnl_tso|geneve_tnl_tso|macsec_insert|" + "mt_lockfree|multi_segs|mbuf_fast_free|security " + "on|off", + .tokens = { + (void *)&cmd_config_per_queue_tx_offload_result_port, + (void *)&cmd_config_per_queue_tx_offload_result_port_id, + (void *)&cmd_config_per_queue_tx_offload_result_txq, + (void *)&cmd_config_per_queue_tx_offload_result_queue_id, + (void *)&cmd_config_per_queue_tx_offload_result_txoffload, + (void *)&cmd_config_per_queue_tx_offload_result_offload, + (void *)&cmd_config_per_queue_tx_offload_result_on_off, + NULL, + } +}; + /* ******************************************************************************** */ /* list of instructions */ @@ -16130,12 +17702,16 @@ cmdline_parse_ctx_t main_ctx[] = { (cmdline_parse_inst_t *)&cmd_operate_detach_port, (cmdline_parse_inst_t *)&cmd_config_speed_all, (cmdline_parse_inst_t *)&cmd_config_speed_specific, + (cmdline_parse_inst_t *)&cmd_config_loopback_all, + (cmdline_parse_inst_t *)&cmd_config_loopback_specific, (cmdline_parse_inst_t *)&cmd_config_rx_tx, (cmdline_parse_inst_t *)&cmd_config_mtu, (cmdline_parse_inst_t *)&cmd_config_max_pkt_len, (cmdline_parse_inst_t *)&cmd_config_rx_mode_flag, (cmdline_parse_inst_t *)&cmd_config_rss, + (cmdline_parse_inst_t *)&cmd_config_rxtx_ring_size, (cmdline_parse_inst_t *)&cmd_config_rxtx_queue, + (cmdline_parse_inst_t *)&cmd_setup_rxtx_queue, (cmdline_parse_inst_t *)&cmd_config_rss_reta, (cmdline_parse_inst_t *)&cmd_showport_reta, (cmdline_parse_inst_t *)&cmd_config_burst, @@ -16234,6 +17810,10 @@ cmdline_parse_ctx_t main_ctx[] = { #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED (cmdline_parse_inst_t *)&cmd_set_port_tm_hierarchy_default, #endif + (cmdline_parse_inst_t *)&cmd_set_vxlan, + (cmdline_parse_inst_t *)&cmd_set_vxlan_with_vlan, + (cmdline_parse_inst_t *)&cmd_set_nvgre, + (cmdline_parse_inst_t *)&cmd_set_nvgre_with_vlan, (cmdline_parse_inst_t *)&cmd_ddp_add, (cmdline_parse_inst_t *)&cmd_ddp_del, (cmdline_parse_inst_t *)&cmd_ddp_get_list, @@ -16271,7 +17851,22 @@ cmdline_parse_ctx_t main_ctx[] = { (cmdline_parse_inst_t *)&cmd_add_port_tm_leaf_node, (cmdline_parse_inst_t *)&cmd_del_port_tm_node, (cmdline_parse_inst_t *)&cmd_set_port_tm_node_parent, + (cmdline_parse_inst_t *)&cmd_suspend_port_tm_node, + (cmdline_parse_inst_t *)&cmd_resume_port_tm_node, (cmdline_parse_inst_t *)&cmd_port_tm_hierarchy_commit, + (cmdline_parse_inst_t *)&cmd_cfg_tunnel_udp_port, + (cmdline_parse_inst_t *)&cmd_rx_offload_get_capa, + (cmdline_parse_inst_t *)&cmd_rx_offload_get_configuration, + (cmdline_parse_inst_t *)&cmd_config_per_port_rx_offload, + (cmdline_parse_inst_t *)&cmd_config_per_queue_rx_offload, + (cmdline_parse_inst_t *)&cmd_tx_offload_get_capa, + (cmdline_parse_inst_t *)&cmd_tx_offload_get_configuration, + (cmdline_parse_inst_t *)&cmd_config_per_port_tx_offload, + (cmdline_parse_inst_t 
*)&cmd_config_per_queue_tx_offload, +#ifdef RTE_LIBRTE_BPF + (cmdline_parse_inst_t *)&cmd_operate_bpf_ld_parse, + (cmdline_parse_inst_t *)&cmd_operate_bpf_unld_parse, +#endif NULL, }; diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c index a5cf84f7..f9260600 100644 --- a/app/test-pmd/cmdline_flow.c +++ b/app/test-pmd/cmdline_flow.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2016 6WIND S.A. - * Copyright 2016 Mellanox. + * Copyright 2016 Mellanox Technologies, Ltd */ #include @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -68,6 +69,7 @@ enum index { PRIORITY, INGRESS, EGRESS, + TRANSFER, /* Validate/create pattern. */ PATTERN, @@ -85,8 +87,12 @@ enum index { ITEM_PF, ITEM_VF, ITEM_VF_ID, - ITEM_PORT, - ITEM_PORT_INDEX, + ITEM_PHY_PORT, + ITEM_PHY_PORT_INDEX, + ITEM_PORT_ID, + ITEM_PORT_ID_ID, + ITEM_MARK, + ITEM_MARK_ID, ITEM_RAW, ITEM_RAW_RELATIVE, ITEM_RAW_SEARCH, @@ -98,11 +104,11 @@ enum index { ITEM_ETH_SRC, ITEM_ETH_TYPE, ITEM_VLAN, - ITEM_VLAN_TPID, ITEM_VLAN_TCI, ITEM_VLAN_PCP, ITEM_VLAN_DEI, ITEM_VLAN_VID, + ITEM_VLAN_INNER_TYPE, ITEM_IPV4, ITEM_IPV4_TOS, ITEM_IPV4_TTL, @@ -150,6 +156,28 @@ enum index { ITEM_GENEVE, ITEM_GENEVE_VNI, ITEM_GENEVE_PROTO, + ITEM_VXLAN_GPE, + ITEM_VXLAN_GPE_VNI, + ITEM_ARP_ETH_IPV4, + ITEM_ARP_ETH_IPV4_SHA, + ITEM_ARP_ETH_IPV4_SPA, + ITEM_ARP_ETH_IPV4_THA, + ITEM_ARP_ETH_IPV4_TPA, + ITEM_IPV6_EXT, + ITEM_IPV6_EXT_NEXT_HDR, + ITEM_ICMP6, + ITEM_ICMP6_TYPE, + ITEM_ICMP6_CODE, + ITEM_ICMP6_ND_NS, + ITEM_ICMP6_ND_NS_TARGET_ADDR, + ITEM_ICMP6_ND_NA, + ITEM_ICMP6_ND_NA_TARGET_ADDR, + ITEM_ICMP6_ND_OPT, + ITEM_ICMP6_ND_OPT_TYPE, + ITEM_ICMP6_ND_OPT_SLA_ETH, + ITEM_ICMP6_ND_OPT_SLA_ETH_SLA, + ITEM_ICMP6_ND_OPT_TLA_ETH, + ITEM_ICMP6_ND_OPT_TLA_ETH_TLA, /* Validate/create actions. */ ACTIONS, @@ -157,6 +185,8 @@ enum index { ACTION_END, ACTION_VOID, ACTION_PASSTHRU, + ACTION_JUMP, + ACTION_JUMP_GROUP, ACTION_MARK, ACTION_MARK_ID, ACTION_FLAG, @@ -164,33 +194,106 @@ enum index { ACTION_QUEUE_INDEX, ACTION_DROP, ACTION_COUNT, - ACTION_DUP, - ACTION_DUP_INDEX, + ACTION_COUNT_SHARED, + ACTION_COUNT_ID, ACTION_RSS, + ACTION_RSS_FUNC, + ACTION_RSS_LEVEL, + ACTION_RSS_FUNC_DEFAULT, + ACTION_RSS_FUNC_TOEPLITZ, + ACTION_RSS_FUNC_SIMPLE_XOR, + ACTION_RSS_TYPES, + ACTION_RSS_TYPE, + ACTION_RSS_KEY, + ACTION_RSS_KEY_LEN, ACTION_RSS_QUEUES, ACTION_RSS_QUEUE, ACTION_PF, ACTION_VF, ACTION_VF_ORIGINAL, ACTION_VF_ID, + ACTION_PHY_PORT, + ACTION_PHY_PORT_ORIGINAL, + ACTION_PHY_PORT_INDEX, + ACTION_PORT_ID, + ACTION_PORT_ID_ORIGINAL, + ACTION_PORT_ID_ID, ACTION_METER, ACTION_METER_ID, + ACTION_OF_SET_MPLS_TTL, + ACTION_OF_SET_MPLS_TTL_MPLS_TTL, + ACTION_OF_DEC_MPLS_TTL, + ACTION_OF_SET_NW_TTL, + ACTION_OF_SET_NW_TTL_NW_TTL, + ACTION_OF_DEC_NW_TTL, + ACTION_OF_COPY_TTL_OUT, + ACTION_OF_COPY_TTL_IN, + ACTION_OF_POP_VLAN, + ACTION_OF_PUSH_VLAN, + ACTION_OF_PUSH_VLAN_ETHERTYPE, + ACTION_OF_SET_VLAN_VID, + ACTION_OF_SET_VLAN_VID_VLAN_VID, + ACTION_OF_SET_VLAN_PCP, + ACTION_OF_SET_VLAN_PCP_VLAN_PCP, + ACTION_OF_POP_MPLS, + ACTION_OF_POP_MPLS_ETHERTYPE, + ACTION_OF_PUSH_MPLS, + ACTION_OF_PUSH_MPLS_ETHERTYPE, + ACTION_VXLAN_ENCAP, + ACTION_VXLAN_DECAP, + ACTION_NVGRE_ENCAP, + ACTION_NVGRE_DECAP, }; -/** Size of pattern[] field in struct rte_flow_item_raw. */ -#define ITEM_RAW_PATTERN_SIZE 36 +/** Maximum size for pattern in struct rte_flow_item_raw. */ +#define ITEM_RAW_PATTERN_SIZE 40 /** Storage size for struct rte_flow_item_raw including pattern. 
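The pattern buffer now sits immediately after the structure itself (the raw item's pattern field having become a pointer in this release), hence sizeof() plus ITEM_RAW_PATTERN_SIZE below.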
*/ #define ITEM_RAW_SIZE \ - (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE) + (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE) -/** Number of queue[] entries in struct rte_flow_action_rss. */ -#define ACTION_RSS_NUM 32 +/** Maximum number of queue indices in struct rte_flow_action_rss. */ +#define ACTION_RSS_QUEUE_NUM 32 -/** Storage size for struct rte_flow_action_rss including queues. */ -#define ACTION_RSS_SIZE \ - (offsetof(struct rte_flow_action_rss, queue) + \ - sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM) +/** Storage for struct rte_flow_action_rss including external data. */ +struct action_rss_data { + struct rte_flow_action_rss conf; + uint8_t key[RSS_HASH_KEY_LENGTH]; + uint16_t queue[ACTION_RSS_QUEUE_NUM]; +}; + +/** Maximum number of items in struct rte_flow_action_vxlan_encap. */ +#define ACTION_VXLAN_ENCAP_ITEMS_NUM 6 + +/** Storage for struct rte_flow_action_vxlan_encap including external data. */ +struct action_vxlan_encap_data { + struct rte_flow_action_vxlan_encap conf; + struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM]; + struct rte_flow_item_eth item_eth; + struct rte_flow_item_vlan item_vlan; + union { + struct rte_flow_item_ipv4 item_ipv4; + struct rte_flow_item_ipv6 item_ipv6; + }; + struct rte_flow_item_udp item_udp; + struct rte_flow_item_vxlan item_vxlan; +}; + +/** Maximum number of items in struct rte_flow_action_nvgre_encap. */ +#define ACTION_NVGRE_ENCAP_ITEMS_NUM 5 + +/** Storage for struct rte_flow_action_nvgre_encap including external data. */ +struct action_nvgre_encap_data { + struct rte_flow_action_nvgre_encap conf; + struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM]; + struct rte_flow_item_eth item_eth; + struct rte_flow_item_vlan item_vlan; + union { + struct rte_flow_item_ipv4 item_ipv4; + struct rte_flow_item_ipv6 item_ipv6; + }; + struct rte_flow_item_nvgre item_nvgre; +}; /** Maximum number of subsequent tokens and arguments on the stack. */ #define CTX_STACK_SIZE 16 @@ -217,6 +320,9 @@ struct context { struct arg { uint32_t hton:1; /**< Use network byte ordering. */ uint32_t sign:1; /**< Value is signed. */ + uint32_t bounded:1; /**< Value is bounded. */ + uintmax_t min; /**< Minimum value if bounded. */ + uintmax_t max; /**< Maximum value if bounded. */ uint32_t offset; /**< Relative offset from ctx->object. */ uint32_t size; /**< Field size. */ const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */ @@ -309,11 +415,21 @@ struct token { .size = sizeof(*((s *)0)->f), \ }) -/** Static initializer for ARGS() with arbitrary size. */ -#define ARGS_ENTRY_USZ(s, f, sz) \ +/** Static initializer for ARGS() with arbitrary offset and size. */ +#define ARGS_ENTRY_ARB(o, s) \ (&(const struct arg){ \ - .offset = offsetof(s, f), \ - .size = (sz), \ + .offset = (o), \ + .size = (s), \ + }) + +/** Same as ARGS_ENTRY_ARB() with bounded values. */ +#define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \ + (&(const struct arg){ \ + .bounded = 1, \ + .min = (i), \ + .max = (a), \ + .offset = (o), \ + .size = (s), \ }) /** Same as ARGS_ENTRY() using network byte ordering. */ @@ -343,7 +459,7 @@ struct buffer { } destroy; /**< Destroy arguments. */ struct { uint32_t rule; - enum rte_flow_action_type action; + struct rte_flow_action action; } query; /**< Query arguments. 
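Now holds a complete rte_flow_action rather than just an action type, so the query code can also reach the action's configuration.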
*/ struct { uint32_t *group; @@ -384,6 +500,7 @@ static const enum index next_vc_attr[] = { PRIORITY, INGRESS, EGRESS, + TRANSFER, PATTERN, ZERO, }; @@ -416,7 +533,9 @@ static const enum index next_item[] = { ITEM_ANY, ITEM_PF, ITEM_VF, - ITEM_PORT, + ITEM_PHY_PORT, + ITEM_PORT_ID, + ITEM_MARK, ITEM_RAW, ITEM_ETH, ITEM_VLAN, @@ -436,6 +555,15 @@ static const enum index next_item[] = { ITEM_GTPC, ITEM_GTPU, ITEM_GENEVE, + ITEM_VXLAN_GPE, + ITEM_ARP_ETH_IPV4, + ITEM_IPV6_EXT, + ITEM_ICMP6, + ITEM_ICMP6_ND_NS, + ITEM_ICMP6_ND_NA, + ITEM_ICMP6_ND_OPT, + ITEM_ICMP6_ND_OPT_SLA_ETH, + ITEM_ICMP6_ND_OPT_TLA_ETH, ZERO, }; @@ -457,8 +585,20 @@ static const enum index item_vf[] = { ZERO, }; -static const enum index item_port[] = { - ITEM_PORT_INDEX, +static const enum index item_phy_port[] = { + ITEM_PHY_PORT_INDEX, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_port_id[] = { + ITEM_PORT_ID_ID, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_mark[] = { + ITEM_MARK_ID, ITEM_NEXT, ZERO, }; @@ -482,11 +622,11 @@ static const enum index item_eth[] = { }; static const enum index item_vlan[] = { - ITEM_VLAN_TPID, ITEM_VLAN_TCI, ITEM_VLAN_PCP, ITEM_VLAN_DEI, ITEM_VLAN_VID, + ITEM_VLAN_INNER_TYPE, ITEM_NEXT, ZERO, }; @@ -586,20 +726,96 @@ static const enum index item_geneve[] = { ZERO, }; +static const enum index item_vxlan_gpe[] = { + ITEM_VXLAN_GPE_VNI, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_arp_eth_ipv4[] = { + ITEM_ARP_ETH_IPV4_SHA, + ITEM_ARP_ETH_IPV4_SPA, + ITEM_ARP_ETH_IPV4_THA, + ITEM_ARP_ETH_IPV4_TPA, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_ipv6_ext[] = { + ITEM_IPV6_EXT_NEXT_HDR, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6[] = { + ITEM_ICMP6_TYPE, + ITEM_ICMP6_CODE, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_ns[] = { + ITEM_ICMP6_ND_NS_TARGET_ADDR, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_na[] = { + ITEM_ICMP6_ND_NA_TARGET_ADDR, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_opt[] = { + ITEM_ICMP6_ND_OPT_TYPE, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_opt_sla_eth[] = { + ITEM_ICMP6_ND_OPT_SLA_ETH_SLA, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_opt_tla_eth[] = { + ITEM_ICMP6_ND_OPT_TLA_ETH_TLA, + ITEM_NEXT, + ZERO, +}; + static const enum index next_action[] = { ACTION_END, ACTION_VOID, ACTION_PASSTHRU, + ACTION_JUMP, ACTION_MARK, ACTION_FLAG, ACTION_QUEUE, ACTION_DROP, ACTION_COUNT, - ACTION_DUP, ACTION_RSS, ACTION_PF, ACTION_VF, + ACTION_PHY_PORT, + ACTION_PORT_ID, ACTION_METER, + ACTION_OF_SET_MPLS_TTL, + ACTION_OF_DEC_MPLS_TTL, + ACTION_OF_SET_NW_TTL, + ACTION_OF_DEC_NW_TTL, + ACTION_OF_COPY_TTL_OUT, + ACTION_OF_COPY_TTL_IN, + ACTION_OF_POP_VLAN, + ACTION_OF_PUSH_VLAN, + ACTION_OF_SET_VLAN_VID, + ACTION_OF_SET_VLAN_PCP, + ACTION_OF_POP_MPLS, + ACTION_OF_PUSH_MPLS, + ACTION_VXLAN_ENCAP, + ACTION_VXLAN_DECAP, + ACTION_NVGRE_ENCAP, + ACTION_NVGRE_DECAP, ZERO, }; @@ -615,13 +831,19 @@ static const enum index action_queue[] = { ZERO, }; -static const enum index action_dup[] = { - ACTION_DUP_INDEX, +static const enum index action_count[] = { + ACTION_COUNT_ID, + ACTION_COUNT_SHARED, ACTION_NEXT, ZERO, }; static const enum index action_rss[] = { + ACTION_RSS_FUNC, + ACTION_RSS_LEVEL, + ACTION_RSS_TYPES, + ACTION_RSS_KEY, + ACTION_RSS_KEY_LEN, ACTION_RSS_QUEUES, ACTION_NEXT, ZERO, @@ -634,12 +856,74 @@ static const enum index action_vf[] = { ZERO, }; +static const enum index action_phy_port[] = { + 
ACTION_PHY_PORT_ORIGINAL, + ACTION_PHY_PORT_INDEX, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_port_id[] = { + ACTION_PORT_ID_ORIGINAL, + ACTION_PORT_ID_ID, + ACTION_NEXT, + ZERO, +}; + static const enum index action_meter[] = { ACTION_METER_ID, ACTION_NEXT, ZERO, }; +static const enum index action_of_set_mpls_ttl[] = { + ACTION_OF_SET_MPLS_TTL_MPLS_TTL, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_set_nw_ttl[] = { + ACTION_OF_SET_NW_TTL_NW_TTL, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_push_vlan[] = { + ACTION_OF_PUSH_VLAN_ETHERTYPE, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_set_vlan_vid[] = { + ACTION_OF_SET_VLAN_VID_VLAN_VID, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_set_vlan_pcp[] = { + ACTION_OF_SET_VLAN_PCP_VLAN_PCP, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_pop_mpls[] = { + ACTION_OF_POP_MPLS_ETHERTYPE, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_push_mpls[] = { + ACTION_OF_PUSH_MPLS_ETHERTYPE, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_jump[] = { + ACTION_JUMP_GROUP, + ACTION_NEXT, + ZERO, +}; + static int parse_init(struct context *, const struct token *, const char *, unsigned int, void *, unsigned int); @@ -650,9 +934,24 @@ static int parse_vc_spec(struct context *, const struct token *, const char *, unsigned int, void *, unsigned int); static int parse_vc_conf(struct context *, const struct token *, const char *, unsigned int, void *, unsigned int); +static int parse_vc_action_rss(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_rss_func(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_rss_type(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); static int parse_vc_action_rss_queue(struct context *, const struct token *, const char *, unsigned int, void *, unsigned int); +static int parse_vc_action_vxlan_encap(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_nvgre_encap(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); static int parse_destroy(struct context *, const struct token *, const char *, unsigned int, void *, unsigned int); @@ -705,6 +1004,8 @@ static int comp_port(struct context *, const struct token *, unsigned int, char *, unsigned int); static int comp_rule_id(struct context *, const struct token *, unsigned int, char *, unsigned int); +static int comp_vc_action_rss_type(struct context *, const struct token *, + unsigned int, char *, unsigned int); static int comp_vc_action_rss_queue(struct context *, const struct token *, unsigned int, char *, unsigned int); @@ -856,7 +1157,7 @@ static const struct token token_list[] = { .next = NEXT(NEXT_ENTRY(QUERY_ACTION), NEXT_ENTRY(RULE_ID), NEXT_ENTRY(PORT_ID)), - .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action), + .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type), ARGS_ENTRY(struct buffer, args.query.rule), ARGS_ENTRY(struct buffer, port)), .call = parse_query, @@ -928,6 +1229,12 @@ static const struct token token_list[] = { .next = NEXT(next_vc_attr), .call = parse_vc, }, + [TRANSFER] = { + .name = "transfer", + .help = "apply rule directly to endpoints found in pattern", + .next = NEXT(next_vc_attr), + .call = parse_vc, + }, /* 
Validate/create pattern. */ [PATTERN] = { .name = "pattern", @@ -1001,36 +1308,64 @@ static const struct token token_list[] = { }, [ITEM_PF] = { .name = "pf", - .help = "match packets addressed to the physical function", + .help = "match traffic from/to the physical function", .priv = PRIV_ITEM(PF, 0), .next = NEXT(NEXT_ENTRY(ITEM_NEXT)), .call = parse_vc, }, [ITEM_VF] = { .name = "vf", - .help = "match packets addressed to a virtual function ID", + .help = "match traffic from/to a virtual function ID", .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)), .next = NEXT(item_vf), .call = parse_vc, }, [ITEM_VF_ID] = { .name = "id", - .help = "destination VF ID", + .help = "VF ID", .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param), .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)), }, - [ITEM_PORT] = { - .name = "port", - .help = "device-specific physical port index to use", - .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)), - .next = NEXT(item_port), + [ITEM_PHY_PORT] = { + .name = "phy_port", + .help = "match traffic from/to a specific physical port", + .priv = PRIV_ITEM(PHY_PORT, + sizeof(struct rte_flow_item_phy_port)), + .next = NEXT(item_phy_port), .call = parse_vc, }, - [ITEM_PORT_INDEX] = { + [ITEM_PHY_PORT_INDEX] = { .name = "index", .help = "physical port index", - .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param), - .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)), + .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)), + }, + [ITEM_PORT_ID] = { + .name = "port_id", + .help = "match traffic from/to a given DPDK port ID", + .priv = PRIV_ITEM(PORT_ID, + sizeof(struct rte_flow_item_port_id)), + .next = NEXT(item_port_id), + .call = parse_vc, + }, + [ITEM_PORT_ID_ID] = { + .name = "id", + .help = "DPDK port ID", + .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)), + }, + [ITEM_MARK] = { + .name = "mark", + .help = "match traffic against value set in previously matched rule", + .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)), + .next = NEXT(item_mark), + .call = parse_vc, + }, + [ITEM_MARK_ID] = { + .name = "id", + .help = "Integer value to match against", + .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)), }, [ITEM_RAW] = { .name = "raw", @@ -1073,9 +1408,9 @@ static const struct token token_list[] = { NEXT_ENTRY(ITEM_PARAM_IS, ITEM_PARAM_SPEC, ITEM_PARAM_MASK)), - .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length), - ARGS_ENTRY_USZ(struct rte_flow_item_raw, - pattern, + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern), + ARGS_ENTRY(struct rte_flow_item_raw, length), + ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw), ITEM_RAW_PATTERN_SIZE)), }, [ITEM_ETH] = { @@ -1110,12 +1445,6 @@ static const struct token token_list[] = { .next = NEXT(item_vlan), .call = parse_vc, }, - [ITEM_VLAN_TPID] = { - .name = "tpid", - .help = "tag protocol identifier", - .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param), - .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)), - }, [ITEM_VLAN_TCI] = { .name = "tci", .help = "tag control information", @@ -1143,6 +1472,13 @@ static const struct token token_list[] = { .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan, tci, "\x0f\xff")), }, + [ITEM_VLAN_INNER_TYPE] = { + .name = "inner_type", + .help = "inner EtherType", + .next = NEXT(item_vlan, 
NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, + inner_type)), + }, [ITEM_IPV4] = { .name = "ipv4", .help = "match IPv4 header", @@ -1473,6 +1809,182 @@ static const struct token token_list[] = { .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, protocol)), }, + [ITEM_VXLAN_GPE] = { + .name = "vxlan-gpe", + .help = "match VXLAN-GPE header", + .priv = PRIV_ITEM(VXLAN_GPE, + sizeof(struct rte_flow_item_vxlan_gpe)), + .next = NEXT(item_vxlan_gpe), + .call = parse_vc, + }, + [ITEM_VXLAN_GPE_VNI] = { + .name = "vni", + .help = "VXLAN-GPE identifier", + .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe, + vni)), + }, + [ITEM_ARP_ETH_IPV4] = { + .name = "arp_eth_ipv4", + .help = "match ARP header for Ethernet/IPv4", + .priv = PRIV_ITEM(ARP_ETH_IPV4, + sizeof(struct rte_flow_item_arp_eth_ipv4)), + .next = NEXT(item_arp_eth_ipv4), + .call = parse_vc, + }, + [ITEM_ARP_ETH_IPV4_SHA] = { + .name = "sha", + .help = "sender hardware address", + .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4, + sha)), + }, + [ITEM_ARP_ETH_IPV4_SPA] = { + .name = "spa", + .help = "sender IPv4 address", + .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4, + spa)), + }, + [ITEM_ARP_ETH_IPV4_THA] = { + .name = "tha", + .help = "target hardware address", + .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4, + tha)), + }, + [ITEM_ARP_ETH_IPV4_TPA] = { + .name = "tpa", + .help = "target IPv4 address", + .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4, + tpa)), + }, + [ITEM_IPV6_EXT] = { + .name = "ipv6_ext", + .help = "match presence of any IPv6 extension header", + .priv = PRIV_ITEM(IPV6_EXT, + sizeof(struct rte_flow_item_ipv6_ext)), + .next = NEXT(item_ipv6_ext), + .call = parse_vc, + }, + [ITEM_IPV6_EXT_NEXT_HDR] = { + .name = "next_hdr", + .help = "next header", + .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext, + next_hdr)), + }, + [ITEM_ICMP6] = { + .name = "icmp6", + .help = "match any ICMPv6 header", + .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)), + .next = NEXT(item_icmp6), + .call = parse_vc, + }, + [ITEM_ICMP6_TYPE] = { + .name = "type", + .help = "ICMPv6 type", + .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6, + type)), + }, + [ITEM_ICMP6_CODE] = { + .name = "code", + .help = "ICMPv6 code", + .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6, + code)), + }, + [ITEM_ICMP6_ND_NS] = { + .name = "icmp6_nd_ns", + .help = "match ICMPv6 neighbor discovery solicitation", + .priv = PRIV_ITEM(ICMP6_ND_NS, + sizeof(struct rte_flow_item_icmp6_nd_ns)), + .next = NEXT(item_icmp6_nd_ns), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_NS_TARGET_ADDR] = { + .name = "target_addr", + .help = "target address", + .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns, + target_addr)), + }, + [ITEM_ICMP6_ND_NA] = { + .name = "icmp6_nd_na", + .help = "match ICMPv6 neighbor 
discovery advertisement", + .priv = PRIV_ITEM(ICMP6_ND_NA, + sizeof(struct rte_flow_item_icmp6_nd_na)), + .next = NEXT(item_icmp6_nd_na), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_NA_TARGET_ADDR] = { + .name = "target_addr", + .help = "target address", + .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na, + target_addr)), + }, + [ITEM_ICMP6_ND_OPT] = { + .name = "icmp6_nd_opt", + .help = "match presence of any ICMPv6 neighbor discovery" + " option", + .priv = PRIV_ITEM(ICMP6_ND_OPT, + sizeof(struct rte_flow_item_icmp6_nd_opt)), + .next = NEXT(item_icmp6_nd_opt), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_OPT_TYPE] = { + .name = "type", + .help = "ND option type", + .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt, + type)), + }, + [ITEM_ICMP6_ND_OPT_SLA_ETH] = { + .name = "icmp6_nd_opt_sla_eth", + .help = "match ICMPv6 neighbor discovery source Ethernet" + " link-layer address option", + .priv = PRIV_ITEM + (ICMP6_ND_OPT_SLA_ETH, + sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)), + .next = NEXT(item_icmp6_nd_opt_sla_eth), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = { + .name = "sla", + .help = "source Ethernet LLA", + .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)), + }, + [ITEM_ICMP6_ND_OPT_TLA_ETH] = { + .name = "icmp6_nd_opt_tla_eth", + .help = "match ICMPv6 neighbor discovery target Ethernet" + " link-layer address option", + .priv = PRIV_ITEM + (ICMP6_ND_OPT_TLA_ETH, + sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)), + .next = NEXT(item_icmp6_nd_opt_tla_eth), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = { + .name = "tla", + .help = "target Ethernet LLA", + .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)), + }, /* Validate/create actions. 
*/ [ACTIONS] = { @@ -1506,6 +2018,20 @@ static const struct token token_list[] = { .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), .call = parse_vc, }, + [ACTION_JUMP] = { + .name = "jump", + .help = "redirect traffic to a given group", + .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)), + .next = NEXT(action_jump), + .call = parse_vc, + }, + [ACTION_JUMP_GROUP] = { + .name = "group", + .help = "group to redirect traffic to", + .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)), + .call = parse_vc_conf, + }, [ACTION_MARK] = { .name = "mark", .help = "attach 32 bit value to packets", @@ -1552,30 +2078,100 @@ static const struct token token_list[] = { [ACTION_COUNT] = { .name = "count", .help = "enable counters for this rule", - .priv = PRIV_ACTION(COUNT, 0), - .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .priv = PRIV_ACTION(COUNT, + sizeof(struct rte_flow_action_count)), + .next = NEXT(action_count), .call = parse_vc, }, - [ACTION_DUP] = { - .name = "dup", - .help = "duplicate packets to a given queue index", - .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)), - .next = NEXT(action_dup), - .call = parse_vc, + [ACTION_COUNT_ID] = { + .name = "identifier", + .help = "counter identifier to use", + .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)), + .call = parse_vc_conf, }, - [ACTION_DUP_INDEX] = { - .name = "index", - .help = "queue index to duplicate packets to", - .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)), - .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)), + [ACTION_COUNT_SHARED] = { + .name = "shared", + .help = "shared counter", + .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)), + .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count, + shared, 1)), .call = parse_vc_conf, }, [ACTION_RSS] = { .name = "rss", .help = "spread packets among several queues", - .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE), + .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)), .next = NEXT(action_rss), - .call = parse_vc, + .call = parse_vc_action_rss, + }, + [ACTION_RSS_FUNC] = { + .name = "func", + .help = "RSS hash function to apply", + .next = NEXT(action_rss, + NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT, + ACTION_RSS_FUNC_TOEPLITZ, + ACTION_RSS_FUNC_SIMPLE_XOR)), + }, + [ACTION_RSS_FUNC_DEFAULT] = { + .name = "default", + .help = "default hash function", + .call = parse_vc_action_rss_func, + }, + [ACTION_RSS_FUNC_TOEPLITZ] = { + .name = "toeplitz", + .help = "Toeplitz hash function", + .call = parse_vc_action_rss_func, + }, + [ACTION_RSS_FUNC_SIMPLE_XOR] = { + .name = "simple_xor", + .help = "simple XOR hash function", + .call = parse_vc_action_rss_func, + }, + [ACTION_RSS_LEVEL] = { + .name = "level", + .help = "encapsulation level for \"types\"", + .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_ARB + (offsetof(struct action_rss_data, conf) + + offsetof(struct rte_flow_action_rss, level), + sizeof(((struct rte_flow_action_rss *)0)-> + level))), + }, + [ACTION_RSS_TYPES] = { + .name = "types", + .help = "specific RSS hash types", + .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)), + }, + [ACTION_RSS_TYPE] = { + .name = "{type}", + .help = "RSS hash type", + .call = parse_vc_action_rss_type, + .comp = comp_vc_action_rss_type, + }, + [ACTION_RSS_KEY] = { + .name = "key", + .help = "RSS hash key", + .next = NEXT(action_rss, NEXT_ENTRY(STRING)), + .args = ARGS(ARGS_ENTRY_ARB(0, 0), + ARGS_ENTRY_ARB + (offsetof(struct action_rss_data, 
conf) + + offsetof(struct rte_flow_action_rss, key_len), + sizeof(((struct rte_flow_action_rss *)0)-> + key_len)), + ARGS_ENTRY(struct action_rss_data, key)), + }, + [ACTION_RSS_KEY_LEN] = { + .name = "key_len", + .help = "RSS hash key length in bytes", + .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_ARB_BOUNDED + (offsetof(struct action_rss_data, conf) + + offsetof(struct rte_flow_action_rss, key_len), + sizeof(((struct rte_flow_action_rss *)0)-> + key_len), + 0, + RSS_HASH_KEY_LENGTH)), }, [ACTION_RSS_QUEUES] = { .name = "queues", @@ -1591,14 +2187,14 @@ static const struct token token_list[] = { }, [ACTION_PF] = { .name = "pf", - .help = "redirect packets to physical device function", + .help = "direct traffic to physical function", .priv = PRIV_ACTION(PF, 0), .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), .call = parse_vc, }, [ACTION_VF] = { .name = "vf", - .help = "redirect packets to virtual device function", + .help = "direct traffic to a virtual function ID", .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)), .next = NEXT(action_vf), .call = parse_vc, @@ -1613,11 +2209,58 @@ static const struct token token_list[] = { }, [ACTION_VF_ID] = { .name = "id", - .help = "VF ID to redirect packets to", + .help = "VF ID", .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)), .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)), .call = parse_vc_conf, }, + [ACTION_PHY_PORT] = { + .name = "phy_port", + .help = "direct packets to physical port index", + .priv = PRIV_ACTION(PHY_PORT, + sizeof(struct rte_flow_action_phy_port)), + .next = NEXT(action_phy_port), + .call = parse_vc, + }, + [ACTION_PHY_PORT_ORIGINAL] = { + .name = "original", + .help = "use original port index if possible", + .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)), + .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port, + original, 1)), + .call = parse_vc_conf, + }, + [ACTION_PHY_PORT_INDEX] = { + .name = "index", + .help = "physical port index", + .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port, + index)), + .call = parse_vc_conf, + }, + [ACTION_PORT_ID] = { + .name = "port_id", + .help = "direct matching traffic to a given DPDK port ID", + .priv = PRIV_ACTION(PORT_ID, + sizeof(struct rte_flow_action_port_id)), + .next = NEXT(action_port_id), + .call = parse_vc, + }, + [ACTION_PORT_ID_ORIGINAL] = { + .name = "original", + .help = "use original DPDK port ID if possible", + .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)), + .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id, + original, 1)), + .call = parse_vc_conf, + }, + [ACTION_PORT_ID_ID] = { + .name = "id", + .help = "DPDK port ID", + .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)), + .call = parse_vc_conf, + }, [ACTION_METER] = { .name = "meter", .help = "meter the directed packets at given id", @@ -1633,6 +2276,200 @@ static const struct token token_list[] = { .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)), .call = parse_vc_conf, }, + [ACTION_OF_SET_MPLS_TTL] = { + .name = "of_set_mpls_ttl", + .help = "OpenFlow's OFPAT_SET_MPLS_TTL", + .priv = PRIV_ACTION + (OF_SET_MPLS_TTL, + sizeof(struct rte_flow_action_of_set_mpls_ttl)), + .next = NEXT(action_of_set_mpls_ttl), + .call = parse_vc, + }, + [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = { + .name = "mpls_ttl", + .help = "MPLS TTL", + .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct 
rte_flow_action_of_set_mpls_ttl, + mpls_ttl)), + .call = parse_vc_conf, + }, + [ACTION_OF_DEC_MPLS_TTL] = { + .name = "of_dec_mpls_ttl", + .help = "OpenFlow's OFPAT_DEC_MPLS_TTL", + .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_SET_NW_TTL] = { + .name = "of_set_nw_ttl", + .help = "OpenFlow's OFPAT_SET_NW_TTL", + .priv = PRIV_ACTION + (OF_SET_NW_TTL, + sizeof(struct rte_flow_action_of_set_nw_ttl)), + .next = NEXT(action_of_set_nw_ttl), + .call = parse_vc, + }, + [ACTION_OF_SET_NW_TTL_NW_TTL] = { + .name = "nw_ttl", + .help = "IP TTL", + .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl, + nw_ttl)), + .call = parse_vc_conf, + }, + [ACTION_OF_DEC_NW_TTL] = { + .name = "of_dec_nw_ttl", + .help = "OpenFlow's OFPAT_DEC_NW_TTL", + .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_COPY_TTL_OUT] = { + .name = "of_copy_ttl_out", + .help = "OpenFlow's OFPAT_COPY_TTL_OUT", + .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_COPY_TTL_IN] = { + .name = "of_copy_ttl_in", + .help = "OpenFlow's OFPAT_COPY_TTL_IN", + .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_POP_VLAN] = { + .name = "of_pop_vlan", + .help = "OpenFlow's OFPAT_POP_VLAN", + .priv = PRIV_ACTION(OF_POP_VLAN, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_PUSH_VLAN] = { + .name = "of_push_vlan", + .help = "OpenFlow's OFPAT_PUSH_VLAN", + .priv = PRIV_ACTION + (OF_PUSH_VLAN, + sizeof(struct rte_flow_action_of_push_vlan)), + .next = NEXT(action_of_push_vlan), + .call = parse_vc, + }, + [ACTION_OF_PUSH_VLAN_ETHERTYPE] = { + .name = "ethertype", + .help = "EtherType", + .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_push_vlan, + ethertype)), + .call = parse_vc_conf, + }, + [ACTION_OF_SET_VLAN_VID] = { + .name = "of_set_vlan_vid", + .help = "OpenFlow's OFPAT_SET_VLAN_VID", + .priv = PRIV_ACTION + (OF_SET_VLAN_VID, + sizeof(struct rte_flow_action_of_set_vlan_vid)), + .next = NEXT(action_of_set_vlan_vid), + .call = parse_vc, + }, + [ACTION_OF_SET_VLAN_VID_VLAN_VID] = { + .name = "vlan_vid", + .help = "VLAN id", + .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_set_vlan_vid, + vlan_vid)), + .call = parse_vc_conf, + }, + [ACTION_OF_SET_VLAN_PCP] = { + .name = "of_set_vlan_pcp", + .help = "OpenFlow's OFPAT_SET_VLAN_PCP", + .priv = PRIV_ACTION + (OF_SET_VLAN_PCP, + sizeof(struct rte_flow_action_of_set_vlan_pcp)), + .next = NEXT(action_of_set_vlan_pcp), + .call = parse_vc, + }, + [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = { + .name = "vlan_pcp", + .help = "VLAN priority", + .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_set_vlan_pcp, + vlan_pcp)), + .call = parse_vc_conf, + }, + [ACTION_OF_POP_MPLS] = { + .name = "of_pop_mpls", + .help = "OpenFlow's OFPAT_POP_MPLS", + .priv = PRIV_ACTION(OF_POP_MPLS, + sizeof(struct rte_flow_action_of_pop_mpls)), + .next = NEXT(action_of_pop_mpls), + .call = parse_vc, + }, + [ACTION_OF_POP_MPLS_ETHERTYPE] = { + .name = "ethertype", + .help = "EtherType", + .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)), + .args = 
ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_pop_mpls, + ethertype)), + .call = parse_vc_conf, + }, + [ACTION_OF_PUSH_MPLS] = { + .name = "of_push_mpls", + .help = "OpenFlow's OFPAT_PUSH_MPLS", + .priv = PRIV_ACTION + (OF_PUSH_MPLS, + sizeof(struct rte_flow_action_of_push_mpls)), + .next = NEXT(action_of_push_mpls), + .call = parse_vc, + }, + [ACTION_OF_PUSH_MPLS_ETHERTYPE] = { + .name = "ethertype", + .help = "EtherType", + .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_push_mpls, + ethertype)), + .call = parse_vc_conf, + }, + [ACTION_VXLAN_ENCAP] = { + .name = "vxlan_encap", + .help = "VXLAN encapsulation, uses configuration set by \"set" + " vxlan\"", + .priv = PRIV_ACTION(VXLAN_ENCAP, + sizeof(struct action_vxlan_encap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_vxlan_encap, + }, + [ACTION_VXLAN_DECAP] = { + .name = "vxlan_decap", + .help = "Performs a decapsulation action by stripping all" + " headers of the VXLAN tunnel network overlay from the" + " matched flow.", + .priv = PRIV_ACTION(VXLAN_DECAP, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_NVGRE_ENCAP] = { + .name = "nvgre_encap", + .help = "NVGRE encapsulation, uses configuration set by \"set" + " nvgre\"", + .priv = PRIV_ACTION(NVGRE_ENCAP, + sizeof(struct action_nvgre_encap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_nvgre_encap, + }, + [ACTION_NVGRE_DECAP] = { + .name = "nvgre_decap", + .help = "Performs a decapsulation action by stripping all" + " headers of the NVGRE tunnel network overlay from the" + " matched flow.", + .priv = PRIV_ACTION(NVGRE_DECAP, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, }; /** Remove and return last entry from argument stack. */ @@ -1858,6 +2695,9 @@ parse_vc(struct context *ctx, const struct token *token, case EGRESS: out->args.vc.attr.egress = 1; return len; + case TRANSFER: + out->args.vc.attr.transfer = 1; + return len; case PATTERN: out->args.vc.pattern = (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1), @@ -1909,6 +2749,7 @@ parse_vc(struct context *ctx, const struct token *token, return -1; *action = (struct rte_flow_action){ .type = priv->type, + .conf = data_size ? data : NULL, }; ++out->args.vc.actions_n; ctx->object = action; @@ -1989,7 +2830,6 @@ parse_vc_conf(struct context *ctx, const struct token *token, void *buf, unsigned int size) { struct buffer *out = buf; - struct rte_flow_action *action; (void)size; /* Token name must match. */ @@ -1998,14 +2838,147 @@ parse_vc_conf(struct context *ctx, const struct token *token, /* Nothing else to do if there is no buffer. */ if (!out) return len; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + return len; +} + +/** Parse RSS action. */ +static int +parse_vc_action_rss(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_rss_data *action_rss_data; + unsigned int i; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; if (!out->args.vc.actions_n) return -1; action = &out->args.vc.actions[out->args.vc.actions_n - 1]; /* Point to selected object. */ ctx->object = out->args.vc.data; ctx->objmask = NULL; - /* Update configuration pointer. 
*/ - action->conf = ctx->object; + /* Set up default configuration. */ + action_rss_data = ctx->object; + *action_rss_data = (struct action_rss_data){ + .conf = (struct rte_flow_action_rss){ + .func = RTE_ETH_HASH_FUNCTION_DEFAULT, + .level = 0, + .types = rss_hf, + .key_len = sizeof(action_rss_data->key), + .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM), + .key = action_rss_data->key, + .queue = action_rss_data->queue, + }, + .key = "testpmd's default RSS hash key, " + "override it for better balancing", + .queue = { 0 }, + }; + for (i = 0; i < action_rss_data->conf.queue_num; ++i) + action_rss_data->queue[i] = i; + if (!port_id_is_invalid(ctx->port, DISABLED_WARN) && + ctx->port != (portid_t)RTE_PORT_ALL) { + struct rte_eth_dev_info info; + + rte_eth_dev_info_get(ctx->port, &info); + action_rss_data->conf.key_len = + RTE_MIN(sizeof(action_rss_data->key), + info.hash_key_size); + } + action->conf = &action_rss_data->conf; + return ret; +} + +/** + * Parse func field for RSS action. + * + * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the + * ACTION_RSS_FUNC_* index that called this function. + */ +static int +parse_vc_action_rss_func(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct action_rss_data *action_rss_data; + enum rte_eth_hash_function func; + + (void)buf; + (void)size; + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + switch (ctx->curr) { + case ACTION_RSS_FUNC_DEFAULT: + func = RTE_ETH_HASH_FUNCTION_DEFAULT; + break; + case ACTION_RSS_FUNC_TOEPLITZ: + func = RTE_ETH_HASH_FUNCTION_TOEPLITZ; + break; + case ACTION_RSS_FUNC_SIMPLE_XOR: + func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR; + break; + default: + return -1; + } + if (!ctx->object) + return len; + action_rss_data = ctx->object; + action_rss_data->conf.func = func; + return len; +} + +/** + * Parse type field for RSS action. + * + * Valid tokens are type field names and the "end" token. + */ +static int +parse_vc_action_rss_type(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE); + struct action_rss_data *action_rss_data; + unsigned int i; + + (void)token; + (void)buf; + (void)size; + if (ctx->curr != ACTION_RSS_TYPE) + return -1; + if (!(ctx->objdata >> 16) && ctx->object) { + action_rss_data = ctx->object; + action_rss_data->conf.types = 0; + } + if (!strcmp_partial("end", str, len)) { + ctx->objdata &= 0xffff; + return len; + } + for (i = 0; rss_type_table[i].str; ++i) + if (!strcmp_partial(rss_type_table[i].str, str, len)) + break; + if (!rss_type_table[i].str) + return -1; + ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff); + /* Repeat token. 
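Pushing ACTION_RSS_TYPE back onto the next[] stack lets an arbitrary list of type names be accepted until the "end" token is seen.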
*/ + if (ctx->next_num == RTE_DIM(ctx->next)) + return -1; + ctx->next[ctx->next_num++] = next; + if (!ctx->object) + return len; + action_rss_data = ctx->object; + action_rss_data->conf.types |= rss_type_table[i].rss_type; return len; } @@ -2020,6 +2993,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token, void *buf, unsigned int size) { static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE); + struct action_rss_data *action_rss_data; int ret; int i; @@ -2031,11 +3005,14 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token, i = ctx->objdata >> 16; if (!strcmp_partial("end", str, len)) { ctx->objdata &= 0xffff; - return len; + goto end; } - if (i >= ACTION_RSS_NUM) + if (i >= ACTION_RSS_QUEUE_NUM) return -1; - if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i]))) + if (push_args(ctx, + ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) + + i * sizeof(action_rss_data->queue[i]), + sizeof(action_rss_data->queue[i])))) return -1; ret = parse_int(ctx, token, str, len, NULL, 0); if (ret < 0) { @@ -2048,12 +3025,206 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token, if (ctx->next_num == RTE_DIM(ctx->next)) return -1; ctx->next[ctx->next_num++] = next; +end: if (!ctx->object) return len; - ((struct rte_flow_action_rss *)ctx->object)->num = i; + action_rss_data = ctx->object; + action_rss_data->conf.queue_num = i; + action_rss_data->conf.queue = i ? action_rss_data->queue : NULL; return len; } +/** Parse VXLAN encap action. */ +static int +parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_vxlan_encap_data *action_vxlan_encap_data; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Set up default configuration. 
*/ + action_vxlan_encap_data = ctx->object; + *action_vxlan_encap_data = (struct action_vxlan_encap_data){ + .conf = (struct rte_flow_action_vxlan_encap){ + .definition = action_vxlan_encap_data->items, + }, + .items = { + { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = &action_vxlan_encap_data->item_eth, + .mask = &rte_flow_item_eth_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_VLAN, + .spec = &action_vxlan_encap_data->item_vlan, + .mask = &rte_flow_item_vlan_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .spec = &action_vxlan_encap_data->item_ipv4, + .mask = &rte_flow_item_ipv4_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_UDP, + .spec = &action_vxlan_encap_data->item_udp, + .mask = &rte_flow_item_udp_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_VXLAN, + .spec = &action_vxlan_encap_data->item_vxlan, + .mask = &rte_flow_item_vxlan_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }, + .item_eth.type = 0, + .item_vlan = { + .tci = vxlan_encap_conf.vlan_tci, + .inner_type = 0, + }, + .item_ipv4.hdr = { + .src_addr = vxlan_encap_conf.ipv4_src, + .dst_addr = vxlan_encap_conf.ipv4_dst, + }, + .item_udp.hdr = { + .src_port = vxlan_encap_conf.udp_src, + .dst_port = vxlan_encap_conf.udp_dst, + }, + .item_vxlan.flags = 0, + }; + memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes, + vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN); + memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes, + vxlan_encap_conf.eth_src, ETHER_ADDR_LEN); + if (!vxlan_encap_conf.select_ipv4) { + memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr, + &vxlan_encap_conf.ipv6_src, + sizeof(vxlan_encap_conf.ipv6_src)); + memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr, + &vxlan_encap_conf.ipv6_dst, + sizeof(vxlan_encap_conf.ipv6_dst)); + action_vxlan_encap_data->items[2] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .spec = &action_vxlan_encap_data->item_ipv6, + .mask = &rte_flow_item_ipv6_mask, + }; + } + if (!vxlan_encap_conf.select_vlan) + action_vxlan_encap_data->items[1].type = + RTE_FLOW_ITEM_TYPE_VOID; + memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni, + RTE_DIM(vxlan_encap_conf.vni)); + action->conf = &action_vxlan_encap_data->conf; + return ret; +} + +/** Parse NVGRE encap action. */ +static int +parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_nvgre_encap_data *action_nvgre_encap_data; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Set up default configuration. 
*/ + action_nvgre_encap_data = ctx->object; + *action_nvgre_encap_data = (struct action_nvgre_encap_data){ + .conf = (struct rte_flow_action_nvgre_encap){ + .definition = action_nvgre_encap_data->items, + }, + .items = { + { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = &action_nvgre_encap_data->item_eth, + .mask = &rte_flow_item_eth_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_VLAN, + .spec = &action_nvgre_encap_data->item_vlan, + .mask = &rte_flow_item_vlan_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .spec = &action_nvgre_encap_data->item_ipv4, + .mask = &rte_flow_item_ipv4_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_NVGRE, + .spec = &action_nvgre_encap_data->item_nvgre, + .mask = &rte_flow_item_nvgre_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }, + .item_eth.type = 0, + .item_vlan = { + .tci = nvgre_encap_conf.vlan_tci, + .inner_type = 0, + }, + .item_ipv4.hdr = { + .src_addr = nvgre_encap_conf.ipv4_src, + .dst_addr = nvgre_encap_conf.ipv4_dst, + }, + .item_nvgre.flow_id = 0, + }; + memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes, + nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN); + memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes, + nvgre_encap_conf.eth_src, ETHER_ADDR_LEN); + if (!nvgre_encap_conf.select_ipv4) { + memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr, + &nvgre_encap_conf.ipv6_src, + sizeof(nvgre_encap_conf.ipv6_src)); + memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr, + &nvgre_encap_conf.ipv6_dst, + sizeof(nvgre_encap_conf.ipv6_dst)); + action_nvgre_encap_data->items[2] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .spec = &action_nvgre_encap_data->item_ipv6, + .mask = &rte_flow_item_ipv6_mask, + }; + } + if (!nvgre_encap_conf.select_vlan) + action_nvgre_encap_data->items[1].type = + RTE_FLOW_ITEM_TYPE_VOID; + memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni, + RTE_DIM(nvgre_encap_conf.tni)); + action->conf = &action_nvgre_encap_data->conf; + return ret; +} + /** Parse tokens for destroy command. */ static int parse_destroy(struct context *ctx, const struct token *token, @@ -2269,6 +3440,11 @@ parse_int(struct context *ctx, const struct token *token, strtoumax(str, &end, 0); if (errno || (size_t)(end - str) != len) goto error; + if (arg->bounded && + ((arg->sign && ((intmax_t)u < (intmax_t)arg->min || + (intmax_t)u > (intmax_t)arg->max)) || + (!arg->sign && (u < arg->min || u > arg->max)))) + goto error; if (!ctx->object) return len; if (arg->mask) { @@ -2323,8 +3499,8 @@ error: /** * Parse a string. * - * Two arguments (ctx->args) are retrieved from the stack to store data and - * its length (in that order). + * Three arguments (ctx->args) are retrieved from the stack to store data, + * its actual length and address (in that order). */ static int parse_string(struct context *ctx, const struct token *token, @@ -2333,6 +3509,7 @@ parse_string(struct context *ctx, const struct token *token, { const struct arg *arg_data = pop_args(ctx); const struct arg *arg_len = pop_args(ctx); + const struct arg *arg_addr = pop_args(ctx); char tmp[16]; /* Ought to be enough. */ int ret; @@ -2343,6 +3520,11 @@ parse_string(struct context *ctx, const struct token *token, push_args(ctx, arg_data); return -1; } + if (!arg_addr) { + push_args(ctx, arg_len); + push_args(ctx, arg_data); + return -1; + } size = arg_data->size; /* Bit-mask fill is not supported. 
*/ if (arg_data->mask || size < len) @@ -2362,11 +3544,26 @@ parse_string(struct context *ctx, const struct token *token, buf = (uint8_t *)ctx->object + arg_data->offset; /* Output buffer is not necessarily NUL-terminated. */ memcpy(buf, str, len); - memset((uint8_t *)buf + len, 0x55, size - len); + memset((uint8_t *)buf + len, 0x00, size - len); if (ctx->objmask) memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len); + /* Save address if requested. */ + if (arg_addr->size) { + memcpy((uint8_t *)ctx->object + arg_addr->offset, + (void *[]){ + (uint8_t *)ctx->object + arg_data->offset + }, + arg_addr->size); + if (ctx->objmask) + memcpy((uint8_t *)ctx->objmask + arg_addr->offset, + (void *[]){ + (uint8_t *)ctx->objmask + arg_data->offset + }, + arg_addr->size); + } return len; error: + push_args(ctx, arg_addr); push_args(ctx, arg_len); push_args(ctx, arg_data); return -1; @@ -2509,6 +3706,7 @@ static const char *const boolean_name[] = { "false", "true", "no", "yes", "N", "Y", + "off", "on", NULL, }; @@ -2658,22 +3856,40 @@ comp_rule_id(struct context *ctx, const struct token *token, return i; } +/** Complete type field for RSS action. */ +static int +comp_vc_action_rss_type(struct context *ctx, const struct token *token, + unsigned int ent, char *buf, unsigned int size) +{ + unsigned int i; + + (void)ctx; + (void)token; + for (i = 0; rss_type_table[i].str; ++i) + ; + if (!buf) + return i + 1; + if (ent < i) + return snprintf(buf, size, "%s", rss_type_table[ent].str); + if (ent == i) + return snprintf(buf, size, "end"); + return -1; +} + /** Complete queue field for RSS action. */ static int comp_vc_action_rss_queue(struct context *ctx, const struct token *token, unsigned int ent, char *buf, unsigned int size) { - static const char *const str[] = { "", "end", NULL }; - unsigned int i; - (void)ctx; (void)token; - for (i = 0; str[i] != NULL; ++i) - if (buf && i == ent) - return snprintf(buf, size, "%s", str[i]); - if (buf) - return -1; - return i; + if (!buf) + return nb_rxq + 1; + if (ent < nb_rxq) + return snprintf(buf, size, "%u", ent); + if (ent == nb_rxq) + return snprintf(buf, size, "end"); + return -1; } /** Internal context. 
*/ @@ -2927,7 +4143,7 @@ cmd_flow_parsed(const struct buffer *in) break; case QUERY: port_flow_query(in->port, in->args.query.rule, - in->args.query.action); + &in->args.query.action); break; case LIST: port_flow_list(in->port, in->args.list.group_n, diff --git a/app/test-pmd/cmdline_tm.c b/app/test-pmd/cmdline_tm.c index 35cad543..631f1799 100644 --- a/app/test-pmd/cmdline_tm.c +++ b/app/test-pmd/cmdline_tm.c @@ -234,6 +234,7 @@ static void cmd_show_port_tm_cap_parsed(void *parsed_result, return; memset(&cap, 0, sizeof(struct rte_tm_capabilities)); + memset(&error, 0, sizeof(struct rte_tm_error)); ret = rte_tm_capabilities_get(port_id, &cap, &error); if (ret) { print_err_msg(&error); @@ -374,6 +375,7 @@ static void cmd_show_port_tm_level_cap_parsed(void *parsed_result, return; memset(&lcap, 0, sizeof(struct rte_tm_level_capabilities)); + memset(&error, 0, sizeof(struct rte_tm_error)); ret = rte_tm_level_capabilities_get(port_id, level_id, &lcap, &error); if (ret) { print_err_msg(&error); @@ -498,6 +500,7 @@ static void cmd_show_port_tm_node_cap_parsed(void *parsed_result, if (port_id_is_invalid(port_id, ENABLED_WARN)) return; + memset(&error, 0, sizeof(struct rte_tm_error)); /* Node id must be valid */ ret = rte_tm_node_type_get(port_id, node_id, &is_leaf, &error); if (ret != 0) { @@ -615,6 +618,7 @@ static void cmd_show_port_tm_node_stats_parsed(void *parsed_result, if (port_id_is_invalid(port_id, ENABLED_WARN)) return; + memset(&error, 0, sizeof(struct rte_tm_error)); /* Port status */ if (!port_is_started(port_id)) { printf(" Port %u not started (error)\n", port_id); @@ -727,6 +731,7 @@ static void cmd_show_port_tm_node_type_parsed(void *parsed_result, if (port_id_is_invalid(port_id, ENABLED_WARN)) return; + memset(&error, 0, sizeof(struct rte_tm_error)); ret = rte_tm_node_type_get(port_id, node_id, &is_leaf, &error); if (ret != 0) { print_err_msg(&error); @@ -832,6 +837,7 @@ static void cmd_add_port_tm_node_shaper_profile_parsed(void *parsed_result, /* Private shaper profile params */ memset(&sp, 0, sizeof(struct rte_tm_shaper_params)); + memset(&error, 0, sizeof(struct rte_tm_error)); sp.peak.rate = res->tb_rate; sp.peak.size = res->tb_size; sp.pkt_length_adjust = pkt_len_adjust; @@ -919,6 +925,7 @@ static void cmd_del_port_tm_node_shaper_profile_parsed(void *parsed_result, if (port_id_is_invalid(port_id, ENABLED_WARN)) return; + memset(&error, 0, sizeof(struct rte_tm_error)); ret = rte_tm_shaper_profile_delete(port_id, shaper_id, &error); if (ret != 0) { print_err_msg(&error); @@ -1004,6 +1011,7 @@ static void cmd_add_port_tm_node_shared_shaper_parsed(void *parsed_result, if (port_id_is_invalid(port_id, ENABLED_WARN)) return; + memset(&error, 0, sizeof(struct rte_tm_error)); /* Command type: add */ if ((strcmp(res->cmd_type, "add") == 0) && (port_is_started(port_id))) { @@ -1098,6 +1106,7 @@ static void cmd_del_port_tm_node_shared_shaper_parsed(void *parsed_result, if (port_id_is_invalid(port_id, ENABLED_WARN)) return; + memset(&error, 0, sizeof(struct rte_tm_error)); ret = rte_tm_shared_shaper_delete(port_id, shared_shaper_id, &error); if (ret != 0) { print_err_msg(&error); @@ -1254,6 +1263,7 @@ static void cmd_add_port_tm_node_wred_profile_parsed(void *parsed_result, return; memset(&wp, 0, sizeof(struct rte_tm_wred_params)); + memset(&error, 0, sizeof(struct rte_tm_error)); /* WRED Params (Green Color)*/ color = RTE_TM_GREEN; @@ -1369,6 +1379,7 @@ static void cmd_del_port_tm_node_wred_profile_parsed(void *parsed_result, if (port_id_is_invalid(port_id, ENABLED_WARN)) return; + 
memset(&error, 0, sizeof(struct rte_tm_error)); ret = rte_tm_wred_profile_delete(port_id, wred_profile_id, &error); if (ret != 0) { print_err_msg(&error); @@ -1455,6 +1466,7 @@ static void cmd_set_port_tm_node_shaper_profile_parsed(void *parsed_result, if (port_id_is_invalid(port_id, ENABLED_WARN)) return; + memset(&error, 0, sizeof(struct rte_tm_error)); /* Port status */ if (!port_is_started(port_id)) { printf(" Port %u not started (error)\n", port_id); @@ -1500,7 +1512,7 @@ struct cmd_add_port_tm_nonleaf_node_result { uint32_t priority; uint32_t weight; uint32_t level_id; - uint32_t shaper_profile_id; + int32_t shaper_profile_id; uint32_t n_sp_priorities; uint64_t stats_mask; cmdline_multi_string_t multi_shared_shaper_id; @@ -1542,7 +1554,7 @@ cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_level_id = level_id, UINT32); cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_shaper_profile_id = TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result, - shaper_profile_id, UINT32); + shaper_profile_id, INT32); cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_n_sp_priorities = TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result, n_sp_priorities, UINT32); @@ -1571,6 +1583,7 @@ static void cmd_add_port_tm_nonleaf_node_parsed(void *parsed_result, return; memset(&np, 0, sizeof(struct rte_tm_node_params)); + memset(&error, 0, sizeof(struct rte_tm_error)); /* Node parameters */ if (res->parent_node_id < 0) @@ -1593,12 +1606,18 @@ static void cmd_add_port_tm_nonleaf_node_parsed(void *parsed_result, return; } - np.shaper_profile_id = res->shaper_profile_id; + if (res->shaper_profile_id < 0) + np.shaper_profile_id = UINT32_MAX; + else + np.shaper_profile_id = res->shaper_profile_id; + np.n_shared_shapers = n_shared_shapers; - if (np.n_shared_shapers) + if (np.n_shared_shapers) { np.shared_shaper_id = &shared_shaper_id[0]; - else - np.shared_shaper_id = NULL; + } else { + free(shared_shaper_id); + shared_shaper_id = NULL; + } np.nonleaf.n_sp_priorities = res->n_sp_priorities; np.stats_mask = res->stats_mask; @@ -1651,7 +1670,7 @@ struct cmd_add_port_tm_leaf_node_result { uint32_t priority; uint32_t weight; uint32_t level_id; - uint32_t shaper_profile_id; + int32_t shaper_profile_id; uint32_t cman_mode; uint32_t wred_profile_id; uint64_t stats_mask; @@ -1693,7 +1712,7 @@ cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_level_id = level_id, UINT32); cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_shaper_profile_id = TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result, - shaper_profile_id, UINT32); + shaper_profile_id, INT32); cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_cman_mode = TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result, cman_mode, UINT32); @@ -1725,6 +1744,7 @@ static void cmd_add_port_tm_leaf_node_parsed(void *parsed_result, return; memset(&np, 0, sizeof(struct rte_tm_node_params)); + memset(&error, 0, sizeof(struct rte_tm_error)); /* Node parameters */ if (res->parent_node_id < 0) @@ -1747,13 +1767,19 @@ static void cmd_add_port_tm_leaf_node_parsed(void *parsed_result, return; } - np.shaper_profile_id = res->shaper_profile_id; + if (res->shaper_profile_id < 0) + np.shaper_profile_id = UINT32_MAX; + else + np.shaper_profile_id = res->shaper_profile_id; + np.n_shared_shapers = n_shared_shapers; - if (np.n_shared_shapers) + if (np.n_shared_shapers) { np.shared_shaper_id = &shared_shaper_id[0]; - else - np.shared_shaper_id = NULL; + } else { + free(shared_shaper_id); + shared_shaper_id = NULL; + } np.leaf.cman = 
res->cman_mode; np.leaf.wred.wred_profile_id = res->wred_profile_id; @@ -1836,6 +1862,7 @@ static void cmd_del_port_tm_node_parsed(void *parsed_result, if (port_id_is_invalid(port_id, ENABLED_WARN)) return; + memset(&error, 0, sizeof(struct rte_tm_error)); /* Port status */ if (port_is_started(port_id)) { printf(" Port %u not stopped (error)\n", port_id); @@ -1925,6 +1952,7 @@ static void cmd_set_port_tm_node_parent_parsed(void *parsed_result, if (port_id_is_invalid(port_id, ENABLED_WARN)) return; + memset(&error, 0, sizeof(struct rte_tm_error)); /* Port status */ if (!port_is_started(port_id)) { printf(" Port %u not started (error)\n", port_id); @@ -1958,6 +1986,136 @@ cmdline_parse_inst_t cmd_set_port_tm_node_parent = { }, }; +/* *** Suspend Port TM Node *** */ +struct cmd_suspend_port_tm_node_result { + cmdline_fixed_string_t suspend; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + uint16_t port_id; + uint32_t node_id; +}; + +cmdline_parse_token_string_t cmd_suspend_port_tm_node_suspend = + TOKEN_STRING_INITIALIZER( + struct cmd_suspend_port_tm_node_result, suspend, "suspend"); +cmdline_parse_token_string_t cmd_suspend_port_tm_node_port = + TOKEN_STRING_INITIALIZER( + struct cmd_suspend_port_tm_node_result, port, "port"); +cmdline_parse_token_string_t cmd_suspend_port_tm_node_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_suspend_port_tm_node_result, tm, "tm"); +cmdline_parse_token_string_t cmd_suspend_port_tm_node_node = + TOKEN_STRING_INITIALIZER( + struct cmd_suspend_port_tm_node_result, node, "node"); +cmdline_parse_token_num_t cmd_suspend_port_tm_node_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_suspend_port_tm_node_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_suspend_port_tm_node_node_id = + TOKEN_NUM_INITIALIZER( + struct cmd_suspend_port_tm_node_result, node_id, UINT32); + +static void cmd_suspend_port_tm_node_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_suspend_port_tm_node_result *res = parsed_result; + struct rte_tm_error error; + uint32_t node_id = res->node_id; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + ret = rte_tm_node_suspend(port_id, node_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_suspend_port_tm_node = { + .f = cmd_suspend_port_tm_node_parsed, + .data = NULL, + .help_str = "Suspend port tm node", + .tokens = { + (void *)&cmd_suspend_port_tm_node_suspend, + (void *)&cmd_suspend_port_tm_node_port, + (void *)&cmd_suspend_port_tm_node_tm, + (void *)&cmd_suspend_port_tm_node_node, + (void *)&cmd_suspend_port_tm_node_port_id, + (void *)&cmd_suspend_port_tm_node_node_id, + NULL, + }, +}; + +/* *** Resume Port TM Node *** */ +struct cmd_resume_port_tm_node_result { + cmdline_fixed_string_t resume; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + uint16_t port_id; + uint32_t node_id; +}; + +cmdline_parse_token_string_t cmd_resume_port_tm_node_resume = + TOKEN_STRING_INITIALIZER( + struct cmd_resume_port_tm_node_result, resume, "resume"); +cmdline_parse_token_string_t cmd_resume_port_tm_node_port = + TOKEN_STRING_INITIALIZER( + struct cmd_resume_port_tm_node_result, port, "port"); +cmdline_parse_token_string_t cmd_resume_port_tm_node_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_resume_port_tm_node_result, tm, "tm"); 
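/*
 * Illustrative sketch only, not part of the upstream patch: the new
 * suspend/resume commands in this hunk each reduce to a single rte_tm
 * call. Driving the same pair of operations directly from C looks like
 * the helper below; tm_toggle_node() is a hypothetical name, and it
 * assumes a started port whose committed TM hierarchy contains node_id.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <rte_tm.h>
 *
 *	static int
 *	tm_toggle_node(uint16_t port_id, uint32_t node_id)
 *	{
 *		struct rte_tm_error error;
 *
 *		memset(&error, 0, sizeof(error));
 *		if (rte_tm_node_suspend(port_id, node_id, &error) != 0) {
 *			printf("suspend: %s\n",
 *				error.message ? error.message : "(no message)");
 *			return -1;
 *		}
 *		if (rte_tm_node_resume(port_id, node_id, &error) != 0) {
 *			printf("resume: %s\n",
 *				error.message ? error.message : "(no message)");
 *			return -1;
 *		}
 *		return 0;
 *	}
 *
 * The memset mirrors the command handlers here: rte_tm_error is cleared
 * before each call so print_err_msg() never reads stale data on failure.
 */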
+cmdline_parse_token_string_t cmd_resume_port_tm_node_node = + TOKEN_STRING_INITIALIZER( + struct cmd_resume_port_tm_node_result, node, "node"); +cmdline_parse_token_num_t cmd_resume_port_tm_node_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_resume_port_tm_node_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_resume_port_tm_node_node_id = + TOKEN_NUM_INITIALIZER( + struct cmd_resume_port_tm_node_result, node_id, UINT32); + +static void cmd_resume_port_tm_node_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_resume_port_tm_node_result *res = parsed_result; + struct rte_tm_error error; + uint32_t node_id = res->node_id; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + ret = rte_tm_node_resume(port_id, node_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_resume_port_tm_node = { + .f = cmd_resume_port_tm_node_parsed, + .data = NULL, + .help_str = "Resume port tm node", + .tokens = { + (void *)&cmd_resume_port_tm_node_resume, + (void *)&cmd_resume_port_tm_node_port, + (void *)&cmd_resume_port_tm_node_tm, + (void *)&cmd_resume_port_tm_node_node, + (void *)&cmd_resume_port_tm_node_port_id, + (void *)&cmd_resume_port_tm_node_node_id, + NULL, + }, +}; + /* *** Port TM Hierarchy Commit *** */ struct cmd_port_tm_hierarchy_commit_result { cmdline_fixed_string_t port; @@ -2007,6 +2165,7 @@ static void cmd_port_tm_hierarchy_commit_parsed(void *parsed_result, else clean_on_fail = 0; + memset(&error, 0, sizeof(struct rte_tm_error)); ret = rte_tm_hierarchy_commit(port_id, clean_on_fail, &error); if (ret != 0) { print_err_msg(&error); @@ -2017,7 +2176,7 @@ static void cmd_port_tm_hierarchy_commit_parsed(void *parsed_result, cmdline_parse_inst_t cmd_port_tm_hierarchy_commit = { .f = cmd_port_tm_hierarchy_commit_parsed, .data = NULL, - .help_str = "Set port tm node shaper profile", + .help_str = "Commit port tm hierarchy", .tokens = { (void *)&cmd_port_tm_hierarchy_commit_port, (void *)&cmd_port_tm_hierarchy_commit_tm, diff --git a/app/test-pmd/cmdline_tm.h b/app/test-pmd/cmdline_tm.h index ba303607..b3a14ade 100644 --- a/app/test-pmd/cmdline_tm.h +++ b/app/test-pmd/cmdline_tm.h @@ -22,6 +22,8 @@ extern cmdline_parse_inst_t cmd_add_port_tm_nonleaf_node; extern cmdline_parse_inst_t cmd_add_port_tm_leaf_node; extern cmdline_parse_inst_t cmd_del_port_tm_node; extern cmdline_parse_inst_t cmd_set_port_tm_node_parent; +extern cmdline_parse_inst_t cmd_suspend_port_tm_node; +extern cmdline_parse_inst_t cmd_resume_port_tm_node; extern cmdline_parse_inst_t cmd_port_tm_hierarchy_commit; #endif /* _CMDLINE_TM_H_ */ diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index 4bb255c6..14ccd686 100644 --- a/app/test-pmd/config.c +++ b/app/test-pmd/config.c @@ -73,12 +73,7 @@ static const struct { }, }; -struct rss_type_info { - char str[32]; - uint64_t rss_type; -}; - -static const struct rss_type_info rss_type_table[] = { +const struct rss_type_info rss_type_table[] = { { "ipv4", ETH_RSS_IPV4 }, { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, @@ -99,7 +94,12 @@ static const struct rss_type_info rss_type_table[] = { { "vxlan", ETH_RSS_VXLAN }, { "geneve", ETH_RSS_GENEVE }, { "nvgre", ETH_RSS_NVGRE }, - + { "ip", ETH_RSS_IP }, + { "udp", ETH_RSS_UDP }, + { "tcp", ETH_RSS_TCP }, + { "sctp", ETH_RSS_SCTP }, + { "tunnel", ETH_RSS_TUNNEL }, + { 
NULL, 0 }, }; static void @@ -121,15 +121,11 @@ nic_stats_display(portid_t port_id) struct rte_eth_stats stats; struct rte_port *port = &ports[port_id]; uint8_t i; - portid_t pid; static const char *nic_stats_border = "########################"; if (port_id_is_invalid(port_id, ENABLED_WARN)) { - printf("Valid port range is [0"); - RTE_ETH_FOREACH_DEV(pid) - printf(", %d", pid); - printf("]\n"); + print_valid_ports(); return; } rte_eth_stats_get(port_id, &stats); @@ -203,13 +199,8 @@ nic_stats_display(portid_t port_id) void nic_stats_clear(portid_t port_id) { - portid_t pid; - if (port_id_is_invalid(port_id, ENABLED_WARN)) { - printf("Valid port range is [0"); - RTE_ETH_FOREACH_DEV(pid) - printf(", %d", pid); - printf("]\n"); + print_valid_ports(); return; } rte_eth_stats_reset(port_id); @@ -286,15 +277,11 @@ nic_stats_mapping_display(portid_t port_id) { struct rte_port *port = &ports[port_id]; uint16_t i; - portid_t pid; static const char *nic_stats_mapping_border = "########################"; if (port_id_is_invalid(port_id, ENABLED_WARN)) { - printf("Valid port range is [0"); - RTE_ETH_FOREACH_DEV(pid) - printf(", %d", pid); - printf("]\n"); + print_valid_ports(); return; } @@ -405,14 +392,11 @@ port_infos_display(portid_t port_id) int vlan_offload; struct rte_mempool * mp; static const char *info_border = "*********************"; - portid_t pid; uint16_t mtu; + char name[RTE_ETH_NAME_MAX_LEN]; if (port_id_is_invalid(port_id, ENABLED_WARN)) { - printf("Valid port range is [0"); - RTE_ETH_FOREACH_DEV(pid) - printf(", %d", pid); - printf("]\n"); + print_valid_ports(); return; } port = &ports[port_id]; @@ -423,6 +407,8 @@ port_infos_display(portid_t port_id) info_border, port_id, info_border); rte_eth_macaddr_get(port_id, &mac_addr); print_ethaddr("MAC address: ", &mac_addr); + rte_eth_dev_get_name_by_port(port_id, name); + printf("\nDevice name: %s", name); printf("\nDriver name: %s", dev_info.driver_name); printf("\nConnect to socket: %u", port->socket_id); @@ -517,6 +503,18 @@ port_infos_display(portid_t port_id) printf("Min possible number of TXDs per queue: %hu\n", dev_info.tx_desc_lim.nb_min); printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); + + /* Show switch info only if valid switch domain and port id is set */ + if (dev_info.switch_info.domain_id != + RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { + if (dev_info.switch_info.name) + printf("Switch name: %s\n", dev_info.switch_info.name); + + printf("Switch domain Id: %u\n", + dev_info.switch_info.domain_id); + printf("Switch Port Id: %u\n", + dev_info.switch_info.port_id); + } } void @@ -722,6 +720,23 @@ port_offload_cap_display(portid_t port_id) printf("off\n"); } + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { + printf("IP tunnel TSO: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_IP_TNL_TSO) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { + printf("UDP tunnel TSO: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_UDP_TNL_TSO) + printf("on\n"); + else + printf("off\n"); + } } int @@ -742,6 +757,17 @@ port_id_is_invalid(portid_t port_id, enum print_warning warning) return 1; } +void print_valid_ports(void) +{ + portid_t pid; + + printf("The valid ports array is ["); + RTE_ETH_FOREACH_DEV(pid) { + printf(" %d", pid); + } + printf(" ]\n"); +} + static int vlan_id_is_invalid(uint16_t vlan_id) { @@ -754,6 +780,8 @@ vlan_id_is_invalid(uint16_t vlan_id) static int port_reg_off_is_invalid(portid_t port_id, 
uint32_t reg_off) { + const struct rte_pci_device *pci_dev; + const struct rte_bus *bus; uint64_t pci_len; if (reg_off & 0x3) { @@ -762,7 +790,21 @@ port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) (unsigned)reg_off); return 1; } - pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len; + + if (!ports[port_id].dev_info.device) { + printf("Invalid device\n"); + return 0; + } + + bus = rte_bus_find_by_device(ports[port_id].dev_info.device); + if (bus && !strcmp(bus->name, "pci")) { + pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); + } else { + printf("Not a PCI device\n"); + return 1; + } + + pci_len = pci_dev->mem_resource[0].len; if (reg_off >= pci_len) { printf("Port %d: register offset %u (0x%X) out of port PCI " "resource (length=%"PRIu64")\n", @@ -960,8 +1002,9 @@ static const struct { MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)), MK_FLOW_ITEM(PF, 0), MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)), - MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)), - MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */ + MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)), + MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)), + MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)), MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)), MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)), @@ -980,33 +1023,89 @@ static const struct { MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)), MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)), MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), + MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)), + MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)), + MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)), + MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)), + MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)), + MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)), + MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)), + MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH, + sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)), + MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH, + sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)), }; -/** Compute storage space needed by item specification. */ -static void -flow_item_spec_size(const struct rte_flow_item *item, - size_t *size, size_t *pad) +/** Pattern item specification types. */ +enum item_spec_type { + ITEM_SPEC, + ITEM_LAST, + ITEM_MASK, +}; + +/** Compute storage space needed by item specification and copy it. */ +static size_t +flow_item_spec_copy(void *buf, const struct rte_flow_item *item, + enum item_spec_type type) { - if (!item->spec) { - *size = 0; + size_t size = 0; + const void *data = + type == ITEM_SPEC ? item->spec : + type == ITEM_LAST ? item->last : + type == ITEM_MASK ? item->mask : + NULL; + + if (!item->spec || !data) goto empty; - } switch (item->type) { union { const struct rte_flow_item_raw *raw; } spec; + union { + const struct rte_flow_item_raw *raw; + } last; + union { + const struct rte_flow_item_raw *raw; + } mask; + union { + const struct rte_flow_item_raw *raw; + } src; + union { + struct rte_flow_item_raw *raw; + } dst; + size_t off; case RTE_FLOW_ITEM_TYPE_RAW: spec.raw = item->spec; - *size = offsetof(struct rte_flow_item_raw, pattern) + - spec.raw->length * sizeof(*spec.raw->pattern); + last.raw = item->last ? item->last : item->spec; + mask.raw = item->mask ? 
item->mask : &rte_flow_item_raw_mask; + src.raw = data; + dst.raw = buf; + off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw), + sizeof(*src.raw->pattern)); + if (type == ITEM_SPEC || + (type == ITEM_MASK && + ((spec.raw->length & mask.raw->length) >= + (last.raw->length & mask.raw->length)))) + size = spec.raw->length & mask.raw->length; + else + size = last.raw->length & mask.raw->length; + size = off + size * sizeof(*src.raw->pattern); + if (dst.raw) { + memcpy(dst.raw, src.raw, sizeof(*src.raw)); + dst.raw->pattern = memcpy((uint8_t *)dst.raw + off, + src.raw->pattern, + size - off); + } break; default: - *size = flow_item[item->type].size; + size = flow_item[item->type].size; + if (buf) + memcpy(buf, data, size); break; } empty: - *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; + return RTE_ALIGN_CEIL(size, sizeof(double)); } /** Generate flow_action[] entry. */ @@ -1028,39 +1127,92 @@ static const struct { MK_FLOW_ACTION(FLAG, 0), MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)), MK_FLOW_ACTION(DROP, 0), - MK_FLOW_ACTION(COUNT, 0), - MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)), - MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */ + MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)), + MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), MK_FLOW_ACTION(PF, 0), MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)), + MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)), + MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)), MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)), + MK_FLOW_ACTION(OF_SET_MPLS_TTL, + sizeof(struct rte_flow_action_of_set_mpls_ttl)), + MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0), + MK_FLOW_ACTION(OF_SET_NW_TTL, + sizeof(struct rte_flow_action_of_set_nw_ttl)), + MK_FLOW_ACTION(OF_DEC_NW_TTL, 0), + MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0), + MK_FLOW_ACTION(OF_COPY_TTL_IN, 0), + MK_FLOW_ACTION(OF_POP_VLAN, 0), + MK_FLOW_ACTION(OF_PUSH_VLAN, + sizeof(struct rte_flow_action_of_push_vlan)), + MK_FLOW_ACTION(OF_SET_VLAN_VID, + sizeof(struct rte_flow_action_of_set_vlan_vid)), + MK_FLOW_ACTION(OF_SET_VLAN_PCP, + sizeof(struct rte_flow_action_of_set_vlan_pcp)), + MK_FLOW_ACTION(OF_POP_MPLS, + sizeof(struct rte_flow_action_of_pop_mpls)), + MK_FLOW_ACTION(OF_PUSH_MPLS, + sizeof(struct rte_flow_action_of_push_mpls)), }; -/** Compute storage space needed by action configuration. */ -static void -flow_action_conf_size(const struct rte_flow_action *action, - size_t *size, size_t *pad) +/** Compute storage space needed by action configuration and copy it. 
*/ +static size_t +flow_action_conf_copy(void *buf, const struct rte_flow_action *action) { - if (!action->conf) { - *size = 0; + size_t size = 0; + + if (!action->conf) goto empty; - } switch (action->type) { union { const struct rte_flow_action_rss *rss; - } conf; + } src; + union { + struct rte_flow_action_rss *rss; + } dst; + size_t off; case RTE_FLOW_ACTION_TYPE_RSS: - conf.rss = action->conf; - *size = offsetof(struct rte_flow_action_rss, queue) + - conf.rss->num * sizeof(*conf.rss->queue); + src.rss = action->conf; + dst.rss = buf; + off = 0; + if (dst.rss) + *dst.rss = (struct rte_flow_action_rss){ + .func = src.rss->func, + .level = src.rss->level, + .types = src.rss->types, + .key_len = src.rss->key_len, + .queue_num = src.rss->queue_num, + }; + off += sizeof(*src.rss); + if (src.rss->key_len) { + off = RTE_ALIGN_CEIL(off, sizeof(double)); + size = sizeof(*src.rss->key) * src.rss->key_len; + if (dst.rss) + dst.rss->key = memcpy + ((void *)((uintptr_t)dst.rss + off), + src.rss->key, size); + off += size; + } + if (src.rss->queue_num) { + off = RTE_ALIGN_CEIL(off, sizeof(double)); + size = sizeof(*src.rss->queue) * src.rss->queue_num; + if (dst.rss) + dst.rss->queue = memcpy + ((void *)((uintptr_t)dst.rss + off), + src.rss->queue, size); + off += size; + } + size = off; break; default: - *size = flow_action[action->type].size; + size = flow_action[action->type].size; + if (buf) + memcpy(buf, action->conf, size); break; } empty: - *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size; + return RTE_ALIGN_CEIL(size, sizeof(double)); } /** Generate a port_flow entry from attributes/pattern/actions. */ @@ -1073,7 +1225,6 @@ port_flow_new(const struct rte_flow_attr *attr, const struct rte_flow_action *action; struct port_flow *pf = NULL; size_t tmp; - size_t pad; size_t off1 = 0; size_t off2 = 0; int err = ENOTSUP; @@ -1091,24 +1242,23 @@ store: if (pf) dst = memcpy(pf->data + off1, item, sizeof(*item)); off1 += sizeof(*item); - flow_item_spec_size(item, &tmp, &pad); if (item->spec) { if (pf) - dst->spec = memcpy(pf->data + off2, - item->spec, tmp); - off2 += tmp + pad; + dst->spec = pf->data + off2; + off2 += flow_item_spec_copy + (pf ? pf->data + off2 : NULL, item, ITEM_SPEC); } if (item->last) { if (pf) - dst->last = memcpy(pf->data + off2, - item->last, tmp); - off2 += tmp + pad; + dst->last = pf->data + off2; + off2 += flow_item_spec_copy + (pf ? pf->data + off2 : NULL, item, ITEM_LAST); } if (item->mask) { if (pf) - dst->mask = memcpy(pf->data + off2, - item->mask, tmp); - off2 += tmp + pad; + dst->mask = pf->data + off2; + off2 += flow_item_spec_copy + (pf ? pf->data + off2 : NULL, item, ITEM_MASK); } off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END); @@ -1125,12 +1275,11 @@ store: if (pf) dst = memcpy(pf->data + off1, action, sizeof(*action)); off1 += sizeof(*action); - flow_action_conf_size(action, &tmp, &pad); if (action->conf) { if (pf) - dst->conf = memcpy(pf->data + off2, - action->conf, tmp); - off2 += tmp + pad; + dst->conf = pf->data + off2; + off2 += flow_action_conf_copy + (pf ? 
pf->data + off2 : NULL, action); } off2 = RTE_ALIGN_CEIL(off2, sizeof(double)); } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END); @@ -1168,10 +1317,15 @@ port_flow_complain(struct rte_flow_error *error) [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", + [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", + [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", + [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", + [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", + [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", }; const char *errstr; @@ -1326,7 +1480,7 @@ port_flow_flush(portid_t port_id) /** Query a flow rule. */ int port_flow_query(portid_t port_id, uint32_t rule, - enum rte_flow_action_type action) + const struct rte_flow_action *action) { struct rte_flow_error error; struct rte_port *port; @@ -1347,16 +1501,17 @@ port_flow_query(portid_t port_id, uint32_t rule, printf("Flow rule #%u not found\n", rule); return -ENOENT; } - if ((unsigned int)action >= RTE_DIM(flow_action) || - !flow_action[action].name) + if ((unsigned int)action->type >= RTE_DIM(flow_action) || + !flow_action[action->type].name) name = "unknown"; else - name = flow_action[action].name; - switch (action) { + name = flow_action[action->type].name; + switch (action->type) { case RTE_FLOW_ACTION_TYPE_COUNT: break; default: - printf("Cannot query action type %d (%s)\n", action, name); + printf("Cannot query action type %d (%s)\n", + action->type, name); return -ENOTSUP; } /* Poisoning to make sure PMDs update it in case of error. */ @@ -1364,7 +1519,7 @@ port_flow_query(portid_t port_id, uint32_t rule, memset(&query, 0, sizeof(query)); if (rte_flow_query(port_id, pf->flow, action, &query, &error)) return port_flow_complain(&error); - switch (action) { + switch (action->type) { case RTE_FLOW_ACTION_TYPE_COUNT: printf("%s:\n" " hits_set: %u\n" @@ -1379,7 +1534,7 @@ port_flow_query(portid_t port_id, uint32_t rule, break; default: printf("Cannot display result for action type %d (%s)\n", - action, name); + action->type, name); break; } return 0; @@ -1429,12 +1584,13 @@ port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) const struct rte_flow_item *item = pf->pattern; const struct rte_flow_action *action = pf->actions; - printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t", + printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", pf->id, pf->attr.group, pf->attr.priority, pf->attr.ingress ? 'i' : '-', - pf->attr.egress ? 'e' : '-'); + pf->attr.egress ? 'e' : '-', + pf->attr.transfer ? 
't' : '-'); while (item->type != RTE_FLOW_ITEM_TYPE_END) { if (item->type != RTE_FLOW_ITEM_TYPE_VOID) printf("%s ", flow_item[item->type].name); @@ -1664,6 +1820,7 @@ void rxtx_config_display(void) { portid_t pid; + queueid_t qid; printf(" %s packet forwarding%s packets/burst=%d\n", cur_fwd_eng->fwd_mode_name, @@ -1678,30 +1835,63 @@ rxtx_config_display(void) nb_fwd_lcores, nb_fwd_ports); RTE_ETH_FOREACH_DEV(pid) { - struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf; - struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf; - - printf(" port %d:\n", (unsigned int)pid); - printf(" CRC stripping %s\n", - (ports[pid].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_CRC_STRIP) ? - "enabled" : "disabled"); - printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", - nb_rxq, nb_rxd, rx_conf->rx_free_thresh); - printf(" RX threshold registers: pthresh=%d hthresh=%d " - " wthresh=%d\n", - rx_conf->rx_thresh.pthresh, - rx_conf->rx_thresh.hthresh, - rx_conf->rx_thresh.wthresh); - printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", - nb_txq, nb_txd, tx_conf->tx_free_thresh); - printf(" TX threshold registers: pthresh=%d hthresh=%d " - " wthresh=%d\n", - tx_conf->tx_thresh.pthresh, - tx_conf->tx_thresh.hthresh, - tx_conf->tx_thresh.wthresh); - printf(" TX RS bit threshold=%d - TXQ offloads=0x%"PRIx64"\n", - tx_conf->tx_rs_thresh, tx_conf->offloads); + struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; + struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; + uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; + uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; + uint16_t nb_rx_desc_tmp; + uint16_t nb_tx_desc_tmp; + struct rte_eth_rxq_info rx_qinfo; + struct rte_eth_txq_info tx_qinfo; + int32_t rc; + + /* per port config */ + printf(" port %d: RX queue number: %d Tx queue number: %d\n", + (unsigned int)pid, nb_rxq, nb_txq); + + printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", + ports[pid].dev_conf.rxmode.offloads, + ports[pid].dev_conf.txmode.offloads); + + /* per rx queue config only for first queue to be less verbose */ + for (qid = 0; qid < 1; qid++) { + rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); + if (rc) + nb_rx_desc_tmp = nb_rx_desc[qid]; + else + nb_rx_desc_tmp = rx_qinfo.nb_desc; + + printf(" RX queue: %d\n", qid); + printf(" RX desc=%d - RX free threshold=%d\n", + nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); + printf(" RX threshold registers: pthresh=%d hthresh=%d " + " wthresh=%d\n", + rx_conf[qid].rx_thresh.pthresh, + rx_conf[qid].rx_thresh.hthresh, + rx_conf[qid].rx_thresh.wthresh); + printf(" RX Offloads=0x%"PRIx64"\n", + rx_conf[qid].offloads); + } + + /* per tx queue config only for first queue to be less verbose */ + for (qid = 0; qid < 1; qid++) { + rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); + if (rc) + nb_tx_desc_tmp = nb_tx_desc[qid]; + else + nb_tx_desc_tmp = tx_qinfo.nb_desc; + + printf(" TX queue: %d\n", qid); + printf(" TX desc=%d - TX free threshold=%d\n", + nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh); + printf(" TX threshold registers: pthresh=%d hthresh=%d " + " wthresh=%d\n", + tx_conf[qid].tx_thresh.pthresh, + tx_conf[qid].tx_thresh.hthresh, + tx_conf[qid].tx_thresh.wthresh); + printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", + tx_conf[qid].offloads, tx_conf->tx_rs_thresh); + } } } @@ -1761,7 +1951,7 @@ port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) } rss_conf.rss_hf = 0; - for (i = 0; i < RTE_DIM(rss_type_table); i++) { + for (i = 0; rss_type_table[i].str; i++) { if 
(!strcmp(rss_info, rss_type_table[i].str)) rss_conf.rss_hf = rss_type_table[i].rss_type; } @@ -1790,7 +1980,7 @@ port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key) return; } printf("RSS functions:\n "); - for (i = 0; i < RTE_DIM(rss_type_table); i++) { + for (i = 0; rss_type_table[i].str; i++) { if (rss_hf & rss_type_table[i].rss_type) printf("%s ", rss_type_table[i].str); } @@ -1814,7 +2004,7 @@ port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, rss_conf.rss_key = NULL; rss_conf.rss_key_len = hash_key_len; rss_conf.rss_hf = 0; - for (i = 0; i < RTE_DIM(rss_type_table); i++) { + for (i = 0; rss_type_table[i].str; i++) { if (!strcmp(rss_type_table[i].str, rss_type)) rss_conf.rss_hf = rss_type_table[i].rss_type; } @@ -1987,15 +2177,11 @@ rss_fwd_config_setup(void) fs->tx_queue = rxq; fs->peer_addr = fs->tx_port; fs->retry_enabled = retry_enabled; - rxq = (queueid_t) (rxq + 1); - if (rxq < nb_q) - continue; - /* - * rxq == nb_q - * Restart from RX queue 0 on next RX port - */ - rxq = 0; rxp++; + if (rxp < nb_fwd_ports) + continue; + rxp = 0; + rxq++; } } @@ -2142,6 +2328,55 @@ icmp_echo_config_setup(void) } } +#if defined RTE_LIBRTE_PMD_SOFTNIC +static void +softnic_fwd_config_setup(void) +{ + struct rte_port *port; + portid_t pid, softnic_portid; + queueid_t i; + uint8_t softnic_enable = 0; + + RTE_ETH_FOREACH_DEV(pid) { + port = &ports[pid]; + const char *driver = port->dev_info.driver_name; + + if (strcmp(driver, "net_softnic") == 0) { + softnic_portid = pid; + softnic_enable = 1; + break; + } + } + + if (softnic_enable == 0) { + printf("Softnic mode not configured(%s)!\n", __func__); + return; + } + + cur_fwd_config.nb_fwd_ports = 1; + cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq; + + /* Re-initialize forwarding streams */ + init_fwd_streams(); + + /* + * In the softnic forwarding test, the number of forwarding cores + * is set to one and remaining are used for softnic packet processing. + */ + cur_fwd_config.nb_fwd_lcores = 1; + setup_fwd_config_of_each_lcore(&cur_fwd_config); + + for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) { + fwd_streams[i]->rx_port = softnic_portid; + fwd_streams[i]->rx_queue = i; + fwd_streams[i]->tx_port = softnic_portid; + fwd_streams[i]->tx_queue = i; + fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; + fwd_streams[i]->retry_enabled = retry_enabled; + } +} +#endif + void fwd_config_setup(void) { @@ -2150,6 +2385,14 @@ fwd_config_setup(void) icmp_echo_config_setup(); return; } + +#if defined RTE_LIBRTE_PMD_SOFTNIC + if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { + softnic_fwd_config_setup(); + return; + } +#endif + if ((nb_rxq > 1) && (nb_txq > 1)){ if (dcb_config) dcb_fwd_config_setup(); @@ -3018,6 +3261,7 @@ flowtype_to_str(uint16_t flow_type) {"vxlan", RTE_ETH_FLOW_VXLAN}, {"geneve", RTE_ETH_FLOW_GENEVE}, {"nvgre", RTE_ETH_FLOW_NVGRE}, + {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, }; for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c index 5f5ab64a..49482926 100644 --- a/app/test-pmd/csumonly.c +++ b/app/test-pmd/csumonly.c @@ -49,9 +49,12 @@ #define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. 
*/ #define IP_VHL_DEF (IP_VERSION | IP_HDRLEN) -#define GRE_KEY_PRESENT 0x2000 -#define GRE_KEY_LEN 4 -#define GRE_SUPPORTED_FIELDS GRE_KEY_PRESENT +#define GRE_CHECKSUM_PRESENT 0x8000 +#define GRE_KEY_PRESENT 0x2000 +#define GRE_SEQUENCE_PRESENT 0x1000 +#define GRE_EXT_LEN 4 +#define GRE_SUPPORTED_FIELDS (GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\ + GRE_SEQUENCE_PRESENT) /* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */ #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN @@ -60,6 +63,8 @@ #define _htons(x) (x) #endif +uint16_t vxlan_gpe_udp_port = 4790; + /* structure that caches offload info for the current packet */ struct testpmd_offload_info { uint16_t ethertype; @@ -194,6 +199,70 @@ parse_vxlan(struct udp_hdr *udp_hdr, info->l2_len += ETHER_VXLAN_HLEN; /* add udp + vxlan */ } +/* Parse a vxlan-gpe header */ +static void +parse_vxlan_gpe(struct udp_hdr *udp_hdr, + struct testpmd_offload_info *info) +{ + struct ether_hdr *eth_hdr; + struct ipv4_hdr *ipv4_hdr; + struct ipv6_hdr *ipv6_hdr; + struct vxlan_gpe_hdr *vxlan_gpe_hdr; + uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr); + + /* Check udp destination port. */ + if (udp_hdr->dst_port != _htons(vxlan_gpe_udp_port)) + return; + + vxlan_gpe_hdr = (struct vxlan_gpe_hdr *)((char *)udp_hdr + + sizeof(struct udp_hdr)); + + if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto == + VXLAN_GPE_TYPE_IPV4) { + info->is_tunnel = 1; + info->outer_ethertype = info->ethertype; + info->outer_l2_len = info->l2_len; + info->outer_l3_len = info->l3_len; + info->outer_l4_proto = info->l4_proto; + + ipv4_hdr = (struct ipv4_hdr *)((char *)vxlan_gpe_hdr + + vxlan_gpe_len); + + parse_ipv4(ipv4_hdr, info); + info->ethertype = _htons(ETHER_TYPE_IPv4); + info->l2_len = 0; + + } else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_IPV6) { + info->is_tunnel = 1; + info->outer_ethertype = info->ethertype; + info->outer_l2_len = info->l2_len; + info->outer_l3_len = info->l3_len; + info->outer_l4_proto = info->l4_proto; + + ipv6_hdr = (struct ipv6_hdr *)((char *)vxlan_gpe_hdr + + vxlan_gpe_len); + + info->ethertype = _htons(ETHER_TYPE_IPv6); + parse_ipv6(ipv6_hdr, info); + info->l2_len = 0; + + } else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_ETH) { + info->is_tunnel = 1; + info->outer_ethertype = info->ethertype; + info->outer_l2_len = info->l2_len; + info->outer_l3_len = info->l3_len; + info->outer_l4_proto = info->l4_proto; + + eth_hdr = (struct ether_hdr *)((char *)vxlan_gpe_hdr + + vxlan_gpe_len); + + parse_ethernet(eth_hdr, info); + } else + return; + + info->l2_len += ETHER_VXLAN_GPE_HLEN; +} + /* Parse a gre header */ static void parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info) @@ -203,14 +272,14 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info) struct ipv6_hdr *ipv6_hdr; uint8_t gre_len = 0; - /* check which fields are supported */ - if ((gre_hdr->flags & _htons(~GRE_SUPPORTED_FIELDS)) != 0) - return; - gre_len += sizeof(struct simple_gre_hdr); if (gre_hdr->flags & _htons(GRE_KEY_PRESENT)) - gre_len += GRE_KEY_LEN; + gre_len += GRE_EXT_LEN; + if (gre_hdr->flags & _htons(GRE_SEQUENCE_PRESENT)) + gre_len += GRE_EXT_LEN; + if (gre_hdr->flags & _htons(GRE_CHECKSUM_PRESENT)) + gre_len += GRE_EXT_LEN; if (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) { info->is_tunnel = 1; @@ -342,6 +411,8 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info, info->ethertype); } } + if (info->gso_enable) + ol_flags |= PKT_TX_UDP_SEG; } else if (info->l4_proto == IPPROTO_TCP) { tcp_hdr = (struct tcp_hdr 
*)((char *)l3_hdr + info->l3_len); tcp_hdr->cksum = 0; @@ -588,6 +659,10 @@ pkt_copy_split(const struct rte_mbuf *pkt) * Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP . * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 / * UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 / + * UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 / + * UDP|TCP|SCTP * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP * Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP * Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP @@ -664,7 +739,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) m = pkts_burst[i]; info.is_tunnel = 0; info.pkt_len = rte_pktmbuf_pkt_len(m); - tx_ol_flags = 0; + tx_ol_flags = m->ol_flags & + (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF); rx_ol_flags = m->ol_flags; /* Update the L3/L4 checksum error packet statistics */ @@ -691,9 +767,16 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) udp_hdr = (struct udp_hdr *)((char *)l3_hdr + info.l3_len); - parse_vxlan(udp_hdr, &info, m->packet_type); - if (info.is_tunnel) - tx_ol_flags |= PKT_TX_TUNNEL_VXLAN; + parse_vxlan_gpe(udp_hdr, &info); + if (info.is_tunnel) { + tx_ol_flags |= PKT_TX_TUNNEL_VXLAN_GPE; + } else { + parse_vxlan(udp_hdr, &info, + m->packet_type); + if (info.is_tunnel) + tx_ol_flags |= + PKT_TX_TUNNEL_VXLAN; + } } else if (info.l4_proto == IPPROTO_GRE) { struct simple_gre_hdr *gre_hdr; @@ -738,6 +821,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) /* step 3: fill the mbuf meta data (flags and header lengths) */ + m->tx_offload = 0; if (info.is_tunnel == 1) { if (info.tunnel_tso_segsz || (tx_offloads & diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c index 2adce701..7cac757a 100644 --- a/app/test-pmd/macfwd.c +++ b/app/test-pmd/macfwd.c @@ -96,7 +96,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs) ð_hdr->d_addr); ether_addr_copy(&ports[fs->tx_port].eth_addr, ð_hdr->s_addr); - mb->ol_flags = ol_flags; + mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF; + mb->ol_flags |= ol_flags; mb->l2_len = sizeof(struct ether_hdr); mb->l3_len = sizeof(struct ipv4_hdr); mb->vlan_tci = txp->tx_vlan_id; diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c index e2cc4812..a8384d5b 100644 --- a/app/test-pmd/macswap.c +++ b/app/test-pmd/macswap.c @@ -127,7 +127,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs) ether_addr_copy(ð_hdr->s_addr, ð_hdr->d_addr); ether_addr_copy(&addr, ð_hdr->s_addr); - mb->ol_flags = ol_flags; + mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF; + mb->ol_flags |= ol_flags; mb->l2_len = sizeof(struct ether_hdr); mb->l3_len = sizeof(struct ipv4_hdr); mb->vlan_tci = txp->tx_vlan_id; diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build index 7ed74db2..a0b3be07 100644 --- a/app/test-pmd/meson.build +++ b/app/test-pmd/meson.build @@ -1,6 +1,10 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation +# override default name to drop the hyphen +name = 'testpmd' +allow_experimental_apis = true +cflags += '-Wno-deprecated-declarations' sources = files('cmdline.c', 'cmdline_flow.c', 'cmdline_mtr.c', @@ -22,6 +26,9 @@ deps = ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'meter', 'bus_pci'] if dpdk_conf.has('RTE_LIBRTE_PDUMP') deps += 'pdump' endif +if dpdk_conf.has('RTE_LIBRTE_BNXT_PMD') + deps += 'pmd_bnxt' +endif if dpdk_conf.has('RTE_LIBRTE_I40E_PMD') deps += 'pmd_i40e' endif @@ -29,25 +36,13 @@ if dpdk_conf.has('RTE_LIBRTE_IXGBE_PMD') deps += 'pmd_ixgbe' 
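# Sketch, not part of the patch: the optional-dependency pattern this file
# uses for every PMD hook. RTE_LIBRTE_FOO_PMD, foo_cmd.c and pmd_foo are
# placeholder names for a hypothetical driver:
#
#	if dpdk_conf.has('RTE_LIBRTE_FOO_PMD')
#		sources += files('foo_cmd.c')	# driver-specific CLI, built only when enabled
#		deps += 'pmd_foo'
#	endif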
endif if dpdk_conf.has('RTE_LIBRTE_SOFTNIC_PMD') - sources += files('tm.c') + sources += files('softnicfwd.c') deps += 'pmd_softnic' endif - -dep_objs = [] -foreach d:deps - dep_objs += get_variable(get_option('default_library') + '_rte_' + d) -endforeach -dep_objs += cc.find_library('execinfo', required: false) # for BSD only - -link_libs = [] -if get_option('default_library') == 'static' - link_libs = dpdk_drivers +if dpdk_conf.has('RTE_LIBRTE_DPAA_PMD') + deps += ['bus_dpaa', 'mempool_dpaa', 'pmd_dpaa'] +endif +if dpdk_conf.has('RTE_LIBRTE_BPF') + sources += files('bpf_cmd.c') + deps += 'bpf' endif - -executable('dpdk-testpmd', - sources, - c_args: [machine_args, '-DALLOW_EXPERIMENTAL_API'], - link_whole: link_libs, - dependencies: dep_objs, - install_rpath: join_paths(get_option('prefix'), driver_install_path), - install: true) diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index 97d22b86..962fad78 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -70,7 +70,7 @@ usage(char* progname) "--rss-ip | --rss-udp | " "--rxpt= | --rxht= | --rxwt= | --rxfreet= | " "--txpt= | --txht= | --txwt= | --txfreet= | " - "--txrst= | --tx-offloads ]\n", + "--txrst= | --tx-offloads= | --vxlan-gpe-port= ]\n", progname); #ifdef RTE_LIBRTE_CMDLINE printf(" --interactive: run in interactive mode.\n"); @@ -186,6 +186,10 @@ usage(char* progname) printf(" --flow-isolate-all: " "requests flow API isolated mode on all ports at initialization time.\n"); printf(" --tx-offloads=0xXXXXXXXX: hexadecimal bitmask of TX queue offloads\n"); + printf(" --hot-plug: enable hot plug for device.\n"); + printf(" --vxlan-gpe-port=N: UDP port of tunnel VXLAN-GPE\n"); + printf(" --mlockall: lock all memory\n"); + printf(" --no-mlockall: do not lock all memory\n"); } #ifdef RTE_LIBRTE_CMDLINE @@ -373,7 +377,6 @@ parse_portnuma_config(const char *q_arg) }; unsigned long int_fld[_NUM_FLD]; char *str_fld[_NUM_FLD]; - portid_t pid; /* reset from value set at definition */ while ((p = strchr(p0,'(')) != NULL) { @@ -397,10 +400,7 @@ parse_portnuma_config(const char *q_arg) port_id = (portid_t)int_fld[FLD_PORT]; if (port_id_is_invalid(port_id, ENABLED_WARN) || port_id == (portid_t)RTE_PORT_ALL) { - printf("Valid port range is [0"); - RTE_ETH_FOREACH_DEV(pid) - printf(", %d", pid); - printf("]\n"); + print_valid_ports(); return -1; } socket_id = (uint8_t)int_fld[FLD_SOCKET]; @@ -431,7 +431,6 @@ parse_ringnuma_config(const char *q_arg) }; unsigned long int_fld[_NUM_FLD]; char *str_fld[_NUM_FLD]; - portid_t pid; #define RX_RING_ONLY 0x1 #define TX_RING_ONLY 0x2 #define RXTX_RING 0x3 @@ -458,10 +457,7 @@ parse_ringnuma_config(const char *q_arg) port_id = (portid_t)int_fld[FLD_PORT]; if (port_id_is_invalid(port_id, ENABLED_WARN) || port_id == (portid_t)RTE_PORT_ALL) { - printf("Valid port range is [0"); - RTE_ETH_FOREACH_DEV(pid) - printf(", %d", pid); - printf("]\n"); + print_valid_ports(); return -1; } socket_id = (uint8_t)int_fld[FLD_SOCKET]; @@ -512,6 +508,8 @@ parse_event_printing_config(const char *optarg, int enable) mask = UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET; else if (!strcmp(optarg, "vf_mbox")) mask = UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX; + else if (!strcmp(optarg, "ipsec")) + mask = UINT32_C(1) << RTE_ETH_EVENT_IPSEC; else if (!strcmp(optarg, "macsec")) mask = UINT32_C(1) << RTE_ETH_EVENT_MACSEC; else if (!strcmp(optarg, "intr_rmv")) @@ -544,6 +542,8 @@ launch_args_parse(int argc, char** argv) /* Default offloads for all ports. 
*/ uint64_t rx_offloads = rx_mode.offloads; uint64_t tx_offloads = tx_mode.offloads; + struct rte_eth_dev_info dev_info; + uint16_t rec_nb_pkts; static struct option lgopts[] = { { "help", 0, 0, 0 }, @@ -621,6 +621,10 @@ launch_args_parse(int argc, char** argv) { "print-event", 1, 0, 0 }, { "mask-event", 1, 0, 0 }, { "tx-offloads", 1, 0, 0 }, + { "hot-plug", 0, 0, 0 }, + { "vxlan-gpe-port", 1, 0, 0 }, + { "mlockall", 0, 0, 0 }, + { "no-mlockall", 0, 0, 0 }, { 0, 0, 0, 0 }, }; @@ -658,9 +662,8 @@ launch_args_parse(int argc, char** argv) if (!strcmp(lgopts[opt_idx].name, "cmdline-file")) { printf("CLI commands to be read from %s\n", optarg); - snprintf(cmdline_filename, - sizeof(cmdline_filename), "%s", - optarg); + strlcpy(cmdline_filename, optarg, + sizeof(cmdline_filename)); } if (!strcmp(lgopts[opt_idx].name, "auto-start")) { printf("Auto-start selected\n"); @@ -875,8 +878,10 @@ launch_args_parse(int argc, char** argv) " must be >= 0\n", n); } #endif - if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip")) + if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip")) { rx_offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP; + rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC; + } if (!strcmp(lgopts[opt_idx].name, "enable-lro")) rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO; if (!strcmp(lgopts[opt_idx].name, "enable-scatter")) @@ -948,12 +953,38 @@ launch_args_parse(int argc, char** argv) } if (!strcmp(lgopts[opt_idx].name, "burst")) { n = atoi(optarg); - if ((n >= 1) && (n <= MAX_PKT_BURST)) - nb_pkt_per_burst = (uint16_t) n; - else + if (n == 0) { + /* A burst size of zero means that the + * PMD should be queried for + * recommended Rx burst size. Since + * testpmd uses a single size for all + * ports, port 0 is queried for the + * value, on the assumption that all + * ports are of the same NIC model. + */ + rte_eth_dev_info_get(0, &dev_info); + rec_nb_pkts = dev_info + .default_rxportconf.burst_size; + + if (rec_nb_pkts == 0) + rte_exit(EXIT_FAILURE, + "PMD does not recommend a burst size. 
" + "Provided value must be between " + "1 and %d\n", MAX_PKT_BURST); + else if (rec_nb_pkts > MAX_PKT_BURST) + rte_exit(EXIT_FAILURE, + "PMD recommended burst size of %d" + " exceeds maximum value of %d\n", + rec_nb_pkts, MAX_PKT_BURST); + printf("Using PMD-provided burst value of %d\n", + rec_nb_pkts); + nb_pkt_per_burst = rec_nb_pkts; + } else if (n > MAX_PKT_BURST) rte_exit(EXIT_FAILURE, - "burst must >= 1 and <= %d]", - MAX_PKT_BURST); + "burst must be between1 and %d\n", + MAX_PKT_BURST); + else + nb_pkt_per_burst = (uint16_t) n; } if (!strcmp(lgopts[opt_idx].name, "mbcache")) { n = atoi(optarg); @@ -1092,6 +1123,14 @@ launch_args_parse(int argc, char** argv) rte_exit(EXIT_FAILURE, "tx-offloads must be >= 0\n"); } + if (!strcmp(lgopts[opt_idx].name, "vxlan-gpe-port")) { + n = atoi(optarg); + if (n >= 0) + vxlan_gpe_udp_port = (uint16_t)n; + else + rte_exit(EXIT_FAILURE, + "vxlan-gpe-port must be >= 0\n"); + } if (!strcmp(lgopts[opt_idx].name, "print-event")) if (parse_event_printing_config(optarg, 1)) { rte_exit(EXIT_FAILURE, @@ -1102,7 +1141,12 @@ launch_args_parse(int argc, char** argv) rte_exit(EXIT_FAILURE, "invalid mask-event argument\n"); } - + if (!strcmp(lgopts[opt_idx].name, "hot-plug")) + hot_plug = 1; + if (!strcmp(lgopts[opt_idx].name, "mlockall")) + do_mlockall = 1; + if (!strcmp(lgopts[opt_idx].name, "no-mlockall")) + do_mlockall = 0; break; case 'h': usage(argv[0]); diff --git a/app/test-pmd/softnicfwd.c b/app/test-pmd/softnicfwd.c new file mode 100644 index 00000000..7ff62280 --- /dev/null +++ b/app/test-pmd/softnicfwd.c @@ -0,0 +1,674 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "testpmd.h" + +#define SUBPORT_NODES_PER_PORT 1 +#define PIPE_NODES_PER_SUBPORT 4096 +#define TC_NODES_PER_PIPE 4 +#define QUEUE_NODES_PER_TC 4 + +#define NUM_PIPE_NODES \ + (SUBPORT_NODES_PER_PORT * PIPE_NODES_PER_SUBPORT) + +#define NUM_TC_NODES \ + (NUM_PIPE_NODES * TC_NODES_PER_PIPE) + +#define ROOT_NODE_ID 1000000 +#define SUBPORT_NODES_START_ID 900000 +#define PIPE_NODES_START_ID 800000 +#define TC_NODES_START_ID 700000 + +#define STATS_MASK_DEFAULT \ + (RTE_TM_STATS_N_PKTS | \ + RTE_TM_STATS_N_BYTES | \ + RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \ + RTE_TM_STATS_N_BYTES_GREEN_DROPPED) + +#define STATS_MASK_QUEUE \ + (STATS_MASK_DEFAULT | \ + RTE_TM_STATS_N_PKTS_QUEUED) + +#define BYTES_IN_MBPS (1000 * 1000 / 8) +#define TOKEN_BUCKET_SIZE 1000000 + +/* TM Hierarchy Levels */ +enum tm_hierarchy_level { + TM_NODE_LEVEL_PORT = 0, + TM_NODE_LEVEL_SUBPORT, + TM_NODE_LEVEL_PIPE, + TM_NODE_LEVEL_TC, + TM_NODE_LEVEL_QUEUE, + TM_NODE_LEVEL_MAX, +}; + +struct tm_hierarchy { + /* TM Nodes */ + uint32_t root_node_id; + uint32_t subport_node_id[SUBPORT_NODES_PER_PORT]; + uint32_t pipe_node_id[SUBPORT_NODES_PER_PORT][PIPE_NODES_PER_SUBPORT]; + uint32_t tc_node_id[NUM_PIPE_NODES][TC_NODES_PER_PIPE]; + uint32_t queue_node_id[NUM_TC_NODES][QUEUE_NODES_PER_TC]; + + /* TM Hierarchy Nodes Shaper Rates */ + uint32_t root_node_shaper_rate; + uint32_t subport_node_shaper_rate; + uint32_t pipe_node_shaper_rate; + uint32_t tc_node_shaper_rate; + uint32_t tc_node_shared_shaper_rate; + + uint32_t n_shapers; +}; + +static struct fwd_lcore *softnic_fwd_lcore; +static uint16_t softnic_port_id; +struct fwd_engine softnic_fwd_engine; + +/* + * Softnic packet forward + */ +static void +softnic_fwd(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 
+ uint16_t nb_rx; + uint16_t nb_tx; + uint32_t retry; + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; +#endif + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + start_tsc = rte_rdtsc(); +#endif + + /* Packets Receive */ + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, + pkts_burst, nb_pkt_per_burst); + fs->rx_packets += nb_rx; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->rx_burst_stats.pkt_burst_spread[nb_rx]++; +#endif + + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + pkts_burst, nb_rx); + + /* Retry if necessary */ + if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) { + retry = 0; + while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) { + rte_delay_us(burst_tx_delay_time); + nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + &pkts_burst[nb_tx], nb_rx - nb_tx); + } + } + fs->tx_packets += nb_tx; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; +#endif + + if (unlikely(nb_tx < nb_rx)) { + fs->fwd_dropped += (nb_rx - nb_tx); + do { + rte_pktmbuf_free(pkts_burst[nb_tx]); + } while (++nb_tx < nb_rx); + } +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + +static void +softnic_fwd_run(struct fwd_stream *fs) +{ + rte_pmd_softnic_run(softnic_port_id); + softnic_fwd(fs); +} + +/** + * Softnic init + */ +static int +softnic_begin(void *arg __rte_unused) +{ + for (;;) { + if (!softnic_fwd_lcore->stopped) + break; + } + + do { + /* Run softnic */ + rte_pmd_softnic_run(softnic_port_id); + } while (!softnic_fwd_lcore->stopped); + + return 0; +} + +static void +set_tm_hiearchy_nodes_shaper_rate(portid_t port_id, + struct tm_hierarchy *h) +{ + struct rte_eth_link link_params; + uint64_t tm_port_rate; + + memset(&link_params, 0, sizeof(link_params)); + + rte_eth_link_get(port_id, &link_params); + tm_port_rate = (uint64_t)ETH_SPEED_NUM_10G * BYTES_IN_MBPS; + + /* Set tm hierarchy shapers rate */ + h->root_node_shaper_rate = tm_port_rate; + h->subport_node_shaper_rate = + tm_port_rate / SUBPORT_NODES_PER_PORT; + h->pipe_node_shaper_rate + = h->subport_node_shaper_rate / PIPE_NODES_PER_SUBPORT; + h->tc_node_shaper_rate = h->pipe_node_shaper_rate; + h->tc_node_shared_shaper_rate = h->subport_node_shaper_rate; +} + +static int +softport_tm_root_node_add(portid_t port_id, struct tm_hierarchy *h, + struct rte_tm_error *error) +{ + struct rte_tm_node_params rnp; + struct rte_tm_shaper_params rsp; + uint32_t priority, weight, level_id, shaper_profile_id; + + memset(&rsp, 0, sizeof(struct rte_tm_shaper_params)); + memset(&rnp, 0, sizeof(struct rte_tm_node_params)); + + /* Shaper profile Parameters */ + rsp.peak.rate = h->root_node_shaper_rate; + rsp.peak.size = TOKEN_BUCKET_SIZE; + rsp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + shaper_profile_id = 0; + + if (rte_tm_shaper_profile_add(port_id, shaper_profile_id, + &rsp, error)) { + printf("%s ERROR(%d)-%s!(shaper_id %u)\n ", + __func__, error->type, error->message, + shaper_profile_id); + return -1; + } + + /* Root Node Parameters */ + h->root_node_id = ROOT_NODE_ID; + weight = 1; + priority = 0; + level_id = TM_NODE_LEVEL_PORT; + rnp.shaper_profile_id = shaper_profile_id; + rnp.nonleaf.n_sp_priorities = 1; + rnp.stats_mask = STATS_MASK_DEFAULT; + + /* Add Node to TM Hierarchy */ + if (rte_tm_node_add(port_id, h->root_node_id, RTE_TM_NODE_ID_NULL, + priority, weight, level_id, &rnp, error)) { + printf("%s 
ERROR(%d)-%s!(node_id %u, parent_id %u, level %u)\n", + __func__, error->type, error->message, + h->root_node_id, RTE_TM_NODE_ID_NULL, + level_id); + return -1; + } + /* Update */ + h->n_shapers++; + + printf(" Root node added (Start id %u, Count %u, level %u)\n", + h->root_node_id, 1, level_id); + + return 0; +} + +static int +softport_tm_subport_node_add(portid_t port_id, + struct tm_hierarchy *h, + struct rte_tm_error *error) +{ + uint32_t subport_parent_node_id, subport_node_id = 0; + struct rte_tm_node_params snp; + struct rte_tm_shaper_params ssp; + uint32_t priority, weight, level_id, shaper_profile_id; + uint32_t i; + + memset(&ssp, 0, sizeof(struct rte_tm_shaper_params)); + memset(&snp, 0, sizeof(struct rte_tm_node_params)); + + shaper_profile_id = h->n_shapers; + + /* Add Shaper Profile to TM Hierarchy */ + for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) { + ssp.peak.rate = h->subport_node_shaper_rate; + ssp.peak.size = TOKEN_BUCKET_SIZE; + ssp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + + if (rte_tm_shaper_profile_add(port_id, shaper_profile_id, + &ssp, error)) { + printf("%s ERROR(%d)-%s!(shaper_id %u)\n ", + __func__, error->type, error->message, + shaper_profile_id); + return -1; + } + + /* Node Parameters */ + h->subport_node_id[i] = SUBPORT_NODES_START_ID + i; + subport_parent_node_id = h->root_node_id; + weight = 1; + priority = 0; + level_id = TM_NODE_LEVEL_SUBPORT; + snp.shaper_profile_id = shaper_profile_id; + snp.nonleaf.n_sp_priorities = 1; + snp.stats_mask = STATS_MASK_DEFAULT; + + /* Add Node to TM Hiearchy */ + if (rte_tm_node_add(port_id, + h->subport_node_id[i], + subport_parent_node_id, + priority, weight, + level_id, + &snp, + error)) { + printf("%s ERROR(%d)-%s!(node %u,parent %u,level %u)\n", + __func__, + error->type, + error->message, + h->subport_node_id[i], + subport_parent_node_id, + level_id); + return -1; + } + shaper_profile_id++; + subport_node_id++; + } + /* Update */ + h->n_shapers = shaper_profile_id; + + printf(" Subport nodes added (Start id %u, Count %u, level %u)\n", + h->subport_node_id[0], SUBPORT_NODES_PER_PORT, level_id); + + return 0; +} + +static int +softport_tm_pipe_node_add(portid_t port_id, + struct tm_hierarchy *h, + struct rte_tm_error *error) +{ + uint32_t pipe_parent_node_id; + struct rte_tm_node_params pnp; + struct rte_tm_shaper_params psp; + uint32_t priority, weight, level_id, shaper_profile_id; + uint32_t i, j; + + memset(&psp, 0, sizeof(struct rte_tm_shaper_params)); + memset(&pnp, 0, sizeof(struct rte_tm_node_params)); + + shaper_profile_id = h->n_shapers; + + /* Shaper Profile Parameters */ + psp.peak.rate = h->pipe_node_shaper_rate; + psp.peak.size = TOKEN_BUCKET_SIZE; + psp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + + /* Pipe Node Parameters */ + weight = 1; + priority = 0; + level_id = TM_NODE_LEVEL_PIPE; + pnp.nonleaf.n_sp_priorities = 4; + pnp.stats_mask = STATS_MASK_DEFAULT; + + /* Add Shaper Profiles and Nodes to TM Hierarchy */ + for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) { + for (j = 0; j < PIPE_NODES_PER_SUBPORT; j++) { + if (rte_tm_shaper_profile_add(port_id, + shaper_profile_id, &psp, error)) { + printf("%s ERROR(%d)-%s!(shaper_id %u)\n ", + __func__, error->type, error->message, + shaper_profile_id); + return -1; + } + pnp.shaper_profile_id = shaper_profile_id; + pipe_parent_node_id = h->subport_node_id[i]; + h->pipe_node_id[i][j] = PIPE_NODES_START_ID + + (i * PIPE_NODES_PER_SUBPORT) + j; + + if (rte_tm_node_add(port_id, + h->pipe_node_id[i][j], + pipe_parent_node_id, + priority, 
weight, level_id, + &pnp, + error)) { + printf("%s ERROR(%d)-%s!(node %u,parent %u )\n", + __func__, + error->type, + error->message, + h->pipe_node_id[i][j], + pipe_parent_node_id); + + return -1; + } + shaper_profile_id++; + } + } + /* Update */ + h->n_shapers = shaper_profile_id; + + printf(" Pipe nodes added (Start id %u, Count %u, level %u)\n", + h->pipe_node_id[0][0], NUM_PIPE_NODES, level_id); + + return 0; +} + +static int +softport_tm_tc_node_add(portid_t port_id, + struct tm_hierarchy *h, + struct rte_tm_error *error) +{ + uint32_t tc_parent_node_id; + struct rte_tm_node_params tnp; + struct rte_tm_shaper_params tsp, tssp; + uint32_t shared_shaper_profile_id[TC_NODES_PER_PIPE]; + uint32_t priority, weight, level_id, shaper_profile_id; + uint32_t pos, n_tc_nodes, i, j, k; + + memset(&tsp, 0, sizeof(struct rte_tm_shaper_params)); + memset(&tssp, 0, sizeof(struct rte_tm_shaper_params)); + memset(&tnp, 0, sizeof(struct rte_tm_node_params)); + + shaper_profile_id = h->n_shapers; + + /* Private Shaper Profile (TC) Parameters */ + tsp.peak.rate = h->tc_node_shaper_rate; + tsp.peak.size = TOKEN_BUCKET_SIZE; + tsp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + + /* Shared Shaper Profile (TC) Parameters */ + tssp.peak.rate = h->tc_node_shared_shaper_rate; + tssp.peak.size = TOKEN_BUCKET_SIZE; + tssp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + + /* TC Node Parameters */ + weight = 1; + level_id = TM_NODE_LEVEL_TC; + tnp.n_shared_shapers = 1; + tnp.nonleaf.n_sp_priorities = 1; + tnp.stats_mask = STATS_MASK_DEFAULT; + + /* Add Shared Shaper Profiles to TM Hierarchy */ + for (i = 0; i < TC_NODES_PER_PIPE; i++) { + shared_shaper_profile_id[i] = shaper_profile_id; + + if (rte_tm_shaper_profile_add(port_id, + shared_shaper_profile_id[i], &tssp, error)) { + printf("%s ERROR(%d)-%s!(Shared shaper profileid %u)\n", + __func__, error->type, error->message, + shared_shaper_profile_id[i]); + + return -1; + } + if (rte_tm_shared_shaper_add_update(port_id, i, + shared_shaper_profile_id[i], error)) { + printf("%s ERROR(%d)-%s!(Shared shaper id %u)\n", + __func__, error->type, error->message, i); + + return -1; + } + shaper_profile_id++; + } + + /* Add Shaper Profiles and Nodes to TM Hierarchy */ + n_tc_nodes = 0; + for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) { + for (j = 0; j < PIPE_NODES_PER_SUBPORT; j++) { + for (k = 0; k < TC_NODES_PER_PIPE ; k++) { + priority = k; + tc_parent_node_id = h->pipe_node_id[i][j]; + tnp.shared_shaper_id = + (uint32_t *)calloc(1, sizeof(uint32_t)); + if (tnp.shared_shaper_id == NULL) { + printf("Shared shaper mem alloc err\n"); + return -1; + } + tnp.shared_shaper_id[0] = k; + pos = j + (i * PIPE_NODES_PER_SUBPORT); + h->tc_node_id[pos][k] = + TC_NODES_START_ID + n_tc_nodes; + + if (rte_tm_shaper_profile_add(port_id, + shaper_profile_id, &tsp, error)) { + printf("%s ERROR(%d)-%s!(shaper %u)\n", + __func__, error->type, + error->message, + shaper_profile_id); + + return -1; + } + tnp.shaper_profile_id = shaper_profile_id; + if (rte_tm_node_add(port_id, + h->tc_node_id[pos][k], + tc_parent_node_id, + priority, weight, + level_id, + &tnp, error)) { + printf("%s ERROR(%d)-%s!(node id %u)\n", + __func__, + error->type, + error->message, + h->tc_node_id[pos][k]); + + return -1; + } + shaper_profile_id++; + n_tc_nodes++; + } + } + } + /* Update */ + h->n_shapers = shaper_profile_id; + + printf(" TC nodes added (Start id %u, Count %u, level %u)\n", + h->tc_node_id[0][0], n_tc_nodes, level_id); + + return 0; +} + +static int +softport_tm_queue_node_add(portid_t 
port_id, struct tm_hierarchy *h, + struct rte_tm_error *error) +{ + uint32_t queue_parent_node_id; + struct rte_tm_node_params qnp; + uint32_t priority, weight, level_id, pos; + uint32_t n_queue_nodes, i, j, k; + + memset(&qnp, 0, sizeof(struct rte_tm_node_params)); + + /* Queue Node Parameters */ + priority = 0; + weight = 1; + level_id = TM_NODE_LEVEL_QUEUE; + qnp.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE; + qnp.leaf.cman = RTE_TM_CMAN_TAIL_DROP; + qnp.stats_mask = STATS_MASK_QUEUE; + + /* Add Queue Nodes to TM Hierarchy */ + n_queue_nodes = 0; + for (i = 0; i < NUM_PIPE_NODES; i++) { + for (j = 0; j < TC_NODES_PER_PIPE; j++) { + queue_parent_node_id = h->tc_node_id[i][j]; + for (k = 0; k < QUEUE_NODES_PER_TC; k++) { + pos = j + (i * TC_NODES_PER_PIPE); + h->queue_node_id[pos][k] = n_queue_nodes; + if (rte_tm_node_add(port_id, + h->queue_node_id[pos][k], + queue_parent_node_id, + priority, + weight, + level_id, + &qnp, error)) { + printf("%s ERROR(%d)-%s!(node %u)\n", + __func__, + error->type, + error->message, + h->queue_node_id[pos][k]); + + return -1; + } + n_queue_nodes++; + } + } + } + printf(" Queue nodes added (Start id %u, Count %u, level %u)\n", + h->queue_node_id[0][0], n_queue_nodes, level_id); + + return 0; +} + +static int +softport_tm_hierarchy_specify(portid_t port_id, + struct rte_tm_error *error) +{ + + struct tm_hierarchy h; + int status; + + memset(&h, 0, sizeof(struct tm_hierarchy)); + + /* TM hierarchy shapers rate */ + set_tm_hiearchy_nodes_shaper_rate(port_id, &h); + + /* Add root node (level 0) */ + status = softport_tm_root_node_add(port_id, &h, error); + if (status) + return status; + + /* Add subport node (level 1) */ + status = softport_tm_subport_node_add(port_id, &h, error); + if (status) + return status; + + /* Add pipe nodes (level 2) */ + status = softport_tm_pipe_node_add(port_id, &h, error); + if (status) + return status; + + /* Add traffic class nodes (level 3) */ + status = softport_tm_tc_node_add(port_id, &h, error); + if (status) + return status; + + /* Add queue nodes (level 4) */ + status = softport_tm_queue_node_add(port_id, &h, error); + if (status) + return status; + + return 0; +} + +/* + * Softnic TM default configuration + */ +static void +softnic_tm_default_config(portid_t pi) +{ + struct rte_port *port = &ports[pi]; + struct rte_tm_error error; + int status; + + /* Stop port */ + rte_eth_dev_stop(pi); + + /* TM hierarchy specification */ + status = softport_tm_hierarchy_specify(pi, &error); + if (status) { + printf(" TM Hierarchy built error(%d) - %s\n", + error.type, error.message); + return; + } + printf("\n TM Hierarchy Specified!\n"); + + /* TM hierarchy commit */ + status = rte_tm_hierarchy_commit(pi, 0, &error); + if (status) { + printf(" Hierarchy commit error(%d) - %s\n", + error.type, error.message); + return; + } + printf(" Hierarchy Committed (port %u)!\n", pi); + + /* Start port */ + status = rte_eth_dev_start(pi); + if (status) { + printf("\n Port %u start error!\n", pi); + return; + } + + /* Reset the default hierarchy flag */ + port->softport.default_tm_hierarchy_enable = 0; +} + +/* + * Softnic forwarding init + */ +static void +softnic_fwd_begin(portid_t pi) +{ + struct rte_port *port = &ports[pi]; + uint32_t lcore, fwd_core_present = 0, softnic_run_launch = 0; + int status; + + softnic_fwd_lcore = port->softport.fwd_lcore_arg[0]; + softnic_port_id = pi; + + /* Launch softnic_run function on lcores */ + for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) { + if (!rte_lcore_is_enabled(lcore)) + continue; + + if (lcore 
== rte_get_master_lcore()) + continue; + + if (fwd_core_present == 0) { + fwd_core_present++; + continue; + } + + status = rte_eal_remote_launch(softnic_begin, NULL, lcore); + if (status) + printf("softnic launch on lcore %u failed (%d)\n", + lcore, status); + + softnic_run_launch = 1; + } + + if (!softnic_run_launch) + softnic_fwd_engine.packet_fwd = softnic_fwd_run; + + /* Softnic TM default configuration */ + if (port->softport.default_tm_hierarchy_enable == 1) + softnic_tm_default_config(pi); +} + +struct fwd_engine softnic_fwd_engine = { + .fwd_mode_name = "softnic", + .port_fwd_begin = softnic_fwd_begin, + .port_fwd_end = NULL, + .packet_fwd = softnic_fwd, +}; diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 4c0e2586..ee48db2a 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -126,6 +127,8 @@ portid_t nb_ports; /**< Number of probed ethernet ports. */ struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ lcoreid_t nb_lcores; /**< Number of probed logical cores. */ +portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */ + /* * Test Forwarding Configuration. * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores @@ -154,9 +157,8 @@ struct fwd_engine * fwd_engines[] = { &tx_only_engine, &csum_fwd_engine, &icmp_echo_engine, -#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED - &softnic_tm_engine, - &softnic_tm_bypass_engine, +#if defined RTE_LIBRTE_PMD_SOFTNIC + &softnic_fwd_engine, #endif #ifdef RTE_LIBRTE_IEEE1588 &ieee1588_fwd_engine, @@ -210,9 +212,10 @@ queueid_t nb_txq = 1; /**< Number of TX queues per port. */ /* * Configurable number of RX/TX ring descriptors. + * Defaults are supplied by drivers via ethdev. */ -#define RTE_TEST_RX_DESC_DEFAULT 1024 -#define RTE_TEST_TX_DESC_DEFAULT 1024 +#define RTE_TEST_RX_DESC_DEFAULT 0 +#define RTE_TEST_TX_DESC_DEFAULT 0 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ @@ -284,6 +287,8 @@ uint8_t lsc_interrupt = 1; /* enabled by default */ */ uint8_t rmv_interrupt = 1; /* enabled by default */ +uint8_t hot_plug = 0; /**< hotplug disabled by default. */ + /* * Display or mask ether events * Default to all events except VF_MBOX @@ -292,8 +297,13 @@ uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) | (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) | (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) | (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) | + (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) | (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) | (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV); +/* + * Decide if all memory are locked for performance. + */ +int do_mlockall = 0; /* * NIC bypass mode configuration options. @@ -325,7 +335,6 @@ lcoreid_t latencystats_lcore_id = -1; struct rte_eth_rxmode rx_mode = { .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. 
*/ .offloads = DEV_RX_OFFLOAD_CRC_STRIP, - .ignore_offload_bitfield = 1, }; struct rte_eth_txmode tx_mode = { @@ -337,7 +346,7 @@ struct rte_fdir_conf fdir_conf = { .pballoc = RTE_FDIR_PBALLOC_64K, .status = RTE_FDIR_REPORT_STATUS, .mask = { - .vlan_tci_mask = 0x0, + .vlan_tci_mask = 0xFFEF, .ipv4_mask = { .src_ip = 0xFFFFFFFF, .dst_ip = 0xFFFFFFFF, @@ -384,6 +393,38 @@ uint8_t bitrate_enabled; struct gro_status gro_ports[RTE_MAX_ETHPORTS]; uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; +struct vxlan_encap_conf vxlan_encap_conf = { + .select_ipv4 = 1, + .select_vlan = 0, + .vni = "\x00\x00\x00", + .udp_src = 0, + .udp_dst = RTE_BE16(4789), + .ipv4_src = IPv4(127, 0, 0, 1), + .ipv4_dst = IPv4(255, 255, 255, 255), + .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x01", + .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x11\x11", + .vlan_tci = 0, + .eth_src = "\x00\x00\x00\x00\x00\x00", + .eth_dst = "\xff\xff\xff\xff\xff\xff", +}; + +struct nvgre_encap_conf nvgre_encap_conf = { + .select_ipv4 = 1, + .select_vlan = 0, + .tni = "\x00\x00\x00", + .ipv4_src = IPv4(127, 0, 0, 1), + .ipv4_dst = IPv4(255, 255, 255, 255), + .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x01", + .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x11\x11", + .vlan_tci = 0, + .eth_src = "\x00\x00\x00\x00\x00\x00", + .eth_dst = "\xff\xff\xff\xff\xff\xff", +}; + /* Forward function declarations */ static void map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port); @@ -391,6 +432,12 @@ static void check_all_ports_link_status(uint32_t port_mask); static int eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, void *ret_param); +static void eth_dev_event_callback(char *device_name, + enum rte_dev_event_type type, + void *param); +static int eth_dev_event_callback_register(void); +static int eth_dev_event_callback_unregister(void); + /* * Check if all the ports are started. @@ -656,6 +703,7 @@ init_config(void) uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; struct rte_gro_param gro_param; uint32_t gso_types; + int k; memset(port_per_socket,0,RTE_MAX_NUMA_NODES); @@ -690,6 +738,11 @@ init_config(void) port->dev_conf.txmode = tx_mode; port->dev_conf.rxmode = rx_mode; rte_eth_dev_info_get(pid, &port->dev_info); + + if (!(port->dev_info.rx_offload_capa & + DEV_RX_OFFLOAD_CRC_STRIP)) + port->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_CRC_STRIP; if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) port->dev_conf.txmode.offloads &= @@ -707,6 +760,15 @@ init_config(void) } } + /* Apply Rx offloads configuration */ + for (k = 0; k < port->dev_info.max_rx_queues; k++) + port->rx_conf[k].offloads = + port->dev_conf.rxmode.offloads; + /* Apply Tx offloads configuration */ + for (k = 0; k < port->dev_info.max_tx_queues; k++) + port->tx_conf[k].offloads = + port->dev_conf.txmode.offloads; + /* set flag to initialize port/queue */ port->need_reconfig = 1; port->need_reconfig_queues = 1; @@ -747,7 +809,7 @@ init_config(void) init_port_config(); gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | - DEV_TX_OFFLOAD_GRE_TNL_TSO; + DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO; /* * Records which Mbuf pool to use by each logical core, if needed. 
*/ @@ -786,6 +848,19 @@ init_config(void) "rte_gro_ctx_create() failed\n"); } } + +#if defined RTE_LIBRTE_PMD_SOFTNIC + if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { + RTE_ETH_FOREACH_DEV(pid) { + port = &ports[pid]; + const char *driver = port->dev_info.driver_name; + + if (strcmp(driver, "net_softnic") == 0) + port->softport.fwd_lcore_arg = fwd_lcores; + } + } +#endif + } @@ -871,18 +946,23 @@ init_fwd_streams(void) /* init new */ nb_fwd_streams = nb_fwd_streams_new; - fwd_streams = rte_zmalloc("testpmd: fwd_streams", - sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE); - if (fwd_streams == NULL) - rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " - "failed\n", nb_fwd_streams); + if (nb_fwd_streams) { + fwd_streams = rte_zmalloc("testpmd: fwd_streams", + sizeof(struct fwd_stream *) * nb_fwd_streams, + RTE_CACHE_LINE_SIZE); + if (fwd_streams == NULL) + rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" + " (struct fwd_stream *)) failed\n", + nb_fwd_streams); - for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { - fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", - sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE); - if (fwd_streams[sm_id] == NULL) - rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" - " failed\n"); + for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { + fwd_streams[sm_id] = rte_zmalloc("testpmd:" + " struct fwd_stream", sizeof(struct fwd_stream), + RTE_CACHE_LINE_SIZE); + if (fwd_streams[sm_id] == NULL) + rte_exit(EXIT_FAILURE, "rte_zmalloc" + "(struct fwd_stream) failed\n"); + } } return 0; @@ -916,6 +996,9 @@ pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) pktnb_stats[1] = pktnb_stats[0]; burst_stats[0] = nb_burst; pktnb_stats[0] = nb_pkt; + } else if (nb_burst > burst_stats[1]) { + burst_stats[1] = nb_burst; + pktnb_stats[1] = nb_pkt; } } if (total_burst == 0) @@ -1110,9 +1193,9 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) uint64_t tics_per_1sec; uint64_t tics_datum; uint64_t tics_current; - uint8_t idx_port, cnt_ports; + uint16_t i, cnt_ports; - cnt_ports = rte_eth_dev_count(); + cnt_ports = nb_ports; tics_datum = rte_rdtsc(); tics_per_1sec = rte_get_timer_hz(); #endif @@ -1127,11 +1210,9 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) tics_current = rte_rdtsc(); if (tics_current - tics_datum >= tics_per_1sec) { /* Periodic bitrate calculation */ - for (idx_port = 0; - idx_port < cnt_ports; - idx_port++) + for (i = 0; i < cnt_ports; i++) rte_stats_bitrate_calc(bitrate_data, - idx_port); + ports_ids[i]); tics_datum = tics_current; } } @@ -1201,6 +1282,31 @@ launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) } } +/* + * Update the forward ports list. + */ +void +update_fwd_ports(portid_t new_pid) +{ + unsigned int i; + unsigned int new_nb_fwd_ports = 0; + int move = 0; + + for (i = 0; i < nb_fwd_ports; ++i) { + if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN)) + move = 1; + else if (move) + fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i]; + else + new_nb_fwd_ports++; + } + if (new_pid < RTE_MAX_ETHPORTS) + fwd_ports_ids[new_nb_fwd_ports++] = new_pid; + + nb_fwd_ports = new_nb_fwd_ports; + nb_cfg_ports = new_nb_fwd_ports; +} + /* * Launch packet forwarding configuration. 
*/ @@ -1236,10 +1342,6 @@ start_packet_forwarding(int with_tx_first) return; } - if (init_fwd_streams() < 0) { - printf("Fail from init_fwd_streams()\n"); - return; - } if(dcb_test) { for (i = 0; i < nb_fwd_ports; i++) { @@ -1259,10 +1361,11 @@ start_packet_forwarding(int with_tx_first) } test_done = 0; + fwd_config_setup(); + if(!no_flush_rx) flush_fwd_rx_queues(); - fwd_config_setup(); pkt_fwd_config_display(&cur_fwd_config); rxtx_config_display(); @@ -1586,20 +1689,19 @@ start_port(portid_t pid) } if (port->need_reconfig_queues > 0) { port->need_reconfig_queues = 0; - port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; - /* Apply Tx offloads configuration */ - port->tx_conf.offloads = port->dev_conf.txmode.offloads; /* setup tx queues */ for (qi = 0; qi < nb_txq; qi++) { if ((numa_support) && (txring_numa[pi] != NUMA_NO_CONFIG)) diag = rte_eth_tx_queue_setup(pi, qi, - nb_txd,txring_numa[pi], - &(port->tx_conf)); + port->nb_tx_desc[qi], + txring_numa[pi], + &(port->tx_conf[qi])); else diag = rte_eth_tx_queue_setup(pi, qi, - nb_txd,port->socket_id, - &(port->tx_conf)); + port->nb_tx_desc[qi], + port->socket_id, + &(port->tx_conf[qi])); if (diag == 0) continue; @@ -1610,15 +1712,14 @@ start_port(portid_t pid) RTE_PORT_STOPPED) == 0) printf("Port %d can not be set back " "to stopped\n", pi); - printf("Fail to configure port %d tx queues\n", pi); + printf("Fail to configure port %d tx queues\n", + pi); /* try to reconfigure queues next time */ port->need_reconfig_queues = 1; return -1; } - /* Apply Rx offloads configuration */ - port->rx_conf.offloads = port->dev_conf.rxmode.offloads; - /* setup rx queues */ for (qi = 0; qi < nb_rxq; qi++) { + /* setup rx queues */ if ((numa_support) && (rxring_numa[pi] != NUMA_NO_CONFIG)) { struct rte_mempool * mp = @@ -1632,8 +1733,10 @@ start_port(portid_t pid) } diag = rte_eth_rx_queue_setup(pi, qi, - nb_rxd,rxring_numa[pi], - &(port->rx_conf),mp); + port->nb_rx_desc[qi], + rxring_numa[pi], + &(port->rx_conf[qi]), + mp); } else { struct rte_mempool *mp = mbuf_pool_find(port->socket_id); @@ -1645,8 +1748,10 @@ start_port(portid_t pid) return -1; } diag = rte_eth_rx_queue_setup(pi, qi, - nb_rxd,port->socket_id, - &(port->rx_conf), mp); + port->nb_rx_desc[qi], + port->socket_id, + &(port->rx_conf[qi]), + mp); } if (diag == 0) continue; @@ -1657,7 +1762,8 @@ start_port(portid_t pid) RTE_PORT_STOPPED) == 0) printf("Port %d can not be set back " "to stopped\n", pi); - printf("Fail to configure port %d rx queues\n", pi); + printf("Fail to configure port %d rx queues\n", + pi); /* try to reconfigure queues next time */ port->need_reconfig_queues = 1; return -1; @@ -1853,6 +1959,39 @@ reset_port(portid_t pid) printf("Done\n"); } +static int +eth_dev_event_callback_register(void) +{ + int ret; + + /* register the device event callback */ + ret = rte_dev_event_callback_register(NULL, + eth_dev_event_callback, NULL); + if (ret) { + printf("Failed to register device event callback\n"); + return -1; + } + + return 0; +} + + +static int +eth_dev_event_callback_unregister(void) +{ + int ret; + + /* unregister the device event callback */ + ret = rte_dev_event_callback_unregister(NULL, + eth_dev_event_callback, NULL); + if (ret < 0) { + printf("Failed to unregister device event callback\n"); + return -1; + } + + return 0; +} + void attach_port(char *identifier) { @@ -1876,10 +2015,13 @@ attach_port(char *identifier) reconfig(pi, socket_id); rte_eth_promiscuous_enable(pi); - nb_ports = rte_eth_dev_count(); + ports_ids[nb_ports] = pi; + nb_ports = rte_eth_dev_count_avail(); 
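+ /* a newly attached port starts in the stopped state */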
ports[pi].port_status = RTE_PORT_STOPPED; + update_fwd_ports(pi); + printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); printf("Done\n"); } @@ -1888,6 +2030,7 @@ void detach_port(portid_t port_id) { char name[RTE_ETH_NAME_MAX_LEN]; + uint16_t i; printf("Detaching a port...\n"); @@ -1900,14 +2043,23 @@ detach_port(portid_t port_id) port_flow_flush(port_id); if (rte_eth_dev_detach(port_id, name)) { - TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name); + TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id); return; } - nb_ports = rte_eth_dev_count(); + for (i = 0; i < nb_ports; i++) { + if (ports_ids[i] == port_id) { + ports_ids[i] = ports_ids[nb_ports-1]; + ports_ids[nb_ports-1] = 0; + break; + } + } + nb_ports = rte_eth_dev_count_avail(); + + update_fwd_ports(RTE_MAX_ETHPORTS); - printf("Port '%s' is detached. Now total ports is %d\n", - name, nb_ports); + printf("Port %u is detached. Now total ports is %d\n", + port_id, nb_ports); printf("Done\n"); return; } @@ -1915,7 +2067,9 @@ detach_port(portid_t port_id) void pmd_test_exit(void) { + struct rte_device *device; portid_t pt_id; + int ret; if (test_done == 0) stop_packet_forwarding(); @@ -1927,8 +2081,33 @@ pmd_test_exit(void) fflush(stdout); stop_port(pt_id); close_port(pt_id); + + /* + * This is a workaround to fix a virtio-user issue that + * requires to call clean-up routine to remove existing + * socket. + * This workaround valid only for testpmd, needs a fix + * valid for all applications. + * TODO: Implement proper resource cleanup + */ + device = rte_eth_devices[pt_id].device; + if (device && !strcmp(device->driver->name, "net_virtio_user")) + detach_port(pt_id); } } + + if (hot_plug) { + ret = rte_dev_event_monitor_stop(); + if (ret) + RTE_LOG(ERR, EAL, + "fail to stop device event monitor."); + + ret = eth_dev_event_callback_unregister(); + if (ret) + RTE_LOG(ERR, EAL, + "fail to unregister all event callbacks."); + } + printf("\nBye...\n"); } @@ -1999,18 +2178,23 @@ check_all_ports_link_status(uint32_t port_mask) static void rmv_event_callback(void *arg) { - struct rte_eth_dev *dev; + int need_to_start = 0; + int org_no_link_check = no_link_check; portid_t port_id = (intptr_t)arg; RTE_ETH_VALID_PORTID_OR_RET(port_id); - dev = &rte_eth_devices[port_id]; + if (!test_done && port_is_forwarding(port_id)) { + need_to_start = 1; + stop_packet_forwarding(); + } + no_link_check = 1; stop_port(port_id); + no_link_check = org_no_link_check; close_port(port_id); - printf("removing device %s\n", dev->device->name); - if (rte_eal_dev_detach(dev->device)) - TESTPMD_LOG(ERR, "Failed to detach device %s\n", - dev->device->name); + detach_port(port_id); + if (need_to_start) + start_packet_forwarding(0); } /* This function is used by the interrupt thread */ @@ -2024,6 +2208,7 @@ eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state", [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset", [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", + [RTE_ETH_EVENT_IPSEC] = "IPsec", [RTE_ETH_EVENT_MACSEC] = "MACsec", [RTE_ETH_EVENT_INTR_RMV] = "device removal", [RTE_ETH_EVENT_NEW] = "device probed", @@ -2059,6 +2244,37 @@ eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, return 0; } +/* This function is used by the interrupt thread */ +static void +eth_dev_event_callback(char *device_name, enum rte_dev_event_type type, + __rte_unused void *arg) +{ + if (type >= RTE_DEV_EVENT_MAX) { + fprintf(stderr, "%s called upon invalid event %d\n", + __func__, type); 
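+ /* flush immediately: this callback runs in the interrupt thread */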
+ fflush(stderr); + } + + switch (type) { + case RTE_DEV_EVENT_REMOVE: + RTE_LOG(ERR, EAL, "The device: %s has been removed!\n", + device_name); + /* TODO: After finish failure handle, begin to stop + * packet forward, stop port, close port, detach port. + */ + break; + case RTE_DEV_EVENT_ADD: + RTE_LOG(ERR, EAL, "The device: %s has been added!\n", + device_name); + /* TODO: After finish kernel driver binding, + * begin to attach port. + */ + break; + default: + break; + } +} + static int set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) { @@ -2140,39 +2356,51 @@ map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) static void rxtx_port_config(struct rte_port *port) { - port->rx_conf = port->dev_info.default_rxconf; - port->tx_conf = port->dev_info.default_txconf; + uint16_t qid; + + for (qid = 0; qid < nb_rxq; qid++) { + port->rx_conf[qid] = port->dev_info.default_rxconf; + + /* Check if any Rx parameters have been passed */ + if (rx_pthresh != RTE_PMD_PARAM_UNSET) + port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh; + + if (rx_hthresh != RTE_PMD_PARAM_UNSET) + port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh; + + if (rx_wthresh != RTE_PMD_PARAM_UNSET) + port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh; - /* Check if any RX/TX parameters have been passed */ - if (rx_pthresh != RTE_PMD_PARAM_UNSET) - port->rx_conf.rx_thresh.pthresh = rx_pthresh; + if (rx_free_thresh != RTE_PMD_PARAM_UNSET) + port->rx_conf[qid].rx_free_thresh = rx_free_thresh; - if (rx_hthresh != RTE_PMD_PARAM_UNSET) - port->rx_conf.rx_thresh.hthresh = rx_hthresh; + if (rx_drop_en != RTE_PMD_PARAM_UNSET) + port->rx_conf[qid].rx_drop_en = rx_drop_en; - if (rx_wthresh != RTE_PMD_PARAM_UNSET) - port->rx_conf.rx_thresh.wthresh = rx_wthresh; + port->nb_rx_desc[qid] = nb_rxd; + } - if (rx_free_thresh != RTE_PMD_PARAM_UNSET) - port->rx_conf.rx_free_thresh = rx_free_thresh; + for (qid = 0; qid < nb_txq; qid++) { + port->tx_conf[qid] = port->dev_info.default_txconf; - if (rx_drop_en != RTE_PMD_PARAM_UNSET) - port->rx_conf.rx_drop_en = rx_drop_en; + /* Check if any Tx parameters have been passed */ + if (tx_pthresh != RTE_PMD_PARAM_UNSET) + port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh; - if (tx_pthresh != RTE_PMD_PARAM_UNSET) - port->tx_conf.tx_thresh.pthresh = tx_pthresh; + if (tx_hthresh != RTE_PMD_PARAM_UNSET) + port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh; - if (tx_hthresh != RTE_PMD_PARAM_UNSET) - port->tx_conf.tx_thresh.hthresh = tx_hthresh; + if (tx_wthresh != RTE_PMD_PARAM_UNSET) + port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh; - if (tx_wthresh != RTE_PMD_PARAM_UNSET) - port->tx_conf.tx_thresh.wthresh = tx_wthresh; + if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) + port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh; - if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) - port->tx_conf.tx_rs_thresh = tx_rs_thresh; + if (tx_free_thresh != RTE_PMD_PARAM_UNSET) + port->tx_conf[qid].tx_free_thresh = tx_free_thresh; - if (tx_free_thresh != RTE_PMD_PARAM_UNSET) - port->tx_conf.tx_free_thresh = tx_free_thresh; + port->nb_tx_desc[qid] = nb_txd; + } } void @@ -2184,9 +2412,11 @@ init_port_config(void) RTE_ETH_FOREACH_DEV(pid) { port = &ports[pid]; port->dev_conf.fdir_conf = fdir_conf; + rte_eth_dev_info_get(pid, &port->dev_info); if (nb_rxq > 1) { port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; - port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; + port->dev_conf.rx_adv_conf.rss_conf.rss_hf = + rss_hf & port->dev_info.flow_type_rss_offloads; } else { 
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; @@ -2216,17 +2446,6 @@ init_port_config(void) (rte_eth_devices[pid].data->dev_flags & RTE_ETH_DEV_INTR_RMV)) port->dev_conf.intr_conf.rmv = 1; - -#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED - /* Detect softnic port */ - if (!strcmp(port->dev_info.driver_name, "net_softnic")) { - port->softnic_enable = 1; - memset(&port->softport, 0, sizeof(struct softnic_port)); - - if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm")) - port->softport.tm_flag = 1; - } -#endif } } @@ -2251,7 +2470,10 @@ uint8_t port_is_bonding_slave(portid_t slave_pid) struct rte_port *port; port = &ports[slave_pid]; - return port->slave_flag; + if ((rte_eth_devices[slave_pid].data->dev_flags & + RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1)) + return 1; + return 0; } const uint16_t vlan_tags[] = { @@ -2262,12 +2484,14 @@ const uint16_t vlan_tags[] = { }; static int -get_eth_dcb_conf(struct rte_eth_conf *eth_conf, +get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, enum dcb_mode_enable dcb_mode, enum rte_eth_nb_tcs num_tcs, uint8_t pfc_en) { uint8_t i; + int32_t rc; + struct rte_eth_rss_conf rss_conf; /* * Builds up the correct configuration for dcb+vt based on the vlan tags array @@ -2307,6 +2531,10 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct rte_eth_dcb_tx_conf *tx_conf = ð_conf->tx_adv_conf.dcb_tx_conf; + rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf); + if (rc != 0) + return rc; + rx_conf->nb_tcs = num_tcs; tx_conf->nb_tcs = num_tcs; @@ -2314,8 +2542,9 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf, rx_conf->dcb_tc[i] = i % num_tcs; tx_conf->dcb_tc[i] = i % num_tcs; } + eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; - eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf; + eth_conf->rx_adv_conf.rss_conf = rss_conf; eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; } @@ -2349,17 +2578,13 @@ init_port_dcb_config(portid_t pid, port_conf.txmode = rte_port->dev_conf.txmode; /*set configuration of DCB in vt mode and DCB in non-vt mode*/ - retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en); + retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en); if (retval < 0) return retval; port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; - /** - * Write the configuration into the device. - * Set the numbers of RX & TX queues to 0, so - * the RX & TX queues will not be setup. - */ - rte_eth_dev_configure(pid, 0, 0, &port_conf); + /* re-configure the device . 
*/ rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf); rte_eth_dev_info_get(pid, &rte_port->dev_info); @@ -2474,8 +2699,10 @@ signal_handler(int signum) int main(int argc, char** argv) { - int diag; + int diag; portid_t port_id; + uint16_t count; + int ret; signal(SIGINT, signal_handler); signal(SIGTERM, signal_handler); @@ -2489,17 +2716,17 @@ main(int argc, char** argv) rte_panic("Cannot register log type"); rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); - if (mlockall(MCL_CURRENT | MCL_FUTURE)) { - TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", - strerror(errno)); - } - #ifdef RTE_LIBRTE_PDUMP /* initialize packet capture framework */ rte_pdump_init(NULL); #endif - nb_ports = (portid_t) rte_eth_dev_count(); + count = 0; + RTE_ETH_FOREACH_DEV(port_id) { + ports_ids[count] = port_id; + count++; + } + nb_ports = (portid_t) count; if (nb_ports == 0) TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); @@ -2519,11 +2746,23 @@ main(int argc, char** argv) latencystats_enabled = 0; #endif + /* on FreeBSD, mlockall() is disabled by default */ +#ifdef RTE_EXEC_ENV_BSDAPP + do_mlockall = 0; +#else + do_mlockall = 1; +#endif + argc -= diag; argv += diag; if (argc > 1) launch_args_parse(argc, argv); + if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) { + TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", + strerror(errno)); + } + if (tx_first && interactive) rte_exit(EXIT_FAILURE, "--tx-first cannot be used in " "interactive mode.\n"); @@ -2543,6 +2782,18 @@ main(int argc, char** argv) nb_rxq, nb_txq); init_config(); + + if (hot_plug) { + /* enable hot plug monitoring */ + ret = rte_dev_event_monitor_start(); + if (ret) { + rte_errno = EINVAL; + return -1; + } + eth_dev_event_callback_register(); + + } + if (start_port(RTE_PORT_ALL) != 0) rte_exit(EXIT_FAILURE, "Start ports failed\n"); diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index 153abea0..a1f66147 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -57,10 +57,10 @@ typedef uint16_t streamid_t; #define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1) -#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED -#define TM_MODE 1 +#if defined RTE_LIBRTE_PMD_SOFTNIC +#define SOFTNIC 1 #else -#define TM_MODE 0 +#define SOFTNIC 0 #endif enum { @@ -79,6 +79,19 @@ struct pkt_burst_stats { }; #endif +/** Information for a given RSS type. */ +struct rss_type_info { + const char *str; /**< Type name. */ + uint64_t rss_type; /**< Type value. */ +}; + +/** + * RSS type information table. + * + * An entry with a NULL type name terminates the list. + */ +extern const struct rss_type_info rss_type_table[]; + /** * The data structure associated with a forwarding stream between a receive * port/queue and a transmit port/queue. @@ -122,35 +135,13 @@ struct port_flow { uint8_t data[]; /**< Storage for pattern/actions.
*/ }; -#ifdef TM_MODE -/** - * Soft port tm related parameters - */ -struct softnic_port_tm { - uint32_t default_hierarchy_enable; /**< def hierarchy enable flag */ - uint32_t hierarchy_config; /**< set to 1 if hierarchy configured */ - - uint32_t n_subports_per_port; /**< Num of subport nodes per port */ - uint32_t n_pipes_per_subport; /**< Num of pipe nodes per subport */ - - uint64_t tm_pktfield0_slabpos; /**< Pkt field position for subport */ - uint64_t tm_pktfield0_slabmask; /**< Pkt field mask for the subport */ - uint64_t tm_pktfield0_slabshr; - uint64_t tm_pktfield1_slabpos; /**< Pkt field position for the pipe */ - uint64_t tm_pktfield1_slabmask; /**< Pkt field mask for the pipe */ - uint64_t tm_pktfield1_slabshr; - uint64_t tm_pktfield2_slabpos; /**< Pkt field position table index */ - uint64_t tm_pktfield2_slabmask; /**< Pkt field mask for tc table idx */ - uint64_t tm_pktfield2_slabshr; - uint64_t tm_tc_table[64]; /**< TC translation table */ -}; - +#ifdef SOFTNIC /** * The data structure associate with softnic port */ struct softnic_port { - unsigned int tm_flag; /**< set to 1 if tm feature is enabled */ - struct softnic_port_tm tm; /**< softnic port tm parameters */ + uint32_t default_tm_hierarchy_enable; /**< default tm hierarchy */ + struct fwd_lcore **fwd_lcore_arg; /**< softnic fwd core parameters */ }; #endif @@ -181,15 +172,16 @@ struct rte_port { uint8_t need_reconfig_queues; /**< need reconfiguring queues or not */ uint8_t rss_flag; /**< enable rss or not */ uint8_t dcb_flag; /**< enable dcb */ - struct rte_eth_rxconf rx_conf; /**< rx configuration */ - struct rte_eth_txconf tx_conf; /**< tx configuration */ + uint16_t nb_rx_desc[MAX_QUEUE_ID+1]; /**< per queue rx desc number */ + uint16_t nb_tx_desc[MAX_QUEUE_ID+1]; /**< per queue tx desc number */ + struct rte_eth_rxconf rx_conf[MAX_QUEUE_ID+1]; /**< per queue rx configuration */ + struct rte_eth_txconf tx_conf[MAX_QUEUE_ID+1]; /**< per queue tx configuration */ struct ether_addr *mc_addr_pool; /**< pool of multicast addrs */ uint32_t mc_addr_nb; /**< nb. of addr. in mc_addr_pool */ uint8_t slave_flag; /**< bonding slave port */ struct port_flow *flow_list; /**< Associated flows. 
*/ -#ifdef TM_MODE - unsigned int softnic_enable; /**< softnic flag */ - struct softnic_port softport; /**< softnic port params */ +#ifdef SOFTNIC + struct softnic_port softport; /**< softnic params */ #endif }; @@ -251,9 +243,8 @@ extern struct fwd_engine rx_only_engine; extern struct fwd_engine tx_only_engine; extern struct fwd_engine csum_fwd_engine; extern struct fwd_engine icmp_echo_engine; -#ifdef TM_MODE -extern struct fwd_engine softnic_tm_engine; -extern struct fwd_engine softnic_tm_bypass_engine; +#ifdef SOFTNIC +extern struct fwd_engine softnic_fwd_engine; #endif #ifdef RTE_LIBRTE_IEEE1588 extern struct fwd_engine ieee1588_fwd_engine; @@ -320,6 +311,8 @@ extern uint8_t lsc_interrupt; /**< disabled by "--no-lsc-interrupt" parameter */ extern uint8_t rmv_interrupt; /**< disabled by "--no-rmv-interrupt" parameter */ extern uint32_t event_print_mask; /**< set by "--print-event xxxx" and "--mask-event xxxx parameters */ +extern uint8_t hot_plug; /**< enable by "--hot-plug" parameter */ +extern int do_mlockall; /**< set by "--mlockall" or "--no-mlockall" parameter */ #ifdef RTE_LIBRTE_IXGBE_BYPASS extern uint32_t bypass_timeout; /**< Store the NIC bypass watchdog timeout */ @@ -433,6 +426,8 @@ extern uint32_t retry_enabled; extern struct fwd_lcore **fwd_lcores; extern struct fwd_stream **fwd_streams; +extern uint16_t vxlan_gpe_udp_port; /**< UDP port of tunnel VXLAN-GPE. */ + extern portid_t nb_peer_eth_addrs; /**< Number of peer ethernet addresses. */ extern struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; @@ -460,6 +455,38 @@ struct gso_status { extern struct gso_status gso_ports[RTE_MAX_ETHPORTS]; extern uint16_t gso_max_segment_size; +/* VXLAN encap/decap parameters. */ +struct vxlan_encap_conf { + uint32_t select_ipv4:1; + uint32_t select_vlan:1; + uint8_t vni[3]; + rte_be16_t udp_src; + rte_be16_t udp_dst; + rte_be32_t ipv4_src; + rte_be32_t ipv4_dst; + uint8_t ipv6_src[16]; + uint8_t ipv6_dst[16]; + rte_be16_t vlan_tci; + uint8_t eth_src[ETHER_ADDR_LEN]; + uint8_t eth_dst[ETHER_ADDR_LEN]; +}; +struct vxlan_encap_conf vxlan_encap_conf; + +/* NVGRE encap/decap parameters. 
*/ +struct nvgre_encap_conf { + uint32_t select_ipv4:1; + uint32_t select_vlan:1; + uint8_t tni[3]; + rte_be32_t ipv4_src; + rte_be32_t ipv4_dst; + uint8_t ipv6_src[16]; + uint8_t ipv6_dst[16]; + rte_be16_t vlan_tci; + uint8_t eth_src[ETHER_ADDR_LEN]; + uint8_t eth_dst[ETHER_ADDR_LEN]; +}; +struct nvgre_encap_conf nvgre_encap_conf; + static inline unsigned int lcore_num(void) { @@ -500,12 +527,25 @@ mbuf_pool_find(unsigned int sock_id) static inline uint32_t port_pci_reg_read(struct rte_port *port, uint32_t reg_off) { + const struct rte_pci_device *pci_dev; + const struct rte_bus *bus; void *reg_addr; uint32_t reg_v; - reg_addr = (void *) - ((char *)port->dev_info.pci_dev->mem_resource[0].addr + - reg_off); + if (!port->dev_info.device) { + printf("Invalid device\n"); + return 0; + } + + bus = rte_bus_find_by_device(port->dev_info.device); + if (bus && !strcmp(bus->name, "pci")) { + pci_dev = RTE_DEV_TO_PCI(port->dev_info.device); + } else { + printf("Not a PCI device\n"); + return 0; + } + + reg_addr = ((char *)pci_dev->mem_resource[0].addr + reg_off); reg_v = *((volatile uint32_t *)reg_addr); return rte_le_to_cpu_32(reg_v); } @@ -516,11 +556,24 @@ port_pci_reg_read(struct rte_port *port, uint32_t reg_off) static inline void port_pci_reg_write(struct rte_port *port, uint32_t reg_off, uint32_t reg_v) { + const struct rte_pci_device *pci_dev; + const struct rte_bus *bus; void *reg_addr; - reg_addr = (void *) - ((char *)port->dev_info.pci_dev->mem_resource[0].addr + - reg_off); + if (!port->dev_info.device) { + printf("Invalid device\n"); + return; + } + + bus = rte_bus_find_by_device(port->dev_info.device); + if (bus && !strcmp(bus->name, "pci")) { + pci_dev = RTE_DEV_TO_PCI(port->dev_info.device); + } else { + printf("Not a PCI device\n"); + return; + } + + reg_addr = ((char *)pci_dev->mem_resource[0].addr + reg_off); *((volatile uint32_t *)reg_addr) = rte_cpu_to_le_32(reg_v); } @@ -551,6 +604,7 @@ void fwd_config_setup(void); void set_def_fwd_config(void); void reconfig(portid_t new_port_id, unsigned socket_id); int init_fwd_streams(void); +void update_fwd_ports(portid_t new_pid); void set_fwd_eth_peer(portid_t port_id, char *peer_addr); @@ -575,7 +629,7 @@ int port_flow_create(portid_t port_id, int port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule); int port_flow_flush(portid_t port_id); int port_flow_query(portid_t port_id, uint32_t rule, - enum rte_flow_action_type action); + const struct rte_flow_action *action); void port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group); int port_flow_isolate(portid_t port_id, int set); @@ -681,6 +735,7 @@ enum print_warning { DISABLED_WARN }; int port_id_is_invalid(portid_t port_id, enum print_warning warning); +void print_valid_ports(void); int new_socket_id(unsigned int socket_id); queueid_t get_allowed_max_nb_rxq(portid_t *pid); diff --git a/app/test-pmd/tm.c b/app/test-pmd/tm.c deleted file mode 100644 index 7231552a..00000000 --- a/app/test-pmd/tm.c +++ /dev/null @@ -1,840 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017 Intel Corporation - */ -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "testpmd.h" - -#define SUBPORT_NODES_PER_PORT 1 -#define PIPE_NODES_PER_SUBPORT 4096 -#define TC_NODES_PER_PIPE 4 -#define QUEUE_NODES_PER_TC 4 - -#define NUM_PIPE_NODES \ - (SUBPORT_NODES_PER_PORT * PIPE_NODES_PER_SUBPORT) - -#define NUM_TC_NODES \ - (NUM_PIPE_NODES * TC_NODES_PER_PIPE) - -#define ROOT_NODE_ID 1000000 -#define 
SUBPORT_NODES_START_ID 900000 -#define PIPE_NODES_START_ID 800000 -#define TC_NODES_START_ID 700000 - -#define STATS_MASK_DEFAULT \ - (RTE_TM_STATS_N_PKTS | \ - RTE_TM_STATS_N_BYTES | \ - RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \ - RTE_TM_STATS_N_BYTES_GREEN_DROPPED) - -#define STATS_MASK_QUEUE \ - (STATS_MASK_DEFAULT | \ - RTE_TM_STATS_N_PKTS_QUEUED) - -#define BYTES_IN_MBPS (1000 * 1000 / 8) -#define TOKEN_BUCKET_SIZE 1000000 - -/* TM Hierarchy Levels */ -enum tm_hierarchy_level { - TM_NODE_LEVEL_PORT = 0, - TM_NODE_LEVEL_SUBPORT, - TM_NODE_LEVEL_PIPE, - TM_NODE_LEVEL_TC, - TM_NODE_LEVEL_QUEUE, - TM_NODE_LEVEL_MAX, -}; - -struct tm_hierarchy { - /* TM Nodes */ - uint32_t root_node_id; - uint32_t subport_node_id[SUBPORT_NODES_PER_PORT]; - uint32_t pipe_node_id[SUBPORT_NODES_PER_PORT][PIPE_NODES_PER_SUBPORT]; - uint32_t tc_node_id[NUM_PIPE_NODES][TC_NODES_PER_PIPE]; - uint32_t queue_node_id[NUM_TC_NODES][QUEUE_NODES_PER_TC]; - - /* TM Hierarchy Nodes Shaper Rates */ - uint32_t root_node_shaper_rate; - uint32_t subport_node_shaper_rate; - uint32_t pipe_node_shaper_rate; - uint32_t tc_node_shaper_rate; - uint32_t tc_node_shared_shaper_rate; - - uint32_t n_shapers; -}; - -#define BITFIELD(byte_array, slab_pos, slab_mask, slab_shr) \ -({ \ - uint64_t slab = *((uint64_t *) &byte_array[slab_pos]); \ - uint64_t val = \ - (rte_be_to_cpu_64(slab) & slab_mask) >> slab_shr; \ - val; \ -}) - -#define RTE_SCHED_PORT_HIERARCHY(subport, pipe, \ - traffic_class, queue, color) \ - ((((uint64_t) (queue)) & 0x3) | \ - ((((uint64_t) (traffic_class)) & 0x3) << 2) | \ - ((((uint64_t) (color)) & 0x3) << 4) | \ - ((((uint64_t) (subport)) & 0xFFFF) << 16) | \ - ((((uint64_t) (pipe)) & 0xFFFFFFFF) << 32)) - - -static void -pkt_metadata_set(struct rte_port *p, struct rte_mbuf **pkts, - uint32_t n_pkts) -{ - struct softnic_port_tm *tm = &p->softport.tm; - uint32_t i; - - for (i = 0; i < (n_pkts & (~0x3)); i += 4) { - struct rte_mbuf *pkt0 = pkts[i]; - struct rte_mbuf *pkt1 = pkts[i + 1]; - struct rte_mbuf *pkt2 = pkts[i + 2]; - struct rte_mbuf *pkt3 = pkts[i + 3]; - - uint8_t *pkt0_data = rte_pktmbuf_mtod(pkt0, uint8_t *); - uint8_t *pkt1_data = rte_pktmbuf_mtod(pkt1, uint8_t *); - uint8_t *pkt2_data = rte_pktmbuf_mtod(pkt2, uint8_t *); - uint8_t *pkt3_data = rte_pktmbuf_mtod(pkt3, uint8_t *); - - uint64_t pkt0_subport = BITFIELD(pkt0_data, - tm->tm_pktfield0_slabpos, - tm->tm_pktfield0_slabmask, - tm->tm_pktfield0_slabshr); - uint64_t pkt0_pipe = BITFIELD(pkt0_data, - tm->tm_pktfield1_slabpos, - tm->tm_pktfield1_slabmask, - tm->tm_pktfield1_slabshr); - uint64_t pkt0_dscp = BITFIELD(pkt0_data, - tm->tm_pktfield2_slabpos, - tm->tm_pktfield2_slabmask, - tm->tm_pktfield2_slabshr); - uint32_t pkt0_tc = tm->tm_tc_table[pkt0_dscp & 0x3F] >> 2; - uint32_t pkt0_tc_q = tm->tm_tc_table[pkt0_dscp & 0x3F] & 0x3; - uint64_t pkt1_subport = BITFIELD(pkt1_data, - tm->tm_pktfield0_slabpos, - tm->tm_pktfield0_slabmask, - tm->tm_pktfield0_slabshr); - uint64_t pkt1_pipe = BITFIELD(pkt1_data, - tm->tm_pktfield1_slabpos, - tm->tm_pktfield1_slabmask, - tm->tm_pktfield1_slabshr); - uint64_t pkt1_dscp = BITFIELD(pkt1_data, - tm->tm_pktfield2_slabpos, - tm->tm_pktfield2_slabmask, - tm->tm_pktfield2_slabshr); - uint32_t pkt1_tc = tm->tm_tc_table[pkt1_dscp & 0x3F] >> 2; - uint32_t pkt1_tc_q = tm->tm_tc_table[pkt1_dscp & 0x3F] & 0x3; - - uint64_t pkt2_subport = BITFIELD(pkt2_data, - tm->tm_pktfield0_slabpos, - tm->tm_pktfield0_slabmask, - tm->tm_pktfield0_slabshr); - uint64_t pkt2_pipe = BITFIELD(pkt2_data, - tm->tm_pktfield1_slabpos, - 
tm->tm_pktfield1_slabmask, - tm->tm_pktfield1_slabshr); - uint64_t pkt2_dscp = BITFIELD(pkt2_data, - tm->tm_pktfield2_slabpos, - tm->tm_pktfield2_slabmask, - tm->tm_pktfield2_slabshr); - uint32_t pkt2_tc = tm->tm_tc_table[pkt2_dscp & 0x3F] >> 2; - uint32_t pkt2_tc_q = tm->tm_tc_table[pkt2_dscp & 0x3F] & 0x3; - - uint64_t pkt3_subport = BITFIELD(pkt3_data, - tm->tm_pktfield0_slabpos, - tm->tm_pktfield0_slabmask, - tm->tm_pktfield0_slabshr); - uint64_t pkt3_pipe = BITFIELD(pkt3_data, - tm->tm_pktfield1_slabpos, - tm->tm_pktfield1_slabmask, - tm->tm_pktfield1_slabshr); - uint64_t pkt3_dscp = BITFIELD(pkt3_data, - tm->tm_pktfield2_slabpos, - tm->tm_pktfield2_slabmask, - tm->tm_pktfield2_slabshr); - uint32_t pkt3_tc = tm->tm_tc_table[pkt3_dscp & 0x3F] >> 2; - uint32_t pkt3_tc_q = tm->tm_tc_table[pkt3_dscp & 0x3F] & 0x3; - - uint64_t pkt0_sched = RTE_SCHED_PORT_HIERARCHY(pkt0_subport, - pkt0_pipe, - pkt0_tc, - pkt0_tc_q, - 0); - uint64_t pkt1_sched = RTE_SCHED_PORT_HIERARCHY(pkt1_subport, - pkt1_pipe, - pkt1_tc, - pkt1_tc_q, - 0); - uint64_t pkt2_sched = RTE_SCHED_PORT_HIERARCHY(pkt2_subport, - pkt2_pipe, - pkt2_tc, - pkt2_tc_q, - 0); - uint64_t pkt3_sched = RTE_SCHED_PORT_HIERARCHY(pkt3_subport, - pkt3_pipe, - pkt3_tc, - pkt3_tc_q, - 0); - - pkt0->hash.sched.lo = pkt0_sched & 0xFFFFFFFF; - pkt0->hash.sched.hi = pkt0_sched >> 32; - pkt1->hash.sched.lo = pkt1_sched & 0xFFFFFFFF; - pkt1->hash.sched.hi = pkt1_sched >> 32; - pkt2->hash.sched.lo = pkt2_sched & 0xFFFFFFFF; - pkt2->hash.sched.hi = pkt2_sched >> 32; - pkt3->hash.sched.lo = pkt3_sched & 0xFFFFFFFF; - pkt3->hash.sched.hi = pkt3_sched >> 32; - } - - for (; i < n_pkts; i++) { - struct rte_mbuf *pkt = pkts[i]; - - uint8_t *pkt_data = rte_pktmbuf_mtod(pkt, uint8_t *); - - uint64_t pkt_subport = BITFIELD(pkt_data, - tm->tm_pktfield0_slabpos, - tm->tm_pktfield0_slabmask, - tm->tm_pktfield0_slabshr); - uint64_t pkt_pipe = BITFIELD(pkt_data, - tm->tm_pktfield1_slabpos, - tm->tm_pktfield1_slabmask, - tm->tm_pktfield1_slabshr); - uint64_t pkt_dscp = BITFIELD(pkt_data, - tm->tm_pktfield2_slabpos, - tm->tm_pktfield2_slabmask, - tm->tm_pktfield2_slabshr); - uint32_t pkt_tc = tm->tm_tc_table[pkt_dscp & 0x3F] >> 2; - uint32_t pkt_tc_q = tm->tm_tc_table[pkt_dscp & 0x3F] & 0x3; - - uint64_t pkt_sched = RTE_SCHED_PORT_HIERARCHY(pkt_subport, - pkt_pipe, - pkt_tc, - pkt_tc_q, - 0); - - pkt->hash.sched.lo = pkt_sched & 0xFFFFFFFF; - pkt->hash.sched.hi = pkt_sched >> 32; - } -} - -/* - * Soft port packet forward - */ -static void -softport_packet_fwd(struct fwd_stream *fs) -{ - struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; - struct rte_port *rte_tx_port = &ports[fs->tx_port]; - uint16_t nb_rx; - uint16_t nb_tx; - uint32_t retry; - -#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES - uint64_t start_tsc; - uint64_t end_tsc; - uint64_t core_cycles; -#endif - -#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES - start_tsc = rte_rdtsc(); -#endif - - /* Packets Receive */ - nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, - pkts_burst, nb_pkt_per_burst); - fs->rx_packets += nb_rx; - -#ifdef RTE_TEST_PMD_RECORD_BURST_STATS - fs->rx_burst_stats.pkt_burst_spread[nb_rx]++; -#endif - - if (rte_tx_port->softnic_enable) { - /* Set packet metadata if tm flag enabled */ - if (rte_tx_port->softport.tm_flag) - pkt_metadata_set(rte_tx_port, pkts_burst, nb_rx); - - /* Softport run */ - rte_pmd_softnic_run(fs->tx_port); - } - nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, - pkts_burst, nb_rx); - - /* Retry if necessary */ - if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) { - retry = 0; - while 
(nb_tx < nb_rx && retry++ < burst_tx_retry_num) { - rte_delay_us(burst_tx_delay_time); - nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, - &pkts_burst[nb_tx], nb_rx - nb_tx); - } - } - fs->tx_packets += nb_tx; - -#ifdef RTE_TEST_PMD_RECORD_BURST_STATS - fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; -#endif - - if (unlikely(nb_tx < nb_rx)) { - fs->fwd_dropped += (nb_rx - nb_tx); - do { - rte_pktmbuf_free(pkts_burst[nb_tx]); - } while (++nb_tx < nb_rx); - } -#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES - end_tsc = rte_rdtsc(); - core_cycles = (end_tsc - start_tsc); - fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); -#endif -} - -static void -set_tm_hiearchy_nodes_shaper_rate(portid_t port_id, struct tm_hierarchy *h) -{ - struct rte_eth_link link_params; - uint64_t tm_port_rate; - - memset(&link_params, 0, sizeof(link_params)); - - rte_eth_link_get(port_id, &link_params); - tm_port_rate = (uint64_t)link_params.link_speed * BYTES_IN_MBPS; - - if (tm_port_rate > UINT32_MAX) - tm_port_rate = UINT32_MAX; - - /* Set tm hierarchy shapers rate */ - h->root_node_shaper_rate = tm_port_rate; - h->subport_node_shaper_rate = - tm_port_rate / SUBPORT_NODES_PER_PORT; - h->pipe_node_shaper_rate - = h->subport_node_shaper_rate / PIPE_NODES_PER_SUBPORT; - h->tc_node_shaper_rate = h->pipe_node_shaper_rate; - h->tc_node_shared_shaper_rate = h->subport_node_shaper_rate; -} - -static int -softport_tm_root_node_add(portid_t port_id, struct tm_hierarchy *h, - struct rte_tm_error *error) -{ - struct rte_tm_node_params rnp; - struct rte_tm_shaper_params rsp; - uint32_t priority, weight, level_id, shaper_profile_id; - - memset(&rsp, 0, sizeof(struct rte_tm_shaper_params)); - memset(&rnp, 0, sizeof(struct rte_tm_node_params)); - - /* Shaper profile Parameters */ - rsp.peak.rate = h->root_node_shaper_rate; - rsp.peak.size = TOKEN_BUCKET_SIZE; - rsp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; - shaper_profile_id = 0; - - if (rte_tm_shaper_profile_add(port_id, shaper_profile_id, - &rsp, error)) { - printf("%s ERROR(%d)-%s!(shaper_id %u)\n ", - __func__, error->type, error->message, - shaper_profile_id); - return -1; - } - - /* Root Node Parameters */ - h->root_node_id = ROOT_NODE_ID; - weight = 1; - priority = 0; - level_id = TM_NODE_LEVEL_PORT; - rnp.shaper_profile_id = shaper_profile_id; - rnp.nonleaf.n_sp_priorities = 1; - rnp.stats_mask = STATS_MASK_DEFAULT; - - /* Add Node to TM Hierarchy */ - if (rte_tm_node_add(port_id, h->root_node_id, RTE_TM_NODE_ID_NULL, - priority, weight, level_id, &rnp, error)) { - printf("%s ERROR(%d)-%s!(node_id %u, parent_id %u, level %u)\n", - __func__, error->type, error->message, - h->root_node_id, RTE_TM_NODE_ID_NULL, - level_id); - return -1; - } - /* Update */ - h->n_shapers++; - - printf(" Root node added (Start id %u, Count %u, level %u)\n", - h->root_node_id, 1, level_id); - - return 0; -} - -static int -softport_tm_subport_node_add(portid_t port_id, struct tm_hierarchy *h, - struct rte_tm_error *error) -{ - uint32_t subport_parent_node_id, subport_node_id = 0; - struct rte_tm_node_params snp; - struct rte_tm_shaper_params ssp; - uint32_t priority, weight, level_id, shaper_profile_id; - uint32_t i; - - memset(&ssp, 0, sizeof(struct rte_tm_shaper_params)); - memset(&snp, 0, sizeof(struct rte_tm_node_params)); - - shaper_profile_id = h->n_shapers; - - /* Add Shaper Profile to TM Hierarchy */ - for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) { - ssp.peak.rate = h->subport_node_shaper_rate; - ssp.peak.size = TOKEN_BUCKET_SIZE; - ssp.pkt_length_adjust = 
RTE_TM_ETH_FRAMING_OVERHEAD_FCS; - - if (rte_tm_shaper_profile_add(port_id, shaper_profile_id, - &ssp, error)) { - printf("%s ERROR(%d)-%s!(shaper_id %u)\n ", - __func__, error->type, error->message, - shaper_profile_id); - return -1; - } - - /* Node Parameters */ - h->subport_node_id[i] = SUBPORT_NODES_START_ID + i; - subport_parent_node_id = h->root_node_id; - weight = 1; - priority = 0; - level_id = TM_NODE_LEVEL_SUBPORT; - snp.shaper_profile_id = shaper_profile_id; - snp.nonleaf.n_sp_priorities = 1; - snp.stats_mask = STATS_MASK_DEFAULT; - - /* Add Node to TM Hiearchy */ - if (rte_tm_node_add(port_id, - h->subport_node_id[i], - subport_parent_node_id, - priority, weight, - level_id, - &snp, - error)) { - printf("%s ERROR(%d)-%s!(node %u,parent %u,level %u)\n", - __func__, - error->type, - error->message, - h->subport_node_id[i], - subport_parent_node_id, - level_id); - return -1; - } - shaper_profile_id++; - subport_node_id++; - } - /* Update */ - h->n_shapers = shaper_profile_id; - - printf(" Subport nodes added (Start id %u, Count %u, level %u)\n", - h->subport_node_id[0], SUBPORT_NODES_PER_PORT, level_id); - - return 0; -} - -static int -softport_tm_pipe_node_add(portid_t port_id, struct tm_hierarchy *h, - struct rte_tm_error *error) -{ - uint32_t pipe_parent_node_id; - struct rte_tm_node_params pnp; - struct rte_tm_shaper_params psp; - uint32_t priority, weight, level_id, shaper_profile_id; - uint32_t i, j; - - memset(&psp, 0, sizeof(struct rte_tm_shaper_params)); - memset(&pnp, 0, sizeof(struct rte_tm_node_params)); - - shaper_profile_id = h->n_shapers; - - /* Shaper Profile Parameters */ - psp.peak.rate = h->pipe_node_shaper_rate; - psp.peak.size = TOKEN_BUCKET_SIZE; - psp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; - - /* Pipe Node Parameters */ - weight = 1; - priority = 0; - level_id = TM_NODE_LEVEL_PIPE; - pnp.nonleaf.n_sp_priorities = 4; - pnp.stats_mask = STATS_MASK_DEFAULT; - - /* Add Shaper Profiles and Nodes to TM Hierarchy */ - for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) { - for (j = 0; j < PIPE_NODES_PER_SUBPORT; j++) { - if (rte_tm_shaper_profile_add(port_id, - shaper_profile_id, &psp, error)) { - printf("%s ERROR(%d)-%s!(shaper_id %u)\n ", - __func__, error->type, error->message, - shaper_profile_id); - return -1; - } - pnp.shaper_profile_id = shaper_profile_id; - pipe_parent_node_id = h->subport_node_id[i]; - h->pipe_node_id[i][j] = PIPE_NODES_START_ID + - (i * PIPE_NODES_PER_SUBPORT) + j; - - if (rte_tm_node_add(port_id, - h->pipe_node_id[i][j], - pipe_parent_node_id, - priority, weight, level_id, - &pnp, - error)) { - printf("%s ERROR(%d)-%s!(node %u,parent %u )\n", - __func__, - error->type, - error->message, - h->pipe_node_id[i][j], - pipe_parent_node_id); - - return -1; - } - shaper_profile_id++; - } - } - /* Update */ - h->n_shapers = shaper_profile_id; - - printf(" Pipe nodes added (Start id %u, Count %u, level %u)\n", - h->pipe_node_id[0][0], NUM_PIPE_NODES, level_id); - - return 0; -} - -static int -softport_tm_tc_node_add(portid_t port_id, struct tm_hierarchy *h, - struct rte_tm_error *error) -{ - uint32_t tc_parent_node_id; - struct rte_tm_node_params tnp; - struct rte_tm_shaper_params tsp, tssp; - uint32_t shared_shaper_profile_id[TC_NODES_PER_PIPE]; - uint32_t priority, weight, level_id, shaper_profile_id; - uint32_t pos, n_tc_nodes, i, j, k; - - memset(&tsp, 0, sizeof(struct rte_tm_shaper_params)); - memset(&tssp, 0, sizeof(struct rte_tm_shaper_params)); - memset(&tnp, 0, sizeof(struct rte_tm_node_params)); - - shaper_profile_id = 
h->n_shapers; - - /* Private Shaper Profile (TC) Parameters */ - tsp.peak.rate = h->tc_node_shaper_rate; - tsp.peak.size = TOKEN_BUCKET_SIZE; - tsp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; - - /* Shared Shaper Profile (TC) Parameters */ - tssp.peak.rate = h->tc_node_shared_shaper_rate; - tssp.peak.size = TOKEN_BUCKET_SIZE; - tssp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; - - /* TC Node Parameters */ - weight = 1; - level_id = TM_NODE_LEVEL_TC; - tnp.n_shared_shapers = 1; - tnp.nonleaf.n_sp_priorities = 1; - tnp.stats_mask = STATS_MASK_DEFAULT; - - /* Add Shared Shaper Profiles to TM Hierarchy */ - for (i = 0; i < TC_NODES_PER_PIPE; i++) { - shared_shaper_profile_id[i] = shaper_profile_id; - - if (rte_tm_shaper_profile_add(port_id, - shared_shaper_profile_id[i], &tssp, error)) { - printf("%s ERROR(%d)-%s!(Shared shaper profileid %u)\n", - __func__, error->type, error->message, - shared_shaper_profile_id[i]); - - return -1; - } - if (rte_tm_shared_shaper_add_update(port_id, i, - shared_shaper_profile_id[i], error)) { - printf("%s ERROR(%d)-%s!(Shared shaper id %u)\n", - __func__, error->type, error->message, i); - - return -1; - } - shaper_profile_id++; - } - - /* Add Shaper Profiles and Nodes to TM Hierarchy */ - n_tc_nodes = 0; - for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) { - for (j = 0; j < PIPE_NODES_PER_SUBPORT; j++) { - for (k = 0; k < TC_NODES_PER_PIPE ; k++) { - priority = k; - tc_parent_node_id = h->pipe_node_id[i][j]; - tnp.shared_shaper_id = - (uint32_t *)calloc(1, sizeof(uint32_t)); - if (tnp.shared_shaper_id == NULL) { - printf("Shared shaper mem alloc err\n"); - return -1; - } - tnp.shared_shaper_id[0] = k; - pos = j + (i * PIPE_NODES_PER_SUBPORT); - h->tc_node_id[pos][k] = - TC_NODES_START_ID + n_tc_nodes; - - if (rte_tm_shaper_profile_add(port_id, - shaper_profile_id, &tsp, error)) { - printf("%s ERROR(%d)-%s!(shaper %u)\n", - __func__, error->type, - error->message, - shaper_profile_id); - - return -1; - } - tnp.shaper_profile_id = shaper_profile_id; - if (rte_tm_node_add(port_id, - h->tc_node_id[pos][k], - tc_parent_node_id, - priority, weight, - level_id, - &tnp, error)) { - printf("%s ERROR(%d)-%s!(node id %u)\n", - __func__, - error->type, - error->message, - h->tc_node_id[pos][k]); - - return -1; - } - shaper_profile_id++; - n_tc_nodes++; - } - } - } - /* Update */ - h->n_shapers = shaper_profile_id; - - printf(" TC nodes added (Start id %u, Count %u, level %u)\n", - h->tc_node_id[0][0], n_tc_nodes, level_id); - - return 0; -} - -static int -softport_tm_queue_node_add(portid_t port_id, struct tm_hierarchy *h, - struct rte_tm_error *error) -{ - uint32_t queue_parent_node_id; - struct rte_tm_node_params qnp; - uint32_t priority, weight, level_id, pos; - uint32_t n_queue_nodes, i, j, k; - - memset(&qnp, 0, sizeof(struct rte_tm_node_params)); - - /* Queue Node Parameters */ - priority = 0; - weight = 1; - level_id = TM_NODE_LEVEL_QUEUE; - qnp.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE; - qnp.leaf.cman = RTE_TM_CMAN_TAIL_DROP; - qnp.stats_mask = STATS_MASK_QUEUE; - - /* Add Queue Nodes to TM Hierarchy */ - n_queue_nodes = 0; - for (i = 0; i < NUM_PIPE_NODES; i++) { - for (j = 0; j < TC_NODES_PER_PIPE; j++) { - queue_parent_node_id = h->tc_node_id[i][j]; - for (k = 0; k < QUEUE_NODES_PER_TC; k++) { - pos = j + (i * TC_NODES_PER_PIPE); - h->queue_node_id[pos][k] = n_queue_nodes; - if (rte_tm_node_add(port_id, - h->queue_node_id[pos][k], - queue_parent_node_id, - priority, - weight, - level_id, - &qnp, error)) { - printf("%s ERROR(%d)-%s!(node 
%u)\n", - __func__, - error->type, - error->message, - h->queue_node_id[pos][k]); - - return -1; - } - n_queue_nodes++; - } - } - } - printf(" Queue nodes added (Start id %u, Count %u, level %u)\n", - h->queue_node_id[0][0], n_queue_nodes, level_id); - - return 0; -} - -/* - * TM Packet Field Setup - */ -static void -softport_tm_pktfield_setup(portid_t port_id) -{ - struct rte_port *p = &ports[port_id]; - uint64_t pktfield0_mask = 0; - uint64_t pktfield1_mask = 0x0000000FFF000000LLU; - uint64_t pktfield2_mask = 0x00000000000000FCLLU; - - p->softport.tm = (struct softnic_port_tm) { - .n_subports_per_port = SUBPORT_NODES_PER_PORT, - .n_pipes_per_subport = PIPE_NODES_PER_SUBPORT, - - /* Packet field to identify subport - * - * Default configuration assumes only one subport, thus - * the subport ID is hardcoded to 0 - */ - .tm_pktfield0_slabpos = 0, - .tm_pktfield0_slabmask = pktfield0_mask, - .tm_pktfield0_slabshr = - __builtin_ctzll(pktfield0_mask), - - /* Packet field to identify pipe. - * - * Default value assumes Ethernet/IPv4/UDP packets, - * UDP payload bits 12 .. 23 - */ - .tm_pktfield1_slabpos = 40, - .tm_pktfield1_slabmask = pktfield1_mask, - .tm_pktfield1_slabshr = - __builtin_ctzll(pktfield1_mask), - - /* Packet field used as index into TC translation table - * to identify the traffic class and queue. - * - * Default value assumes Ethernet/IPv4 packets, IPv4 - * DSCP field - */ - .tm_pktfield2_slabpos = 8, - .tm_pktfield2_slabmask = pktfield2_mask, - .tm_pktfield2_slabshr = - __builtin_ctzll(pktfield2_mask), - - .tm_tc_table = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - }, /**< TC translation table */ - }; -} - -static int -softport_tm_hierarchy_specify(portid_t port_id, struct rte_tm_error *error) -{ - - struct tm_hierarchy h; - int status; - - memset(&h, 0, sizeof(struct tm_hierarchy)); - - /* TM hierarchy shapers rate */ - set_tm_hiearchy_nodes_shaper_rate(port_id, &h); - - /* Add root node (level 0) */ - status = softport_tm_root_node_add(port_id, &h, error); - if (status) - return status; - - /* Add subport node (level 1) */ - status = softport_tm_subport_node_add(port_id, &h, error); - if (status) - return status; - - /* Add pipe nodes (level 2) */ - status = softport_tm_pipe_node_add(port_id, &h, error); - if (status) - return status; - - /* Add traffic class nodes (level 3) */ - status = softport_tm_tc_node_add(port_id, &h, error); - if (status) - return status; - - /* Add queue nodes (level 4) */ - status = softport_tm_queue_node_add(port_id, &h, error); - if (status) - return status; - - /* TM packet fields setup */ - softport_tm_pktfield_setup(port_id); - - return 0; -} - -/* - * Soft port Init - */ -static void -softport_tm_begin(portid_t pi) -{ - struct rte_port *port = &ports[pi]; - - /* Soft port TM flag */ - if (port->softport.tm_flag == 1) { - printf("\n\n TM feature available on port %u\n", pi); - - /* Soft port TM hierarchy configuration */ - if ((port->softport.tm.hierarchy_config == 0) && - (port->softport.tm.default_hierarchy_enable == 1)) { - struct rte_tm_error error; - int status; - - /* Stop port */ - rte_eth_dev_stop(pi); - - /* TM hierarchy specification */ - status = softport_tm_hierarchy_specify(pi, &error); - if (status) { - printf(" TM Hierarchy built error(%d) - %s\n", - error.type, error.message); - return; - } - printf("\n TM Hierarchy Specified!\n\v"); - - /* TM 
hierarchy commit */ - status = rte_tm_hierarchy_commit(pi, 0, &error); - if (status) { - printf(" Hierarchy commit error(%d) - %s\n", - error.type, error.message); - return; - } - printf(" Hierarchy Committed (port %u)!", pi); - port->softport.tm.hierarchy_config = 1; - - /* Start port */ - status = rte_eth_dev_start(pi); - if (status) { - printf("\n Port %u start error!\n", pi); - return; - } - printf("\n Port %u started!\n", pi); - return; - } - } - printf("\n TM feature not available on port %u", pi); -} - -struct fwd_engine softnic_tm_engine = { - .fwd_mode_name = "tm", - .port_fwd_begin = softport_tm_begin, - .port_fwd_end = NULL, - .packet_fwd = softport_packet_fwd, -}; - -struct fwd_engine softnic_tm_bypass_engine = { - .fwd_mode_name = "tm-bypass", - .port_fwd_begin = NULL, - .port_fwd_end = NULL, - .packet_fwd = softport_packet_fwd, -}; diff --git a/buildtools/Makefile b/buildtools/Makefile index 35a42ff5..7f76fd7d 100644 --- a/buildtools/Makefile +++ b/buildtools/Makefile @@ -1,33 +1,6 @@ -# BSD LICENSE -# -# Copyright(c) 2016 Neil Horman. All rights reserved. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Intel Corporation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2016 Neil Horman +# All rights reserved. include $(RTE_SDK)/mk/rte.vars.mk diff --git a/buildtools/auto-config-h.sh b/buildtools/auto-config-h.sh index cb8bce9b..d28a5c3a 100755 --- a/buildtools/auto-config-h.sh +++ b/buildtools/auto-config-h.sh @@ -1,34 +1,6 @@ #!/bin/sh -# -# BSD LICENSE -# -# Copyright 2014-2015 6WIND S.A. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND S.A. 
nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2014-2015 6WIND S.A. # # Crude script to detect whether particular types, macros and functions are # defined by trying to compile a file with a given header. Can be used to diff --git a/buildtools/pmdinfogen/Makefile b/buildtools/pmdinfogen/Makefile index bf07b6f2..a97a7648 100644 --- a/buildtools/pmdinfogen/Makefile +++ b/buildtools/pmdinfogen/Makefile @@ -1,33 +1,6 @@ -# BSD LICENSE -# -# Copyright(c) 2016 Neil Horman. All rights reserved. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Intel Corporation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2016 Neil Horman +# All rights reserved. 
include $(RTE_SDK)/mk/rte.vars.mk @@ -41,7 +14,7 @@ HOSTAPP = dpdk-pmdinfogen # SRCS-y += pmdinfogen.c -HOST_CFLAGS += $(WERROR_FLAGS) -g +HOST_CFLAGS += $(HOST_WERROR_FLAGS) -g HOST_CFLAGS += -I$(RTE_OUTPUT)/include include $(RTE_SDK)/mk/rte.hostapp.mk diff --git a/config/arm/arm64_dpaa2_linuxapp_gcc b/config/arm/arm64_dpaa2_linuxapp_gcc new file mode 100644 index 00000000..7ec74ec4 --- /dev/null +++ b/config/arm/arm64_dpaa2_linuxapp_gcc @@ -0,0 +1,15 @@ +[binaries] +c = 'aarch64-linux-gnu-gcc' +cpp = 'aarch64-linux-gnu-cpp' +ar = 'aarch64-linux-gnu-ar' +as = 'aarch64-linux-gnu-as' +strip = 'aarch64-linux-gnu-strip' + +[host_machine] +system = 'linux' +cpu_family = 'aarch64' +cpu = 'armv8-a' +endian = 'little' + +[properties] +implementor_id = 'dpaa2' diff --git a/config/arm/arm64_dpaa_linuxapp_gcc b/config/arm/arm64_dpaa_linuxapp_gcc new file mode 100644 index 00000000..73a8f0b8 --- /dev/null +++ b/config/arm/arm64_dpaa_linuxapp_gcc @@ -0,0 +1,15 @@ +[binaries] +c = 'aarch64-linux-gnu-gcc' +cpp = 'aarch64-linux-gnu-cpp' +ar = 'aarch64-linux-gnu-ar' +as = 'aarch64-linux-gnu-as' +strip = 'aarch64-linux-gnu-strip' + +[host_machine] +system = 'linux' +cpu_family = 'aarch64' +cpu = 'armv8-a' +endian = 'little' + +[properties] +implementor_id = 'dpaa' diff --git a/config/arm/arm64_thunderx_linuxapp_gcc b/config/arm/arm64_thunderx_linuxapp_gcc index 7ff34af7..967d9d46 100644 --- a/config/arm/arm64_thunderx_linuxapp_gcc +++ b/config/arm/arm64_thunderx_linuxapp_gcc @@ -2,6 +2,7 @@ c = 'aarch64-linux-gnu-gcc' cpp = 'aarch64-linux-gnu-cpp' ar = 'aarch64-linux-gnu-gcc-ar' +strip = 'aarch64-linux-gnu-strip' [host_machine] system = 'linux' diff --git a/config/arm/meson.build b/config/arm/meson.build index 4e788a4e..40dbc87f 100644 --- a/config/arm/meson.build +++ b/config/arm/meson.build @@ -8,7 +8,7 @@ march_opt = '-march=@0@'.format(machine) arm_force_native_march = false machine_args_generic = [ - ['default', ['-march=armv8-a']], + ['default', ['-march=armv8-a+crc+crypto']], ['native', ['-march=native']], ['0xd03', ['-mcpu=cortex-a53']], ['0xd04', ['-mcpu=cortex-a35']], @@ -54,6 +54,17 @@ flags_cavium = [ ['RTE_MAX_LCORE', 96], ['RTE_MAX_VFIO_GROUPS', 128], ['RTE_RING_USE_C11_MEM_MODEL', false]] +flags_dpaa = [ + ['RTE_MACHINE', '"dpaa"'], + ['RTE_CACHE_LINE_SIZE', 64], + ['RTE_MAX_NUMA_NODES', 1], + ['RTE_MAX_LCORE', 16]] +flags_dpaa2 = [ + ['RTE_MACHINE', '"dpaa2"'], + ['RTE_CACHE_LINE_SIZE', 64], + ['RTE_MAX_NUMA_NODES', 1], + ['RTE_MAX_LCORE', 16], + ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false]] ## Arm implementer ID (ARM DDI 0487C.a, Section G7.2.106, Page G7-5321) impl_generic = ['Generic armv8', flags_generic, machine_args_generic] @@ -69,15 +80,8 @@ impl_0x51 = ['Qualcomm', flags_generic, machine_args_generic] impl_0x53 = ['Samsung', flags_generic, machine_args_generic] impl_0x56 = ['Marvell', flags_generic, machine_args_generic] impl_0x69 = ['Intel', flags_generic, machine_args_generic] - - -if cc.get_define('__clang__') != '' - dpdk_conf.set_quoted('RTE_TOOLCHAIN', 'clang') - dpdk_conf.set('RTE_TOOLCHAIN_CLANG', 1) -else - dpdk_conf.set_quoted('RTE_TOOLCHAIN', 'gcc') - dpdk_conf.set('RTE_TOOLCHAIN_GCC', 1) -endif +impl_dpaa = ['NXP DPAA', flags_dpaa, machine_args_generic] +impl_dpaa2 = ['NXP DPAA2', flags_dpaa2, machine_args_generic] dpdk_conf.set('RTE_FORCE_INTRINSICS', 1) diff --git a/config/common_armv8a_linuxapp b/config/common_armv8a_linuxapp index 507b28a8..111c0056 100644 --- a/config/common_armv8a_linuxapp +++ b/config/common_armv8a_linuxapp @@ -36,61 +36,3 @@ 
CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n CONFIG_RTE_LIBRTE_AVP_PMD=n CONFIG_RTE_SCHED_VECTOR=n - -# -# ARMv8 Specific driver compilation flags -# - -# -# Compile NXP DPAA Bus -# -CONFIG_RTE_LIBRTE_DPAA_BUS=y -CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n - -# -# Compile NXP DPAA2 FSL-MC Bus -# -CONFIG_RTE_LIBRTE_FSLMC_BUS=y - -# -# Compile NXP DPAA Mempool -# -CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=y - -# -# Compile NXP DPAA2 Mempool -# -CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=y - -# -# Compile bust-oriented NXP DPAA PMD -# -CONFIG_RTE_LIBRTE_DPAA_PMD=y - -# -# Compile burst-oriented NXP DPAA2 PMD driver -# -CONFIG_RTE_LIBRTE_DPAA2_PMD=y - -# -# Compile schedule-oriented NXP DPAA Event Dev PMD -# -CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV=y - -# -# Compile schedule-oriented NXP DPAA2 EVENTDEV driver -# -CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV=y - -# -# Compile NXP DPAA caam - crypto driver -# -CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=y -CONFIG_RTE_LIBRTE_DPAA_MAX_CRYPTODEV=4 -CONFIG_RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS=2048 - -# -# Compile NXP DPAA2 crypto sec driver for CAAM HW -# -CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=y -CONFIG_RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS=2048 diff --git a/config/common_base b/config/common_base index ad03cf43..4bcbaf92 100644 --- a/config/common_base +++ b/config/common_base @@ -61,7 +61,20 @@ CONFIG_RTE_CACHE_LINE_SIZE=64 CONFIG_RTE_LIBRTE_EAL=y CONFIG_RTE_MAX_LCORE=128 CONFIG_RTE_MAX_NUMA_NODES=8 -CONFIG_RTE_MAX_MEMSEG=256 +CONFIG_RTE_MAX_MEMSEG_LISTS=64 +# each memseg list will be limited to either RTE_MAX_MEMSEG_PER_LIST pages +# or RTE_MAX_MEM_MB_PER_LIST megabytes worth of memory, whichever is smaller +CONFIG_RTE_MAX_MEMSEG_PER_LIST=8192 +CONFIG_RTE_MAX_MEM_MB_PER_LIST=32768 +# a "type" is a combination of page size and NUMA node. total number of memseg +# lists per type will be limited to either RTE_MAX_MEMSEG_PER_TYPE pages (split +# over multiple lists of RTE_MAX_MEMSEG_PER_LIST pages), or +# RTE_MAX_MEM_MB_PER_TYPE megabytes of memory (split over multiple lists of +# RTE_MAX_MEM_MB_PER_LIST), whichever is smaller +CONFIG_RTE_MAX_MEMSEG_PER_TYPE=32768 +CONFIG_RTE_MAX_MEM_MB_PER_TYPE=131072 +# global maximum usable amount of VA, in megabytes +CONFIG_RTE_MAX_MEM_MB=524288 CONFIG_RTE_MAX_MEMZONE=2560 CONFIG_RTE_MAX_TAILQ=32 CONFIG_RTE_ENABLE_ASSERT=n @@ -74,8 +87,10 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n CONFIG_RTE_EAL_IGB_UIO=n CONFIG_RTE_EAL_VFIO=n CONFIG_RTE_MAX_VFIO_GROUPS=64 +CONFIG_RTE_MAX_VFIO_CONTAINERS=64 CONFIG_RTE_MALLOC_DEBUG=n CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n +CONFIG_RTE_USE_LIBBSD=n # # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing. 
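The memseg comments above bound each list two ways, by page count and by megabytes, whichever is hit first; the following is a minimal C sketch of that arithmetic only (an illustration, not the EAL implementation; the 2 MB and 1 GB hugepage sizes and the helper name are assumptions):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* caps quoted from the common_base comments above */
#define MAX_MEMSEG_PER_LIST 8192   /* pages per memseg list */
#define MAX_MEM_MB_PER_LIST 32768  /* MB per memseg list */

/* a list holds whichever limit is reached first */
static uint64_t list_cap_mb(uint64_t page_mb)
{
	uint64_t by_pages = (uint64_t)MAX_MEMSEG_PER_LIST * page_mb;
	return by_pages < MAX_MEM_MB_PER_LIST ? by_pages : MAX_MEM_MB_PER_LIST;
}

int main(void)
{
	/* 8192 * 2 MB = 16384 MB, below the MB cap: page-bound */
	printf("2M pages: %" PRIu64 " MB per list\n", list_cap_mb(2));
	/* 8192 * 1024 MB exceeds the MB cap: capped at 32768 MB */
	printf("1G pages: %" PRIu64 " MB per list\n", list_cap_mb(1024));
	return 0;
}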
@@ -123,6 +138,11 @@ CONFIG_RTE_ETHDEV_PROFILE_ITT_WASTED_RX_ITERATIONS=n # CONFIG_RTE_ETHDEV_TX_PREPARE_NOOP=n +# +# Compile the Intel FPGA bus +# +CONFIG_RTE_LIBRTE_IFPGA_BUS=y + # # Compile PCI bus driver # @@ -143,6 +163,12 @@ CONFIG_RTE_LIBRTE_ARK_DEBUG_TX=n CONFIG_RTE_LIBRTE_ARK_DEBUG_STATS=n CONFIG_RTE_LIBRTE_ARK_DEBUG_TRACE=n +# +# Compile AMD PMD +# +CONFIG_RTE_LIBRTE_AXGBE_PMD=y +CONFIG_RTE_LIBRTE_AXGBE_PMD_DEBUG=n + # # Compile burst-oriented Broadcom PMD driver # @@ -172,6 +198,7 @@ CONFIG_RTE_LIBRTE_CXGBE_TPUT=y CONFIG_RTE_LIBRTE_DPAA_BUS=n CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n CONFIG_RTE_LIBRTE_DPAA_PMD=n +CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n # # Compile NXP DPAA2 FSL-MC Bus @@ -188,11 +215,7 @@ CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y # Compile burst-oriented NXP DPAA2 PMD driver # CONFIG_RTE_LIBRTE_DPAA2_PMD=n -CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT=n CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n -CONFIG_RTE_LIBRTE_DPAA2_DEBUG_RX=n -CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX=n -CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX_FREE=n # # Compile burst-oriented Amazon ENA PMD driver @@ -241,8 +264,6 @@ CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF=64 CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4 -# interval up to 8160 us, aligned to 2 (or default value) -CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL=-1 # # Compile burst-oriented FM10K PMD @@ -270,15 +291,14 @@ CONFIG_RTE_LIBRTE_AVF_16BYTE_RX_DESC=n CONFIG_RTE_LIBRTE_MLX4_PMD=n CONFIG_RTE_LIBRTE_MLX4_DEBUG=n CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS=n -CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8 # -# Compile burst-oriented Mellanox ConnectX-4 & ConnectX-5 (MLX5) PMD +# Compile burst-oriented Mellanox ConnectX-4, ConnectX-5 & Bluefield +# (MLX5) PMD # CONFIG_RTE_LIBRTE_MLX5_PMD=n CONFIG_RTE_LIBRTE_MLX5_DEBUG=n CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS=n -CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE=8 # # Compile burst-oriented Netronome NFP PMD driver @@ -306,11 +326,6 @@ CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG=n # Compile software PMD backed by SZEDATA2 device # CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n -# -# Defines firmware type address space. -# See documentation for supported values. -# Other values raise compile time error. 
-CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS=0 # # Compile burst-oriented Cavium Thunderx NICVF PMD driver @@ -382,7 +397,20 @@ CONFIG_RTE_LIBRTE_PMD_FAILSAFE=y # # Compile Marvell PMD driver # -CONFIG_RTE_LIBRTE_MRVL_PMD=n +CONFIG_RTE_LIBRTE_MVPP2_PMD=n + +# +# Compile support for VMBus library +# +CONFIG_RTE_LIBRTE_VMBUS=n + +# +# Compile native PMD for Hyper-V/Azure +# +CONFIG_RTE_LIBRTE_NETVSC_PMD=n +CONFIG_RTE_LIBRTE_NETVSC_DEBUG_RX=n +CONFIG_RTE_LIBRTE_NETVSC_DEBUG_TX=n +CONFIG_RTE_LIBRTE_NETVSC_DEBUG_DUMP=n # # Compile virtual device driver for NetVSC on Hyper-V/Azure @@ -409,7 +437,7 @@ CONFIG_RTE_PMD_RING_MAX_TX_RINGS=16 # # Compile SOFTNIC PMD # -CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y +CONFIG_RTE_LIBRTE_PMD_SOFTNIC=n # # Compile the TAP PMD @@ -427,6 +455,7 @@ CONFIG_RTE_PMD_PACKET_PREFETCH=y # CONFIG_RTE_LIBRTE_BBDEV=y CONFIG_RTE_BBDEV_MAX_DEVS=128 +CONFIG_RTE_BBDEV_OFFLOAD_COST=n # # Compile PMD for NULL bbdev device @@ -442,7 +471,6 @@ CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=n # Compile generic crypto device library # CONFIG_RTE_LIBRTE_CRYPTODEV=y -CONFIG_RTE_LIBRTE_CRYPTODEV_DEBUG=n CONFIG_RTE_CRYPTO_MAX_DEVS=64 # @@ -455,49 +483,47 @@ CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG=n # Compile NXP DPAA2 crypto sec driver for CAAM HW # CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=n -CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT=n -CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER=n -CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_RX=n # # NXP DPAA caam - crypto driver # CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=n -CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT=n -CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER=n -CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_RX=n +CONFIG_RTE_LIBRTE_DPAA_MAX_CRYPTODEV=4 # -# Compile PMD for QuickAssist based devices +# Compile PMD for QuickAssist based devices - see docs for details # -CONFIG_RTE_LIBRTE_PMD_QAT=n -CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_INIT=n -CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_TX=n -CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_RX=n -CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_PMD_QAT=y +CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n # -# Number of sessions to create in the session memory pool -# on a single QuickAssist device. +# Max. 
number of QuickAssist devices, which can be detected and attached # -CONFIG_RTE_QAT_PMD_MAX_NB_SESSIONS=2048 +CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48 +CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16 + +# +# Compile PMD for virtio crypto devices +# +CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO=y +# +# Number of maximum virtio crypto devices +# +CONFIG_RTE_MAX_VIRTIO_CRYPTO=32 # # Compile PMD for AESNI backed device # CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n -CONFIG_RTE_LIBRTE_PMD_AESNI_MB_DEBUG=n # # Compile PMD for Software backed device # CONFIG_RTE_LIBRTE_PMD_OPENSSL=n -CONFIG_RTE_LIBRTE_PMD_OPENSSL_DEBUG=n # # Compile PMD for AESNI GCM device # CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n -CONFIG_RTE_LIBRTE_PMD_AESNI_GCM_DEBUG=n # # Compile PMD for SNOW 3G device @@ -509,36 +535,63 @@ CONFIG_RTE_LIBRTE_PMD_SNOW3G_DEBUG=n # Compile PMD for KASUMI device # CONFIG_RTE_LIBRTE_PMD_KASUMI=n -CONFIG_RTE_LIBRTE_PMD_KASUMI_DEBUG=n # # Compile PMD for ZUC device # CONFIG_RTE_LIBRTE_PMD_ZUC=n -CONFIG_RTE_LIBRTE_PMD_ZUC_DEBUG=n -# # Compile PMD for Crypto Scheduler device # CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER=y -CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n # # Compile PMD for NULL Crypto device # CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y +# +# Compile PMD for AMD CCP crypto device +# +CONFIG_RTE_LIBRTE_PMD_CCP=n + # # Compile PMD for Marvell Crypto device # -CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n -CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG=n +CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO=n +CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO_DEBUG=n # # Compile generic security library # CONFIG_RTE_LIBRTE_SECURITY=y +# +# Compile generic compression device library +# +CONFIG_RTE_LIBRTE_COMPRESSDEV=y +CONFIG_RTE_COMPRESS_MAX_DEVS=64 + +# +# Compile compressdev unit test +# +CONFIG_RTE_COMPRESSDEV_TEST=n + +# +# Compile PMD for Octeontx ZIPVF compression device +# +CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF=y + +# +# Compile PMD for ISA-L compression device +# +CONFIG_RTE_LIBRTE_PMD_ISAL=n + +# +# Compile PMD for ZLIB compression device +# +CONFIG_RTE_LIBRTE_PMD_ZLIB=n + # # Compile generic event device library # @@ -546,6 +599,9 @@ CONFIG_RTE_LIBRTE_EVENTDEV=y CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n CONFIG_RTE_EVENT_MAX_DEVS=16 CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64 +CONFIG_RTE_EVENT_TIMER_ADAPTER_NUM_MAX=32 +CONFIG_RTE_EVENT_ETH_INTR_RING_SIZE=1024 +CONFIG_RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE=32 # # Compile PMD for skeleton event device @@ -586,6 +642,21 @@ CONFIG_RTE_LIBRTE_RAWDEV=y CONFIG_RTE_RAWDEV_MAX_DEVS=10 CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV=y +# +# Compile PMD for NXP DPAA2 CMDIF raw device +# +CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV=n + +# +# Compile PMD for NXP DPAA2 QDMA raw device +# +CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV=n + +# +# Compile PMD for Intel FPGA raw device +# +CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV=y + # # Compile librte_ring # @@ -602,6 +673,8 @@ CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n # # Compile Mempool drivers # +CONFIG_RTE_DRIVER_MEMPOOL_BUCKET=y +CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB=64 CONFIG_RTE_DRIVER_MEMPOOL_RING=y CONFIG_RTE_DRIVER_MEMPOOL_STACK=y @@ -791,6 +864,20 @@ CONFIG_RTE_LIBRTE_VHOST_DEBUG=n # CONFIG_RTE_LIBRTE_PMD_VHOST=n +# +# Compile IFC driver +# To compile, CONFIG_RTE_LIBRTE_VHOST and CONFIG_RTE_EAL_VFIO +# should be enabled. 
+# +CONFIG_RTE_LIBRTE_IFC_PMD=n + +# +# Compile librte_bpf +# +CONFIG_RTE_LIBRTE_BPF=y +# allow loading BPF from ELF files (requires libelf) +CONFIG_RTE_LIBRTE_BPF_ELF=n + # # Compile the test application # diff --git a/config/common_linuxapp b/config/common_linuxapp index ff98f235..9c5ea9d8 100644 --- a/config/common_linuxapp +++ b/config/common_linuxapp @@ -15,7 +15,9 @@ CONFIG_RTE_LIBRTE_PMD_KNI=y CONFIG_RTE_LIBRTE_VHOST=y CONFIG_RTE_LIBRTE_VHOST_NUMA=y CONFIG_RTE_LIBRTE_PMD_VHOST=y +CONFIG_RTE_LIBRTE_IFC_PMD=y CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y +CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y CONFIG_RTE_LIBRTE_PMD_TAP=y CONFIG_RTE_LIBRTE_AVP_PMD=y CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD=y @@ -23,3 +25,22 @@ CONFIG_RTE_LIBRTE_NFP_PMD=y CONFIG_RTE_LIBRTE_POWER=y CONFIG_RTE_VIRTIO_USER=y CONFIG_RTE_PROC_INFO=y + +CONFIG_RTE_LIBRTE_VMBUS=y +CONFIG_RTE_LIBRTE_NETVSC_PMD=y + +# NXP DPAA BUS and drivers +CONFIG_RTE_LIBRTE_DPAA_BUS=y +CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=y +CONFIG_RTE_LIBRTE_DPAA_PMD=y +CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV=y +CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=y + +# NXP FSLMC BUS and DPAA2 drivers +CONFIG_RTE_LIBRTE_FSLMC_BUS=y +CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=y +CONFIG_RTE_LIBRTE_DPAA2_PMD=y +CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV=y +CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=y +CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV=y +CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV=y diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc index a20b7a85..13be308d 100644 --- a/config/defconfig_arm-armv7a-linuxapp-gcc +++ b/config/defconfig_arm-armv7a-linuxapp-gcc @@ -1,32 +1,5 @@ -# BSD LICENSE -# -# Copyright (C) 2015 RehiveTech. All right reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of RehiveTech nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (C) 2015 RehiveTech. All right reserved. 
#include "common_linuxapp" diff --git a/config/defconfig_arm64-dpaa-linuxapp-gcc b/config/defconfig_arm64-dpaa-linuxapp-gcc index 52bfc792..c47aec0a 100644 --- a/config/defconfig_arm64-dpaa-linuxapp-gcc +++ b/config/defconfig_arm64-dpaa-linuxapp-gcc @@ -13,7 +13,7 @@ CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n # # Compile Environment Abstraction Layer # -CONFIG_RTE_MAX_LCORE=4 +CONFIG_RTE_MAX_LCORE=16 CONFIG_RTE_MAX_NUMA_NODES=1 CONFIG_RTE_CACHE_LINE_SIZE=64 CONFIG_RTE_PKTMBUF_HEADROOM=128 @@ -21,10 +21,3 @@ CONFIG_RTE_PKTMBUF_HEADROOM=128 # NXP DPAA Bus CONFIG_RTE_LIBRTE_DPAA_DEBUG_DRIVER=n CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n - -# -# FSL DPAA caam - crypto driver -# -CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT=n -CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER=n -CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_RX=n diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc index afdbc347..96f478a0 100644 --- a/config/defconfig_arm64-dpaa2-linuxapp-gcc +++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc @@ -9,9 +9,6 @@ CONFIG_RTE_MACHINE="dpaa2" CONFIG_RTE_ARCH_ARM_TUNE="cortex-a72" -# -# Compile Environment Abstraction Layer -# CONFIG_RTE_MAX_LCORE=16 CONFIG_RTE_MAX_NUMA_NODES=1 CONFIG_RTE_CACHE_LINE_SIZE=64 @@ -22,23 +19,4 @@ CONFIG_RTE_PKTMBUF_HEADROOM=128 CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n CONFIG_RTE_LIBRTE_VHOST_NUMA=n -# -# Compile Support Libraries for DPAA2 -# CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=n - -# -# Compile burst-oriented NXP DPAA2 PMD driver -# -CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT=n -CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n -CONFIG_RTE_LIBRTE_DPAA2_DEBUG_RX=n -CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX=n -CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX_FREE=n - -# -# Compile NXP DPAA2 crypto sec driver for CAAM HW -# -CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT=n -CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER=n -CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_RX=n diff --git a/config/defconfig_arm64-stingray-linuxapp-gcc b/config/defconfig_arm64-stingray-linuxapp-gcc new file mode 100644 index 00000000..99925072 --- /dev/null +++ b/config/defconfig_arm64-stingray-linuxapp-gcc @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (C) Broadcom 2017-2018. All rights reserved. 
+# + +#include "defconfig_arm64-armv8a-linuxapp-gcc" + +# Broadcom - Stingray +CONFIG_RTE_MACHINE="armv8a" +CONFIG_RTE_ARCH_ARM_TUNE="cortex-a72" + +# Doesn't support NUMA +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n +CONFIG_RTE_LIBRTE_VHOST_NUMA=n + +CONFIG_RTE_EAL_IGB_UIO=y +CONFIG_RTE_KNI_KMOD=n diff --git a/config/defconfig_i686-native-linuxapp-gcc b/config/defconfig_i686-native-linuxapp-gcc index a42ba4f5..1178fe35 100644 --- a/config/defconfig_i686-native-linuxapp-gcc +++ b/config/defconfig_i686-native-linuxapp-gcc @@ -46,3 +46,6 @@ CONFIG_RTE_LIBRTE_PMD_ZUC=n # AVP PMD is not supported on 32-bit # CONFIG_RTE_LIBRTE_AVP_PMD=n + +# 32-bit doesn't break up memory in lists, but does have VA allocation limit +CONFIG_RTE_MAX_MEM_MB=2048 diff --git a/config/defconfig_i686-native-linuxapp-icc b/config/defconfig_i686-native-linuxapp-icc index 144ba0ae..016c73f3 100644 --- a/config/defconfig_i686-native-linuxapp-icc +++ b/config/defconfig_i686-native-linuxapp-icc @@ -17,11 +17,6 @@ CONFIG_RTE_TOOLCHAIN_ICC=y # CONFIG_RTE_LIBRTE_KNI=n -# -# Vectorized PMD is not supported on 32-bit -# -CONFIG_RTE_IXGBE_INC_VECTOR=n - # # Solarflare PMD is not supported on 32-bit # @@ -51,3 +46,6 @@ CONFIG_RTE_LIBRTE_PMD_ZUC=n # AVP PMD is not supported on 32-bit # CONFIG_RTE_LIBRTE_AVP_PMD=n + +# 32-bit doesn't break up memory in lists, but does have VA allocation limit +CONFIG_RTE_MAX_MEM_MB=2048 diff --git a/config/defconfig_x86_x32-native-linuxapp-gcc b/config/defconfig_x86_x32-native-linuxapp-gcc index b6206a5c..57d000dc 100644 --- a/config/defconfig_x86_x32-native-linuxapp-gcc +++ b/config/defconfig_x86_x32-native-linuxapp-gcc @@ -26,3 +26,6 @@ CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n # AVP PMD is not supported on 32-bit # CONFIG_RTE_LIBRTE_AVP_PMD=n + +# 32-bit doesn't break up memory in lists, but does have VA allocation limit +CONFIG_RTE_MAX_MEM_MB=2048 diff --git a/config/meson.build b/config/meson.build index f8c67578..4d755323 100644 --- a/config/meson.build +++ b/config/meson.build @@ -11,6 +11,10 @@ dpdk_conf.set('RTE_MACHINE', machine) machine_args = [] machine_args += '-march=' + machine +toolchain = cc.get_id() +dpdk_conf.set_quoted('RTE_TOOLCHAIN', toolchain) +dpdk_conf.set('RTE_TOOLCHAIN_' + toolchain.to_upper(), 1) + # use pthreads add_project_link_arguments('-pthread', language: 'c') dpdk_extra_ldflags += '-pthread' @@ -38,6 +42,14 @@ if numa_dep.found() and cc.has_header('numaif.h') dpdk_extra_ldflags += '-lnuma' endif +# check for strlcpy +if host_machine.system() == 'linux' and cc.find_library('bsd', + required: false).found() and cc.has_header('bsd/string.h') + dpdk_conf.set('RTE_USE_LIBBSD', 1) + add_project_link_arguments('-lbsd', language: 'c') + dpdk_extra_ldflags += '-lbsd' +endif + # add -include rte_config to cflags add_project_arguments('-include', 'rte_config.h', language: 'c') @@ -45,9 +57,12 @@ add_project_arguments('-include', 'rte_config.h', language: 'c') warning_flags = [ '-Wsign-compare', '-Wcast-qual', - '-Wno-address-of-packed-member', - '-Wno-format-truncation' + '-Wno-address-of-packed-member' ] +if cc.sizeof('void *') == 4 +# for 32-bit, don't warn about casting a 32-bit pointer to 64-bit int - it's fine!! 
+ warning_flags += '-Wno-pointer-to-int-cast' +endif foreach arg: warning_flags if cc.has_argument(arg) add_project_arguments(arg, language: 'c') @@ -61,6 +76,8 @@ dpdk_conf.set('RTE_LIBEAL_USE_HPET', get_option('use_hpet')) dpdk_conf.set('RTE_EAL_ALLOW_INV_SOCKET_ID', get_option('allow_invalid_socket_id')) # values which have defaults which may be overridden dpdk_conf.set('RTE_MAX_VFIO_GROUPS', 64) +dpdk_conf.set('RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB', 64) +dpdk_conf.set('RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', true) compile_time_cpuflags = [] if host_machine.cpu_family().startswith('x86') diff --git a/config/rte_config.h b/config/rte_config.h index 699878ad..a8e47977 100644 --- a/config/rte_config.h +++ b/config/rte_config.h @@ -21,13 +21,18 @@ /****** library defines ********/ /* EAL defines */ -#define RTE_MAX_MEMSEG 512 +#define RTE_MAX_MEMSEG_LISTS 128 +#define RTE_MAX_MEMSEG_PER_LIST 8192 +#define RTE_MAX_MEM_MB_PER_LIST 32768 +#define RTE_MAX_MEMSEG_PER_TYPE 32768 +#define RTE_MAX_MEM_MB_PER_TYPE 65536 +#define RTE_MAX_MEM_MB 524288 #define RTE_MAX_MEMZONE 2560 #define RTE_MAX_TAILQ 32 -#define RTE_LOG_LEVEL RTE_LOG_INFO #define RTE_LOG_DP_LEVEL RTE_LOG_INFO #define RTE_BACKTRACE 1 #define RTE_EAL_VFIO 1 +#define RTE_MAX_VFIO_CONTAINERS 64 /* bsd module defines */ #define RTE_CONTIGMEM_MAX_NUM_BUFS 64 @@ -52,9 +57,18 @@ #define RTE_CRYPTO_MAX_DEVS 64 #define RTE_CRYPTODEV_NAME_LEN 64 +/* compressdev defines */ +#define RTE_COMPRESS_MAX_DEVS 64 + /* eventdev defines */ #define RTE_EVENT_MAX_DEVS 16 #define RTE_EVENT_MAX_QUEUES_PER_DEV 64 +#define RTE_EVENT_TIMER_ADAPTER_NUM_MAX 32 +#define RTE_EVENT_ETH_INTR_RING_SIZE 1024 +#define RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE 32 + +/* rawdev defines */ +#define RTE_RAWDEV_MAX_DEVS 10 /* ip_fragmentation defines */ #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4 @@ -72,11 +86,16 @@ /****** driver defines ********/ -/* - * Number of sessions to create in the session memory pool - * on a single QuickAssist device. - */ -#define RTE_QAT_PMD_MAX_NB_SESSIONS 2048 +/* QuickAssist device */ +/* Max. number of QuickAssist devices which can be attached */ +#define RTE_PMD_QAT_MAX_PCI_DEVICES 48 +#define RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS 16 + +/* virtio crypto defines */ +#define RTE_MAX_VIRTIO_CRYPTO 32 + +/* DPAA SEC max cryptodev devices*/ +#define RTE_LIBRTE_DPAA_MAX_CRYPTODEV 4 /* fm10k defines */ #define RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE 1 diff --git a/devtools/check-dup-includes.sh b/devtools/check-dup-includes.sh index 10365e4a..e4c2748c 100755 --- a/devtools/check-dup-includes.sh +++ b/devtools/check-dup-includes.sh @@ -1,25 +1,6 @@ #! /bin/sh -e - -# Copyright 2017 Mellanox Technologies, Ltd. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2017 Mellanox Technologies, Ltd # Check C files in git repository for duplicated includes. # Usage: devtools/check-dup-includes.sh [directory] diff --git a/devtools/check-git-log.sh b/devtools/check-git-log.sh index c601f6ae..97dae4be 100755 --- a/devtools/check-git-log.sh +++ b/devtools/check-git-log.sh @@ -1,34 +1,6 @@ #! /bin/sh - -# BSD LICENSE -# +# SPDX-License-Identifier: BSD-3-Clause # Copyright 2016 6WIND S.A. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND S.A. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Check commit logs (headlines and references) # @@ -106,8 +78,8 @@ bad=$(echo "$headlines" | grep --color=always \ # check headline lowercase for first words bad=$(echo "$headlines" | grep --color=always \ - -e '^.*[A-Z].*:' \ - -e ': *[A-Z]' \ + -e '^.*[[:upper:]].*:' \ + -e ': *[[:upper:]]' \ | sed 's,^,\t,') [ -z "$bad" ] || printf "Wrong headline uppercase:\n$bad\n" @@ -123,12 +95,14 @@ bad=$(echo "$headlines" | grep -E --color=always \ -e ':.*\' \ -e ':.*\' \ -e ':.*\' \ + -e ':.*\' \ -e ':.*\' \ -e ':.*\' \ -e ':.*\' \ -e ':.*\' \ -e ':.*\' \ -e ':.*\' \ + -e ':.*\' \ -e ':.*\' \ -e ':.*\' \ -e ':.*\' \ @@ -138,7 +112,9 @@ bad=$(echo "$headlines" | grep -E --color=always \ -e ':.*\' \ -e ':.*\' \ -e ':.*\' \ + -e ':.*\' \ -e ':.*\<[Vv]lan\>' \ + -e ':.*\' \ -e ':.*\' \ | sed 's,^,\t,') [ -z "$bad" ] || printf "Wrong headline lowercase:\n$bad\n" diff --git a/devtools/check-includes.sh b/devtools/check-includes.sh index 685a3e77..9057633e 100755 --- a/devtools/check-includes.sh +++ b/devtools/check-includes.sh @@ -1,34 +1,6 @@ #!/bin/sh -e -# -# BSD LICENSE -# -# Copyright 2016 6WIND S.A. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND S.A. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2016 6WIND S.A. # This script checks that header files in a given directory do not miss # dependencies when included on their own, do not conflict and accept being diff --git a/devtools/check-maintainers.sh b/devtools/check-maintainers.sh index ac5326b8..85a300f0 100755 --- a/devtools/check-maintainers.sh +++ b/devtools/check-maintainers.sh @@ -1,34 +1,6 @@ #! /bin/sh - -# BSD LICENSE -# +# SPDX-License-Identifier: BSD-3-Clause # Copyright 2015 6WIND S.A. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND S.A. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# Do some basic checks in MAINTAINERS file diff --git a/devtools/check-symbol-change.sh b/devtools/check-symbol-change.sh new file mode 100755 index 00000000..daaf45e1 --- /dev/null +++ b/devtools/check-symbol-change.sh @@ -0,0 +1,159 @@ +#!/bin/sh +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Neil Horman + +build_map_changes() +{ + local fname="$1" + local mapdb="$2" + + cat "$fname" | awk ' + # Initialize our variables + BEGIN {map="";sym="";ar="";sec=""; in_sec=0; in_map=0} + + # Anything that starts with + or -, followed by an a + # and ends in the string .map is the name of our map file + # This may appear multiple times in a patch if multiple + # map files are altered, and all section/symbol names + # appearing between a triggering of this rule and the + # next trigger of this rule are associated with this file + /[-+] a\/.*\.map/ {map=$2; in_map=1} + + # Same pattern as above, only it matches on anything that + # does not end in 'map', indicating we have left the map chunk. + # When we hit this, turn off the in_map variable, which + # suppresses the subordinate rules below + /[-+] a\/.*\.^(map)/ {in_map=0} + + # Triggering this rule, which starts a line with a + and ends it + # with a { identifies a versioned section. The section name is + # the rest of the line with the + and { symbols removed. + # Triggering this rule sets in_sec to 1, which activates the + # symbol rule below + /+.*{/ {gsub("+",""); + if (in_map == 1) { + sec=$1; in_sec=1; + } + } + + # This rule identifies the end of a section, and disables the + # symbol rule + /.*}/ {in_sec=0} + + # This rule matches on a + followed by any characters except a : + # (which denotes a global vs local segment), and ends with a ;. + # The semicolon is removed and the symbol is printed with its + # associated file name and version section, along with an + # indicator that the symbol is a new addition. Note this rule + # only works if we have found a version section in the rule + # above (hence the in_sec check) and found a map file (the + # in_map check). If we are not in a map chunk, do nothing. If + # we are in a map chunk but not a section chunk, record it as + # unknown. + /^+[^}].*[^:*];/ {gsub(";","");sym=$2; + if (in_map == 1) { + if (in_sec == 1) { + print map " " sym " " sec " add" + } else { + print map " " sym " unknown add" + } + } + } + + # This is the same rule as above, but the rule matches on a + # leading - rather than a +, denoting that the symbol is being + # removed. 
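+	# Worked example (hypothetical map file and symbol name): given
+	# a patch chunk such as
+	#   --- a/lib/librte_foo/rte_foo_version.map
+	#   +++ b/lib/librte_foo/rte_foo_version.map
+	#   +EXPERIMENTAL {
+	#   +	rte_foo_bar;
+	#   +};
+	# the addition rule above prints
+	#   a/lib/librte_foo/rte_foo_version.map rte_foo_bar EXPERIMENTAL add
+	# and the deletion rule below prints the same record ending in
+	# "del" when the symbol line appears with a leading - instead.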
+ /^-[^}].*[^:*];/ {gsub(";","");sym=$2; + if (in_map == 1) { + if (in_sec == 1) { + print map " " sym " " sec " del" + } else { + print map " " sym " unknown del" + } + } + }' > "$mapdb" + + sort -u "$mapdb" > "$mapdb.2" + mv -f "$mapdb.2" "$mapdb" + +} + +check_for_rule_violations() +{ + local mapdb="$1" + local mname + local symname + local secname + local ar + local ret=0 + + while read mname symname secname ar + do + if [ "$ar" = "add" ] + then + + if [ "$secname" = "unknown" ] + then + # Just inform the user of this occurrence, but + # don't flag it as an error + echo -n "INFO: symbol $symname is added but " + echo -n "patch has insufficient context " + echo -n "to determine the section name " + echo -n "please ensure the version is " + echo "EXPERIMENTAL" + continue + fi + + if [ "$secname" != "EXPERIMENTAL" ] + then + # Symbols that are getting added in a section + # other than the experimental section + # need to be moving from an already supported + # section or it's a violation + grep -q \ + "$mname $symname [^EXPERIMENTAL] del" "$mapdb" + if [ $? -ne 0 ] + then + echo -n "ERROR: symbol $symname " + echo -n "is added in a section " + echo -n "other than the EXPERIMENTAL " + echo "section of the version map" + ret=1 + fi + fi + else + + if [ "$secname" != "EXPERIMENTAL" ] + then + # Just inform users that non-experimental + # symbols need to go through a deprecation + # process + echo -n "INFO: symbol $symname is being " + echo -n "removed, ensure that it has " + echo "gone through the deprecation process" + fi + fi + done < "$mapdb" + + return $ret +} + +trap clean_and_exit_on_sig EXIT + +mapfile=`mktemp mapdb.XXXXXX` +patch=$1 +exit_code=1 + +clean_and_exit_on_sig() +{ + rm -f "$mapfile" + exit $exit_code +} + +build_map_changes "$patch" "$mapfile" +check_for_rule_violations "$mapfile" +exit_code=$? + +rm -f "$mapfile" + +exit $exit_code diff --git a/devtools/check-symbol-maps.sh b/devtools/check-symbol-maps.sh new file mode 100755 index 00000000..e137d48a --- /dev/null +++ b/devtools/check-symbol-maps.sh @@ -0,0 +1,30 @@ +#! /bin/sh -e +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 Mellanox Technologies, Ltd + +cd $(dirname $0)/.. + +# speed up by ignoring Unicode details +export LC_ALL=C + +find_orphan_symbols () +{ + for map in $(find lib drivers -name '*.map') ; do + for sym in $(sed -rn 's,^([^}]*_.*);,\1,p' $map) ; do + if echo $sym | grep -q '^per_lcore_' ; then + continue + fi + if ! grep -q -r --exclude=$(basename $map) \ + -w $sym $(dirname $map) ; then + echo "$map: $sym" + fi + done + done +} + +orphan_symbols=$(find_orphan_symbols) +if [ -n "$orphan_symbols" ] ; then + echo "Found only in symbol map file:" + echo "$orphan_symbols" | sed 's,^,\t,' + exit 1 +fi diff --git a/devtools/checkpatches.sh b/devtools/checkpatches.sh index 7676a6b5..ba795ad1 100755 --- a/devtools/checkpatches.sh +++ b/devtools/checkpatches.sh @@ -1,40 +1,14 @@ #! /bin/sh - -# BSD LICENSE -# +# SPDX-License-Identifier: BSD-3-Clause # Copyright 2015 6WIND S.A. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. 
-# * Neither the name of 6WIND S.A. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Load config options: # - DPDK_CHECKPATCH_PATH # - DPDK_CHECKPATCH_LINE_LENGTH . $(dirname $(readlink -e $0))/load-devel-config +VALIDATE_NEW_API=$(dirname $(readlink -e $0))/check-symbol-change.sh + length=${DPDK_CHECKPATCH_LINE_LENGTH:-80} # override default Linux options @@ -42,13 +16,21 @@ options="--no-tree" options="$options --max-line-length=$length" options="$options --show-types" options="$options --ignore=LINUX_VERSION_CODE,\ -FILE_PATH_CHANGES,MAINTAINERS_STYLE,\ +FILE_PATH_CHANGES,MAINTAINERS_STYLE,SPDX_LICENSE_TAG,\ VOLATILE,PREFER_PACKED,PREFER_ALIGNED,PREFER_PRINTF,\ PREFER_KERNEL_TYPES,BIT_MACRO,CONST_STRUCT,\ SPLIT_STRING,LONG_LINE_STRING,\ LINE_SPACING,PARENTHESIS_ALIGNMENT,NETWORKING_BLOCK_COMMENT_STYLE,\ NEW_TYPEDEFS,COMPARISON_TO_NULL" +clean_tmp_files() { + if echo $tmpinput | grep -q '^checkpatches\.' 
; then
+		rm -f "$tmpinput"
+	fi
+}
+
+trap "clean_tmp_files" INT
+
 print_usage () {
 	cat <<- END_OF_HELP
 	usage: $(basename $0) [-q] [-v] [-nX|patch1 [patch2] ...]]
@@ -61,6 +43,88 @@ print_usage () {
 	END_OF_HELP
 }
 
+check_forbidden_additions() {
+	# This awk script receives a list of expressions to monitor
+	# and a list of folders to search these expressions in
+	# - No search is done inside comments
+	# - Both additions and removals of the expressions are checked
+	#   A positive balance of additions fails the check
+	read -d '' awk_script << 'EOF'
+	BEGIN {
+		split(FOLDERS,deny_folders," ");
+		split(EXPRESSIONS,deny_expr," ");
+		in_file=0;
+		in_comment=0;
+		count=0;
+		comment_start="/*"
+		comment_end="*/"
+	}
+	# search for add/remove instances in current file
+	# state machine assumes the comments structure is enforced by
+	# checkpatch.pl
+	(in_file) {
+		# comment start
+		if (index($0,comment_start) > 0) {
+			in_comment = 1
+		}
+		# non comment code
+		if (in_comment == 0) {
+			for (i in deny_expr) {
+				forbidden_added = "^\+.*" deny_expr[i];
+				forbidden_removed="^-.*" deny_expr[i];
+				current = expressions[deny_expr[i]]
+				if ($0 ~ forbidden_added) {
+					count = count + 1;
+					expressions[deny_expr[i]] = current + 1
+				}
+				if ($0 ~ forbidden_removed) {
+					count = count - 1;
+					expressions[deny_expr[i]] = current - 1
+				}
+			}
+		}
+		# comment end
+		if (index($0,comment_end) > 0) {
+			in_comment = 0
+		}
+	}
+	# switch to the next file, check if the balance of add/remove
+	# of the previous file had new additions
+	($0 ~ "^\+\+\+ b/") {
+		in_file = 0;
+		if (count > 0) {
+			exit;
+		}
+		for (i in deny_folders) {
+			re = "^\+\+\+ b/" deny_folders[i];
+			if ($0 ~ re) {
+				in_file = 1
+				last_file = $0
+			}
+		}
+	}
+	END {
+		if (count > 0) {
+			print "Warning in " substr(last_file,6) ":"
+			print "are you sure you want to add the following:"
+			for (key in expressions) {
+				if (expressions[key] > 0) {
+					print key
+				}
+			}
+			exit RET_ON_FAIL
+		}
+	}
+EOF
+	# ---------------------------------
+	# refrain from new additions of rte_panic() and rte_exit()
+	# multiple folders and expressions are separated by spaces
+	awk -v FOLDERS="lib drivers" \
+		-v EXPRESSIONS="rte_panic\\\( rte_exit\\\(" \
+		-v RET_ON_FAIL=1 \
+		"$awk_script" -
+}
+
 number=0
 quiet=false
 verbose=false
@@ -75,7 +139,7 @@ while getopts hn:qv ARG ; do
 done
 shift $(($OPTIND - 1))
 
-if [ ! -x "$DPDK_CHECKPATCH_PATH" ] ; then
+if [ ! -f "$DPDK_CHECKPATCH_PATH" ] || [ ! -x "$DPDK_CHECKPATCH_PATH" ] ; then
 	print_usage >&2
 	echo
 	echo 'Cannot execute DPDK_CHECKPATCH_PATH' >&2
@@ -86,19 +150,45 @@ total=0
 status=0
 
 check () { # <patch> <commit> <title>
+	local ret=0
+
 	total=$(($total + 1))
 	! $verbose || printf '\n### %s\n\n' "$3"
 	if [ -n "$1" ] ; then
-		report=$($DPDK_CHECKPATCH_PATH $options "$1" 2>/dev/null)
+		tmpinput=$1
 	elif [ -n "$2" ] ; then
-		report=$(git format-patch --find-renames --no-stat --stdout -1 $commit |
-			$DPDK_CHECKPATCH_PATH $options - 2>/dev/null)
+		tmpinput=$(mktemp checkpatches.XXXXXX)
+		git format-patch --find-renames \
+		--no-stat --stdout -1 $commit > "$tmpinput"
 	else
-		report=$($DPDK_CHECKPATCH_PATH $options - 2>/dev/null)
+		tmpinput=$(mktemp checkpatches.XXXXXX)
+		cat > "$tmpinput"
 	fi
-	[ $? -ne 0 ] || return 0
-	$verbose || printf '\n### %s\n\n' "$3"
-	printf '%s\n' "$report" | sed -n '1,/^total:.*lines checked$/p'
+
+	report=$($DPDK_CHECKPATCH_PATH $options "$tmpinput" 2>/dev/null)
+	if [ $? -ne 0 ] ; then
+		$verbose || printf '\n### %s\n\n' "$3"
+		printf '%s\n' "$report" | sed -n '1,/^total:.*lines checked$/p'
+		ret=1
+	fi
+
+	!
$verbose || printf '\nChecking API additions/removals:\n' + report=$($VALIDATE_NEW_API "$tmpinput") + if [ $? -ne 0 ] ; then + printf '%s\n' "$report" + ret=1 + fi + + ! $verbose || printf '\nChecking forbidden tokens additions:\n' + report=$(check_forbidden_additions <"$tmpinput") + if [ $? -ne 0 ] ; then + printf '%s\n' "$report" + ret=1 + fi + + clean_tmp_files + [ $ret -eq 0 ] && return 0 + status=$(($status + 1)) } diff --git a/devtools/cocci/strlcpy.cocci b/devtools/cocci/strlcpy.cocci new file mode 100644 index 00000000..335e2712 --- /dev/null +++ b/devtools/cocci/strlcpy.cocci @@ -0,0 +1,8 @@ +@use_strlcpy@ +identifier src, dst; +expression size; +@@ +( +- snprintf(dst, size, "%s", src) ++ strlcpy(dst, src, size) +) diff --git a/devtools/get-maintainer.sh b/devtools/get-maintainer.sh index 904b7785..b9160486 100755 --- a/devtools/get-maintainer.sh +++ b/devtools/get-maintainer.sh @@ -23,7 +23,8 @@ print_usage () { } # Requires DPDK_GETMAINTAINER_PATH devel config option set -if [ ! -x "$DPDK_GETMAINTAINER_PATH" ] ; then +if [ ! -f "$DPDK_GETMAINTAINER_PATH" ] || + [ ! -x "$DPDK_GETMAINTAINER_PATH" ] ; then print_usage >&2 echo echo 'Cannot execute DPDK_GETMAINTAINER_PATH' >&2 @@ -31,7 +32,7 @@ if [ ! -x "$DPDK_GETMAINTAINER_PATH" ] ; then fi FILES="COPYING CREDITS Kbuild" -FOLDERS="Documentation arch include fs init ipc kernel scripts" +FOLDERS="Documentation arch include fs init ipc scripts" # Kernel script checks for some files and folders to run workaround () { diff --git a/devtools/git-log-fixes.sh b/devtools/git-log-fixes.sh index cd5cf893..e37ee226 100755 --- a/devtools/git-log-fixes.sh +++ b/devtools/git-log-fixes.sh @@ -1,34 +1,6 @@ #! /bin/sh -e - -# BSD LICENSE -# +# SPDX-License-Identifier: BSD-3-Clause # Copyright 2016 6WIND S.A. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND S.A. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. print_usage () { diff --git a/devtools/test-build.sh b/devtools/test-build.sh index 3362edcc..1eee2417 100755 --- a/devtools/test-build.sh +++ b/devtools/test-build.sh @@ -1,48 +1,21 @@ #! 
/bin/sh -e - -# BSD LICENSE -# +# SPDX-License-Identifier: BSD-3-Clause # Copyright 2015 6WIND S.A. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND S.A. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. default_path=$PATH # Load config options: -# - AESNI_MULTI_BUFFER_LIB_PATH # - ARMV8_CRYPTO_LIB_PATH # - DPDK_BUILD_TEST_CONFIGS (defconfig1+option1+option2 defconfig2) # - DPDK_DEP_ARCHIVE # - DPDK_DEP_CFLAGS +# - DPDK_DEP_ISAL (y/[n]) # - DPDK_DEP_LDFLAGS # - DPDK_DEP_MLX (y/[n]) # - DPDK_DEP_NUMA ([y]/n) # - DPDK_DEP_PCAP (y/[n]) # - DPDK_DEP_SSL (y/[n]) +# - DPDK_DEP_IPSEC_MB (y/[n]) # - DPDK_DEP_SZE (y/[n]) # - DPDK_DEP_ZLIB (y/[n]) # - DPDK_MAKE_JOBS (int) @@ -122,14 +95,15 @@ reset_env () unset CROSS unset DPDK_DEP_ARCHIVE unset DPDK_DEP_CFLAGS + unset DPDK_DEP_ISAL unset DPDK_DEP_LDFLAGS unset DPDK_DEP_MLX unset DPDK_DEP_NUMA unset DPDK_DEP_PCAP unset DPDK_DEP_SSL + unset DPDK_DEP_IPSEC_MB unset DPDK_DEP_SZE unset DPDK_DEP_ZLIB - unset AESNI_MULTI_BUFFER_LIB_PATH unset ARMV8_CRYPTO_LIB_PATH unset FLEXRAN_SDK unset LIBMUSDK_PATH @@ -171,20 +145,23 @@ config () # <directory> <target> <options> sed -ri 's,(BYPASS=)n,\1y,' $1/.config test "$DPDK_DEP_ARCHIVE" != y || \ sed -ri 's,(RESOURCE_TAR=)n,\1y,' $1/.config + test "$DPDK_DEP_ISAL" != y || \ + sed -ri 's,(ISAL_PMD=)n,\1y,' $1/.config test "$DPDK_DEP_MLX" != y || \ sed -ri 's,(MLX._PMD=)n,\1y,' $1/.config test "$DPDK_DEP_SZE" != y || \ sed -ri 's,(PMD_SZEDATA2=)n,\1y,' $1/.config test "$DPDK_DEP_ZLIB" != y || \ sed -ri 's,(BNX2X_PMD=)n,\1y,' $1/.config - sed -ri 's,(NFP_PMD=)n,\1y,' $1/.config + test "$DPDK_DEP_ZLIB" != y || \ + sed -ri 's,(COMPRESSDEV_TEST=)n,\1y,' $1/.config test "$DPDK_DEP_PCAP" != y || \ sed -ri 's,(PCAP=)n,\1y,' $1/.config test -z "$ARMV8_CRYPTO_LIB_PATH" || \ sed -ri 's,(PMD_ARMV8_CRYPTO=)n,\1y,' $1/.config - test -z "$AESNI_MULTI_BUFFER_LIB_PATH" || \ + test "$DPDK_DEP_IPSEC_MB" != y || \ sed -ri 's,(PMD_AESNI_MB=)n,\1y,' $1/.config - test -z "$AESNI_MULTI_BUFFER_LIB_PATH" || \ + test "$DPDK_DEP_IPSEC_MB" != y || \ sed -ri 's,(PMD_AESNI_GCM=)n,\1y,' $1/.config test -z "$LIBSSO_SNOW3G_PATH" || \ sed -ri 's,(PMD_SNOW3G=)n,\1y,' $1/.config @@ -193,6 
+170,8 @@ config () # <directory> <target> <options> test -z "$LIBSSO_ZUC_PATH" || \ sed -ri 's,(PMD_ZUC=)n,\1y,' $1/.config test "$DPDK_DEP_SSL" != y || \ + sed -ri 's,(PMD_CCP=)n,\1y,' $1/.config + test "$DPDK_DEP_SSL" != y || \ sed -ri 's,(PMD_OPENSSL=)n,\1y,' $1/.config test "$DPDK_DEP_SSL" != y || \ sed -ri 's,(PMD_QAT=)n,\1y,' $1/.config @@ -200,9 +179,9 @@ config () # <directory> <target> <options> sed -ri 's,(BBDEV_TURBO_SW=)n,\1y,' $1/.config sed -ri 's,(SCHED_.*=)n,\1y,' $1/.config test -z "$LIBMUSDK_PATH" || \ - sed -ri 's,(PMD_MRVL_CRYPTO=)n,\1y,' $1/.config + sed -ri 's,(PMD_MVSAM_CRYPTO=)n,\1y,' $1/.config test -z "$LIBMUSDK_PATH" || \ - sed -ri 's,(MRVL_PMD=)n,\1y,' $1/.config + sed -ri 's,(MVPP2_PMD=)n,\1y,' $1/.config build_config_hook $1 $2 $3 # Explicit enabler/disabler (uppercase) diff --git a/devtools/test-meson-builds.sh b/devtools/test-meson-builds.sh new file mode 100755 index 00000000..951c9067 --- /dev/null +++ b/devtools/test-meson-builds.sh @@ -0,0 +1,60 @@ +#! /bin/sh -e +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +# Run meson to auto-configure the various builds. +# * all builds get put in a directory whose name starts with "build-" +# * if a build-directory already exists we assume it was properly configured +# Run ninja after configuration is done. + +srcdir=$(dirname $(readlink -m $0))/.. +MESON=${MESON:-meson} + +if command -v ninja >/dev/null 2>&1 ; then + ninja_cmd=ninja +elif command -v ninja-build >/dev/null 2>&1 ; then + ninja_cmd=ninja-build +else + echo "ERROR: ninja is not found" >&2 + exit 1 +fi + +build () # <directory> <meson options> +{ + builddir=$1 + shift + if [ ! -d "$builddir" ] ; then + options="--werror -Dexamples=all $*" + echo "$MESON $options $srcdir $builddir" + $MESON $options $srcdir $builddir + unset CC + fi + echo "$ninja_cmd -C $builddir" + $ninja_cmd -C $builddir +} + +# shared and static linked builds with gcc and clang +for c in gcc clang ; do + for s in static shared ; do + export CC="ccache $c" + build build-$c-$s --default-library=$s + done +done + +# test compilation with minimal x86 instruction set +build build-x86-default -Dmachine=nehalem + +# enable cross compilation if gcc cross-compiler is found +c=aarch64-linux-gnu-gcc +if command -v $c >/dev/null 2>&1 ; then + # compile the general v8a also for clang to increase coverage + export CC="ccache clang" + build build-arm64-host-clang --cross-file \ + config/arm/arm64_armv8_linuxapp_gcc + + for f in config/arm/arm*gcc ; do + export CC="ccache gcc" + build build-$(basename $f | tr '_' '-' | cut -d'-' -f-2) \ + --cross-file $f + done +fi diff --git a/devtools/test-null.sh b/devtools/test-null.sh index 30cd0b03..61879e3e 100755 --- a/devtools/test-null.sh +++ b/devtools/test-null.sh @@ -1,34 +1,6 @@ #! /bin/sh -e - -# BSD LICENSE -# +# SPDX-License-Identifier: BSD-3-Clause # Copyright 2015 6WIND S.A. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND S.A. 
nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Run a quick testpmd forwarding with null PMD without hugepage diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md index d77f205b..9265907f 100644 --- a/doc/api/doxy-api-index.md +++ b/doc/api/doxy-api-index.md @@ -2,35 +2,8 @@ API {#index} === <!-- - BSD LICENSE - - Copyright 2013-2017 6WIND S.A. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of 6WIND S.A. nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2013-2017 6WIND S.A. 
--> The public API headers are grouped by topics: @@ -45,14 +18,19 @@ The public API headers are grouped by topics: [bbdev] (@ref rte_bbdev.h), [cryptodev] (@ref rte_cryptodev.h), [security] (@ref rte_security.h), + [compressdev] (@ref rte_compressdev.h), + [compress] (@ref rte_comp.h), [eventdev] (@ref rte_eventdev.h), [event_eth_rx_adapter] (@ref rte_event_eth_rx_adapter.h), + [event_timer_adapter] (@ref rte_event_timer_adapter.h), + [event_crypto_adapter] (@ref rte_event_crypto_adapter.h), [rawdev] (@ref rte_rawdev.h), [metrics] (@ref rte_metrics.h), [bitrate] (@ref rte_bitrate.h), [latency] (@ref rte_latencystats.h), [devargs] (@ref rte_devargs.h), - [PCI] (@ref rte_pci.h) + [PCI] (@ref rte_pci.h), + [vfio] (@ref rte_vfio.h) - **device specific**: [softnic] (@ref rte_eth_softnic.h), @@ -63,6 +41,9 @@ The public API headers are grouped by topics: [i40e] (@ref rte_pmd_i40e.h), [bnxt] (@ref rte_pmd_bnxt.h), [dpaa] (@ref rte_pmd_dpaa.h), + [dpaa2_mempool] (@ref rte_dpaa2_mempool.h), + [dpaa2_cmdif] (@ref rte_pmd_dpaa2_cmdif.h), + [dpaa2_qdma] (@ref rte_pmd_dpaa2_qdma.h), [crypto_scheduler] (@ref rte_cryptodev_scheduler.h) - **memory**: @@ -133,7 +114,8 @@ The public API headers are grouped by topics: [EFD] (@ref rte_efd.h), [ACL] (@ref rte_acl.h), [member] (@ref rte_member.h), - [flow classify] (@ref rte_flow_classify.h) + [flow classify] (@ref rte_flow_classify.h), + [BPF] (@ref rte_bpf.h) - **containers**: [mbuf] (@ref rte_mbuf.h), @@ -159,6 +141,8 @@ The public API headers are grouped by topics: [array] (@ref rte_table_array.h), [stub] (@ref rte_table_stub.h) * [pipeline] (@ref rte_pipeline.h) + [port_in_action] (@ref rte_port_in_action.h) + [table_action] (@ref rte_table_action.h) - **basic**: [approx fraction] (@ref rte_approx.h), diff --git a/doc/api/doxy-api.conf b/doc/api/doxy-api.conf index cda52fdf..66693c38 100644 --- a/doc/api/doxy-api.conf +++ b/doc/api/doxy-api.conf @@ -1,54 +1,32 @@ -# BSD LICENSE -# +# SPDX-License-Identifier: BSD-3-Clause # Copyright 2013-2017 6WIND S.A. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND S.A. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
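The Doxygen configuration that follows drives the API reference build. As a hedged sketch of how it is typically consumed (the target name assumes the standard DPDK make-based documentation build; verify against your tree):

.. code-block:: console

    make doc-api-html    # renders the headers listed in INPUT below into HTML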
PROJECT_NAME = DPDK INPUT = doc/api/doxy-api-index.md \ drivers/crypto/scheduler \ + drivers/mempool/dpaa2 \ drivers/net/bnxt \ drivers/net/bonding \ drivers/net/dpaa \ drivers/net/i40e \ drivers/net/ixgbe \ drivers/net/softnic \ + drivers/raw/dpaa2_cmdif \ + drivers/raw/dpaa2_qdma \ lib/librte_eal/common/include \ lib/librte_eal/common/include/generic \ lib/librte_acl \ lib/librte_bbdev \ lib/librte_bitratestats \ + lib/librte_bpf \ lib/librte_cfgfile \ lib/librte_cmdline \ lib/librte_compat \ + lib/librte_compressdev \ lib/librte_cryptodev \ lib/librte_distributor \ lib/librte_efd \ - lib/librte_ether \ + lib/librte_ethdev \ lib/librte_eventdev \ lib/librte_flow_classify \ lib/librte_gro \ @@ -82,6 +60,7 @@ INPUT = doc/api/doxy-api-index.md \ FILE_PATTERNS = rte_*.h \ cmdline.h PREDEFINED = __DOXYGEN__ \ + VFIO_PRESENT \ __attribute__(x)= OPTIMIZE_OUTPUT_FOR_C = YES diff --git a/doc/api/doxy-html-custom.sh b/doc/api/doxy-html-custom.sh index e684a75a..3802007c 100755 --- a/doc/api/doxy-html-custom.sh +++ b/doc/api/doxy-html-custom.sh @@ -1,34 +1,6 @@ #! /bin/sh -e - -# BSD LICENSE -# +# SPDX-License-Identifier: BSD-3-Clause # Copyright 2013 6WIND S.A. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND S.A. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. CSS=$1 diff --git a/doc/guides/bbdevs/null.rst b/doc/guides/bbdevs/null.rst index 9baf2a99..0b885d17 100644 --- a/doc/guides/bbdevs/null.rst +++ b/doc/guides/bbdevs/null.rst @@ -4,7 +4,7 @@ BBDEV null Poll Mode Driver ============================ -The (**bbdev_null**) is a bbdev poll mode driver which provides a minimal +The (**baseband_null**) is a bbdev poll mode driver which provides a minimal implementation of a software bbdev device. As a null device it does not modify the data in the mbuf on which the bbdev operation is to operate and it only works for operation type ``RTE_BBDEV_OP_NONE``. @@ -30,9 +30,9 @@ Initialization To use the PMD in an application, user must: -- Call ``rte_vdev_init("bbdev_null")`` within the application. +- Call ``rte_vdev_init("baseband_null")`` within the application. 
-- Use ``--vdev="bbdev_null"`` in the EAL options, which will call ``rte_vdev_init()`` internally. +- Use ``--vdev="baseband_null"`` in the EAL options, which will call ``rte_vdev_init()`` internally. The following parameters (all optional) can be provided in the previous two calls: @@ -46,4 +46,4 @@ Example: .. code-block:: console - ./test-bbdev.py -e="--vdev=bbdev_null,socket_id=0,max_nb_queues=8" + ./test-bbdev.py -e="--vdev=baseband_null,socket_id=0,max_nb_queues=8" diff --git a/doc/guides/bbdevs/turbo_sw.rst b/doc/guides/bbdevs/turbo_sw.rst index b3fed163..0b96fbb1 100644 --- a/doc/guides/bbdevs/turbo_sw.rst +++ b/doc/guides/bbdevs/turbo_sw.rst @@ -4,7 +4,7 @@ SW Turbo Poll Mode Driver ========================= -The SW Turbo PMD (**turbo_sw**) provides a poll mode bbdev driver that utilizes +The SW Turbo PMD (**baseband_turbo_sw**) provides a poll mode bbdev driver that utilizes Intel optimized libraries for LTE Layer 1 workloads acceleration. This PMD supports the functions: Turbo FEC, Rate Matching and CRC functions. @@ -26,6 +26,8 @@ For the decode operation: * ``RTE_BBDEV_TURBO_CRC_TYPE_24B`` * ``RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN`` * ``RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN`` +* ``RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP`` +* ``RTE_BBDEV_TURBO_EARLY_TERMINATION`` Limitations @@ -39,25 +41,39 @@ Installation FlexRAN SDK Download ~~~~~~~~~~~~~~~~~~~~ -To build DPDK with the *turbo_sw* PMD the user is required to download -the export controlled ``FlexRAN SDK`` Libraries. An account at Intel Resource -Design Center needs to be registered from -`<https://www.intel.com/content/www/us/en/design/resource-design-center.html>`_. +To build DPDK with the *baseband_turbo_sw* PMD the user is required to download +the export controlled ``FlexRAN SDK`` Libraries. An account at `Intel Resource +Design Center <https://www.intel.com/content/www/us/en/design/resource-design-center.html>`_ +needs to be registered. Once registered, the user needs to log in, and look for -*Intel SWA_SW_FlexRAN_Release_Package R1_3_0* and click for download. Or use -this direct download link `<https://cdrd.intel.com/v1/dl/getContent/575367>`_. +*Intel FlexRAN Software Release Package -1-6-0* to download or directly through +this `link <https://cdrdv2.intel.com/v1/dl/getContent/600609>`_. After download is complete, the user needs to unpack and compile on their system before building DPDK. +The following table maps DPDK versions with past FlexRAN SDK releases: + +.. _table_flexran_releases: + +.. table:: DPDK and FlexRAN SDK releases compliance + + ===================== ============================ + DPDK version FlexRAN SDK release + ===================== ============================ + 18.02 1.3.0 + 18.05 1.4.0 + 18.08 1.6.0 + ===================== ============================ + FlexRAN SDK Installation ~~~~~~~~~~~~~~~~~~~~~~~~ The following are pre-requisites for building FlexRAN SDK Libraries: (a) An AVX2 supporting machine - (b) Windriver TS 2 or CentOS 7 operating systems - (c) Intel ICC compiler installed + (b) CentOS Linux release 7.2.1511 (Core) operating system + (c) Intel ICC 18.0.1 20171018 compiler installed The following instructions should be followed in this exact order: @@ -67,31 +83,25 @@ The following instructions should be followed in this exact order: source <path-to-icc-compiler-install-folder>/linux/bin/compilervars.sh intel64 -platform linux - -#. Extract the ``FlexRAN-1.3.0.tar.gz.zip`` package, then run the SDK extractor - script and accept the license: +#. 
Extract the ``flexran-1-6-0-tar.gz.zip`` package: .. code-block:: console - cd <path-to-workspace>/FlexRAN-1.3.0/ - ./SDK-R1.3.0.sh + unzip flexran-1-6-0-tar.gz.zip + tar xvzf flexran-1-6-0-tar.gz -C FlexRAN-1.6.0/ -#. To allow ``FlexRAN SDK R1.3.0`` to work with bbdev properly, the following - hotfix is required. Change the return of function ``rate_matching_turbo_lte_avx2()`` - located in file - ``<path-to-workspace>/FlexRAN-1.3.0/SDK-R1.3.0/sdk/source/phy/lib_rate_matching/phy_rate_match_avx2.cpp`` - to return 0 instead of 1. +#. Run the SDK extractor script and accept the license: - .. code-block:: c + .. code-block:: console - - return 1; - + return 0; + cd <path-to-workspace>/FlexRAN-1.6.0/ + ./SDK-R1.6.0.sh #. Generate makefiles based on system configuration: .. code-block:: console - cd <path-to-workspace>/FlexRAN-1.3.0/SDK-R1.3.0/sdk/ + cd <path-to-workspace>/FlexRAN-1.6.0/SDK-R1.6.0/sdk/ ./create-makefiles-linux.sh #. A build folder is generated in this form ``build-<ISA>-<CC>``, enter that @@ -100,7 +110,7 @@ The following instructions should be followed in this exact order: .. code-block:: console cd build-avx2-icc/ - make install + make && make install Initialization @@ -118,8 +128,8 @@ Example: .. code-block:: console - export FLEXRAN_SDK=<path-to-workspace>/FlexRAN-1.3.0/SDK-R1.3.0/sdk/build-avx2-icc/install - export DIR_WIRELESS_SDK=<path-to-workspace>/FlexRAN-1.3.0/SDK-R1.3.0/sdk/ + export FLEXRAN_SDK=<path-to-workspace>/FlexRAN-1.6.0/SDK-R1.6.0/sdk/build-avx2-icc/install + export DIR_WIRELESS_SDK=<path-to-workspace>/FlexRAN-1.6.0/SDK-R1.6.0/sdk/ * Set ``CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=y`` in DPDK common configuration @@ -127,9 +137,9 @@ Example: To use the PMD in an application, user must: -- Call ``rte_vdev_init("turbo_sw")`` within the application. +- Call ``rte_vdev_init("baseband_turbo_sw")`` within the application. -- Use ``--vdev="turbo_sw"`` in the EAL options, which will call ``rte_vdev_init()`` internally. +- Use ``--vdev="baseband_turbo_sw"`` in the EAL options, which will call ``rte_vdev_init()`` internally. The following parameters (all optional) can be provided in the previous two calls: @@ -143,5 +153,5 @@ Example: .. code-block:: console - ./test-bbdev.py -e="--vdev=turbo_sw,socket_id=0,max_nb_queues=8" \ - -c validation -v ./test_vectors/bbdev_vector_t?_default.data + ./test-bbdev.py -e="--vdev=baseband_turbo_sw,socket_id=0,max_nb_queues=8" \ + -c validation -v ./turbo_*_default.data diff --git a/doc/guides/compressdevs/features/default.ini b/doc/guides/compressdevs/features/default.ini new file mode 100644 index 00000000..829e4df6 --- /dev/null +++ b/doc/guides/compressdevs/features/default.ini @@ -0,0 +1,26 @@ +; +; Features of a default compression driver. +; +; This file defines the features that are valid for inclusion in +; the other driver files and also the order that they appear in +; the features table in the documentation. +; +[Features] +HW Accelerated = +CPU SSE = +CPU AVX = +CPU AVX2 = +CPU AVX512 = +CPU NEON = +Stateful = +Pass-through = +OOP SGL In SGL Out = +OOP SGL In LB Out = +OOP LB In SGL Out = +Deflate = +LZS = +Adler32 = +Crc32 = +Adler32&Crc32 = +Fixed = +Dynamic = diff --git a/doc/guides/compressdevs/features/isal.ini b/doc/guides/compressdevs/features/isal.ini new file mode 100644 index 00000000..919cf705 --- /dev/null +++ b/doc/guides/compressdevs/features/isal.ini @@ -0,0 +1,16 @@ +; +; Refer to default.ini for the full list of available PMD features. +; +; Supported features of 'ISA-L' compression driver. 
+;
+[Features]
+CPU SSE = Y
+CPU AVX = Y
+CPU AVX2 = Y
+CPU AVX512 = Y
+OOP SGL In SGL Out = Y
+OOP SGL In LB Out = Y
+OOP LB In SGL Out = Y
+Deflate = Y
+Fixed = Y
+Dynamic = Y
diff --git a/doc/guides/compressdevs/features/octeontx.ini b/doc/guides/compressdevs/features/octeontx.ini
new file mode 100644
index 00000000..884a8b07
--- /dev/null
+++ b/doc/guides/compressdevs/features/octeontx.ini
@@ -0,0 +1,10 @@
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+; Supported features of 'OCTEONTX ZIP' compression driver.
+;
+[Features]
+HW Accelerated = Y
+Deflate = Y
+Fixed = Y
+Dynamic = Y
diff --git a/doc/guides/compressdevs/features/qat.ini b/doc/guides/compressdevs/features/qat.ini
new file mode 100644
index 00000000..5cd4524b
--- /dev/null
+++ b/doc/guides/compressdevs/features/qat.ini
@@ -0,0 +1,15 @@
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+; Supported features of 'QAT' compression driver.
+;
+[Features]
+HW Accelerated = Y
+OOP SGL In SGL Out = Y
+OOP SGL In LB Out = Y
+OOP LB In SGL Out = Y
+Deflate = Y
+Adler32 = Y
+Crc32 = Y
+Adler32&Crc32 = Y
+Fixed = Y
diff --git a/doc/guides/compressdevs/features/zlib.ini b/doc/guides/compressdevs/features/zlib.ini
new file mode 100644
index 00000000..58a4ee3a
--- /dev/null
+++ b/doc/guides/compressdevs/features/zlib.ini
@@ -0,0 +1,10 @@
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+; Supported features of 'ZLIB' compression driver.
+;
+[Features]
+Pass-through = Y
+Deflate = Y
+Fixed = Y
+Dynamic = Y
diff --git a/doc/guides/compressdevs/index.rst b/doc/guides/compressdevs/index.rst
new file mode 100644
index 00000000..1f37e260
--- /dev/null
+++ b/doc/guides/compressdevs/index.rst
@@ -0,0 +1,16 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright(c) 2018 Intel Corporation.
+
+Compression Device Drivers
+==========================
+
+
+.. toctree::
+    :maxdepth: 2
+    :numbered:
+
+    overview
+    isal
+    octeontx
+    qat_comp
+    zlib
diff --git a/doc/guides/compressdevs/isal.rst b/doc/guides/compressdevs/isal.rst
new file mode 100644
index 00000000..3bc30229
--- /dev/null
+++ b/doc/guides/compressdevs/isal.rst
@@ -0,0 +1,123 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright(c) 2018 Intel Corporation.
+
+ISA-L Compression Poll Mode Driver
+==================================
+
+The ISA-L PMD (**librte_pmd_isal_comp**) provides a poll mode compression and
+decompression driver utilizing the Intel ISA-L library,
+which implements the deflate algorithm for both Deflate (compression) and Inflate (decompression).
+
+
+Features
+--------
+
+ISA-L PMD has support for:
+
+Compression/Decompression algorithm:
+
+    * DEFLATE
+
+Huffman code type:
+
+    * FIXED
+    * DYNAMIC
+
+Window size support:
+
+    * 32K
+
+Level guide:
+
+The ISA-L levels have been mapped to correspond roughly to the equivalent ZLIB level,
+i.e. ZLIB L1 gives a compression ratio similar to ISA-L L1.
+Compressdev level 0 enables "No Compression", which passes the uncompressed
+data to the output buffer, plus deflate headers.
+The ISA-L library does not support this, therefore compressdev level 0 is not supported.
+
+The compressdev API has 10 levels, 0-9. ISA-L has 4 levels of compression, 0-3.
+As a result, the level mappings from the API to the PMD are shown below.
+
+.. _table_ISA-L_compression_levels:
+
+.. table:: Level mapping from Compressdev to ISA-L PMD.
+ + +-------------+----------------------------------------------+-----------------------------------------------+ + | Compressdev | PMD Functionality | Internal ISA-L | + | API Level | | Level | + +=============+==============================================+===============================================+ + | 0 | No compression, Not Supported | --- | + +-------------+----------------------------------------------+-----------------------------------------------+ + | 1 | Dynamic (Fast compression) | 1 | + +-------------+----------------------------------------------+-----------------------------------------------+ + | 2 | Dynamic | 2 | + | | (Higher compression ratio) | | + +-------------+----------------------------------------------+-----------------------------------------------+ + | 3 | Dynamic | 3 | + | | (Best compression ratio) | (Level 2 if | + | | | no AVX512/AVX2) | + +-------------+----------------------------------------------+-----------------------------------------------+ + | 4 | Dynamic (Best compression ratio) | Same as above | + +-------------+----------------------------------------------+-----------------------------------------------+ + | 5 | Dynamic (Best compression ratio) | Same as above | + +-------------+----------------------------------------------+-----------------------------------------------+ + | 6 | Dynamic (Best compression ratio) | Same as above | + +-------------+----------------------------------------------+-----------------------------------------------+ + | 7 | Dynamic (Best compression ratio) | Same as above | + +-------------+----------------------------------------------+-----------------------------------------------+ + | 8 | Dynamic (Best compression ratio) | Same as above | + +-------------+----------------------------------------------+-----------------------------------------------+ + | 9 | Dynamic (Best compression ratio) | Same as above | + +-------------+----------------------------------------------+-----------------------------------------------+ + +.. Note:: + + The above table only shows mapping when API calls for dynamic compression. + For fixed compression, regardless of API level, internally ISA-L level 0 is always used. + +Limitations +----------- + +* Compressdev level 0, no compression, is not supported. + +* Checksums will not be supported until future release. + +Installation +------------ + +* To build DPDK with Intel's ISA-L library, the user is required to download the library from `<https://github.com/01org/isa-l>`_. + +* Once downloaded, the user needs to build the library, the ISA-L autotools are usually sufficient:: + + ./autogen.sh + ./configure + +* make can be used to install the library on their system, before building DPDK:: + + make + sudo make install + +* To build with meson, the **libisal.pc** file, must be copied into "pkgconfig", + e.g. /usr/lib/pkgconfig or /usr/lib64/pkgconfig depending on your system, + for meson to find the ISA-L library. The **libisal.pc** is located in library sources:: + + cp isal/libisal.pc /usr/lib/pkgconfig/ + + +Initialization +-------------- + +In order to enable this virtual compression PMD, user must: + +* Set ``CONFIG_RTE_LIBRTE_PMD_ISAL=y`` in config/common_base. + +To use the PMD in an application, user must: + +* Call ``rte_vdev_init("compress_isal")`` within the application. + +* Use ``--vdev="compress_isal"`` in the EAL options, which will call ``rte_vdev_init()`` internally. 
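A minimal invocation sketch for the second option (the application path is illustrative; the ``compress_isal`` device name and the ``socket_id`` argument are taken from this guide, which describes the parameter next):

.. code-block:: console

    ./build/app/test --vdev="compress_isal,socket_id=0"
    # at the test prompt, run: compressdev_autotest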
+
+The following parameter (optional) can be provided in the previous two calls:
+
+* ``socket_id:`` Specify the socket where the memory for the device is going to be allocated
+  (by default, socket_id will be the socket where the core that is creating the PMD is running on).
diff --git a/doc/guides/compressdevs/octeontx.rst b/doc/guides/compressdevs/octeontx.rst
new file mode 100644
index 00000000..5a32d5d1
--- /dev/null
+++ b/doc/guides/compressdevs/octeontx.rst
@@ -0,0 +1,105 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright(c) 2018 Cavium Networks.
+
+Octeontx ZIP Compression Poll Mode Driver
+=========================================
+
+The Octeontx ZIP PMD (**librte_pmd_octeontx_zip**) provides a poll mode
+compression and decompression driver for the ZIP HW offload device found in
+the **Cavium OCTEONTX** SoC family.
+
+More information can be found at `Cavium, Inc Official Website
+<http://www.cavium.com/OCTEON-TX_ARM_Processors.html>`_.
+
+Features
+--------
+
+Octeontx ZIP PMD has support for:
+
+Compression/Decompression algorithm:
+
+* DEFLATE
+
+Huffman code type:
+
+* FIXED
+* DYNAMIC
+
+Window size support:
+
+* 2 to 2^14
+
+Limitations
+-----------
+
+* Chained mbufs are not supported.
+
+Supported OCTEONTX SoCs
+-----------------------
+
+- CN83xx
+
+Steps To Setup Platform
+-----------------------
+
+   The OCTEONTX SDK includes a kernel image which provides the OCTEONTX ZIP PF
+   driver needed to manage configuration of the ZIPVF device.
+   The required SDK version is "OCTEONTX-SDK-6.2.0-build35" or above.
+
+   The SDK can be installed using the command below.
+   # rpm -ivh OCTEONTX-SDK-6.2.0-build35.x86_64.rpm --force --nodeps
+   This installs the OCTEONTX-SDK at the following default location:
+   /usr/local/Cavium_Networks/OCTEONTX-SDK/
+
+   For more information on building and booting the Linux kernel on OCTEONTX,
+   please refer to /usr/local/Cavium_Networks/OCTEONTX-SDK/docs/OcteonTX-SDK-UG_6.2.0.pdf.
+
+   The SDK and related information can be obtained from: `Cavium support site <https://support.cavium.com/>`_.
+
+Installation
+------------
+
+Driver Compilation
+~~~~~~~~~~~~~~~~~~
+
+To compile the OCTEONTX ZIP PMD for Linux arm64 gcc target, run the
+following ``make`` command:
+
+   .. code-block:: console
+
+      cd <DPDK-source-directory>
+      make config T=arm64-thunderx-linuxapp-gcc install
+
+
+Initialization
+--------------
+
+The OCTEONTX ZIP device is exposed as a PCI device consisting of a set of
+PCIe VF devices. On EAL initialization, ZIP PCIe VF devices will be
+probed. To use the PMD in an application, the user must:
+
+* run the ``dpdk-devbind.py`` script to bind eight ZIP PCIe VFs to the ``vfio-pci`` driver:
+
+   .. code-block:: console
+
+      ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:00.1
+      ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:00.2
+      ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:00.3
+      ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:00.4
+      ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:00.5
+      ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:00.6
+      ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:00.7
+      ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:01.0
+
+* The unit test cases can be tested as below:
+
+   .. code-block:: console
+
+      # reserve enough huge pages, then from the top-level DPDK directory:
+      export RTE_TARGET=arm64-thunderx-linuxapp-gcc
+      export RTE_SDK=`pwd`
+      cd test/test
+      make
+      ./test
+      # at the test prompt, run:
+      compressdev_autotest
diff --git a/doc/guides/compressdevs/overview.rst b/doc/guides/compressdevs/overview.rst
new file mode 100644
index 00000000..70bbe82b
--- /dev/null
+++ b/doc/guides/compressdevs/overview.rst
@@ -0,0 +1,32 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright(c) 2018 Intel Corporation.
+
+Compression Device Supported Functionality Matrices
+===================================================
+
+Supported Feature Flags
+-----------------------
+
+.. _table_compression_pmd_features:
+
+.. include:: overview_feature_table.txt
+
+.. Note::
+
+   - "Pass-through" feature flag refers to the ability of the PMD
+     to let input buffers pass through it, copying the input to the output,
+     without making any modifications to it (no compression done).
+
+   - "OOP SGL In SGL Out" feature flag stands for
+     "Out-of-place Scatter-gather list Input, Scatter-gather list Output",
+     which means the PMD supports different scatter-gather styled input and output buffers
+     (i.e. both can consist of multiple segments).
+
+   - "OOP SGL In LB Out" feature flag stands for
+     "Out-of-place Scatter-gather list Input, Linear Buffers Output",
+     which means the PMD supports input from scatter-gather styled buffers, outputting linear buffers
+     (i.e. a single segment).
+
+   - "OOP LB In SGL Out" feature flag stands for
+     "Out-of-place Linear Buffers Input, Scatter-gather list Output",
+     which means the PMD supports input from a linear buffer, outputting scatter-gather styled buffers.
diff --git a/doc/guides/compressdevs/qat_comp.rst b/doc/guides/compressdevs/qat_comp.rst
new file mode 100644
index 00000000..8b1270b7
--- /dev/null
+++ b/doc/guides/compressdevs/qat_comp.rst
@@ -0,0 +1,47 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright(c) 2018 Intel Corporation.
+
+Intel(R) QuickAssist (QAT) Compression Poll Mode Driver
+=======================================================
+
+The QAT compression PMD provides poll mode compression & decompression driver
+support for the following hardware accelerator devices:
+
+* ``Intel QuickAssist Technology C62x``
+* ``Intel QuickAssist Technology C3xxx``
+
+
+Features
+--------
+
+QAT compression PMD has support for:
+
+Compression/Decompression algorithm:
+
+    * DEFLATE
+
+Huffman code type:
+
+    * FIXED
+
+Window size support:
+
+    * 32K
+
+Checksum generation:
+
+    * CRC32, Adler and combined checksum
+
+Limitations
+-----------
+
+* Compressdev level 0, no compression, is not supported.
+
+* Dynamic Huffman encoding is not yet supported.
+
+Installation
+------------
+
+The QAT compression PMD is built by default with a standard DPDK build.
+
+It depends on a QAT kernel driver, see :ref:`qat_kernel_installation`.
diff --git a/doc/guides/compressdevs/zlib.rst b/doc/guides/compressdevs/zlib.rst
new file mode 100644
index 00000000..986c59d4
--- /dev/null
+++ b/doc/guides/compressdevs/zlib.rst
@@ -0,0 +1,69 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright(c) 2018 Cavium Networks.
+
+ZLIB Compression Poll Mode Driver
+==================================
+
+The ZLIB PMD (**librte_pmd_zlib**) provides a poll mode compression and
+decompression driver based on the SW zlib library.
+
+Features
+--------
+
+ZLIB PMD has support for:
+
+Compression/Decompression algorithm:
+
+* DEFLATE
+
+Huffman code type:
+
+* FIXED
+* DYNAMIC
+
+Window size support:
+
+* Min - 256 bytes
+* Max - 32K
+
+Limitations
+-----------
+
+* Scatter-Gather and Stateful are not supported.
+
+Installation
+------------
+
+* To build DPDK with the ZLIB library, the user is required to download the ``libz`` library.
+* Use the following commands for installation.
+
+* For Fedora users::
+
+     sudo yum install zlib-devel
+
+* For Ubuntu users::
+
+     sudo apt-get install zlib1g-dev
+
+* Once downloaded, the user needs to build the library.
+
+* To build from sources,
+  download the zlib sources from http://zlib.net/ and do the following before building DPDK::
+
+     make
+     sudo make install
+
+Initialization
+--------------
+
+In order to enable this virtual compression PMD, user must:
+
+* Set ``CONFIG_RTE_LIBRTE_PMD_ZLIB=y`` in config/common_base.
+
+To use the PMD in an application, user must:
+
+* Call ``rte_vdev_init("compress_zlib")`` within the application.
+
+* Use ``--vdev="compress_zlib"`` in the EAL options, which will call ``rte_vdev_init()`` internally.
+
+The following parameter (optional) can be provided in the previous two calls:
+
+* ``socket_id:`` Specify the socket where the memory for the device is going to be allocated
+  (by default, socket_id will be the socket where the core that is creating the PMD is running on).
diff --git a/doc/guides/conf.py b/doc/guides/conf.py
index cf06f257..c883306d 100644
--- a/doc/guides/conf.py
+++ b/doc/guides/conf.py
@@ -190,18 +190,23 @@ def generate_overview_table(output_filename, table_id, section, table_name, titl
     ini_files.sort()
 
     # Build up a list of the table header names from the ini filenames.
-    header_names = []
+    pmd_names = []
     for ini_filename in ini_files:
         name = ini_filename[:-4]
         name = name.replace('_vf', 'vf')
+        pmd_names.append(name)
 
-        # Pad the table header names to match the existing format.
+    # Pad the table header names.
+    max_header_len = len(max(pmd_names, key=len))
+    header_names = []
+    for name in pmd_names:
         if '_vec' in name:
             pmd, vec = name.split('_')
-            name = '{0:{fill}{align}7}vec'.format(pmd, fill='.', align='<')
+            name = '{0:{fill}{align}{width}}vec'.format(pmd,
+                    fill='.', align='<', width=max_header_len-3)
         else:
-            name = '{0:{fill}{align}10}'.format(name, fill=' ', align='<')
-
+            name = '{0:{fill}{align}{width}}'.format(name,
+                    fill=' ', align='<', width=max_header_len)
         header_names.append(name)
 
     # Create a dict of the defined features for each driver from the ini files.
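The regenerated compression table can be checked locally; a hedged sketch assuming the standard guides build target (the output file path comes from the ``table_file`` assignment in the setup hook below):

.. code-block:: console

    make doc-guides-html
    # the generated table lands in doc/guides/compressdevs/overview_feature_table.txt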
@@ -253,7 +258,7 @@ def print_table_header(outfile, num_cols, header_names, title): print_table_row(outfile, title, line) - for i in range(1, 10): + for i in range(1, len(header_names[0])): line = '' for name in header_names: line += ' ' + name[i] @@ -310,7 +315,7 @@ def print_table_css(outfile, table_id): text-align: center; } table#idx th { - font-size: 80%; + font-size: 72%; white-space: pre-wrap; vertical-align: top; padding: 0.5em 0; @@ -383,6 +388,11 @@ def setup(app): 'AEAD', 'AEAD algorithms in crypto drivers', 'AEAD algorithm') + table_file = dirname(__file__) + '/compressdevs/overview_feature_table.txt' + generate_overview_table(table_file, 1, + 'Features', + 'Features availability in compression drivers', + 'Feature') if LooseVersion(sphinx_version) < LooseVersion('1.3.1'): print('Upgrade sphinx to version >= 1.3.1 for ' diff --git a/doc/guides/contributing/cheatsheet.rst b/doc/guides/contributing/cheatsheet.rst index 7bc07715..0debd118 100644 --- a/doc/guides/contributing/cheatsheet.rst +++ b/doc/guides/contributing/cheatsheet.rst @@ -1,3 +1,6 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright 2018 The DPDK contributors + Patch Cheatsheet ================ diff --git a/doc/guides/contributing/coding_style.rst b/doc/guides/contributing/coding_style.rst index b0f0adb8..b1bf0d15 100644 --- a/doc/guides/contributing/coding_style.rst +++ b/doc/guides/contributing/coding_style.rst @@ -1,3 +1,6 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright 2018 The DPDK contributors + .. _coding_style: DPDK Coding Style @@ -614,8 +617,8 @@ In the DPDK environment, use the logging interface provided: * is DEBUG) */ rte_log_set_level(my_logtype2, RTE_LOG_NOTICE); - /* enable all PMD logs (whose identifier string starts with "pmd") */ - rte_log_set_level_regexp("pmd.*", RTE_LOG_DEBUG); + /* enable all PMD logs (whose identifier string starts with "pmd.") */ + rte_log_set_level_pattern("pmd.*", RTE_LOG_DEBUG); /* log in debug level */ rte_log_set_global_level(RTE_LOG_DEBUG); diff --git a/doc/guides/contributing/design.rst b/doc/guides/contributing/design.rst index 88d3a433..651fd224 100644 --- a/doc/guides/contributing/design.rst +++ b/doc/guides/contributing/design.rst @@ -1,3 +1,6 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright 2018 The DPDK contributors + Design ====== diff --git a/doc/guides/contributing/documentation.rst b/doc/guides/contributing/documentation.rst index 82f2e1bb..6a075553 100644 --- a/doc/guides/contributing/documentation.rst +++ b/doc/guides/contributing/documentation.rst @@ -1,3 +1,6 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright 2018 The DPDK contributors + .. _doc_guidelines: DPDK Documentation Guidelines diff --git a/doc/guides/contributing/index.rst b/doc/guides/contributing/index.rst index 329b678a..f90df451 100644 --- a/doc/guides/contributing/index.rst +++ b/doc/guides/contributing/index.rst @@ -1,3 +1,6 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright 2018 The DPDK contributors + Contributor's Guidelines ======================== diff --git a/doc/guides/contributing/patches.rst b/doc/guides/contributing/patches.rst index 2287835f..a3d78802 100644 --- a/doc/guides/contributing/patches.rst +++ b/doc/guides/contributing/patches.rst @@ -1,3 +1,6 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright 2018 The DPDK contributors + .. 
_submitting_patches:

 Contributing Code to DPDK
@@ -256,6 +259,66 @@ In addition to the ``Signed-off-by:`` name the commit messages can also have
 tags for who reported, suggested, tested and reviewed the patch being posted.
 Please refer to the `Tested, Acked and Reviewed by`_ section.
 
+Patch Fix Related Issues
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+`Coverity <https://scan.coverity.com/projects/dpdk-data-plane-development-kit>`_
+is a tool for static code analysis.
+It is used as a cloud-based service to scan the DPDK source code
+and alert developers to any potential defects in the source code.
+When fixing an issue found by Coverity, the patch must contain a Coverity issue ID
+in the body of the commit message. For example::
+
+     doc: fix some parameter description
+
+     Update the docs, fixing description of some parameter.
+
+     Coverity issue: 12345
+     Fixes: abcdefgh1234 ("doc: add some parameter")
+     Cc: author@example.com
+
+     Signed-off-by: Alex Smith <alex.smith@example.com>
+
+
+`Bugzilla <https://dpdk.org/tracker>`_
+is a bug- or issue-tracking system.
+Bug-tracking systems allow individuals or groups of developers
+to keep track of outstanding problems with their product effectively.
+When fixing an issue raised in Bugzilla, the patch must contain
+a Bugzilla issue ID in the body of the commit message.
+For example::
+
+     doc: fix some parameter description
+
+     Update the docs, fixing description of some parameter.
+
+     Bugzilla ID: 12345
+     Fixes: abcdefgh1234 ("doc: add some parameter")
+     Cc: author@example.com
+
+     Signed-off-by: Alex Smith <alex.smith@example.com>
+
+Patch for Stable Releases
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+All fix patches to the master branch that are candidates for backporting
+should also be CCed to the `stable@dpdk.org <http://dpdk.org/ml/listinfo/stable>`_
+mailing list.
+In the commit message body, ``Cc: stable@dpdk.org`` should be inserted as follows::
+
+     doc: fix some parameter description
+
+     Update the docs, fixing description of some parameter.
+
+     Fixes: abcdefgh1234 ("doc: add some parameter")
+     Cc: stable@dpdk.org
+
+     Signed-off-by: Alex Smith <alex.smith@example.com>
+
+For further information on stable contribution you can go to
+:doc:`Stable Contribution Guide <stable>`.
+
 Creating Patches
 ----------------
@@ -450,6 +513,20 @@ Experienced committers may send patches directly with ``git send-email`` without
 The options ``--annotate`` and ``confirm = always`` are recommended for checking
 patches before sending.
 
+Backporting patches for Stable Releases
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sometimes a maintainer or contributor wishes, or can be asked, to send a patch
+for a stable release rather than mainline.
+In this case the patch(es) should be sent to ``stable@dpdk.org``,
+not to ``dev@dpdk.org``.
+
+Given that there are multiple stable releases being maintained at the same time,
+please specify exactly which branch(es) the patch is for
+using ``git send-email --subject-prefix='PATCH 16.11' ...``,
+and optionally also note it in the cover letter or in the annotation.
+
+
 The Review Process
 ------------------
diff --git a/doc/guides/contributing/stable.rst b/doc/guides/contributing/stable.rst
index 0f2f1f37..1746c046 100644
--- a/doc/guides/contributing/stable.rst
+++ b/doc/guides/contributing/stable.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2018 The DPDK contributors
+
 .. _stable_lts_releases:
 
 DPDK Stable Releases and Long Term Support
@@ -57,7 +60,25 @@ that a tagged release has been tested.
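A concrete invocation sketch combining the options recommended above (the patch file name is illustrative):

.. code-block:: console

    git send-email --annotate --subject-prefix='PATCH 16.11' \
        --to stable@dpdk.org 0001-some-fix.patch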
What changes should be backported --------------------------------- -Backporting should be limited to bug fixes. +Backporting should be limited to bug fixes. All patches accepted on the master +branch with a Fixes: tag should be backported to the relevant stable/LTS +branches, unless the submitter indicates otherwise. If there are exceptions, +they will be discussed on the mailing lists. + +Fixes suitable for backport should have a ``Cc: stable@dpdk.org`` tag in the +commit message body as follows:: + + doc: fix some parameter description + + Update the docs, fixing description of some parameter. + + Fixes: abcdefgh1234 ("doc: add some parameter") + Cc: stable@dpdk.org + + Signed-off-by: Alex Smith <alex.smith@example.com> + + +Fixes not suitable for backport should not include the ``Cc: stable@dpdk.org`` tag. Features should not be backported to stable releases. It may be acceptable, in limited cases, to back port features for the LTS release where: diff --git a/doc/guides/contributing/versioning.rst b/doc/guides/contributing/versioning.rst index c495294d..01b36247 100644 --- a/doc/guides/contributing/versioning.rst +++ b/doc/guides/contributing/versioning.rst @@ -1,3 +1,6 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright 2018 The DPDK contributors + Managing ABI updates ==================== @@ -43,29 +46,6 @@ ABI versions are set at the time of major release labeling, and the ABI may change multiple times, without warning, between the last release label and the HEAD label of the git tree. -APIs marked as ``experimental`` are not considered part of the ABI and may -change without warning at any time. Since changes to APIs are most likely -immediately after their introduction, as users begin to take advantage of -those new APIs and start finding issues with them, new DPDK APIs will be -automatically marked as ``experimental`` to allow for a period of stabilization -before they become part of a tracked ABI. - -Note that marking an API as experimental is a multi step process. -To mark an API as experimental, the symbols which are desired to be exported -must be placed in an EXPERIMENTAL version block in the corresponding libraries' -version map script. -Secondly, the corresponding definitions of those exported functions, and -their forward declarations (in the development header files), must be marked -with the ``__rte_experimental`` tag (see ``rte_compat.h``). -The DPDK build makefiles perform a check to ensure that the map file and the -C code reflect the same list of symbols. -This check can be circumvented by defining ``ALLOW_EXPERIMENTAL_API`` -during compilation in the corresponding library Makefile. - -In addition to tagging the code with ``__rte_experimental``, -the doxygen markup must also contain the EXPERIMENTAL string, -and the MAINTAINERS file should note the EXPERIMENTAL libraries. - ABI versions, once released, are available until such time as their deprecation has been noted in the Release Notes for at least one major release cycle. For example consider the case where the ABI for DPDK 2.0 has been @@ -119,6 +99,37 @@ readability purposes should be avoided. follow the relevant deprecation policy procedures as above: 3 acks and announcement at least one release in advance. +Experimental APIs +~~~~~~~~~~~~~~~~~ + +APIs marked as ``experimental`` are not considered part of the ABI and may +change without warning at any time. 
Since changes to APIs are most likely
+immediately after their introduction, as users begin to take advantage of
+those new APIs and start finding issues with them, new DPDK APIs will be
+automatically marked as ``experimental`` to allow for a period of stabilization
+before they become part of a tracked ABI.
+
+Note that marking an API as experimental is a multi-step process.
+To mark an API as experimental, the symbols which are desired to be exported
+must be placed in an EXPERIMENTAL version block in the corresponding libraries'
+version map script.
+Secondly, the corresponding definitions of those exported functions, and
+their forward declarations (in the development header files), must be marked
+with the ``__rte_experimental`` tag (see ``rte_compat.h``).
+The DPDK build makefiles perform a check to ensure that the map file and the
+C code reflect the same list of symbols.
+This check can be circumvented by defining ``ALLOW_EXPERIMENTAL_API``
+during compilation in the corresponding library Makefile.
+
+In addition to tagging the code with ``__rte_experimental``,
+the doxygen markup must also contain the EXPERIMENTAL string,
+and the MAINTAINERS file should note the EXPERIMENTAL libraries.
+
+Removing the experimental tag associated with an API does not require a
+deprecation notice. However, an API should remain in the experimental state
+for at least one release. Thereafter, the normal process of posting a patch
+for review to the mailing list can be followed.
+
 Examples of Deprecation Notices
 -------------------------------
 
diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index ffd6ba90..e0346080 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -36,12 +36,13 @@ Installation
 
 To build DPDK with the AESNI_GCM_PMD the user is required to download the multi-buffer
 library from `here <https://github.com/01org/intel-ipsec-mb>`_
 and compile it on their user system before building DPDK.
-The latest version of the library supported by this PMD is v0.48, which
-can be downloaded in `<https://github.com/01org/intel-ipsec-mb/archive/v0.48.zip>`_.
+The latest version of the library supported by this PMD is v0.50, which
+can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.50.zip>`_.
 
 .. code-block:: console
 
-    make
+    make
+    make install
 
 As a reference, the following table shows a mapping between the past DPDK versions
 and the external crypto libraries supported by them:
 
@@ -55,7 +56,8 @@ and the external crypto libraries supported by them:
 ============= ================================
 16.04 - 16.11  Multi-buffer library 0.43 - 0.44
 17.02 - 17.05  ISA-L Crypto v2.18
-17.08+         Multi-buffer library 0.46+
+17.08 - 18.02  Multi-buffer library 0.46 - 0.48
+18.05+         Multi-buffer library 0.49+
 ============= ================================
 
@@ -64,9 +66,6 @@ Initialization
 --------------
 
 In order to enable this virtual crypto PMD, user must:
 
-* Export the environmental variable AESNI_MULTI_BUFFER_LIB_PATH with the path where
-  the library was extracted.
-
 * Build the multi buffer library (explained in Installation section).
 
 * Set CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=y in config/common_base.
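+
+As a purely illustrative sketch (the unpack location ``~/intel-ipsec-mb-0.50``
+and the build target are assumptions, not requirements of this guide), the
+complete build flow could look like:
+
+.. code-block:: console
+
+    cd ~/intel-ipsec-mb-0.50    # assumed unpack location of the library
+    make
+    make install
+    cd <DPDK-source-directory>
+    make config T=x86_64-native-linuxapp-gcc
+    sed -i 's,\(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM\)=n,\1=y,' build/.config
+    make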
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst index 3950daae..c2929500 100644 --- a/doc/guides/cryptodevs/aesni_mb.rst +++ b/doc/guides/cryptodevs/aesni_mb.rst @@ -27,6 +27,7 @@ Cipher algorithms: * RTE_CRYPTO_CIPHER_AES256_CTR * RTE_CRYPTO_CIPHER_AES_DOCSISBPI * RTE_CRYPTO_CIPHER_DES_CBC +* RTE_CRYPTO_CIPHER_3DES_CBC * RTE_CRYPTO_CIPHER_DES_DOCSISBPI Hash algorithms: @@ -38,6 +39,7 @@ Hash algorithms: * RTE_CRYPTO_HASH_SHA384_HMAC * RTE_CRYPTO_HASH_SHA512_HMAC * RTE_CRYPTO_HASH_AES_XCBC_HMAC +* RTE_CRYPTO_HASH_AES_CMAC AEAD algorithms: @@ -56,12 +58,13 @@ Installation To build DPDK with the AESNI_MB_PMD the user is required to download the multi-buffer library from `here <https://github.com/01org/intel-ipsec-mb>`_ and compile it on their user system before building DPDK. -The latest version of the library supported by this PMD is v0.48, which -can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.48.zip>`_. +The latest version of the library supported by this PMD is v0.50, which +can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.50.zip>`_. .. code-block:: console - make + make + make install As a reference, the following table shows a mapping between the past DPDK versions and the Multi-Buffer library version supported by them: @@ -77,7 +80,8 @@ and the Multi-Buffer library version supported by them: 17.02 0.44 17.05 - 17.08 0.45 - 0.48 17.11 0.47 - 0.48 - 18.02+ 0.48 + 18.02 0.48 + 18.05+ 0.49+ ============== ============================ @@ -86,9 +90,6 @@ Initialization In order to enable this virtual crypto PMD, user must: -* Export the environmental variable AESNI_MULTI_BUFFER_LIB_PATH with the path where - the library was extracted. - * Build the multi buffer library (explained in Installation section). * Set CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y in config/common_base. diff --git a/doc/guides/cryptodevs/ccp.rst b/doc/guides/cryptodevs/ccp.rst new file mode 100644 index 00000000..034d2036 --- /dev/null +++ b/doc/guides/cryptodevs/ccp.rst @@ -0,0 +1,140 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + +AMD CCP Poll Mode Driver +======================== + +This code provides the initial implementation of the ccp poll mode driver. +The CCP poll mode driver library (librte_pmd_ccp) implements support for +AMD’s cryptographic co-processor (CCP). The CCP PMD is a virtual crypto +poll mode driver which schedules crypto operations to one or more available +CCP hardware engines on the platform. 
The CCP PMD provides poll mode crypto
+driver support for the following hardware accelerator devices::
+
+	AMD Cryptographic Co-processor (0x1456)
+	AMD Cryptographic Co-processor (0x1468)
+
+Features
+--------
+
+CCP crypto PMD has support for:
+
+Cipher algorithms:
+
+* ``RTE_CRYPTO_CIPHER_AES_CBC``
+* ``RTE_CRYPTO_CIPHER_AES_ECB``
+* ``RTE_CRYPTO_CIPHER_AES_CTR``
+* ``RTE_CRYPTO_CIPHER_3DES_CBC``
+
+Hash algorithms:
+
+* ``RTE_CRYPTO_AUTH_SHA1``
+* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA224``
+* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA256``
+* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA384``
+* ``RTE_CRYPTO_AUTH_SHA384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA512``
+* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
+* ``RTE_CRYPTO_AUTH_MD5_HMAC``
+* ``RTE_CRYPTO_AUTH_AES_CMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_224``
+* ``RTE_CRYPTO_AUTH_SHA3_224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_256``
+* ``RTE_CRYPTO_AUTH_SHA3_256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_384``
+* ``RTE_CRYPTO_AUTH_SHA3_384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_512``
+* ``RTE_CRYPTO_AUTH_SHA3_512_HMAC``
+
+AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
+Installation
+------------
+
+To compile the CCP PMD, it has to be enabled in the config/common_base file and the
+openssl packages have to be installed in the build environment.
+
+* ``CONFIG_RTE_LIBRTE_PMD_CCP=y``
+
+For Ubuntu 16.04 LTS, use the following command to install openssl in the build system:
+
+.. code-block:: console
+
+    sudo apt-get install openssl
+
+This code was verified on Ubuntu 16.04.
+
+Initialization
+--------------
+
+Bind the CCP devices to the DPDK UIO driver module before running the CCP PMD stack,
+e.g. for the 0x1456 device::
+
+    cd to the top-level DPDK directory
+    modprobe uio
+    insmod ./build/kmod/igb_uio.ko
+    echo "1022 1456" > /sys/bus/pci/drivers/igb_uio/new_id
+
+Another way to bind the CCP devices to the DPDK UIO driver is by using the ``dpdk-devbind.py`` script.
+The following command assumes a ``BDF`` of ``0000:09:00.2``::
+
+    cd to the top-level DPDK directory
+    ./usertools/dpdk-devbind.py -b igb_uio 0000:09:00.2
+
+In order to enable the CCP crypto PMD, the user must set CONFIG_RTE_LIBRTE_PMD_CCP=y in config/common_base.
+
+To use the PMD in an application, the user must:
+
+* Call rte_vdev_init("crypto_ccp") within the application.
+
+* Use --vdev="crypto_ccp" in the EAL options, which will call rte_vdev_init() internally.
+
+The following parameters (all optional) can be provided in the previous two calls:
+
+* socket_id: Specify the socket where the memory for the device is going to be allocated
+  (by default, socket_id will be the socket where the core that is creating the PMD is running).
+
+* max_nb_queue_pairs: Specify the maximum number of queue pairs in the device.
+
+* max_nb_sessions: Specify the maximum number of sessions that can be created (2048 by default).
+
+* ccp_auth_opt: Specify authentication operations to perform on CPU using openssl APIs.
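+
+For example, a hypothetical invocation (the parameter values below are chosen
+purely for illustration) could combine these parameters as:
+
+.. code-block:: console
+
+    ./your_crypto_application <EAL args> --vdev="crypto_ccp,socket_id=0,max_nb_queue_pairs=8,max_nb_sessions=1024" -- ...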
+To validate the CCP PMD, the l2fwd-crypto example can be used with the following command:
+
+.. code-block:: console
+
+    sudo ./build/l2fwd-crypto -l 1 -n 4 --vdev "crypto_ccp" -- -p 0x1
+    --chain CIPHER_HASH --cipher_op ENCRYPT --cipher_algo AES_CBC
+    --cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
+    --iv 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:ff
+    --auth_op GENERATE --auth_algo SHA1_HMAC
+    --auth_key 11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+    :11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+    :11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+
+The CCP PMD also supports computing authentication over CPU with cipher offloaded to CCP.
+To enable this feature, pass an additional argument as ccp_auth_opt=1 to --vdev parameters as
+follows:
+
+.. code-block:: console
+
+    sudo ./build/l2fwd-crypto -l 1 -n 4 --vdev "crypto_ccp,ccp_auth_opt=1" -- -p 0x1
+    --chain CIPHER_HASH --cipher_op ENCRYPT --cipher_algo AES_CBC
+    --cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
+    --iv 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:ff
+    --auth_op GENERATE --auth_algo SHA1_HMAC
+    --auth_key 11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+    :11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+    :11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+
+Limitations
+-----------
+
+* Chained mbufs are not supported.
+* MD5_HMAC is supported only for CPU based authentication.
diff --git a/doc/guides/cryptodevs/dpaa2_sec.rst b/doc/guides/cryptodevs/dpaa2_sec.rst
index 5460a92d..9191704e 100644
--- a/doc/guides/cryptodevs/dpaa2_sec.rst
+++ b/doc/guides/cryptodevs/dpaa2_sec.rst
@@ -134,10 +134,20 @@ Supported DPAA2 SoCs
 
 * LS2088A/LS2048A
 * LS1088A/LS1048A
 
+Whitelisting & Blacklisting
+---------------------------
+
+For blacklisting a DPAA2 SEC device, the following command can be used.
+
+ .. code-block:: console
+
+    <dpdk app> <EAL args> -b "fslmc:dpseci.x" -- ...
+
+Where x is the device object id as configured in the resource container.
+
 Limitations
 -----------
 
-* Chained mbufs are not supported.
 * Hash followed by Cipher mode is not supported
 * Only supports the session-oriented API implementation (session-less
   APIs are not supported).
@@ -189,20 +199,6 @@ Please note that enabling debugging options may affect system performance.
   By default it is only enabled in defconfig_arm64-dpaa2-* config.
   Toggle compilation of the ``librte_pmd_dpaa2_sec`` driver.
 
-* ``CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT`` (default ``n``)
-  Toggle display of initialization related driver messages
-
-* ``CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER`` (default ``n``)
-  Toggle display of driver runtime messages
-
-* ``CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_RX`` (default ``n``)
-  Toggle display of receive fast path run-time message
-
-* ``CONFIG_RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS``
-  By default it is set as 2048 in defconfig_arm64-dpaa2-* config.
-  It indicates Number of sessions to create in the session memory pool
-  on a single DPAA2 SEC device.
-
 Installations
 -------------
 To compile the DPAA2_SEC PMD for Linux arm64 gcc target, run the
@@ -212,3 +208,15 @@ following ``make`` command:
 
    cd <DPDK-source-directory>
    make config T=arm64-dpaa2-linuxapp-gcc install
+
+Enabling logs
+-------------
+
+For enabling logs, use the following EAL parameter:
+
+.. code-block:: console
+
+   ./your_crypto_application <EAL args> --log-level=pmd.crypto.dpaa2:<level>
+
+Using ``pmd.crypto.dpaa2`` as the log matching criterion, all Crypto PMD logs
+lower than the given logging ``level`` can be enabled.
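+
+For example, a hypothetical run (the application name, object id and log level
+are illustrative only) that blacklists one SEC device while enabling debug
+logs could be:
+
+.. code-block:: console
+
+   ./your_crypto_application <EAL args> -b "fslmc:dpseci.1" --log-level=pmd.crypto.dpaa2:8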
diff --git a/doc/guides/cryptodevs/dpaa_sec.rst b/doc/guides/cryptodevs/dpaa_sec.rst
index b98f7864..dd683894 100644
--- a/doc/guides/cryptodevs/dpaa_sec.rst
+++ b/doc/guides/cryptodevs/dpaa_sec.rst
@@ -78,10 +78,22 @@ Supported DPAA SoCs
 
 * LS1046A/LS1026A
 * LS1043A/LS1023A
 
+Whitelisting & Blacklisting
+---------------------------
+
+For blacklisting a DPAA device, the following commands can be used.
+
+ .. code-block:: console
+
+    <dpdk app> <EAL args> -b "dpaa_bus:dpaa-secX" -- ...
+    e.g. "dpaa_bus:dpaa-sec0"
+
+    or to disable all 4 SEC devices
+    -b "dpaa_bus:dpaa-sec0" -b "dpaa_bus:dpaa-sec1" -b "dpaa_bus:dpaa-sec2" -b "dpaa_bus:dpaa-sec3"
+
 Limitations
 -----------
 
-* Chained mbufs are not supported.
 * Hash followed by Cipher mode is not supported
 * Only supports the session-oriented API implementation (session-less
   APIs are not supported).
@@ -132,20 +144,6 @@ Please note that enabling debugging options may affect system performance.
   By default it is only enabled in defconfig_arm64-dpaa-* config.
   Toggle compilation of the ``librte_pmd_dpaa_sec`` driver.
 
-* ``CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT`` (default ``n``)
-  Toggle display of initialization related driver messages
-
-* ``CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER`` (default ``n``)
-  Toggle display of driver runtime messages
-
-* ``CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_RX`` (default ``n``)
-  Toggle display of receive fast path run-time message
-
-* ``CONFIG_RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS``
-  By default it is set as 2048 in defconfig_arm64-dpaa-* config.
-  It indicates Number of sessions to create in the session memory pool
-  on a single DPAA SEC device.
-
 Installations
 -------------
 To compile the DPAA_SEC PMD for Linux arm64 gcc target, run the
@@ -155,3 +153,15 @@ following ``make`` command:
 
    cd <DPDK-source-directory>
    make config T=arm64-dpaa-linuxapp-gcc install
+
+Enabling logs
+-------------
+
+For enabling logs, use the following EAL parameter:
+
+.. code-block:: console
+
+   ./your_crypto_application <EAL args> --log-level=pmd.crypto.dpaa:<level>
+
+Using ``pmd.crypto.dpaa`` as the log matching criterion, all Crypto PMD logs
+lower than the given logging ``level`` can be enabled.
diff --git a/doc/guides/cryptodevs/features/aesni_gcm.ini b/doc/guides/cryptodevs/features/aesni_gcm.ini
index 920b6b6a..b9e9c906 100644
--- a/doc/guides/cryptodevs/features/aesni_gcm.ini
+++ b/doc/guides/cryptodevs/features/aesni_gcm.ini
@@ -10,7 +10,8 @@ CPU AESNI = Y
 CPU SSE = Y
 CPU AVX = Y
 CPU AVX2 = Y
-Mbuf scatter gather = Y
+OOP SGL In LB Out = Y
+OOP LB In LB Out = Y
 ;
 ; Supported crypto algorithms of the 'aesni_gcm' crypto driver.
 ;
diff --git a/doc/guides/cryptodevs/features/aesni_mb.ini b/doc/guides/cryptodevs/features/aesni_mb.ini
index a5a45a6d..f7295745 100644
--- a/doc/guides/cryptodevs/features/aesni_mb.ini
+++ b/doc/guides/cryptodevs/features/aesni_mb.ini
@@ -24,6 +24,7 @@ AES CTR (192) = Y
 AES CTR (256) = Y
 AES DOCSIS BPI = Y
 DES CBC = Y
+3DES CBC = Y
 DES DOCSIS BPI = Y
 
 ;
@@ -37,6 +38,7 @@ SHA256 HMAC = Y
 SHA384 HMAC = Y
 SHA512 HMAC = Y
 AES XCBC MAC = Y
+AES CMAC (128) = Y
 
 ;
 ; Supported AEAD algorithms of the 'aesni_mb' crypto driver.
diff --git a/doc/guides/cryptodevs/features/ccp.ini b/doc/guides/cryptodevs/features/ccp.ini
new file mode 100644
index 00000000..4722e135
--- /dev/null
+++ b/doc/guides/cryptodevs/features/ccp.ini
@@ -0,0 +1,59 @@
+;
+; Supported features of the 'ccp' crypto poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+; +[Features] +Symmetric crypto = Y +Sym operation chaining = Y +HW Accelerated = Y + +; +; Supported crypto algorithms of the 'ccp' crypto driver. +; +[Cipher] +AES CBC (128) = Y +AES CBC (192) = Y +AES CBC (256) = Y +AES ECB (128) = Y +AES ECB (192) = Y +AES ECB (256) = Y +AES CTR (128) = Y +AES CTR (192) = Y +AES CTR (256) = Y +3DES CBC = Y + +; +; Supported authentication algorithms of the 'ccp' crypto driver. +; +[Auth] +MD5 HMAC = Y +SHA1 = Y +SHA1 HMAC = Y +SHA224 = Y +SHA224 HMAC = Y +SHA256 = Y +SHA256 HMAC = Y +SHA384 = Y +SHA384 HMAC = Y +SHA512 = Y +SHA512 HMAC = Y +AES CMAC (128) = Y +AES CMAC (192) = Y +AES CMAC (256) = Y +SHA3_224 = Y +SHA3_224 HMAC = Y +SHA3_256 = Y +SHA3_256 HMAC = Y +SHA3_384 = Y +SHA3_384 HMAC = Y +SHA3_512 = Y +SHA3_512 HMAC = Y + +; +; Supported AEAD algorithms of the 'ccp' crypto driver. +; +[AEAD] +AES GCM (128) = Y +AES GCM (192) = Y +AES GCM (256) = Y diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini index 728ce3b7..92a7ccf3 100644 --- a/doc/guides/cryptodevs/features/default.ini +++ b/doc/guides/cryptodevs/features/default.ini @@ -18,7 +18,11 @@ CPU AVX512 = CPU AESNI = CPU NEON = CPU ARM CE = -Mbuf scatter gather = +In Place SGL = +OOP SGL In SGL Out = +OOP SGL In LB Out = +OOP LB In SGL Out = +OOP LB In LB Out = ; ; Supported crypto algorithms of a default crypto driver. @@ -28,6 +32,9 @@ NULL = AES CBC (128) = AES CBC (192) = AES CBC (256) = +AES ECB (128) = +AES ECB (192) = +AES ECB (256) = AES CTR (128) = AES CTR (192) = AES CTR (256) = @@ -62,6 +69,17 @@ AES GMAC = SNOW3G UIA2 = KASUMI F9 = ZUC EIA3 = +AES CMAC (128) = +AES CMAC (192) = +AES CMAC (256) = +SHA3_224 = +SHA3_224 HMAC = +SHA3_256 = +SHA3_256 HMAC = +SHA3_384 = +SHA3_384 HMAC = +SHA3_512 = +SHA3_512 HMAC = ; ; Supported AEAD algorithms of a default crypto driver. diff --git a/doc/guides/cryptodevs/features/dpaa2_sec.ini b/doc/guides/cryptodevs/features/dpaa2_sec.ini index 68c9960d..69700df4 100644 --- a/doc/guides/cryptodevs/features/dpaa2_sec.ini +++ b/doc/guides/cryptodevs/features/dpaa2_sec.ini @@ -8,7 +8,11 @@ Symmetric crypto = Y Sym operation chaining = Y HW Accelerated = Y Protocol offload = Y -Mbuf scatter gather = Y +In Place SGL = Y +OOP SGL In SGL Out = Y +OOP SGL In LB Out = Y +OOP LB In SGL Out = Y +OOP LB In LB Out = Y ; ; Supported crypto algorithms of the 'dpaa2_sec' crypto driver. diff --git a/doc/guides/cryptodevs/features/dpaa_sec.ini b/doc/guides/cryptodevs/features/dpaa_sec.ini index 260fae72..937b621c 100644 --- a/doc/guides/cryptodevs/features/dpaa_sec.ini +++ b/doc/guides/cryptodevs/features/dpaa_sec.ini @@ -8,7 +8,11 @@ Symmetric crypto = Y Sym operation chaining = Y HW Accelerated = Y Protocol offload = Y -Mbuf scatter gather = Y +In Place SGL = Y +OOP SGL In SGL Out = Y +OOP SGL In LB Out = Y +OOP LB In SGL Out = Y +OOP LB In LB Out = Y ; ; Supported crypto algorithms of the 'dpaa_sec' crypto driver. diff --git a/doc/guides/cryptodevs/features/mrvl.ini b/doc/guides/cryptodevs/features/mrvl.ini deleted file mode 100644 index 6d2fe6aa..00000000 --- a/doc/guides/cryptodevs/features/mrvl.ini +++ /dev/null @@ -1,42 +0,0 @@ -; Supported features of the 'mrvl' crypto driver. -; -; Refer to default.ini for the full list of available PMD features. -; -[Features] -Symmetric crypto = Y -Sym operation chaining = Y - -; -; Supported crypto algorithms of a default crypto driver. 
-; -[Cipher] -AES CBC (128) = Y -AES CBC (192) = Y -AES CBC (256) = Y -AES CTR (128) = Y -AES CTR (192) = Y -AES CTR (256) = Y -3DES CBC = Y -3DES CTR = Y - -; -; Supported authentication algorithms of a default crypto driver. -; -[Auth] -MD5 = Y -MD5 HMAC = Y -SHA1 = Y -SHA1 HMAC = Y -SHA256 = Y -SHA256 HMAC = Y -SHA384 = Y -SHA384 HMAC = Y -SHA512 = Y -SHA512 HMAC = Y -AES GMAC = Y - -; -; Supported AEAD algorithms of a default crypto driver. -; -[AEAD] -AES GCM (128) = Y diff --git a/doc/guides/cryptodevs/features/mvsam.ini b/doc/guides/cryptodevs/features/mvsam.ini new file mode 100644 index 00000000..b7c105af --- /dev/null +++ b/doc/guides/cryptodevs/features/mvsam.ini @@ -0,0 +1,42 @@ +; Supported features of the 'mvsam' crypto driver. +; +; Refer to default.ini for the full list of available PMD features. +; +[Features] +Symmetric crypto = Y +Sym operation chaining = Y + +; +; Supported crypto algorithms of a default crypto driver. +; +[Cipher] +AES CBC (128) = Y +AES CBC (192) = Y +AES CBC (256) = Y +AES CTR (128) = Y +AES CTR (192) = Y +AES CTR (256) = Y +3DES CBC = Y +3DES CTR = Y + +; +; Supported authentication algorithms of a default crypto driver. +; +[Auth] +MD5 = Y +MD5 HMAC = Y +SHA1 = Y +SHA1 HMAC = Y +SHA256 = Y +SHA256 HMAC = Y +SHA384 = Y +SHA384 HMAC = Y +SHA512 = Y +SHA512 HMAC = Y +AES GMAC = Y + +; +; Supported AEAD algorithms of a default crypto driver. +; +[AEAD] +AES GCM (128) = Y diff --git a/doc/guides/cryptodevs/features/null.ini b/doc/guides/cryptodevs/features/null.ini index a9e172da..ecf5779a 100644 --- a/doc/guides/cryptodevs/features/null.ini +++ b/doc/guides/cryptodevs/features/null.ini @@ -6,7 +6,7 @@ [Features] Symmetric crypto = Y Sym operation chaining = Y -Mbuf scatter gather = Y +In Place SGL = Y ; ; Supported crypto algorithms of the 'null' crypto driver. diff --git a/doc/guides/cryptodevs/features/openssl.ini b/doc/guides/cryptodevs/features/openssl.ini index 69156586..b9c0bdcc 100644 --- a/doc/guides/cryptodevs/features/openssl.ini +++ b/doc/guides/cryptodevs/features/openssl.ini @@ -6,7 +6,9 @@ [Features] Symmetric crypto = Y Sym operation chaining = Y -Mbuf scatter gather = Y +OOP SGL In LB Out = Y +OOP LB In LB Out = Y +Asymmetric crypto = Y ; ; Supported crypto algorithms of the 'openssl' crypto driver. @@ -49,3 +51,13 @@ AES GCM (256) = Y AES CCM (128) = Y AES CCM (192) = Y AES CCM (256) = Y + +; +; Supported Asymmetric algorithms of the 'openssl' crypto driver. +; +[Asymmetric] +RSA = Y +DSA = Y +Modular Exponentiation = Y +Modular Inversion = Y +Diffie-hellman = Y diff --git a/doc/guides/cryptodevs/features/qat.ini b/doc/guides/cryptodevs/features/qat.ini index 51ed5967..29d865e0 100644 --- a/doc/guides/cryptodevs/features/qat.ini +++ b/doc/guides/cryptodevs/features/qat.ini @@ -7,7 +7,11 @@ Symmetric crypto = Y Sym operation chaining = Y HW Accelerated = Y -Mbuf scatter gather = Y +In Place SGL = Y +OOP SGL In SGL Out = Y +OOP SGL In LB Out = Y +OOP LB In SGL Out = Y +OOP LB In LB Out = Y ; ; Supported crypto algorithms of the 'qat' crypto driver. diff --git a/doc/guides/cryptodevs/features/virtio.ini b/doc/guides/cryptodevs/features/virtio.ini new file mode 100644 index 00000000..168fc174 --- /dev/null +++ b/doc/guides/cryptodevs/features/virtio.ini @@ -0,0 +1,26 @@ +; Supported features of the 'virtio' crypto driver. +; +; Refer to default.ini for the full list of available PMD features. +; +[Features] +Symmetric crypto = Y +Sym operation chaining = Y + +; +; Supported crypto algorithms of the 'virtio' crypto driver. 
+; +[Cipher] +AES CBC (128) = Y +AES CBC (192) = Y +AES CBC (256) = Y + +; +; Supported authentication algorithms of the 'virtio' crypto driver. +; +[Auth] +SHA1 HMAC = Y + +; +; Supported AEAD algorithms of the 'virtio' crypto driver. +; +[AEAD] diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst index 558c9267..e9928a4e 100644 --- a/doc/guides/cryptodevs/index.rst +++ b/doc/guides/cryptodevs/index.rst @@ -13,13 +13,15 @@ Crypto Device Drivers aesni_mb aesni_gcm armv8 + ccp dpaa2_sec dpaa_sec kasumi openssl - mrvl + mvsam null scheduler snow3g qat + virtio zuc diff --git a/doc/guides/cryptodevs/kasumi.rst b/doc/guides/cryptodevs/kasumi.rst index f56b5475..2265eee4 100644 --- a/doc/guides/cryptodevs/kasumi.rst +++ b/doc/guides/cryptodevs/kasumi.rst @@ -34,11 +34,11 @@ Installation ------------ To build DPDK with the KASUMI_PMD the user is required to download -the export controlled ``libsso_kasumi`` library, by requesting it from -`<https://networkbuilders.intel.com/network-technologies/dpdk>`_. -Once approval has been granted, the user needs to log in -`<https://networkbuilders.intel.com/dpdklogin>`_ -and click on "Kasumi Bit Stream crypto library" link, to download the library. +the export controlled ``libsso_kasumi`` library, by registering in +`Intel Resource & Design Center <https://www.intel.com/content/www/us/en/design/resource-design-center.html>`_. +Once approval has been granted, the user needs to search for +*Kasumi F8 F9 3GPP cryptographic algorithms Software Library* to download the +library or directly through this `link <https://cdrdv2.intel.com/v1/dl/getContent/575866>`_. After downloading the library, the user needs to unpack and compile it on their system before building DPDK:: diff --git a/doc/guides/cryptodevs/mrvl.rst b/doc/guides/cryptodevs/mrvl.rst deleted file mode 100644 index 6a0b08c5..00000000 --- a/doc/guides/cryptodevs/mrvl.rst +++ /dev/null @@ -1,193 +0,0 @@ -.. BSD LICENSE - Copyright(c) 2017 Marvell International Ltd. - Copyright(c) 2017 Semihalf. - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -MRVL Crypto Poll Mode Driver -============================ - -The MRVL CRYPTO PMD (**librte_crypto_mrvl_pmd**) provides poll mode crypto driver -support by utilizing MUSDK library, which provides cryptographic operations -acceleration by using Security Acceleration Engine (EIP197) directly from -user-space with minimum overhead and high performance. - -Features --------- - -MRVL CRYPTO PMD has support for: - -* Symmetric crypto -* Sym operation chaining -* AES CBC (128) -* AES CBC (192) -* AES CBC (256) -* AES CTR (128) -* AES CTR (192) -* AES CTR (256) -* 3DES CBC -* 3DES CTR -* MD5 -* MD5 HMAC -* SHA1 -* SHA1 HMAC -* SHA256 -* SHA256 HMAC -* SHA384 -* SHA384 HMAC -* SHA512 -* SHA512 HMAC -* AES GCM (128) - -Limitations ------------ - -* Hardware only supports scenarios where ICV (digest buffer) is placed just - after the authenticated data. Other placement will result in error. - -Installation ------------- - -MRVL CRYPTO PMD driver compilation is disabled by default due to external dependencies. -Currently there are two driver specific compilation options in -``config/common_base`` available: - -- ``CONFIG_RTE_LIBRTE_MRVL_CRYPTO`` (default ``n``) - - Toggle compilation of the librte_pmd_mrvl driver. - -- ``CONFIG_RTE_LIBRTE_MRVL_CRYPTO_DEBUG`` (default ``n``) - - Toggle display of debugging messages. - -For a list of prerequisites please refer to `Prerequisites` section in -:ref:`MRVL Poll Mode Driver <mrvl_poll_mode_driver>` guide. - -MRVL CRYPTO PMD requires MUSDK built with EIP197 support thus following -extra option must be passed to the library configuration script: - -.. code-block:: console - - --enable-sam - -For `crypto_safexcel.ko` module build instructions please refer -to `doc/musdk_get_started.txt`. - -Initialization --------------- - -After successfully building MRVL CRYPTO PMD, the following modules need to be -loaded: - -.. code-block:: console - - insmod musdk_uio.ko - insmod mvpp2x_sysfs.ko - insmod mv_pp_uio.ko - insmod mv_sam_uio.ko - insmod crypto_safexcel.ko - -The following parameters (all optional) are exported by the driver: - -* max_nb_queue_pairs: maximum number of queue pairs in the device (8 by default). -* max_nb_sessions: maximum number of sessions that can be created (2048 by default). -* socket_id: socket on which to allocate the device resources on. - -l2fwd-crypto example application can be used to verify MRVL CRYPTO PMD -operation: - -.. code-block:: console - - ./l2fwd-crypto --vdev=net_mrvl,iface=eth0 --vdev=crypto_mrvl -- \ - --cipher_op ENCRYPT --cipher_algo aes-cbc \ - --cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f \ - --auth_op GENERATE --auth_algo sha1-hmac \ - --auth_key 10:11:12:13:14:15:16:17:18:19:1a:1b:1c:1d:1e:1f - -Example output: - -.. code-block:: console - - [...] 
- AAD: at [0x7f253ceb80], len= - P ID 0 configuration ---- - Port mode : KR - MAC status : disabled - Link status : link up - Port speed : 10G - Port duplex : full - Port: Egress enable tx_port_num=16 qmap=0x1 - PORT: Port0 - link - P ID 0 configuration ---- - Port mode : KR - MAC status : disabled - Link status : link down - Port speed : 10G - Port duplex : full - Port: Egress enable tx_port_num=16 qmap=0x1 - Port 0, MAC address: 00:50:43:02:21:20 - - - Checking link statusdone - Port 0 Link Up - speed 0 Mbps - full-duplex - Lcore 0: RX port 0 - Allocated session pool on socket 0 - eip197: 0:0 registers: paddr: 0xf2880000, vaddr: 0x0x7f56a80000 - DMA buffer (131136 bytes) for CDR #0 allocated: paddr = 0xb0585e00, vaddr = 0x7f09384e00 - DMA buffer (131136 bytes) for RDR #0 allocated: paddr = 0xb05a5f00, vaddr = 0x7f093a4f00 - DMA buffers allocated for 2049 operations. Tokens - 256 bytes - Lcore 0: cryptodev 0 - L2FWD: lcore 1 has nothing to do - L2FWD: lcore 2 has nothing to do - L2FWD: lcore 3 has nothing to do - L2FWD: entering main loop on lcore 0 - L2FWD: -- lcoreid=0 portid=0 - L2FWD: -- lcoreid=0 cryptoid=0 - Options:- - nportmask: ffffffff - ports per lcore: 1 - refresh period : 10000 - single lcore mode: disabled - stats_printing: enabled - sessionless crypto: disabled - - Crypto chain: Input --> Encrypt --> Auth generate --> Output - - ---- Cipher information --- - Algorithm: aes-cbc - Cipher key: at [0x7f56db4e80], len=16 - 00000000: 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F | ................ - IV: at [0x7f56db4b80], len=16 - 00000000: 20 F0 63 0E 45 EB 2D 84 72 D4 13 6E 36 B5 AF FE | .c.E.-.r..n6... - - ---- Authentication information --- - Algorithm: sha1-hmac - Auth key: at [0x7f56db4d80], len=16 - 00000000: 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F | ................ - IV: at [0x7f56db4a80], len=0 - AAD: at [0x7f253ceb80], len= diff --git a/doc/guides/cryptodevs/mvsam.rst b/doc/guides/cryptodevs/mvsam.rst new file mode 100644 index 00000000..fd418c26 --- /dev/null +++ b/doc/guides/cryptodevs/mvsam.rst @@ -0,0 +1,193 @@ +.. BSD LICENSE + Copyright(c) 2017 Marvell International Ltd. + Copyright(c) 2017 Semihalf. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+MVSAM Crypto Poll Mode Driver
+=============================
+
+The MVSAM CRYPTO PMD (**librte_crypto_mvsam_pmd**) provides poll mode crypto driver
+support by utilizing the MUSDK library, which accelerates cryptographic operations
+by using the Security Acceleration Engine (EIP197) directly from
+user-space with minimum overhead and high performance.
+
+Features
+--------
+
+MVSAM CRYPTO PMD has support for:
+
+* Symmetric crypto
+* Sym operation chaining
+* AES CBC (128)
+* AES CBC (192)
+* AES CBC (256)
+* AES CTR (128)
+* AES CTR (192)
+* AES CTR (256)
+* 3DES CBC
+* 3DES CTR
+* MD5
+* MD5 HMAC
+* SHA1
+* SHA1 HMAC
+* SHA256
+* SHA256 HMAC
+* SHA384
+* SHA384 HMAC
+* SHA512
+* SHA512 HMAC
+* AES GCM (128)
+
+Limitations
+-----------
+
+* Hardware only supports scenarios where ICV (digest buffer) is placed just
+  after the authenticated data. Other placement will result in error.
+
+Installation
+------------
+
+MVSAM CRYPTO PMD driver compilation is disabled by default due to external dependencies.
+Currently there are two driver specific compilation options in
+``config/common_base`` available:
+
+- ``CONFIG_RTE_LIBRTE_MVSAM_CRYPTO`` (default ``n``)
+
+    Toggle compilation of the librte_pmd_mvsam driver.
+
+- ``CONFIG_RTE_LIBRTE_MVSAM_CRYPTO_DEBUG`` (default ``n``)
+
+    Toggle display of debugging messages.
+
+For a list of prerequisites please refer to `Prerequisites` section in
+:ref:`MVPP2 Poll Mode Driver <mvpp2_poll_mode_driver>` guide.
+
+MVSAM CRYPTO PMD requires MUSDK built with EIP197 support, thus the following
+extra option must be passed to the library configuration script:
+
+.. code-block:: console
+
+   --enable-sam
+
+For `crypto_safexcel.ko` module build instructions please refer
+to `doc/musdk_get_started.txt`.
+
+Initialization
+--------------
+
+After successfully building MVSAM CRYPTO PMD, the following modules need to be
+loaded:
+
+.. code-block:: console
+
+   insmod musdk_uio.ko
+   insmod mvpp2x_sysfs.ko
+   insmod mv_pp_uio.ko
+   insmod mv_sam_uio.ko
+   insmod crypto_safexcel.ko
+
+The following parameters (all optional) are exported by the driver:
+
+* max_nb_queue_pairs: maximum number of queue pairs in the device (8 by default).
+* max_nb_sessions: maximum number of sessions that can be created (2048 by default).
+* socket_id: socket on which to allocate the device resources.
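+
+As a purely illustrative example (the values are assumptions, not
+recommendations), these parameters could be combined in a single vdev string:
+
+.. code-block:: console
+
+   --vdev=crypto_mvsam,max_nb_queue_pairs=4,max_nb_sessions=1024,socket_id=0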
+
+The l2fwd-crypto example application can be used to verify MVSAM CRYPTO PMD
+operation:
+
+.. code-block:: console
+
+   ./l2fwd-crypto --vdev=eth_mvpp2,iface=eth0 --vdev=crypto_mvsam -- \
+     --cipher_op ENCRYPT --cipher_algo aes-cbc \
+     --cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f \
+     --auth_op GENERATE --auth_algo sha1-hmac \
+     --auth_key 10:11:12:13:14:15:16:17:18:19:1a:1b:1c:1d:1e:1f
+
+Example output:
+
+.. code-block:: console
+
+   [...]
+   AAD: at [0x7f253ceb80], len=
+   P ID 0 configuration ----
+   Port mode               : KR
+   MAC status              : disabled
+   Link status             : link up
+   Port speed              : 10G
+   Port duplex             : full
+   Port: Egress enable tx_port_num=16 qmap=0x1
+   PORT: Port0 - link
+   P ID 0 configuration ----
+   Port mode               : KR
+   MAC status              : disabled
+   Link status             : link down
+   Port speed              : 10G
+   Port duplex             : full
+   Port: Egress enable tx_port_num=16 qmap=0x1
+   Port 0, MAC address: 00:50:43:02:21:20
+
+
+   Checking link statusdone
+   Port 0 Link Up - speed 0 Mbps - full-duplex
+   Lcore 0: RX port 0
+   Allocated session pool on socket 0
+   eip197: 0:0 registers: paddr: 0xf2880000, vaddr: 0x0x7f56a80000
+   DMA buffer (131136 bytes) for CDR #0 allocated: paddr = 0xb0585e00, vaddr = 0x7f09384e00
+   DMA buffer (131136 bytes) for RDR #0 allocated: paddr = 0xb05a5f00, vaddr = 0x7f093a4f00
+   DMA buffers allocated for 2049 operations. Tokens - 256 bytes
+   Lcore 0: cryptodev 0
+   L2FWD: lcore 1 has nothing to do
+   L2FWD: lcore 2 has nothing to do
+   L2FWD: lcore 3 has nothing to do
+   L2FWD: entering main loop on lcore 0
+   L2FWD:  -- lcoreid=0 portid=0
+   L2FWD:  -- lcoreid=0 cryptoid=0
+   Options:-
+   nportmask: ffffffff
+   ports per lcore: 1
+   refresh period : 10000
+   single lcore mode: disabled
+   stats_printing: enabled
+   sessionless crypto: disabled
+
+   Crypto chain: Input --> Encrypt --> Auth generate --> Output
+
+   ---- Cipher information ---
+   Algorithm: aes-cbc
+   Cipher key: at [0x7f56db4e80], len=16
+   00000000: 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F | ................
+   IV: at [0x7f56db4b80], len=16
+   00000000: 20 F0 63 0E 45 EB 2D 84 72 D4 13 6E 36 B5 AF FE | .c.E.-.r..n6...
+
+   ---- Authentication information ---
+   Algorithm: sha1-hmac
+   Auth key: at [0x7f56db4d80], len=16
+   00000000: 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F | ................
+   IV: at [0x7f56db4a80], len=0
+   AAD: at [0x7f253ceb80], len=
diff --git a/doc/guides/cryptodevs/openssl.rst b/doc/guides/cryptodevs/openssl.rst
index 427fc807..bdc30f66 100644
--- a/doc/guides/cryptodevs/openssl.rst
+++ b/doc/guides/cryptodevs/openssl.rst
@@ -80,6 +80,7 @@ crypto processing.
 
 Test name is cryptodev_openssl_autotest.
 For performance test cryptodev_openssl_perftest can be used.
+For asymmetric crypto operations testing, run cryptodev_openssl_asym_autotest.
 
 To verify real traffic l2fwd-crypto example can be used with this command:
 
diff --git a/doc/guides/cryptodevs/overview.rst b/doc/guides/cryptodevs/overview.rst
index b3cb6cae..3f776f07 100644
--- a/doc/guides/cryptodevs/overview.rst
+++ b/doc/guides/cryptodevs/overview.rst
@@ -11,6 +11,33 @@ Supported Feature Flags
 
 .. include:: overview_feature_table.txt
 
+.. Note::
+
+   - "In Place SGL" feature flag stands for "In place Scatter-gather list",
+     which means that an input buffer can consist of multiple segments,
+     with the operation being done in-place (input address = output address).
+
+   - "OOP SGL In SGL Out" feature flag stands for
+     "Out-of-place Scatter-gather list Input, Scatter-gather list Output",
+     which means the PMD supports different scatter-gather styled input and output buffers
+     (i.e. both can consist of multiple segments).
+
+   - "OOP SGL In LB Out" feature flag stands for
+     "Out-of-place Scatter-gather list Input, Linear Buffers Output",
+     which means the PMD supports input from scatter-gather styled buffers,
+     outputting linear buffers (i.e. a single segment).
+ + - "OOP LB In SGL Out" feature flag stands for + "Out-of-place Linear Buffers Input, Scatter-gather list Output", + which means PMD supports input from linear buffer, outputting + scatter-gathered styled buffers. + + - "OOP LB In LB Out" feature flag stands for + "Out-of-place Linear Buffers Input, Scatter-gather list Output", + which means that Out-of-place operation is supported, + with linear input and output buffers. + + Supported Cipher Algorithms --------------------------- diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst index 8c8fefaa..bdc58eb2 100644 --- a/doc/guides/cryptodevs/qat.rst +++ b/doc/guides/cryptodevs/qat.rst @@ -68,12 +68,32 @@ Limitations * Queue pairs are not thread-safe (that is, within a single queue pair, RX and TX from different lcores is not supported). -Installation ------------- +Extra notes on KASUMI F9 +------------------------ + +When using KASUMI F9 authentication algorithm, the input buffer must be +constructed according to the 3GPP KASUMI specifications (section 4.4, page 13): +`<http://cryptome.org/3gpp/35201-900.pdf>`_. +Input buffer has to have COUNT (4 bytes), FRESH (4 bytes), MESSAGE and DIRECTION (1 bit) +concatenated. After the DIRECTION bit, a single '1' bit is appended, followed by +between 0 and 7 '0' bits, so that the total length of the buffer is multiple of 8 bits. +Note that the actual message can be any length, specified in bits. + +Once this buffer is passed this way, when creating the crypto operation, +length of data to authenticate (op.sym.auth.data.length) must be the length +of all the items described above, including the padding at the end. +Also, offset of data to authenticate (op.sym.auth.data.offset) +must be such that points at the start of the COUNT bytes. + + +Building the DPDK QAT cryptodev PMD +----------------------------------- + -To enable QAT in DPDK, follow the instructions for modifying the compile-time +To enable QAT crypto in DPDK, follow the instructions for modifying the compile-time configuration file as described `here <http://dpdk.org/doc/guides/linux_gsg/build_dpdk.html>`_. + Quick instructions are as follows: .. code-block:: console @@ -81,29 +101,95 @@ Quick instructions are as follows: cd to the top-level DPDK directory make config T=x86_64-native-linuxapp-gcc sed -i 's,\(CONFIG_RTE_LIBRTE_PMD_QAT\)=n,\1=y,' build/.config + sed -i 's,\(CONFIG_RTE_LIBRTE_PMD_QAT_SYM\)=n,\1=y,' build/.config make -To use the DPDK QAT PMD an SRIOV-enabled QAT kernel driver is required. The VF -devices exposed by this driver will be used by the QAT PMD. The devices and -available kernel drivers and device ids are : + +.. _qat_kernel_installation: + +Dependency on the QAT kernel driver +----------------------------------- + +To use the QAT PMD an SRIOV-enabled QAT kernel driver is required. The VF +devices created and initialised by this driver will be used by the QAT PMD. + +Instructions for installation are below, but first an explanation of the +relationships between the PF/VF devices and the PMDs visible to +DPDK applications. + + +Acceleration services - cryptography and compression - are provided to DPDK +applications via PMDs which register to implement the corresponding +cryptodev and compressdev APIs. + +Each QuickAssist VF device can expose one cryptodev PMD and/or one compressdev PMD. +These QAT PMDs share the same underlying device and pci-mgmt code, but are +enumerated independently on their respective APIs and appear as independent +devices to applications. + +.. 
+.. Note::
+
+   Each VF can only be used by one DPDK process. It is not possible to share
+   the same VF across multiple processes, even if these processes are using
+   different acceleration services.
+
+   Conversely one DPDK process can use one or more QAT VFs and can expose both
+   cryptodev and compressdev instances on each of those VFs.
+
+
+Device and driver naming
+------------------------
+
+* The qat cryptodev driver name is "crypto_qat".
+  The rte_cryptodev_devices_get() returns the devices exposed by this driver.
+
+* Each qat crypto device has a unique name, in format
+  <pci bdf>_<service>, e.g. "0000:41:01.0_qat_sym".
+  This name can be passed to rte_cryptodev_get_dev_id() to get the device_id.
+
+.. Note::
+
+   The qat crypto driver name is passed to the dpdk-test-crypto-perf tool in the --devtype parameter.
+
+   The qat crypto device name is in the format of the slave parameter passed to the crypto scheduler.
+
+* The qat compressdev driver name is "comp_qat".
+  The rte_compressdev_devices_get() returns the devices exposed by this driver.
+
+* Each qat compression device has a unique name, in format
+  <pci bdf>_<service>, e.g. "0000:41:01.0_qat_comp".
+  This name can be passed to rte_compressdev_get_dev_id() to get the device_id.
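+
+For example, a hypothetical throughput test (the PCI address and the remaining
+options are illustrative only) could select the QAT cryptodev by its driver
+name as follows:
+
+.. code-block:: console
+
+    ./dpdk-test-crypto-perf -l 0,1 -w 0000:41:01.0 -- --devtype crypto_qat \
+    --ptest throughput --optype cipher-only --cipher-algo aes-cbc \
+    --cipher-op encrypt --cipher-key-sz 16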
+
+Available kernel drivers
+------------------------
+
+Kernel drivers for each device are listed in the following table. Scroll right
+to check that the driver and device support the service you require.
+
 .. _table_qat_pmds_drivers:
 
 .. table:: QAT device generations, devices and drivers
 
-   +-----+----------+--------+---------------+------------+--------+------+--------+--------+
-   | Gen | Device   | Driver | Kernel Module | Pci Driver | PF Did | #PFs | Vf Did | VFs/PF |
-   +=====+==========+========+===============+============+========+======+========+========+
-   | 1   | DH895xCC | 01.org | icp_qa_al     | n/a        | 435    | 1    | 443    | 32     |
-   +-----+----------+--------+---------------+------------+--------+------+--------+--------+
-   | 1   | DH895xCC | 4.4+   | qat_dh895xcc  | dh895xcc   | 435    | 1    | 443    | 32     |
-   +-----+----------+--------+---------------+------------+--------+------+--------+--------+
-   | 2   | C62x     | 4.5+   | qat_c62x      | c6xx       | 37c8   | 3    | 37c9   | 16     |
-   +-----+----------+--------+---------------+------------+--------+------+--------+--------+
-   | 2   | C3xxx    | 4.5+   | qat_c3xxx     | c3xxx      | 19e2   | 1    | 19e3   | 16     |
-   +-----+----------+--------+---------------+------------+--------+------+--------+--------+
-   | 2   | D15xx    | p      | qat_d15xx     | d15xx      | 6f54   | 1    | 6f55   | 16     |
-   +-----+----------+--------+---------------+------------+--------+------+--------+--------+
+   +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
+   | Gen | Device   | Driver/ver    | Kernel Module | Pci Driver | PF Did | #PFs | VF Did | VFs/PF | cryptodev | compressdev |
+   +=====+==========+===============+===============+============+========+======+========+========+===========+=============+
+   | 1   | DH895xCC | linux/4.4+    | qat_dh895xcc  | dh895xcc   | 435    | 1    | 443    | 32     | Yes       | No          |
+   +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
+   | "   | "        | 01.org/4.2.0+ | "             | "          | "      | "    | "      | "      | Yes       | No          |
+   +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
+   | 2   | C62x     | linux/4.5+    | qat_c62x      | c6xx       | 37c8   | 3    | 37c9   | 16     | Yes       | No          |
+   +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
+   | "   | "        | 01.org/4.2.0+ | "             | "          | "      | "    | "      | "      | Yes       | Yes         |
+   +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
+   | 2   | C3xxx    | linux/4.5+    | qat_c3xxx     | c3xxx      | 19e2   | 1    | 19e3   | 16     | Yes       | No          |
+   +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
+   | "   | "        | 01.org/4.2.0+ | "             | "          | "      | "    | "      | "      | Yes       | Yes         |
+   +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
+   | 2   | D15xx    | p             | qat_d15xx     | d15xx      | 6f54   | 1    | 6f55   | 16     | Yes       | No          |
+   +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
 
 The ``Driver`` column indicates either the Linux kernel version in which
 
@@ -196,9 +282,9 @@ Consult the *Getting Started Guide* at the same URL for further information.
 
 The steps below assume you are:
 
-* Building on a platform with one ``DH895xCC`` device.
-* Using package ``qatmux.l.2.3.0-34.tgz``.
-* On Fedora21 kernel ``3.17.4-301.fc21.x86_64``.
+* Building on a platform with one ``C62x`` device.
+* Using package ``qat1.7.l.4.2.0-000xx.tar.gz``.
+* On Fedora26 kernel ``4.11.11-300.fc26.x86_64``.
 
 In the BIOS ensure that SRIOV is enabled and VT-d is disabled.
 
 Uninstall any existing QAT driver, for example by running:
 
 * ``./installer.sh uninstall`` in the directory where originally installed.
 
-* or ``rmmod qat_dh895xcc; rmmod intel_qat``.
 
 Build and install the SRIOV-enabled QAT driver::
 
     mkdir /QAT
     cd /QAT
 
-    # Copy qatmux.l.2.3.0-34.tgz to this location
-    tar zxof qatmux.l.2.3.0-34.tgz
+    # Copy the package to this location and unpack
+    tar zxof qat1.7.l.4.2.0-000xx.tar.gz
+
+    ./configure --enable-icp-sriov=host
+    make install
+
+Confirm the driver is correctly installed and is using firmware version 4.2.0::
+
+    cat /sys/kernel/debug/qat<your device type and bdf>/version/fw
 
-    export ICP_WITHOUT_IOMMU=1
-    ./installer.sh install QAT1.6 host
 
-You can use ``cat /proc/icp_dh895xcc_dev0/version`` to confirm the driver is correctly installed.
-You can use ``lspci -d:443`` to confirm the presence of the 32 VF devices available per ``DH895xCC`` device.
+Confirm the presence of 48 VF devices - 16 per PF::
+
+    lspci -d:37c9
+
 To complete the installation - follow instructions in `Binding the available VFs to the DPDK UIO driver`_.
 
@@ -261,6 +356,7 @@ To complete the installation - follow instructions in `Binding the available VFs
 
     sudo yum install zlib-devel
     sudo yum install openssl-devel
+    sudo yum install libudev-devel
 
 .. Note::
 
@@ -343,19 +439,28 @@ Another way to bind the VFs to the DPDK UIO driver is by using the
 
     ./usertools/dpdk-devbind.py -b igb_uio 0000:03:01.1
 
-Extra notes on KASUMI F9
-------------------------
+Debugging
+---------
 
-When using KASUMI F9 authentication algorithm, the input buffer must be
-constructed according to the 3GPP KASUMI specifications (section 4.4, page 13):
-`<http://cryptome.org/3gpp/35201-900.pdf>`_.
-Input buffer has to have COUNT (4 bytes), FRESH (4 bytes), MESSAGE and DIRECTION (1 bit)
-concatenated. After the DIRECTION bit, a single '1' bit is appended, followed by
-between 0 and 7 '0' bits, so that the total length of the buffer is multiple of 8 bits.
-Note that the actual message can be any length, specified in bits.
+There are two sets of trace available via the dynamic logging feature:
 
-Once this buffer is passed this way, when creating the crypto operation,
-length of data to authenticate (op.sym.auth.data.length) must be the length
-of all the items described above, including the padding at the end.
-Also, offset of data to authenticate (op.sym.auth.data.offset)
-must be such that points at the start of the COUNT bytes.
+* pmd.qat_dp exposes trace on the data-path.
+* pmd.qat_general exposes all other trace.
+
+pmd.qat exposes both sets of traces.
+They can be enabled using the log-level option (where 8=maximum log level) on
+the process cmdline, e.g. using any of the following::
+
+    --log-level="pmd.qat_general,8"
+    --log-level="pmd.qat_dp,8"
+    --log-level="pmd.qat,8"
+
+.. Note::
+
+    The global RTE_LOG_DP_LEVEL overrides data-path trace, so it must be set to
+    RTE_LOG_DEBUG to see all the trace. This variable is in config/rte_config.h
+    for the meson build and config/common_base for gnu make.
+    Also the dynamic global log level overrides both sets of trace, so e.g. no
+    QAT trace would display in this case::
+
+	--log-level="7" --log-level="pmd.qat_general,8"
diff --git a/doc/guides/cryptodevs/scheduler.rst b/doc/guides/cryptodevs/scheduler.rst
index d67894d5..a754a27e 100644
--- a/doc/guides/cryptodevs/scheduler.rst
+++ b/doc/guides/cryptodevs/scheduler.rst
@@ -71,6 +71,11 @@ two calls:
   mode parameter values are specified in the "Cryptodev Scheduler Modes
   Overview" section.
 
+* mode_param: Specify a mode-specific parameter. Some scheduling modes
+  may be initialized with specific parameters other than the default ones,
+  such as the **threshold** packet size of **packet-size-distr** mode. This
+  parameter is used to pass such values.
+
 * ordering: Specify the status of the crypto operations ordering feature.
   The value of this parameter can be "enable" or "disable". This feature
   is disabled by default.
 
@@ -132,7 +137,12 @@ operation:
   **option_type** must be **CDEV_SCHED_OPTION_THRESHOLD** and **option** should
   point to a rte_cryptodev_scheduler_threshold_option structure filled with
   appropriate threshold value. Please NOTE this threshold has be a power-of-2
-  unsigned integer.
+  unsigned integer. It is possible to use the **mode_param** initialization
+  parameter to achieve the same purpose. For example:
+
+  ... --vdev "crypto_scheduler,mode=packet-size-distr,mode_param=threshold:512" ...
+
+  The above parameter overrides the threshold value, setting it to 512.
 
 * **CDEV_SCHED_MODE_FAILOVER:**
 
diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index 24b4f661..7cba712c 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -33,11 +33,11 @@ Installation
 ------------
 
 To build DPDK with the SNOW3G_PMD the user is required to download
-the export controlled ``libsso_snow3g`` library, by requesting it from
-`<https://networkbuilders.intel.com/network-technologies/dpdk>`_.
-Once approval has been granted, the user needs to log in
-`<https://networkbuilders.intel.com/dpdklogin>`_
-and click on "Snow3G Bit Stream crypto library" link, to download the library.
+the export controlled ``libsso_snow3g`` library, by registering in
+`Intel Resource & Design Center <https://www.intel.com/content/www/us/en/design/resource-design-center.html>`_.
+Once approval has been granted, the user needs to search for
+*Snow3G F8 F9 3GPP cryptographic algorithms Software Library* to download the
+library or directly through this `link <https://cdrdv2.intel.com/v1/dl/getContent/575867>`_.
 
 After downloading the library, the user needs to unpack and compile it
 on their system before building DPDK::
 
diff --git a/doc/guides/cryptodevs/virtio.rst b/doc/guides/cryptodevs/virtio.rst
new file mode 100644
index 00000000..f3aa7c65
--- /dev/null
+++ b/doc/guides/cryptodevs/virtio.rst
@@ -0,0 +1,117 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+
+Virtio Crypto Poll Mode Driver
+==============================
+
+The virtio crypto PMD provides poll mode driver support for the virtio crypto
+device.
+
+Features
+--------
+
+The virtio crypto PMD has support for:
+
+Cipher algorithms:
+
+* ``RTE_CRYPTO_CIPHER_AES_CBC``
+
+Hash algorithms:
+
+* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+
+Limitations
+-----------
+
+* Only supports the session-oriented API implementation (session-less APIs are
+  not supported).
+* Only supports modern mode since virtio crypto conforms to virtio-1.0.
+* Only has two types of queues: data queue and control queue. These two queues
+  only support indirect buffers to communicate with the virtio backend.
+* Only supports the AES_CBC cipher-only algorithm and the AES_CBC with
+  HMAC_SHA1 chaining algorithm, since only these algorithms are supported
+  by the vhost crypto backend.
+* Does not support Link State interrupt.
+* Does not support runtime configuration.
+
+Virtio crypto PMD Rx/Tx Callbacks
+---------------------------------
+
+Rx callbacks:
+
+* ``virtio_crypto_pkt_rx_burst``
+
+Tx callbacks:
+
+* ``virtio_crypto_pkt_tx_burst``
+
+Installation
+------------
+
+Quick instructions are as follows:
+
+Firstly, run the DPDK vhost crypto sample as the server side and build QEMU
+with vhost crypto enabled.
+QEMU can then be started using the following parameters:
+
+.. code-block:: console
+
+    qemu-system-x86_64 \
+    [...] \
+        -chardev socket,id=charcrypto0,path=/path/to/your/socket \
+        -object cryptodev-vhost-user,id=cryptodev0,chardev=charcrypto0 \
+        -device virtio-crypto-pci,id=crypto0,cryptodev=cryptodev0
+    [...]
+
+Secondly, bind the uio_pci_generic driver to the virtio-crypto device.
+For example, 0000:00:04.0 is the domain, bus, device and function
+number of the virtio-crypto device:
+
+.. code-block:: console
+
+    modprobe uio_pci_generic
+    echo -n 0000:00:04.0 > /sys/bus/pci/drivers/virtio-pci/unbind
+    echo "1af4 1054" > /sys/bus/pci/drivers/uio_pci_generic/new_id
+
+Finally, the front-end virtio crypto PMD can be built and installed:
+
+.. code-block:: console
+
+    cd to the top-level DPDK directory
+    sed -i 's,\(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO\)=n,\1=y,' config/common_base
+    make config T=x86_64-native-linuxapp-gcc
+    make install T=x86_64-native-linuxapp-gcc
+
+Tests
+-----
+
+The unit test cases can be tested as below:
+
+.. code-block:: console
+
+    reserve enough huge pages
+    cd to the top-level DPDK directory
+    export RTE_TARGET=x86_64-native-linuxapp-gcc
+    export RTE_SDK=`pwd`
+    cd to test/test
+    type the command "make" to compile
+    run the tests with "./test"
+    type the command "cryptodev_virtio_autotest" to test
+
+The performance can be tested as below:
+
+.. code-block:: console
+
+    reserve enough huge pages
+    cd to the top-level DPDK directory
+    export RTE_TARGET=x86_64-native-linuxapp-gcc
+    export RTE_SDK=`pwd`
+    cd to app/test-crypto-perf
+    type the command "make" to compile
+    run the tests with the following command:
+
+    ./dpdk-test-crypto-perf -l 0,1 -- --devtype crypto_virtio \
+    --ptest throughput --optype cipher-then-auth --cipher-algo aes-cbc \
+    --cipher-op encrypt --cipher-key-sz 16 --auth-algo sha1-hmac \
+    --auth-op generate --auth-key-sz 64 --digest-sz 12 \
+    --total-ops 100000000 --burst-sz 64 --buffer-sz 2048
diff --git a/doc/guides/cryptodevs/zuc.rst b/doc/guides/cryptodevs/zuc.rst
index e226ef9d..e3898996 100644
--- a/doc/guides/cryptodevs/zuc.rst
+++ b/doc/guides/cryptodevs/zuc.rst
@@ -35,11 +35,11 @@ Installation
 ------------
 
 To build DPDK with the ZUC_PMD the user is required to download
-the export controlled ``libsso_zuc`` library, by requesting it from
-`<https://networkbuilders.intel.com/network-technologies/dpdk>`_.
-Once approval has been granted, the user needs to log in
-`<https://networkbuilders.intel.com/dpdklogin>`_
-and click on "ZUC Library" link, to download the library.
+the export controlled ``libsso_zuc`` library, by registering in
+`Intel Resource & Design Center <https://www.intel.com/content/www/us/en/design/resource-design-center.html>`_.
+Once approval has been granted, the user needs to search for
+*ZUC 128-EAA3 and 128-EIA3 3GPP cryptographic algorithms Software Library* to download the
+library or directly through this `link <https://cdrdv2.intel.com/v1/dl/getContent/575868>`_.
 
 After downloading the library, the user needs to unpack and compile it
 on their system before building DPDK::
 
diff --git a/doc/guides/eventdevs/dpaa2.rst b/doc/guides/eventdevs/dpaa2.rst
index 5b8da95d..ad94f24b 100644
--- a/doc/guides/eventdevs/dpaa2.rst
+++ b/doc/guides/eventdevs/dpaa2.rst
@@ -129,7 +129,19 @@ Example:
 
 .. code-block:: console
 
-    ./your_eventdev_application --vdev="event_dpaa2"
+    ./your_eventdev_application --vdev="event_dpaa2"
+
+Enabling logs
+-------------
+
+For enabling logs, use the following EAL parameter:
+
+.. code-block:: console
+
+   ./your_eventdev_application <EAL args> --log-level=pmd.event.dpaa2,<level>
+
+Using ``pmd.event.dpaa2`` as the log matching criterion, all Event PMD logs
+lower than the given logging ``level`` can be enabled.
 
 Limitations
 -----------
 
diff --git a/doc/guides/eventdevs/octeontx.rst b/doc/guides/eventdevs/octeontx.rst
index 4fabe54f..18cfc7a9 100644
--- a/doc/guides/eventdevs/octeontx.rst
+++ b/doc/guides/eventdevs/octeontx.rst
@@ -28,6 +28,9 @@ Features of the OCTEONTX SSOVF PMD are:
 - Open system with configurable amount of outstanding events
 - HW accelerated dequeue timeout support to enable power management
 - SR-IOV VF
+- Support for HW managed event timers through TIMVF, with high precision and
+  a time granularity of 1us.
+- Up to 64 event timer adapters.
 
 Supported OCTEONTX SoCs
 -----------------------
 
@@ -36,7 +39,7 @@ Supported OCTEONTX SoCs
 Prerequisites
 -------------
 
-See :doc: `../platform/octeontx` for setup information.
+See :doc:`../platform/octeontx` for setup information.
 
 Pre-Installation Configuration
 ------------------------------
 
@@ -93,7 +96,17 @@ The tests are run once the vdev creation is successfully complete.
 
 .. code-block:: console
 
-   --vdev="event_octeontx,self_test=1"
+   --vdev="event_octeontx,selftest=1"
+
+
+Enable TIMvf stats
+------------------
+
+TIMvf stats can be enabled by using this option; by default the stats are
+disabled.
+
Limitations -----------
diff --git a/doc/guides/eventdevs/octeontx.rst b/doc/guides/eventdevs/octeontx.rst
index 4fabe54f..18cfc7a9 100644 --- a/doc/guides/eventdevs/octeontx.rst +++ b/doc/guides/eventdevs/octeontx.rst
@@ -28,6 +28,9 @@ Features of the OCTEONTX SSOVF PMD are: - Open system with configurable amount of outstanding events - HW accelerated dequeue timeout support to enable power management - SR-IOV VF
+- HW-managed event timer support through TIMVF, with high precision and a
+  time granularity of 1 us.
+- Up to 64 event timer adapters.
Supported OCTEONTX SoCs -----------------------
@@ -36,7 +39,7 @@ Prerequisites -------------
-See :doc: `../platform/octeontx` for setup information.
+See :doc:`../platform/octeontx` for setup information.
Pre-Installation Configuration ------------------------------
@@ -93,7 +96,17 @@ The tests are run once the vdev creation is successfully complete. .. code-block:: console
-    --vdev="event_octeontx,self_test=1"
+    --vdev="event_octeontx,selftest=1"
+
+
+Enable TIMvf stats
+------------------
+TIMvf stats can be enabled using this option; by default the stats are
+disabled.
+
+.. code-block:: console
+
+   --vdev="event_octeontx,timvf_stats=1"
Limitations
@@ -110,3 +123,19 @@ Rx adapter support When eth_octeontx is used as Rx adapter, event schedule type ``RTE_SCHED_TYPE_PARALLEL`` is not supported.
+
+Event timer adapter support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When timvf is used as Event timer adapter, the clock source mapping is as
+follows:
+
+.. code-block:: console
+
+   RTE_EVENT_TIMER_ADAPTER_CPU_CLK = TIM_CLK_SRC_SCLK
+   RTE_EVENT_TIMER_ADAPTER_EXT_CLK0 = TIM_CLK_SRC_GPIO
+   RTE_EVENT_TIMER_ADAPTER_EXT_CLK1 = TIM_CLK_SRC_GTI
+   RTE_EVENT_TIMER_ADAPTER_EXT_CLK2 = TIM_CLK_SRC_PTP
+
+When timvf is used as Event timer adapter, event schedule type
+``RTE_SCHED_TYPE_PARALLEL`` is not supported.
diff --git a/doc/guides/faq/faq.rst b/doc/guides/faq/faq.rst
index c1132fb4..f19c1389 100644 --- a/doc/guides/faq/faq.rst +++ b/doc/guides/faq/faq.rst
@@ -62,19 +62,16 @@ the wrong socket, the application simply will not start.
On application startup, there is a lot of EAL information printed. Is there any way to reduce this? ---------------------------------------------------------------------------------------------------
-Yes, the option ``--log-level=`` accepts one of these numbers:
-
-.. code-block:: c
-
-   #define RTE_LOG_EMERG 1U /* System is unusable. */
-   #define RTE_LOG_ALERT 2U /* Action must be taken immediately. */
-   #define RTE_LOG_CRIT 3U /* Critical conditions. */
-   #define RTE_LOG_ERR 4U /* Error conditions. */
-   #define RTE_LOG_WARNING 5U /* Warning conditions. */
-   #define RTE_LOG_NOTICE 6U /* Normal but significant condition. */
-   #define RTE_LOG_INFO 7U /* Informational. */
-   #define RTE_LOG_DEBUG 8U /* Debug-level messages. */
-
+Yes, the option ``--log-level=`` accepts either symbolic names or numbers:
+
+1. emergency
+2. alert
+3. critical
+4. error
+5. warning
+6. notice
+7. info
+8. debug
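+
+For example, to show only warnings and more severe messages (the application
+name is a placeholder):
+
+.. code-block:: console
+
+   ./your_application <EAL args> --log-level=warning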
How can I tune my network application to achieve lower latency? ---------------------------------------------------------------
diff --git a/doc/guides/freebsd_gsg/build_sample_apps.rst b/doc/guides/freebsd_gsg/build_sample_apps.rst
index 90f05023..a085b618 100644 --- a/doc/guides/freebsd_gsg/build_sample_apps.rst +++ b/doc/guides/freebsd_gsg/build_sample_apps.rst
@@ -128,6 +128,9 @@ The EAL options for FreeBSD are as follows: * ``--proc-type``: The type of process instance.
+* ``-m MB``:
+  Memory to allocate from hugepages, regardless of processor socket.
+
Other options, which are specific to Linux and are not supported under FreeBSD, are as follows: * ``socket-mem``:
@@ -142,10 +145,6 @@ Other options, specific to Linux and are not supported under FreeBSD are as foll * ``--file-prefix``: The prefix text used for hugepage filenames.
-* ``-m MB``:
-  Memory to allocate from hugepages, regardless of processor socket.
-  It is recommended that ``--socket-mem`` be used instead of this option.
-
The ``-c`` or ``-l`` option is mandatory; the others are optional. Copy the DPDK application binary to your target, then run the application
diff --git a/doc/guides/howto/rte_flow.rst b/doc/guides/howto/rte_flow.rst
index 4a27c995..caa4e1af 100644 --- a/doc/guides/howto/rte_flow.rst +++ b/doc/guides/howto/rte_flow.rst
@@ -1,33 +1,5 @@
-.. BSD LICENSE
- Copyright(c) 2017 Mellanox Corporation. All rights reserved.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Mellanox Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2017 Mellanox Technologies, Ltd
Generic flow API - examples ===========================
@@ -276,7 +248,7 @@ Code
    /* end the pattern array */
    pattern[2].type = RTE_FLOW_ITEM_TYPE_END;
-   /* create the drop action */
+   /* create the queue action */
    actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
    actions[0].conf = &queue;
    actions[1].type = RTE_FLOW_ACTION_TYPE_END;
diff --git a/doc/guides/howto/virtio_user_for_container_networking.rst b/doc/guides/howto/virtio_user_for_container_networking.rst
index aa68b531..476ce3a6 100644 --- a/doc/guides/howto/virtio_user_for_container_networking.rst +++ b/doc/guides/howto/virtio_user_for_container_networking.rst
@@ -109,7 +109,8 @@ We have below limitations in this solution: * Cannot work with --no-huge option. Currently, DPDK uses anonymous mapping under this option which cannot be reopened to share with vhost backend. * Cannot work when there are more than VHOST_MEMORY_MAX_NREGIONS(8) hugepages.
-  In another word, do not use 2MB hugepage so far.
+  If you have more regions (especially when 2MB hugepages are used), the
+  ``--single-file-segments`` option can help to reduce the number of shared files.
* Applications should not use file name like HUGEFILE_FMT ("%smap_%d"). That will bring confusion when sharing hugepage files with backend by name. * Root privilege is a must. DPDK resolves physical addresses of hugepages
diff --git a/doc/guides/index.rst b/doc/guides/index.rst
index d60529dd..8a9ed65c 100644 --- a/doc/guides/index.rst +++ b/doc/guides/index.rst
@@ -17,7 +17,9 @@ DPDK documentation nics/index bbdevs/index cryptodevs/index + compressdevs/index eventdevs/index + rawdevs/index mempool/index platform/index contributing/index
diff --git a/doc/guides/linux_gsg/build_sample_apps.rst b/doc/guides/linux_gsg/build_sample_apps.rst
index 39a47b71..332424e0 100644 --- a/doc/guides/linux_gsg/build_sample_apps.rst +++ b/doc/guides/linux_gsg/build_sample_apps.rst
@@ -110,7 +110,13 @@ The EAL options are as follows: ``[domain:]bus:devid.func`` values. Cannot be used with ``-b`` option.
* ``--socket-mem``:
-  Memory to allocate from hugepages on specific sockets.
+  Memory to allocate from hugepages on specific sockets. In dynamic memory mode,
+  this memory will also be pinned (i.e. not released back to the system until
+  the application closes).
+
+* ``--socket-limit``:
+  Limit maximum memory available for allocation on each socket. Does not support
+  legacy memory mode.
* ``-d``: Add a driver or driver directory to be loaded.
@@ -148,6 +154,14 @@ The EAL options are as follows: * ``--vfio-intr``: Specify interrupt type to be used by VFIO (has no effect if VFIO is not used).
+* ``--legacy-mem``:
+  Run DPDK in legacy memory mode (disable memory reserve/unreserve at runtime,
+  but provide more IOVA-contiguous memory).
+
+* ``--single-file-segments``:
+  Store memory segments in fewer files (dynamic memory mode only - does not
+  affect legacy memory mode).
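+
+As an illustration, the memory options above combine as follows (a sketch; the
+core list, channel count, application name, and per-socket amounts are
+placeholders):
+
+.. code-block:: console
+
+   ./your_application -l 0-3 -n 4 --legacy-mem --socket-mem=1024,1024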
The ``-c`` or ``-l`` option is mandatory; the others are optional. Copy the DPDK application binary to your target, then run the application as follows
diff --git a/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst b/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst
new file mode 100644 index 00000000..9d1f0fa0 --- /dev/null +++ b/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst
@@ -0,0 +1,132 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright(c) 2018 ARM Corporation.
+
+Cross compile DPDK for ARM64
+============================
+This chapter describes how to cross compile DPDK for ARM64 from x86 build hosts.
+
+.. note::
+
+   Whilst it is recommended to natively build DPDK on ARM64 (just
+   like with x86), it is also possible to cross-build DPDK for ARM64. An
+   ARM64 cross compile GNU toolchain is used for this.
+
+Obtain the cross tool chain
+---------------------------
+The latest cross compile tool chain can be downloaded from:
+https://releases.linaro.org/components/toolchain/binaries/latest/aarch64-linux-gnu/.
+
+The following step gets version 7.2.1, the latest one at the time of this writing.
+
+.. code-block:: console
+
+   wget https://releases.linaro.org/components/toolchain/binaries/latest/aarch64-linux-gnu/gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu.tar.xz
+
+Unpack and add to the PATH
+--------------------------
+
+.. code-block:: console
+
+   tar -xvf gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu.tar.xz
+   export PATH=$PATH:<cross_install_dir>/gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu/bin
+
+.. note::
+
+   For the host requirements and other info, refer to the release note section: https://releases.linaro.org/components/toolchain/binaries/latest/
+
+Getting the prerequisite library
+--------------------------------
+
+The NUMA library is required on most modern machines; it is not needed for non-NUMA architectures.
+
+.. note::
+
+   Before compiling the NUMA lib, run ``libtool --version`` to ensure the libtool version is >= 2.2;
+   otherwise the compilation will fail with errors.
+
+.. code-block:: console
+
+   git clone https://github.com/numactl/numactl.git
+   cd numactl
+   git checkout v2.0.11 -b v2.0.11
+   ./autogen.sh
+   autoconf -i
+   ./configure --host=aarch64-linux-gnu CC=aarch64-linux-gnu-gcc --prefix=<numa install dir>
+   make install
+
+The numa header files and lib file are generated in the include and lib folders respectively under <numa install dir>.
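+
+A quick sanity check that the artifacts are in place (using the install
+prefix chosen above):
+
+.. code-block:: console
+
+   ls <numa install dir>/include <numa install dir>/lib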
+
+.. _augment_the_cross_toolchain_with_numa_support:
+
+Augment the cross toolchain with NUMA support
+---------------------------------------------
+
+.. note::
+
+   This step is optional; an alternative is to use extra CFLAGS and LDFLAGS, as depicted in :ref:`configure_and_cross_compile_dpdk_build` below.
+
+Copy the NUMA header files and lib to the cross compiler's directories:
+
+.. code-block:: console
+
+   cp <numa_install_dir>/include/numa*.h <cross_install_dir>/gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu/bin/../aarch64-linux-gnu/libc/usr/include/
+   cp <numa_install_dir>/lib/libnuma.a <cross_install_dir>/gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu/lib/gcc/aarch64-linux-gnu/7.2.1/
+
+.. _configure_and_cross_compile_dpdk_build:
+
+Configure and cross compile DPDK Build
+--------------------------------------
+To configure a build, choose one of the target configurations, like arm64-dpaa2-linuxapp-gcc and arm64-thunderx-linuxapp-gcc.
+
+.. code-block:: console
+
+   make config T=arm64-armv8a-linuxapp-gcc
+
+To cross-compile, without compiling the kernel modules, use the following command:
+
+.. code-block:: console
+
+   make -j CROSS=aarch64-linux-gnu- CONFIG_RTE_KNI_KMOD=n CONFIG_RTE_EAL_IGB_UIO=n
+
+To cross-compile, including the kernel modules, the kernel source tree needs to be specified by setting
+RTE_KERNELDIR:
+
+.. code-block:: console
+
+   make -j CROSS=aarch64-linux-gnu- RTE_KERNELDIR=<kernel_src_rootdir> CROSS_COMPILE=aarch64-linux-gnu-
+
+To compile for non-NUMA targets, without compiling the kernel modules, use the following command:
+
+.. code-block:: console
+
+   make -j CROSS=aarch64-linux-gnu- CONFIG_RTE_KNI_KMOD=n CONFIG_RTE_EAL_IGB_UIO=n CONFIG_RTE_LIBRTE_VHOST_NUMA=n CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
+.. note::
+
+   1. EXTRA_CFLAGS and EXTRA_LDFLAGS should be added to include the NUMA headers and link the library respectively,
+   if the above step :ref:`augment_the_cross_toolchain_with_numa_support` was skipped and the toolchain was therefore not
+   augmented with NUMA support.
+
+   2. "-isystem <numa_install_dir>/include" should be added to EXTRA_CFLAGS; otherwise compiling numa.h will produce a lot of
+   errors such as Werror=cast-qual, Werror=strict-prototypes and Werror=old-style-definition.
+
+   An example is given below:
+
+   .. code-block:: console
+
+      make -j CROSS=aarch64-linux-gnu- CONFIG_RTE_KNI_KMOD=n CONFIG_RTE_EAL_IGB_UIO=n EXTRA_CFLAGS="-isystem <numa_install_dir>/include" EXTRA_LDFLAGS="-L<numa_install_dir>/lib -lnuma"
+
+Meson Cross Compiling DPDK
+--------------------------
+
+To cross-compile DPDK for a desired target machine we can use the following
+command::
+
+   meson cross-build --cross-file <target_machine_configuration>
+   ninja -C cross-build
+
+For example, if the target machine is arm64, we can use the following
+command::
+
+   meson arm64-build --cross-file config/arm/arm64_armv8_linuxapp_gcc
+   ninja -C arm64-build
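+
+A quick way to confirm that the resulting binaries are ARM64 is the ``file``
+utility (the binary path below is an assumed example; adjust it to your build
+tree):
+
+.. code-block:: console
+
+   file arm64-build/app/testpmd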
diff --git a/doc/guides/linux_gsg/index.rst b/doc/guides/linux_gsg/index.rst
index 2a7bdfe9..077f9302 100644 --- a/doc/guides/linux_gsg/index.rst +++ b/doc/guides/linux_gsg/index.rst
@@ -13,6 +13,7 @@ Getting Started Guide for Linux intro sys_reqs build_dpdk + cross_build_dpdk_for_arm64 linux_drivers build_sample_apps enable_func
diff --git a/doc/guides/linux_gsg/linux_drivers.rst b/doc/guides/linux_gsg/linux_drivers.rst
index 14381eed..371a817f 100644 --- a/doc/guides/linux_gsg/linux_drivers.rst +++ b/doc/guides/linux_gsg/linux_drivers.rst
@@ -1,6 +1,6 @@ .. SPDX-License-Identifier: BSD-3-Clause Copyright(c) 2010-2015 Intel Corporation.
-   Copyright(c) 2017 Mellanox Corporation.
+   Copyright 2017 Mellanox Technologies, Ltd
All rights reserved.
.. _linux_gsg_linux_drivers:
diff --git a/doc/guides/nics/axgbe.rst b/doc/guides/nics/axgbe.rst
new file mode 100644 index 00000000..e30f4944 --- /dev/null +++ b/doc/guides/nics/axgbe.rst
@@ -0,0 +1,89 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+AXGBE Poll Mode Driver
+======================
+
+The AXGBE poll mode driver library (**librte_pmd_axgbe**) implements support
+for the AMD 10 Gbps family of adapters. It is compiled and tested on standard
+Linux distributions such as Ubuntu.
+
+Detailed information about SoCs that use these devices can be found here:
+
+- `AMD EPYC™ EMBEDDED 3000 family <https://www.amd.com/en/products/embedded-epyc-3000-series>`_.
+
+
+Supported Features
+------------------
+
+AXGBE PMD has support for:
+
+- Base L2 features
+- TSS (Transmit Side Scaling)
+- Promiscuous mode
+- Port statistics
+- Multicast mode
+- RSS (Receive Side Scaling)
+- Checksum offload
+- Jumbo frames up to 9K
+
+
+Configuration Information
+-------------------------
+
+The following options can be modified in the ``.config`` file. Please note that
+enabling debugging options may affect system performance.
+
+- ``CONFIG_RTE_LIBRTE_AXGBE_PMD`` (default **y**)
+
+   Toggle compilation of axgbe PMD.
+
+- ``CONFIG_RTE_LIBRTE_AXGBE_PMD_DEBUG`` (default **n**)
+
+   Toggle display of PMD debug related messages.
+
+
+Building DPDK
+-------------
+
+See the :ref:`DPDK Getting Started Guide for Linux <linux_gsg>` for
+instructions on how to build DPDK.
+
+By default the AXGBE PMD library will be built into the DPDK library.
+
+For configuring and using UIO frameworks, please also refer to :ref:`the
+documentation that comes with the DPDK suite <linux_gsg>`.
+
+
+Prerequisites and Pre-conditions
+--------------------------------
+- Prepare the system as recommended by the DPDK suite.
+
+- Bind the intended AMD device to the ``igb_uio`` or ``vfio-pci`` module.
+
+The system is now ready to run DPDK applications.
+
+
+Usage Example
+-------------
+
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
+
+Example output:
+
+.. code-block:: console
+
+   [...]
+   EAL: PCI device 0000:02:00.4 on NUMA socket 0
+   EAL: probe driver: 1022:1458 net_axgbe
+   Interactive-mode selected
+   USER1: create a new mbuf pool <mbuf_pool_socket_0>: n=171456, size=2176, socket=0
+   USER1: create a new mbuf pool <mbuf_pool_socket_1>: n=171456, size=2176, socket=1
+   USER1: create a new mbuf pool <mbuf_pool_socket_2>: n=171456, size=2176, socket=2
+   USER1: create a new mbuf pool <mbuf_pool_socket_3>: n=171456, size=2176, socket=3
+   Configuring Port 0 (socket 0)
+   Port 0: 00:00:1A:1C:6A:17
+   Checking link statuses...
+   Port 0 Link Up - speed 10000 Mbps - full-duplex
+   Done
+   testpmd>
diff --git a/doc/guides/nics/bnx2x.rst b/doc/guides/nics/bnx2x.rst
index 31f146a0..cecbfc2e 100644 --- a/doc/guides/nics/bnx2x.rst +++ b/doc/guides/nics/bnx2x.rst
@@ -194,6 +194,7 @@ This section provides instructions to configure SR-IOV with Linux OS. using the instructions outlined in the Application notes below. #. Running testpmd:
+ (Supply ``--log-level="pmd.net.bnx2x.driver",7`` to view informational messages):
Follow instructions available in the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst
index 9826b350..697b97e6 100644 --- a/doc/guides/nics/bnxt.rst +++ b/doc/guides/nics/bnxt.rst
@@ -1,46 +1,20 @@
-.. BSD LICENSE
- Copyright 2016 Broadcom Limited
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Broadcom Limited nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2016-2018 Broadcom
BNXT Poll Mode Driver =====================
The bnxt poll mode library (**librte_pmd_bnxt**) implements support for:
-   * **Broadcom NetXtreme-C®/NetXtreme-E® BCM5730X and BCM574XX family of
-     Ethernet Network Controllers**
+   * **Broadcom NetXtreme-C®/NetXtreme-E®/NetXtreme-S®
+     BCM5730X / BCM574XX / BCM58000 family of Ethernet Network Controllers**
These adapters support standards-compliant 10/25/50/100Gbps 30MPPS full-duplex throughput. Information about the NetXtreme family of adapters can be found in the `NetXtreme® Brand section
-     <https://www.broadcom.com/products/ethernet-communication-and-switching?technology%5B%5D=88>`_
+     <https://www.broadcom.com/products/ethernet-connectivity/controllers/>`_
of the `Broadcom website <http://www.broadcom.com/>`_. * **Broadcom StrataGX® BCM5871X Series of Communications Processors**
diff --git a/doc/guides/nics/cxgbe.rst b/doc/guides/nics/cxgbe.rst
index 8651a7be..58d88eef 100644 --- a/doc/guides/nics/cxgbe.rst +++ b/doc/guides/nics/cxgbe.rst
@@ -1,32 +1,6 @@
-.. BSD LICENSE
- Copyright 2015-2017 Chelsio Communications.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Chelsio Communications nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright(c) 2014-2018 Chelsio Communications.
+   All rights reserved.
CXGBE Poll Mode Driver ======================
@@ -35,22 +9,28 @@ The CXGBE PMD (**librte_pmd_cxgbe**) provides poll mode driver support for **Chelsio Terminator** 10/25/40/100 Gbps family of adapters. CXGBE PMD has support for the latest Linux and FreeBSD operating systems.
+CXGBEVF PMD provides poll mode driver support for SR-IOV Virtual functions
+and has support for the latest Linux operating systems.
+
More information can be found at `Chelsio Communications Official Website <http://www.chelsio.com>`_.
Features --------
-CXGBE PMD has support for:
+CXGBE and CXGBEVF PMDs have support for:
- Multiple queues for TX and RX
- Receiver Side Steering (RSS)
+  Receiver Side Steering (RSS) on IPv4, IPv6, IPv4-TCP/UDP, IPv6-TCP/UDP.
+  For 4-tuple, enabling 'RSS on TCP' and 'RSS on TCP + UDP' is supported.
- VLAN filtering
- Checksum offload
- Promiscuous mode
- All multicast mode
- Port hardware statistics
- Jumbo frames
+- Flow API - Support for both Wildcard (LE-TCAM) and Exact (HASH) match filters.
Limitations -----------
@@ -63,6 +43,8 @@ port. For this reason, one cannot whitelist/blacklist a single port without whitelisting/blacklisting the other ports on the same device.
+.. _t5-nics:
+
Supported Chelsio T5 NICs -------------------------
@@ -71,16 +53,24 @@ - 40G NICs: T580-CR, T580-LP-CR, T580-SO-CR - Other T5 NICs: T522-CR
+.. _t6-nics:
+
Supported Chelsio T6 NICs -------------------------
- 25G NICs: T6425-CR, T6225-CR, T6225-LL-CR, T6225-SO-CR - 100G NICs: T62100-CR, T62100-LP-CR, T62100-SO-CR
+Supported SR-IOV Chelsio NICs
+-----------------------------
+
+SR-IOV virtual functions are supported on all the Chelsio NICs listed
+in :ref:`t5-nics` and :ref:`t6-nics`.
+
Prerequisites -------------
-- Requires firmware version **1.16.43.0** and higher. Visit
+- Requires firmware version **1.17.14.0** and higher. Visit
`Chelsio Download Center <http://service.chelsio.com>`_ to get the latest firmware bundled with the latest Chelsio Unified Wire package.
@@ -110,6 +100,10 @@ enabling debugging options may affect system performance.
Toggle compilation of librte_pmd_cxgbe driver.
+  .. note::
+
+     This controls compilation of both the CXGBE and CXGBEVF PMDs.
+
- ``CONFIG_RTE_LIBRTE_CXGBE_DEBUG`` (default **n**)
Toggle display of generic debugging messages.
@@ -134,6 +128,28 @@ enabling debugging options may affect system performance.
Toggle behaviour to prefer Throughput or Latency.
+Runtime Options
+~~~~~~~~~~~~~~~
+
+The following ``devargs`` options can be enabled at runtime. They must
+be passed as part of EAL arguments. For example,
+
+.. code-block:: console
+
+   testpmd -w 02:00.4,keep_ovlan=1 -- -i
+
+- ``keep_ovlan`` (default **0**)
+
+  Toggle behaviour to keep/strip outer VLAN in Q-in-Q packets. If
+  enabled, the outer VLAN tag is preserved in Q-in-Q packets. Otherwise,
+  the outer VLAN tag is stripped in Q-in-Q packets.
+
+- ``force_link_up`` (default **0**)
+
+  When set to 1, CXGBEVF PMD always forces the link up for all VFs on
+  underlying Chelsio NICs. This enables multiple VFs on the same NIC
+  to send traffic to each other even when the physical link is down.
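+
+Multiple ``devargs`` can be combined in a single ``-w`` option as a
+comma-separated list, e.g. (the PCI address is a placeholder):
+
+.. code-block:: console
+
+   testpmd -w 02:00.4,keep_ovlan=1,force_link_up=1 -- -i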
+
.. _driver-compilation:
Driver compilation and testing
@@ -208,7 +224,7 @@ Unified Wire package for Linux operating system are as follows: .. code-block:: console
-      firmware-version: 1.16.43.0, TP 0.1.4.9
+      firmware-version: 1.17.14.0, TP 0.1.4.9
Running testpmd ~~~~~~~~~~~~~~~
@@ -266,7 +282,7 @@ devices managed by librte_pmd_cxgbe in Linux operating system. EAL: PCI memory mapped at 0x7fd7c0200000 EAL: PCI memory mapped at 0x7fd77cdfd000 EAL: PCI memory mapped at 0x7fd7c10b7000
-      PMD: rte_cxgbe_pmd: fw: 1.16.43.0, TP: 0.1.4.9
+      PMD: rte_cxgbe_pmd: fw: 1.17.14.0, TP: 0.1.4.9
PMD: rte_cxgbe_pmd: Coming up as MASTER: Initializing adapter Interactive-mode selected Configuring Port 0 (socket 0)
@@ -286,6 +302,114 @@ devices managed by librte_pmd_cxgbe in Linux operating system. Flow control pause TX/RX is disabled by default and can be enabled via testpmd. Refer to section :ref:`flow-control` for more details.
+Configuring SR-IOV Virtual Functions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section demonstrates how to enable SR-IOV virtual functions
+on Chelsio NICs and how to run testpmd with them.
+
+#. Load the kernel module:
+
+   .. code-block:: console
+
+      modprobe cxgb4
+
+#. Get the PCI bus addresses of the interfaces bound to the cxgb4 driver:
+
+   .. code-block:: console
+
+      dmesg | tail -2
+
+   Example output:
+
+   .. code-block:: console
+
+      cxgb4 0000:02:00.4 p1p1: renamed from eth0
+      cxgb4 0000:02:00.4 p1p2: renamed from eth1
+
+   .. note::
+
+      Both the interfaces of a Chelsio 2-port adapter are bound to the
+      same PCI bus address.
+
+#. Use ifconfig to get the interface names assigned to the Chelsio card:
+
+   .. code-block:: console
+
+      ifconfig -a | grep "00:07:43"
+
+   Example output:
+
+   .. code-block:: console
+
+      p1p1 Link encap:Ethernet HWaddr 00:07:43:2D:EA:C0
+      p1p2 Link encap:Ethernet HWaddr 00:07:43:2D:EA:C8
+
+#. Bring up the interfaces:
+
+   .. code-block:: console
+
+      ifconfig p1p1 up
+      ifconfig p1p2 up
+
+#. Instantiate SR-IOV Virtual Functions. PF0..3 can be used for
+   SR-IOV VFs. Multiple VFs can be instantiated on each of PF0..3.
+   To instantiate one SR-IOV VF on each of PF0 and PF1:
+
+   .. code-block:: console
+
+      echo 1 > /sys/bus/pci/devices/0000\:02\:00.0/sriov_numvfs
+      echo 1 > /sys/bus/pci/devices/0000\:02\:00.1/sriov_numvfs
+
+#. Get the PCI bus addresses of the virtual functions:
+
+   .. code-block:: console
+
+      lspci | grep -i "Chelsio" | grep -i "VF"
+
+   Example output:
+
+   .. code-block:: console
+
+      02:01.0 Ethernet controller: Chelsio Communications Inc T540-CR Unified Wire Ethernet Controller [VF]
+      02:01.1 Ethernet controller: Chelsio Communications Inc T540-CR Unified Wire Ethernet Controller [VF]
+
+#. Running testpmd
+
+   Follow the instructions available in the document
+   :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+   to bind the virtual functions and run testpmd.
+
+   Example output:
+
+   .. code-block:: console
+
+      [...]
+      EAL: PCI device 0000:02:01.0 on NUMA socket 0
+      EAL: probe driver: 1425:5803 net_cxgbevf
+      PMD: rte_cxgbe_pmd: Firmware version: 1.17.14.0
+      PMD: rte_cxgbe_pmd: TP Microcode version: 0.1.4.9
+      PMD: rte_cxgbe_pmd: Chelsio rev 0
+      PMD: rte_cxgbe_pmd: No bootstrap loaded
+      PMD: rte_cxgbe_pmd: No Expansion ROM loaded
+      PMD: rte_cxgbe_pmd: 0000:02:01.0 Chelsio rev 0 1G/10GBASE-SFP
+      EAL: PCI device 0000:02:01.1 on NUMA socket 0
+      EAL: probe driver: 1425:5803 net_cxgbevf
+      PMD: rte_cxgbe_pmd: Firmware version: 1.17.14.0
+      PMD: rte_cxgbe_pmd: TP Microcode version: 0.1.4.9
+      PMD: rte_cxgbe_pmd: Chelsio rev 0
+      PMD: rte_cxgbe_pmd: No bootstrap loaded
+      PMD: rte_cxgbe_pmd: No Expansion ROM loaded
+      PMD: rte_cxgbe_pmd: 0000:02:01.1 Chelsio rev 0 1G/10GBASE-SFP
+      Configuring Port 0 (socket 0)
+      Port 0: 06:44:29:44:40:00
+      Configuring Port 1 (socket 0)
+      Port 1: 06:44:29:44:40:10
+      Checking link statuses...
+      Done
+      testpmd>
+
FreeBSD -------
@@ -350,7 +474,7 @@ Unified Wire package for FreeBSD operating system are as follows: .. code-block:: console
-      dev.t5nex.0.firmware_version: 1.16.43.0
+      dev.t5nex.0.firmware_version: 1.17.14.0
Running testpmd ~~~~~~~~~~~~~~~
@@ -468,7 +592,7 @@ devices managed by librte_pmd_cxgbe in FreeBSD operating system. EAL: PCI memory mapped at 0x8007ec000 EAL: PCI memory mapped at 0x842800000 EAL: PCI memory mapped at 0x80086c000
-      PMD: rte_cxgbe_pmd: fw: 1.16.43.0, TP: 0.1.4.9
+      PMD: rte_cxgbe_pmd: fw: 1.17.14.0, TP: 0.1.4.9
PMD: rte_cxgbe_pmd: Coming up as MASTER: Initializing adapter Interactive-mode selected Configuring Port 0 (socket 0)
diff --git a/doc/guides/nics/dpaa.rst b/doc/guides/nics/dpaa.rst
index 0a13996c..620c045d 100644 --- a/doc/guides/nics/dpaa.rst +++ b/doc/guides/nics/dpaa.rst
@@ -162,6 +162,16 @@ Manager. this pool.
+
+Whitelisting & Blacklisting
+---------------------------
+
+For blacklisting a DPAA device, the following command can be used:
+
+   .. code-block:: console
+
+      <dpdk app> <EAL args> -b "dpaa_bus:fmX-macY" -- ...
+      e.g. "dpaa_bus:fm1-mac4"
+
Supported DPAA SoCs -------------------
diff --git a/doc/guides/nics/dpaa2.rst b/doc/guides/nics/dpaa2.rst
index 9c66edd4..66c03e10 100644 --- a/doc/guides/nics/dpaa2.rst +++ b/doc/guides/nics/dpaa2.rst
@@ -494,28 +494,12 @@ Please note that enabling debugging options may affect system performance.
- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER`` (default ``n``)
-  Toggle display of generic debugging messages
+  Toggle display of debugging messages/logic
- ``CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA`` (default ``y``)
Toggle to use physical address vs virtual address for hardware accelerators.
-- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT`` (default ``n``)
-
-  Toggle display of initialization related messages.
-
-- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_RX`` (default ``n``)
-
-  Toggle display of receive fast path run-time message
-
-- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX`` (default ``n``)
-
-  Toggle display of transmit fast path run-time message
-
-- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX_FREE`` (default ``n``)
-
-  Toggle display of transmit fast path buffer free run-time message
-
Driver compilation and testing ------------------------------
@@ -532,8 +516,7 @@ for details. .. code-block:: console
-      ./arm64-dpaa2-linuxapp-gcc/testpmd -c 0xff -n 1 \
-          -- -i --portmask=0x3 --nb-cores=1 --no-flush-rx
+      ./testpmd -c 0xff -n 1 -- -i --portmask=0x3 --nb-cores=1 --no-flush-rx
..... EAL: Registered [pci] bus.
@@ -557,6 +540,38 @@ for details.
Done testpmd>
+Enabling logs
+-------------
+
+For enabling logs for the DPAA2 PMD, the following log-level prefixes can be
+used:
+
+   .. code-block:: console
+
+      <dpdk app> <EAL args> --log-level=bus.fslmc:<level> -- ...
+
+Using ``bus.fslmc`` as the log matching criterion, all FSLMC bus logs lower
+than the given ``level`` can be enabled.
+
+   Or
+
+   .. code-block:: console
+
+      <dpdk app> <EAL args> --log-level=pmd.net.dpaa2:<level> -- ...
+
+Using ``pmd.net.dpaa2`` as the log matching criterion, all PMD logs lower
+than the given ``level`` can be enabled.
+
+Whitelisting & Blacklisting
+---------------------------
+
+For blacklisting a DPAA2 device, the following command can be used:
+
+   .. code-block:: console
+
+      <dpdk app> <EAL args> -b "fslmc:dpni.x" -- ...
+
+where ``x`` is the device object id as configured in the resource container.
+
Limitations -----------
diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
index 4dffce1a..438a83d5 100644 --- a/doc/guides/nics/enic.rst +++ b/doc/guides/nics/enic.rst
@@ -1,32 +1,7 @@
-.. BSD LICENSE
+.. SPDX-License-Identifier: BSD-3-Clause
Copyright (c) 2017, Cisco Systems, Inc. All rights reserved.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
ENIC Poll Mode Driver =====================
@@ -114,11 +89,24 @@ Configuration information
- **Interrupts**
-  Only one interrupt per vNIC interface should be configured in the UCS
+  At least one interrupt per vNIC interface should be configured in the UCS
manager regardless of the number of receive/transmit queues. The ENIC PMD uses this interrupt to get information about link status and errors in the fast path.
+
+  In addition to the interrupt for link status and errors, when using Rx queue
+  interrupts, increase the number of configured interrupts so that there is at
+  least one interrupt for each Rx queue. For example, if the app uses 3 Rx
+  queues and wants to use per-queue interrupts, configure 4 (3 + 1) interrupts.
+
+- **Receive Side Scaling**
+
+  In order to fully utilize RSS in DPDK, enable all RSS related settings in
+  CIMC or UCSM. These include the following items listed under
+  Receive Side Scaling:
+  TCP, IPv4, TCP-IPv4, IPv6, TCP-IPv6, IPv6 Extension, TCP-IPv6 Extension.
+
+
..
_enic-flow-director: Flow director support @@ -140,20 +128,21 @@ perfect filtering of the 5-tuple with no masking of fields supported. SR-IOV mode utilization ----------------------- -UCS blade servers configured with dynamic vNIC connection policies in UCS -manager are capable of supporting assigned devices on virtual machines (VMs) -through a KVM hypervisor. Assigned devices, also known as 'passthrough' -devices, are SR-IOV virtual functions (VFs) on the host which are exposed -to VM instances. +UCS blade servers configured with dynamic vNIC connection policies in UCSM +are capable of supporting SR-IOV. SR-IOV virtual functions (VFs) are +specialized vNICs, distinct from regular Ethernet vNICs. These VFs can be +directly assigned to virtual machines (VMs) as 'passthrough' devices. -The Cisco Virtual Machine Fabric Extender (VM-FEX) gives the VM a dedicated +In UCS, SR-IOV VFs require the use of the Cisco Virtual Machine Fabric Extender +(VM-FEX), which gives the VM a dedicated interface on the Fabric Interconnect (FI). Layer 2 switching is done at the FI. This may eliminate the requirement for software switching on the host to route intra-host VM traffic. Please refer to `Creating a Dynamic vNIC Connection Policy <http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/sw/vm_fex/vmware/gui/config_guide/b_GUI_VMware_VM-FEX_UCSM_Configuration_Guide/b_GUI_VMware_VM-FEX_UCSM_Configuration_Guide_chapter_010.html#task_433E01651F69464783A68E66DA8A47A5>`_ -for information on configuring SR-IOV adapter policies using UCS manager. +for information on configuring SR-IOV adapter policies and port profiles +using UCSM. Once the policies are in place and the host OS is rebooted, VFs should be visible on the host, E.g.: @@ -170,30 +159,37 @@ visible on the host, E.g.: 0d:00.6 Ethernet controller: Cisco Systems Inc VIC SR-IOV VF (rev a2) 0d:00.7 Ethernet controller: Cisco Systems Inc VIC SR-IOV VF (rev a2) -Enable Intel IOMMU on the host and install KVM and libvirt. A VM instance should -be created with an assigned device. When using libvirt, this configuration can -be done within the domain (i.e. VM) config file. For example this entry maps -host VF 0d:00:01 into the VM. +Enable Intel IOMMU on the host and install KVM and libvirt, and reboot again as +required. Then, using libvirt, create a VM instance with an assigned device. +Below is an example ``interface`` block (part of the domain configuration XML) +that adds the host VF 0d:00:01 to the VM. ``profileid='pp-vlan-25'`` indicates +the port profile that has been configured in UCSM. .. code-block:: console <interface type='hostdev' managed='yes'> <mac address='52:54:00:ac:ff:b6'/> + <driver name='vfio'/> <source> <address type='pci' domain='0x0000' bus='0x0d' slot='0x00' function='0x1'/> </source> + <virtualport type='802.1Qbh'> + <parameters profileid='pp-vlan-25'/> + </virtualport> + </interface> + Alternatively, the configuration can be done in a separate file using the ``network`` keyword. These methods are described in the libvirt documentation for `Network XML format <https://libvirt.org/formatnetwork.html>`_. -When the VM instance is started, the ENIC KVM driver will bind the host VF to +When the VM instance is started, libvirt will bind the host VF to vfio, complete provisioning on the FI and bring up the link. .. 
note::
   It is not possible to use a VF directly from the host because it is not
-  fully provisioned until the hypervisor brings up the VM that it is assigned
+  fully provisioned until libvirt brings up the VM that it is assigned
   to.
In the VM instance, the VF will now be visible. E.g., here the VF 00:04.0 is seen on the VM instance and should be available for binding to DPDK.
Follow the normal DPDK install procedure, binding the VF to either ``igb_uio`` or ``vfio`` in non-IOMMU mode.
+In the VM, the kernel enic driver may be automatically bound to the VF during
+boot. Unbinding it currently hangs due to a known issue with the driver. To
+work around the issue, blacklist the enic module as follows; please see
:ref:`Limitations <enic_limitations>` for limitations in the use of SR-IOV.
+
+.. code-block:: console
+
+   # cat /etc/modprobe.d/enic.conf
+   blacklist enic
+
+   # dracut --force
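+
+After rebooting with the regenerated initramfs, a quick check can confirm
+that the module is no longer loaded (empty output is expected):
+
+.. code-block:: console
+
+   lsmod | grep enic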
+
+.. note::
+
+   Passthrough does not require SR-IOV. If VM-FEX is not desired, the user
+   may create as many regular vNICs as necessary and assign them to VMs as
+   passthrough devices. Since these vNICs are not SR-IOV VFs, using them as
+   passthrough devices does not require libvirt, port profiles, and VM-FEX.
+
+
.. _enic-genic-flow-api:
Generic Flow API support
@@ -227,7 +241,7 @@ Generic Flow API is supported. The baseline support is: - Actions: queue and void - Selectors: 'is'
-- **1300 series VICs with advanced filters disabled**
+- **1300 and later series VICs with advanced filters disabled**
With advanced filters disabled, an IPv4 or IPv6 item must be specified in the pattern.
@@ -238,17 +252,99 @@ Generic Flow API is supported. The baseline support is: - Selectors: 'is', 'spec' and 'mask'. 'last' is not supported - In total, up to 64 bytes of mask is allowed across all headers
-- **1300 series VICs with advanced filters enabled**
+- **1300 and later series VICs with advanced filters enabled**
- Attributes: ingress
- Items: eth, ipv4, ipv6, udp, tcp, vxlan, inner eth, ipv4, ipv6, udp, tcp
-  - Actions: queue, mark, flag and void
+  - Actions: queue, mark, drop, flag and void
- Selectors: 'is', 'spec' and 'mask'. 'last' is not supported
- In total, up to 64 bytes of mask is allowed across all headers
More features may be added in future firmware and new versions of the VIC. Please refer to the release notes.
+.. _overlay_offload:
+
+Overlay Offload
+---------------
+
+Recent hardware models support overlay offload. When enabled, the NIC performs
+the following operations for VXLAN, NVGRE, and GENEVE packets. In all cases,
+inner and outer packets can be IPv4 or IPv6.
+
+- TSO for VXLAN and GENEVE packets.
+
+  Hardware supports NVGRE TSO, but DPDK currently has no NVGRE offload flags.
+
+- Tx checksum offloads.
+
+  The NIC fills in IPv4/UDP/TCP checksums for both inner and outer packets.
+
+- Rx checksum offloads.
+
+  The NIC validates IPv4/UDP/TCP checksums of both inner and outer packets.
+  Good checksum flags (e.g. ``PKT_RX_L4_CKSUM_GOOD``) indicate that the inner
+  packet has the correct checksum, and if applicable, the outer packet also
+  has the correct checksum. Bad checksum flags (e.g. ``PKT_RX_L4_CKSUM_BAD``)
+  indicate that the inner and/or outer packets have invalid checksum values.
+
+- Inner Rx packet type classification
+
+  PMD sets inner L3/L4 packet types (e.g. ``RTE_PTYPE_INNER_L4_TCP``), and
+  ``RTE_PTYPE_TUNNEL_GRENAT`` to indicate that the packet is tunneled.
+  PMD does not set L3/L4 packet types for outer packets.
+
+- Inner RSS
+
+  RSS hash calculation, and therefore queue selection, is done on inner packets.
+
+In order to enable overlay offload, the 'Enable VXLAN' box should be checked
+via CIMC or UCSM followed by a reboot of the server. When the PMD successfully
+enables overlay offload, it prints the following message on the console.
+
+.. code-block:: console
+
+   Overlay offload is enabled
+
+By default, the PMD enables overlay offload if hardware supports it. To disable
+it, set the ``devargs`` parameter ``disable-overlay=1``. For example::
+
+   -w 12:00.0,disable-overlay=1
+
+By default, the NIC uses 4789 as the VXLAN port. The user may change
+it through ``rte_eth_dev_udp_tunnel_port_{add,delete}``. However, as
+the current NIC has a single VXLAN port number, the user cannot
+configure multiple port numbers.
+
+Ingress VLAN Rewrite
+--------------------
+
+VIC adapters can tag, untag, or modify the VLAN headers of ingress
+packets. The ingress VLAN rewrite mode controls this behavior. By
+default, it is set to pass-through, where the NIC does not modify the
+VLAN header in any way so that the application can see the original
+header. This mode is sufficient for many applications, but may not be
+suitable for others. Such applications may change the mode by setting
+the ``devargs`` parameter ``ig-vlan-rewrite`` to one of the following.
+
+- ``pass``: Pass-through mode. The NIC does not modify the VLAN
+  header. This is the default mode.
+
+- ``priority``: Priority-tag default VLAN mode. If the ingress packet
+  is tagged with the default VLAN, the NIC replaces its VLAN header
+  with the priority tag (VLAN ID 0).
+
+- ``trunk``: Default trunk mode. The NIC tags untagged ingress packets
+  with the default VLAN. Tagged ingress packets are not modified. To
+  the application, every packet appears as tagged.
+
+- ``untag``: Untag default VLAN mode. If the ingress packet is tagged
+  with the default VLAN, the NIC removes or untags its VLAN header so
+  that the application sees an untagged packet. As a result, the
+  default VLAN becomes `untagged`. This mode can be useful for
+  applications such as OVS-DPDK performance benchmarks that utilize
+  only the default VLAN and want to see only untagged packets.
+
.. _enic_limitations:
Limitations
@@ -264,9 +360,10 @@ Limitations In test setups where an Ethernet port of a Cisco adapter in TRUNK mode is connected point-to-point to another adapter port or connected though a router instead of a switch, all ingress packets will be VLAN tagged. Programs such
-  as l3fwd which do not account for VLAN tags in packets will misbehave. The
-  solution is to enable VLAN stripping on ingress. The following code fragment is
-  an example of how to accomplish this:
+  as l3fwd may not account for VLAN tags in packets and may misbehave. One
+  solution is to enable VLAN stripping on ingress so the VLAN tag is removed
+  from the packet and put into the mbuf->vlan_tci field. Here is an example
+  of how to accomplish this:
.. code-block:: console
@@ -274,6 +371,14 @@ Limitations
      vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
      rte_eth_dev_set_vlan_offload(port, vlan_offload);
+Another alternative is to modify the adapter's ingress VLAN rewrite mode so that
+packets with the default VLAN tag are stripped by the adapter and presented to
+DPDK as untagged packets. In this case, mbuf->vlan_tci and the PKT_RX_VLAN and
+PKT_RX_VLAN_STRIPPED mbuf flags would not be set. This mode is enabled with the
+``devargs`` parameter ``ig-vlan-rewrite=untag``.
For example:: + + -w 12:00.0,ig-vlan-rewrite=untag + - Limited flow director support on 1200 series and 1300 series Cisco VIC adapters with old firmware. Please see :ref:`enic-flow-director`. @@ -305,6 +410,24 @@ Limitations were added. Since there currently is no grouping or priority support, 'catch-all' filters should be added last. +- **Statistics** + + - ``rx_good_bytes`` (ibytes) always includes VLAN header (4B) and CRC bytes (4B). + This behavior applies to 1300 and older series VIC adapters. + 1400 series VICs do not count CRC bytes, and count VLAN header only when VLAN + stripping is disabled. + - When the NIC drops a packet because the Rx queue has no free buffers, + ``rx_good_bytes`` still increments by 4B if the packet is not VLAN tagged or + VLAN stripping is disabled, or by 8B if the packet is VLAN tagged and stripping + is enabled. + This behavior applies to 1300 and older series VIC adapters. 1400 series VICs + do not increment this byte counter when packets are dropped. + +- **RSS Hashing** + + - Hardware enables and disables UDP and TCP RSS hashing together. The driver + cannot control UDP and TCP hashing individually. + How to build the suite ---------------------- @@ -322,17 +445,9 @@ Supported Cisco VIC adapters ENIC PMD supports all recent generations of Cisco VIC adapters including: -- VIC 1280 -- VIC 1240 -- VIC 1225 -- VIC 1285 -- VIC 1225T -- VIC 1227 -- VIC 1227T -- VIC 1380 -- VIC 1340 -- VIC 1385 -- VIC 1387 +- VIC 1200 series +- VIC 1300 series +- VIC 1400 series Supported Operating Systems --------------------------- @@ -356,10 +471,16 @@ Supported features - VLAN filtering (supported via UCSM/CIMC only) - Execution of application by unprivileged system users - IPV4, IPV6 and TCP RSS hashing +- UDP RSS hashing (1400 series and later adapters) - Scattered Rx - MTU update - SR-IOV on UCS managed servers connected to Fabric Interconnects - Flow API +- Overlay offload + + - Rx/Tx checksum offloads for VXLAN, NVGRE, GENEVE + - TSO for VXLAN and GENEVE packets + - Inner RSS Known bugs and unsupported features in this release --------------------------------------------------- @@ -369,8 +490,8 @@ Known bugs and unsupported features in this release - VLAN based flow direction - Non-IPV4 flow direction - Setting of extended VLAN -- UDP RSS hashing - MTU update only works if Scattered Rx mode is disabled +- Maximum receive packet length is ignored if Scattered Rx mode is used Prerequisites ------------- @@ -427,4 +548,4 @@ Any questions or bugs should be reported to DPDK community and to the ENIC PMD maintainers: - John Daley <johndale@cisco.com> -- Nelson Escobar <neescoba@cisco.com> +- Hyong Youb Kim <hyonkim@cisco.com> diff --git a/doc/guides/nics/fail_safe.rst b/doc/guides/nics/fail_safe.rst index 3f72b593..6c02d7ef 100644 --- a/doc/guides/nics/fail_safe.rst +++ b/doc/guides/nics/fail_safe.rst @@ -1,32 +1,6 @@ -.. BSD LICENSE +.. SPDX-License-Identifier: BSD-3-Clause Copyright 2017 6WIND S.A. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of 6WIND S.A. 
nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - Fail-safe poll mode driver library ================================== diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst index 1b4fb979..cddc877d 100644 --- a/doc/guides/nics/features.rst +++ b/doc/guides/nics/features.rst @@ -278,6 +278,17 @@ Supports RSS hashing on RX. * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``. +.. _nic_features_inner_rss: + +Inner RSS +--------- + +Supports RX RSS hashing on Inner headers. + +* **[users] rte_flow_action_rss**: ``level``. +* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``. + + .. _nic_features_rss_key_update: RSS key update @@ -503,7 +514,7 @@ CRC offload Supports CRC stripping by hardware. -* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_CRC_STRIP``. +* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_CRC_STRIP,DEV_RX_OFFLOAD_KEEP_CRC``. .. _nic_features_vlan_offload: @@ -566,7 +577,6 @@ Supports L4 checksum offload. * **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM``. * **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``. -* **[uses] user config**: ``dev_conf.rxmode.hw_ip_checksum``. * **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``, ``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` | ``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``. @@ -749,6 +759,17 @@ Supports getting/setting device eeprom data. ``rte_eth_dev_set_eeprom()``. +.. _nic_features_module_eeprom_dump: + +Module EEPROM dump +------------------ + +Supports getting information and data of plugin module eeprom. + +* **[implements] eth_dev_ops**: ``get_module_info``, ``get_module_eeprom``. +* **[related] API**: ``rte_eth_dev_get_module_info()``, ``rte_eth_dev_get_module_eeprom()``. + + .. _nic_features_register_dump: Registers dump @@ -892,7 +913,25 @@ Documentation describes performance values. See ``dpdk.org/doc/perf/*``. +.. _nic_features_runtime_rx_queue_setup: + +Runtime Rx queue setup +---------------------- + +Supports Rx queue setup after device started. + +* **[provides] rte_eth_dev_info**: ``dev_capa:RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP``. +* **[related] API**: ``rte_eth_dev_info_get()``. +.. _nic_features_runtime_tx_queue_setup: + +Runtime Tx queue setup +---------------------- + +Supports Tx queue setup after device started. + +* **[provides] rte_eth_dev_info**: ``dev_capa:RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP``. +* **[related] API**: ``rte_eth_dev_info_get()``. .. 
_nic_features_other: diff --git a/doc/guides/nics/features/avf.ini b/doc/guides/nics/features/avf.ini index ccb9edde..35ceada2 100644 --- a/doc/guides/nics/features/avf.ini +++ b/doc/guides/nics/features/avf.ini @@ -6,7 +6,6 @@ [Features] Speed capabilities = Y Link status = Y -Link status event = Y Rx interrupt = Y Queue start/stop = Y MTU update = Y diff --git a/doc/guides/nics/features/avf_vec.ini b/doc/guides/nics/features/avf_vec.ini index 89249948..3050bc4a 100644 --- a/doc/guides/nics/features/avf_vec.ini +++ b/doc/guides/nics/features/avf_vec.ini @@ -6,7 +6,6 @@ [Features] Speed capabilities = Y Link status = Y -Link status event = Y Rx interrupt = Y Queue start/stop = Y MTU update = Y diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini new file mode 100644 index 00000000..ab4da559 --- /dev/null +++ b/doc/guides/nics/features/axgbe.ini @@ -0,0 +1,19 @@ +; +; Supported features of the 'axgbe' network poll mode driver. +; +; Refer to default.ini for the full list of available PMD features. +; +[Features] +Speed capabilities = Y +Link status = Y +Jumbo frame = Y +Promiscuous mode = Y +Allmulticast mode = Y +RSS hash = Y +CRC offload = Y +L3 checksum offload = Y +L4 checksum offload = Y +Basic stats = Y +Linux UIO = Y +x86-32 = Y +x86-64 = Y diff --git a/doc/guides/nics/features/cxgbe.ini b/doc/guides/nics/features/cxgbe.ini index 3d0fde2f..88f2f92b 100644 --- a/doc/guides/nics/features/cxgbe.ini +++ b/doc/guides/nics/features/cxgbe.ini @@ -14,7 +14,9 @@ TSO = Y Promiscuous mode = Y Allmulticast mode = Y RSS hash = Y +RSS key update = Y Flow control = Y +Flow API = Y CRC offload = Y VLAN offload = Y L3 checksum offload = Y @@ -24,6 +26,7 @@ Basic stats = Y Stats per queue = Y EEPROM dump = Y Registers dump = Y +Multiprocess aware = Y BSD nic_uio = Y Linux UIO = Y Linux VFIO = Y diff --git a/doc/guides/nics/features/cxgbevf.ini b/doc/guides/nics/features/cxgbevf.ini new file mode 100644 index 00000000..b41fc365 --- /dev/null +++ b/doc/guides/nics/features/cxgbevf.ini @@ -0,0 +1,29 @@ +; +; Supported features of the 'cxgbevf' network poll mode driver. +; +; Refer to default.ini for the full list of available PMD features. 
+; +[Features] +Speed capabilities = Y +Link status = Y +Queue start/stop = Y +MTU update = Y +Jumbo frame = Y +Scattered Rx = Y +TSO = Y +Promiscuous mode = Y +Allmulticast mode = Y +RSS hash = Y +CRC offload = Y +VLAN offload = Y +L3 checksum offload = Y +L4 checksum offload = Y +Packet type parsing = Y +Basic stats = Y +Stats per queue = Y +Multiprocess aware = Y +Linux UIO = Y +Linux VFIO = Y +x86-32 = Y +x86-64 = Y +Usage doc = Y diff --git a/doc/guides/nics/features/default.ini b/doc/guides/nics/features/default.ini index dae2ad77..f1a39d0f 100644 --- a/doc/guides/nics/features/default.ini +++ b/doc/guides/nics/features/default.ini @@ -17,6 +17,8 @@ Lock-free Tx queue = Fast mbuf free = Free Tx mbuf on demand = Queue start/stop = +Runtime Rx queue setup = +Runtime Tx queue setup = MTU update = Jumbo frame = Scattered Rx = @@ -29,6 +31,7 @@ Multicast MAC filter = RSS hash = RSS key update = RSS reta update = +Inner RSS = VMDq = SR-IOV = DCB = @@ -63,6 +66,7 @@ Extended stats = Stats per queue = FW version = EEPROM dump = +Module EEPROM dump = Registers dump = LED = Multiprocess aware = diff --git a/doc/guides/nics/features/enic.ini b/doc/guides/nics/features/enic.ini index 498341f0..8a4bad29 100644 --- a/doc/guides/nics/features/enic.ini +++ b/doc/guides/nics/features/enic.ini @@ -6,23 +6,29 @@ [Features] Link status = Y Link status event = Y +Rx interrupt = Y Queue start/stop = Y MTU update = Y Jumbo frame = Y Scattered Rx = Y TSO = Y Promiscuous mode = Y +Allmulticast mode = Y Unicast MAC filter = Y -Multicast MAC filter = Y +Multicast MAC filter = RSS hash = Y +RSS key update = Y +RSS reta update = Y +Inner RSS = Y SR-IOV = Y -VLAN filter = Y CRC offload = Y VLAN offload = Y Flow director = Y Flow API = Y L3 checksum offload = Y L4 checksum offload = Y +Inner L3 checksum = Y +Inner L4 checksum = Y Packet type parsing = Y Basic stats = Y Multiprocess aware = Y diff --git a/doc/guides/nics/features/fm10k.ini b/doc/guides/nics/features/fm10k.ini index f0f61a7d..0acdf0d3 100644 --- a/doc/guides/nics/features/fm10k.ini +++ b/doc/guides/nics/features/fm10k.ini @@ -5,6 +5,8 @@ ; [Features] Speed capabilities = P +Link status = Y +Link status event = Y Rx interrupt = Y Queue start/stop = Y Jumbo frame = Y @@ -24,6 +26,8 @@ VLAN offload = Y L3 checksum offload = Y L4 checksum offload = Y Packet type parsing = Y +Rx descriptor status = Y +Tx descriptor status = Y Basic stats = Y Extended stats = Y Stats per queue = Y diff --git a/doc/guides/nics/features/fm10k_vf.ini b/doc/guides/nics/features/fm10k_vf.ini index 32b93df4..44b50faa 100644 --- a/doc/guides/nics/features/fm10k_vf.ini +++ b/doc/guides/nics/features/fm10k_vf.ini @@ -5,6 +5,8 @@ ; [Features] Speed capabilities = P +Link status = Y +Link status event = Y Rx interrupt = Y Queue start/stop = Y Jumbo frame = Y diff --git a/doc/guides/nics/features/i40e.ini b/doc/guides/nics/features/i40e.ini index e862712c..16eab7f4 100644 --- a/doc/guides/nics/features/i40e.ini +++ b/doc/guides/nics/features/i40e.ini @@ -9,6 +9,8 @@ Link status = Y Link status event = Y Rx interrupt = Y Queue start/stop = Y +Runtime Rx queue setup = Y +Runtime Tx queue setup = Y Jumbo frame = Y Scattered Rx = Y TSO = Y @@ -44,6 +46,7 @@ Tx descriptor status = Y Basic stats = Y Extended stats = Y FW version = Y +Module EEPROM dump = Y Multiprocess aware = Y BSD nic_uio = Y Linux UIO = Y diff --git a/doc/guides/nics/features/i40e_vec.ini b/doc/guides/nics/features/i40e_vec.ini index 7d7b3a92..c65e8b03 100644 --- a/doc/guides/nics/features/i40e_vec.ini +++ 
b/doc/guides/nics/features/i40e_vec.ini @@ -34,6 +34,7 @@ Rx descriptor status = Y Tx descriptor status = Y Basic stats = Y Extended stats = Y +Module EEPROM dump = Y Multiprocess aware = Y BSD nic_uio = Y Linux UIO = Y diff --git a/doc/guides/nics/features/i40e_vf.ini b/doc/guides/nics/features/i40e_vf.ini index 46e0d9fc..ba2d8cbe 100644 --- a/doc/guides/nics/features/i40e_vf.ini +++ b/doc/guides/nics/features/i40e_vf.ini @@ -5,6 +5,7 @@ ; [Features] Rx interrupt = Y +Link status = Y Queue start/stop = Y Jumbo frame = Y Scattered Rx = Y diff --git a/doc/guides/nics/features/i40e_vf_vec.ini b/doc/guides/nics/features/i40e_vf_vec.ini index c2c6c19f..421ed919 100644 --- a/doc/guides/nics/features/i40e_vf_vec.ini +++ b/doc/guides/nics/features/i40e_vf_vec.ini @@ -5,6 +5,7 @@ ; [Features] Rx interrupt = Y +Link status = Y Queue start/stop = Y Jumbo frame = Y Scattered Rx = Y diff --git a/doc/guides/nics/features/ifcvf.ini b/doc/guides/nics/features/ifcvf.ini new file mode 100644 index 00000000..ef1fc471 --- /dev/null +++ b/doc/guides/nics/features/ifcvf.ini @@ -0,0 +1,8 @@ +; +; Supported features of the 'ifcvf' vDPA driver. +; +; Refer to default.ini for the full list of available PMD features. +; +[Features] +x86-32 = Y +x86-64 = Y diff --git a/doc/guides/nics/features/igb.ini b/doc/guides/nics/features/igb.ini index 33d64d99..c53fd075 100644 --- a/doc/guides/nics/features/igb.ini +++ b/doc/guides/nics/features/igb.ini @@ -41,6 +41,7 @@ Basic stats = Y Extended stats = Y FW version = Y EEPROM dump = Y +Module EEPROM dump = Y Registers dump = Y BSD nic_uio = Y Linux UIO = Y diff --git a/doc/guides/nics/features/igb_vf.ini b/doc/guides/nics/features/igb_vf.ini index e641a2c9..d9653234 100644 --- a/doc/guides/nics/features/igb_vf.ini +++ b/doc/guides/nics/features/igb_vf.ini @@ -4,6 +4,7 @@ ; Refer to default.ini for the full list of available PMD features. 
; [Features] +Link status = Y Rx interrupt = Y Scattered Rx = Y TSO = Y diff --git a/doc/guides/nics/features/ixgbe.ini b/doc/guides/nics/features/ixgbe.ini index 1d68ee8e..41431117 100644 --- a/doc/guides/nics/features/ixgbe.ini +++ b/doc/guides/nics/features/ixgbe.ini @@ -51,6 +51,7 @@ Extended stats = Y Stats per queue = Y FW version = Y EEPROM dump = Y +Module EEPROM dump = Y Registers dump = Y Multiprocess aware = Y BSD nic_uio = Y diff --git a/doc/guides/nics/features/ixgbe_vec.ini b/doc/guides/nics/features/ixgbe_vec.ini index 28bc0547..ef3ee688 100644 --- a/doc/guides/nics/features/ixgbe_vec.ini +++ b/doc/guides/nics/features/ixgbe_vec.ini @@ -40,6 +40,7 @@ Basic stats = Y Extended stats = Y Stats per queue = Y EEPROM dump = Y +Module EEPROM dump = Y Registers dump = Y Multiprocess aware = Y BSD nic_uio = Y diff --git a/doc/guides/nics/features/mlx4.ini b/doc/guides/nics/features/mlx4.ini index f6efd21d..98a3f611 100644 --- a/doc/guides/nics/features/mlx4.ini +++ b/doc/guides/nics/features/mlx4.ini @@ -13,6 +13,7 @@ Queue start/stop = Y MTU update = Y Jumbo frame = Y Scattered Rx = Y +TSO = Y Promiscuous mode = Y Allmulticast mode = Y Unicast MAC filter = Y diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini index c3636391..b28b43e5 100644 --- a/doc/guides/nics/features/mlx5.ini +++ b/doc/guides/nics/features/mlx5.ini @@ -21,6 +21,7 @@ Multicast MAC filter = Y RSS hash = Y RSS key update = Y RSS reta update = Y +Inner RSS = Y SR-IOV = Y VLAN filter = Y Flow director = Y @@ -29,6 +30,9 @@ CRC offload = Y VLAN offload = Y L3 checksum offload = Y L4 checksum offload = Y +Timestamp offload = Y +Inner L3 checksum = Y +Inner L4 checksum = Y Packet type parsing = Y Rx descriptor status = Y Tx descriptor status = Y @@ -39,5 +43,6 @@ Multiprocess aware = Y Other kdrv = Y ARMv8 = Y Power8 = Y +x86-32 = Y x86-64 = Y Usage doc = Y diff --git a/doc/guides/nics/features/mrvl.ini b/doc/guides/nics/features/mrvl.ini deleted file mode 100644 index 00d96218..00000000 --- a/doc/guides/nics/features/mrvl.ini +++ /dev/null @@ -1,23 +0,0 @@ -; -; Supported features of the 'mrvl' network poll mode driver. -; -; Refer to default.ini for the full list of available PMD features. -; -[Features] -Speed capabilities = Y -Link status = Y -MTU update = Y -Jumbo frame = Y -Promiscuous mode = Y -Allmulticast mode = Y -Unicast MAC filter = Y -Multicast MAC filter = Y -RSS hash = Y -VLAN filter = Y -CRC offload = Y -L3 checksum offload = Y -L4 checksum offload = Y -Packet type parsing = Y -Basic stats = Y -ARMv8 = Y -Usage doc = Y diff --git a/doc/guides/nics/features/mvpp2.ini b/doc/guides/nics/features/mvpp2.ini new file mode 100644 index 00000000..ef47546d --- /dev/null +++ b/doc/guides/nics/features/mvpp2.ini @@ -0,0 +1,25 @@ +; +; Supported features of the 'mvpp2' network poll mode driver. +; +; Refer to default.ini for the full list of available PMD features. 
+; +[Features] +Speed capabilities = Y +Link status = Y +MTU update = Y +Jumbo frame = Y +Promiscuous mode = Y +Allmulticast mode = Y +Unicast MAC filter = Y +Multicast MAC filter = Y +RSS hash = Y +Flow control = Y +VLAN filter = Y +CRC offload = Y +L3 checksum offload = Y +L4 checksum offload = Y +Packet type parsing = Y +Basic stats = Y +Extended stats = Y +ARMv8 = Y +Usage doc = Y diff --git a/doc/guides/nics/features/netvsc.ini b/doc/guides/nics/features/netvsc.ini new file mode 100644 index 00000000..2ff6042b --- /dev/null +++ b/doc/guides/nics/features/netvsc.ini @@ -0,0 +1,23 @@ +; +; Supported features of the 'netvsc' network poll mode driver. +; +; Refer to default.ini for the full list of available PMD features. +; +[Features] +Speed capabilities = P +Link status = Y +Queue start/stop = Y +Scattered Rx = Y +Promiscuous mode = Y +Allmulticast mode = Y +Basic stats = Y +Stats per queue = Y +Extended stats = Y +Multiprocess aware = Y +Other kdrv = Y +ARMv7 = Y +ARMv8 = Y +x86-32 = Y +x86-64 = Y +Usage doc = Y +MTU update = Y diff --git a/doc/guides/nics/features/qede.ini b/doc/guides/nics/features/qede.ini index cbadc194..0d081002 100644 --- a/doc/guides/nics/features/qede.ini +++ b/doc/guides/nics/features/qede.ini @@ -6,10 +6,11 @@ [Features] Speed capabilities = Y Link status = Y -Link status event = Y MTU update = Y Jumbo frame = Y Scattered Rx = Y +LRO = Y +TSO = Y Promiscuous mode = Y Allmulticast mode = Y Unicast MAC filter = Y @@ -18,12 +19,14 @@ RSS hash = Y RSS key update = Y RSS reta update = Y VLAN filter = Y +N-tuple filter = Y +Tunnel filter = Y +Flow director = Y Flow control = Y CRC offload = Y VLAN offload = Y L3 checksum offload = Y L4 checksum offload = Y -Tunnel filter = Y Inner L3 checksum = Y Inner L4 checksum = Y Packet type parsing = Y @@ -32,11 +35,8 @@ Extended stats = Y Stats per queue = Y Multiprocess aware = Y Linux UIO = Y +Linux VFIO = Y ARMv8 = Y x86-32 = Y x86-64 = Y Usage doc = Y -N-tuple filter = Y -Flow director = Y -LRO = Y -TSO = Y diff --git a/doc/guides/nics/features/qede_vf.ini b/doc/guides/nics/features/qede_vf.ini index 18857b6e..e796b313 100644 --- a/doc/guides/nics/features/qede_vf.ini +++ b/doc/guides/nics/features/qede_vf.ini @@ -6,7 +6,6 @@ [Features] Speed capabilities = Y Link status = Y -Link status event = Y MTU update = Y Jumbo frame = Y Scattered Rx = Y @@ -30,6 +29,7 @@ Extended stats = Y Stats per queue = Y Multiprocess aware = Y Linux UIO = Y +Linux VFIO = Y ARMv8 = Y x86-32 = Y x86-64 = Y diff --git a/doc/guides/nics/features/softnic.ini b/doc/guides/nics/features/softnic.ini new file mode 100644 index 00000000..0583381c --- /dev/null +++ b/doc/guides/nics/features/softnic.ini @@ -0,0 +1,9 @@ +; +; Supported features of the 'softnic' poll mode driver. +; +; Refer to default.ini for the full list of available PMD features. 
+; +[Features] +x86-32 = Y +x86-64 = Y +Usage doc = Y diff --git a/doc/guides/nics/features/vhost.ini b/doc/guides/nics/features/vhost.ini index dffd1f49..ef81abb4 100644 --- a/doc/guides/nics/features/vhost.ini +++ b/doc/guides/nics/features/vhost.ini @@ -5,7 +5,6 @@ ; [Features] Link status = Y -Link status event = Y Free Tx mbuf on demand = Y Queue status event = Y Basic stats = Y diff --git a/doc/guides/nics/features/virtio.ini b/doc/guides/nics/features/virtio.ini index 16e577df..a16b8172 100644 --- a/doc/guides/nics/features/virtio.ini +++ b/doc/guides/nics/features/virtio.ini @@ -6,6 +6,7 @@ [Features] Speed capabilities = P Link status = Y +Link status event = Y Rx interrupt = Y Queue start/stop = Y Scattered Rx = Y diff --git a/doc/guides/nics/features/virtio_vec.ini b/doc/guides/nics/features/virtio_vec.ini index c06c860d..e60fe36a 100644 --- a/doc/guides/nics/features/virtio_vec.ini +++ b/doc/guides/nics/features/virtio_vec.ini @@ -6,6 +6,7 @@ [Features] Speed capabilities = P Link status = Y +Link status event = Y Rx interrupt = Y Queue start/stop = Y Promiscuous mode = Y diff --git a/doc/guides/nics/fm10k.rst b/doc/guides/nics/fm10k.rst index c44e226e..d1391e99 100644 --- a/doc/guides/nics/fm10k.rst +++ b/doc/guides/nics/fm10k.rst @@ -79,14 +79,14 @@ Other features are supported using optional MACRO configuration. They include: To enable via ``RX_OLFLAGS`` use ``RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y``. -To guarantee the constraint, the following configuration flags in ``dev_conf.rxmode`` +To guarantee the constraint, the following capabilities in ``dev_conf.rxmode.offloads`` will be checked: -* ``hw_vlan_extend`` +* ``DEV_RX_OFFLOAD_VLAN_EXTEND`` -* ``hw_ip_checksum`` +* ``DEV_RX_OFFLOAD_CHECKSUM`` -* ``header_split`` +* ``DEV_RX_OFFLOAD_HEADER_SPLIT`` * ``fdir_conf->mode`` @@ -106,19 +106,9 @@ TX Constraint Features not Supported by TX Vector PMD ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -TX vPMD only works when ``txq_flags`` is set to ``FM10K_SIMPLE_TX_FLAG``. -This means that it does not support TX multi-segment, VLAN offload or TX csum -offload. The following MACROs are used for these three features: +TX vPMD only works when ``offloads`` is set to 0. -* ``ETH_TXQ_FLAGS_NOMULTSEGS`` - -* ``ETH_TXQ_FLAGS_NOVLANOFFL`` - -* ``ETH_TXQ_FLAGS_NOXSUMSCTP`` - -* ``ETH_TXQ_FLAGS_NOXSUMUDP`` - -* ``ETH_TXQ_FLAGS_NOXSUMTCP`` +This means that it does not support any TX offload. Limitations ----------- @@ -149,9 +139,8 @@ CRC striping ~~~~~~~~~~~~ The FM10000 family of NICs strip the CRC for every packets coming into the -host interface. So, CRC will be stripped even when the -``rxmode.hw_strip_crc`` member is set to 0 in ``struct rte_eth_conf``. - +host interface. So, CRC will be stripped even when ``DEV_RX_OFFLOAD_CRC_STRIP`` +in ``rxmode.offloads`` is NOT set in ``struct rte_eth_conf``. Maximum packet length ~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst index e1b8083c..65d87f86 100644 --- a/doc/guides/nics/i40e.rst +++ b/doc/guides/nics/i40e.rst @@ -4,14 +4,16 @@ I40E Poll Mode Driver ====================== -The I40E PMD (librte_pmd_i40e) provides poll mode driver support -for the Intel X710/XL710/X722 10/40 Gbps family of adapters. +The i40e PMD (librte_pmd_i40e) provides poll mode driver support for +10/25/40 Gbps Intel® Ethernet 700 Series Network Adapters based on +the Intel Ethernet Controller X710/XL710/XXV710 and Intel Ethernet +Connection X722 (which supports only a subset of the features).
Features -------- -Features of the I40E PMD are: +Features of the i40e PMD are: - Multiple queues for TX and RX - Receiver Side Scaling (RSS) @@ -40,6 +42,7 @@ Features of the i40e PMD are: - VF Daemon (VFD) - EXPERIMENTAL - Dynamic Device Personalization (DDP) - Queue region configuration +- Virtual Function Port Representors Prerequisites ------------- @@ -53,7 +56,37 @@ Prerequisites section of the :ref:`Getting Started Guide for Linux <linux_gsg>`. - Upgrade the NVM/FW version following the `Intel® Ethernet NVM Update Tool Quick Usage Guide for Linux - <https://www-ssl.intel.com/content/www/us/en/embedded/products/networking/nvm-update-tool-quick-linux-usage-guide.html>`_ if needed. + <https://www-ssl.intel.com/content/www/us/en/embedded/products/networking/nvm-update-tool-quick-linux-usage-guide.html>`_ and `Intel® Ethernet NVM Update Tool: Quick Usage Guide for EFI <https://www.intel.com/content/www/us/en/embedded/products/networking/nvm-update-tool-quick-efi-usage-guide.html>`_ if needed. + +Recommended Matching List +------------------------- + +It is highly recommended to upgrade the i40e kernel driver and firmware to +avoid compatibility issues with the i40e PMD. The following matching list has +been tested and verified. For detailed information, refer to the Tested +Platforms/Tested NICs chapters in the release notes. + +--------------+-----------------------+------------------+ + | DPDK version | Kernel driver version | Firmware version | + +==============+=======================+==================+ + | 18.05 | 2.4.6 | 6.01 | + +--------------+-----------------------+------------------+ + | 18.02 | 2.4.3 | 6.01 | + +--------------+-----------------------+------------------+ + | 17.11 | 2.1.26 | 6.01 | + +--------------+-----------------------+------------------+ + | 17.08 | 2.0.19 | 6.01 | + +--------------+-----------------------+------------------+ + | 17.05 | 1.5.23 | 5.05 | + +--------------+-----------------------+------------------+ + | 17.02 | 1.5.23 | 5.05 | + +--------------+-----------------------+------------------+ + | 16.11 | 1.5.23 | 5.05 | + +--------------+-----------------------+------------------+ + | 16.07 | 1.4.25 | 5.04 | + +--------------+-----------------------+------------------+ + | 16.04 | 1.4.25 | 5.02 | + +--------------+-----------------------+------------------+ Pre-Installation Configuration ------------------------------ @@ -93,11 +126,6 @@ Please note that enabling debugging options may affect system performance. Number of queues reserved for each VMDQ Pool. -- ``CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL`` (default ``-1``) - - Interrupt Throttling interval. - - Runtime Config Options ~~~~~~~~~~~~~~~~~~~~~~ @@ -121,6 +149,20 @@ Runtime Config Options will switch PF interrupt from IntN to Int0 to avoid interrupt conflict between DPDK and Linux Kernel. +- ``Support VF Port Representor`` (default ``not enabled``) + + The i40e PF PMD supports the creation of VF port representors for the control + and monitoring of i40e virtual function devices. Each port representor + corresponds to a single virtual function of that device. Using the ``devargs`` + option ``representor`` the user can specify which virtual functions to create + port representors for on initialization of the PF PMD by passing the IDs of + the required VFs:: + + -w DBDF,representor=[0,1,4] + + Currently hot-plugging of representor ports is not supported, so all required + representors must be specified on the creation of the PF.
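As an illustrative sketch (not part of this patch; the PCI address ``0000:82:00.0`` is a placeholder for an actual i40e PF), a ``testpmd`` session creating representors for VFs 0, 1 and 4 could be launched as: .. code-block:: console ./testpmd -w 0000:82:00.0,representor=[0,1,4] -- -i Each representor then appears as an additional ethdev port that can be used to configure and monitor the corresponding VF.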
+ Driver compilation and testing ------------------------------ @@ -324,7 +366,7 @@ Delete all flow director rules on a port: Floating VEB ~~~~~~~~~~~~~ -The Intel® Ethernet Controller X710 and XL710 Family support a feature called +The Intel® Ethernet 700 Series supports a feature called "Floating VEB". A Virtual Ethernet Bridge (VEB) is an IEEE Edge Virtual Bridging (EVB) term @@ -370,21 +412,22 @@ or greater. Dynamic Device Personalization (DDP) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The Intel® Ethernet Controller X*710 support a feature called "Dynamic Device -Personalization (DDP)", which is used to configure hardware by downloading -a profile to support protocols/filters which are not supported by default. -The DDP functionality requires a NIC firmware version of 6.0 or greater. +The Intel® Ethernet 700 Series, except for the Intel Ethernet Connection +X722, supports a feature called "Dynamic Device Personalization (DDP)", +which is used to configure hardware by downloading a profile to support +protocols/filters which are not supported by default. The DDP +functionality requires a NIC firmware version of 6.0 or greater. -Current implementation supports MPLSoUDP/MPLSoGRE/GTP-C/GTP-U/PPPoE/PPPoL2TP, +Current implementation supports GTP-C/GTP-U/PPPoE/PPPoL2TP, steering can be used with rte_flow API. -Load a profile which supports MPLSoUDP/MPLSoGRE and store backup profile: +Load a profile which supports GTP and store backup profile: .. code-block:: console - testpmd> ddp add 0 ./mpls.pkgo,./backup.pkgo + testpmd> ddp add 0 ./gtp.pkgo,./backup.pkgo -Delete a MPLS profile and restore backup profile: +Delete a GTP profile and restore backup profile: .. code-block:: console @@ -396,11 +439,11 @@ Get loaded DDP package info list: testpmd> ddp get list 0 -Display information about a MPLS profile: +Display information about a GTP profile: .. code-block:: console - testpmd> ddp get info ./mpls.pkgo + testpmd> ddp get info ./gtp.pkgo Input set configuration ~~~~~~~~~~~~~~~~~~~~~~~ @@ -416,7 +459,7 @@ For example, to use only 48bit prefix for IPv6 src address for IPv6 TCP RSS: Queue region configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The Ethernet Controller X710/XL710 supports a feature of queue regions +The Intel® Ethernet 700 Series supports a feature of queue regions configuration for RSS in the PF, so that different traffic classes or different packet classification types can be separated to different queues in different queue regions. There is an API for configuration @@ -440,8 +483,8 @@ details please refer to :doc:`../testpmd_app_ug/index`. Limitations or Known issues --------------------------- -MPLS packet classification on X710/XL710 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +MPLS packet classification +~~~~~~~~~~~~~~~~~~~~~~~~~~ For firmware versions prior to 5.0, MPLS packets are not recognized by the NIC. The L2 Payload flow type in flow director can be used to classify MPLS packet @@ -489,14 +532,14 @@ Incorrect Rx statistics when packet is oversize ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When a packet is over maximum frame size, the packet is dropped. -However the Rx statistics, when calling `rte_eth_stats_get` incorrectly +However, the Rx statistics, when calling `rte_eth_stats_get` incorrectly shows it as received. VF & TC max bandwidth setting ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The per VF max bandwidth and per TC max bandwidth cannot be enabled in parallel. -The dehavior is different when handling per VF and per TC max bandwidth setting.
+The behavior is different when handling per VF and per TC max bandwidth setting. When enabling per VF max bandwidth, SW will check if per TC max bandwidth is enabled. If so, return failure. When enabling per TC max bandwidth, SW will check if per VF max bandwidth @@ -517,11 +560,11 @@ VF performance is impacted by PCI extended tag setting ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To reach maximum NIC performance in the VF the PCI extended tag must be -enabled. The DPDK I40E PF driver will set this feature during initialization, +enabled. The DPDK i40e PF driver will set this feature during initialization, but the kernel PF driver does not. So when running traffic on a VF which is managed by the kernel PF driver, a significant NIC performance downgrade has -been observed (for 64 byte packets, there is about 25% linerate downgrade for -a 25G device and about 35% for a 40G device). +been observed (for 64 byte packets, there is about 25% line-rate downgrade for +a 25GbE device and about 35% for a 40GbE device). For kernel version >= 4.11, the kernel's PCI driver will enable the extended tag if it detects that the device supports it. So by default, this is not an @@ -562,12 +605,12 @@ with DPDK, then the configuration will also impact port B in the NIC with kernel driver, which don't want to use the TPID. So PMD reports warning to clarify what is changed by writing global register. -High Performance of Small Packets on 40G NIC -------------------------------------------- +High Performance of Small Packets on 40GbE NIC +---------------------------------------------- As there might be firmware fixes for performance enhancement in latest version of firmware image, the firmware update might be needed for getting high performance. -Check with the local Intel's Network Division application engineers for firmware updates. +Check the Intel support website for the latest firmware updates. Users should consult the release notes specific to a DPDK release to identify the validated firmware version for a NIC using the i40e driver. @@ -577,23 +620,13 @@ Use 16 Bytes RX Descriptor Size As i40e PMD supports both 16 and 32 bytes RX descriptor sizes, and 16 bytes size can provide helps to high performance of small packets. Configuration of ``CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC`` in config files can be changed to use 16 bytes size RX descriptors. -High Performance and per Packet Latency Tradeoff -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Due to the hardware design, the interrupt signal inside NIC is needed for per -packet descriptor write-back. The minimum interval of interrupts could be set -at compile time by ``CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL`` in configuration files. -Though there is a default configuration, the interval could be tuned by the -users with that configuration item depends on what the user cares about more, -performance or per packet latency. - Example of getting best performance with l3fwd example ------------------------------------------------------ -The following is an example of running the DPDK ``l3fwd`` sample application to get high performance with an -Intel server platform and Intel XL710 NICs. +The following is an example of running the DPDK ``l3fwd`` sample application to get high performance on a +server with Intel Xeon processors and Intel Ethernet CNA XL710. -The example scenario is to get best performance with two Intel XL710 40GbE ports. +The example scenario is to get best performance with two Intel Ethernet CNA XL710 40GbE ports.
See :numref:`figure_intel_perf_test_setup` for the performance test setup. .. _figure_intel_perf_test_setup: @@ -603,9 +636,9 @@ See :numref:`figure_intel_perf_test_setup` for the performance test setup. Performance Test Setup -1. Add two Intel XL710 NICs to the platform, and use one port per card to get best performance. - The reason for using two NICs is to overcome a PCIe Gen3's limitation since it cannot provide 80G bandwidth - for two 40G ports, but two different PCIe Gen3 x8 slot can. +1. Add two Intel Ethernet CNA XL710 adapters to the platform, and use one port per card to get best performance. + The reason for using two NICs is to overcome a PCIe v3.0 limitation since it cannot provide 80GbE bandwidth + for two 40GbE ports, but two different PCIe v3.0 x8 slots can. Refer to the sample NICs output above, then we can select ``82:00.0`` and ``85:00.0`` as test ports:: 82:00.0 Ethernet [0200]: Intel XL710 for 40GbE QSFP+ [8086:1583] @@ -621,7 +654,7 @@ See :numref:`figure_intel_perf_test_setup` for the performance test setup. 4. Bind these two ports to igb_uio. -5. As to XL710 40G port, we need at least two queue pairs to achieve best performance, then two queues per port +5. For an Intel Ethernet CNA XL710 40GbE port, we need at least two queue pairs to achieve best performance, so two queues per port will be required, and each queue pair will need a dedicated CPU core for receiving/transmitting packets. 6. The DPDK sample application ``l3fwd`` will be used for performance testing, with using two ports for bi-directional forwarding. diff --git a/doc/guides/nics/ifc.rst b/doc/guides/nics/ifc.rst new file mode 100644 index 00000000..48f9adf1 --- /dev/null +++ b/doc/guides/nics/ifc.rst @@ -0,0 +1,96 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2018 Intel Corporation. + +IFCVF vDPA driver +================= + +The IFCVF vDPA (vhost data path acceleration) driver provides support for the +Intel FPGA 100G VF (IFCVF). IFCVF's datapath is virtio ring compatible; it +works as a HW vhost backend which can send/receive packets to/from virtio +directly by DMA. Besides, it supports dirty page logging and device state +report/restore, so this driver enables its vDPA functionality. + + +Pre-Installation Configuration +------------------------------ + +Config File Options +~~~~~~~~~~~~~~~~~~~ + +The following option can be modified in the ``config`` file. + +- ``CONFIG_RTE_LIBRTE_IFCVF_VDPA_PMD`` (default ``y`` for linux) + + Toggle compilation of the ``librte_ifcvf_vdpa`` driver. + + +IFCVF vDPA Implementation +------------------------- + +IFCVF's vendor ID and device ID are the same as those of the virtio net PCI +device, with its own specific subsystem vendor ID and device ID. To let the device be +probed by the IFCVF driver, adding the "vdpa=1" parameter specifies that this +device is to be used in vDPA mode rather than polling mode; the virtio PMD will +skip the device when it detects this parameter. + +Different VF devices serve different virtio frontends which are in different +VMs, so each VF needs to have its own DMA address translation service. During +the driver probe a new container is created for this device; with this +container the vDPA driver can program the DMA remapping table with the VM's memory +region information.
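As a minimal sketch of how this parameter is passed (the PCI address ``0000:06:00.3`` is a hypothetical IFC VF, not taken from this patch), the device can be whitelisted in vDPA mode through the EAL options of a DPDK application: .. code-block:: console ./dpdk-application -w 0000:06:00.3,vdpa=1 Without the ``vdpa=1`` devargs, the same VF would instead be picked up by the virtio PMD in polling mode.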
+ +Key IFCVF vDPA driver ops +~~~~~~~~~~~~~~~~~~~~~~~~~ + +- ifcvf_dev_config: + Enable VF data path with virtio information provided by vhost lib, including + IOMMU programming to enable VF DMA to VM's memory, VFIO interrupt setup to + route HW interrupts to the virtio driver, creating a notify relay thread to translate + the virtio driver's kick to an MMIO write onto HW, and HW queue configuration. + + This function gets called to set up HW data path backend when virtio driver + in VM gets ready. + +- ifcvf_dev_close: + Revoke all the setup in ifcvf_dev_config. + + This function gets called when virtio driver stops device in VM. + +To create a vhost port with IFC VF +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Create a vhost socket and assign a VF's device ID to this socket via + vhost API. When QEMU vhost connection gets ready, the assigned VF will + get configured automatically. + + +Features +-------- + +Features of the IFCVF driver are: + +- Compatibility with virtio 0.95 and 1.0. + + +Prerequisites +------------- + +- Platform with IOMMU feature. IFC VF needs the address translation service to + Rx/Tx directly with the virtio driver in the VM. + + +Limitations +----------- + +Dependency on vfio-pci +~~~~~~~~~~~~~~~~~~~~~~ + +The vDPA driver needs to set up VF MSIX interrupts; each queue's interrupt vector +is mapped to a callfd associated with a virtio ring. Currently only vfio-pci +allows multiple interrupts, so the IFCVF driver is dependent on vfio-pci. + +Live Migration with VIRTIO_NET_F_GUEST_ANNOUNCE +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +IFC VF doesn't support RARP packet generation; a virtio frontend supporting the +VIRTIO_NET_F_GUEST_ANNOUNCE feature can help to do that. diff --git a/doc/guides/nics/img/szedata2_nfb200g_architecture.svg b/doc/guides/nics/img/szedata2_nfb200g_architecture.svg new file mode 100644 index 00000000..e152e4a8 --- /dev/null +++ b/doc/guides/nics/img/szedata2_nfb200g_architecture.svg @@ -0,0 +1,214 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<svg + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + id="svg2" + stroke-miterlimit="10" + stroke-linecap="square" + stroke="none" + fill="none" + viewBox="0.0 0.0 568.7322834645669 352.3937007874016" + version="1.1"> + <metadata + id="metadata65"> + <rdf:RDF> + <cc:Work + rdf:about=""> + <dc:format>image/svg+xml</dc:format> + <dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> + <dc:title></dc:title> + </cc:Work> + </rdf:RDF> + </metadata> + <defs + id="defs63" /> + <clipPath + id="p.0"> + <path + id="path5" + clip-rule="nonzero" + d="m0 0l568.7323 0l0 352.3937l-568.7323 0l0 -352.3937z" /> + </clipPath> + <g + id="g7" + clip-path="url(#p.0)"> + <path + id="path9" + fill-rule="evenodd" + d="m0 0l568.7323 0l0 352.3937l-568.7323 0z" + fill-opacity="0.0" + fill="#000000" /> + <path + id="path11" + d="m 40.564137,14.365075 254.362203,0 0,131.842535 -254.362203,0 z" + style="fill:#47c3d3;fill-rule:evenodd" /> + <path + id="path15" + d="m 54.075948,146.2076 227.338592,0 0,32.94488 -227.338592,0 z" + style="fill:#c2c2c2;fill-rule:evenodd" /> + <path + id="path19" + d="m 321.90535,146.2076 227.33856,0 0,32.94488 -227.33856,0 z" + style="fill:#c2c2c2;fill-rule:evenodd" /> + <path + id="path23" + d="m 440.30217,146.24338 -11.82364,-20.50632 6.86313,0 0,-44.550399 -120.12924,0 0,6.938519 -20.28345,-11.953539 20.28345,-11.953547 0,6.93852
130.0503,0 0,54.580446 6.8631,0 z" + style="fill:#9a9a9a;fill-rule:evenodd" /> + <path + id="path25" + d="m 112.39353,263.09765 0,0 c 0,-8.08875 6.55722,-14.64597 14.64597,-14.64597 l 58.58208,0 0,0 c 3.88435,0 7.60962,1.54305 10.35626,4.28971 2.74666,2.74664 4.28971,6.47189 4.28971,10.35626 l 0,58.58209 c 0,8.08875 -6.55722,14.64597 -14.64597,14.64597 l -58.58208,0 c -8.08875,0 -14.64597,-6.55722 -14.64597,-14.64597 z" + style="fill:#c2c2c2;fill-rule:evenodd" /> + <path + id="path29" + d="m 391.63763,263.09765 0,0 c 0,-8.08875 6.55722,-14.64597 14.64597,-14.64597 l 58.58209,0 0,0 c 3.88437,0 7.60962,1.54305 10.35626,4.28971 2.74664,2.74664 4.2897,6.47189 4.2897,10.35626 l 0,58.58209 c 0,8.08875 -6.55722,14.64597 -14.64596,14.64597 l -58.58209,0 c -8.08875,0 -14.64597,-6.55722 -14.64597,-14.64597 z" + style="fill:#c2c2c2;fill-rule:evenodd" /> + <path + id="path33" + d="m 135.20981,199.01075 19.85826,-19.85826 19.85828,19.85826 -9.92914,0 0,29.5748 9.92914,0 -19.85828,19.85827 -19.85826,-19.85827 9.92914,0 0,-29.5748 z" + style="fill:#9a9a9a;fill-rule:evenodd" /> + <path + id="path35" + d="m 415.71635,199.01064 19.85828,-19.85826 19.85827,19.85826 -9.92914,0 0,29.57481 9.92914,0 -19.85827,19.85826 -19.85828,-19.85826 9.92914,0 0,-29.57481 z" + style="fill:#9a9a9a;fill-rule:evenodd" /> + <path + id="path37" + d="m 15.205,31.273212 74.362206,0 0,32.944885 -74.362206,0 z" + style="fill:#ff8434;fill-rule:evenodd" /> + <path + id="path41" + d="m 16.05531,80.231216 74.3622,0 0,32.944884 -74.3622,0 z" + style="fill:#ff8434;fill-rule:evenodd" /> + <path + id="path45" + d="m 275.44377,174.07111 0,111.55905 -37.16536,0 0,-111.55905 z" + style="fill:#ff8434;fill-rule:evenodd" /> + <path + id="path49" + d="m 97.923493,174.07111 0,111.55905 -37.16535,0 0,-111.55905 z" + style="fill:#ff8434;fill-rule:evenodd" /> + <path + id="path53" + d="m 366.27543,174.07111 0,111.55905 -37.16537,0 0,-111.55905 z" + style="fill:#ff8434;fill-rule:evenodd" /> + <path + id="path57" + d="m 542.0392,174.07111 0,111.55905 -37.16534,0 0,-111.55905 z" + style="fill:#ff8434;fill-rule:evenodd" /> + <text + id="text4480" + y="54.570911" + x="24.425898" + style="font-style:normal;font-weight:normal;font-size:18.75px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + xml:space="preserve"><tspan + y="54.570911" + x="24.425898" + id="tspan4482">ETH 0</tspan></text> + <text + id="text4480-3" + y="103.53807" + x="25.51882" + style="font-style:normal;font-weight:normal;font-size:20px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + xml:space="preserve"><tspan + style="font-size:18.75px" + id="tspan4502" + y="103.53807" + x="25.51882">ETH 1</tspan></text> + <text + id="text4480-7" + y="86.200645" + x="103.15979" + style="font-style:normal;font-weight:normal;font-size:18.75px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + xml:space="preserve"><tspan + id="tspan4524" + y="86.200645" + x="103.15979">NFB-200G2QL card</tspan></text> + <text + id="text4480-7-3" + y="169.2041" + x="92.195312" + 
style="font-style:normal;font-weight:normal;font-size:18.75px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + xml:space="preserve"><tspan + style="font-size:18.75px" + id="tspan4546" + y="169.2041" + x="92.195312">PCI-E master slot</tspan></text> + <text + id="text4480-7-3-6" + y="169.20409" + x="367.98856" + style="font-style:normal;font-weight:normal;font-size:18.75px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + xml:space="preserve"><tspan + style="font-size:18.75px" + id="tspan4546-2" + y="169.20409" + x="367.98856">PCI-E slave slot</tspan></text> + <text + transform="matrix(0,1,-1,0,0,0)" + id="text4480-3-9" + y="-73.591309" + x="182.29367" + style="font-style:normal;font-weight:normal;font-size:20px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + xml:space="preserve"><tspan + style="font-size:18.75px" + id="tspan4502-1" + y="-73.591309" + x="182.29367">QUEUE 0</tspan></text> + <text + transform="matrix(0,1.0000002,-0.99999976,0,0,0)" + id="text4480-3-9-2" + y="-251.11163" + x="182.29283" + style="font-style:normal;font-weight:normal;font-size:20px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + xml:space="preserve"><tspan + style="font-size:18.75px" + id="tspan4502-1-7" + y="-251.11163" + x="182.29283">QUEUE 15</tspan></text> + <text + transform="matrix(0,1,-1,0,0,0)" + id="text4480-3-9-2-0" + y="-341.94324" + x="182.29311" + style="font-style:normal;font-weight:normal;font-size:20px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + xml:space="preserve"><tspan + style="font-size:18.75px" + id="tspan4502-1-7-9" + y="-341.94324" + x="182.29311">QUEUE 16</tspan></text> + <text + transform="matrix(0,1,-1,0,0,0)" + id="text4480-3-9-2-3" + y="-517.70703" + x="182.29356" + style="font-style:normal;font-weight:normal;font-size:20px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + xml:space="preserve"><tspan + style="font-size:18.75px" + id="tspan4502-1-7-6" + y="-517.70703" + x="182.29356">QUEUE 31</tspan></text> + <text + id="text4480-3-0" + y="299.21396" + x="128.3978" + style="font-style:normal;font-weight:normal;font-size:20px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + xml:space="preserve"><tspan + style="font-size:18.75px" + id="tspan4502-6" + y="299.21396" + x="128.3978">CPU 0</tspan></text> + <text + id="text4480-3-0-2" + y="299.21396" + x="407.88452" + 
style="font-style:normal;font-weight:normal;font-size:20px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + xml:space="preserve"><tspan + style="font-size:18.75px" + id="tspan4502-6-6" + y="299.21396" + x="407.88452">CPU 1</tspan></text> + </g> +</svg> diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst index 59419f43..59f6063d 100644 --- a/doc/guides/nics/index.rst +++ b/doc/guides/nics/index.rst @@ -13,6 +13,7 @@ Network Interface Controller Drivers build_and_test ark avp + axgbe bnx2x bnxt cxgbe @@ -23,6 +24,7 @@ Network Interface Controller Drivers enic fm10k i40e + ifc igb ixgbe intel_vf @@ -30,11 +32,13 @@ Network Interface Controller Drivers liquidio mlx4 mlx5 - mrvl + mvpp2 + netvsc nfp octeontx qede sfc_efx + softnic szedata2 tap thunderx diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst index 0c660f29..16d63902 100644 --- a/doc/guides/nics/ixgbe.rst +++ b/doc/guides/nics/ixgbe.rst @@ -68,15 +68,15 @@ Other features are supported using optional MACRO configuration. They include: * HW extend dual VLAN -To guarantee the constraint, configuration flags in dev_conf.rxmode will be checked: +To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be checked: -* hw_vlan_strip +* DEV_RX_OFFLOAD_VLAN_STRIP -* hw_vlan_extend +* DEV_RX_OFFLOAD_VLAN_EXTEND -* hw_ip_checksum +* DEV_RX_OFFLOAD_CHECKSUM -* header_split +* DEV_RX_OFFLOAD_HEADER_SPLIT * dev_conf @@ -102,20 +102,9 @@ Consequently, by default the tx_rs_thresh value is in the range 32 to 64. Feature not Supported by TX Vector PMD ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -TX vPMD only works when txq_flags is set to IXGBE_SIMPLE_FLAGS. +TX vPMD only works when offloads is set to 0 -This means that it does not support TX multi-segment, VLAN offload and TX csum offload. -The following MACROs are used for these three features: - -* ETH_TXQ_FLAGS_NOMULTSEGS - -* ETH_TXQ_FLAGS_NOVLANOFFL - -* ETH_TXQ_FLAGS_NOXSUMSCTP - -* ETH_TXQ_FLAGS_NOXSUMUDP - -* ETH_TXQ_FLAGS_NOXSUMTCP +This means that it does not support any TX offload. Application Programming Interface --------------------------------- @@ -130,13 +119,13 @@ l3fwd ~~~~~ When running l3fwd with vPMD, there is one thing to note. -In the configuration, ensure that port_conf.rxmode.hw_ip_checksum=0. +In the configuration, ensure that DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set. Otherwise, by default, RX vPMD is disabled. load_balancer ~~~~~~~~~~~~~ -As in the case of l3fwd, set configure port_conf.rxmode.hw_ip_checksum=0 to enable vPMD. +As in the case of l3fwd, to enable vPMD, do NOT set DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads. In addition, for improved performance, use -bsz "(32,32),(64,64),(32,32)" in load_balancer to avoid using the default burst size of 144. @@ -228,6 +217,20 @@ For more details see the IPsec Security Gateway Sample Application and Security library documentation. +Virtual Function Port Representors +---------------------------------- +The IXGBE PF PMD supports the creation of VF port representors for the control +and monitoring of IXGBE virtual function devices. Each port representor +corresponds to a single virtual function of that device. 
Using the ``devargs`` +option ``representor`` the user can specify which virtual functions to create +port representors for on initialization of the PF PMD by passing the IDs of +the required VFs:: + + -w DBDF,representor=[0,1,4] + +Currently hot-plugging of representor ports is not supported, so all required +representors must be specified on the creation of the PF. + Supported Chipsets and NICs --------------------------- diff --git a/doc/guides/nics/liquidio.rst b/doc/guides/nics/liquidio.rst index 61485ade..87b42cdc 100644 --- a/doc/guides/nics/liquidio.rst +++ b/doc/guides/nics/liquidio.rst @@ -201,6 +201,4 @@ Number of descriptors for Rx/Tx ring should be in the range 128 to 512. CRC striping ~~~~~~~~~~~~ -LiquidIO adapters strip ethernet FCS of every packet coming to the host -interface. So, CRC will be stripped even when the ``rxmode.hw_strip_crc`` -member is set to 0 in ``struct rte_eth_conf``. +LiquidIO adapters strip the Ethernet FCS of every packet coming to the host interface. diff --git a/doc/guides/nics/mlx4.rst b/doc/guides/nics/mlx4.rst index 98b97166..4a57c7a6 100644 --- a/doc/guides/nics/mlx4.rst +++ b/doc/guides/nics/mlx4.rst @@ -1,32 +1,6 @@ -.. BSD LICENSE +.. SPDX-License-Identifier: BSD-3-Clause Copyright 2012 6WIND S.A. - Copyright 2015 Mellanox - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of 6WIND S.A. nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + Copyright 2015 Mellanox Technologies, Ltd MLX4 poll mode driver library ============================= @@ -98,9 +72,10 @@ These options can be modified in the ``.config`` file. missing with ``ldd(1)``. It works by moving these dependencies to a purpose-built rdma-core "glue" - plug-in, which must either be installed in ``CONFIG_RTE_EAL_PMD_PATH`` if - set, or in a standard location for the dynamic linker (e.g. ``/lib``) if - left to the default empty string (``""``). + plug-in which must either be installed in a directory whose name is based + on ``CONFIG_RTE_EAL_PMD_PATH`` suffixed with ``-glue`` if set, or in a + standard location for the dynamic linker (e.g. ``/lib``) if left to the + default empty string (``""``). This option has no performance impact.
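For instance (an illustrative layout only, not taken from this patch), with ``CONFIG_RTE_EAL_PMD_PATH="/usr/lib64/dpdk-pmds"`` the glue plug-in would be searched for under a directory such as: .. code-block:: console /usr/lib64/dpdk-pmds-glue/ with the exact glue library file name depending on the build.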
@@ -110,14 +85,6 @@ These options can be modified in the ``.config`` file. adds additional run-time checks and debugging messages at the cost of lower performance. -- ``CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE`` (default **8**) - - Maximum number of cached memory pools (MPs) per TX queue. Each MP from - which buffers are to be transmitted must be associated to memory regions - (MRs). This is a slow operation that must be cached. - - This value is always 1 for RX queues since they use a single MP. - Environment variables ~~~~~~~~~~~~~~~~~~~~~ @@ -168,6 +135,16 @@ below. following limitation: VLAN filtering is not supported with this mode. This is the recommended mode in case VLAN filter is not needed. +Limitations +----------- + +- CRC stripping is supported by default and always reported as "true". + The ability to enable/disable CRC stripping requires OFED version + 4.3-1.5.0.0 and above or rdma-core version v18 and above. + +- TSO (Transmit Segmentation Offload) is supported in OFED version + 4.4 and above. + Prerequisites ------------- @@ -236,7 +213,7 @@ Current RDMA core package and Linux kernel (recommended) Mellanox OFED as a fallback ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- `Mellanox OFED`_ version: **4.2, 4.3**. +- `Mellanox OFED`_ version: **4.3, 4.4**. - firmware version: **2.42.5000** and above. .. _`Mellanox OFED`: http://www.mellanox.com/page/products_dyn?product_family=26&mtag=linux_sw_drivers @@ -396,6 +373,12 @@ Performance tuning The XXX can be different on different systems. Make sure to configure according to the setpci output. +6. To minimize overhead of searching Memory Regions: + + - '--socket-mem' is recommended to pin memory by predictable amount. + - Configure per-lcore cache when creating Mempools for packet buffer. + - Refrain from dynamically allocating/freeing memory in run-time. + Usage example ------------- diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst index 0e6e525c..52e1213c 100644 --- a/doc/guides/nics/mlx5.rst +++ b/doc/guides/nics/mlx5.rst @@ -1,40 +1,14 @@ -.. BSD LICENSE +.. SPDX-License-Identifier: BSD-3-Clause Copyright 2015 6WIND S.A. - Copyright 2015 Mellanox - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of 6WIND S.A. nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + Copyright 2015 Mellanox Technologies, Ltd MLX5 poll mode driver ===================== The MLX5 poll mode driver library (**librte_pmd_mlx5**) provides support -for **Mellanox ConnectX-4**, **Mellanox ConnectX-4 Lx** and **Mellanox -ConnectX-5** families of 10/25/40/50/100 Gb/s adapters as well as their -virtual functions (VF) in SR-IOV context. +for **Mellanox ConnectX-4**, **Mellanox ConnectX-4 Lx**, **Mellanox +ConnectX-5** and **Mellanox Bluefield** families of 10/25/40/50/100 Gb/s +adapters as well as their virtual functions (VF) in SR-IOV context. Information and documentation about these adapters can be found on the `Mellanox website <http://www.mellanox.com>`__. Help is also provided by the @@ -75,7 +49,7 @@ libibverbs. Features -------- -- Multi arch support: x86_64, POWER8, ARMv8. +- Multi arch support: x86_64, POWER8, ARMv8, i686. - Multiple TX and RX queues. - Support for scattered TX and RX frames. - IPv4, IPv6, TCPv4, TCPv6, UDPv4 and UDPv6 RSS on any number of queues. @@ -95,17 +69,17 @@ Features - Multiple process. - KVM and VMware ESX SR-IOV modes are supported. - RSS hash result is supported. -- Hardware TSO. -- Hardware checksum TX offload for VXLAN and GRE. +- Hardware TSO for generic IP or UDP tunnel, including VXLAN and GRE. +- Hardware checksum Tx offload for generic IP or UDP tunnel, including VXLAN and GRE. - RX interrupts. - Statistics query including Basic, Extended and per queue. - Rx HW timestamp. +- Tunnel types: VXLAN, L3 VXLAN, VXLAN-GPE, GRE, MPLSoGRE, MPLSoUDP. +- Tunnel HW offloads: packet type, inner/outer RSS, IP and UDP checksum verification. Limitations ----------- -- Inner RSS for VXLAN frames is not supported yet. -- Hardware checksum RX offloads for VXLAN inner header are not supported yet. - For secondary process: - Forked secondary process not supported. @@ -131,11 +105,37 @@ Limitations - A multi segment packet must have less than 6 segments in case the Tx burst function is set to multi-packet send or Enhanced multi-packet send. Otherwise it must have less than 50 segments. + - Count action for RTE flow is **only supported in Mellanox OFED**. + - Flows with a VXLAN Network Identifier equal to 0 (or that ends up being equal to 0) are not supported. + - VXLAN TSO and checksum offloads are not supported on VM. +- L3 VXLAN and VXLAN-GPE tunnels cannot be supported together with MPLSoGRE and MPLSoUDP. + +- VF: flow rules created on VF devices can only match traffic targeted at the + configured MAC addresses (see ``rte_eth_dev_mac_addr_add()``). + +.. note:: + + MAC addresses not already present in the bridge table of the associated + kernel network device will be added and cleaned up by the PMD when closing + the device. In case of ungraceful program termination, some entries may + remain present and should be removed manually by other means. + +- When Multi-Packet Rx queue is configured (``mprq_en``), a Rx packet can be + externally attached to a user-provided mbuf with EXT_ATTACHED_MBUF set in + ol_flags.
As the mempool for the external buffer is managed by PMD, all the + Rx mbufs must be freed before the device is closed. Otherwise, the mempool of + the external buffers will be freed by PMD and the application which still + holds the external buffers may be corrupted. + +- If Multi-Packet Rx queue is configured (``mprq_en``) and Rx CQE compression is + enabled (``rxq_cqe_comp_en``) at the same time, RSS hash result is not fully + supported. Some Rx packets may not have PKT_RX_RSS_HASH. + Statistics ---------- @@ -171,9 +171,10 @@ These options can be modified in the ``.config`` file. missing with ``ldd(1)``. It works by moving these dependencies to a purpose-built rdma-core "glue" - plug-in, which must either be installed in ``CONFIG_RTE_EAL_PMD_PATH`` if - set, or in a standard location for the dynamic linker (e.g. ``/lib``) if - left to the default empty string (``""``). + plug-in which must either be installed in a directory whose name is based + on ``CONFIG_RTE_EAL_PMD_PATH`` suffixed with ``-glue`` if set, or in a + standard location for the dynamic linker (e.g. ``/lib``) if left to the + default empty string (``""``). This option has no performance impact. @@ -183,14 +184,6 @@ These options can be modified in the ``.config`` file. adds additional run-time checks and debugging messages at the cost of lower performance. -- ``CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE`` (default **8**) - - Maximum number of cached memory pools (MPs) per TX queue. Each MP from - which buffers are to be transmitted must be associated to memory regions - (MRs). This is a slow operation that must be cached. - - This value is always 1 for RX queues since they use a single MP. - Environment variables ~~~~~~~~~~~~~~~~~~~~~ @@ -250,8 +243,55 @@ Run-time configuration Supported on: - - x86_64 with ConnectX-4, ConnectX-4 LX and ConnectX-5. - - POWER8 and ARMv8 with ConnectX-4 LX and ConnectX-5. + - x86_64 with ConnectX-4, ConnectX-4 LX, ConnectX-5 and Bluefield. + - POWER8 and ARMv8 with ConnectX-4 LX, ConnectX-5 and Bluefield. + +- ``mprq_en`` parameter [int] + + A nonzero value enables configuring Multi-Packet Rx queues. Rx queue is + configured as Multi-Packet RQ if the total number of Rx queues is + ``rxqs_min_mprq`` or more and Rx scatter isn't configured. Disabled by + default. + + Multi-Packet Rx Queue (MPRQ a.k.a Striding RQ) can further save PCIe bandwidth + by posting a single large buffer for multiple packets. Instead of posting a + buffer per packet, one large buffer is posted in order to receive multiple + packets on the buffer. An MPRQ buffer consists of multiple fixed-size strides + and each stride receives one packet. MPRQ can improve throughput for + small-packet traffic. + + When MPRQ is enabled, max_rx_pkt_len can be larger than the size of + user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. PMD will + configure a stride size large enough to accommodate max_rx_pkt_len as long as + the device allows. Note that this can waste system memory compared to enabling Rx + scatter and multi-segment packet. + +- ``mprq_log_stride_num`` parameter [int] + + Log 2 of the number of strides for Multi-Packet Rx queue. Configuring more + strides can reduce PCIe traffic further. If the configured value is not in the + range of the device capability, the default value will be set with a warning + message. The default value is 4, which is 16 strides per buffer, valid only + if ``mprq_en`` is set. + + The size of the Rx queue should be bigger than the number of strides.
+ +- ``mprq_max_memcpy_len`` parameter [int] + + The maximum length of packet to memcpy in case of Multi-Packet Rx queue. Rx + packet is mem-copied to a user-provided mbuf if the size of Rx packet is less + than or equal to this parameter. Otherwise, PMD will attach the Rx packet to + the mbuf by external buffer attachment - ``rte_pktmbuf_attach_extbuf()``. + A mempool for external buffers will be allocated and managed by PMD. If Rx + packet is externally attached, the ol_flags field of the mbuf will have + EXT_ATTACHED_MBUF and this flag must be preserved. ``RTE_MBUF_HAS_EXTBUF()`` + checks the flag. The default value is 128, valid only if ``mprq_en`` is set. + +- ``rxqs_min_mprq`` parameter [int] + + Configure Rx queues as Multi-Packet RQ if the total number of Rx queues is + greater than or equal to this value. The default value is 12, valid only if + ``mprq_en`` is set. - ``txq_inline`` parameter [int] @@ -270,34 +310,35 @@ Run-time configuration This option should be used in combination with ``txq_inline`` above. - On ConnectX-4, ConnectX-4 LX and ConnectX-5 without Enhanced MPW: + On ConnectX-4, ConnectX-4 LX, ConnectX-5 and Bluefield without + Enhanced MPW: - Disabled by default. - In case ``txq_inline`` is set recommendation is 4. - On ConnectX-5 with Enhanced MPW: + On ConnectX-5 and Bluefield with Enhanced MPW: - Set to 8 by default. - ``txq_mpw_en`` parameter [int] A nonzero value enables multi-packet send (MPS) for ConnectX-4 Lx and - enhanced multi-packet send (Enhanced MPS) for ConnectX-5. MPS allows the - TX burst function to pack up multiple packets in a single descriptor - session in order to save PCI bandwidth and improve performance at the - cost of a slightly higher CPU usage. When ``txq_inline`` is set along - with ``txq_mpw_en``, TX burst function tries to copy entire packet data - on to TX descriptor instead of including pointer of packet only if there - is enough room remained in the descriptor. ``txq_inline`` sets - per-descriptor space for either pointers or inlined packets. In addition, - Enhanced MPS supports hybrid mode - mixing inlined packets and pointers - in the same descriptor. + enhanced multi-packet send (Enhanced MPS) for ConnectX-5 and Bluefield. + MPS allows the TX burst function to pack up multiple packets in a + single descriptor session in order to save PCI bandwidth and improve + performance at the cost of a slightly higher CPU usage. When + ``txq_inline`` is set along with ``txq_mpw_en``, TX burst function tries + to copy entire packet data on to TX descriptor instead of including + pointer of packet only if there is enough room remained in the + descriptor. ``txq_inline`` sets per-descriptor space for either pointers + or inlined packets. In addition, Enhanced MPS supports hybrid mode - + mixing inlined packets and pointers in the same descriptor. This option cannot be used with certain offloads such as ``DEV_TX_OFFLOAD_TCP_TSO, DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO, DEV_TX_OFFLOAD_VLAN_INSERT``. When those offloads are requested the MPS send function will not be used. - It is currently only supported on the ConnectX-4 Lx and ConnectX-5 + It is currently only supported on the ConnectX-4 Lx, ConnectX-5 and Bluefield
- ``txq_mpw_hdr_dseg_en`` parameter [int] @@ -318,14 +359,14 @@ Run-time configuration - ``tx_vec_en`` parameter [int] - A nonzero value enables Tx vector on ConnectX-5 only NIC if the number of + A nonzero value enables Tx vector on ConnectX-5 and Bluefield NICs if the number of global Tx queues on the port is lesser than MLX5_VPMD_MIN_TXQS. This option cannot be used with certain offloads such as ``DEV_TX_OFFLOAD_TCP_TSO, DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO, DEV_TX_OFFLOAD_VLAN_INSERT``. When those offloads are requested the MPS send function will not be used. - Enabled by default on ConnectX-5. + Enabled by default on ConnectX-5 and Bluefield. - ``rx_vec_en`` parameter [int] @@ -334,6 +375,53 @@ Run-time configuration Enabled by default. +- ``vf_nl_en`` parameter [int] + + A nonzero value enables Netlink requests from the VF to add/remove MAC + addresses or/and enable/disable promiscuous/all multicast on the Netdevice. + Otherwise the relevant configuration must be run with Linux iproute2 tools. + This is a prerequisite to receive this kind of traffic. + + Enabled by default, valid only on VF devices ignored otherwise. + +- ``l3_vxlan_en`` parameter [int] + + A nonzero value allows L3 VXLAN and VXLAN-GPE flow creation. To enable + L3 VXLAN or VXLAN-GPE, users has to configure firmware and enable this + parameter. This is a prerequisite to receive this kind of traffic. + + Disabled by default. + +- ``representor`` parameter [list] + + This parameter can be used to instantiate DPDK Ethernet devices from + existing port (or VF) representors configured on the device. + + It is a standard parameter whose format is described in + :ref:`ethernet_device_standard_device_arguments`. + + For instance, to probe port representors 0 through 2:: + + representor=[0-2] + +Firmware configuration +~~~~~~~~~~~~~~~~~~~~~~ + +- L3 VXLAN and VXLAN-GPE destination UDP port + + .. code-block:: console + + mlxconfig -d <mst device> set IP_OVER_VXLAN_EN=1 + mlxconfig -d <mst device> set IP_OVER_VXLAN_PORT=<udp dport> + + Verify configurations are set: + + .. code-block:: console + + mlxconfig -d <mst device> query | grep IP_OVER_VXLAN + IP_OVER_VXLAN_EN True(1) + IP_OVER_VXLAN_PORT <udp dport> + Prerequisites ------------- @@ -353,12 +441,19 @@ DPDK and must be installed separately: - **libmlx5** - Low-level user space driver library for Mellanox ConnectX-4/ConnectX-5 - devices, it is automatically loaded by libibverbs. + Low-level user space driver library for Mellanox + ConnectX-4/ConnectX-5/Bluefield devices, it is automatically loaded + by libibverbs. This library basically implements send/receive calls to the hardware queues. +- **libmnl** + + Minimalistic Netlink library mainly relied on to manage E-Switch flow + rules (i.e. those with the "transfer" attribute and typically involving + port representors). + - **Kernel modules** They provide the kernel-side Verbs API and low level device drivers that @@ -368,15 +463,16 @@ DPDK and must be installed separately: Unlike most other PMDs, these modules must remain loaded and bound to their devices: - - mlx5_core: hardware driver managing Mellanox ConnectX-4/ConnectX-5 - devices and related Ethernet kernel network devices. + - mlx5_core: hardware driver managing Mellanox + ConnectX-4/ConnectX-5/Bluefield devices and related Ethernet kernel + network devices. - mlx5_ib: InifiniBand device driver. - ib_uverbs: user space driver for Verbs (entry point for libibverbs). 
- **Firmware update** - Mellanox OFED releases include firmware updates for ConnectX-4/ConnectX-5 - adapters. + Mellanox OFED releases include firmware updates for + ConnectX-4/ConnectX-5/Bluefield adapters. Because each release provides new features, these updates must be applied to match the kernel modules and libraries they come with. @@ -399,6 +495,10 @@ RMDA Core with Linux Kernel - Minimal kernel version : v4.14 or the most recent 4.14-rc (see `Linux installation documentation`_) - Minimal rdma-core version: v15+ commit 0c5f5765213a ("Merge pull request #227 from yishaih/tm") (see `RDMA Core installation documentation`_) +- When building for i686 use: + + - rdma-core version 18.0 or above built with 32bit support. + - Kernel version 4.14.41 or above. .. _`Linux installation documentation`: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/plain/Documentation/admin-guide/README.rst .. _`RDMA Core installation documentation`: https://raw.githubusercontent.com/linux-rdma/rdma-core/master/README.md @@ -406,13 +506,14 @@ RMDA Core with Linux Kernel Mellanox OFED ^^^^^^^^^^^^^ -- Mellanox OFED version: **4.2, 4.3**. +- Mellanox OFED version: **4.3, 4.4**. - firmware version: - ConnectX-4: **12.21.1000** and above. - ConnectX-4 Lx: **14.21.1000** and above. - ConnectX-5: **16.21.1000** and above. - ConnectX-5 Ex: **16.21.1000** and above. + - Bluefield: **18.99.3950** and above. While these libraries and kernel modules are available on OpenFabrics Alliance's `website <https://www.openfabrics.org/>`__ and provided by package @@ -431,6 +532,19 @@ required from that distribution. this DPDK release was developed and tested against is strongly recommended. Please check the `prerequisites`_. +Libmnl +^^^^^^ + +Minimal version for libmnl is **1.0.3**. + +As a dependency of the **iproute2** suite, this library is often installed +by default. It is otherwise readily available through standard system +packages. + +Its development headers must be installed in order to compile this PMD. +These packages are usually named **libmnl-dev** or **libmnl-devel** +depending on the Linux distribution. + Supported NICs -------------- @@ -603,6 +717,12 @@ Performance tuning The XXX can be different on different systems. Make sure to configure according to the setpci output. +7. To minimize overhead of searching Memory Regions: + + - '--socket-mem' is recommended to pin memory by predictable amount. + - Configure per-lcore cache when creating Mempools for packet buffer. + - Refrain from dynamically allocating/freeing memory in run-time. + Notes for testpmd ----------------- @@ -624,7 +744,7 @@ Usage example ------------- This section demonstrates how to launch **testpmd** with Mellanox -ConnectX-4/ConnectX-5 devices managed by librte_pmd_mlx5. +ConnectX-4/ConnectX-5/Bluefield devices managed by librte_pmd_mlx5. #. Load the kernel modules: diff --git a/doc/guides/nics/mrvl.rst b/doc/guides/nics/mrvl.rst deleted file mode 100644 index b7f32921..00000000 --- a/doc/guides/nics/mrvl.rst +++ /dev/null @@ -1,275 +0,0 @@ -.. BSD LICENSE - Copyright(c) 2017 Marvell International Ltd. - Copyright(c) 2017 Semihalf. - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -.. _mrvl_poll_mode_driver: - -MRVL Poll Mode Driver -====================== - -The MRVL PMD (librte_pmd_mrvl) provides poll mode driver support -for the Marvell PPv2 (Packet Processor v2) 1/10 Gbps adapter. - -Detailed information about SoCs that use PPv2 can be obtained here: - -* https://www.marvell.com/embedded-processors/armada-70xx/ -* https://www.marvell.com/embedded-processors/armada-80xx/ - -.. Note:: - - Due to external dependencies, this driver is disabled by default. It must - be enabled manually by setting relevant configuration option manually. - Please refer to `Config File Options`_ section for further details. - - -Features --------- - -Features of the MRVL PMD are: - -- Speed capabilities -- Link status -- Queue start/stop -- MTU update -- Jumbo frame -- Promiscuous mode -- Allmulticast mode -- Unicast MAC filter -- Multicast MAC filter -- RSS hash -- VLAN filter -- CRC offload -- L3 checksum offload -- L4 checksum offload -- Packet type parsing -- Basic stats -- QoS - - -Limitations ------------ - -- Number of lcores is limited to 9 by MUSDK internal design. If more lcores - need to be allocated, locking will have to be considered. Number of available - lcores can be changed via ``MRVL_MUSDK_HIFS_RESERVED`` define in - ``mrvl_ethdev.c`` source file. - -- Flushing vlans added for filtering is not possible due to MUSDK missing - functionality. Current workaround is to reset board so that PPv2 has a - chance to start in a sane state. - - -Prerequisites -------------- - -- Custom Linux Kernel sources - - .. code-block:: console - - git clone https://github.com/MarvellEmbeddedProcessors/linux-marvell.git -b linux-4.4.52-armada-17.10 - -- Out of tree `mvpp2x_sysfs` kernel module sources - - .. code-block:: console - - git clone https://github.com/MarvellEmbeddedProcessors/mvpp2x-marvell.git -b mvpp2x-armada-17.10 - -- MUSDK (Marvell User-Space SDK) sources - - .. code-block:: console - - git clone https://github.com/MarvellEmbeddedProcessors/musdk-marvell.git -b musdk-armada-17.10 - - MUSDK is a light-weight library that provides direct access to Marvell's - PPv2 (Packet Processor v2). Alternatively prebuilt MUSDK library can be - requested from `Marvell Extranet <https://extranet.marvell.com>`_. Once - approval has been granted, library can be found by typing ``musdk`` in - the search box. 
- - MUSDK must be configured with the following features: - - .. code-block:: console - - --enable-bpool-dma=64 - -- DPDK environment - - Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup - DPDK environment. - - -Config File Options -------------------- - -The following options can be modified in the ``config`` file. - -- ``CONFIG_RTE_LIBRTE_MRVL_PMD`` (default ``n``) - - Toggle compilation of the librte_pmd_mrvl driver. - - -QoS Configuration ------------------ - -QoS configuration is done through external configuration file. Path to the -file must be given as `cfg` in driver's vdev parameter list. - -Configuration syntax -~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: console - - [port <portnum> default] - default_tc = <default_tc> - mapping_priority = <mapping_priority> - - [port <portnum> tc <traffic_class>] - rxq = <rx_queue_list> - pcp = <pcp_list> - dscp = <dscp_list> - - [port <portnum> tc <traffic_class>] - rxq = <rx_queue_list> - pcp = <pcp_list> - dscp = <dscp_list> - -Where: - -- ``<portnum>``: DPDK Port number (0..n). - -- ``<default_tc>``: Default traffic class (e.g. 0) - -- ``<mapping_priority>``: QoS priority for mapping (`ip`, `vlan`, `ip/vlan` or `vlan/ip`). - -- ``<traffic_class>``: Traffic Class to be configured. - -- ``<rx_queue_list>``: List of DPDK RX queues (e.g. 0 1 3-4) - -- ``<pcp_list>``: List of PCP values to handle in particular TC (e.g. 0 1 3-4 7). - -- ``<dscp_list>``: List of DSCP values to handle in particular TC (e.g. 0-12 32-48 63). - -Setting PCP/DSCP values for the default TC is not required. All PCP/DSCP -values not assigned explicitly to particular TC will be handled by the -default TC. - -Configuration file example -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: console - - [port 0 default] - default_tc = 0 - qos_mode = ip - - [port 0 tc 0] - rxq = 0 1 - - [port 0 tc 1] - rxq = 2 - pcp = 5 6 7 - dscp = 26-38 - - [port 1 default] - default_tc = 0 - qos_mode = vlan/ip - - [port 1 tc 0] - rxq = 0 - - [port 1 tc 1] - rxq = 1 2 - pcp = 5 6 7 - dscp = 26-38 - -Usage example -^^^^^^^^^^^^^ - -.. code-block:: console - - ./testpmd --vdev=eth_mrvl,iface=eth0,iface=eth2,cfg=/home/user/mrvl.conf \ - -c 7 -- -i -a --rxq=2 - - -Building DPDK -------------- - -Driver needs precompiled MUSDK library during compilation. - -.. code-block:: console - - export CROSS_COMPILE=<toolchain>/bin/aarch64-linux-gnu- - ./bootstrap - ./configure --host=aarch64-linux-gnu --enable-bpool-dma=64 - make install - -MUSDK will be installed to `usr/local` under current directory. -For the detailed build instructions please consult ``doc/musdk_get_started.txt``. - -Before the DPDK build process the environmental variable ``LIBMUSDK_PATH`` with -the path to the MUSDK installation directory needs to be exported. - -.. code-block:: console - - export LIBMUSDK_PATH=<musdk>/usr/local - export CROSS=aarch64-linux-gnu- - make config T=arm64-armv8a-linuxapp-gcc - sed -ri 's,(MRVL_PMD=)n,\1y,' build/.config - make - -Usage Example -------------- - -MRVL PMD requires extra out of tree kernel modules to function properly. -`musdk_uio` and `mv_pp_uio` sources are part of the MUSDK. Please consult -``doc/musdk_get_started.txt`` for the detailed build instructions. -For `mvpp2x_sysfs` please consult ``Documentation/pp22_sysfs.txt`` for the -detailed build instructions. - -.. code-block:: console - - insmod musdk_uio.ko - insmod mv_pp_uio.ko - insmod mvpp2x_sysfs.ko - -Additionally interfaces used by DPDK application need to be put up: - -.. 
code-block:: console - - ip link set eth0 up - ip link set eth2 up - -In order to run testpmd example application following command can be used: - -.. code-block:: console - - ./testpmd --vdev=eth_mrvl,iface=eth0,iface=eth2 -c 7 -- \ - --burst=128 --txd=2048 --rxd=1024 --rxq=2 --txq=2 --nb-cores=2 \ - -i -a --rss-udp diff --git a/doc/guides/nics/mvpp2.rst b/doc/guides/nics/mvpp2.rst new file mode 100644 index 00000000..0408752c --- /dev/null +++ b/doc/guides/nics/mvpp2.rst @@ -0,0 +1,520 @@ +.. BSD LICENSE + Copyright(c) 2017 Marvell International Ltd. + Copyright(c) 2017 Semihalf. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +.. _mvpp2_poll_mode_driver: + +MVPP2 Poll Mode Driver +====================== + +The MVPP2 PMD (librte_pmd_mvpp2) provides poll mode driver support +for the Marvell PPv2 (Packet Processor v2) 1/10 Gbps adapter. + +Detailed information about SoCs that use PPv2 can be obtained here: + +* https://www.marvell.com/embedded-processors/armada-70xx/ +* https://www.marvell.com/embedded-processors/armada-80xx/ + +.. Note:: + + Due to external dependencies, this driver is disabled by default. It must + be enabled manually by setting relevant configuration option manually. + Please refer to `Config File Options`_ section for further details. + + +Features +-------- + +Features of the MVPP2 PMD are: + +- Speed capabilities +- Link status +- Queue start/stop +- MTU update +- Jumbo frame +- Promiscuous mode +- Allmulticast mode +- Unicast MAC filter +- Multicast MAC filter +- RSS hash +- VLAN filter +- CRC offload +- L3 checksum offload +- L4 checksum offload +- Packet type parsing +- Basic stats +- Extended stats +- QoS +- RX flow control +- TX queue start/stop + + +Limitations +----------- + +- Number of lcores is limited to 9 by MUSDK internal design. If more lcores + need to be allocated, locking will have to be considered. Number of available + lcores can be changed via ``MRVL_MUSDK_HIFS_RESERVED`` define in + ``mrvl_ethdev.c`` source file. + +- Flushing vlans added for filtering is not possible due to MUSDK missing + functionality. 
Current workaround is to reset board so that PPv2 has a + chance to start in a sane state. + + +Prerequisites +------------- + +- Custom Linux Kernel sources + + .. code-block:: console + + git clone https://github.com/MarvellEmbeddedProcessors/linux-marvell.git -b linux-4.4.52-armada-17.10 + +- Out of tree `mvpp2x_sysfs` kernel module sources + + .. code-block:: console + + git clone https://github.com/MarvellEmbeddedProcessors/mvpp2x-marvell.git -b mvpp2x-armada-17.10 + +- MUSDK (Marvell User-Space SDK) sources + + .. code-block:: console + + git clone https://github.com/MarvellEmbeddedProcessors/musdk-marvell.git -b musdk-armada-17.10 + + MUSDK is a light-weight library that provides direct access to Marvell's + PPv2 (Packet Processor v2). Alternatively prebuilt MUSDK library can be + requested from `Marvell Extranet <https://extranet.marvell.com>`_. Once + approval has been granted, library can be found by typing ``musdk`` in + the search box. + + To get better understanding of the library one can consult documentation + available in the ``doc`` top level directory of the MUSDK sources. + + MUSDK must be configured with the following features: + + .. code-block:: console + + --enable-bpool-dma=64 + +- DPDK environment + + Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup + DPDK environment. + + +Config File Options +------------------- + +The following options can be modified in the ``config`` file. + +- ``CONFIG_RTE_LIBRTE_MVPP2_PMD`` (default ``n``) + + Toggle compilation of the librte mvpp2 driver. + + +QoS Configuration +----------------- + +QoS configuration is done through external configuration file. Path to the +file must be given as `cfg` in driver's vdev parameter list. + +Configuration syntax +~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: console + + [port <portnum> default] + default_tc = <default_tc> + mapping_priority = <mapping_priority> + policer_enable = <policer_enable> + token_unit = <token_unit> + color = <color_mode> + cir = <cir> + ebs = <ebs> + cbs = <cbs> + + rate_limit_enable = <rate_limit_enable> + rate_limit = <rate_limit> + burst_size = <burst_size> + + [port <portnum> tc <traffic_class>] + rxq = <rx_queue_list> + pcp = <pcp_list> + dscp = <dscp_list> + default_color = <default_color> + + [port <portnum> tc <traffic_class>] + rxq = <rx_queue_list> + pcp = <pcp_list> + dscp = <dscp_list> + + [port <portnum> txq <txqnum>] + sched_mode = <sched_mode> + wrr_weight = <wrr_weight> + + rate_limit_enable = <rate_limit_enable> + rate_limit = <rate_limit> + burst_size = <burst_size> + +Where: + +- ``<portnum>``: DPDK Port number (0..n). + +- ``<default_tc>``: Default traffic class (e.g. 0) + +- ``<mapping_priority>``: QoS priority for mapping (`ip`, `vlan`, `ip/vlan` or `vlan/ip`). + +- ``<traffic_class>``: Traffic Class to be configured. + +- ``<rx_queue_list>``: List of DPDK RX queues (e.g. 0 1 3-4) + +- ``<pcp_list>``: List of PCP values to handle in particular TC (e.g. 0 1 3-4 7). + +- ``<dscp_list>``: List of DSCP values to handle in particular TC (e.g. 0-12 32-48 63). + +- ``<policer_enable>``: Enable ingress policer. + +- ``<token_unit>``: Policer token unit (`bytes` or `packets`). + +- ``<color_mode>``: Policer color mode (`aware` or `blind`). + +- ``<cir>``: Committed information rate in unit of kilo bits per second (data rate) or packets per second. + +- ``<cbs>``: Committed burst size in unit of kilo bytes or number of packets. + +- ``<ebs>``: Excess burst size in unit of kilo bytes or number of packets. 
+ +- ``<default_color>``: Default color for specific tc. + +- ``<rate_limit_enable>``: Enables per port or per txq rate limiting. + +- ``<rate_limit>``: Committed information rate, in kilo bits per second. + +- ``<burst_size>``: Committed burst size, in kilo bytes. + +- ``<sched_mode>``: Egress scheduler mode (`wrr` or `sp`). + +- ``<wrr_weight>``: Txq weight. + +Setting PCP/DSCP values for the default TC is not required. All PCP/DSCP +values not assigned explicitly to particular TC will be handled by the +default TC. + +Configuration file example +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: console + + [port 0 default] + default_tc = 0 + mapping_priority = ip + + rate_limit_enable = 1 + rate_limit = 1000 + burst_size = 2000 + + [port 0 tc 0] + rxq = 0 1 + + [port 0 txq 0] + sched_mode = wrr + wrr_weight = 10 + + [port 0 txq 1] + sched_mode = wrr + wrr_weight = 100 + + [port 0 txq 2] + sched_mode = sp + + [port 0 tc 1] + rxq = 2 + pcp = 5 6 7 + dscp = 26-38 + + [port 1 default] + default_tc = 0 + mapping_priority = vlan/ip + + policer_enable = 1 + token_unit = bytes + color = blind + cir = 100000 + ebs = 64 + cbs = 64 + + [port 1 tc 0] + rxq = 0 + dscp = 10 + + [port 1 tc 1] + rxq = 1 + dscp = 11-20 + + [port 1 tc 2] + rxq = 2 + dscp = 30 + + [port 1 txq 0] + rate_limit_enable = 1 + rate_limit = 10000 + burst_size = 2000 + +Usage example +^^^^^^^^^^^^^ + +.. code-block:: console + + ./testpmd --vdev=eth_mvpp2,iface=eth0,iface=eth2,cfg=/home/user/mrvl.conf \ + -c 7 -- -i -a --disable-hw-vlan-strip --rxq=3 --txq=3 + + +Building DPDK +------------- + +Driver needs precompiled MUSDK library during compilation. + +.. code-block:: console + + export CROSS_COMPILE=<toolchain>/bin/aarch64-linux-gnu- + ./bootstrap + ./configure --host=aarch64-linux-gnu --enable-bpool-dma=64 + make install + +MUSDK will be installed to `usr/local` under current directory. +For the detailed build instructions please consult ``doc/musdk_get_started.txt``. + +Before the DPDK build process the environmental variable ``LIBMUSDK_PATH`` with +the path to the MUSDK installation directory needs to be exported. + +.. code-block:: console + + export LIBMUSDK_PATH=<musdk>/usr/local + export CROSS=aarch64-linux-gnu- + make config T=arm64-armv8a-linuxapp-gcc + sed -ri 's,(MVPP2_PMD=)n,\1y,' build/.config + make + +Flow API +-------- + +PPv2 offers packet classification capabilities via classifier engine which +can be configured via generic flow API offered by DPDK. + +Supported flow actions +~~~~~~~~~~~~~~~~~~~~~~ + +Following flow action items are supported by the driver: + +* DROP +* QUEUE + +Supported flow items +~~~~~~~~~~~~~~~~~~~~ + +Following flow items and their respective fields are supported by the driver: + +* ETH + + * source MAC + * destination MAC + * ethertype + +* VLAN + + * PCP + * VID + +* IPV4 + + * DSCP + * protocol + * source address + * destination address + +* IPV6 + + * flow label + * next header + * source address + * destination address + +* UDP + + * source port + * destination port + +* TCP + + * source port + * destination port + +Classifier match engine +~~~~~~~~~~~~~~~~~~~~~~~ + +Classifier has an internal match engine which can be configured to +operate in either exact or maskable mode. + +Mode is selected upon creation of the first unique flow rule as follows: + +* maskable, if key size is up to 8 bytes. +* exact, otherwise, i.e for keys bigger than 8 bytes. + +Where the key size equals the number of bytes of all fields specified +in the flow items. + +.. 
table:: Examples of key size calculation + + +----------------------------------------------------------------------------+-------------------+-------------+ + | Flow pattern | Key size in bytes | Used engine | + +============================================================================+===================+=============+ + | ETH (destination MAC) / VLAN (VID) | 6 + 2 = 8 | Maskable | + +----------------------------------------------------------------------------+-------------------+-------------+ + | VLAN (VID) / IPV4 (source address) | 2 + 4 = 6 | Maskable | + +----------------------------------------------------------------------------+-------------------+-------------+ + | TCP (source port, destination port) | 2 + 2 = 4 | Maskable | + +----------------------------------------------------------------------------+-------------------+-------------+ + | VLAN (priority) / IPV4 (source address) | 1 + 4 = 5 | Maskable | + +----------------------------------------------------------------------------+-------------------+-------------+ + | IPV4 (destination address) / UDP (source port, destination port) | 6 + 2 + 2 = 10 | Exact | + +----------------------------------------------------------------------------+-------------------+-------------+ + | VLAN (VID) / IPV6 (flow label, destination address) | 2 + 3 + 16 = 21 | Exact | + +----------------------------------------------------------------------------+-------------------+-------------+ + | IPV4 (DSCP, source address, destination address) | 1 + 4 + 4 = 9 | Exact | + +----------------------------------------------------------------------------+-------------------+-------------+ + | IPV6 (flow label, source address, destination address) | 3 + 16 + 16 = 35 | Exact | + +----------------------------------------------------------------------------+-------------------+-------------+ + +From the user perspective maskable mode means that masks specified +via flow rules are respected. In case of exact match mode, masks +which do not provide exact matching (all bits masked) are ignored. + +If the flow matches more than one classifier rule the first +(with the lowest index) matched takes precedence. + +Flow rules usage example +~~~~~~~~~~~~~~~~~~~~~~~~ + +Before proceeding run testpmd user application: + +.. code-block:: console + + ./testpmd --vdev=eth_mvpp2,iface=eth0,iface=eth2 -c 3 -- -i --p 3 -a --disable-hw-vlan-strip + +Example #1 +^^^^^^^^^^ + +.. code-block:: console + + testpmd> flow create 0 ingress pattern eth src is 10:11:12:13:14:15 / end actions drop / end + +In this case key size is 6 bytes thus maskable type is selected. Testpmd +will set mask to ff:ff:ff:ff:ff:ff i.e traffic explicitly matching +above rule will be dropped. + +Example #2 +^^^^^^^^^^ + +.. code-block:: console + + testpmd> flow create 0 ingress pattern ipv4 src spec 10.10.10.0 src mask 255.255.255.0 / tcp src spec 0x10 src mask 0x10 / end action drop / end + +In this case key size is 8 bytes thus maskable type is selected. +Flows which have IPv4 source addresses ranging from 10.10.10.0 to 10.10.10.255 +and tcp source port set to 16 will be dropped. + +Example #3 +^^^^^^^^^^ + +.. code-block:: console + + testpmd> flow create 0 ingress pattern vlan vid spec 0x10 vid mask 0x10 / ipv4 src spec 10.10.1.1 src mask 255.255.0.0 dst spec 11.11.11.1 dst mask 255.255.255.0 / end actions drop / end + +In this case key size is 10 bytes thus exact type is selected. +Even though each item has partial mask set, masks will be ignored. 
+
+As a result only flows with VID set to 16 and IPv4 source and destination
+addresses set to 10.10.1.1 and 11.11.11.1 respectively will be dropped.
+
+Limitations
+~~~~~~~~~~~
+
+The following limitations need to be taken into account while creating flow rules:
+
+* For IPv4 exact match type the key size must be up to 12 bytes.
+* For IPv6 exact match type the key size must be up to 36 bytes.
+* The following fields cannot be partially masked (all masks are treated as
+  if they were exact):
+
+  * ETH: ethertype
+  * VLAN: PCP, VID
+  * IPv4: protocol
+  * IPv6: next header
+  * TCP/UDP: source port, destination port
+
+* Only one classifier table can be created, thus all rules in the table
+  have to match the table format. The table format is set during creation of
+  the first unique flow rule.
+* Up to 5 fields can be specified per flow rule.
+* Up to 20 flow rules can be added.
+
+For additional information about the classifier please consult
+``doc/musdk_cls_user_guide.txt``.
+
+Usage Example
+-------------
+
+The MVPP2 PMD requires extra out of tree kernel modules to function properly.
+`musdk_uio` and `mv_pp_uio` sources are part of the MUSDK. Please consult
+``doc/musdk_get_started.txt`` for the detailed build instructions.
+For `mvpp2x_sysfs` please consult ``Documentation/pp22_sysfs.txt`` for the
+detailed build instructions.
+
+.. code-block:: console
+
+   insmod musdk_uio.ko
+   insmod mv_pp_uio.ko
+   insmod mvpp2x_sysfs.ko
+
+Additionally, interfaces used by the DPDK application need to be put up:
+
+.. code-block:: console
+
+   ip link set eth0 up
+   ip link set eth2 up
+
+In order to run the testpmd example application, the following command can be used:
+
+.. code-block:: console
+
+   ./testpmd --vdev=eth_mvpp2,iface=eth0,iface=eth2 -c 7 -- \
+     --burst=128 --txd=2048 --rxd=1024 --rxq=2 --txq=2 --nb-cores=2 \
+     -i -a --rss-udp
diff --git a/doc/guides/nics/netvsc.rst b/doc/guides/nics/netvsc.rst
new file mode 100644
index 00000000..345f393c
--- /dev/null
+++ b/doc/guides/nics/netvsc.rst
@@ -0,0 +1,105 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright(c) Microsoft Corporation.
+
+Netvsc poll mode driver
+=======================
+
+The Netvsc Poll Mode driver (PMD) provides support for the paravirtualized
+network device for Microsoft Hyper-V. It can be used with
+Windows Server 2008/2012/2016 and Windows 10.
+The device offers multi-queue support (if kernel and host support it),
+checksum and segmentation offloads.
+
+
+Features and Limitations of Hyper-V PMD
+---------------------------------------
+
+In this release, the Hyper-V PMD driver provides the basic functionality of packet reception and transmission.
+
+* It supports merge-able buffers per packet when receiving packets and scattered buffer per packet
+  when transmitting packets. The packet size supported is from 64 to 65536.
+
+* The PMD supports multicast packets and promiscuous mode subject to restrictions on the host.
+  In order for this to work, the guest network configuration on Hyper-V must be configured to allow MAC address
+  spoofing.
+
+* The device has only a single MAC address.
+  The Hyper-V driver does not support MAC or VLAN filtering because the Hyper-V host does not support it.
+
+* VLAN tags are always stripped and presented in the mbuf tci field.
+
+* The Hyper-V driver does not use or support Link State or Rx interrupt.
+
+* The maximum number of queues is limited by the host (currently 64).
+  When used with the 4.16 kernel only a single queue is available.
+
+.. note::
+
+   This driver is intended for use with **Hyper-V only** and is
+   not recommended for use on Azure because Accelerated Networking
+   (SR-IOV) is not supported.
+
+   On Azure, use the :doc:`vdev_netvsc` which
+   automatically configures the necessary TAP and failsafe drivers.
+
+
+Installation
+------------
+
+The Netvsc PMD is a standalone driver, similar to virtio and vmxnet3.
+Using the Netvsc PMD requires that the associated VMBUS device be bound to the userspace
+I/O device driver for Hyper-V (uio_hv_generic). By default, all netvsc devices
+will be bound to the Linux kernel driver; in order to use the netvsc PMD the
+device must first be overridden.
+
+The first step is to identify the network device to override.
+VMBUS uses Universally Unique Identifiers
+(`UUID`_) to identify devices on the bus, similar to how PCI uses Domain:Bus:Function.
+The UUID associated with a Linux kernel network device can be determined
+by looking at the sysfs information. To find the UUID for eth1 and
+store it in a shell variable:
+
+  .. code-block:: console
+
+     DEV_UUID=$(basename $(readlink /sys/class/net/eth1/device))
+
+
+.. _`UUID`: https://en.wikipedia.org/wiki/Universally_unique_identifier
+
+There are several possible ways to assign the uio device driver for a device.
+The easiest way (but only on 4.18 or later)
+is to use the `driverctl Device Driver control utility`_ to override
+the normal kernel device.
+
+  .. code-block:: console
+
+     driverctl -b vmbus set-override $DEV_UUID uio_hv_generic
+
+.. _`driverctl Device Driver control utility`: https://gitlab.com/driverctl/driverctl
+
+Any settings done with driverctl are by default persistent and will be reapplied
+on reboot.
+
+On older kernels, the same effect can be had by manual sysfs bind and unbind
+operations:
+
+  .. code-block:: console
+
+     NET_UUID="f8615163-df3e-46c5-913f-f2d2f965ed0e"
+     modprobe uio_hv_generic
+     echo $NET_UUID > /sys/bus/vmbus/drivers/uio_hv_generic/new_id
+     echo $DEV_UUID > /sys/bus/vmbus/drivers/hv_netvsc/unbind
+     echo $DEV_UUID > /sys/bus/vmbus/drivers/uio_hv_generic/bind
+
+.. Note::
+
+   The dpdk-devbind.py script cannot be used since it only handles PCI devices.
+
+
+Prerequisites
+-------------
+
+The following prerequisites apply:
+
+* Linux kernel support for UIO on vmbus is done with the uio_hv_generic driver.
+  Full support of multiple queues requires the 4.17 kernel. It is possible
+  to use the netvsc PMD with the 4.16 kernel but it is limited to a single queue.
diff --git a/doc/guides/nics/nfp.rst b/doc/guides/nics/nfp.rst
index 99a3b76e..927c03c6 100644
--- a/doc/guides/nics/nfp.rst
+++ b/doc/guides/nics/nfp.rst
@@ -34,14 +34,14 @@ NFP poll mode driver library
Netronome's sixth generation of flow processors pack 216 programmable
cores and over 100 hardware accelerators that uniquely combine packet,
flow, security and content processing in a single device that scales
-up to 400 Gbps.
+up to 400-Gb/s.

This document explains how to use DPDK with the Netronome Poll Mode
Driver (PMD) supporting Netronome's Network Flow Processor 6xxx
(NFP-6xxx) and Netronome's Flow Processor 4xxx (NFP-4xxx).

NFP is a SRIOV capable device and the PMD driver supports the physical
-function (PF) and virtual functions (VFs).
+function (PF) and the virtual functions (VFs).

Dependencies
------------

Before using the Netronome's DPDK PMD some NFP configuration,
which is not related to DPDK, is required.
The system requires installation of **Netronome's BSP (Board Support Package)** along -with some specific NFP firmware application. Netronome's NSP ABI +with a specific NFP firmware application. Netronome's NSP ABI version should be 0.20 or higher. If you have a NFP device you should already have the code and -documentation for doing all this configuration. Contact +documentation for this configuration. Contact **support@netronome.com** to obtain the latest available firmware. -The NFP Linux netdev kernel driver for VFs is part of vanilla kernel -since kernel version 4.5, and support for the PF since kernel version -4.11. Support for older kernels can be obtained on Github at -**https://github.com/Netronome/nfp-drv-kmods** along with build +The NFP Linux netdev kernel driver for VFs has been a part of the +vanilla kernel since kernel version 4.5, and support for the PF +since kernel version 4.11. Support for older kernels can be obtained +on Github at +**https://github.com/Netronome/nfp-drv-kmods** along with the build instructions. NFP PMD needs to be used along with UIO ``igb_uio`` or VFIO (``vfio-pci``) @@ -70,15 +71,15 @@ Building the software Netronome's PMD code is provided in the **drivers/net/nfp** directory. Although NFP PMD has Netronome´s BSP dependencies, it is possible to -compile it along with other DPDK PMDs even if no BSP was installed before. +compile it along with other DPDK PMDs even if no BSP was installed previously. Of course, a DPDK app will require such a BSP installed for using the NFP PMD, along with a specific NFP firmware application. -Default PMD configuration is at **common_linuxapp configuration** file: +Default PMD configuration is at the **common_linuxapp configuration** file: - **CONFIG_RTE_LIBRTE_NFP_PMD=y** -Once DPDK is built all the DPDK apps and examples include support for +Once the DPDK is built all the DPDK apps and examples include support for the NFP PMD. @@ -91,37 +92,55 @@ for details. Using the PF ------------ -NFP PMD has support for using the NFP PF as another DPDK port, but it does not +NFP PMD supports using the NFP PF as another DPDK port, but it does not have any functionality for controlling VFs. In fact, it is not possible to use the PMD with the VFs if the PF is being used by DPDK, that is, with the NFP PF -bound to ``igb_uio`` or ``vfio-pci`` kernel drivers. Future DPDK version will +bound to ``igb_uio`` or ``vfio-pci`` kernel drivers. Future DPDK versions will have a PMD able to work with the PF and VFs at the same time and with the PF implementing VF management along with other PF-only functionalities/offloads. The PMD PF has extra work to do which will delay the DPDK app initialization -like checking if a firmware is already available in the device, uploading the -firmware if necessary, and configure the Link state properly when starting or -stopping a PF port. Note that firmware upload is not always necessary which is -the main delay for NFP PF PMD initialization. +like uploading the firmware and configure the Link state properly when starting or +stopping a PF port. Since DPDK 18.05 the firmware upload happens when +a PF is initialized, which was not always true with older DPDK versions. Depending on the Netronome product installed in the system, firmware files should be available under ``/lib/firmware/netronome``. DPDK PMD supporting the -PF requires a specific link, ``/lib/firmware/netronome/nic_dpdk_default.nffw``, -which should be created automatically with Netronome's Agilio products -installation. 
+PF looks for a firmware file in this order: + + 1) First try to find a firmware image specific for this device using the + NFP serial number: + + serial-00-15-4d-12-20-65-10-ff.nffw + + 2) Then try the PCI name: + + pci-0000:04:00.0.nffw + + 3) Finally try the card type and media: + + nic_AMDA0099-0001_2x25.nffw + +Netronome's software packages install firmware files under ``/lib/firmware/netronome`` +to support all the Netronome's SmartNICs and different firmware applications. +This is usually done using file names based on SmartNIC type and media and with a +directory per firmware application. Options 1 and 2 for firmware filenames allow +more than one SmartNIC, same type of SmartNIC or different ones, and to upload a +different firmware to each SmartNIC. + PF multiport support -------------------- Some NFP cards support several physical ports with just one single PCI device. -DPDK core is designed with the 1:1 relationship between PCI devices and DPDK +The DPDK core is designed with a 1:1 relationship between PCI devices and DPDK ports, so NFP PMD PF support requires handling the multiport case specifically. During NFP PF initialization, the PMD will extract the information about the number of PF ports from the firmware and will create as many DPDK ports as needed. Because the unusual relationship between a single PCI device and several DPDK -ports, there are some limitations when using more than one PF DPDK ports: there +ports, there are some limitations when using more than one PF DPDK port: there is no support for RX interrupts and it is not possible either to use those PF ports with the device hotplug functionality. @@ -136,7 +155,7 @@ System configuration get the drivers from the above Github repository and follow the instructions for building and installing it. - Virtual Functions need to be enabled before they can be used with the PMD. + VFs need to be enabled before they can be used with the PMD. Before enabling the VFs it is useful to obtain information about the current NFP PCI device detected by the system: diff --git a/doc/guides/nics/octeontx.rst b/doc/guides/nics/octeontx.rst index 8e2a2b75..f8eaaa63 100644 --- a/doc/guides/nics/octeontx.rst +++ b/doc/guides/nics/octeontx.rst @@ -165,8 +165,7 @@ CRC striping ~~~~~~~~~~~~ The OCTEONTX SoC family NICs strip the CRC for every packets coming into the -host interface. So, CRC will be stripped even when the -``rxmode.hw_strip_crc`` member is set to 0 in ``struct rte_eth_conf``. +host interface irrespective of the offload configuration. Maximum packet length ~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/guides/nics/overview.rst b/doc/guides/nics/overview.rst index 0df0ef81..20cd52b0 100644 --- a/doc/guides/nics/overview.rst +++ b/doc/guides/nics/overview.rst @@ -1,32 +1,6 @@ -.. BSD LICENSE +.. SPDX-License-Identifier: BSD-3-Clause Copyright 2016 6WIND S.A. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of 6WIND S.A. nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - Overview of Networking Drivers ============================== diff --git a/doc/guides/nics/pcap_ring.rst b/doc/guides/nics/pcap_ring.rst index 7fd063c9..879e5430 100644 --- a/doc/guides/nics/pcap_ring.rst +++ b/doc/guides/nics/pcap_ring.rst @@ -71,11 +71,19 @@ The different stream types are: tx_pcap=/path/to/file.pcap * rx_iface: Defines a reception stream based on a network interface name. - The driver reads packets coming from the given interface using the Linux kernel driver for that interface. + The driver reads packets from the given interface using the Linux kernel driver for that interface. + The driver captures both the incoming and outgoing packets on that interface. The value is an interface name. rx_iface=eth0 +* rx_iface_in: Defines a reception stream based on a network interface name. + The driver reads packets from the given interface using the Linux kernel driver for that interface. + The driver captures only the incoming packets on that interface. + The value is an interface name. + + rx_iface_in=eth0 + * tx_iface: Defines a transmission stream based on a network interface name. The driver sends packets to the given interface using the Linux kernel driver for that interface. The value is an interface name. @@ -122,6 +130,21 @@ Forward packets through two network interfaces: $RTE_TARGET/app/testpmd -l 0-3 -n 4 \ --vdev 'net_pcap0,iface=eth0' --vdev='net_pcap1;iface=eth1' +Enable 2 tx queues on a network interface: + +.. code-block:: console + + $RTE_TARGET/app/testpmd -l 0-3 -n 4 \ + --vdev 'net_pcap0,rx_iface=eth1,tx_iface=eth1,tx_iface=eth1' \ + -- --txq 2 + +Read only incoming packets from a network interface and write them back to the same network interface: + +.. code-block:: console + + $RTE_TARGET/app/testpmd -l 0-3 -n 4 \ + --vdev 'net_pcap0,rx_iface_in=eth1,tx_iface=eth1' + Using libpcap-based PMD with the testpmd Application ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/guides/nics/qede.rst b/doc/guides/nics/qede.rst index 63ce9b4c..cba38868 100644 --- a/doc/guides/nics/qede.rst +++ b/doc/guides/nics/qede.rst @@ -35,15 +35,15 @@ Supported Features - N-tuple filter and flow director (limited support) - NPAR (NIC Partitioning) - SR-IOV VF -- VXLAN Tunneling offload +- GRE Tunneling offload - GENEVE Tunneling offload +- VXLAN Tunneling offload - MPLSoUDP Tx Tunneling offload Non-supported Features ---------------------- - SR-IOV PF -- GRE and NVGRE Tunneling offloads Co-existence considerations --------------------------- @@ -58,19 +58,21 @@ Supported QLogic Adapters Prerequisites ------------- -- Requires storm firmware version **8.30.12.0**. Firmware may be available +- Requires storm firmware version **8.33.12.0**. 
Firmware may be available inbox in certain newer Linux distros under the standard directory - ``E.g. /lib/firmware/qed/qed_init_values-8.30.12.0.bin`` + ``E.g. /lib/firmware/qed/qed_init_values-8.33.12.0.bin``. If the required firmware files are not available then download it from - `QLogic Driver Download Center <http://driverdownloads.qlogic.com/QLogicDriverDownloads_UI/DefaultNewSearch.aspx>`_. - For downloading firmware file, select adapter category, model and DPDK Poll Mode Driver. - -- Requires management firmware (MFW) version **8.30.x.x** or higher to be - flashed on to the adapter. If the required management firmware is not - available then download from - `QLogic Driver Download Center <http://driverdownloads.qlogic.com/QLogicDriverDownloads_UI/DefaultNewSearch.aspx>`_. - For downloading firmware upgrade utility, select adapter category, model and Linux distro. - To flash the management firmware refer to the instructions in the QLogic Firmware Upgrade Utility Readme document. + `linux-firmware git repository <http://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/qed>`_ + or `QLogic Driver Download Center <http://driverdownloads.qlogic.com/QLogicDriverDownloads_UI/DefaultNewSearch.aspx>`_. + To download firmware file from QLogic website, select adapter category, model and DPDK Poll Mode Driver. + +- Requires the NIC be updated minimally with **8.30.x.x** Management firmware(MFW) version supported for that NIC. + It is highly recommended that the NIC be updated with the latest available management firmware version to get latest feature set. + Management Firmware and Firmware Upgrade Utility for Cavium FastLinQ(r) branded adapters can be downloaded from + `Driver Download Center <http://driverdownloads.qlogic.com/QLogicDriverDownloads_UI/DefaultNewSearch.aspx>`_. + For downloading Firmware Upgrade Utility, select NIC category, model and Linux distro. + To update the management firmware, refer to the instructions in the Firmware Upgrade Utility Readme document. + For OEM branded adapters please follow the instruction provided by the OEM to update the Management Firmware on the NIC. - SR-IOV requires Linux PF driver version **8.20.x.x** or higher. If the required PF driver is not available then download it from @@ -104,7 +106,7 @@ enabling debugging options may affect system performance. - ``CONFIG_RTE_LIBRTE_QEDE_FW`` (default **""**) Gives absolute path of firmware file. - ``Eg: "/lib/firmware/qed/qed_init_values-8.30.12.0.bin"`` + ``Eg: "/lib/firmware/qed/qed_init_values-8.33.12.0.bin"`` Empty string indicates driver will pick up the firmware file from the default location /lib/firmware/qed. CAUTION this option is more for custom firmware, it is not @@ -121,7 +123,7 @@ SR-IOV: Prerequisites and Sample Application Notes This section provides instructions to configure SR-IOV with Linux OS. -**Note**: librte_pmd_qede will be used to bind to SR-IOV VF device and Linux native kernel driver (qede) will function as SR-IOV PF driver. Requires PF driver to be 8.10.x.x or higher. +**Note**: librte_pmd_qede will be used to bind to SR-IOV VF device and Linux native kernel driver (qede) will function as SR-IOV PF driver. Requires PF driver to be 8.20.x.x or higher. #. Verify SR-IOV and ARI capability is enabled on the adapter using ``lspci``: @@ -193,7 +195,7 @@ This section provides instructions to configure SR-IOV with Linux OS. #. 
Running testpmd

-   (Supply ``--log-level="pmd.net.qede.driver",7`` to view informational messages):
+   (Supply ``--log-level="pmd.net.qede.driver:info"`` to view informational messages):

   Refer to the document
   :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>` to run
diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
index ccdf5ff0..63939ec8 100644
--- a/doc/guides/nics/sfc_efx.rst
+++ b/doc/guides/nics/sfc_efx.rst
@@ -30,7 +30,8 @@ Solarflare libefx-based Poll Mode Driver
========================================

The SFC EFX PMD (**librte_pmd_sfc_efx**) provides poll mode driver support
-for **Solarflare SFN7xxx and SFN8xxx** family of 10/40 Gbps adapters.
+for **Solarflare SFN7xxx and SFN8xxx** family of 10/40 Gbps adapters and
+**Solarflare XtremeScale X2xxx** family of 10/25/40/50/100 Gbps adapters.
SFC EFX PMD has support for the latest Linux and FreeBSD operating systems.

More information can be found at `Solarflare Communications website
@@ -87,6 +88,8 @@ SFC EFX PMD has support for:

- Flow API

+- Loopback
+
Non-supported Features
----------------------

@@ -97,8 +100,6 @@ The features not yet supported include:

- Priority-based flow control

-- Loopback
-
- Configurable RX CRC stripping (always stripped)

- Header split on receive

@@ -120,22 +121,37 @@ required in the receive buffer.
It should be taken into account when mbuf pool for receive is created.


+Equal stride super-buffer mode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When the receive queue uses equal stride super-buffer DMA mode, one HW Rx
+descriptor carries many Rx buffers which contiguously follow each other
+with some stride (equal to the total size of rte_mbuf as mempool object).
+Each Rx buffer is an independent rte_mbuf.
+However, a dedicated mempool manager must be used when the mempool for the Rx
+queue is created. The manager must support dequeue of a contiguous
+block of objects and provide a mempool info API to get the block size.
+
+Another limitation of the equal stride super-buffer mode, imposed by the
+firmware, is that it allows for a single RSS context.
+
+
Tunnels support
---------------

-NVGRE, VXLAN and GENEVE tunnels are supported on SFN8xxx family adapters
-with full-feature firmware variant running.
+NVGRE, VXLAN and GENEVE tunnels are supported on SFN8xxx and X2xxx family
+adapters with full-feature firmware variant running.
**sfboot** should be used to configure NIC to run full-feature firmware variant.
See Solarflare Server Adapter User's Guide for details.

-SFN8xxx family adapters provide either inner or outer packet classes.
+SFN8xxx and X2xxx family adapters provide either inner or outer packet classes.
If adapter firmware advertises support for tunnels then the PMD
configures the hardware to report inner classes, and outer classes are
not reported in received packets.
However, for VXLAN and GENEVE tunnels the PMD does report UDP as the
outer layer 4 packet type.

-SFN8xxx family adapters report GENEVE packets as VXLAN.
+SFN8xxx and X2xxx family adapters report GENEVE packets as VXLAN.
If UDP ports are configured for only one tunnel type then it is safe to
treat VXLAN packet type indication as the corresponding UDP tunnel type.
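+
+As a hedged illustration of the **sfboot** step mentioned above (the exact
+option spelling and adapter selection flags should be checked against the
+Solarflare Server Adapter User's Guide; this invocation is an assumption,
+not a verified command), selecting the full-feature variant could look like:
+
+.. code-block:: console
+
+   sfboot firmware-variant=full-feature
+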
@@ -152,7 +168,9 @@ Supported pattern items:
- VOID

- ETH (exact match of source/destination addresses, individual/group match
-  of destination address, EtherType)
+  of destination address, EtherType in the outer frame and exact match of
+  destination addresses, individual/group match of destination address in
+  the inner frame)

- VLAN (exact match of VID, double-tagging is supported)

@@ -166,6 +184,13 @@ Supported pattern items:

- UDP (exact match of source/destination ports)

+- VXLAN (exact match of VXLAN network identifier)
+
+- GENEVE (exact match of virtual network identifier, only Ethernet (0x6558)
+  protocol type is supported)
+
+- NVGRE (exact match of virtual subnet ID)
+
Supported actions:

- VOID

@@ -174,6 +199,12 @@ Supported actions:

- RSS

+- DROP
+
+- FLAG (supported only with ef10_essb Rx datapath)
+
+- MARK (supported only with ef10_essb Rx datapath)
+
Validating flow rules depends on the firmware variant.

Ethernet destination individual/group match
@@ -184,10 +215,31 @@ in the mask of destination address.
If the destination address in the spec is multicast,
it matches all multicast (and broadcast) packets,
otherwise it matches unicast packets that are not filtered
by other flow rules.

+Exceptions to flow rules
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+There is a list of exceptional flow rule patterns which will not be
+accepted by the PMD. A pattern will be rejected if at least one of the
+conditions is met:
+
+- Filtering by IPv4 or IPv6 EtherType without pattern items of internet
+  layer and above.
+
+- The last item is IPV4 or IPV6, and it's empty.
+
+- Filtering by TCP or UDP IP transport protocol without pattern items of
+  transport layer and above.
+
+- The last item is TCP or UDP, and it's empty.
+
Supported NICs
--------------

+- Solarflare XtremeScale Adapters:
+
+  - Solarflare X2522 Dual Port SFP28 10/25GbE Adapter
+
- Solarflare Flareon [Ultra] Server Adapters:

  - Solarflare SFN8522 Dual Port SFP+ Server Adapter

@@ -258,15 +310,18 @@ whitelist option like "-w 02:00.0,arg1=value1,...".
Case-insensitive 1/y/yes/on or 0/n/no/off may be used to specify
boolean parameter values.

-- ``rx_datapath`` [auto|efx|ef10] (default **auto**)
+- ``rx_datapath`` [auto|efx|ef10|ef10_esps] (default **auto**)

  Choose receive datapath implementation.
  **auto** allows the driver itself to make a choice based on firmware
  features available and required by the datapath implementation.
  **efx** chooses libefx-based datapath which supports Rx scatter.
-  **ef10** chooses EF10 (SFN7xxx, SFN8xxx) native datapath which is
+  **ef10** chooses EF10 (SFN7xxx, SFN8xxx, X2xxx) native datapath which is
  more efficient than libefx-based and provides richer packet type
  classification, but lacks Rx scatter support.
+  **ef10_esps** chooses SFNX2xxx equal stride packed stream datapath
+  which may be used with the DPDK firmware variant only
+  (see notes about its limitations above).

- ``tx_datapath`` [auto|efx|ef10|ef10_simple] (default **auto**)

@@ -277,12 +332,12 @@ boolean parameters value.
  (full-feature firmware variant only), TSO and multi-segment mbufs.
  Mbuf segments may come from different mempools, and mbuf reference
  counters are treated responsibly.
-  **ef10** chooses EF10 (SFN7xxx, SFN8xxx) native datapath which is
+  **ef10** chooses EF10 (SFN7xxx, SFN8xxx, X2xxx) native datapath which is
  more efficient than libefx-based but has no VLAN insertion and TSO
  support yet. Mbuf segments may come from different mempools, and
  mbuf reference counters are treated responsibly.
-  **ef10_simple** chooses EF10 (SFN7xxx, SFN8xxx) native datapath which
+  **ef10_simple** chooses EF10 (SFN7xxx, SFN8xxx, X2xxx) native datapath which
  is even faster than **ef10** but does not support multi-segment
  mbufs, disallows multiple mempools and neglects mbuf reference counters.

@@ -293,21 +348,73 @@ boolean parameters value.
  **auto** allows NIC firmware to make a choice based on installed
  licences and firmware variant configured using **sfboot**.

-- ``debug_init`` [bool] (default **n**)
-
-  Enable extra logging during device initialization and startup.
-
-- ``mcdi_logging`` [bool] (default **n**)
-
-  Enable extra logging of the communication with the NIC's management CPU.
-  The logging is done using RTE_LOG() with INFO level and PMD type.
-  The format is consumed by the Solarflare netlogdecode cross-platform tool.
-
- ``stats_update_period_ms`` [long] (default **1000**)

  Adjust period in milliseconds to update port hardware statistics.
  The accepted range is 0 to 65535. The value of **0** may be used
  to disable periodic statistics update. One should note that it's
-  only possible to set an arbitrary value on SFN8xxx provided that
+  only possible to set an arbitrary value on SFN8xxx and X2xxx provided that
  firmware version is 6.2.1.1033 or higher, otherwise any positive
  value will select a fixed update period of **1000** milliseconds
+
+- ``fw_variant`` [dont-care|full-feature|ultra-low-latency|
+  capture-packed-stream|dpdk] (default **dont-care**)
+
+  Choose the preferred firmware variant to use. In order for the selected
+  option to have an effect, the **sfboot** utility must be configured with the
+  **auto** firmware-variant option. The preferred firmware variant applies to
+  all ports on the NIC.
+  **dont-care** ensures that the driver can attach to an unprivileged function.
+  The datapath firmware type to use is controlled by the **sfboot**
+  utility.
+  **full-feature** chooses full featured firmware.
+  **ultra-low-latency** chooses firmware with fewer features but lower latency.
+  **capture-packed-stream** chooses firmware for SolarCapture packed stream
+  mode.
+  **dpdk** chooses DPDK firmware with equal stride super-buffer Rx mode
+  for higher Rx packet rate and packet marks support, and a firmware subvariant
+  without checksumming on transmit for higher Tx packet rate if
+  checksumming is not required.
+
+- ``rxd_wait_timeout_ns`` [long] (default **200 us**)
+
+  Adjust timeout in nanoseconds to head-of-line block to wait for
+  Rx descriptors.
+  The accepted range is 0 to 400 ms.
+  Flow control should be enabled to make it work.
+  The value of **0** disables it and packets are dropped immediately.
+  When a packet is dropped because of no Rx descriptors,
+  the ``rx_nodesc_drop_cnt`` counter grows.
+  The feature is supported only by the DPDK firmware variant when equal
+  stride super-buffer Rx mode is used.
+
+
+Dynamic Logging Parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+One may leverage the EAL option "--log-level" to change default levels
+for the log types supported by the driver. The option is used with
+an argument typically consisting of two parts separated by a colon.
+
+The level value is the last part and takes a symbolic name (or integer).
+The log type is the former part and may use shell match syntax.
+Depending on the choice of the expression, the given log level may
+be used either for some specific log type or for a subset of types.
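+
+For example (an illustrative invocation, not the only accepted form), the
+shell match syntax allows one option to raise the level for all SFC log
+types at once; the individual types are listed below:
+
+.. code-block:: console
+
+   testpmd --log-level="pmd.net.sfc.*:debug" -- -i
+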
+ +SFC EFX PMD provides the following log types available for control: + +- ``pmd.net.sfc.driver`` (default level is **notice**) + + Affects driver-wide messages unrelated to any particular devices. + +- ``pmd.net.sfc.main`` (default level is **notice**) + + Matches a subset of per-port log types registered during runtime. + A full name for a particular type may be obtained by appending a + dot and a PCI device identifier (``XXXX:XX:XX.X``) to the prefix. + +- ``pmd.net.sfc.mcdi`` (default level is **notice**) + + Extra logging of the communication with the NIC's management CPU. + The format of the log is consumed by the Solarflare netlogdecode + cross-platform tool. May be managed per-port, as explained above. diff --git a/doc/guides/nics/softnic.rst b/doc/guides/nics/softnic.rst new file mode 100644 index 00000000..6c2287a1 --- /dev/null +++ b/doc/guides/nics/softnic.rst @@ -0,0 +1,250 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2018 Intel Corporation. + +Soft NIC Poll Mode Driver +========================= + +The Soft NIC allows building custom NIC pipelines in software. The Soft NIC pipeline +is DIY and reconfigurable through ``firmware`` (DPDK Packet Framework script). + +The Soft NIC leverages the DPDK Packet Framework libraries (librte_port, +librte_table and librte_pipeline) to make it modular, flexible and extensible +with new functionality. Please refer to DPDK Programmer's Guide, Chapter +``Packet Framework`` and DPDK Sample Application User Guide, +Chapter ``IP Pipeline Application`` for more details. + +The Soft NIC is configured through the standard DPDK ethdev API (ethdev, flow, +QoS, security). The internal framework is not externally visible. + +Key benefits: + - Can be used to augment missing features to HW NICs. + - Allows consumption of advanced DPDK features without application redesign. + - Allows out-of-the-box performance boost of DPDK consumers applications simply by + instantiating this type of Ethernet device. + +Flow +---- +* ``Device creation``: Each Soft NIC instance is a virtual device. + +* ``Device start``: The Soft NIC firmware script is executed every time the device + is started. The firmware script typically creates several internal objects, + such as: memory pools, SW queues, traffic manager, action profiles, pipelines, + etc. + +* ``Device stop``: All the internal objects that were previously created by the + firmware script during device start are now destroyed. + +* ``Device run``: Each Soft NIC device needs one or several CPU cores to run. + The firmware script maps each internal pipeline to a CPU core. Multiple + pipelines can be mapped to the same CPU core. In order for a given pipeline + assigned to CPU core X to run, the application needs to periodically call on + CPU core X the `rte_pmd_softnic_run()` function for the current Soft NIC + device. + +* ``Application run``: The application reads packets from the Soft NIC device RX + queues and writes packets to the Soft NIC device TX queues. + +Supported Operating Systems +--------------------------- + +Any Linux distribution fulfilling the conditions described in ``System Requirements`` +section of :ref:`the DPDK documentation <linux_gsg>` or refer to *DPDK +Release Notes*. + +Build options +------------- + +The default PMD configuration available in the common_linuxapp configuration file: + +CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y + +Once the DPDK is built, all the DPDK applications include support for the +Soft NIC PMD. 
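+
+As a quick sanity check before building (the make target below is the
+common x86_64 native one and is only an example), one may verify the
+option in the generated configuration:
+
+.. code-block:: console
+
+   make config T=x86_64-native-linuxapp-gcc
+   grep CONFIG_RTE_LIBRTE_PMD_SOFTNIC build/.config
+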
+ +Soft NIC PMD arguments +---------------------- + +The user can specify below arguments in EAL ``--vdev`` options to create the +Soft NIC device instance: + + --vdev "net_softnic0,firmware=firmware.cli,conn_port=8086" + +#. ``firmware``: path to the firmware script used for Soft NIC configuration. + The example "firmware" script is provided at `drivers/net/softnic/`. + (Optional: No, Default = NA) + +#. ``conn_port``: tcp connection port (non-zero value) used by remote client + (for examples- telnet, netcat, etc.) to connect and configure Soft NIC device in run-time. + (Optional: yes, Default value: 0, no connection with external client) + +#. ``cpu_id``: numa node id. (Optional: yes, Default value: 0) + +#. ``tm_n_queues``: number of traffic manager's scheduler queues. The traffic manager + is based on DPDK *librte_sched* library. (Optional: yes, Default value: 65,536 queues) + +#. ``tm_qsize0``: size of scheduler queue 0 per traffic class of the pipes/subscribers. + (Optional: yes, Default: 64) + +#. ``tm_qsize1``: size of scheduler queue 1 per traffic class of the pipes/subscribers. + (Optional: yes, Default: 64) + +#. ``tm_qsize2``: size of scheduler queue 2 per traffic class of the pipes/subscribers. + (Optional: yes, Default: 64) + +#. ``tm_qsize3``: size of scheduler queue 3 per traffic class of the pipes/subscribers. + (Optional: yes, Default: 64) + + +Soft NIC testing +---------------- + +* Run testpmd application in Soft NIC forwarding mode with loopback feature + enabled on Soft NIC port: + + .. code-block:: console + + ./testpmd -c 0x3 --vdev 'net_softnic0,firmware=<script path>/firmware.cli,cpu_id=0,conn_port=8086' -- -i + --forward-mode=softnic --portmask=0x2 + + .. code-block:: console + + ... + Interactive-mode selected + Set softnic packet forwarding mode + ... + Configuring Port 0 (socket 0) + Port 0: 90:E2:BA:37:9D:DC + Configuring Port 1 (socket 0) + + ; SPDX-License-Identifier: BSD-3-Clause + ; Copyright(c) 2018 Intel Corporation + + link LINK dev 0000:02:00.0 + + pipeline RX period 10 offset_port_id 0 + pipeline RX port in bsz 32 link LINK rxq 0 + pipeline RX port out bsz 32 swq RXQ0 + pipeline RX table match stub + pipeline RX port in 0 table 0 + + pipeline TX period 10 offset_port_id 0 + pipeline TX port in bsz 32 swq TXQ0 + pipeline TX port out bsz 32 link LINK txq 0 + pipeline TX table match stub + pipeline TX port in 0 table 0 + + thread 1 pipeline RX enable + thread 1 pipeline TX enable + Port 1: 00:00:00:00:00:00 + Checking link statuses... + Done + testpmd> + +* Start forwarding + + .. 
code-block:: console + + testpmd> start + softnic packet forwarding - ports=1 - cores=1 - streams=1 - NUMA support enabled, MP over anonymous pages disabled + Logical Core 1 (socket 0) forwards packets on 1 streams: + RX P=2/Q=0 (socket 0) -> TX P=2/Q=0 (socket 0) peer=02:00:00:00:00:02 + + softnic packet forwarding packets/burst=32 + nb forwarding cores=1 - nb forwarding ports=1 + port 0: RX queue number: 1 Tx queue number: 1 + Rx offloads=0x1000 Tx offloads=0x0 + RX queue: 0 + RX desc=512 - RX free threshold=32 + RX threshold registers: pthresh=8 hthresh=8 wthresh=0 + RX Offloads=0x0 + TX queue: 0 + TX desc=512 - TX free threshold=32 + TX threshold registers: pthresh=32 hthresh=0 wthresh=0 + TX offloads=0x0 - TX RS bit threshold=32 + port 1: RX queue number: 1 Tx queue number: 1 + Rx offloads=0x0 Tx offloads=0x0 + RX queue: 0 + RX desc=0 - RX free threshold=0 + RX threshold registers: pthresh=0 hthresh=0 wthresh=0 + RX Offloads=0x0 + TX queue: 0 + TX desc=0 - TX free threshold=0 + TX threshold registers: pthresh=0 hthresh=0 wthresh=0 + TX offloads=0x0 - TX RS bit threshold=0 + +* Start remote client (e.g. telnet) to communicate with the softnic device: + + .. code-block:: console + + $ telnet 127.0.0.1 8086 + Trying 127.0.0.1... + Connected to 127.0.0.1. + Escape character is '^]'. + + Welcome to Soft NIC! + + softnic> + +* Add/update Soft NIC pipeline table match-action entries from telnet client: + + .. code-block:: console + + softnic> pipeline RX table 0 rule add match default action fwd port 0 + softnic> pipeline TX table 0 rule add match default action fwd port 0 + +Soft NIC Firmware +----------------- + +The Soft NIC firmware, for example- `softnic/firmware.cli`, consists of following CLI commands +for creating and managing software based NIC pipelines. For more details, please refer to CLI +command description provided in `softnic/rte_eth_softnic_cli.c`. + +* Physical port for packets send/receive: + + .. code-block:: console + + link LINK dev 0000:02:00.0 + +* Pipeline create: + + .. code-block:: console + + pipeline RX period 10 offset_port_id 0 (Soft NIC rx-path pipeline) + pipeline TX period 10 offset_port_id 0 (Soft NIC tx-path pipeline) + +* Pipeline input/output port create + + .. code-block:: console + + pipeline RX port in bsz 32 link LINK rxq 0 (Soft NIC rx pipeline input port) + pipeline RX port out bsz 32 swq RXQ0 (Soft NIC rx pipeline output port) + pipeline TX port in bsz 32 swq TXQ0 (Soft NIC tx pipeline input port) + pipeline TX port out bsz 32 link LINK txq 0 (Soft NIC tx pipeline output port) + +* Pipeline table create + + .. code-block:: console + + pipeline RX table match stub (Soft NIC rx pipeline match-action table) + pipeline TX table match stub (Soft NIC tx pipeline match-action table) + +* Pipeline input port connection with table + + .. code-block:: console + + pipeline RX port in 0 table 0 (Soft NIC rx pipeline input port 0 connection with table 0) + pipeline TX port in 0 table 0 (Soft NIC tx pipeline input port 0 connection with table 0) + +* Pipeline table match-action rules add + + .. code-block:: console + + pipeline RX table 0 rule add match default action fwd port 0 (Soft NIC rx pipeline table 0 rule) + pipeline TX table 0 rule add match default action fwd port 0 (Soft NIC tx pipeline table 0 rule) + +* Enable pipeline on CPU thread + + .. 
code-block:: console + + thread 1 pipeline RX enable (Soft NIC rx pipeline enable on cpu thread id 1) + thread 1 pipeline TX enable (Soft NIC tx pipeline enable on cpu thread id 1) diff --git a/doc/guides/nics/szedata2.rst b/doc/guides/nics/szedata2.rst index 1a5d4138..a2092f9c 100644 --- a/doc/guides/nics/szedata2.rst +++ b/doc/guides/nics/szedata2.rst @@ -1,32 +1,5 @@ -.. BSD LICENSE +.. SPDX-License-Identifier: BSD-3-Clause Copyright 2015 - 2016 CESNET - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of CESNET nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SZEDATA2 poll mode driver library ================================= @@ -70,8 +43,10 @@ separately: * **Kernel modules** + * combo6core * combov3 - * szedata2_cv3 + * szedata2 + * szedata2_cv3 or szedata2_cv3_fdt Kernel modules manage initialization of hardware, allocation and sharing of resources for user space applications. @@ -79,6 +54,15 @@ separately: Information about getting the dependencies can be found `here <http://www.netcope.com/en/company/community-support/dpdk-libsze2>`_. +Versions of the packages +~~~~~~~~~~~~~~~~~~~~~~~~ + +The minimum version of the provided packages: + +* for DPDK from 18.05: **4.4.1** + +* for DPDK up to 18.02 (including): **3.0.5** + Configuration ------------- @@ -89,45 +73,53 @@ These configuration options can be modified before compilation in the Value **y** enables compilation of szedata2 PMD. -* ``CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS`` default value: **0** - - This option defines type of firmware address space and must be set - according to the used card and mode. - Currently supported values are: - - * **0** - for cards (modes): - - * NFB-100G1 (100G1) +Using the SZEDATA2 PMD +---------------------- - * **1** - for cards (modes): +From DPDK version 16.04 the type of SZEDATA2 PMD is changed to PMD_PDEV. +SZEDATA2 device is automatically recognized during EAL initialization. +No special command line options are needed. - * NFB-100G2Q (100G1) +Kernel modules have to be loaded before running the DPDK application. 
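+
+As a sketch, the kernel modules listed earlier might be loaded with
+``modprobe`` before starting the application (the exact module set depends
+on the card and firmware in use, as described in the dependencies section):
+
+.. code-block:: console
+
+   modprobe combo6core
+   modprobe combov3
+   modprobe szedata2
+   modprobe szedata2_cv3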
-  * **2** - for cards (modes):
+NFB card architecture
+---------------------

-     * NFB-40G2 (40G2)
-     * NFB-100G2C (100G2)
-     * NFB-100G2Q (40G2)
+The NFB cards are multi-port multi-queue cards, where (generally) data from any
+Ethernet port may be sent to any queue.
+They were historically represented in DPDK as a single port.

-  * **3** - for cards (modes):
+However, the new NFB-200G2QL card employs an addon cable which allows connecting
+it to two physical PCI-E slots at the same time (see the diagram below).
+This is done to allow 200 Gbps of traffic to be transferred through the PCI-E
+bus (note that a single PCI-E 3.0 x16 slot provides only 125 Gbps theoretical
+throughput).

-     * NFB-40G2 (10G8)
-     * NFB-100G2Q (10G8)
+Since each slot may be connected to a different CPU and therefore to a different
+NUMA node, the card is represented as two ports in DPDK (each with half of the
+queues), which allows DPDK to work with data from the individual queues on the
+right NUMA node.

-  * **4** - for cards (modes):
+.. figure:: img/szedata2_nfb200g_architecture.*
+    :align: center

-     * NFB-100G1 (10G10)
+    NFB-200G2QL high-level diagram

-  * **5** - for experimental firmwares and future use
+Limitations
+-----------

-Using the SZEDATA2 PMD
-----------------------
+The SZEDATA2 PMD does not support operations related to Ethernet ports
+(link_up, link_down, set_mac_address, etc.).

-From DPDK version 16.04 the type of SZEDATA2 PMD is changed to PMD_PDEV.
-SZEDATA2 device is automatically recognized during EAL initialization.
-No special command line options are needed.
+NFB cards employ multiple Ethernet ports.
+Until now, Ethernet port-related operations were performed on all of them
+(since the whole card was represented as a single port).
+With the NFB-200G2QL card, this is no longer viable (see above).

-Kernel modules have to be loaded before running the DPDK application.
+Since there is no fixed mapping between the queues and Ethernet ports, and since
+a single card can be represented as two ports in DPDK, there is no way of
+telling which (if any) physical ports should be associated with individual
+ports in DPDK.

 Example of usage
 ----------------
diff --git a/doc/guides/nics/tap.rst b/doc/guides/nics/tap.rst
index ea61be38..27148681 100644
--- a/doc/guides/nics/tap.rst
+++ b/doc/guides/nics/tap.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: BSD-3-Clause
    Copyright(c) 2016 Intel Corporation.

-Tap Poll Mode Driver
-====================
+Tun|Tap Poll Mode Driver
+========================

 The ``rte_eth_tap.c`` PMD creates a device using TAP interfaces on the local
 host. The PMD allows for DPDK and the host to communicate using a raw
@@ -37,6 +37,12 @@ for each interface string containing ``mac=fixed``. The MAC address is formatted
 as 00:'d':'t':'a':'p':[00-FF]. Convert the characters to hex and you get the
 actual MAC address: ``00:64:74:61:70:[00-FF]``.

+   --vdev=net_tap0,mac="00:64:74:61:70:11"
+
+The MAC address can also be set to a user-supplied value passed as a
+``:``-delimited string, as shown above. Each byte of the string is converted
+to hex, giving the actual MAC address: ``00:64:74:61:70:11``.
+
 It is possible to specify a remote netdevice to capture packets from by adding
 ``remote=foo1``, for example::

@@ -77,6 +83,17 @@ can utilize that stack to handle the network protocols. Plus you would be able
 to address the interface using an IP address assigned to the internal
 interface.

+The TUN PMD allows the user to create a TUN device on the host. 
The PMD allows user +to transmit and receive packets via DPDK API calls with L3 header and payload. +The devices in host can be accessed via ``ifconfig`` or ``ip`` command. TUN +interfaces are passed to DPDK ``rte_eal_init`` arguments as ``--vdev=net_tunX``, +where X stands for unique id, example:: + + --vdev=net_tun0 --vdev=net_tun1,iface=foo1, ... + +Unlike TAP PMD, TUN PMD does not support user arguments as ``MAC`` or ``remote`` user +options. Default interface name is ``dtunX``, where X stands for unique id. + Flow API support ---------------- @@ -91,7 +108,7 @@ The kernel support can be checked with this command:: Supported items: - eth: src and dst (with variable masks), and eth_type (0xffff mask). -- vlan: vid, pcp, tpid, but not eid. (requires kernel 4.9) +- vlan: vid, pcp, but not eid. (requires kernel 4.9) - ipv4/6: src and dst (with variable masks), and ip_proto (0xffff mask). - udp/tcp: src and dst port (0xffff) mask. @@ -149,7 +166,7 @@ Run pktgen from the pktgen directory in a terminal with a commandline like the following:: sudo ./app/app/x86_64-native-linuxapp-gcc/app/pktgen -l 1-5 -n 4 \ - --proc-type auto --log-level 8 --socket-mem 512,512 --file-prefix pg \ + --proc-type auto --log-level debug --socket-mem 512,512 --file-prefix pg \ --vdev=net_tap0 --vdev=net_tap1 -b 05:00.0 -b 05:00.1 \ -b 04:00.0 -b 04:00.1 -b 04:00.2 -b 04:00.3 \ -b 81:00.0 -b 81:00.1 -b 81:00.2 -b 81:00.3 \ @@ -241,6 +258,11 @@ Please refer to ``iproute2`` package file ``lib/bpf.c`` function An example utility for eBPF instruction generation in the format of C arrays will be added in next releases +TAP reports on supported RSS functions as part of dev_infos_get callback: +``ETH_RSS_IP``, ``ETH_RSS_UDP`` and ``ETH_RSS_TCP``. +**Known limitation:** TAP supports all of the above hash functions together +and not in partial combinations. + Systems supporting flow API --------------------------- diff --git a/doc/guides/nics/thunderx.rst b/doc/guides/nics/thunderx.rst index 5270ef23..e84eaafe 100644 --- a/doc/guides/nics/thunderx.rst +++ b/doc/guides/nics/thunderx.rst @@ -30,6 +30,7 @@ Features of the ThunderX PMD are: - SR-IOV VF - NUMA support - Multi queue set support (up to 96 queues (12 queue sets)) per port +- Skip data bytes Supported ThunderX SoCs ----------------------- @@ -312,6 +313,21 @@ We will choose four secondary queue sets from the ending of the list (0002:01:01 The nicvf thunderx driver will make use of attached secondary VFs automatically during the interface configuration stage. + +Module params +-------------- + +skip_data_bytes +~~~~~~~~~~~~~~~ +This feature is used to create a hole between HEADROOM and actual data. Size of hole is specified +in bytes as module param("skip_data_bytes") to pmd. +This scheme is useful when application would like to insert vlan header without disturbing HEADROOM. + +Example: + .. code-block:: console + + -w 0002:01:00.2,skip_data_bytes=8 + Limitations ----------- @@ -319,8 +335,7 @@ CRC striping ~~~~~~~~~~~~ The ThunderX SoC family NICs strip the CRC for every packets coming into the -host interface. So, CRC will be stripped even when the -``rxmode.hw_strip_crc`` member is set to 0 in ``struct rte_eth_conf``. +host interface irrespective of the offload configuration. Maximum packet length ~~~~~~~~~~~~~~~~~~~~~ @@ -336,3 +351,8 @@ Maximum packet segments The ThunderX SoC family NICs support up to 12 segments per packet when working in scatter/gather mode. 
So, setting the MTU will result in ``EINVAL`` when the frame size does not fit
in the maximum number of segments.
+
+skip_data_bytes
+~~~~~~~~~~~~~~~
+
+The maximum limit of skip_data_bytes is 128 bytes and the number of bytes
+should be a multiple of 8.
diff --git a/doc/guides/nics/vdev_netvsc.rst b/doc/guides/nics/vdev_netvsc.rst
index 55d130a3..d1da0711 100644
--- a/doc/guides/nics/vdev_netvsc.rst
+++ b/doc/guides/nics/vdev_netvsc.rst
@@ -1,6 +1,6 @@
 .. SPDX-License-Identifier: BSD-3-Clause
    Copyright 2017 6WIND S.A.
-   Copyright 2017 Mellanox Technologies, Ltd.
+   Copyright 2017 Mellanox Technologies, Ltd

 VDEV_NETVSC driver
 ==================
@@ -89,12 +89,16 @@ The following device parameters are supported:
 - ``force`` [int]

   If nonzero, forces the use of specified interfaces even if not detected as
-  NetVSC or detected as routed NETVSC.
+  NetVSC.

 - ``ignore`` [int]

-  If nonzero, ignores the driver runnig (actually used to disable the
+  If nonzero, ignores the driver running (actually used to disable the
   auto-detection in Hyper-V VM).

-Not specifying either ``iface`` or ``mac`` makes this driver attach itself to
-all unrouted NetVSC interfaces found on the system.
+.. note::
+
+   Not specifying either ``iface`` or ``mac`` makes this driver attach itself to
+   all unrouted NetVSC interfaces found on the system.
+   Specifying the device makes this driver attach itself to the device
+   regardless of the device routes.
diff --git a/doc/guides/nics/virtio.rst b/doc/guides/nics/virtio.rst
index ca09cd20..7c099fb7 100644
--- a/doc/guides/nics/virtio.rst
+++ b/doc/guides/nics/virtio.rst
@@ -201,7 +201,7 @@ The packet transmission flow is:
 Virtio PMD Rx/Tx Callbacks
 --------------------------

-Virtio driver has 3 Rx callbacks and 2 Tx callbacks.
+Virtio driver has 4 Rx callbacks and 3 Tx callbacks.

 Rx callbacks:

@@ -215,6 +215,9 @@ Rx callbacks:
    Vector version without mergeable Rx buffer support, also fixes the available
    ring indexes and uses vector instructions to optimize performance.

+#. ``virtio_recv_mergeable_pkts_inorder``:
+   In-order version with mergeable Rx buffer support.
+
 Tx callbacks:

 #. ``virtio_xmit_pkts``:
@@ -223,6 +226,8 @@ Tx callbacks:
 #. ``virtio_xmit_pkts_simple``:
    Vector version fixes the available ring indexes to optimize performance.

+#. ``virtio_xmit_pkts_inorder``:
+   In-order version.

 By default, the non-vector callbacks are used:

@@ -234,7 +239,7 @@ By default, the non-vector callbacks are used:

 Vector callbacks will be used when:

-* ``txq_flags`` is set to ``VIRTIO_SIMPLE_FLAGS`` (0xF01), which implies:
+* ``txmode.offloads`` is set to ``0x0``, which implies:

 * Single segment is specified.

@@ -252,8 +257,14 @@ The corresponding callbacks are:
 Example of using the vector version of the virtio poll mode driver in
 ``testpmd``::

-   testpmd -l 0-2 -n 4 -- -i --txqflags=0xF01 --rxq=1 --txq=1 --nb-cores=1
+   testpmd -l 0-2 -n 4 -- -i --tx-offloads=0x0 --rxq=1 --txq=1 --nb-cores=1
+
+In-order callbacks only work on simulated virtio user vdev.
+
+* For Rx: If mergeable Rx buffers and in-order are both enabled, then
+  ``virtio_recv_mergeable_pkts_inorder`` is used.
+* For Tx: If in-order is enabled then ``virtio_xmit_pkts_inorder`` is used.

 Interrupt mode
 --------------
@@ -318,3 +329,26 @@ Here we use l3fwd-power as an example to show how to get started.

     $ l3fwd-power -l 0-1 -- -p 1 -P --config="(0,0,1)" \
                                    --no-numa --parse-ptype
+
+
+Virtio PMD arguments
+--------------------
+
+The user can specify the below arguments in devargs.
+
+#. 
``vdpa``: + + A virtio device could also be driven by vDPA (vhost data path acceleration) + driver, and works as a HW vhost backend. This argument is used to specify + a virtio device needs to work in vDPA mode. + (Default: 0 (disabled)) + +#. ``mrg_rxbuf``: + + It is used to enable virtio device mergeable Rx buffer feature. + (Default: 1 (enabled)) + +#. ``in_order``: + + It is used to enable virtio device in-order feature. + (Default: 1 (enabled)) diff --git a/doc/guides/platform/octeontx.rst b/doc/guides/platform/octeontx.rst index dc1aa4fe..b0a99c38 100644 --- a/doc/guides/platform/octeontx.rst +++ b/doc/guides/platform/octeontx.rst @@ -16,11 +16,11 @@ Common Offload HW Block Drivers ------------------------------- 1. **Eventdev Driver** - See :doc: `../eventdevs/octeontx.rst` for octeontx ssovf eventdev driver + See :doc:`../eventdevs/octeontx` for octeontx ssovf eventdev driver information. 2. **Mempool Driver** - See :doc: `../mempool/octeontx.rst` for octeontx fpavf mempool driver + See :doc:`../mempool/octeontx` for octeontx fpavf mempool driver information. Steps To Setup Platform @@ -52,4 +52,4 @@ OCTEONTX compatible board: SDK and related information can be obtained from: `Cavium support site <https://support.cavium.com/>`_. -- Follow the DPDK :doc: `../linux_gsg/index.rst` to setup the basic DPDK environment. +- Follow the DPDK :doc:`../linux_gsg/index` to setup the basic DPDK environment. diff --git a/doc/guides/prog_guide/bbdev.rst b/doc/guides/prog_guide/bbdev.rst index d40c7f4c..9de14443 100644 --- a/doc/guides/prog_guide/bbdev.rst +++ b/doc/guides/prog_guide/bbdev.rst @@ -6,12 +6,12 @@ Wireless Baseband Device Library The Wireless Baseband library provides a common programming framework that abstracts HW accelerators based on FPGA and/or Fixed Function Accelerators that -assist with 3gpp Physical Layer processing. Furthermore, it decouples the +assist with 3GPP Physical Layer processing. Furthermore, it decouples the application from the compute-intensive wireless functions by abstracting their optimized libraries to appear as virtual bbdev devices. The functional scope of the BBDEV library are those functions in relation to -the 3gpp Layer 1 signal processing (channel coding, modulation, ...). +the 3GPP Layer 1 signal processing (channel coding, modulation, ...). The framework currently only supports Turbo Code FEC function. @@ -42,13 +42,13 @@ From the command line using the --vdev EAL option .. code-block:: console - --vdev 'turbo_sw,max_nb_queues=8,socket_id=0' + --vdev 'baseband_turbo_sw,max_nb_queues=8,socket_id=0' Our using the rte_vdev_init API within the application code. .. code-block:: c - rte_vdev_init("turbo_sw", "max_nb_queues=2,socket_id=0") + rte_vdev_init("baseband_turbo_sw", "max_nb_queues=2,socket_id=0") All virtual bbdev devices support the following initialization parameters: @@ -167,7 +167,7 @@ Logical Cores, Memory and Queues Relationships ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The bbdev device Library as the Poll Mode Driver library support NUMA for when -a processor’s logical cores and interfaces utilize its local memory. Therefore +a processor's logical cores and interfaces utilize its local memory. Therefore baseband operations, the mbuf being operated on should be allocated from memory pools created in the local memory. The buffers should, if possible, remain on the local processor to obtain the best performance results and buffer @@ -202,7 +202,7 @@ structure in the *DPDK API Reference*. 
A device reports its capabilities when registering itself in the bbdev framework. With the aid of this capabilities mechanism, an application can query devices to -discover which operations within the 3gpp physical layer they are capable of +discover which operations within the 3GPP physical layer they are capable of performing. Below is an example of the capabilities for a PMD it supports in relation to Turbo Encoding and Decoding operations. @@ -216,7 +216,10 @@ relation to Turbo Encoding and Decoding operations. RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE | RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN | RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN | - RTE_BBDEV_TURBO_CRC_TYPE_24B, + RTE_BBDEV_TURBO_CRC_TYPE_24B | + RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP | + RTE_BBDEV_TURBO_EARLY_TERMINATION, + .max_llr_modulus = 16, .num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS, .num_buffers_hard_out = RTE_BBDEV_MAX_CODE_BLOCKS, @@ -228,6 +231,7 @@ relation to Turbo Encoding and Decoding operations. .cap.turbo_enc = { .capability_flags = RTE_BBDEV_TURBO_CRC_24B_ATTACH | + RTE_BBDEV_TURBO_CRC_24A_ATTACH | RTE_BBDEV_TURBO_RATE_MATCH | RTE_BBDEV_TURBO_RV_INDEX_BYPASS, .num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS, @@ -391,27 +395,101 @@ operation to its allocating pool. void rte_bbdev_enc_op_free_bulk(struct rte_bbdev_enc_op **ops, unsigned int num_ops) -BBDEV Operations -~~~~~~~~~~~~~~~~ +BBDEV Inbound/Outbound Memory +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The bbdev operation structure contains all the mutable data relating to -performing Turbo code processing on a referenced mbuf data buffer. It is used -for either encode or decode operations. +performing Turbo coding on a referenced mbuf data buffer. It is used for either +encode or decode operations. Turbo Encode operation accepts one input and one output. - Turbo Decode operation accepts one input and two outputs, called *hard-decision* and *soft-decision* outputs. *Soft-decision* output is optional. -It is expected that the application provides input and output ``mbuf`` pointers +It is expected that the application provides input and output mbuf pointers allocated and ready to use. The baseband framework supports turbo coding on Code Blocks (CB) and Transport Blocks (TB). -For the output buffer(s), the application needs only to provide an allocated and -free mbuf (containing only one mbuf segment), so that bbdev can write the -operation outcome. +For the output buffer(s), the application is required to provide an allocated +and free mbuf, so that bbdev write back the resulting output. + +The support of split "scattered" buffers is a driver-specific feature, so it is +reported individually by the supporting driver as a capability. + +Input and output data buffers are identified by ``rte_bbdev_op_data`` structure, +as follows: + +.. code-block:: c + + struct rte_bbdev_op_data { + struct rte_mbuf *data; + uint32_t offset; + uint32_t length; + }; + + +This structure has three elements: + +- ``data``: This is the mbuf data structure representing the data for BBDEV + operation. + + This mbuf pointer can point to one Code Block (CB) data buffer or multiple CBs + contiguously located next to each other. A Transport Block (TB) represents a + whole piece of data that is divided into one or more CBs. Maximum number of + CBs can be contained in one TB is defined by ``RTE_BBDEV_MAX_CODE_BLOCKS``. + + An mbuf data structure cannot represent more than one TB. The smallest piece + of data that can be contained in one mbuf is one CB. 
+ An mbuf can include one contiguous CB, subset of contiguous CBs that are + belonging to one TB, or all contiguous CBs that are belonging to one TB. + + If a BBDEV PMD supports the extended capability "Scatter-Gather", then it is + capable of collecting (gathering) non-contiguous (scattered) data from + multiple locations in the memory. + This capability is reported by the capability flags: + + - ``RTE_BBDEV_TURBO_ENC_SCATTER_GATHER``, and -**Turbo Encode Op structure** + - ``RTE_BBDEV_TURBO_DEC_SCATTER_GATHER``. + + Only if a BBDEV PMD supports this feature, chained mbuf data structures are + accepted. A chained mbuf can represent one non-contiguous CB or multiple + non-contiguous CBs. + The first mbuf segment in the given chained mbuf represents the first piece + of the CB. Offset is only applicable to the first segment. ``length`` is the + total length of the CB. + + BBDEV driver is responsible for identifying where the split is and enqueue + the split data to its internal queues. + + If BBDEV PMD does not support this feature, it will assume inbound mbuf data + contains one segment. + + The output mbuf data though is always one segment, even if the input was a + chained mbuf. + + +- ``offset``: This is the starting point of the BBDEV (encode/decode) operation, + in bytes. + + BBDEV starts to read data past this offset. + In case of chained mbuf, this offset applies only to the first mbuf segment. + + +- ``length``: This is the total data length to be processed in one operation, + in bytes. + + In case the mbuf data is representing one CB, this is the length of the CB + undergoing the operation. + If it is for multiple CBs, this is the total length of those CBs undergoing + the operation. + If it is for one TB, this is the total length of the TB under operation. + In case of chained mbuf, this data length includes the lengths of the + "scattered" data segments undergoing the operation. + + +BBDEV Turbo Encode Operation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: c @@ -428,8 +506,78 @@ operation outcome. }; }; +The Turbo encode structure is composed of the ``input`` and ``output`` mbuf +data pointers. The provided mbuf pointer of ``input`` needs to be big enough to +stretch for extra CRC trailers. + +``op_flags`` parameter holds all operation related flags, like whether CRC24A is +included by the application or not. + +``code_block_mode`` flag identifies the mode in which bbdev is operating in. + +The encode interface works on both the code block (CB) and the transport block +(TB). An operation executes in "CB-mode" when the CB is standalone. While +"TB-mode" executes when an operation performs on one or multiple CBs that +belong to a TB. Therefore, a given data can be standalone CB, full-size TB or +partial TB. Partial TB means that only a subset of CBs belonging to a bigger TB +are being enqueued. -**Turbo Decode Op structure** + **NOTE:** It is assumed that all enqueued ops in one ``rte_bbdev_enqueue_enc_ops()`` + call belong to one mode, either CB-mode or TB-mode. + +In case that the CB is smaller than Z (6144 bits), then effectively the TB = CB. +CRC24A is appended to the tail of the CB. The application is responsible for +calculating and appending CRC24A before calling BBDEV in case that the +underlying driver does not support CRC24A generation. + +In CB-mode, CRC24A/B is an optional operation. +The input ``k`` is the size of the CB (this maps to K as described in 3GPP TS +36.212 section 5.1.2), this size is inclusive of CRC24A/B. 
+The ``length`` is inclusive of CRC24A/B and equals to ``k`` in this case. + +Not all BBDEV PMDs are capable of CRC24A/B calculation. Flags +``RTE_BBDEV_TURBO_CRC_24A_ATTACH`` and ``RTE_BBDEV_TURBO_CRC_24B_ATTACH`` +informs the application with relevant capability. These flags can be set in the +``op_flags`` parameter to indicate BBDEV to calculate and append CRC24A to CB +before going forward with Turbo encoding. + +Output format of the CB encode will have the encoded CB in ``e`` size output +(this maps to E described in 3GPP TS 36.212 section 5.1.4.1.2). The output mbuf +buffer size needs to be big enough to hold the encoded buffer of size ``e``. + +In TB-mode, CRC24A is assumed to be pre-calculated and appended to the inbound +TB mbuf data buffer. +The output mbuf data structure is expected to be allocated by the application +with enough room for the output data. + +The difference between the partial and full-size TB is that we need to know the +index of the first CB in this group and the number of CBs contained within. +The first CB index is given by ``r`` but the number of the remaining CBs is +calculated automatically by BBDEV before passing down to the driver. + +The number of remaining CBs should not be confused with ``c``. ``c`` is the +total number of CBs that composes the whole TB (this maps to C as +described in 3GPP TS 36.212 section 5.1.2). + +The ``length`` is total size of the CBs inclusive of any CRC24A and CRC24B in +case they were appended by the application. + +The case when one CB belongs to TB and is being enqueued individually to BBDEV, +this case is considered as a special case of partial TB where its number of CBs +is 1. Therefore, it requires to get processed in TB-mode. + +The figure below visualizes the encoding of CBs using BBDEV interface in +TB-mode. CB-mode is a reduced version, where only one CB exists: + +.. _figure_turbo_tb_encode: + +.. figure:: img/turbo_tb_encode.* + + Turbo encoding of Code Blocks in mbuf structure + + +BBDEV Turbo Decode Operation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: c @@ -452,18 +600,75 @@ operation outcome. }; }; -Input and output data buffers are identified by ``rte_bbdev_op_data`` structure. -This structure has three elements: +The Turbo decode structure is composed of the ``input`` and ``output`` mbuf +data pointers. + +``op_flags`` parameter holds all operation related flags, like whether CRC24B is +retained or not. + +``code_block_mode`` flag identifies the mode in which bbdev is operating in. + +Similarly, the decode interface works on both the code block (CB) and the +transport block (TB). An operation executes in "CB-mode" when the CB is +standalone. While "TB-mode" executes when an operation performs on one or +multiple CBs that belong to a TB. Therefore, a given data can be standalone CB, +full-size TB or partial TB. Partial TB means that only a subset of CBs belonging +to a bigger TB are being enqueued. + + **NOTE:** It is assumed that all enqueued ops in one ``rte_bbdev_enqueue_dec_ops()`` + call belong to one mode, either CB-mode or TB-mode. + +The input ``k`` is the size of the decoded CB (this maps to K as described in +3GPP TS 36.212 section 5.1.2), this size is inclusive of CRC24A/B. +The ``length`` is inclusive of CRC24A/B and equals to ``k`` in this case. + +The input encoded CB data is the Virtual Circular Buffer data stream, wk, with +the null padding included as described in 3GPP TS 36.212 section 5.1.4.1.2 and +shown in 3GPP TS 36.212 section 5.1.4.1 Figure 5.1.4-1. 
+The size of the virtual circular buffer is 3*Kpi, where Kpi is the 32 byte +aligned value of K, as specified in 3GPP TS 36.212 section 5.1.4.1.1. + +Each byte in the input circular buffer is the LLR value of each bit of the +original CB. + +``hard_output`` is a mandatory capability that all BBDEV PMDs support. This is +the decoded CBs of K sizes (CRC24A/B is the last 24-bit in each decoded CB). +Soft output is an optional capability for BBDEV PMDs. Setting flag +``RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP`` in ``op_flags`` directs BBDEV to retain +CRC24B at the end of each CB. This might be useful for the application in debug +mode. +An LLR rate matched output is computed in the ``soft_output`` buffer structure +for the given ``e`` size (this maps to E described in 3GPP TS 36.212 section +5.1.4.1.2). The output mbuf buffer size needs to be big enough to hold the +encoded buffer of size ``e``. + +The first CB Virtual Circular Buffer (VCB) index is given by ``r`` but the +number of the remaining CB VCBs is calculated automatically by BBDEV before +passing down to the driver. + +The number of remaining CB VCBs should not be confused with ``c``. ``c`` is the +total number of CBs that composes the whole TB (this maps to C as +described in 3GPP TS 36.212 section 5.1.2). + +The ``length`` is total size of the CBs inclusive of any CRC24A and CRC24B in +case they were appended by the application. + +The case when one CB belongs to TB and is being enqueued individually to BBDEV, +this case is considered as a special case of partial TB where its number of CBs +is 1. Therefore, it requires to get processed in TB-mode. + +The output mbuf data structure is expected to be allocated by the application +with enough room for the output data. + +The figure below visualizes the decoding of CBs using BBDEV interface in +TB-mode. CB-mode is a reduced version, where only one CB exists: + +.. _figure_turbo_tb_decode: -- ``data`` - This is the mbuf reference +.. figure:: img/turbo_tb_decode.* -- ``offset`` - The starting point for the Turbo input/output, in bytes, from the - start of the data in the data buffer. It must be smaller than data_len of the - mbuf's first segment + Turbo decoding of Code Blocks in mbuf structure -- ``length`` - The length, in bytes, of the buffer on which the Turbo operation - will or has been computed. For the input, the length is set by the application. - For the output(s), the length is computed by the bbdev PMD driver. Sample code ----------- diff --git a/doc/guides/prog_guide/bpf_lib.rst b/doc/guides/prog_guide/bpf_lib.rst new file mode 100644 index 00000000..7c08e6b2 --- /dev/null +++ b/doc/guides/prog_guide/bpf_lib.rst @@ -0,0 +1,38 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2018 Intel Corporation. + +Berkeley Packet Filter Library +============================== + +The DPDK provides an BPF library that gives the ability +to load and execute Enhanced Berkeley Packet Filter (eBPF) bytecode within +user-space dpdk application. + +It supports basic set of features from eBPF spec. +Please refer to the +`eBPF spec <https://www.kernel.org/doc/Documentation/networking/filter.txt>` +for more information. +Also it introduces basic framework to load/unload BPF-based filters +on eth devices (right now only via SW RX/TX callbacks). + +The library API provides the following basic operations: + +* Create a new BPF execution context and load user provided eBPF code into it. + +* Destroy an BPF execution context and its runtime structures and free the associated memory. 
+
+* Execute eBPF bytecode associated with a provided input parameter.
+
+* Provide information about natively compiled code for a given BPF context.
+
+* Load a BPF program from an ELF file and install a callback to execute it on a given ethdev port/queue.
+
+Not currently supported eBPF features
+-------------------------------------
+
+ - JIT for non-X86_64 platforms
+ - cBPF
+ - tail-pointer call
+ - eBPF MAP
+ - skb
+ - external function calls for 32-bit platforms
diff --git a/doc/guides/prog_guide/compressdev.rst b/doc/guides/prog_guide/compressdev.rst
new file mode 100644
index 00000000..87e26490
--- /dev/null
+++ b/doc/guides/prog_guide/compressdev.rst
@@ -0,0 +1,623 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2017-2018 Cavium Networks.
+
+Compression Device Library
+===========================
+
+The compression framework provides a generic set of APIs to perform compression services
+as well as to query and configure compression devices, both physical (hardware) and
+virtual (software), to perform those services. The framework currently only supports
+lossless compression schemes: Deflate and LZS.
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical compression devices are discovered during the bus probe of the EAL function
+which is executed at DPDK initialization, based on their unique device identifier.
+For example, PCI devices can be identified using the PCI BDF (bus/bridge, device,
+function). Specific physical compression devices, like other physical devices in DPDK,
+can be white-listed or black-listed using the EAL command line options.
+
+Virtual devices can be created by two mechanisms, either using the EAL command
+line options or from within the application using an EAL API directly.
+
+From the command line using the --vdev EAL option
+
+.. code-block:: console
+
+   --vdev '<pmd name>,socket_id=0'
+
+.. Note::
+
+   * If a DPDK application requires multiple software compression PMD devices then the
+     required number of ``--vdev`` arguments with appropriate parameters must be added.
+
+   * An application with multiple compression device instances exposed by the same PMD must
+     specify a unique name for each device.
+
+   Example: ``--vdev 'pmd0' --vdev 'pmd1'``
+
+Or, by using the rte_vdev_init API within the application code.
+
+.. code-block:: c
+
+   rte_vdev_init("<pmd_name>","socket_id=0")
+
+All virtual compression devices support the following initialization parameters:
+
+* ``socket_id`` - socket on which to allocate the device resources.
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each device, whether virtual or physical, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the compression device in all functions
+  exported by the compressdev API.
+
+- A device name used to designate the compression device in console messages, for
+  administration or debugging purposes.
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The configuration of each compression device includes the following operations:
+
+- Allocation of resources, including hardware resources if a physical device.
+- Resetting the device into a well-known default state.
+- Initialization of statistics counters.
+
+The ``rte_compressdev_configure`` API is used to configure a compression device.
+
+The ``rte_compressdev_config`` structure is used to pass the configuration
+parameters.
+
+See *DPDK API Reference* for details.
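+
+A minimal configuration sketch, reusing the calls that also appear in the
+pseudocode later in this document (the ``rte_compressdev_config`` field
+names shown here are assumptions for illustration):
+
+.. code-block:: c
+
+    /* Sketch: configure device 0 with one queue pair on the local socket. */
+    uint8_t cdev_id = 0;
+    struct rte_compressdev_config conf = {
+        .socket_id = rte_socket_id(),
+        .nb_queue_pairs = 1,
+        .max_nb_priv_xforms = 1,
+        .max_nb_streams = 0,
+    };
+
+    if (rte_compressdev_configure(cdev_id, &conf) < 0)
+        rte_exit(EXIT_FAILURE, "Failed to configure compressdev %u", cdev_id);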
+ +Configuration of Queue Pairs +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Each compression device queue pair is individually configured through the +``rte_compressdev_queue_pair_setup`` API. + +The ``max_inflight_ops`` is used to pass maximum number of +rte_comp_op that could be present in a queue at-a-time. +PMD then can allocate resources accordingly on a specified socket. + +See *DPDK API Reference* for details. + +Logical Cores, Memory and Queues Pair Relationships +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Library supports NUMA similarly as described in Cryptodev library section. + +A queue pair cannot be shared and should be exclusively used by a single processing +context for enqueuing operations or dequeuing operations on the same compression device +since sharing would require global locks and hinder performance. It is however possible +to use a different logical core to dequeue an operation on a queue pair from the logical +core on which it was enqueued. This means that a compression burst enqueue/dequeue +APIs are a logical place to transition from one logical core to another in a +data processing pipeline. + +Device Features and Capabilities +--------------------------------- + +Compression devices define their functionality through two mechanisms, global device +features and algorithm features. Global devices features identify device +wide level features which are applicable to the whole device such as supported hardware +acceleration and CPU features. List of compression device features can be seen in the +RTE_COMPDEV_FF_XXX macros. + +The algorithm features lists individual algo feature which device supports per-algorithm, +such as a stateful compression/decompression, checksums operation etc. List of algorithm +features can be seen in the RTE_COMP_FF_XXX macros. + +Capabilities +~~~~~~~~~~~~ +Each PMD has a list of capabilities, including algorithms listed in +enum ``rte_comp_algorithm`` and its associated feature flag and +sliding window range in log base 2 value. Sliding window tells +the minimum and maximum size of lookup window that algorithm uses +to find duplicates. + +See *DPDK API Reference* for details. + +Each Compression poll mode driver defines its array of capabilities +for each algorithm it supports. See PMD implementation for capability +initialization. + +Capabilities Discovery +~~~~~~~~~~~~~~~~~~~~~~ + +PMD capability and features are discovered via ``rte_compressdev_info_get`` function. + +The ``rte_compressdev_info`` structure contains all the relevant information for the device. + +See *DPDK API Reference* for details. + +Compression Operation +---------------------- + +DPDK compression supports two types of compression methodologies: + +- Stateless, data associated to a compression operation is compressed without any reference + to another compression operation. + +- Stateful, data in each compression operation is compressed with reference to previous compression + operations in the same data stream i.e. history of data is maintained between the operations. + +For more explanation, please refer RFC https://www.ietf.org/rfc/rfc1951.txt + +Operation Representation +~~~~~~~~~~~~~~~~~~~~~~~~ + +Compression operation is described via ``struct rte_comp_op``, which contains both input and +output data. The operation structure includes the operation type (stateless or stateful), +the operation status and the priv_xform/stream handle, source, destination and checksum buffer +pointers. It also contains the source mempool from which the operation is allocated. 
+The PMD updates the consumed field with the amount of data read from the source
+buffer and the produced field with the amount of data written into the destination
+buffer, along with the status of the operation.
+See section *Produced, Consumed And Operation Status* for more details.
+
+The compression operations mempool also has the ability to allocate private memory
+with the operation for the application's purposes. Application software is responsible
+for specifying all the operation specific fields in the ``rte_comp_op`` structure which
+are then used by the compression PMD to process the requested operation.
+
+
+Operation Management and Allocation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The compressdev library provides an API set for managing compression operations which
+utilize the Mempool Library to allocate operation buffers. Therefore, it ensures
+that the compression operation is interleaved optimally across the channels and
+ranks for optimal processing.
+
+A ``rte_comp_op`` contains a field indicating the pool it originated from.
+
+``rte_comp_op_alloc()`` and ``rte_comp_op_bulk_alloc()`` are used to allocate
+compression operations from a given compression operation mempool.
+The operation gets reset before being returned to a user so that the operation
+is always in a good known state before use by the application.
+
+``rte_comp_op_free()`` is called by the application to return an operation to
+its allocating pool.
+
+See *DPDK API Reference* for details.
+
+Passing source data as mbuf-chain
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+If input data is scattered across several different buffers, then the
+application can either parse through all such buffers and make one
+mbuf-chain and enqueue it for processing or, alternatively, it can
+make multiple sequential enqueue_burst() calls for each of them,
+processing them statefully. See *Compression API Stateful Operation*
+for stateful processing of ops.
+
+Operation Status
+~~~~~~~~~~~~~~~~
+Each operation carries status information updated by the PMD after it is processed.
+The following statuses are currently supported:
+
+- RTE_COMP_OP_STATUS_SUCCESS,
+    Operation is successfully completed
+
+- RTE_COMP_OP_STATUS_NOT_PROCESSED,
+    Operation has not yet been processed by the device
+
+- RTE_COMP_OP_STATUS_INVALID_ARGS,
+    Operation failed due to invalid arguments in request
+
+- RTE_COMP_OP_STATUS_ERROR,
+    Operation failed because of internal error
+
+- RTE_COMP_OP_STATUS_INVALID_STATE,
+    Operation is invoked in invalid state
+
+- RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED,
+    Output buffer ran out of space during processing. Error case,
+    PMD cannot continue from here.
+
+- RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE,
+    Output buffer ran out of space before operation completed, but this
+    is not an error case. Output data up to op.produced can be used and
+    next op in the stream should continue on from op.consumed+1.
+
+Produced, Consumed And Operation Status
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- If status is RTE_COMP_OP_STATUS_SUCCESS,
+    consumed = amount of data read from input buffer, and
+    produced = amount of data written in destination buffer
+- If status is RTE_COMP_OP_STATUS_FAILURE,
+    consumed = produced = 0 or undefined
+- If status is RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED,
+    consumed = 0 and
+    produced = usually 0, but in decompression cases a PMD may return > 0,
+    i.e. the amount of data successfully produced until the out of space
+    condition hit. Application can consume output data in this case, if required.
+- If status is RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE,
+    consumed = amount of data read, and
+    produced = amount of data successfully produced until
+    the out of space condition hit. The PMD can recover
+    from this point, so the application can submit the next op from
+    consumed+1 with a destination buffer that has available space.
+
+Transforms
+----------
+
+Compression transforms (``rte_comp_xform``) are the mechanism
+to specify the details of the compression operation such as algorithm,
+window size and checksum.
+
+Compression API Hash support
+----------------------------
+
+The compression API allows the application to enable digest calculation
+alongside compression and decompression of data. A PMD reflects its
+support for hash algorithms via capability algo feature flags.
+If supported, the PMD always calculates the digest on plaintext, i.e.
+before compression and after decompression.
+
+The currently supported hash algorithms are SHA-1 and the SHA2 family's
+SHA256.
+
+See *DPDK API Reference* for details.
+
+If required, the application should set a valid hash algorithm in the compress
+or decompress xforms during ``rte_compressdev_stream_create()``
+or ``rte_compressdev_private_xform_create()`` and pass a valid
+output buffer in the ``rte_comp_op`` hash field struct to store the
+resulting digest. The buffer passed should be contiguous and large
+enough to store the digest, which is 20 bytes for SHA-1 and
+32 bytes for SHA2-256.
+
+Compression API Stateless operation
+------------------------------------
+
+An op is processed statelessly if it has:
+
+- op_type set to RTE_COMP_OP_STATELESS
+- flush value set to RTE_FLUSH_FULL or RTE_FLUSH_FINAL
+  (required only on the compression side)
+- all required input in the source buffer
+
+When all of the above conditions are met, the PMD initiates stateless processing
+and releases acquired resources after processing of the current operation is
+complete. The application can enqueue multiple stateless ops in a single burst
+and must attach a priv_xform handle to such ops.
+
+priv_xform in Stateless operation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+priv_xform is PMD-internal private data that the PMD maintains to do stateless
+processing. priv_xforms are initialized by the application providing a generic
+xform structure in a call to ``rte_comp_private_xform_create``; in return the
+PMD provides an opaque priv_xform reference. If the PMD supports a SHAREABLE
+priv_xform, indicated via an algorithm feature flag, then the application can
+attach the same priv_xform to many stateless ops at a time. If not, then the
+application needs to create as many priv_xforms as it expects to have stateless
+operations in-flight.
+
+.. figure:: img/stateless-op.*
+
+   Stateless Ops using Non-Shareable priv_xform
+
+
+.. figure:: img/stateless-op-shared.*
+
+   Stateless Ops using Shareable priv_xform
+
+
+The application should call ``rte_compressdev_private_xform_create()`` and attach
+the handle to each stateless op before enqueuing it for processing, and free it
+via ``rte_compressdev_private_xform_free()`` during termination.
+
+An example pseudocode to set up and process NUM_OPS stateless ops, each of length
+OP_LEN, using a priv_xform would look like:
+
+.. code-block:: c
+
+    /*
+     * pseudocode for stateless compression
+     */
+
+    uint8_t cdev_id = rte_compdev_get_dev_id(<pmd name>);
+
+    /* configure the device. 
*/ + if (rte_compressdev_configure(cdev_id, &conf) < 0) + rte_exit(EXIT_FAILURE, "Failed to configure compressdev %u", cdev_id); + + if (rte_compressdev_queue_pair_setup(cdev_id, 0, NUM_MAX_INFLIGHT_OPS, + socket_id()) < 0) + rte_exit(EXIT_FAILURE, "Failed to setup queue pair\n"); + + if (rte_compressdev_start(cdev_id) < 0) + rte_exit(EXIT_FAILURE, "Failed to start device\n"); + + /* setup compress transform */ + struct rte_compress_compress_xform compress_xform = { + .type = RTE_COMP_COMPRESS, + .compress = { + .algo = RTE_COMP_ALGO_DEFLATE, + .deflate = { + .huffman = RTE_COMP_HUFFMAN_DEFAULT + }, + .level = RTE_COMP_LEVEL_PMD_DEFAULT, + .chksum = RTE_COMP_CHECKSUM_NONE, + .window_size = DEFAULT_WINDOW_SIZE, + .hash_algo = RTE_COMP_HASH_ALGO_NONE + } + }; + + /* create priv_xform and initialize it for the compression device. */ + void *priv_xform = NULL; + rte_compressdev_info_get(cdev_id, &dev_info); + if(dev_info.capability->comps_feature_flag & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) { + rte_comp_priv_xform_create(cdev_id, &compress_xform, &priv_xform); + } else { + shareable = 0; + } + + /* create operation pool via call to rte_comp_op_pool_create and alloc ops */ + rte_comp_op_bulk_alloc(op_pool, comp_ops, NUM_OPS); + + /* prepare ops for compression operations */ + for (i = 0; i < NUM_OPS; i++) { + struct rte_comp_op *op = comp_ops[i]; + if (!shareable) + rte_priv_xform_create(cdev_id, &compress_xform, &op->priv_xform) + else + op->priv_xform = priv_xform; + op->type = RTE_COMP_OP_STATELESS; + op->flush = RTE_COMP_FLUSH_FINAL; + + op->src.offset = 0; + op->dst.offset = 0; + op->src.length = OP_LEN; + op->input_chksum = 0; + setup op->m_src and op->m_dst; + } + num_enqd = rte_compressdev_enqueue_burst(cdev_id, 0, comp_ops, NUM_OPS); + /* wait for this to complete before enqueing next*/ + do { + num_deque = rte_compressdev_dequeue_burst(cdev_id, 0 , &processed_ops, NUM_OPS); + } while (num_dqud < num_enqd); + + +Stateless and OUT_OF_SPACE +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +OUT_OF_SPACE is a condition when output buffer runs out of space and where PMD +still has more data to produce. If PMD runs into such condition, then PMD returns +RTE_COMP_OP_OUT_OF_SPACE_TERMINATED error. In such case, PMD resets itself and can set +consumed=0 and produced=amount of output it could produce before hitting out_of_space. +Application would need to resubmit the whole input with a larger output buffer, if it +wants the operation to be completed. + +Hash in Stateless +~~~~~~~~~~~~~~~~~ +If hash is enabled, digest buffer will contain valid data after op is successfully +processed i.e. dequeued with status = RTE_COMP_OP_STATUS_SUCCESS. + +Checksum in Stateless +~~~~~~~~~~~~~~~~~~~~~ +If checksum is enabled, checksum will only be available after op is successfully +processed i.e. dequeued with status = RTE_COMP_OP_STATUS_SUCCESS. + +Compression API Stateful operation +----------------------------------- + +Compression API provide RTE_COMP_FF_STATEFUL_COMPRESSION and +RTE_COMP_FF_STATEFUL_DECOMPRESSION feature flag for PMD to reflect +its support for Stateful operations. + +A Stateful operation in DPDK compression means application invokes enqueue +burst() multiple times to process related chunk of data because +application broke data into several ops. + +In such case +- ops are setup with op_type RTE_COMP_OP_STATEFUL, +- all ops except last set to flush value = RTE_COMP_NO/SYNC_FLUSH +and last set to flush value RTE_COMP_FULL/FINAL_FLUSH. 
+ +In case of either one or all of the above conditions, PMD initiates +stateful processing and releases acquired resources after processing +operation with flush value = RTE_COMP_FLUSH_FULL/FINAL is complete. +Unlike stateless, application can enqueue only one stateful op from +a particular stream at a time and must attach stream handle +to each op. + +Stream in Stateful operation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +`stream` in DPDK compression is a logical entity which identifies related set of ops, say, a one large +file broken into multiple chunks then file is represented by a stream and each chunk of that file is +represented by compression op `rte_comp_op`. Whenever application wants a stateful processing of such +data, then it must get a stream handle via making call to ``rte_comp_stream_create()`` +with xform, at an output the target PMD will return an opaque stream handle to application which +it must attach to all of the ops carrying data of that stream. In stateful processing, every op +requires previous op data for compression/decompression. A PMD allocates and set up resources such +as history, states, etc. within a stream, which are maintained during the processing of the related ops. + +Unlike priv_xforms, stream is always a NON_SHAREABLE entity. One stream handle must be attached to only +one set of related ops and cannot be reused until all of them are processed with status Success or failure. + +.. figure:: img/stateful-op.* + + Stateful Ops + + +Application should call ``rte_comp_stream_create()`` and attach to op before +enqueuing them for processing and free via ``rte_comp_stream_free()`` during +termination. All ops that are to be processed statefully should carry *same* stream. + +See *DPDK API Reference* document for details. + +An example pseudocode to set up and process a stream having NUM_CHUNKS with each chunk size of CHUNK_LEN would look like: + +.. code-block:: c + + /* + * pseudocode for stateful compression + */ + + uint8_t cdev_id = rte_compdev_get_dev_id(<pmd name>); + + /* configure the device. */ + if (rte_compressdev_configure(cdev_id, &conf) < 0) + rte_exit(EXIT_FAILURE, "Failed to configure compressdev %u", cdev_id); + + if (rte_compressdev_queue_pair_setup(cdev_id, 0, NUM_MAX_INFLIGHT_OPS, + socket_id()) < 0) + rte_exit(EXIT_FAILURE, "Failed to setup queue pair\n"); + + if (rte_compressdev_start(cdev_id) < 0) + rte_exit(EXIT_FAILURE, "Failed to start device\n"); + + /* setup compress transform. */ + struct rte_compress_compress_xform compress_xform = { + .type = RTE_COMP_COMPRESS, + .compress = { + .algo = RTE_COMP_ALGO_DEFLATE, + .deflate = { + .huffman = RTE_COMP_HUFFMAN_DEFAULT + }, + .level = RTE_COMP_LEVEL_PMD_DEFAULT, + .chksum = RTE_COMP_CHECKSUM_NONE, + .window_size = DEFAULT_WINDOW_SIZE, + .hash_algo = RTE_COMP_HASH_ALGO_NONE + } + }; + + /* create stream */ + rte_comp_stream_create(cdev_id, &compress_xform, &stream); + + /* create an op pool and allocate ops */ + rte_comp_op_bulk_alloc(op_pool, comp_ops, NUM_CHUNKS); + + /* Prepare source and destination mbufs for compression operations */ + unsigned int i; + for (i = 0; i < NUM_CHUNKS; i++) { + if (rte_pktmbuf_append(mbufs[i], CHUNK_LEN) == NULL) + rte_exit(EXIT_FAILURE, "Not enough room in the mbuf\n"); + comp_ops[i]->m_src = mbufs[i]; + if (rte_pktmbuf_append(dst_mbufs[i], CHUNK_LEN) == NULL) + rte_exit(EXIT_FAILURE, "Not enough room in the mbuf\n"); + comp_ops[i]->m_dst = dst_mbufs[i]; + } + + /* Set up the compress operations. 
+    for (i = 0; i < NUM_CHUNKS; i++) {
+        struct rte_comp_op *op = comp_ops[i];
+        op->stream = stream;
+        op->m_src = mbufs[i];
+        op->m_dst = dst_mbufs[i];
+        op->op_type = RTE_COMP_OP_STATEFUL;
+        if (i == NUM_CHUNKS - 1) {
+            /* set to final, if last chunk */
+            op->flush_flag = RTE_COMP_FLUSH_FINAL;
+        } else {
+            /* set to NONE, for all intermediary ops */
+            op->flush_flag = RTE_COMP_FLUSH_NONE;
+        }
+        op->src.offset = 0;
+        op->dst.offset = 0;
+        op->src.length = CHUNK_LEN;
+        op->input_chksum = 0;
+        num_enqd = rte_compressdev_enqueue_burst(cdev_id, 0, &op, 1);
+        /* wait for this to complete before enqueuing the next op */
+        do {
+            num_deqd = rte_compressdev_dequeue_burst(cdev_id, 0, &processed_ops, 1);
+        } while (num_deqd < num_enqd);
+        /* push next op */
+    }
+
+
+Stateful and OUT_OF_SPACE
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If a PMD supports stateful operation, then the OUT_OF_SPACE status is not an actual
+error for the PMD. In such a case, the PMD returns with status
+RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE, with consumed = the number of input bytes
+read and produced = the length of the complete output buffer.
+The application should enqueue the next op with the source starting at consumed + 1 and an
+output buffer with available space.
+
+Hash in Stateful
+~~~~~~~~~~~~~~~~
+If enabled, the digest buffer will contain a valid digest after the last op in the stream
+(having flush = RTE_COMP_FLUSH_FINAL) is successfully processed, i.e. dequeued
+with status = RTE_COMP_OP_STATUS_SUCCESS.
+
+Checksum in Stateful
+~~~~~~~~~~~~~~~~~~~~
+If enabled, the checksum will only be available after the last op in the stream
+(having flush = RTE_COMP_FLUSH_FINAL) is successfully processed, i.e. dequeued
+with status = RTE_COMP_OP_STATUS_SUCCESS.
+
+Burst in compression API
+-------------------------
+
+Scheduling of compression operations on DPDK's application data path is
+performed using a burst oriented asynchronous API set. A queue pair on a compression
+device accepts a burst of compression operations using the enqueue burst API. On physical
+devices the enqueue burst API will place the operations to be processed
+on the device's hardware input queue; for virtual devices the processing of the
+operations is usually completed during the enqueue call to the compression
+device. The dequeue burst API will retrieve any processed operations available
+from the queue pair on the compression device; for physical devices this is usually
+directly from the device's processed queue, and for virtual devices from a
+``rte_ring`` where processed operations are placed after being processed on the
+enqueue call.
+
+A burst in DPDK compression can be a combination of stateless and stateful operations with the
+condition that for stateful ops only one op at a time should be enqueued from a particular stream,
+i.e. no two ops should belong to the same stream in a single burst. However, a burst may contain
+multiple stateful ops, as long as each op is attached to a different stream, i.e. a burst can look
+like:
+
++---------------+--------------+--------------+-----------------+--------------+--------------+
+| enqueue_burst | op1.no_flush | op2.no_flush | op3.flush_final | op4.no_flush | op5.no_flush |
++---------------+--------------+--------------+-----------------+--------------+--------------+
+
+Here op1 .. op5 all belong to different independent data units. op1, op2, op4 and op5 must be
+stateful, as stateless ops can only use flush full or final, while op3 can be of type stateless
+or stateful.
+Every op with op_type set to RTE_COMP_OP_STATELESS must be attached to a priv_xform, and
+every op with op_type set to RTE_COMP_OP_STATEFUL *must* be attached to a stream.
+
+Since each operation in a burst is independent and thus can be completed
+out of order, applications which need ordering should set up a per-op user data
+area with reordering information, so that they can determine the enqueue order at
+dequeue time.
+
+Also, if multiple threads call enqueue_burst() on the same queue pair, then it is the
+application's responsibility to use a proper locking mechanism to ensure exclusive
+enqueuing of operations.
+
+Enqueue / Dequeue Burst APIs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The burst enqueue API uses a compression device identifier and a queue pair
+identifier to specify the compression device queue pair to schedule the processing on.
+The ``nb_ops`` parameter is the number of operations to process which are
+supplied in the ``ops`` array of ``rte_comp_op`` structures.
+The enqueue function returns the number of operations it actually enqueued for
+processing; a return value equal to ``nb_ops`` means that all operations have been
+enqueued.
+
+The dequeue API uses the same format as the enqueue API but
+the ``nb_ops`` and ``ops`` parameters are now used to specify the maximum number of processed
+operations the user wishes to retrieve and the location in which to store them.
+The API call returns the actual number of processed operations returned; this
+can never be larger than ``nb_ops``.
+
+Sample code
+-----------
+
+There is a unit test application inside test/test/test_compressdev.c that shows
+how to use the compressdev library.
+
+Compression Device API
+~~~~~~~~~~~~~~~~~~~~~~
+
+The compressdev Library API is described in the *DPDK API Reference* document.
diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index 066fe2d2..90d01e93 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -8,7 +8,7 @@
 The cryptodev library provides a Crypto device framework for management and
 provisioning of hardware and software Crypto poll mode drivers, defining generic
 APIs which support a number of different Crypto operations. The framework
 currently only supports cipher, authentication, chained cipher/authentication
-and AEAD symmetric Crypto operations.
+and AEAD symmetric and asymmetric Crypto operations.
 
 
 Design Principles
@@ -41,7 +41,7 @@ From the command line using the --vdev EAL option
 
 .. code-block:: console
 
-   --vdev 'crypto_aesni_mb0,max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0'
+   --vdev 'crypto_aesni_mb0,max_nb_queue_pairs=2,socket_id=0'
 
 .. Note::
 
@@ -57,12 +57,11 @@ Or using the rte_vdev_init API within the application code.
 
 .. code-block:: c
 
    rte_vdev_init("crypto_aesni_mb",
-             "max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0")
+             "max_nb_queue_pairs=2,socket_id=0")
 
 All virtual Crypto devices support the following initialization parameters:
 
 * ``max_nb_queue_pairs`` - maximum number of queue pairs supported by the device.
-* ``max_nb_sessions`` - maximum number of sessions supported by the device
 * ``socket_id`` - socket on which to allocate the device resources on.
 
 
@@ -159,8 +158,8 @@ Device Features and Capabilities
 
 Crypto devices define their functionality through two mechanisms, global device
 features and algorithm capabilities.
 Global devices features identify device
 wide level features which are applicable to the whole device such as
-the device having hardware acceleration or supporting symmetric Crypto
-operations,
+the device having hardware acceleration or supporting symmetric and/or asymmetric
+Crypto operations.
 
 The capabilities mechanism defines the individual algorithms/functions which
 the device supports, such as a specific symmetric Crypto cipher,
@@ -269,7 +268,7 @@ relevant information for the device.
 
     struct rte_cryptodev_info {
         const char *driver_name;
         uint8_t driver_id;
-        struct rte_pci_device *pci_dev;
+        struct rte_device *device;
 
         uint64_t feature_flags;
@@ -299,6 +298,33 @@ directly from the devices processed queue, and for virtual device's from a
 enqueue call.
 
 
+Private data
+~~~~~~~~~~~~
+For session-based operations, the set and get APIs provide a mechanism for an
+application to store and retrieve private user data information stored along
+with the crypto session.
+
+For example, suppose an application is submitting a crypto operation with a session
+associated and wants to indicate private user data information which is required to be
+used after completion of the crypto operation. In this case, the application can use
+the set API to set the user data and retrieve it using the get API.
+
+.. code-block:: c
+
+    int rte_cryptodev_sym_session_set_user_data(
+        struct rte_cryptodev_sym_session *sess, void *data, uint16_t size);
+
+    void * rte_cryptodev_sym_session_get_user_data(
+        struct rte_cryptodev_sym_session *sess);
+
+
+For session-less mode, the private user data information can be placed along with the
+``struct rte_crypto_op``. The ``rte_crypto_op::private_data_offset`` indicates the
+start of private data information. The offset is counted from the start of the
+rte_crypto_op including other crypto information such as the IVs (since there can
+be an IV also for authentication).
+
+
 Enqueue / Dequeue Burst APIs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -419,7 +445,7 @@ Crypto workloads.
 
 .. figure:: img/cryptodev_sym_sess.*
 
-The Crypto device framework provides APIs to allocate and initizalize sessions
+The Crypto device framework provides APIs to allocate and initialize sessions
 for crypto devices, where sessions are mempool objects.
 It is the application's responsibility to create and manage the session mempools.
 This approach allows for different scenarios such as having a single session
@@ -427,12 +453,12 @@ mempool for all crypto devices (where the mempool object size is big enough to
 hold the private session of any crypto device), as well as having multiple
 session mempools of different sizes for better memory usage.
 
-An application can use ``rte_cryptodev_get_private_session_size()`` to
+An application can use ``rte_cryptodev_sym_get_private_session_size()`` to
 get the private session size of given crypto device. This function would allow
 an application to calculate the max device session size of all crypto devices
 to create a single session mempool.
 If instead an application creates multiple session mempools, the Crypto device
-framework also provides ``rte_cryptodev_get_header_session_size`` to get
+framework also provides ``rte_cryptodev_sym_get_header_session_size`` to get
 the size of an uninitialized session.
 
 Once the session mempools have been created, ``rte_cryptodev_sym_session_create()``
@@ -635,7 +661,7 @@ using one of the crypto PMDs available in DPDK.
 
         uint8_t cdev_id = rte_cryptodev_get_dev_id(crypto_name);
 
         /* Get private session data size. */
-        session_size = rte_cryptodev_get_private_session_size(cdev_id);
+        session_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
 
         /*
          * Create session mempool, with two objects per session,
@@ -761,14 +787,260 @@ using one of the crypto PMDs available in DPDK.
             num_dequeued_ops);
     } while (total_num_dequeued_ops < num_enqueued_ops);
 
-
 Asymmetric Cryptography
 -----------------------
 
-Asymmetric functionality is currently not supported by the cryptodev API.
+The cryptodev library currently provides support for the following asymmetric
+Crypto operations: RSA, modular exponentiation and inversion, Diffie-Hellman
+public and/or private key generation and shared secret computation, and DSA
+signature generation and verification.
+
+Session and Session Management
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sessions are used in asymmetric cryptographic processing to store the immutable
+data defined in an asymmetric cryptographic transform, which is used further in
+operation processing. Sessions typically store information such as public
+and private key data, domain parameters or prime modulus data, i.e. data immutable
+across data sets. Crypto sessions cache this immutable data in an optimal way for the
+underlying PMD, and this allows further acceleration of the offload of Crypto workloads.
+
+Like symmetric, the Crypto device framework provides APIs to allocate and initialize
+asymmetric sessions for crypto devices, where sessions are mempool objects.
+It is the application's responsibility to create and manage the session mempools.
+An application using both symmetric and asymmetric sessions should allocate and maintain
+separate session pools for each type.
+
+An application can use ``rte_cryptodev_asym_get_private_session_size()`` to
+get the private size of an asymmetric session on a given crypto device. This
+function would allow an application to calculate the max device asymmetric
+session size of all crypto devices to create a single session mempool.
+If instead an application creates multiple asymmetric session mempools,
+the Crypto device framework also provides ``rte_cryptodev_asym_get_header_session_size()`` to get
+the size of an uninitialized session.
+
+Once the session mempools have been created, ``rte_cryptodev_asym_session_create()``
+is used to allocate an uninitialized asymmetric session from the given mempool.
+The session then must be initialized using ``rte_cryptodev_asym_session_init()``
+for each of the required crypto devices. An asymmetric transform chain
+is used to specify the operation and its parameters. See the section below for
+details on transforms.
+
+When a session is no longer used, the user must call ``rte_cryptodev_asym_session_clear()``
+for each of the crypto devices that are using the session, to free all driver
+private asymmetric session data. Once this is done, the session should be freed using
+``rte_cryptodev_asym_session_free()``, which returns it to its mempool.
+
+Asymmetric Sessionless Support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Currently, the asymmetric crypto framework does not support sessionless operation.
+
+Transforms and Transform Chaining
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Asymmetric Crypto transforms (``rte_crypto_asym_xform``) are the mechanism used
+to specify the details of the asymmetric Crypto operation. The next pointer within
+an xform allows transforms to be chained together. It is also important to note that
+the order in which the transforms are passed indicates the order of the chaining.
+
+Not all asymmetric crypto xforms are supported for chaining.
+Currently, the only supported asymmetric crypto chaining is Diffie-Hellman private key
+generation followed by public key generation. Also, the API currently does not support
+chaining of symmetric and asymmetric crypto xforms.
+
+Each xform defines a specific asymmetric crypto algorithm. Currently supported are:
+
+* RSA
+* Modular operations (Exponentiation and Inverse)
+* Diffie-Hellman
+* DSA
+* None - a special case where the PMD may support a passthrough mode, mostly for diagnostic purposes
+
+See *DPDK API Reference* for details on each rte_crypto_xxx_xform struct.
+
+Asymmetric Operations
+~~~~~~~~~~~~~~~~~~~~~
+
+The asymmetric Crypto operation structure contains all the mutable data relating
+to asymmetric cryptographic processing on an input data buffer. It uses either
+RSA, Modular, Diffie-Hellman or DSA operations, depending upon the session it is
+attached to.
+
+Every operation must carry a valid session handle, which further carries information
+on the xform or xform chain to be performed on the op. Every xform type defines its
+own set of operational params in its respective rte_crypto_xxx_op_param struct.
+Depending on the xform information within the session, the PMD picks up and processes
+the respective op_param struct.
+Unlike symmetric, asymmetric operations do not use mbufs for input/output.
+They operate on data buffers of type ``rte_crypto_param``.
+
+See *DPDK API Reference* for details on each rte_crypto_xxx_op_param struct.
+
+Asymmetric crypto Sample code
+-----------------------------
+
+There is a unit test application test_cryptodev_asym.c inside the unit test framework
+that shows how to set up and process asymmetric operations using the cryptodev library.
+
+The following sample code shows the basic steps to compute modular exponentiation
+with a 1024-bit modulus using the openssl PMD available in DPDK (performing other
+crypto operations is similar, except for the respective op and xform setup).
+
+.. code-block:: c
+
+    /*
+     * Simple example to compute modular exponentiation with a 1024-bit key
+     *
+     */
+    #define MAX_ASYM_SESSIONS	10
+    #define NUM_ASYM_BUFS	10
+
+    struct rte_mempool *crypto_op_pool, *asym_session_pool;
+    unsigned int asym_session_size;
+    int ret;
+
+    /* Initialize EAL. */
+    ret = rte_eal_init(argc, argv);
+    if (ret < 0)
+        rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+
+    uint8_t socket_id = rte_socket_id();
+
+    /* Create crypto operation pool. */
+    crypto_op_pool = rte_crypto_op_pool_create(
+            "crypto_op_pool",
+            RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+            NUM_ASYM_BUFS, 0, 0,
+            socket_id);
+    if (crypto_op_pool == NULL)
+        rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
+
+    /* Create the virtual crypto device. */
+    char args[128];
+    const char *crypto_name = "crypto_openssl";
+    snprintf(args, sizeof(args), "socket_id=%d", socket_id);
+    ret = rte_vdev_init(crypto_name, args);
+    if (ret != 0)
+        rte_exit(EXIT_FAILURE, "Cannot create virtual device");
+
+    uint8_t cdev_id = rte_cryptodev_get_dev_id(crypto_name);
+
+    /* Get private asym session data size. */
+    asym_session_size = rte_cryptodev_asym_get_private_session_size(cdev_id);
+
+    /*
+     * Create session mempool, with two objects per session,
+     * one for the session header and another one for the
+     * private asym session data for the crypto device.
+     */
+    asym_session_pool = rte_mempool_create("asym_session_pool",
+                    MAX_ASYM_SESSIONS * 2,
+                    asym_session_size,
+                    0,
+                    0, NULL, NULL, NULL,
+                    NULL, socket_id,
+                    0);
+
+    /* Configure the crypto device. */
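+    /* The asym session mempool is passed to the queue pair setup below so
+     * that the device can allocate session-related data if it needs to; the
+     * descriptor count here is arbitrary for this example.
+     */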
+    struct rte_cryptodev_config conf = {
+        .nb_queue_pairs = 1,
+        .socket_id = socket_id
+    };
+    struct rte_cryptodev_qp_conf qp_conf = {
+        .nb_descriptors = 2048
+    };
+
+    if (rte_cryptodev_configure(cdev_id, &conf) < 0)
+        rte_exit(EXIT_FAILURE, "Failed to configure cryptodev %u", cdev_id);
+
+    if (rte_cryptodev_queue_pair_setup(cdev_id, 0, &qp_conf,
+                            socket_id, asym_session_pool) < 0)
+        rte_exit(EXIT_FAILURE, "Failed to setup queue pair\n");
+
+    if (rte_cryptodev_start(cdev_id) < 0)
+        rte_exit(EXIT_FAILURE, "Failed to start device\n");
+
+    /* Setup crypto xform to do modular exponentiation with 1024-bit
+     * modulus length.
+     */
+    struct rte_crypto_asym_xform modex_xform = {
+        .next = NULL,
+        .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
+        .modex = {
+            .modulus = {
+                .data =
+                    (uint8_t *)
+                    ("\xb3\xa1\xaf\xb7\x13\x08\x00\x0a\x35\xdc\x2b\x20\x8d"
+                    "\xa1\xb5\xce\x47\x8a\xc3\x80\xf4\x7d\x4a\xa2\x62\xfd\x61\x7f"
+                    "\xb5\xa8\xde\x0a\x17\x97\xa0\xbf\xdf\x56\x5a\x3d\x51\x56\x4f"
+                    "\x70\x70\x3f\x63\x6a\x44\x5b\xad\x84\x0d\x3f\x27\x6e\x3b\x34"
+                    "\x91\x60\x14\xb9\xaa\x72\xfd\xa3\x64\xd2\x03\xa7\x53\x87\x9e"
+                    "\x88\x0b\xc1\x14\x93\x1a\x62\xff\xb1\x5d\x74\xcd\x59\x63\x18"
+                    "\x11\x3d\x4f\xba\x75\xd4\x33\x4e\x23\x6b\x7b\x57\x44\xe1\xd3"
+                    "\x03\x13\xa6\xf0\x8b\x60\xb0\x9e\xee\x75\x08\x9d\x71\x63\x13"
+                    "\xcb\xa6\x81\x92\x14\x03\x22\x2d\xde\x55"),
+                .length = 128
+            },
+            .exponent = {
+                .data = (uint8_t *)("\x01\x00\x01"),
+                .length = 3
+            }
+        }
+    };
+
+    /* Create asym crypto session and initialize it for the crypto device. */
+    struct rte_cryptodev_asym_session *asym_session;
+    asym_session = rte_cryptodev_asym_session_create(asym_session_pool);
+    if (asym_session == NULL)
+        rte_exit(EXIT_FAILURE, "Session could not be created\n");
+
+    if (rte_cryptodev_asym_session_init(cdev_id, asym_session,
+                    &modex_xform, asym_session_pool) < 0)
+        rte_exit(EXIT_FAILURE, "Session could not be initialized "
+                    "for the crypto device\n");
+
+    /* Get a burst of crypto operations. */
+    struct rte_crypto_op *crypto_ops[1];
+    if (rte_crypto_op_bulk_alloc(crypto_op_pool,
+                    RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+                    crypto_ops, 1) == 0)
+        rte_exit(EXIT_FAILURE, "Not enough crypto operations available\n");
+
+    /* Set up the crypto operations. */
+    struct rte_crypto_asym_op *asym_op = crypto_ops[0]->asym;
+
+    /* calculate mod exp of value 0xf8 */
+    static unsigned char base[] = {0xF8};
+    asym_op->modex.base.data = base;
+    asym_op->modex.base.length = sizeof(base);
+    /* The IOVA is not used by the software openssl PMD; a hardware PMD
+     * would need a real IOVA here.
+     */
+    asym_op->modex.base.iova = (rte_iova_t)(uintptr_t)base;
+
+    /* Attach the asym crypto session to the operation */
+    rte_crypto_op_attach_asym_session(crypto_ops[0], asym_session);
+
+    /* Enqueue the crypto operations in the crypto device. */
+    uint16_t num_enqueued_ops = rte_cryptodev_enqueue_burst(cdev_id, 0,
+                    crypto_ops, 1);
+
+    /*
+     * Dequeue the crypto operations until all the operations
+     * are processed in the crypto device.
+     */
+    uint16_t num_dequeued_ops, total_num_dequeued_ops = 0;
+    do {
+        struct rte_crypto_op *dequeued_ops[1];
+        num_dequeued_ops = rte_cryptodev_dequeue_burst(cdev_id, 0,
+                        dequeued_ops, 1);
+        total_num_dequeued_ops += num_dequeued_ops;
+
+        /* Check if operation was processed successfully */
+        if (num_dequeued_ops > 0 &&
+            dequeued_ops[0]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+            rte_exit(EXIT_FAILURE,
+                "Some operations were not processed correctly");
+
+    } while (total_num_dequeued_ops < num_enqueued_ops);
+
+
+Asymmetric Crypto Device API
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
-The cryptodev Library API is described in the *DPDK API Reference* document.
+The cryptodev Library API is described in the
+`DPDK API Reference <http://dpdk.org/doc/api/>`_
diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst
index 9a2fab1e..d362c920 100644
--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -94,10 +94,125 @@ The allocation of large contiguous physical memory is done using the hugetlbfs k
 The EAL provides an API to reserve named memory zones in this contiguous memory.
 The physical address of the reserved memory for that memory zone is also returned to the user by the memory zone reservation API.
 
+There are two modes in which the DPDK memory subsystem can operate: dynamic mode,
+and legacy mode. Both modes are explained below.
+
 .. note::
 
     Memory reservations done using the APIs provided by rte_malloc are also backed by pages from the hugetlbfs filesystem.
 
++ Dynamic memory mode
+
+Currently, this mode is only supported on Linux.
+
+In this mode, usage of hugepages by a DPDK application will grow and shrink based
+on the application's requests. Any memory allocation through ``rte_malloc()``,
+``rte_memzone_reserve()`` or other methods can potentially result in more
+hugepages being reserved from the system. Similarly, any memory deallocation can
+potentially result in hugepages being released back to the system.
+
+Memory allocated in this mode is not guaranteed to be IOVA-contiguous. If large
+chunks of IOVA-contiguous memory are required (with "large" defined as "more than
+one page"), it is recommended to either use the VFIO driver for all physical devices
+(so that IOVA and VA addresses can be the same, thereby bypassing physical addresses
+entirely), or use legacy memory mode.
+
+For chunks of memory which must be IOVA-contiguous, it is recommended to use the
+``rte_memzone_reserve()`` function with the ``RTE_MEMZONE_IOVA_CONTIG`` flag
+specified. This way, the memory allocator will ensure that, whatever memory mode is
+in use, either the reserved memory will satisfy the requirements, or the allocation
+will fail.
+
+There is no need to preallocate any memory at startup using the ``-m`` or
+``--socket-mem`` command-line parameters. However, it is still possible to do so,
+in which case the preallocated memory will be "pinned" (i.e. will never be released
+by the application back to the system). It will be possible to allocate more
+hugepages, and deallocate those, but any preallocated pages will not be freed.
+If neither ``-m`` nor ``--socket-mem`` were specified, no memory will be
+preallocated, and all memory will be allocated at runtime, as needed.
+
+Another option available in dynamic memory mode is the
+``--single-file-segments`` command-line option. This option will put pages in
+single files (per memseg list), as opposed to creating a file per page. This is
+normally not needed, but can be useful for use cases like userspace vhost, where
+there is a limited number of page file descriptors that can be passed to VirtIO.
+
+If the application (or DPDK-internal code, such as device drivers) wishes to
+receive notifications about newly allocated memory, it is possible to register
+for memory event callbacks via the ``rte_mem_event_callback_register()`` function.
+This will invoke a callback function any time DPDK's memory map has changed.
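+
+As a minimal illustrative sketch (not part of the EAL itself - the callback and
+its log format below are arbitrary; see ``rte_memory.h`` for the exact
+prototypes), such a callback could simply log every allocation and deallocation:
+
+.. code-block:: c
+
+    #include <stdio.h>
+    #include <rte_common.h>
+    #include <rte_memory.h>
+
+    /* Called by EAL whenever hugepage memory is mapped or unmapped. */
+    static void
+    mem_event_cb(enum rte_mem_event event_type, const void *addr,
+            size_t len, void *arg __rte_unused)
+    {
+        printf("memory %s: %p, %zu bytes\n",
+            event_type == RTE_MEM_EVENT_ALLOC ? "allocated" : "freed",
+            addr, len);
+    }
+
+    /* Register the callback once, e.g. right after rte_eal_init(). */
+    rte_mem_event_callback_register("example-mem-event-cb", mem_event_cb, NULL);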
+
+If the application (or DPDK-internal code, such as device drivers) wishes to be
+notified about memory allocations above a specified threshold (and have a chance
+to deny them), allocation validator callbacks are also available via the
+``rte_mem_alloc_validator_callback_register()`` function.
+
+A default validator callback is provided by EAL, which can be enabled with the
+``--socket-limit`` command-line option, as a simple way to limit the maximum amount
+of memory that can be used by a DPDK application.
+
+.. note::
+
+    In a multiprocess scenario, all related processes (i.e. the primary process, and
+    secondary processes running with the same prefix) must use the same memory
+    mode. That is, if the primary process is run in dynamic memory mode, all of its
+    secondary processes must be run in the same mode. The same is applicable to the
+    ``--single-file-segments`` command-line option - both primary and secondary
+    processes must share this mode.
+
++ Legacy memory mode
+
+This mode is enabled by specifying the ``--legacy-mem`` command-line switch to the
+EAL. This switch will have no effect on FreeBSD, as FreeBSD only supports
+legacy mode anyway.
+
+This mode mimics the historical behavior of EAL. That is, EAL will reserve all
+memory at startup, sort all memory into large IOVA-contiguous chunks, and will
+not allow acquiring or releasing hugepages from the system at runtime.
+
+If neither ``-m`` nor ``--socket-mem`` were specified, the entire available
+hugepage memory will be preallocated.
+
++ 32-bit support
+
+Additional restrictions are present when running in 32-bit mode. In dynamic
+memory mode, by default a maximum of 2 gigabytes of VA space will be preallocated,
+and all of it will be on the master lcore's NUMA node unless the ``--socket-mem``
+flag is used.
+
+In legacy mode, VA space will only be preallocated for segments that were
+requested (plus padding, to keep IOVA-contiguity).
+
++ Maximum amount of memory
+
+All possible virtual memory space that can ever be used for hugepage mapping in
+a DPDK process is preallocated at startup, thereby placing an upper limit on how
+much memory a DPDK application can have. DPDK memory is stored in segment lists;
+each segment is strictly one physical page. It is possible to change the amount
+of virtual memory being preallocated at startup by editing the following config
+variables:
+
+* ``CONFIG_RTE_MAX_MEMSEG_LISTS`` controls how many segment lists DPDK can have
+* ``CONFIG_RTE_MAX_MEM_MB_PER_LIST`` controls how many megabytes of memory each
+  segment list can address
+* ``CONFIG_RTE_MAX_MEMSEG_PER_LIST`` controls how many segments each segment list
+  can have
+* ``CONFIG_RTE_MAX_MEMSEG_PER_TYPE`` controls how many segments each memory type
+  can have (where "type" is defined as a "page size + NUMA node" combination)
+* ``CONFIG_RTE_MAX_MEM_MB_PER_TYPE`` controls how many megabytes of memory each
+  memory type can address
+* ``CONFIG_RTE_MAX_MEM_MB`` places a global maximum on the amount of memory
+  DPDK can reserve
+
+Normally, these options do not need to be changed.
+
+.. note::
+
+    Preallocated virtual memory is not to be confused with preallocated hugepage
+    memory! All DPDK processes preallocate virtual memory at startup. Hugepages
+    can later be mapped into that preallocated VA space (if dynamic memory mode
+    is enabled), and can optionally be mapped into it at startup.
+
 PCI Access
 ~~~~~~~~~~
 
@@ -211,7 +326,7 @@ Memory Segments and Memory Zones (memzone)
 
 The mapping of physical memory is provided by this feature in the EAL.
 As physical memory can have gaps, the memory is described in a table of descriptors,
-and each descriptor (called rte_memseg ) describes a contiguous portion of memory.
+and each descriptor (called rte_memseg) describes a physical page.
 
 On top of this, the memzone allocator's role is to reserve contiguous portions of physical memory.
 These zones are identified by a unique name when the memory is reserved.
@@ -225,6 +340,9 @@ Memory zones can be reserved with specific start address alignment by supplying
 The alignment value should be a power of two and not less than the cache line size (64 bytes).
 Memory zones can also be reserved from either 2 MB or 1 GB hugepages, provided that both are available on the system.
 
+Both memsegs and memzones are stored using ``rte_fbarray`` structures. Please
+refer to *DPDK API Reference* for more information.
+
 
 Multiple pthread
 ----------------
@@ -331,17 +449,21 @@ Known Issues
   Bypassing this constraint may cause the 2nd pthread to spin until the 1st one is scheduled again.
   Moreover, if the 1st pthread is preempted by a context that has a higher priority, it may even cause a deadlock.
 
-  This does not mean it cannot be used, simply, there is a need to narrow down the situation when it is used by multi-pthread on the same core.
+  This means that use cases involving preemptible pthreads should consider using rte_ring carefully.
+
+  1. It CAN be used for preemptible single-producer and single-consumer use case.
 
-  1. It CAN be used for any single-producer or single-consumer situation.
+  2. It CAN be used for non-preemptible multi-producer and preemptible single-consumer use case.
 
-  2. It MAY be used by multi-producer/consumer pthread whose scheduling policy are all SCHED_OTHER(cfs). User SHOULD be aware of the performance penalty before using it.
+  3. It CAN be used for preemptible single-producer and non-preemptible multi-consumer use case.
 
-  3. It MUST not be used by multi-producer/consumer pthreads, whose scheduling policies are SCHED_FIFO or SCHED_RR.
+  4. It MAY be used by preemptible multi-producer and/or preemptible multi-consumer pthreads whose scheduling policies are all SCHED_OTHER(cfs), SCHED_IDLE or SCHED_BATCH. The user SHOULD be aware of the performance penalty before using it.
+
+  5. It MUST not be used by multi-producer/consumer pthreads whose scheduling policies are SCHED_FIFO or SCHED_RR.
+
+ rte_timer
 
-  Running  ``rte_timer_manager()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.
+  Running ``rte_timer_manage()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.
 
 + rte_log
 
@@ -453,11 +575,9 @@ The key fields of the heap structure and their function are described below
 
 * free_head - this points to the first element in the list of free nodes for
   this malloc heap.
 
-.. note::
+* first - this points to the first element in the heap.
 
-  The malloc_heap structure does not keep track of in-use blocks of memory,
-  since these are never touched except when they are to be freed again -
-  at which point the pointer to the block is an input to the free() function.
+* last - this points to the last element in the heap.
 
 .. _figure_malloc_heap:
 
@@ -473,16 +593,21 @@ Structure: malloc_elem
 
 The malloc_elem structure is used as a generic header structure for various
 blocks of memory.
-It is used in three different ways - all shown in the diagram above:
+It is used in two different ways - both shown in the diagram above:
 
 #. As a header on a block of free or allocated memory - normal case
 
 #. As a padding header inside a block of memory
 
-#. As an end-of-memseg marker
-
 The most important fields in the structure and how they are used are described below.
 
+The malloc heap is a doubly-linked list, where each element keeps track of its
+previous and next elements. Due to the fact that hugepage memory can come and
+go, neighbouring malloc elements may not necessarily be adjacent in memory.
+Also, since a malloc element may span multiple pages, its contents may not
+necessarily be IOVA-contiguous either - each malloc element is only guaranteed
+to be virtually contiguous.
+
 .. note::
 
     If the usage of a particular field in one of the above two usages is not
@@ -495,13 +620,20 @@ The most important fields in the structure and how they are used are described b
 
   It is used for normal memory blocks when they are being freed, to add the
  newly-freed block to the heap's free-list.
 
-* prev - this pointer points to the header element/block in the memseg
-  immediately behind the current one. When freeing a block, this pointer is
-  used to reference the previous block to check if that block is also free.
-  If so, then the two free blocks are merged to form a single larger block.
+* prev - this pointer points to the previous header element/block in memory. When
+  freeing a block, this pointer is used to reference the previous block to
+  check if that block is also free. If so, and the two blocks are immediately
+  adjacent to each other, then the two free blocks are merged to form a single
+  larger block.
 
-* next_free - this pointer is used to chain the free-list of unallocated
-  memory blocks together.
+* next - this pointer points to the next header element/block in memory. When
+  freeing a block, this pointer is used to reference the next block to check
+  if that block is also free. If so, and the two blocks are immediately
+  adjacent to each other, then the two free blocks are merged to form a single
+  larger block.
+
+* free_list - this is a structure pointing to the previous and next elements in
+  this heap's free list.
 
   It is only used in normal memory blocks; on ``malloc()`` to find a suitable
   free block to allocate and on ``free()`` to add the newly freed element to
   the free-list.
@@ -515,9 +647,6 @@ The most important fields in the structure and how they are used are described b
   constraints. In that case, the pad header is used to locate the actual malloc
   element header for the block.
 
-  For the end-of-memseg structure, this is always a ``BUSY`` value, which
-  ensures that no element, on being freed, searches beyond the end of the
-  memseg for other blocks to merge with into a larger free area.
 
 * pad - this holds the length of the padding present at the start of the block.
   In the case of a normal block header, it is added to the address of the end
@@ -528,22 +657,19 @@ The most important fields in the structure and how they are used are described b
   actual block header.
 
 * size - the size of the data block, including the header itself.
-  For end-of-memseg structures, this size is given as zero, though it is never
-  actually checked.
-  For normal blocks which are being freed, this size value is used in place of
-  a "next" pointer to identify the location of the next block of memory that
-  in the case of being ``FREE``, the two free blocks can be merged into one.
 
 Memory Allocation
 ^^^^^^^^^^^^^^^^^
 
-On EAL initialization, all memsegs are setup as part of the malloc heap.
-This setup involves placing a dummy structure at the end with ``BUSY`` state,
-which may contain a sentinel value if ``CONFIG_RTE_MALLOC_DEBUG`` is enabled,
-and a proper :ref:`element header<malloc_elem>` with ``FREE`` at the start
-for each memseg.
+On EAL initialization, all preallocated memory segments are set up as part of the
+malloc heap. This setup involves placing an :ref:`element header<malloc_elem>`
+with ``FREE`` at the start of each virtually contiguous segment of memory.
 The ``FREE`` element is then added to the ``free_list`` for the malloc heap.
 
+This setup also happens whenever memory is allocated at runtime (if supported),
+in which case newly allocated pages are also added to the heap, merging with any
+adjacent free segments if there are any.
+
 When an application makes a call to a malloc-like function, the malloc function
 will first index the ``lcore_config`` structure for the calling thread, and
 determine the NUMA node of that thread.
@@ -574,8 +700,34 @@ the start and/or end of the element, resulting in the following behavior:
 
 The advantage of allocating the memory from the end of the existing element is
 that no adjustment of the free list needs to take place - the existing element
-on the free list just has its size pointer adjusted, and the following element
-has its "prev" pointer redirected to the newly created element.
+on the free list just has its size value adjusted, and the next/previous elements
+have their "prev"/"next" pointers redirected to the newly created element.
+
+When there is not enough memory in the heap to satisfy an allocation
+request, EAL will attempt to allocate more memory from the system (if supported)
+and, following a successful allocation, will retry reserving the memory again. In
+a multiprocessing scenario, all primary and secondary processes will synchronize
+their memory maps to ensure that any valid pointer to DPDK memory is guaranteed
+to be valid at all times in all currently running processes.
+
+Failure to synchronize memory maps in one of the processes will cause allocation
+to fail, even though some of the processes may have allocated the memory
+successfully. The memory is not added to the malloc heap unless the primary process
+has ensured that all other processes have mapped this memory successfully.
+
+Any successful allocation event will trigger a callback, for which user
+applications and other DPDK subsystems can register. Additionally, validator
+callbacks will be triggered before allocation if the newly allocated memory would
+exceed the threshold set by the user, giving a chance to allow or deny the
+allocation.
+
+.. note::
+
+    Any allocation of new pages has to go through the primary process. If the
+    primary process is not active, no memory will be allocated, even if it was
+    theoretically possible to do so. This is because the primary process's memory
+    map acts as an authority on what should or should not be mapped, while each
+    secondary process has its own, local memory map. Secondary processes do not
+    update the shared memory map, they only copy its contents to their local
+    memory map.
 
 Freeing Memory
 ^^^^^^^^^^^^^^
@@ -589,8 +741,17 @@ the pointer to get the proper element header for the entire block.
 
 From this element header, we get pointers to the heap from which the block was
 allocated and to where it must be freed, as well as the pointer to the previous
-These next and previous elements are then checked to see if they are also -``FREE``, and if so, they are merged with the current element. -This means that we can never have two ``FREE`` memory blocks adjacent to one -another, as they are always merged into a single block. +and next elements. These next and previous elements are then checked to see if +they are also ``FREE`` and are immediately adjacent to the current one, and if +so, they are merged with the current element. This means that we can never have +two ``FREE`` memory blocks adjacent to one another, as they are always merged +into a single block. + +If deallocating pages at runtime is supported, and the free element encloses +one or more pages, those pages can be deallocated and be removed from the heap. +If DPDK was started with command-line parameters for preallocating memory +(``-m`` or ``--socket-mem``), then those pages that were allocated at startup +will not be deallocated. + +Any successful deallocation event will trigger a callback, for which user +applications and other DPDK subsystems can register. diff --git a/doc/guides/prog_guide/event_crypto_adapter.rst b/doc/guides/prog_guide/event_crypto_adapter.rst new file mode 100644 index 00000000..9fe09c80 --- /dev/null +++ b/doc/guides/prog_guide/event_crypto_adapter.rst @@ -0,0 +1,296 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2018 Intel Corporation. All rights reserved. + +Event Crypto Adapter Library +============================ + +The DPDK :doc:`Eventdev library <eventdev>` provides event driven +programming model with features to schedule events. +The :doc:`Cryptodev library <cryptodev_lib>` provides an interface to +the crypto poll mode drivers which supports different crypto operations. +The Event Crypto Adapter is one of the adapter which is intended to +bridge between the event device and the crypto device. + +The packet flow from crypto device to the event device can be accomplished +using SW and HW based transfer mechanism. +The Adapter queries an eventdev PMD to determine which mechanism to be used. +The adapter uses an EAL service core function for SW based packet transfer +and uses the eventdev PMD functions to configure HW based packet transfer +between the crypto device and the event device. The crypto adapter uses a new +event type called ``RTE_EVENT_TYPE_CRYPTODEV`` to indicate the event source. + +The application can choose to submit a crypto operation directly to +crypto device or send it to the crypto adapter via eventdev based on +RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability. +The first mode is known as the event new(RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) +mode and the second as the event forward(RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) +mode. The choice of mode can be specified while creating the adapter. +In the former mode, it is an application responsibility to enable ingress +packet ordering. In the latter mode, it is the adapter responsibility to +enable the ingress packet ordering. + + +Adapter Mode +------------ + +RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In the RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode, application submits crypto +operations directly to crypto device. The adapter then dequeues crypto +completions from crypto device and enqueues them as events to the event device. +This mode does not ensure ingress ordering, if the application directly +enqueues to the cryptodev without going through crypto/atomic stage. 
+In this mode, events dequeued from the adapter will be treated as new events.
+The application needs to specify the event information (response information)
+which is needed to enqueue an event after the crypto operation is completed.
+
+.. _figure_event_crypto_adapter_op_new:
+
+.. figure:: img/event_crypto_adapter_op_new.*
+
+    Working model of ``RTE_EVENT_CRYPTO_ADAPTER_OP_NEW`` mode
+
+
+RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode, if the HW supports the
+RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability, the application
+can directly submit the crypto operations to the cryptodev.
+If not, the application retrieves the crypto adapter's event port using the
+rte_event_crypto_adapter_event_port_get() API, then links its event
+queue to this port and starts enqueuing crypto operations as events
+to the eventdev. The adapter then dequeues the events and submits the
+crypto operations to the cryptodev. After the crypto completions, the
+adapter enqueues events to the event device.
+The application can use this mode when ingress packet ordering is needed.
+In this mode, events dequeued from the adapter will be treated as
+forwarded events. The application needs to specify the cryptodev ID
+and queue pair ID (request information) needed to enqueue a crypto
+operation in addition to the event information (response information)
+needed to enqueue an event after the crypto operation has completed.
+
+.. _figure_event_crypto_adapter_op_forward:
+
+.. figure:: img/event_crypto_adapter_op_forward.*
+
+    Working model of ``RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD`` mode
+
+
+API Overview
+------------
+
+This section has a brief introduction to the event crypto adapter APIs.
+The application is expected to create an adapter which is associated with
+a single eventdev, then add a cryptodev and queue pair to the adapter instance.
+
+Create an adapter instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+An adapter instance is created using ``rte_event_crypto_adapter_create()``. This
+function is called with the event device to be associated with the adapter and a
+port configuration for the adapter to set up an event port (if the adapter needs
+to use a service function).
+
+The adapter can be started in ``RTE_EVENT_CRYPTO_ADAPTER_OP_NEW`` or
+``RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD`` mode.
+
+.. code-block:: c
+
+        int err;
+        uint8_t dev_id, id;
+        struct rte_event_dev_info dev_info;
+        struct rte_event_port_conf conf;
+        enum rte_event_crypto_adapter_mode mode;
+
+        err = rte_event_dev_info_get(id, &dev_info);
+
+        conf.new_event_threshold = dev_info.max_num_events;
+        conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+        conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+        mode = RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD;
+        err = rte_event_crypto_adapter_create(id, dev_id, &conf, mode);
+
+If the application desires to have finer control of eventdev port allocation
+and setup, it can use the ``rte_event_crypto_adapter_create_ext()`` function.
+The ``rte_event_crypto_adapter_create_ext()`` function is passed a callback
+function. The callback function is invoked if the adapter needs to use a
+service function and needs to create an event port for it. The callback is
+expected to fill the ``struct rte_event_crypto_adapter_conf`` structure
+passed to it.
+
+In RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode, the event port created by the adapter
+can be retrieved using the ``rte_event_crypto_adapter_event_port_get()`` API.
+The application can use this event port to link with an event queue, on which it
+enqueues events towards the crypto adapter.
+
+.. code-block:: c
+
+        uint8_t id, evdev, crypto_ev_port_id, app_qid;
+        struct rte_event ev;
+        int ret;
+
+        ret = rte_event_crypto_adapter_event_port_get(id, &crypto_ev_port_id);
+        ret = rte_event_queue_setup(evdev, app_qid, NULL);
+        ret = rte_event_port_link(evdev, crypto_ev_port_id, &app_qid, NULL, 1);
+
+        // Fill in event info and update event_ptr with rte_crypto_op
+        memset(&ev, 0, sizeof(ev));
+        ev.queue_id = app_qid;
+        .
+        .
+        ev.event_ptr = op;
+        ret = rte_event_enqueue_burst(evdev, app_ev_port_id, &ev, 1);
+
+Querying adapter capabilities
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_event_crypto_adapter_caps_get()`` function allows
+the application to query the adapter capabilities for an eventdev and cryptodev
+combination. This API indicates whether the cryptodev and eventdev are connected
+using an internal HW port or not.
+
+.. code-block:: c
+
+        rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
+
+Adding queue pair to the adapter instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The cryptodev device ID and queue pair are created using cryptodev APIs.
+For more information see :doc:`here  <cryptodev_lib>`.
+
+.. code-block:: c
+
+        struct rte_cryptodev_config conf;
+        struct rte_cryptodev_qp_conf qp_conf;
+        uint8_t cdev_id = 0;
+        uint16_t qp_id = 0;
+
+        rte_cryptodev_configure(cdev_id, &conf);
+        rte_cryptodev_queue_pair_setup(cdev_id, qp_id, &qp_conf);
+
+The cryptodev ID and queue pair are added to the instance using the
+``rte_event_crypto_adapter_queue_pair_add()`` API.
+The same is removed using the ``rte_event_crypto_adapter_queue_pair_del()`` API.
+If the HW supports the RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
+capability, event information must be passed to the add API.
+
+.. code-block:: c
+
+        uint32_t cap;
+        int ret;
+
+        ret = rte_event_crypto_adapter_caps_get(id, evdev, &cap);
+        if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
+                struct rte_event event;
+
+                // Fill in event information & pass it to add API
+                rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, &event);
+        } else
+                rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, NULL);
+
+Configure the service function
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If the adapter uses a service function, the application is required to assign
+a service core to the service function as shown below.
+
+.. code-block:: c
+
+        uint32_t service_id;
+
+        if (rte_event_crypto_adapter_service_id_get(id, &service_id) == 0)
+                rte_service_map_lcore_set(service_id, CORE_ID);
+
+Set event request/response information
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode, the application needs
+to specify the cryptodev ID and queue pair ID (request information) in
+addition to the event information (response information) needed to enqueue
+an event after the crypto operation has completed. The request and response
+information are specified in the ``struct rte_crypto_op`` private data or
+session's private data.
+
+In the RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode, the application is required
+to provide only the response information.
+
+The SW adapter or HW PMD uses ``rte_crypto_op::sess_type`` to
+decide whether request/response data is located in the crypto session/
+crypto security session or at an offset in the ``struct rte_crypto_op``.
+The ``rte_crypto_op::private_data_offset`` is used to locate the request/
+response in the ``rte_crypto_op``.
+
+For a crypto session, the ``rte_cryptodev_sym_session_set_user_data()`` API
+will be used to set the request/response data. The same data will be obtained
+by the ``rte_cryptodev_sym_session_get_user_data()`` API. The
+RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA capability indicates
+whether HW or SW supports this feature.
+
+For a security session, the ``rte_security_session_set_private_data()`` API
+will be used to set the request/response data. The same data will be obtained
+by the ``rte_security_session_get_private_data()`` API.
+
+For session-less operation it is mandatory to place the request/response data
+with the ``rte_crypto_op``.
+
+.. code-block:: c
+
+        union rte_event_crypto_metadata m_data;
+        struct rte_event ev;
+        struct rte_crypto_op *op;
+
+        /* Allocate & fill op structure (op_mempool is assumed to have been
+         * created earlier with rte_crypto_op_pool_create())
+         */
+        op = rte_crypto_op_alloc(op_mempool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+        memset(&m_data, 0, sizeof(m_data));
+        memset(&ev, 0, sizeof(ev));
+        /* Fill event information and update event_ptr to rte_crypto_op */
+        ev.event_ptr = op;
+
+        if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+                /* Copy response information */
+                rte_memcpy(&m_data.response_info, &ev, sizeof(ev));
+                /* Copy request information */
+                m_data.request_info.cdev_id = cdev_id;
+                m_data.request_info.queue_pair_id = qp_id;
+                /* Call set API to store private data information */
+                rte_cryptodev_sym_session_set_user_data(
+                        op->sym->session,
+                        &m_data,
+                        sizeof(m_data));
+        } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+                uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH +
+                               (sizeof(struct rte_crypto_sym_xform) * 2);
+                op->private_data_offset = len;
+                /* Copy response information */
+                rte_memcpy(&m_data.response_info, &ev, sizeof(ev));
+                /* Copy request information */
+                m_data.request_info.cdev_id = cdev_id;
+                m_data.request_info.queue_pair_id = qp_id;
+                /* Store private data information along with rte_crypto_op */
+                rte_memcpy((uint8_t *)op + len, &m_data, sizeof(m_data));
+        }
+
+Start the adapter instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The application calls ``rte_event_crypto_adapter_start()`` to start the adapter.
+This function calls the start callbacks of the eventdev PMDs for hardware based
+eventdev-cryptodev connections and ``rte_service_run_state_set()`` to enable the
+service function if one exists.
+
+.. code-block:: c
+
+        rte_event_crypto_adapter_start(id);
+
+Get adapter statistics
+~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_event_crypto_adapter_stats_get()`` function reports counters defined
+in struct ``rte_event_crypto_adapter_stats``. The received packet and
+enqueued event counts are a sum of the counts from the eventdev PMD callbacks
+if the callback is supported, and the counts maintained by the service function,
+if one exists.
diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
index 4ab87a37..0166bb45 100644
--- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
@@ -12,7 +12,11 @@
 be supported in hardware or require a software thread to receive packets from
 the ethdev port using ethdev poll mode APIs and enqueue these as events to the
 event device using the eventdev API. Both transfer mechanisms may be present on
 the same platform depending on the particular combination of the ethdev and
-the event device.
+the event device. For SW based packet transfer, if the mbuf does not have a
+timestamp set, the adapter adds a timestamp to the mbuf using
+rte_get_tsc_cycles(); this provides a more accurate timestamp than if the
+application were to set the timestamp, since it avoids event device
+schedule latency.
 
 The Event Ethernet Rx Adapter library is intended for the application code to
 configure both transfer mechanisms using a common API. A capability API allows
@@ -140,3 +144,44 @@ enqueued event counts are a sum of the counts from the eventdev PMD callbacks
 if the callback is supported, and the counts maintained by the service function,
 if one exists. The service function also maintains a count of cycles for which
 it was not able to enqueue to the event device.
+
+Interrupt Based Rx Queues
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The service core function is typically set up to poll ethernet Rx queues for
+packets. Certain queues may have low packet rates, and it would be more
+efficient to enable the Rx queue interrupt and read packets after receiving
+the interrupt.
+
+The servicing_weight member of struct rte_event_eth_rx_adapter_queue_conf
+is applicable when the adapter uses a service core function. The application
+has to enable Rx queue interrupts when configuring the ethernet device
+using the ``rte_eth_dev_configure()`` function, and then use a servicing_weight
+of zero when adding the Rx queue to the adapter.
+
+The adapter creates a thread blocked on the interrupt; on an interrupt, this
+thread enqueues the port id and the queue id to a ring buffer. The adapter
+service function dequeues the port id and queue id from the ring buffer,
+invokes ``rte_eth_rx_burst()`` to receive packets on the queue and
+converts the received packets to events in the same manner as packets
+received on a polled Rx queue. The interrupt thread is affinitized to the same
+CPUs as the lcores of the Rx adapter service function; if the Rx adapter
+service function has not been mapped to any lcores, the interrupt thread
+is mapped to the master lcore.
+
+Rx Callback for SW Rx Adapter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For SW based packet transfers, i.e., when the
+``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` is not set in the adapter's
+capabilities flags for a particular ethernet device, the service function
+temporarily enqueues mbufs to an event buffer before batch enqueueing these
+to the event device. If the buffer fills up, the service function stops
+dequeueing packets from the ethernet device. The application may want to
+monitor the buffer fill level and instruct the service function to selectively
+enqueue packets to the event device. The application may also use some other
+criteria to decide which packets should enter the event device even when
+the event buffer fill level is low. The
+``rte_event_eth_rx_adapter_cb_register()`` function allows the application
+to register a callback that selects which packets to enqueue to the event
+device.
diff --git a/doc/guides/prog_guide/event_timer_adapter.rst b/doc/guides/prog_guide/event_timer_adapter.rst
new file mode 100644
index 00000000..7bbbdfe9
--- /dev/null
+++ b/doc/guides/prog_guide/event_timer_adapter.rst
@@ -0,0 +1,296 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+  Copyright(c) 2017 Intel Corporation. All rights reserved.
+
+Event Timer Adapter Library
+===========================
+
+The DPDK
+`Event Device library <http://dpdk.org/doc/guides/prog_guide/eventdev.html>`_
+introduces an event driven programming model which presents applications with
+an alternative to the polling model traditionally used in DPDK
+applications. Event devices can be coupled with arbitrary components to provide
+new event sources by using **event adapters**. The Event Timer Adapter is one
+such adapter; it bridges event devices and timer mechanisms.
+
+The Event Timer Adapter library extends the event driven model
+by introducing a :ref:`new type of event <timer_expiry_event>` that represents
+a timer expiration, and by providing an API with which adapters can be created or
+destroyed, and :ref:`event timers <event_timer>` can be armed and canceled.
+
+The Event Timer Adapter library is designed to interface with hardware or
+software implementations of the timer mechanism; it will query an eventdev PMD
+to determine which implementation should be used. The default software
+implementation manages timers using the DPDK
+`Timer library <http://dpdk.org/doc/guides/prog_guide/timer_lib.html>`_.
+
+Examples of using the API are presented in the `API Overview`_ and
+`Processing Timer Expiry Events`_ sections. Code samples are abstracted and
+are based on the example of handling a TCP retransmission.
+
+.. _event_timer:
+
+Event Timer struct
+------------------
+Event timers are timers that enqueue a timer expiration event to an event
+device upon timer expiration.
+
+The Event Timer Adapter API represents each event timer with a generic struct,
+which contains an event and user metadata. The ``rte_event_timer`` struct is
+defined in ``lib/librte_eventdev/rte_event_timer_adapter.h``.
+
+.. _timer_expiry_event:
+
+Timer Expiry Event
+~~~~~~~~~~~~~~~~~~
+
+The event contained by an event timer is enqueued in the event device when the
+timer expires, and the event device uses the attributes below when scheduling
+it:
+
+* ``event_queue_id`` - the application should set this to specify the event queue to
+  which the timer expiry event should be enqueued
+* ``event_priority`` - the application can set this to indicate the priority of the
+  timer expiry event in the event queue relative to other events
+* ``sched_type`` - the application can set this to specify the scheduling type of
+  the timer expiry event
+* ``flow_id`` - the application can set this to indicate which flow this timer
+  expiry event corresponds to
+* ``op`` - will be set to ``RTE_EVENT_OP_NEW`` by the event timer adapter
+* ``event_type`` - will be set to ``RTE_EVENT_TYPE_TIMER`` by the event timer
+  adapter
+
+Timeout Ticks
+~~~~~~~~~~~~~
+
+The number of ticks from now in which the timer will expire. The ticks value
+has a resolution (``timer_tick_ns``) that is specified in the event timer
+adapter configuration.
+
+State
+~~~~~
+
+Before arming an event timer, the application should initialize its state to
+RTE_EVENT_TIMER_NOT_ARMED. The event timer's state will be updated when a
+request to arm or cancel it takes effect.
+
+If the application wishes to rearm the timer after it has expired, it should
+reset the state back to RTE_EVENT_TIMER_NOT_ARMED before doing so.
+
+User Metadata
+~~~~~~~~~~~~~
+
+Memory to store user specific metadata. The event timer adapter implementation
+will not modify this area.
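+
+Putting these fields together, a minimal sketch of initializing an event timer
+before arming it might look like the following (``event_queue_id`` here is
+assumed to be a previously configured event queue; a fuller example appears in
+`Arming Event Timers`_):
+
+.. code-block:: c
+
+    /* Sketch: an event timer carrying a timer expiry event. */
+    struct rte_event_timer evtim = {
+        .ev.op = RTE_EVENT_OP_NEW,
+        .ev.queue_id = event_queue_id,
+        .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+        .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+        .ev.event_type = RTE_EVENT_TYPE_TIMER,
+        .state = RTE_EVENT_TIMER_NOT_ARMED,
+        .timeout_ticks = 30,
+    };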
+
+API Overview
+------------
+
+This section will introduce the reader to the event timer adapter API, showing
+how to create and configure an event timer adapter and use it to manage event
+timers.
+
+From a high level, the setup steps are:
+
+* rte_event_timer_adapter_create()
+* rte_event_timer_adapter_start()
+
+And to arm and cancel event timers:
+
+* rte_event_timer_arm_burst()
+* rte_event_timer_cancel_burst()
+
+Create and Configure an Adapter Instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To create an event timer adapter instance, initialize an
+``rte_event_timer_adapter_conf`` struct with the desired values, and pass it
+to ``rte_event_timer_adapter_create()``.
+
+.. code-block:: c
+
+    #define NSECPERSEC 1E9 // No of ns in 1 sec
+    const struct rte_event_timer_adapter_conf adapter_config = {
+        .event_dev_id = event_dev_id,
+        .timer_adapter_id = 0,
+        .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
+        .timer_tick_ns = NSECPERSEC / 10, // 100 milliseconds
+        .max_tmo_ns = 180 * NSECPERSEC,   // 3 minutes
+        .nb_timers = 40000,
+        .flags = 0,
+    };
+
+    struct rte_event_timer_adapter *adapter = NULL;
+    adapter = rte_event_timer_adapter_create(&adapter_config);
+
+    if (adapter == NULL) { ... };
+
+Before creating an instance of a timer adapter, the application should create
+and configure an event device along with its event ports. Based on the event
+device capability, it might require creating an additional event port to be
+used by the timer adapter. If required, the
+``rte_event_timer_adapter_create()`` function will use a default method to
+configure an event port; it will examine the current event device
+configuration, determine the next available port identifier number, and create
+a new event port with a default port configuration.
+
+If the application desires finer control of event port allocation
+and setup, it can use the ``rte_event_timer_adapter_create_ext()`` function.
+This function is passed a callback function that will be invoked if the
+adapter needs to create an event port, giving the application the opportunity
+to control how it is done.
+
+Retrieve Event Timer Adapter Contextual Information
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The event timer adapter implementation may have constraints on tick resolution
+or maximum timer expiry timeout based on the given event timer adapter or
+system. In this case, the implementation may adjust the tick resolution or
+maximum timeout to the best possible configuration; the adjusted values can be
+read back as shown in the sketch below.
+
+Upon successful event timer adapter creation, the application can get the
+configured resolution and max timeout with
+``rte_event_timer_adapter_get_info()``. This function fills in an
+``rte_event_timer_adapter_info`` struct, which contains the following members:
+
+* ``min_resolution_ns`` - Minimum timer adapter tick resolution in ns.
+* ``max_tmo_ns`` - Maximum timer timeout (expiry) in ns.
+* ``adapter_conf`` - Configured event timer adapter attributes
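+
+A minimal sketch, reusing the ``adapter`` created above:
+
+.. code-block:: c
+
+    struct rte_event_timer_adapter_info info;
+
+    /* The implementation may have rounded the requested resolution
+     * and maximum timeout; use the values it actually configured.
+     */
+    if (rte_event_timer_adapter_get_info(adapter, &info) == 0)
+        printf("tick: %" PRIu64 " ns, max timeout: %" PRIu64 " ns\n",
+               info.min_resolution_ns, info.max_tmo_ns);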
+
+Configuring the Service Component
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If the adapter uses a service component, the application is required to map
+the service to a service core before starting the adapter:
+
+.. code-block:: c
+
+    uint32_t service_id;
+
+    if (rte_event_timer_adapter_service_id_get(adapter, &service_id) == 0)
+        rte_service_map_lcore_set(service_id, EVTIM_CORE_ID);
+
+An event timer adapter uses a service component if the event device PMD
+indicates that the adapter should use a software implementation.
+
+Starting the Adapter Instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The application should call ``rte_event_timer_adapter_start()`` to start
+running the event timer adapter. This function calls the start entry points
+defined by eventdev PMDs for hardware implementations or puts a service
+component into the running state in the software implementation.
+
+Arming Event Timers
+~~~~~~~~~~~~~~~~~~~
+
+Once an event timer adapter has been started, an application can begin to
+manage event timers with it.
+
+The application should allocate ``struct rte_event_timer`` objects from a
+mempool or from huge-page backed application buffers of the required size.
+Upon successful allocation, the application should initialize the event timer,
+and then set any of the necessary event attributes described in the
+`Timer Expiry Event`_ section. In the following example, assume ``conn``
+represents a TCP connection and that ``event_timer_pool`` is a mempool that
+was created previously:
+
+.. code-block:: c
+
+    if (rte_mempool_get(event_timer_pool, (void **)&conn->evtim) < 0) { ... }
+
+    /* Set up the event timer. */
+    conn->evtim->ev.op = RTE_EVENT_OP_NEW;
+    conn->evtim->ev.queue_id = event_queue_id;
+    conn->evtim->ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+    conn->evtim->ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+    conn->evtim->ev.event_type = RTE_EVENT_TYPE_TIMER;
+    conn->evtim->ev.event_ptr = conn;
+    conn->evtim->state = RTE_EVENT_TIMER_NOT_ARMED;
+    conn->evtim->timeout_ticks = 30; // 3 sec per RFC 1122 (TCP retransmission)
+
+Note that it is necessary to initialize the event timer state to
+RTE_EVENT_TIMER_NOT_ARMED. Also note that we have saved a pointer to the
+``conn`` object in the timer's event payload. This will allow us to locate
+the connection object again once we dequeue the timer expiry event from the
+event device later. As a convenience, the application may specify no value for
+ev.event_ptr, and the adapter will by default set it to point at the event
+timer itself.
+
+Now we can arm the event timer with ``rte_event_timer_arm_burst()``:
+
+.. code-block:: c
+
+    ret = rte_event_timer_arm_burst(adapter, &conn->evtim, 1);
+    if (ret != 1) { ... }
+
+Once an event timer expires, the application may free it or rearm it as
+necessary. If the application will rearm the timer, the state should be reset
+to RTE_EVENT_TIMER_NOT_ARMED by the application before rearming it.
+
+Multiple Event Timers with Same Expiry Value
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the special case that there is a set of event timers that should all expire
+at the same time, the application may call
+``rte_event_timer_arm_tmo_tick_burst()``, which allows the implementation to
+optimize the operation if possible; a sketch follows.
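+
+A minimal sketch of this call, assuming an application-defined array
+``evtims`` of ``N`` event timers, each initialized as shown above, that
+should all expire 30 ticks from now:
+
+.. code-block:: c
+
+    struct rte_event_timer *evtims[N];
+
+    /* ... allocate and initialize each of the N timers ... */
+
+    /* Arm all N timers with a common expiry of 30 ticks. */
+    ret = rte_event_timer_arm_tmo_tick_burst(adapter, evtims, 30, N);
+    if (ret != N) { ... }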
+
+Canceling Event Timers
+~~~~~~~~~~~~~~~~~~~~~~
+
+An event timer that has been armed as described in `Arming Event Timers`_ can
+be canceled by calling ``rte_event_timer_cancel_burst()``:
+
+.. code-block:: c
+
+    /* Ack for the previous TCP data packet has been received;
+     * cancel the retransmission timer.
+     */
+    rte_event_timer_cancel_burst(adapter, &conn->evtim, 1);
+
+Processing Timer Expiry Events
+------------------------------
+
+Once an event timer has successfully enqueued a timer expiry event in the event
+device, the application will subsequently dequeue it from the event device.
+The application can use the event payload to retrieve a pointer to the object
+associated with the event timer. It can then rearm the event timer or free the
+event timer object as desired:
+
+.. code-block:: c
+
+    void
+    event_processing_loop(...)
+    {
+        while (...) {
+            /* Receive events from the configured event port. */
+            rte_event_dequeue_burst(event_dev_id, event_port, &ev, 1, 0);
+            ...
+            switch (ev.event_type) {
+                ...
+                case RTE_EVENT_TYPE_TIMER:
+                    process_timer_event(ev);
+                    ...
+                    break;
+            }
+        }
+    }
+
+    uint8_t
+    process_timer_event(...)
+    {
+        /* A retransmission timeout for the connection has been received. */
+        conn = ev.event_ptr;
+        /* Retransmit last packet (e.g. TCP segment). */
+        ...
+        /* Re-arm timer using original values. */
+        rte_event_timer_arm_burst(adapter, &conn->evtim, 1);
+    }
+
+Summary
+-------
+
+The Event Timer Adapter library extends the DPDK event-based programming model
+by representing timer expirations as events in the system and allowing
+applications to use existing event processing loops to arm and cancel event
+timers or handle timer expiry events.
diff --git a/doc/guides/prog_guide/eventdev.rst b/doc/guides/prog_guide/eventdev.rst
index ce19997d..8fcae546 100644
--- a/doc/guides/prog_guide/eventdev.rst
+++ b/doc/guides/prog_guide/eventdev.rst
@@ -1,5 +1,6 @@
 .. SPDX-License-Identifier: BSD-3-Clause
    Copyright(c) 2017 Intel Corporation.
+   Copyright(c) 2018 Arm Limited.

 Event Device Library
 ====================
@@ -129,8 +130,10 @@ API Walk-through

 This section will introduce the reader to the eventdev API, showing how to
 create and configure an eventdev and use it for a two-stage atomic pipeline
-with a single core for TX. The diagram below shows the final state of the
-application after this walk-through:
+with one core each for RX and TX. RX and TX cores are shown here for
+illustration; refer to the Eventdev Adapter documentation for further details.
+The diagram below shows the final state of the application after this
+walk-through:

 .. _figure_eventdev-usage1:

@@ -196,23 +199,29 @@ calling the setup function. Repeat this step for each queue, starting from
         .nb_atomic_flows = 1024,
         .nb_atomic_order_sequences = 1024,
     };
+    struct rte_event_queue_conf single_link_conf = {
+        .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
+    };
     int dev_id = 0;
-    int queue_id = 0;
-    int err = rte_event_queue_setup(dev_id, queue_id, &atomic_conf);
+    int atomic_q_1 = 0;
+    int atomic_q_2 = 1;
+    int single_link_q = 2;
+    int err = rte_event_queue_setup(dev_id, atomic_q_1, &atomic_conf);
+    err = rte_event_queue_setup(dev_id, atomic_q_2, &atomic_conf);
+    err = rte_event_queue_setup(dev_id, single_link_q, &single_link_conf);

-The remainder of this walk-through assumes that the queues are configured as
-follows:
+As shown above, queue IDs are as follows:

 * id 0, atomic queue #1
 * id 1, atomic queue #2
 * id 2, single-link queue

+These queues are used for the remainder of this walk-through.
+
 Setting up Ports
 ~~~~~~~~~~~~~~~~

-Once queues are set up successfully, create the ports as required. Each port
-should be set up with its corresponding port_conf type, worker for worker cores,
-rx and tx for the RX and TX cores:
+Once queues are set up successfully, create the ports as required.
 .. code-block:: c

@@ -232,15 +241,24 @@ rx and tx for the RX and TX cores:
         .new_event_threshold = 4096,
     };
     int dev_id = 0;
-    int port_id = 0;
-    int err = rte_event_port_setup(dev_id, port_id, &CORE_FUNCTION_conf);
+    int rx_port_id = 0;
+    int err = rte_event_port_setup(dev_id, rx_port_id, &rx_conf);
+
+    for (int worker_port_id = 1; worker_port_id <= 4; worker_port_id++) {
+        int err = rte_event_port_setup(dev_id, worker_port_id, &worker_conf);
+    }

-It is now assumed that:
+    int tx_port_id = 5;
+    err = rte_event_port_setup(dev_id, tx_port_id, &tx_conf);
+
+As shown above:

 * port 0: RX core
 * ports 1,2,3,4: Workers
 * port 5: TX core

+These ports are used for the remainder of this walk-through.
+
 Linking Queues and Ports
 ~~~~~~~~~~~~~~~~~~~~~~~~

@@ -254,15 +272,14 @@ can be achieved like this:

 .. code-block:: c

-    uint8_t port_id = 0;
+    uint8_t rx_port_id = 0;
+    uint8_t tx_port_id = 5;
     uint8_t atomic_qs[] = {0, 1};
     uint8_t single_link_q = 2;
-    uint8_t tx_port_id = 5;
     uint8_t priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

-    for (int i = 0; i < 4; i++) {
-        int worker_port = i + 1;
-        int links_made = rte_event_port_link(dev_id, worker_port, atomic_qs, NULL, 2);
+    for (int worker_port_id = 1; worker_port_id <= 4; worker_port_id++) {
+        int links_made = rte_event_port_link(dev_id, worker_port_id, atomic_qs, NULL, 2);
     }

     int links_made = rte_event_port_link(dev_id, tx_port_id, &single_link_q, &priority, 1);

@@ -295,14 +312,14 @@ The following code shows how those packets can be enqueued into the eventdev:
         ev[i].flow_id = mbufs[i]->hash.rss;
         ev[i].op = RTE_EVENT_OP_NEW;
         ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
-        ev[i].queue_id = 0;
+        ev[i].queue_id = atomic_q_1;
         ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
         ev[i].sub_event_type = 0;
         ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
         ev[i].mbuf = mbufs[i];
     }

-    const int nb_tx = rte_event_enqueue_burst(dev_id, port_id, ev, nb_rx);
+    const int nb_tx = rte_event_enqueue_burst(dev_id, rx_port_id, ev, nb_rx);
     if (nb_tx != nb_rx) {
         for (i = nb_tx; i < nb_rx; i++)
             rte_pktmbuf_free(mbufs[i]);

@@ -334,7 +351,7 @@ the event to the next stage in the pipeline.
         events[i].queue_id++;
     }

-    uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id, events, nb_rx);
+    uint16_t nb_tx = rte_event_enqueue_burst(dev_id, worker_port_id, events, nb_rx);

 Egress of Events
diff --git a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
index 9959f0d2..0cfc1198 100644
--- a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
+++ b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
@@ -43,6 +43,7 @@ Limitations
 #. Currently, the GSO library supports the following IPv4 packet types:

  - TCP
+ - UDP
  - VxLAN
  - GRE

@@ -146,6 +147,15 @@ TCP/IPv4 GSO
 TCP/IPv4 GSO supports segmentation of suitably large TCP/IPv4 packets, which
 may also contain an optional VLAN tag.

+UDP/IPv4 GSO
+~~~~~~~~~~~~
+UDP/IPv4 GSO supports segmentation of suitably large UDP/IPv4 packets, which
+may also contain an optional VLAN tag. UDP GSO is essentially the same
+operation as IP fragmentation: UDP GSO treats the UDP header as a part of the
+payload and does not modify it during segmentation. Therefore, after UDP GSO,
+only the first output packet has the original UDP header, and the others have
+only L2 and L3 headers.
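+
+The sketch below shows how an application might request UDP GSO for a single
+packet. The mempools and the 1400-byte segment size are illustrative
+assumptions, and it is assumed here, by analogy with the TCP case, that UDP
+GSO is selected with ``DEV_TX_OFFLOAD_UDP_TSO`` in ``gso_types`` and
+``PKT_TX_UDP_SEG`` in the mbuf's ``ol_flags``:
+
+.. code-block:: c
+
+    struct rte_gso_ctx gso_ctx = {
+        .direct_pool = direct_pool,     /* pool for segment headers */
+        .indirect_pool = indirect_pool, /* pool for payload references */
+        .gso_types = DEV_TX_OFFLOAD_UDP_TSO,
+        .gso_size = 1400,               /* maximum size of each output packet */
+        .flag = 0,
+    };
+    struct rte_mbuf *segs[16];
+    int nb_segs;
+
+    /* Mark the packet for UDP segmentation and segment it. */
+    pkt->ol_flags |= PKT_TX_UDP_SEG;
+    nb_segs = rte_gso_segment(pkt, &gso_ctx, segs, RTE_DIM(segs));
+    if (nb_segs < 0) { ... }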
+
+VxLAN GSO
+~~~~~~~~~
+VxLAN packets GSO supports segmentation of suitably large VxLAN packets,
diff --git a/doc/guides/prog_guide/glossary.rst b/doc/guides/prog_guide/glossary.rst
index e101bc02..dda45bd1 100644
--- a/doc/guides/prog_guide/glossary.rst
+++ b/doc/guides/prog_guide/glossary.rst
@@ -41,9 +41,6 @@ CPU
 CRC
     Cyclic Redundancy Check

-ctrlmbuf
-    An *mbuf* carrying control data.
-
 Data Plane
     In contrast to the control plane, the data plane in a network architecture
     are the layers involved when forwarding packets. These layers must be
diff --git a/doc/guides/prog_guide/hash_lib.rst b/doc/guides/prog_guide/hash_lib.rst
index 180c776e..76a1f323 100644
--- a/doc/guides/prog_guide/hash_lib.rst
+++ b/doc/guides/prog_guide/hash_lib.rst
@@ -19,6 +19,8 @@ The main configuration parameters for the hash are:

 * Size of the key in bytes

+* An extra flag used to describe additional settings, for example, the
+  multithreading mode of operation (as will be described later)
+
 The hash also allows the configuration of some low-level implementation related
 parameters such as:

 * Hash function to translate the key into a bucket index

@@ -49,8 +51,7 @@ Apart from these method explained above, the API allows the user three more opti

 Also, the API contains a method to allow the user to look up entries in bursts, achieving higher performance
 than looking up individual entries, as the function prefetches next entries at the time it is operating
 with the first ones, which reduces significantly the impact of the necessary memory accesses.
-Notice that this method uses a pipeline of 8 entries (4 stages of 2 entries), so it is highly recommended
-to use at least 8 entries per burst.
+

 The actual data associated with each key can be either managed by the user using a separate table that
 mirrors the hash in terms of number of entries and position of each entry,

@@ -63,11 +64,33 @@ However, this table could also be used for more sophisticated features and provi

 Multi-process support
 ---------------------

-The hash library can be used in a multi-process environment, minding that only lookups are thread-safe.
+The hash library can be used in a multi-process environment. The only function that can only be used in
+single-process mode is rte_hash_set_cmp_func(), which installs a custom key compare function via a
+function pointer (and is therefore not supported in multi-process mode).
+
+Multi-thread support
+--------------------
+
+The hash library supports multithreading; the user specifies the required mode of operation when the
+hash table is created, by appropriately setting the extra flag. In all modes of operation, lookups are
+thread-safe, meaning that lookups can be called from multiple threads concurrently.
+
+For concurrent writes, and for concurrent reads and writes, the following flag values define the
+corresponding modes of operation:
+
+* If the multi-writer flag (RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) is set, multiple threads may write
+  to the table. Key add, delete, and table reset are protected from other writer threads. With only
+  this flag set, readers are not protected from ongoing writes.
+
+* If the read/write concurrency flag (RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) is set, multithread
+  read/write operation is safe (i.e., there is no need to stop readers from accessing the hash table
+  until writers finish their updates; reads and writes can operate on the table concurrently).
+
+* In addition to these two flag values, if the transactional memory flag
+  (RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT) is also set, hardware transactional
+  memory will be used to guarantee thread safety, as long as it is supported by
+  the hardware (for example, Intel® TSX).
+
+If the platform supports Intel® TSX, it is advised to set the transactional
+memory flag, as this will speed up concurrent table operations. Otherwise,
+concurrent operations will be slower because of the overhead associated with
+the software locking mechanisms. A creation-time sketch follows.
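+
+As a minimal sketch of selecting a multi-threading mode when the table is
+created (the table name, size and key type below are illustrative
+assumptions):
+
+.. code-block:: c
+
+    struct rte_hash_parameters params = {
+        .name = "flow_table",
+        .entries = 1024,
+        .key_len = sizeof(uint32_t),
+        .hash_func = rte_jhash,
+        .hash_func_init_val = 0,
+        .socket_id = rte_socket_id(),
+        /* Allow lookups to proceed concurrently with adds/deletes. */
+        .extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
+    };
+    struct rte_hash *h = rte_hash_create(&params);
+    if (h == NULL) { ... }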
id="path8546" + d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z " + style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt" + transform="scale(0.8)" /> + </marker> + <marker + inkscape:stockid="Arrow1Lstart" + orient="auto" + refY="0.0" + refX="0.0" + id="Arrow1Lstart" + style="overflow:visible"> + <path + id="path8404" + d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z " + style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt" + transform="scale(0.8) translate(12.5,0)" /> + </marker> + <marker + inkscape:stockid="Arrow1Mend" + orient="auto" + refY="0.0" + refX="0.0" + id="Arrow1Mend" + style="overflow:visible;"> + <path + id="path8413" + d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z " + style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;" + transform="scale(0.4) rotate(180) translate(10,0)" /> + </marker> + <marker + inkscape:stockid="Arrow2Lend" + orient="auto" + refY="0.0" + refX="0.0" + id="Arrow2Lend" + style="overflow:visible;"> + <path + id="path8425" + style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;" + d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z " + transform="scale(1.1) rotate(180) translate(1,0)" /> + </marker> + <marker + inkscape:stockid="Arrow1Lend" + orient="auto" + refY="0.0" + refX="0.0" + id="Arrow1Lend" + style="overflow:visible;"> + <path + id="path8407" + d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z " + style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;" + transform="scale(0.8) rotate(180) translate(12.5,0)" /> + </marker> + <filter + id="filter_2" + color-interpolation-filters="sRGB"> + <feGaussianBlur + stdDeviation="2" + id="feGaussianBlur15" /> + </filter> + <filter + id="filter_2-3" + color-interpolation-filters="sRGB"> + <feGaussianBlur + stdDeviation="2" + id="feGaussianBlur15-1" /> + </filter> + <filter + id="filter_2-0" + color-interpolation-filters="sRGB"> + <feGaussianBlur + stdDeviation="2" + id="feGaussianBlur15-7" /> + </filter> + <filter + id="filter_2-0-8" + color-interpolation-filters="sRGB"> + <feGaussianBlur + stdDeviation="2" + id="feGaussianBlur15-7-7" /> + </filter> + <filter + id="filter_2-3-9" + color-interpolation-filters="sRGB"> + <feGaussianBlur + stdDeviation="2" + id="feGaussianBlur15-1-6" /> + </filter> + <filter + id="filter_2-3-6" + color-interpolation-filters="sRGB"> + <feGaussianBlur + stdDeviation="2" + id="feGaussianBlur15-1-63" /> + </filter> + <filter + id="filter_2-3-91" + color-interpolation-filters="sRGB"> + <feGaussianBlur + stdDeviation="2" + id="feGaussianBlur15-1-3" /> + </filter> + <marker + inkscape:stockid="Arrow1Lend" + orient="auto" + refY="0" + refX="0" + id="Arrow1Lend-5" + style="overflow:visible"> + <path + inkscape:connector-curvature="0" + id="path8407-3" + d="M 0,0 5,-5 -12.5,0 5,5 0,0 z" + style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt" + transform="matrix(-0.8,0,0,-0.8,-10,0)" /> + </marker> + <marker + inkscape:stockid="Arrow1Lend" + orient="auto" + refY="0" + refX="0" + id="Arrow1Lend-6" + style="overflow:visible"> + <path + inkscape:connector-curvature="0" + id="path8407-0" + d="M 0,0 5,-5 -12.5,0 5,5 0,0 z" + style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt" + transform="matrix(-0.8,0,0,-0.8,-10,0)" /> + </marker> + <marker + inkscape:stockid="Arrow1Lstart" + orient="auto" + refY="0" + refX="0" + id="Arrow1Lstart-7" + style="overflow:visible"> + <path + inkscape:connector-curvature="0" + id="path8404-0" + d="M 
0,0 5,-5 -12.5,0 5,5 0,0 z" + style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt" + transform="matrix(0.8,0,0,0.8,10,0)" /> + </marker> + <marker + inkscape:stockid="Arrow1Lend" + orient="auto" + refY="0" + refX="0" + id="Arrow1Lend-51" + style="overflow:visible"> + <path + inkscape:connector-curvature="0" + id="path8407-1" + d="M 0,0 5,-5 -12.5,0 5,5 0,0 z" + style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt" + transform="matrix(-0.8,0,0,-0.8,-10,0)" /> + </marker> + <marker + inkscape:stockid="Arrow1Lend" + orient="auto" + refY="0" + refX="0" + id="Arrow1Lend-3" + style="overflow:visible"> + <path + inkscape:connector-curvature="0" + id="path8407-6" + d="M 0,0 5,-5 -12.5,0 5,5 0,0 z" + style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt" + transform="matrix(-0.8,0,0,-0.8,-10,0)" /> + </marker> + <marker + inkscape:stockid="Arrow1Lend" + orient="auto" + refY="0" + refX="0" + id="Arrow1Lend-62" + style="overflow:visible"> + <path + inkscape:connector-curvature="0" + id="path8407-9" + d="M 0,0 5,-5 -12.5,0 5,5 0,0 z" + style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt" + transform="matrix(-0.8,0,0,-0.8,-10,0)" /> + </marker> + <marker + inkscape:stockid="Arrow1Lend" + orient="auto" + refY="0" + refX="0" + id="Arrow1Lend-2" + style="overflow:visible"> + <path + inkscape:connector-curvature="0" + id="path8407-7" + d="M 0,0 5,-5 -12.5,0 5,5 0,0 z" + style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt" + transform="matrix(-0.8,0,0,-0.8,-10,0)" /> + </marker> + <marker + inkscape:stockid="Arrow1Lstart" + orient="auto" + refY="0" + refX="0" + id="Arrow1Lstart-7-9" + style="overflow:visible"> + <path + inkscape:connector-curvature="0" + id="path8404-0-3" + d="M 0,0 5,-5 -12.5,0 5,5 0,0 z" + style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt" + transform="matrix(0.8,0,0,0.8,10,0)" /> + </marker> + <filter + id="filter_2-3-6-1" + color-interpolation-filters="sRGB"> + <feGaussianBlur + stdDeviation="2" + id="feGaussianBlur15-1-63-8" /> + </filter> + <filter + id="filter_2-3-92" + color-interpolation-filters="sRGB"> + <feGaussianBlur + stdDeviation="2" + id="feGaussianBlur15-1-2" /> + </filter> + <filter + id="filter_2-3-94" + color-interpolation-filters="sRGB"> + <feGaussianBlur + stdDeviation="2" + id="feGaussianBlur15-1-7" /> + </filter> + <marker + inkscape:stockid="Arrow1Lstart" + orient="auto" + refY="0" + refX="0" + id="Arrow1Lstart-7-6" + style="overflow:visible"> + <path + inkscape:connector-curvature="0" + id="path8404-0-1" + d="M 0,0 5,-5 -12.5,0 5,5 0,0 z" + style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt" + transform="matrix(0.8,0,0,0.8,10,0)" /> + </marker> + <marker + inkscape:stockid="Arrow1Lend" + orient="auto" + refY="0" + refX="0" + id="Arrow1Lend-55" + style="overflow:visible"> + <path + inkscape:connector-curvature="0" + id="path8407-4" + d="M 0,0 5,-5 -12.5,0 5,5 0,0 z" + style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt" + transform="matrix(-0.8,0,0,-0.8,-10,0)" /> + </marker> + </defs> + <sodipodi:namedview + id="base" + pagecolor="#ffffff" + bordercolor="#666666" + borderopacity="1.0" + inkscape:pageopacity="0.0" + inkscape:pageshadow="2" + inkscape:zoom="1" + inkscape:cx="359.77003" + inkscape:cy="287.74194" + inkscape:document-units="px" + inkscape:current-layer="layer1" + showgrid="false" + inkscape:window-width="1200" + inkscape:window-height="898" + inkscape:window-x="0" + inkscape:window-y="31" + inkscape:window-maximized="1" + inkscape:snap-nodes="false"> + <inkscape:grid + type="xygrid" + id="grid13454" /> + 
</sodipodi:namedview> + <metadata + id="metadata13242"> + <rdf:RDF> + <cc:Work + rdf:about=""> + <dc:format>image/svg+xml</dc:format> + <dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> + <dc:title /> + </cc:Work> + </rdf:RDF> + </metadata> + <g + id="layer1" + inkscape:label="Layer 1" + inkscape:groupmode="layer"> + <g + style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible" + id="shape1-1-2-4" + v:mID="1" + v:groupContext="shape" + transform="matrix(2.1604167,0,0,1.5671361,88.874699,-812.39909)"> + <title + id="title22-7-5">Square + Atomic Queue #1 + + + + + + + + + + + + + + + + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + + + + + + + 1 + + + 2 + + + + 8 + + + + + 7 + + + + + 3 + + + + 4 + + + 5 + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + + + 6 + + + Eventdev + + + CryptoAdapter + + + Applicationin orderedstage + + + Cryptodev + + + 1. Events from the previous stage.2. Application in ordered stage dequeues events from eventdev.3. Application enqueues crypto operations as events to eventdev.4. Crypto adapter dequeues event from eventdev.5. Crypto adapter submits crypto operations to cryptodev (Atomic stage)6. Crypto adapter dequeues crypto completions from cryptodev7. Crypto adapter enqueues events to the eventdev8. Events to the next stage + + + diff --git a/doc/guides/prog_guide/img/event_crypto_adapter_op_new.svg b/doc/guides/prog_guide/img/event_crypto_adapter_op_new.svg new file mode 100644 index 00000000..256862ed --- /dev/null +++ b/doc/guides/prog_guide/img/event_crypto_adapter_op_new.svg @@ -0,0 +1,1061 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + + + 2 + + + + + 3 + + + 4 + + + 6 + + + Eventdev + + + Atomic stage+enqueue tocryptodev + + + 5 + + + Cryptodev + + + Cryptoadapter + + + 1. Application dequeues events from the previous stage2. Application prepares the crypto operations.3. Crypto operations are submitted to cryptodev by application..4. Crypto adapter dequeues crypto completions from cryptodev.5. Crypto adapter enqueues events to the eventdev.6. Application dequeues from eventdev and prepare for further processing + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + Application + + + diff --git a/doc/guides/prog_guide/img/eventdev_usage.svg b/doc/guides/prog_guide/img/eventdev_usage.svg index 7765649b..c19818b9 100644 --- a/doc/guides/prog_guide/img/eventdev_usage.svg +++ b/doc/guides/prog_guide/img/eventdev_usage.svg @@ -1,994 +1,549 @@ + + + + -image/svg+xml - - - - - - - - | - +--------------------+------------------------------------------------------+----------------------------------------------+ - | input port enable | Enable given input port for specific pipeline | p port in enable | - | | instance. | | - +--------------------+------------------------------------------------------+----------------------------------------------+ - | input port disable | Disable given input port for specific pipeline | p port in disable | - | | instance. 
-Pipeline type specific CLI commands
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+   Disable given pipeline instance for specific data plane thread ::

-The pipeline specific CLI commands are part of the pipeline type front-end.
+      thread <thread_id> pipeline <pipeline_name> disable
diff --git a/doc/guides/sample_app_ug/kernel_nic_interface.rst b/doc/guides/sample_app_ug/kernel_nic_interface.rst
index 580d06b5..1b3ee9a5 100644
--- a/doc/guides/sample_app_ug/kernel_nic_interface.rst
+++ b/doc/guides/sample_app_ug/kernel_nic_interface.rst
@@ -202,74 +202,8 @@ Setup of mbuf pool, driver and queues is similar to the setup done in the :doc:`
 In addition, one or more kernel NIC interfaces are allocated for each
 of the configured ports according to the command line parameters.

-The code for allocating the kernel NIC interfaces for a specific port is as follows:
-
-.. code-block:: c
-
-    static int
-    kni_alloc(uint16_t port_id)
-    {
-        uint8_t i;
-        struct rte_kni *kni;
-        struct rte_kni_conf conf;
-        struct kni_port_params **params = kni_port_params_array;
-
-        if (port_id >= RTE_MAX_ETHPORTS || !params[port_id])
-            return -1;
-
-        params[port_id]->nb_kni = params[port_id]->nb_lcore_k ? params[port_id]->nb_lcore_k : 1;
-
-        for (i = 0; i < params[port_id]->nb_kni; i++) {
-
-            /* Clear conf at first */
-
-            memset(&conf, 0, sizeof(conf));
-            if (params[port_id]->nb_lcore_k) {
-                snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth%u_%u", port_id, i);
-                conf.core_id = params[port_id]->lcore_k[i];
-                conf.force_bind = 1;
-            } else
-                snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth%u", port_id);
-            conf.group_id = (uint16_t)port_id;
-            conf.mbuf_size = MAX_PACKET_SZ;
-
-            /*
-             * The first KNI device associated to a port
-             * is the master, for multiple kernel thread
-             * environment.
-             */
-
-            if (i == 0) {
-                struct rte_kni_ops ops;
-                struct rte_eth_dev_info dev_info;
-
-                memset(&dev_info, 0, sizeof(dev_info));
-                rte_eth_dev_info_get(port_id, &dev_info);
-
-                conf.addr = dev_info.pci_dev->addr;
-                conf.id = dev_info.pci_dev->id;
-
-                /* Get the interface default mac address */
-                rte_eth_macaddr_get(port_id, (struct ether_addr *)&conf.mac_addr);
-
-                memset(&ops, 0, sizeof(ops));
-
-                ops.port_id = port_id;
-                ops.change_mtu = kni_change_mtu;
-                ops.config_network_if = kni_config_network_interface;
-                ops.config_mac_address = kni_config_mac_address;
-
-                kni = rte_kni_alloc(pktmbuf_pool, &conf, &ops);
-            } else
-                kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);
-
-            if (!kni)
-                rte_exit(EXIT_FAILURE, "Fail to create kni for "
-                         "port: %d\n", port_id);
-
-            params[port_id]->kni[i] = kni;
-        }
-        return 0;
-    }
+The code for allocating the kernel NIC interfaces for a specific port is
+in the function ``kni_alloc``.

 The other step in the initialization process that is unique to this sample application
 is the association of each port with lcores for RX, TX and kernel threads.

@@ -280,105 +214,8 @@ is the association of each port with lcores for RX, TX and kernel threads.

 * Other lcores for pinning the kernel threads on one by one

-This is done by using the`kni_port_params_array[]` array, which is indexed by the port ID.
-The code is as follows:
-
-..
code-block:: console - - static int - parse_config(const char *arg) - { - const char *p, *p0 = arg; - char s[256], *end; - unsigned size; - enum fieldnames { - FLD_PORT = 0, - FLD_LCORE_RX, - FLD_LCORE_TX, - _NUM_FLD = KNI_MAX_KTHREAD + 3, - }; - int i, j, nb_token; - char *str_fld[_NUM_FLD]; - unsigned long int_fld[_NUM_FLD]; - uint16_t port_id, nb_kni_port_params = 0; - - memset(&kni_port_params_array, 0, sizeof(kni_port_params_array)); - - while (((p = strchr(p0, '(')) != NULL) && nb_kni_port_params < RTE_MAX_ETHPORTS) { - p++; - if ((p0 = strchr(p, ')')) == NULL) - goto fail; - - size = p0 - p; - - if (size >= sizeof(s)) { - printf("Invalid config parameters\n"); - goto fail; - } - - snprintf(s, sizeof(s), "%.*s", size, p); - nb_token = rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ','); - - if (nb_token <= FLD_LCORE_TX) { - printf("Invalid config parameters\n"); - goto fail; - } - - for (i = 0; i < nb_token; i++) { - errno = 0; - int_fld[i] = strtoul(str_fld[i], &end, 0); - if (errno != 0 || end == str_fld[i]) { - printf("Invalid config parameters\n"); - goto fail; - } - } - - i = 0; - port_id = (uint8_t)int_fld[i++]; - - if (port_id >= RTE_MAX_ETHPORTS) { - printf("Port ID %u could not exceed the maximum %u\n", port_id, RTE_MAX_ETHPORTS); - goto fail; - } - - if (kni_port_params_array[port_id]) { - printf("Port %u has been configured\n", port_id); - goto fail; - } - - kni_port_params_array[port_id] = (struct kni_port_params*)rte_zmalloc("KNI_port_params", sizeof(struct kni_port_params), RTE_CACHE_LINE_SIZE); - kni_port_params_array[port_id]->port_id = port_id; - kni_port_params_array[port_id]->lcore_rx = (uint8_t)int_fld[i++]; - kni_port_params_array[port_id]->lcore_tx = (uint8_t)int_fld[i++]; - - if (kni_port_params_array[port_id]->lcore_rx >= RTE_MAX_LCORE || kni_port_params_array[port_id]->lcore_tx >= RTE_MAX_LCORE) { - printf("lcore_rx %u or lcore_tx %u ID could not " - "exceed the maximum %u\n", - kni_port_params_array[port_id]->lcore_rx, kni_port_params_array[port_id]->lcore_tx, RTE_MAX_LCORE); - goto fail; - } - - for (j = 0; i < nb_token && j < KNI_MAX_KTHREAD; i++, j++) - kni_port_params_array[port_id]->lcore_k[j] = (uint8_t)int_fld[i]; - kni_port_params_array[port_id]->nb_lcore_k = j; - } - - print_config(); - - return 0; - - fail: - - for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - if (kni_port_params_array[i]) { - rte_free(kni_port_params_array[i]); - kni_port_params_array[i] = NULL; - } - } - - return -1; - - } +This is done by using the ``kni_port_params_array[]`` array, which is indexed by the port ID. +The code is in the function ``parse_config``. Packet Forwarding ~~~~~~~~~~~~~~~~~ @@ -387,99 +224,18 @@ After the initialization steps are completed, the main_loop() function is run on This function first checks the lcore_id against the user provided lcore_rx and lcore_tx to see if this lcore is reading from or writing to kernel NIC interfaces. -For the case that reads from a NIC port and writes to the kernel NIC interfaces, +For the case that reads from a NIC port and writes to the kernel NIC interfaces (``kni_ingress``), the packet reception is the same as in L2 Forwarding sample application (see :ref:`l2_fwd_app_rx_tx_packets`). The packet transmission is done by sending mbufs into the kernel NIC interfaces by rte_kni_tx_burst(). The KNI library automatically frees the mbufs after the kernel successfully copied the mbufs. -.. 
code-block:: c - - /** - * Interface to burst rx and enqueue mbufs into rx_q - */ - - static void - kni_ingress(struct kni_port_params *p) - { - uint8_t i, nb_kni, port_id; - unsigned nb_rx, num; - struct rte_mbuf *pkts_burst[PKT_BURST_SZ]; - - if (p == NULL) - return; - - nb_kni = p->nb_kni; - port_id = p->port_id; - - for (i = 0; i < nb_kni; i++) { - /* Burst rx from eth */ - nb_rx = rte_eth_rx_burst(port_id, 0, pkts_burst, PKT_BURST_SZ); - if (unlikely(nb_rx > PKT_BURST_SZ)) { - RTE_LOG(ERR, APP, "Error receiving from eth\n"); - return; - } - - /* Burst tx to kni */ - num = rte_kni_tx_burst(p->kni[i], pkts_burst, nb_rx); - kni_stats[port_id].rx_packets += num; - rte_kni_handle_request(p->kni[i]); - - if (unlikely(num < nb_rx)) { - /* Free mbufs not tx to kni interface */ - kni_burst_free_mbufs(&pkts_burst[num], nb_rx - num); - kni_stats[port_id].rx_dropped += nb_rx - num; - } - } - } - -For the other case that reads from kernel NIC interfaces and writes to a physical NIC port, packets are retrieved by reading -mbufs from kernel NIC interfaces by `rte_kni_rx_burst()`. +For the other case that reads from kernel NIC interfaces +and writes to a physical NIC port (``kni_egress``), +packets are retrieved by reading mbufs from kernel NIC interfaces by ``rte_kni_rx_burst()``. The packet transmission is the same as in the L2 Forwarding sample application (see :ref:`l2_fwd_app_rx_tx_packets`). -.. code-block:: c - - /** - * Interface to dequeue mbufs from tx_q and burst tx - */ - - static void - - kni_egress(struct kni_port_params *p) - { - uint8_t i, nb_kni, port_id; - unsigned nb_tx, num; - struct rte_mbuf *pkts_burst[PKT_BURST_SZ]; - - if (p == NULL) - return; - - nb_kni = p->nb_kni; - port_id = p->port_id; - - for (i = 0; i < nb_kni; i++) { - /* Burst rx from kni */ - num = rte_kni_rx_burst(p->kni[i], pkts_burst, PKT_BURST_SZ); - if (unlikely(num > PKT_BURST_SZ)) { - RTE_LOG(ERR, APP, "Error receiving from KNI\n"); - return; - } - - /* Burst tx to eth */ - - nb_tx = rte_eth_tx_burst(port_id, 0, pkts_burst, (uint16_t)num); - - kni_stats[port_id].tx_packets += nb_tx; - - if (unlikely(nb_tx < num)) { - /* Free mbufs not tx to NIC */ - kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx); - kni_stats[port_id].tx_dropped += num - nb_tx; - } - } - } - Callbacks for Kernel Requests ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -492,106 +248,3 @@ Application may choose to not implement following callbacks: - ``config_mac_address`` - ``config_promiscusity`` - - -.. 
code-block:: c - - static struct rte_kni_ops kni_ops = { - .change_mtu = kni_change_mtu, - .config_network_if = kni_config_network_interface, - .config_mac_address = kni_config_mac_address, - .config_promiscusity = kni_config_promiscusity, - }; - - /* Callback for request of changing MTU */ - - static int - kni_change_mtu(uint16_t port_id, unsigned new_mtu) - { - int ret; - struct rte_eth_conf conf; - - if (port_id >= rte_eth_dev_count()) { - RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id); - return -EINVAL; - } - - RTE_LOG(INFO, APP, "Change MTU of port %d to %u\n", port_id, new_mtu); - - /* Stop specific port */ - - rte_eth_dev_stop(port_id); - - memcpy(&conf, &port_conf, sizeof(conf)); - - /* Set new MTU */ - - if (new_mtu > ETHER_MAX_LEN) - conf.rxmode.jumbo_frame = 1; - else - conf.rxmode.jumbo_frame = 0; - - /* mtu + length of header + length of FCS = max pkt length */ - - conf.rxmode.max_rx_pkt_len = new_mtu + KNI_ENET_HEADER_SIZE + KNI_ENET_FCS_SIZE; - - ret = rte_eth_dev_configure(port_id, 1, 1, &conf); - if (ret < 0) { - RTE_LOG(ERR, APP, "Fail to reconfigure port %d\n", port_id); - return ret; - } - - /* Restart specific port */ - - ret = rte_eth_dev_start(port_id); - if (ret < 0) { - RTE_LOG(ERR, APP, "Fail to restart port %d\n", port_id); - return ret; - } - - return 0; - } - - /* Callback for request of configuring network interface up/down */ - - static int - kni_config_network_interface(uint16_t port_id, uint8_t if_up) - { - int ret = 0; - - if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) { - RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id); - return -EINVAL; - } - - RTE_LOG(INFO, APP, "Configure network interface of %d %s\n", - - port_id, if_up ? "up" : "down"); - - if (if_up != 0) { - /* Configure network interface up */ - rte_eth_dev_stop(port_id); - ret = rte_eth_dev_start(port_id); - } else /* Configure network interface down */ - rte_eth_dev_stop(port_id); - - if (ret < 0) - RTE_LOG(ERR, APP, "Failed to start port %d\n", port_id); - return ret; - } - - /* Callback for request of configuring device mac address */ - - static int - kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[]) - { - ..... - } - - /* Callback for request of configuring promiscuous mode */ - - static int - kni_config_promiscusity(uint16_t port_id, uint8_t to_on) - { - ..... - } diff --git a/doc/guides/sample_app_ug/l2_forward_job_stats.rst b/doc/guides/sample_app_ug/l2_forward_job_stats.rst index bfdf9c8f..ba73d855 100644 --- a/doc/guides/sample_app_ug/l2_forward_job_stats.rst +++ b/doc/guides/sample_app_ug/l2_forward_job_stats.rst @@ -178,11 +178,6 @@ in the *DPDK Programmer's Guide* and the *DPDK API Reference*. .. code-block:: c - nb_ports = rte_eth_dev_count(); - - if (nb_ports == 0) - rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n"); - /* reset l2fwd_dst_ports */ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) @@ -193,7 +188,7 @@ in the *DPDK Programmer's Guide* and the *DPDK API Reference*. /* * Each logical core is assigned a dedicated TX queue on each port. */ - for (portid = 0; portid < nb_ports; portid++) { + RTE_ETH_FOREACH_DEV(portid) { /* skip ports that are not enabled */ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) continue; @@ -223,25 +218,6 @@ The rte_eth_dev_configure() function is used to configure the number of queues f "err=%d, port=%u\n", ret, portid); -The global configuration is stored in a static structure: - -.. 
code-block:: c - - static const struct rte_eth_conf port_conf = { - .rxmode = { - .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc= 0, /**< CRC stripped by hardware */ - }, - - .txmode = { - .mq_mode = ETH_DCB_NONE - }, - }; - RX Queue Initialization ~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/guides/sample_app_ug/l2_forward_real_virtual.rst b/doc/guides/sample_app_ug/l2_forward_real_virtual.rst index f02be05c..2b2d5afa 100644 --- a/doc/guides/sample_app_ug/l2_forward_real_virtual.rst +++ b/doc/guides/sample_app_ug/l2_forward_real_virtual.rst @@ -197,11 +197,6 @@ in the *DPDK Programmer's Guide* - Rel 1.4 EAR and the *DPDK API Reference*. if (rte_pci_probe() < 0) rte_exit(EXIT_FAILURE, "Cannot probe PCI\n"); - nb_ports = rte_eth_dev_count(); - - if (nb_ports == 0) - rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n"); - /* reset l2fwd_dst_ports */ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) @@ -213,7 +208,7 @@ in the *DPDK Programmer's Guide* - Rel 1.4 EAR and the *DPDK API Reference*. * Each logical core is assigned a dedicated TX queue on each port. */ - for (portid = 0; portid < nb_ports; portid++) { + RTE_ETH_FOREACH_DEV(portid) { /* skip ports that are not enabled */ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) @@ -250,25 +245,6 @@ The rte_eth_dev_configure() function is used to configure the number of queues f "err=%d, port=%u\n", ret, portid); -The global configuration is stored in a static structure: - -.. code-block:: c - - static const struct rte_eth_conf port_conf = { - .rxmode = { - .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc= 0, /**< CRC stripped by hardware */ - }, - - .txmode = { - .mq_mode = ETH_DCB_NONE - }, - }; - .. _l2_fwd_app_rx_init: RX Queue Initialization diff --git a/doc/guides/sample_app_ug/link_status_intr.rst b/doc/guides/sample_app_ug/link_status_intr.rst index 8fa6d76c..c7665fe5 100644 --- a/doc/guides/sample_app_ug/link_status_intr.rst +++ b/doc/guides/sample_app_ug/link_status_intr.rst @@ -91,15 +91,11 @@ To fully understand this code, it is recommended to study the chapters that rela if (rte_pci_probe() < 0) rte_exit(EXIT_FAILURE, "Cannot probe PCI\n"); - nb_ports = rte_eth_dev_count(); - if (nb_ports == 0) - rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n"); - /* * Each logical core is assigned a dedicated TX queue on each port. 
*/ - for (portid = 0; portid < nb_ports; portid++) { + RTE_ETH_FOREACH_DEV(portid) { /* skip ports that are not enabled */ if ((lsi_enabled_port_mask & (1 << portid)) == 0) @@ -141,10 +137,7 @@ The global configuration is stored in a static structure: static const struct rte_eth_conf port_conf = { .rxmode = { .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .hw_strip_crc= 0, /**< CRC stripped by hardware */ + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .txmode = {}, .intr_conf = { diff --git a/doc/guides/sample_app_ug/multi_process.rst b/doc/guides/sample_app_ug/multi_process.rst index b84e9a74..9c374da6 100644 --- a/doc/guides/sample_app_ug/multi_process.rst +++ b/doc/guides/sample_app_ug/multi_process.rst @@ -321,409 +321,3 @@ In both the server and the client processes, outgoing packets are buffered befor so as to allow the sending of multiple packets in a single burst to improve efficiency. For example, the client process will buffer packets to send, until either the buffer is full or until we receive no further packets from the server. - -Master-slave Multi-process Example -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The fourth example of DPDK multi-process support demonstrates a master-slave model that -provide the capability of application recovery if a slave process crashes or meets unexpected conditions. -In addition, it also demonstrates the floating process, -which can run among different cores in contrast to the traditional way of binding a process/thread to a specific CPU core, -using the local cache mechanism of mempool structures. - -This application performs the same functionality as the L2 Forwarding sample application, -therefore this chapter does not cover that part but describes functionality that is introduced in this multi-process example only. -Please refer to :doc:`l2_forward_real_virtual` for more information. - -Unlike previous examples where all processes are started from the command line with input arguments, in this example, -only one process is spawned from the command line and that process creates other processes. -The following section describes this in more detail. - -Master-slave Process Models -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The process spawned from the command line is called the *master process* in this document. -A process created by the master is called a *slave process*. -The application has only one master process, but could have multiple slave processes. - -Once the master process begins to run, it tries to initialize all the resources such as -memory, CPU cores, driver, ports, and so on, as the other examples do. -Thereafter, it creates slave processes, as shown in the following figure. - -.. _figure_master_slave_proc: - -.. figure:: img/master_slave_proc.* - - Master-slave Process Workflow - - -The master process calls the rte_eal_mp_remote_launch() EAL function to launch an application function for each pinned thread through the pipe. -Then, it waits to check if any slave processes have exited. -If so, the process tries to re-initialize the resources that belong to that slave and launch them in the pinned thread entry again. -The following section describes the recovery procedures in more detail. - -For each pinned thread in EAL, after reading any data from the pipe, it tries to call the function that the application specified. 
-In this master specified function, a fork() call creates a slave process that performs the L2 forwarding task. -Then, the function waits until the slave exits, is killed or crashes. Thereafter, it notifies the master of this event and returns. -Finally, the EAL pinned thread waits until the new function is launched. - -After discussing the master-slave model, it is necessary to mention another issue, global and static variables. - -For multiple-thread cases, all global and static variables have only one copy and they can be accessed by any thread if applicable. -So, they can be used to sync or share data among threads. - -In the previous examples, each process has separate global and static variables in memory and are independent of each other. -If it is necessary to share the knowledge, some communication mechanism should be deployed, such as, memzone, ring, shared memory, and so on. -The global or static variables are not a valid approach to share data among processes. -For variables in this example, on the one hand, the slave process inherits all the knowledge of these variables after being created by the master. -On the other hand, other processes cannot know if one or more processes modifies them after slave creation since that -is the nature of a multiple process address space. -But this does not mean that these variables cannot be used to share or sync data; it depends on the use case. -The following are the possible use cases: - -#. The master process starts and initializes a variable and it will never be changed after slave processes created. This case is OK. - -#. After the slave processes are created, the master or slave cores need to change a variable, but other processes do not need to know the change. - This case is also OK. - -#. After the slave processes are created, the master or a slave needs to change a variable. - In the meantime, one or more other process needs to be aware of the change. - In this case, global and static variables cannot be used to share knowledge. Another communication mechanism is needed. - A simple approach without lock protection can be a heap buffer allocated by rte_malloc or mem zone. - -Slave Process Recovery Mechanism -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Before talking about the recovery mechanism, it is necessary to know what is needed before a new slave instance can run if a previous one exited. - -When a slave process exits, the system returns all the resources allocated for this process automatically. -However, this does not include the resources that were allocated by the DPDK. All the hardware resources are shared among the processes, -which include memzone, mempool, ring, a heap buffer allocated by the rte_malloc library, and so on. -If the new instance runs and the allocated resource is not returned, either resource allocation failed or the hardware resource is lost forever. - -When a slave process runs, it may have dependencies on other processes. -They could have execution sequence orders; they could share the ring to communicate; they could share the same port for reception and forwarding; -they could use lock structures to do exclusive access in some critical path. -What happens to the dependent process(es) if the peer leaves? -The consequence are varied since the dependency cases are complex. -It depends on what the processed had shared. -However, it is necessary to notify the peer(s) if one slave exited. -Then, the peer(s) will be aware of that and wait until the new instance begins to run. 
- -Therefore, to provide the capability to resume the new slave instance if the previous one exited, it is necessary to provide several mechanisms: - -#. Keep a resource list for each slave process. - Before a slave process run, the master should prepare a resource list. - After it exits, the master could either delete the allocated resources and create new ones, - or re-initialize those for use by the new instance. - -#. Set up a notification mechanism for slave process exit cases. After the specific slave leaves, - the master should be notified and then help to create a new instance. - This mechanism is provided in Section `Master-slave Process Models`_. - -#. Use a synchronization mechanism among dependent processes. - The master should have the capability to stop or kill slave processes that have a dependency on the one that has exited. - Then, after the new instance of exited slave process begins to run, the dependency ones could resume or run from the start. - The example sends a STOP command to slave processes dependent on the exited one, then they will exit. - Thereafter, the master creates new instances for the exited slave processes. - -The following diagram describes slave process recovery. - -.. _figure_slave_proc_recov: - -.. figure:: img/slave_proc_recov.* - - Slave Process Recovery Process Flow - - -Floating Process Support -^^^^^^^^^^^^^^^^^^^^^^^^ - -When the DPDK application runs, there is always a -c option passed in to indicate the cores that are enabled. -Then, the DPDK creates a thread for each enabled core. -By doing so, it creates a 1:1 mapping between the enabled core and each thread. -The enabled core always has an ID, therefore, each thread has a unique core ID in the DPDK execution environment. -With the ID, each thread can easily access the structures or resources exclusively belonging to it without using function parameter passing. -It can easily use the rte_lcore_id() function to get the value in every function that is called. - -For threads/processes not created in that way, either pinned to a core or not, they will not own a unique ID and the -rte_lcore_id() function will not work in the correct way. -However, sometimes these threads/processes still need the unique ID mechanism to do easy access on structures or resources. -For example, the DPDK mempool library provides a local cache mechanism -(refer to :ref:`mempool_local_cache`) -for fast element allocation and freeing. -If using a non-unique ID or a fake one, -a race condition occurs if two or more threads/ processes with the same core ID try to use the local cache. - -Therefore, unused core IDs from the passing of parameters with the -c option are used to organize the core ID allocation array. -Once the floating process is spawned, it tries to allocate a unique core ID from the array and release it on exit. - -A natural way to spawn a floating process is to use the fork() function and allocate a unique core ID from the unused core ID array. -However, it is necessary to write new code to provide a notification mechanism for slave exit -and make sure the process recovery mechanism can work with it. - -To avoid producing redundant code, the Master-Slave process model is still used to spawn floating processes, -then cancel the affinity to specific cores. -Besides that, clear the core ID assigned to the DPDK spawning a thread that has a 1:1 mapping with the core mask. -Thereafter, get a new core ID from the unused core ID allocation array. 
- -Run the Application -^^^^^^^^^^^^^^^^^^^ - -This example has a command line similar to the L2 Forwarding sample application with a few differences. - -To run the application, start one copy of the l2fwd_fork binary in one terminal. -Unlike the L2 Forwarding example, -this example requires at least three cores since the master process will wait and be accountable for slave process recovery. -The command is as follows: - -.. code-block:: console - - #./build/l2fwd_fork -l 2-4 -n 4 -- -p 3 -f - -This example provides another -f option to specify the use of floating process. -If not specified, the example will use a pinned process to perform the L2 forwarding task. - -To verify the recovery mechanism, proceed as follows: First, check the PID of the slave processes: - -.. code-block:: console - - #ps -fe | grep l2fwd_fork - root 5136 4843 29 11:11 pts/1 00:00:05 ./build/l2fwd_fork - root 5145 5136 98 11:11 pts/1 00:00:11 ./build/l2fwd_fork - root 5146 5136 98 11:11 pts/1 00:00:11 ./build/l2fwd_fork - -Then, kill one of the slaves: - -.. code-block:: console - - #kill -9 5145 - -After 1 or 2 seconds, check whether the slave has resumed: - -.. code-block:: console - - #ps -fe | grep l2fwd_fork - root 5136 4843 3 11:11 pts/1 00:00:06 ./build/l2fwd_fork - root 5247 5136 99 11:14 pts/1 00:00:01 ./build/l2fwd_fork - root 5248 5136 99 11:14 pts/1 00:00:01 ./build/l2fwd_fork - -It can also monitor the traffic generator statics to see whether slave processes have resumed. - -Explanation -^^^^^^^^^^^ - -As described in previous sections, -not all global and static variables need to change to be accessible in multiple processes; -it depends on how they are used. -In this example, -the statics info on packets dropped/forwarded/received count needs to be updated by the slave process, -and the master needs to see the update and print them out. -So, it needs to allocate a heap buffer using rte_zmalloc. -In addition, if the -f option is specified, -an array is needed to store the allocated core ID for the floating process so that the master can return it -after a slave has exited accidentally. - -.. code-block:: c - - static int - l2fwd_malloc_shared_struct(void) - { - port_statistics = rte_zmalloc("port_stat", sizeof(struct l2fwd_port_statistics) * RTE_MAX_ETHPORTS, 0); - - if (port_statistics == NULL) - return -1; - - /* allocate mapping_id array */ - - if (float_proc) { - int i; - - mapping_id = rte_malloc("mapping_id", sizeof(unsigned) * RTE_MAX_LCORE, 0); - if (mapping_id == NULL) - return -1; - - for (i = 0 ;i < RTE_MAX_LCORE; i++) - mapping_id[i] = INVALID_MAPPING_ID; - - } - return 0; - } - -For each slave process, packets are received from one port and forwarded to another port that another slave is operating on. -If the other slave exits accidentally, the port it is operating on may not work normally, -so the first slave cannot forward packets to that port. -There is a dependency on the port in this case. So, the master should recognize the dependency. -The following is the code to detect this dependency: - -.. 
-
-    for (portid = 0; portid < nb_ports; portid++) {
-        /* skip ports that are not enabled */
-
-        if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
-            continue;
-
-        /* Find pair ports' lcores */
-
-        find_lcore = find_pair_lcore = 0;
-        pair_port = l2fwd_dst_ports[portid];
-
-        for (i = 0; i < RTE_MAX_LCORE; i++) {
-            if (!rte_lcore_is_enabled(i))
-                continue;
-
-            for (j = 0; j < lcore_queue_conf[i].n_rx_port; j++) {
-                if (lcore_queue_conf[i].rx_port_list[j] == portid) {
-                    lcore = i;
-                    find_lcore = 1;
-                    break;
-                }
-
-                if (lcore_queue_conf[i].rx_port_list[j] == pair_port) {
-                    pair_lcore = i;
-                    find_pair_lcore = 1;
-                    break;
-                }
-            }
-
-            if (find_lcore && find_pair_lcore)
-                break;
-        }
-
-        if (!find_lcore || !find_pair_lcore)
-            rte_exit(EXIT_FAILURE, "Not find port=%d pair\n", portid);
-
-        printf("lcore %u and %u paired\n", lcore, pair_lcore);
-
-        lcore_resource[lcore].pair_id = pair_lcore;
-        lcore_resource[pair_lcore].pair_id = lcore;
-    }
-
-Before launching the slave process,
-it is necessary to set up the communication channel between the master and slave so that
-the master can notify the slave if its peer process with the dependency has exited.
-In addition, the master needs to register a callback function for the case where a specific slave exits.
-
-.. code-block:: c
-
-    for (i = 0; i < RTE_MAX_LCORE; i++) {
-        if (lcore_resource[i].enabled) {
-            /* Create ring for master and slave communication */
-
-            ret = create_ms_ring(i);
-            if (ret != 0)
-                rte_exit(EXIT_FAILURE, "Create ring for lcore=%u failed", i);
-
-            if (flib_register_slave_exit_notify(i, slave_exit_cb) != 0)
-                rte_exit(EXIT_FAILURE, "Register master_trace_slave_exit failed");
-        }
-    }
-
-After launching the slave process, the master waits and prints out the port statistics periodically.
-If an event indicating that a slave process exited is detected,
-it sends the STOP command to the peer and waits until it has also exited.
-Then, it tries to clean up the execution environment and prepare new resources.
-Finally, the new slave instance is launched.
-
-.. code-block:: c
-
-    while (1) {
-        sleep(1);
-        cur_tsc = rte_rdtsc();
-        diff_tsc = cur_tsc - prev_tsc;
-
-        /* if timer is enabled */
-
-        if (timer_period > 0) {
-            /* advance the timer */
-            timer_tsc += diff_tsc;
-
-            /* if timer has reached its timeout */
-            if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
-                print_stats();
-
-                /* reset the timer */
-                timer_tsc = 0;
-            }
-        }
-
-        prev_tsc = cur_tsc;
-
-        /* Check if any slave needs to be restarted or recreated */
-
-        rte_spinlock_lock(&res_lock);
-
-        for (i = 0; i < RTE_MAX_LCORE; i++) {
-            struct lcore_resource_struct *res = &lcore_resource[i];
-            struct lcore_resource_struct *pair = &lcore_resource[res->pair_id];
-
-            /* If a slave has exited, try to reset the pair */
-
-            if (res->enabled && res->flags && pair->enabled) {
-                if (!pair->flags) {
-                    master_sendcmd_with_ack(pair->lcore_id, CMD_STOP);
-                    rte_spinlock_unlock(&res_lock);
-                    sleep(1);
-                    rte_spinlock_lock(&res_lock);
-                    if (pair->flags)
-                        continue;
-                }
-
-                if (reset_pair(res->lcore_id, pair->lcore_id) != 0)
-                    rte_exit(EXIT_FAILURE, "failed to reset slave");
-
-                res->flags = 0;
-                pair->flags = 0;
-            }
-        }
-        rte_spinlock_unlock(&res_lock);
-    }
-
-When the slave process is spawned and starts to run, it checks whether the floating process option is applied.
-If so, it clears the affinity to a specific core and also sets the unique core ID to 0.
-Then, it tries to allocate a new core ID.
-Since the core ID has changed, the resource allocated by the master cannot work,
-so it remaps the resource to the new core ID slot.
-
-.. code-block:: c
-
-    static int
-    l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
-    {
-        unsigned lcore_id = rte_lcore_id();
-
-        if (float_proc) {
-            unsigned flcore_id;
-
-            /* Change it to a floating process, also change its lcore_id */
-
-            clear_cpu_affinity();
-
-            RTE_PER_LCORE(_lcore_id) = 0;
-
-            /* Get a lcore_id */
-
-            if (flib_assign_lcore_id() < 0) {
-                printf("flib_assign_lcore_id failed\n");
-                return -1;
-            }
-
-            flcore_id = rte_lcore_id();
-
-            /* Set mapping id, so master can return it after slave exited */
-
-            mapping_id[lcore_id] = flcore_id;
-            printf("Org lcore_id = %u, cur lcore_id = %u\n", lcore_id, flcore_id);
-            remapping_slave_resource(lcore_id, flcore_id);
-        }
-
-        l2fwd_main_loop();
-
-        /* return lcore_id before return */
-        if (float_proc) {
-            flib_free_lcore_id(rte_lcore_id());
-            mapping_id[lcore_id] = INVALID_MAPPING_ID;
-        }
-        return 0;
-    }
diff --git a/doc/guides/sample_app_ug/quota_watermark.rst b/doc/guides/sample_app_ug/quota_watermark.rst
index 8baec4df..67200e15 100644
--- a/doc/guides/sample_app_ug/quota_watermark.rst
+++ b/doc/guides/sample_app_ug/quota_watermark.rst
@@ -163,7 +163,7 @@ Then, a call to init_dpdk(), defined in init.c, is made to initialize the poll m
     if (ret < 0)
         rte_exit(EXIT_FAILURE, "rte_pci_probe(): error %d\n", ret);
-    if (rte_eth_dev_count() < 2)
+    if (rte_eth_dev_count_avail() < 2)
         rte_exit(EXIT_FAILURE, "Not enough Ethernet port available\n");
 }
diff --git a/doc/guides/sample_app_ug/rxtx_callbacks.rst b/doc/guides/sample_app_ug/rxtx_callbacks.rst
index 0bb0d3e0..85d96d8a 100644
--- a/doc/guides/sample_app_ug/rxtx_callbacks.rst
+++ b/doc/guides/sample_app_ug/rxtx_callbacks.rst
@@ -83,9 +83,6 @@ comments:
     int retval;
     uint16_t q;
-    if (port >= rte_eth_dev_count())
-        return -1;
-
     /* Configure the Ethernet device. */
     retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
     if (retval != 0)
diff --git a/doc/guides/sample_app_ug/skeleton.rst b/doc/guides/sample_app_ug/skeleton.rst
index 0503584d..95956171 100644
--- a/doc/guides/sample_app_ug/skeleton.rst
+++ b/doc/guides/sample_app_ug/skeleton.rst
@@ -81,7 +81,7 @@ The ``main()`` function also initializes all the ports using the user defined
 .. code-block:: c
-    for (portid = 0; portid < nb_ports; portid++) {
+    RTE_ETH_FOREACH_DEV(portid) {
         if (port_init(portid, mbuf_pool) != 0) {
             rte_exit(EXIT_FAILURE,
                 "Cannot init port %" PRIu8 "\n", portid);
@@ -119,7 +119,7 @@ Forwarding application is shown below:
     int retval;
     uint16_t q;
-    if (port >= rte_eth_dev_count())
+    if (!rte_eth_dev_is_valid_port(port))
         return -1;
     /* Configure the Ethernet device. */
@@ -192,14 +192,13 @@ looks like the following:
     static __attribute__((noreturn)) void
     lcore_main(void)
     {
-        const uint16_t nb_ports = rte_eth_dev_count();
         uint16_t port;
         /*
          * Check that the port is on the same NUMA node as the polling thread
          * for best performance.
          */
-        for (port = 0; port < nb_ports; port++)
+        RTE_ETH_FOREACH_DEV(port)
             if (rte_eth_dev_socket_id(port) > 0 &&
                 rte_eth_dev_socket_id(port) != (int)rte_socket_id())
@@ -216,7 +215,7 @@ looks like the following:
          * Receive packets on a port and forward them on the paired
          * port. The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
          */
-        for (port = 0; port < nb_ports; port++) {
+        RTE_ETH_FOREACH_DEV(port) {
            /* Get burst of RX packets, from first port of pair. */
            struct rte_mbuf *bufs[BURST_SIZE];
@@ -246,7 +245,7 @@ The main work of the application is done within the loop:
     .. code-block:: c
         for (;;) {
-            for (port = 0; port < nb_ports; port++) {
+            RTE_ETH_FOREACH_DEV(port) {
                 /* Get burst of RX packets, from first port of pair. */
                 struct rte_mbuf *bufs[BURST_SIZE];
diff --git a/doc/guides/sample_app_ug/vhost.rst b/doc/guides/sample_app_ug/vhost.rst
index a4bdc6a4..fd42cb3f 100644
--- a/doc/guides/sample_app_ug/vhost.rst
+++ b/doc/guides/sample_app_ug/vhost.rst
@@ -147,7 +147,10 @@ retries on an RX burst, it takes effect only when rx retry is enabled. The
 default value is 15.
 **--dequeue-zero-copy**
-Dequeue zero copy will be enabled when this option is given.
+Dequeue zero copy will be enabled when this option is given. It is worth
+noting that if the NIC is bound to a driver with iommu enabled, dequeue zero copy
+cannot work in VM2NIC mode (vm2vm=0) because we do not currently set up the iommu
+DMA mapping for guest memory.
 **--vlan-strip 0|1**
 VLAN strip option is removed, because different NICs have different behaviors
@@ -155,6 +158,10 @@ when disabling VLAN strip. Such feature, which heavily depends on hardware,
 should be removed from this example to reduce confusion. Now, VLAN strip is
 enabled and cannot be disabled.
+**--builtin-net-driver**
+A very simple vhost-user net driver which demonstrates how to use the generic
+vhost APIs will be used when this option is given. It is disabled by default.
+
 Common Issues
 -------------
@@ -175,15 +182,22 @@ Common Issues
   The command above indicates how many hugepages are free to support QEMU's
   allocation request.
-* vhost-user will not work with QEMU without the ``-mem-prealloc`` option
+* Failed to build DPDK in VM
-  The current implementation works properly only when the guest memory is
-  pre-allocated.
+  Make sure "-cpu host" QEMU option is given.
-* vhost-user will not work with a QEMU version without shared memory mapping:
+* Device start fails if NIC's max queues > the default number of 128
-  Make sure ``share=on`` QEMU option is given.
+  The mbuf pool size is dependent on the MAX_QUEUES configuration; if the NIC's
+  max queue number is larger than 128, device start will fail due to
+  insufficient mbufs.
-* Failed to build DPDK in VM
+  Change the default number as below to make it work, setting the number
+  according to the NIC's properties. ::
-  Make sure "-cpu host" QEMU option is given.
+      make EXTRA_CFLAGS="-DMAX_QUEUES=320"
+
+* Option "builtin-net-driver" is incompatible with QEMU
+
+  QEMU vhost net device start will fail if the protocol feature is not negotiated.
+  The DPDK virtio-user pmd can be used as a replacement for QEMU.
diff --git a/doc/guides/sample_app_ug/vhost_crypto.rst b/doc/guides/sample_app_ug/vhost_crypto.rst
new file mode 100644
index 00000000..65c86a53
--- /dev/null
+++ b/doc/guides/sample_app_ug/vhost_crypto.rst
@@ -0,0 +1,82 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright(c) 2017-2018 Intel Corporation.
+
+Vhost_Crypto Sample Application
+===============================
+
+The vhost_crypto sample application implements a simple crypto device,
+which is used as the backend of a Qemu vhost-user-crypto device. Similar to
+the vhost-user-net and vhost-user-scsi devices, the sample application uses a
+domain socket to communicate with Qemu, and the virtio ring is processed
+by the vhost_crypto sample application.
+
+Testing steps
+-------------
+
+This section shows the steps for starting a VM with the crypto device as a
+fast data path for critical applications.
+
+Compiling the Application
+-------------------------
+
+To compile the sample application see :doc:`compiling`.
+
+The application is located in the ``examples`` sub-directory.
+
+Start the vhost_crypto example
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: console
+
+    ./vhost_crypto [EAL options] -- [--socket-file PATH]
+        [--cdev-id ID] [--cdev-queue-id ID] [--zero-copy] [--guest-polling]
+
+where,
+
+* socket-file PATH: the path of the UNIX socket file to be created, multiple
+  instances of this config item are supported. Upon absence of this item,
+  the default socket-file `/tmp/vhost_crypto1.socket` is used.
+
+* cdev-id ID: the target DPDK Cryptodev's ID to process the actual crypto
+  workload. Upon absence of this item the default value of `0` will be used.
+  For details of DPDK Cryptodev, please refer to DPDK Cryptodev Library
+  Programmers' Guide.
+
+* cdev-queue-id ID: the target DPDK Cryptodev's queue ID to process the
+  actual crypto workload. Upon absence of this item the default value of `0`
+  will be used. For details of DPDK Cryptodev, please refer to DPDK Cryptodev
+  Library Programmers' Guide.
+
+* zero-copy: the presence of this item means the ZERO-COPY feature will be
+  enabled. Otherwise it is disabled. PLEASE NOTE the ZERO-COPY feature is still
+  in an experimental stage and may cause problems such as segmentation faults. If
+  the user wants to use LKCF in the guest, this feature shall be turned off.
+
+* guest-polling: the presence of this item means the application assumes the
+  guest works in polling mode, thus it will NOT notify the guest of the
+  completion of processing.
+
+The application requires that crypto devices capable of performing
+the specified crypto operation are available on application initialization.
+This means that HW crypto devices must be bound to a DPDK driver, or
+SW crypto devices (virtual crypto PMDs) must be created (using --vdev).
+
+.. _vhost_crypto_app_run_vm:
+
+Start the VM
+~~~~~~~~~~~~
+
+.. code-block:: console
+
+    qemu-system-x86_64 -machine accel=kvm \
+        -m $mem -object memory-backend-file,id=mem,size=$mem,\
+        mem-path=/dev/hugepages,share=on -numa node,memdev=mem \
+        -drive file=os.img,if=none,id=disk \
+        -device ide-hd,drive=disk,bootindex=0 \
+        -chardev socket,id={chardev_id},path={PATH} \
+        -object cryptodev-vhost-user,id={obj_id},chardev={chardev_id} \
+        -device virtio-crypto-pci,id={dev_id},cryptodev={obj_id} \
+        ...
+
+.. note::
+    You must check whether your Qemu can support "vhost-user-crypto" or not.
diff --git a/doc/guides/sample_app_ug/vm_power_management.rst b/doc/guides/sample_app_ug/vm_power_management.rst
index cd7c7f34..855570d6 100644
--- a/doc/guides/sample_app_ug/vm_power_management.rst
+++ b/doc/guides/sample_app_ug/vm_power_management.rst
@@ -20,7 +20,7 @@ running on Virtual Machines(VMs).
 The Virtual Machine Power Management solution shows an example of
 how a DPDK application can indicate its processing requirements using VM local
-only information(vCPU/lcore) to a Host based Monitor which is responsible
+only information (vCPU/lcore, etc.) to a Host based Monitor which is responsible
 for accepting requests for frequency changes for a vCPU, translating the vCPU
 to a pCPU via libvirt and affecting the change in frequency.
@@ -38,6 +38,26 @@ The solution is comprised of two high-level components:
    to the librte_power ACPI cpufreq sysfs based library.
    The Host Application relies on both qemu-kvm and libvirt to function.
+   This monitoring application is responsible for:
+
+   - Accepting requests from client applications: Client applications can
+     request frequency changes for a vCPU, translating
+     the vCPU to a pCPU via libvirt and affecting the change in frequency.
+
+   - Accepting policies from client applications: Client applications can
+     send a policy to the host application. The
+     host application will then apply the rules of the policy independent
+     of the application. For example, the policy can contain time-of-day
+     information for busy/quiet periods, and the host application can scale
+     up/down the relevant cores when required. See the details of the guest
+     application below for more information on setting the policy values.
+
+   - Out-of-band monitoring of workloads via core hardware event counters:
+     The host application can manage power for an application in a virtualised
+     OR non-virtualised environment by looking at the event counters of the
+     cores and taking action based on the branch hit/miss ratio. See the host
+     application '--core-list' command line parameter below.
+
 #. librte_power for Virtual Machines
    Using an alternate implementation for the librte_power API, requests for
@@ -174,13 +194,20 @@ Compiling and Running the Host Application
 Compiling
 ~~~~~~~~~
-Compiling the Application
--------------------------
-
-To compile the sample application see :doc:`compiling`.
+For information on compiling DPDK and the sample applications
+see :doc:`compiling`.
 The application is located in the ``vm_power_manager`` sub-directory.
+To build just the ``vm_power_manager`` application:
+
+.. code-block:: console
+
+  export RTE_SDK=/path/to/rte_sdk
+  export RTE_TARGET=build
+  cd ${RTE_SDK}/examples/vm_power_manager/
+  make
+
 Running
 ~~~~~~~
@@ -287,38 +314,129 @@ Manual control and inspection can also be carried in relation CPU frequency scal
   set_cpu_freq {core_num} up|down|min|max
+There are also some command line parameters for enabling the out-of-band
+monitoring of branch ratio on cores doing busy polling via PMDs.
+
+  .. code-block:: console
+
+    --core-list {list of cores}
+
+  When this parameter is used, the list of cores specified will monitor the ratio
+  between branch hits and branch misses. A tightly polling PMD thread will have a
+  very low branch ratio, so the core frequency will be scaled down to the minimum
+  allowed value. When packets are received, the code path will alter, causing the
+  branch ratio to increase. When the ratio goes above the ratio threshold, the
+  core frequency will be scaled up to the maximum allowed value.
+
+  .. code-block:: console
+
+    --branch-ratio {ratio}
+
+  The branch ratio is a floating point number that specifies the threshold at which
+  to scale up or down for the given workload. The default branch ratio is 0.01,
+  and will need to be adjusted for different workloads.
+
+
 Compiling and Running the Guest Applications
 --------------------------------------------
-For compiling and running l3fwd-power, see :doc:`l3_forward_power_man`.
+l3fwd-power is one sample application that can be used with vm_power_manager.
 A guest CLI is also provided for validating the setup.
 For both l3fwd-power and guest CLI, the channels for the VM must be monitored by the
-host application using the *add_channels* command on the host.
+host application using the *add_channels* command on the host. This typically uses
+the following commands in the host application:
+
+.. code-block:: console
+
+  vm_power> add_vm vmname
+  vm_power> add_channels vmname all
+  vm_power> set_channel_status vmname all enabled
+  vm_power> show_vm vmname
+
 Compiling
 ~~~~~~~~~
-#. export RTE_SDK=/path/to/rte_sdk
-#. cd ${RTE_SDK}/examples/vm_power_manager/guest_cli
-#. make
+For information on compiling DPDK and the sample applications
+see :doc:`compiling`.
+
+For compiling and running l3fwd-power, see :doc:`l3_forward_power_man`.
+
+The application is located in the ``guest_cli`` sub-directory under ``vm_power_manager``.
+
+To build just the ``guest_vm_power_manager`` application:
+
+.. code-block:: console
+
+  export RTE_SDK=/path/to/rte_sdk
+  export RTE_TARGET=build
+  cd ${RTE_SDK}/examples/vm_power_manager/guest_cli/
+  make
 Running
 ~~~~~~~
-The application does not have any specific command line options other than *EAL*:
+The standard *EAL* command line parameters are required:
 .. code-block:: console
-    ./build/vm_power_mgr [EAL options]
+    ./build/guest_vm_power_mgr [EAL options] -- [guest options]
-The application for example purposes uses a channel for each lcore enabled,
-for example to run on cores 0,1,2,3 on a system with 4 memory channels:
+The guest example uses a channel for each lcore enabled. For example,
+to run on cores 0,1,2,3:
 .. code-block:: console
-    ./build/guest_vm_power_mgr -l 0-3 -n 4
+    ./build/guest_vm_power_mgr -l 0-3
+
+Optionally, there is a list of command line parameters should the user wish to send a power
+policy down to the host application. These parameters are as follows:
+
+  .. code-block:: console
+
+    --vm-name {name of guest vm}
+
+  This parameter allows the user to change the Virtual Machine name passed down to the
+  host application via the power policy. The default is "ubuntu2".
+
+  .. code-block:: console
+
+    --vcpu-list {list vm cores}
+
+  A comma-separated list of cores in the VM that the user wants the host application to
+  monitor. The list of cores in any vm starts at zero, and these are mapped to the
+  physical cores by the host application once the policy is passed down.
+  Valid syntax includes individual cores '2,3,4', or a range of cores '2-4', or a
+  combination of both '1,3,5-7'.
+
+  .. code-block:: console
+
+    --busy-hours {list of busy hours}
+
+  A comma-separated list of hours within which to set the core frequency to maximum.
+  Valid syntax includes individual hours '2,3,4', or a range of hours '2-4', or a
+  combination of both '1,3,5-7'. Valid hours are 0 to 23.
+
+  .. code-block:: console
+
+    --quiet-hours {list of quiet hours}
+
+  A comma-separated list of hours within which to set the core frequency to minimum.
+  Valid syntax includes individual hours '2,3,4', or a range of hours '2-4', or a
+  combination of both '1,3,5-7'. Valid hours are 0 to 23.
+
+  .. code-block:: console
+
+    --policy {policy type}
+
+  The type of policy. This can be one of the following values:
+  TRAFFIC - based on incoming traffic rates on the NIC.
+  TIME - busy/quiet hours policy.
+  BRANCH_RATIO - uses branch ratio counters to determine core busyness.
+  Not all parameters are needed for all policy types. For example, BRANCH_RATIO
+  only needs the vcpu-list parameter, not any of the hours.
 After successful initialization the user is presented with VM Power Manager Guest CLI:
@@ -333,3 +451,20 @@ Where {core_num} is the lcore and channel to change frequency by scaling up/down
 .. code-block:: console
   set_cpu_freq {core_num} up|down|min|max
+
+To start the application and configure the power policy, and send it to the host:
+
+.. code-block:: console
+
+ ./build/guest_vm_power_mgr -l 0-3 -n 4 -- --vm-name=ubuntu --policy=BRANCH_RATIO --vcpu-list=2-4
+
+Once the VM Power Manager Guest CLI appears, issuing the 'send_policy now' command
+will send the policy to the host:
+
+.. code-block:: console
+
+  send_policy now
+
+Once the policy is sent to the host, the host application takes over the power monitoring
+of the specified cores in the policy.
+
diff --git a/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst b/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst
index 83fcdf63..0e9da970 100644
--- a/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst
+++ b/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst
@@ -102,10 +102,6 @@ a default structure is provided for VMDQ and DCB configuration to be filled in l
         .rxmode = {
             .mq_mode = ETH_MQ_RX_VMDQ_DCB,
             .split_hdr_size = 0,
-            .header_split = 0,   /**< Header Split disabled */
-            .hw_ip_checksum = 0, /**< IP checksum offload disabled */
-            .hw_vlan_filter = 0, /**< VLAN filtering disabled */
-            .jumbo_frame = 0,    /**< Jumbo Frame Support disabled */
         },
         .txmode = {
             .mq_mode = ETH_MQ_TX_VMDQ_DCB,
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 1fd53958..f301c2b6 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -372,7 +372,9 @@ The commandline options are:
 * ``--burst=N``
     Set the number of packets per burst to N, where 1 <= N <= 512.
-    The default value is 16.
+    The default value is 32.
+    If set to 0, the driver default is used if defined; otherwise, the
+    default of 32 is used.
 * ``--mbcache=N``
@@ -479,3 +481,20 @@ The commandline options are:
     Set the hexadecimal bitmask of TX queue offloads.
     The default value is 0.
+
+* ``--hot-plug``
+
+    Enable the device event monitor mechanism for hotplug.
+
+* ``--vxlan-gpe-port=N``
+
+    Set the UDP port number of tunnel VXLAN-GPE to N.
+    The default value is 4790.
+
+* ``--mlockall``
+
+    Enable locking all memory.
+
+* ``--no-mlockall``
+
+    Disable locking all memory.
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index a766ac79..dde205a2 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -320,12 +320,8 @@ The available information categories are:
 * ``ieee1588``: Demonstrate L2 IEEE1588 V2 PTP timestamping for RX and TX. Requires ``CONFIG_RTE_LIBRTE_IEEE1588=y``.
-* ``tm``: Traffic Management forwarding mode
-  Demonstrates the use of ethdev traffic management APIs and softnic PMD for
-  QoS traffic management. In this mode, 5-level hierarchical QoS scheduler is
-  available as an default option that can be enabled through CLI. The user can
-  also modify the default hierarchy or specify the new hierarchy through CLI for
-  implementing QoS scheduler.  Requires ``CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y`` ``CONFIG_RTE_LIBRTE_SCHED=y``.
+* ``softnic``: Demonstrates the softnic forwarding operation. In this mode, packet forwarding is
+  similar to I/O mode except for the fact that packets are looped back to the softnic ports only. Therefore, the portmask parameter should be set to the softnic port only. The various software based custom NIC pipelines specified through the softnic firmware (DPDK packet framework script) can be tested in this mode. Furthermore, it allows building a 5-level hierarchical QoS scheduler as a default option that can be enabled through the CLI once the testpmd application is initialised.
The user can modify the default scheduler hierarchy or can specify the new QoS Scheduler hierarchy through CLI. Requires ``CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y``. Example:: @@ -393,6 +389,34 @@ List all items from the pctype mapping table:: testpmd> show port (port_id) pctype mapping +show rx offloading capabilities +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +List all per queue and per port Rx offloading capabilities of a port:: + + testpmd> show port (port_id) rx_offload capabilities + +show rx offloading configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +List port level and all queue level Rx offloading configuration:: + + testpmd> show port (port_id) rx_offload configuration + +show tx offloading capabilities +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +List all per queue and per port Tx offloading capabilities of a port:: + + testpmd> show port (port_id) tx_offload capabilities + +show tx offloading configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +List port level and all queue level Tx offloading configuration:: + + testpmd> show port (port_id) tx_offload configuration + Configuration Functions ----------------------- @@ -1031,6 +1055,13 @@ By default, GSO is disabled for all ports. testpmd> csum set tcp hw + UDP GSO is the same as IP fragmentation, which treats the UDP header + as the payload and does not modify it during segmentation. That is, + after UDP GSO, only the first output fragment has the original UDP + header. Therefore, users need to enable HW IP checksum calculation + and SW UDP checksum calculation for GSO-enabled ports, if they want + correct checksums for UDP/IPv4 packets. + set gso segsz ~~~~~~~~~~~~~ @@ -1444,6 +1475,101 @@ Reset ptype mapping table:: testpmd> ptype mapping reset (port_id) +config per port Rx offloading +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Enable or disable a per port Rx offloading on all Rx queues of a port:: + + testpmd> port config (port_id) rx_offload (offloading) on|off + +* ``offloading``: can be any of these offloading capability: + vlan_strip, ipv4_cksum, udp_cksum, tcp_cksum, tcp_lro, + qinq_strip, outer_ipv4_cksum, macsec_strip, + header_split, vlan_filter, vlan_extend, jumbo_frame, + crc_strip, scatter, timestamp, security, keep_crc + +This command should be run when the port is stopped, or else it will fail. + +config per queue Rx offloading +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Enable or disable a per queue Rx offloading only on a specific Rx queue:: + + testpmd> port (port_id) rxq (queue_id) rx_offload (offloading) on|off + +* ``offloading``: can be any of these offloading capability: + vlan_strip, ipv4_cksum, udp_cksum, tcp_cksum, tcp_lro, + qinq_strip, outer_ipv4_cksum, macsec_strip, + header_split, vlan_filter, vlan_extend, jumbo_frame, + crc_strip, scatter, timestamp, security, keep_crc + +This command should be run when the port is stopped, or else it will fail. + +config per port Tx offloading +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Enable or disable a per port Tx offloading on all Tx queues of a port:: + + testpmd> port config (port_id) tx_offload (offloading) on|off + +* ``offloading``: can be any of these offloading capability: + vlan_insert, ipv4_cksum, udp_cksum, tcp_cksum, + sctp_cksum, tcp_tso, udp_tso, outer_ipv4_cksum, + qinq_insert, vxlan_tnl_tso, gre_tnl_tso, + ipip_tnl_tso, geneve_tnl_tso, macsec_insert, + mt_lockfree, multi_segs, mbuf_fast_free, security + +This command should be run when the port is stopped, or else it will fail. 
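+As a quick illustration (the port number and the offload name here are arbitrary,
+chosen only for the example), toggling a per port Tx offload follows the usual
+stop/configure/start sequence::
+
+   testpmd> port stop 0
+   testpmd> port config 0 tx_offload multi_segs on
+   testpmd> port start 0
+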
+
+config per queue Tx offloading
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Enable or disable a per queue Tx offloading only on a specific Tx queue::
+
+   testpmd> port (port_id) txq (queue_id) tx_offload (offloading) on|off
+
+* ``offloading``: can be any of these offloading capability:
+               vlan_insert, ipv4_cksum, udp_cksum, tcp_cksum,
+               sctp_cksum, tcp_tso, udp_tso, outer_ipv4_cksum,
+               qinq_insert, vxlan_tnl_tso, gre_tnl_tso,
+               ipip_tnl_tso, geneve_tnl_tso, macsec_insert,
+               mt_lockfree, multi_segs, mbuf_fast_free, security
+
+This command should be run when the port is stopped, or else it will fail.
+
+Config VXLAN Encap outer layers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Configure the outer layer to encapsulate a packet inside a VXLAN tunnel::
+
+ set vxlan ip-version (ipv4|ipv6) vni (vni) udp-src (udp-src) \
+ udp-dst (udp-dst) ip-src (ip-src) ip-dst (ip-dst) eth-src (eth-src) \
+ eth-dst (eth-dst)
+
+ set vxlan-with-vlan ip-version (ipv4|ipv6) vni (vni) udp-src (udp-src) \
+ udp-dst (udp-dst) ip-src (ip-src) ip-dst (ip-dst) vlan-tci (vlan-tci) \
+ eth-src (eth-src) eth-dst (eth-dst)
+
+Those commands will set an internal configuration inside testpmd; any following
+flow rule using the action vxlan_encap will use the last configuration set.
+To have a different encapsulation header, one of those commands must be called
+before the flow rule creation.
+
+Config NVGRE Encap outer layers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Configure the outer layer to encapsulate a packet inside a NVGRE tunnel::
+
+ set nvgre ip-version (ipv4|ipv6) tni (tni) ip-src (ip-src) ip-dst (ip-dst) \
+        eth-src (eth-src) eth-dst (eth-dst)
+ set nvgre-with-vlan ip-version (ipv4|ipv6) tni (tni) ip-src (ip-src) \
+        ip-dst (ip-dst) vlan-tci (vlan-tci) eth-src (eth-src) eth-dst (eth-dst)
+
+Those commands will set an internal configuration inside testpmd; any following
+flow rule using the action nvgre_encap will use the last configuration set.
+To have a different encapsulation header, one of those commands must be called
+before the flow rule creation.
+
 Port Functions
 --------------
@@ -1623,6 +1749,15 @@ Close all ports or a specific port::
    testpmd> port close (port_id|all)
+port config - queue ring size
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Configure a rx/tx queue ring size::
+
+   testpmd> port (port_id) (rxq|txq) (queue_id) ring_size (value)
+
+This only takes effect after a command that (re-)starts the port or a command that sets up the specific queue.
+
 port start/stop queue
 ~~~~~~~~~~~~~~~~~~~~~
@@ -1630,6 +1765,13 @@ Start/stop a rx/tx queue on a specific port::
    testpmd> port (port_id) (rxq|txq) (queue_id) (start|stop)
+port setup queue
+~~~~~~~~~~~~~~~~~~~~~
+
+Setup a rx/tx queue on a specific port::
+
+   testpmd> port (port_id) (rxq|txq) (queue_id) setup
+
This only takes effect when the port is started.
 port config - speed
@@ -1751,10 +1893,12 @@ port config - RSS
 Set the RSS (Receive Side Scaling) mode on or off::
-   testpmd> port config all rss (all|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none)
+   testpmd> port config all rss (all|default|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none)
 RSS is on by default.
+The ``all`` option is equivalent to ip|tcp|udp|sctp|ether.
+The ``default`` option enables all supported RSS types reported by device info.
 The ``none`` option is equivalent to the ``--disable-rss`` command-line option.
 port config - RSS Reta
@@ -1856,6 +2000,12 @@ where:
 * ``pctype_id``: hardware packet classification types.
 * ``field_idx``: hardware field index.
+port config udp_tunnel_port
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Add/remove UDP tunnel port for VXLAN/GENEVE tunneling protocols::
+
+    testpmd> port config (port_id) udp_tunnel_port add|rm vxlan|geneve (udp_port)
+
 Link Bonding Functions
 ----------------------
@@ -1871,7 +2021,7 @@ Create a new bonding device::
 For example, to create a bonded device in mode 1 on socket 0::
-   testpmd> create bonded 1 0
+   testpmd> create bonded device 1 0
    created new bonded device (port X)
 add bonding slave
@@ -2483,6 +2633,16 @@ success depends on the port support for this operation, as advertised through
 the port capability set. This function is valid for all nodes of the traffic
 management hierarchy except root node.
+Suspend port traffic management hierarchy node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+   testpmd> suspend port tm node (port_id) (node_id)
+
+Resume port traffic management hierarchy node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+   testpmd> resume port tm node (port_id) (node_id)
+
 Commit port traffic management hierarchy
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -2496,8 +2656,8 @@ where:
   call failure. On the other hand, hierarchy is preserved when this parameter
   is equal to zero.
-Set port traffic management default hierarchy (tm forwarding mode)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Set port traffic management default hierarchy (softnic forwarding mode)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 set the traffic management default hierarchy on the port::
@@ -2970,14 +3130,14 @@ following sections.
 - Check whether a flow rule can be created::
    flow validate {port_id}
-       [group {group_id}] [priority {level}] [ingress] [egress]
+       [group {group_id}] [priority {level}] [ingress] [egress] [transfer]
        pattern {item} [/ {item} [...]] / end
        actions {action} [/ {action} [...]] / end
 - Create a flow rule::
    flow create {port_id}
-       [group {group_id}] [priority {level}] [ingress] [egress]
+       [group {group_id}] [priority {level}] [ingress] [egress] [transfer]
        pattern {item} [/ {item} [...]] / end
        actions {action} [/ {action} [...]] / end
@@ -3010,7 +3170,7 @@ underlying device in its current state but stops short of creating it. It is
 bound to ``rte_flow_validate()``::
    flow validate {port_id}
-      [group {group_id}] [priority {level}] [ingress] [egress]
+      [group {group_id}] [priority {level}] [ingress] [egress] [transfer]
       pattern {item} [/ {item} [...]] / end
       actions {action} [/ {action} [...]] / end
@@ -3047,7 +3207,7 @@ Creating flow rules
 to ``rte_flow_create()``::
    flow create {port_id}
-      [group {group_id}] [priority {level}] [ingress] [egress]
+      [group {group_id}] [priority {level}] [ingress] [egress] [transfer]
      pattern {item} [/ {item} [...]] / end
      actions {action} [/ {action} [...]] / end
@@ -3061,7 +3221,7 @@ Otherwise it will show an error message of the form::
 Parameters describe in the following order:
-- Attributes (*group*, *priority*, *ingress*, *egress* tokens).
+- Attributes (*group*, *priority*, *ingress*, *egress*, *transfer* tokens).
 - A matching pattern, starting with the *pattern* token and terminated by an
   *end* pattern item.
 - Actions, starting with the *actions* token and terminated by an *end*
@@ -3089,6 +3249,7 @@ specified before the ``pattern`` token.
 - ``priority {level}``: priority level within group.
 - ``ingress``: rule applies to ingress traffic.
 - ``egress``: rule applies to egress traffic.
+- ``transfer``: apply rule directly to endpoints found in pattern.
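+
+For illustration only (whether ``transfer`` is honoured depends on the underlying
+device), a rule applied at the e-switch level that redirects matched traffic to
+DPDK port 1 could look like::
+
+   testpmd> flow create 0 ingress transfer pattern eth / end actions port_id id 1 / end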
Each instance of an attribute specified several times overrides the previous
value as shown below (group 4 is used)::
@@ -3201,16 +3362,24 @@ This section lists supported pattern items and their attributes, if any.
   - ``num {unsigned}``: number of layers covered.
-- ``pf``: match packets addressed to the physical function.
+- ``pf``: match traffic from/to the physical function.
-- ``vf``: match packets addressed to a virtual function ID.
+- ``vf``: match traffic from/to a virtual function ID.
-  - ``id {unsigned}``: destination VF ID.
+  - ``id {unsigned}``: VF ID.
-- ``port``: device-specific physical port index to use.
+- ``phy_port``: match traffic from/to a specific physical port.
   - ``index {unsigned}``: physical port index.
+- ``port_id``: match traffic from/to a given DPDK port ID.
+
+  - ``id {unsigned}``: DPDK port ID.
+
+- ``mark``: match value set in previously matched flow rule using the mark action.
+
+  - ``id {unsigned}``: arbitrary integer value.
+
 - ``raw``: match an arbitrary byte string.
   - ``relative {boolean}``: look for pattern after the previous item.
@@ -3223,15 +3392,15 @@ This section lists supported pattern items and their attributes, if any.
   - ``dst {MAC-48}``: destination MAC.
   - ``src {MAC-48}``: source MAC.
-  - ``type {unsigned}``: EtherType.
+  - ``type {unsigned}``: EtherType or TPID.
 - ``vlan``: match 802.1Q/ad VLAN tag.
-  - ``tpid {unsigned}``: tag protocol identifier.
   - ``tci {unsigned}``: tag control information.
   - ``pcp {unsigned}``: priority code point.
   - ``dei {unsigned}``: drop eligible indicator.
   - ``vid {unsigned}``: VLAN identifier.
+  - ``inner_type {unsigned}``: inner EtherType or TPID.
 - ``ipv4``: match IPv4 header.
@@ -3305,6 +3474,48 @@ This section lists supported pattern items and their attributes, if any.
   - ``vni {unsigned}``: virtual network identifier.
   - ``protocol {unsigned}``: protocol type.
+- ``vxlan-gpe``: match VXLAN-GPE header.
+
+  - ``vni {unsigned}``: VXLAN-GPE identifier.
+
+- ``arp_eth_ipv4``: match ARP header for Ethernet/IPv4.
+
+  - ``sha {MAC-48}``: sender hardware address.
+  - ``spa {ipv4 address}``: sender IPv4 address.
+  - ``tha {MAC-48}``: target hardware address.
+  - ``tpa {ipv4 address}``: target IPv4 address.
+
+- ``ipv6_ext``: match presence of any IPv6 extension header.
+
+  - ``next_hdr {unsigned}``: next header.
+
+- ``icmp6``: match any ICMPv6 header.
+
+  - ``type {unsigned}``: ICMPv6 type.
+  - ``code {unsigned}``: ICMPv6 code.
+
+- ``icmp6_nd_ns``: match ICMPv6 neighbor discovery solicitation.
+
+  - ``target_addr {ipv6 address}``: target address.
+
+- ``icmp6_nd_na``: match ICMPv6 neighbor discovery advertisement.
+
+  - ``target_addr {ipv6 address}``: target address.
+
+- ``icmp6_nd_opt``: match presence of any ICMPv6 neighbor discovery option.
+
+  - ``type {unsigned}``: ND option type.
+
+- ``icmp6_nd_opt_sla_eth``: match ICMPv6 neighbor discovery source Ethernet
+  link-layer address option.
+
+  - ``sla {MAC-48}``: source Ethernet LLA.
+
+- ``icmp6_nd_opt_tla_eth``: match ICMPv6 neighbor discovery target Ethernet
+  link-layer address option.
+
+  - ``tla {MAC-48}``: target Ethernet LLA.
+
 Actions list
 ^^^^^^^^^^^^
@@ -3361,10 +3572,6 @@ actions can sometimes be combined when the end result is unambiguous::
    drop / queue index 6 / end # drop has no effect
-::
-
-   drop / dup index 6 / end # same as above
-
 ::
    queue index 6 / rss queues 6 7 8 / end # queue has no effect
@@ -3386,6 +3593,10 @@ This section lists supported actions and their attributes, if any.
 - ``passthru``: let subsequent rule process matched packets.
+- ``jump``: redirect traffic to group on device. + + - ``group {unsigned}``: group to redirect to. + - ``mark``: attach 32 bit value to packets. - ``id {unsigned}``: 32 bit value to return with packets. @@ -3400,20 +3611,91 @@ This section lists supported actions and their attributes, if any. - ``count``: enable counters for this rule. -- ``dup``: duplicate packets to a given queue index. +- ``rss``: spread packets among several queues. - - ``index {unsigned}``: queue index to duplicate packets to. + - ``func {hash function}``: RSS hash function to apply, allowed tokens are + the same as `set_hash_global_config`_. -- ``rss``: spread packets among several queues. + - ``level {unsigned}``: encapsulation level for ``types``. + + - ``types [{RSS hash type} [...]] end``: specific RSS hash types, allowed + tokens are the same as `set_hash_input_set`_, except that an empty list + does not disable RSS but instead requests unspecified "best-effort" + settings. + + - ``key {string}``: RSS hash key, overrides ``key_len``. + + - ``key_len {unsigned}``: RSS hash key length in bytes, can be used in + conjunction with ``key`` to pad or truncate it. - ``queues [{unsigned} [...]] end``: queue indices to use. -- ``pf``: redirect packets to physical device function. +- ``pf``: direct traffic to physical function. -- ``vf``: redirect packets to virtual device function. +- ``vf``: direct traffic to a virtual function ID. - ``original {boolean}``: use original VF ID if possible. - - ``id {unsigned}``: VF ID to redirect packets to. + - ``id {unsigned}``: VF ID. + +- ``phy_port``: direct packets to physical port index. + + - ``original {boolean}``: use original port index if possible. + - ``index {unsigned}``: physical port index. + +- ``port_id``: direct matching traffic to a given DPDK port ID. + + - ``original {boolean}``: use original DPDK port ID if possible. + - ``id {unsigned}``: DPDK port ID. + +- ``of_set_mpls_ttl``: OpenFlow's ``OFPAT_SET_MPLS_TTL``. + + - ``mpls_ttl``: MPLS TTL. + +- ``of_dec_mpls_ttl``: OpenFlow's ``OFPAT_DEC_MPLS_TTL``. + +- ``of_set_nw_ttl``: OpenFlow's ``OFPAT_SET_NW_TTL``. + + - ``nw_ttl``: IP TTL. + +- ``of_dec_nw_ttl``: OpenFlow's ``OFPAT_DEC_NW_TTL``. + +- ``of_copy_ttl_out``: OpenFlow's ``OFPAT_COPY_TTL_OUT``. + +- ``of_copy_ttl_in``: OpenFlow's ``OFPAT_COPY_TTL_IN``. + +- ``of_pop_vlan``: OpenFlow's ``OFPAT_POP_VLAN``. + +- ``of_push_vlan``: OpenFlow's ``OFPAT_PUSH_VLAN``. + + - ``ethertype``: Ethertype. + +- ``of_set_vlan_vid``: OpenFlow's ``OFPAT_SET_VLAN_VID``. + + - ``vlan_vid``: VLAN id. + +- ``of_set_vlan_pcp``: OpenFlow's ``OFPAT_SET_VLAN_PCP``. + + - ``vlan_pcp``: VLAN priority. + +- ``of_pop_mpls``: OpenFlow's ``OFPAT_POP_MPLS``. + + - ``ethertype``: Ethertype. + +- ``of_push_mpls``: OpenFlow's ``OFPAT_PUSH_MPLS``. + + - ``ethertype``: Ethertype. + +- ``vxlan_encap``: Performs a VXLAN encapsulation, outer layer configuration + is done through `Config VXLAN Encap outer layers`_. + +- ``vxlan_decap``: Performs a decapsulation action by stripping all headers of + the VXLAN tunnel network overlay from the matched flow. + +- ``nvgre_encap``: Performs a NVGRE encapsulation, outer layer configuration + is done through `Config NVGRE Encap outer layers`_. + +- ``nvgre_decap``: Performs a decapsulation action by stripping all headers of + the NVGRE tunnel network overlay from the matched flow. 
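+
+As a hypothetical illustration of the ``jump`` action (the group numbers are
+arbitrary and PMD support varies), matched traffic can be handed over to another
+group holding more specific rules::
+
+   testpmd> flow create 0 ingress pattern eth / end actions jump group 1 / end
+   testpmd> flow create 0 ingress group 1 pattern eth / ipv4 / end actions queue index 3 / end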
Destroying flow rules
~~~~~~~~~~~~~~~~~~~~~
@@ -3679,3 +3961,122 @@ Validate and create a QinQ rule on port 0 to steer traffic to a queue on the hos
    ID      Group   Prio    Attr    Rule
    0       0       0       i-      ETH VLAN VLAN=>VF QUEUE
    1       0       0       i-      ETH VLAN VLAN=>PF QUEUE
+
+Sample VXLAN encapsulation rule
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+VXLAN encapsulation outer layers have default values pre-configured in the testpmd
+source code; those can be changed by using the following commands
+
+IPv4 VXLAN outer header::
+
+ testpmd> set vxlan ip-version ipv4 vni 4 udp-src 4 udp-dst 4 ip-src 127.0.0.1
+        ip-dst 128.0.0.1 eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions vxlan_encap /
+        queue index 0 / end
+
+ testpmd> set vxlan-with-vlan ip-version ipv4 vni 4 udp-src 4 udp-dst 4 ip-src
+        127.0.0.1 ip-dst 128.0.0.1 vlan-tci 34 eth-src 11:11:11:11:11:11
+        eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions vxlan_encap /
+        queue index 0 / end
+
+IPv6 VXLAN outer header::
+
+ testpmd> set vxlan ip-version ipv6 vni 4 udp-src 4 udp-dst 4 ip-src ::1
+        ip-dst ::2222 eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions vxlan_encap /
+        queue index 0 / end
+
+ testpmd> set vxlan-with-vlan ip-version ipv6 vni 4 udp-src 4 udp-dst 4
+        ip-src ::1 ip-dst ::2222 vlan-tci 34 eth-src 11:11:11:11:11:11
+        eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions vxlan_encap /
+        queue index 0 / end
+
+Sample NVGRE encapsulation rule
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+NVGRE encapsulation outer layers have default values pre-configured in the testpmd
+source code; those can be changed by using the following commands
+
+IPv4 NVGRE outer header::
+
+ testpmd> set nvgre ip-version ipv4 tni 4 ip-src 127.0.0.1 ip-dst 128.0.0.1
+        eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions nvgre_encap /
+        queue index 0 / end
+
+ testpmd> set nvgre-with-vlan ip-version ipv4 tni 4 ip-src 127.0.0.1
+        ip-dst 128.0.0.1 vlan-tci 34 eth-src 11:11:11:11:11:11
+        eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions nvgre_encap /
+        queue index 0 / end
+
+IPv6 NVGRE outer header::
+
+ testpmd> set nvgre ip-version ipv6 tni 4 ip-src ::1 ip-dst ::2222
+        eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions nvgre_encap /
+        queue index 0 / end
+
+ testpmd> set nvgre-with-vlan ip-version ipv6 tni 4 ip-src ::1 ip-dst ::2222
+        vlan-tci 34 eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions nvgre_encap /
+        queue index 0 / end
+
+BPF Functions
+--------------
+
+The following sections show functions to load/unload eBPF based filters.
+
+bpf-load
+~~~~~~~~
+
+Load an eBPF program as a callback for a particular RX/TX queue::
+
+   testpmd> bpf-load rx|tx (portid) (queueid) (load-flags) (bpf-prog-filename)
+
+The available load-flags are:
+
+* ``J``: use JIT generated native code, otherwise BPF interpreter will be used.
+
+* ``M``: assume input parameter is a pointer to rte_mbuf, otherwise assume it is a pointer to first segment's data.
+
+* ``-``: none.
+
+.. note::
+
+   You'll need clang v3.7 or above to build the BPF program you'd like to load.
+
+For example:
+
+.. code-block:: console
+
+   cd test/bpf
+   clang -O2 -target bpf -c t1.c
+
+Then to load (and JIT compile) t1.o at RX queue 0, port 1:
+
+.. code-block:: console
+
+   testpmd> bpf-load rx 1 0 J ./dpdk.org/test/bpf/t1.o
+
+To load (not JITed) t1.o at TX queue 0, port 0:
+
+.. code-block:: console
+
+   testpmd> bpf-load tx 0 0 - ./dpdk.org/test/bpf/t1.o
+
+bpf-unload
+~~~~~~~~~~
+
+Unload a previously loaded eBPF program for a particular RX/TX queue::
+
+   testpmd> bpf-unload rx|tx (portid) (queueid)
+
+For example, to unload the BPF filter from TX queue 0, port 0:
+
+.. code-block:: console
+
+   testpmd> bpf-unload tx 0 0
diff --git a/doc/guides/tools/cryptoperf.rst b/doc/guides/tools/cryptoperf.rst
index 3c0e7d94..c366af4e 100644
--- a/doc/guides/tools/cryptoperf.rst
+++ b/doc/guides/tools/cryptoperf.rst
@@ -181,7 +181,7 @@ The following are the application command-line options:
            crypto_dpaa2_sec
            crypto_armv8
            crypto_scheduler
-           crypto_mrvl
+           crypto_mvsam
 * ``--optype ``
diff --git a/doc/guides/tools/testbbdev.rst b/doc/guides/tools/testbbdev.rst
index 5c7112de..234a64f5 100644
--- a/doc/guides/tools/testbbdev.rst
+++ b/doc/guides/tools/testbbdev.rst
@@ -21,7 +21,7 @@ The bbdevice drivers PMD which should be tested can be enabled by setting
 ``CONFIG_RTE_LIBRTE_PMD_=y``
-Setting example for (*turbo_sw*) PMD
+Setting example for (*baseband_turbo_sw*) PMD
 ``CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=y``
@@ -70,30 +70,6 @@ The following are the command-line options:
 ``-c TEST_CASE [TEST_CASE ...], --test_cases TEST_CASE [TEST_CASE ...]``
  Defines test cases to run. If not specified all available tests are run.
-  The following tests can be run:
-
-  * unittest
-    Small unit tests witch check basic functionality of bbdev library.
-  * latency
-    Test calculates three latency metrics:
-
-    * offload_latency_tc
-      measures the cost of offloading enqueue and dequeue operations.
-    * offload_latency_empty_q_tc
-      measures the cost of offloading a dequeue operation from an empty queue.
-      checks how long last dequeueing if there is no operations to dequeue
-    * operation_latency_tc
-      measures the time difference from the first attempt to enqueue till the
-      first successful dequeue.
-  * validation
-    Test do enqueue on given vector and compare output after dequeueing.
-  * throughput
-    Test measures the achieved throughput on the available lcores.
-    Results are printed in million operations per second and million bits per second.
-  * interrupt
-    The same test as 'throughput' but uses interrupts instead of PMD to perform
-    the dequeue.
-
 **Example usage:**
 ``./test-bbdev.py -c validation``
@@ -105,18 +81,18 @@ The following are the command-line options:
 ``-v TEST_VECTOR [TEST_VECTOR ...], --test_vector TEST_VECTOR [TEST_VECTOR ...]``
  Specifies paths to the test vector files. If not specified path is set based
  on *$RTE_SDK* environment variable concatenated with
- "*/app/test-bbdev/test_vectors/bbdev_vector_null.data*" and indicates default
+ "*/app/test-bbdev/test_vectors/bbdev_null.data*" and indicates default
  data file.
 **Example usage:**
- ``./test-bbdev.py -v app/test-bbdev/test_vectors/bbdev_vector_td_test1.data``
- Fills vector based on bbdev_vector_td_test1.data file and runs all tests
+ ``./test-bbdev.py -v app/test-bbdev/test_vectors/turbo_dec_test1.data``
+ Fills vector based on turbo_dec_test1.data file and runs all tests
- ``./test-bbdev.py -v bbdev_vector_td_test1.data bbdev_vector_te_test2.data``
+ ``./test-bbdev.py -v turbo_dec_test1.data turbo_enc_test2.data``
 The bbdev test app is executed twice. First time vector is filled based on
- *bbdev_vector_td_test1.data* file and second time based on
- *bbdev_vector_te_test2.data* file. For both executions all tests are run.
+ *turbo_dec_test1.data* file and second time based on
+ *turbo_enc_test2.data* file. For both executions all tests are run.
 ``-n NUM_OPS, --num_ops NUM_OPS``
  Specifies number of operations to process on device. If not specified num_ops
@@ -130,8 +106,54 @@ The following are the command-line options:
  Specifies operations enqueue/dequeue burst size. If not specified burst_size is
  set to 32. Maximum is 512.
-
-Parameter globbing
+Test Cases
+~~~~~~~~~~
+
+There are 6 main test cases that can be executed using the testbbdev tool:
+
+* Sanity checks [-c unittest]
+    - Performs sanity checks on BBDEV interface, validating basic functionality
+
+* Validation tests [-c validation]
+    - Performs full operation of enqueue and dequeue
+    - Compares the dequeued data buffer with the expected values in the test
+      vector (TV) being used
+    - Fails if any dequeued value does not match the data in the TV
+
+* Offload Cost measurement [-c offload]
+    - Measures the CPU cycles consumed from the receipt of a user enqueue
+      until it is put on the device queue
+    - The test measures 4 metrics
+        (a) *SW Enq Offload Cost*: Software only enqueue offload cost, the cycle
+            counts and time (us) from the point the enqueue API is called until
+            the point the operation is put on the accelerator queue.
+        (b) *Acc Enq Offload Cost*: The cycle count and time (us) from the
+            point the operation is put on the accelerator queue until the return
+            from enqueue.
+        (c) *SW Deq Offload Cost*: Software dequeue cost, the cycle counts and
+            time (us) consumed to dequeue one operation.
+        (d) *Empty Queue Enq Offload Cost*: The cycle count and time (us)
+            consumed to dequeue from an empty queue.
+
+* Latency measurement [-c latency]
+    - Measures the time consumed from the first enqueue until the first
+      appearance of a dequeued result
+    - This measurement represents the full latency of a bbdev operation
+      (encode or decode) to execute
+
+* Poll-mode Throughput measurement [-c throughput]
+    - Performs full operation of enqueue and dequeue
+    - Executes in poll mode
+    - Measures the achieved throughput on a subset or all available CPU cores
+    - Dequeued data is not validated against expected values stored in TV
+    - Results are printed in million operations per second and million bits
+      per second
+
+* Interrupt-mode Throughput [-c interrupt]
+    - Similar to Throughput test case, but using interrupts. No polling.
+
+
+Parameter Globbing
 ~~~~~~~~~~~~~~~~~~
 Thanks to the globbing functionality in python test-bbdev.py script allows to
 run tests with different set of vector files without giving all of them explicit.
 .. code-block:: console
-  ./test-bbdev.py -v app/test-bbdev/test_vectors/bbdev_vector_*.data
+  ./test-bbdev.py -v app/test-bbdev/test_vectors/turbo_*_c*_k*_r*_e*_*.data
 It runs all tests with following vectors:
-- ``bbdev_vector_null.data``
+- ``bbdev_null.data``
+
+- ``turbo_dec_c1_k6144_r0_e34560_sbd_negllr.data``
+
+- ``turbo_enc_c1_k40_r0_e1196_rm.data``
+
+- ``turbo_enc_c2_k5952_r0_e17868_crc24b.data``
+
+- ``turbo_dec_c1_k40_r0_e17280_sbd_negllr.data``
+
+- ``turbo_dec_c1_k6144_r0_e34560_sbd_posllr.data``
+
+- ``turbo_enc_c1_k40_r0_e272_rm.data``
+
+- ``turbo_enc_c3_k4800_r2_e14412_crc24b.data``
+
+- ``turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data``
+
+- ``turbo_dec_c2_k3136_r0_e4920_sbd_negllr_crc24b.data``
+
+- ``turbo_enc_c1_k6144_r0_e120_rm_rvidx.data``
+
+- ``turbo_enc_c4_k4800_r2_e14412_crc24b.data``
+
+- ``turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_low_snr.data``
+
+- ``turbo_dec_c2_k3136_r0_e4920_sbd_negllr.data``
+
+- ``turbo_enc_c1_k6144_r0_e18444.data``
+
+- ``turbo_dec_c1_k6144_r0_e34560_negllr.data``
+
+- ``turbo_enc_c1_k40_r0_e1190_rm.data``
+
+- ``turbo_enc_c1_k6144_r0_e18448_crc24a.data``
-
-- ``bbdev_vector_td_default.data``
+- ``turbo_dec_c1_k6144_r0_e34560_posllr.data``
-
-- ``bbdev_vector_te_default.data``
+- ``turbo_enc_c1_k40_r0_e1194_rm.data``
+
+- ``turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data``
 .. code-block:: console
-  ./test-bbdev.py -v app/test-bbdev/test_vectors/bbdev_vector_t?_default.data
+  ./test-bbdev.py -v app/test-bbdev/turbo_*_default.data
-It runs all tests with "default" vectors:
+It runs all tests with "default" vectors.
-- ``bbdev_vector_te_default.data``
+* ``turbo_dec_default.data`` is a soft link to
+  ``turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data``
-- ``bbdev_vector_td_default.data``
+* ``turbo_enc_default.data`` is a soft link to
+  ``turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data``
 Running Tests
@@ -174,9 +233,27 @@ x86_64-native-linuxapp-icc target:
  |-- app
     |-- test-bbdev
         |-- test_vectors
-            |-- bbdev_vector_null.data
-            |-- bbdev_vector_td_default.data
-            |-- bbdev_vector_te_default.data
+            |-- bbdev_null.data
+            |-- turbo_dec_c1_k6144_r0_e34560_sbd_negllr.data
+            |-- turbo_enc_c1_k40_r0_e1196_rm.data
+            |-- turbo_enc_c2_k5952_r0_e17868_crc24b.data
+            |-- turbo_dec_c1_k40_r0_e17280_sbd_negllr.data
+            |-- turbo_dec_c1_k6144_r0_e34560_sbd_posllr.data
+            |-- turbo_enc_c1_k40_r0_e272_rm.data
+            |-- turbo_enc_c3_k4800_r2_e14412_crc24b.data
+            |-- turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data
+            |-- turbo_dec_c2_k3136_r0_e4920_sbd_negllr_crc24b.data
+            |-- turbo_enc_c1_k6144_r0_e120_rm_rvidx.data
+            |-- turbo_enc_c4_k4800_r2_e14412_crc24b.data
+            |-- turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_low_snr.data
+            |-- turbo_dec_c2_k3136_r0_e4920_sbd_negllr.data
+            |-- turbo_enc_c1_k6144_r0_e18444.data
+            |-- turbo_dec_c1_k6144_r0_e34560_negllr.data
+            |-- turbo_enc_c1_k40_r0_e1190_rm.data
+            |-- turbo_enc_c1_k6144_r0_e18448_crc24a.data
+            |-- turbo_dec_c1_k6144_r0_e34560_posllr.data
+            |-- turbo_enc_c1_k40_r0_e1194_rm.data
+            |-- turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data
 |-- x86_64-native-linuxapp-icc
     |-- app
@@ -188,43 +265,43 @@ All bbdev devices
 .. code-block:: console
  ./test-bbdev.py -p ../../x86_64-native-linuxapp-icc/app/testbbdev
-  -v ./test_vectors/bbdev_vector_td_default.data
+  -v turbo_dec_default.data
 It runs all available tests using the test vector filled based on
-*bbdev_vector_td_default.data* file.
+*turbo_dec_default.data* file.
By default number of operations to process on device is set to 32, timeout is
set to 300s and operations enqueue/dequeue burst size is set to 32.
-Moreover a bbdev (*bbdev_null*) device will be created.
+Moreover, a bbdev (*baseband_null*) device will be created.
-bbdev turbo_sw device
-~~~~~~~~~~~~~~~~~~~~~
+baseband turbo_sw device
+~~~~~~~~~~~~~~~~~~~~~~~~
 .. code-block:: console
  ./test-bbdev.py -p ../../x86_64-native-linuxapp-icc/app/testbbdev
-  -e="--vdev=turbo_sw" -t 120 -c validation
-  -v ./test_vectors/bbdev_vector_t?_default.data -n 64 -b 8 32
+  -e="--vdev=baseband_turbo_sw" -t 120 -c validation
+  -v ./test_vectors/turbo_* -n 64 -b 8 32
 It runs **validation** test for each vector file that matches the given pattern.
 Number of operations to process on device is set to 64 and operations timeout is
 set to 120s and enqueue/dequeue burst size is set to 8 and to 32.
-Moreover a bbdev (*turbo_sw*) device will be created.
+Moreover, a bbdev (*baseband_turbo_sw*) device will be created.
 bbdev null device
 ~~~~~~~~~~~~~~~~~
-Executing bbdev null device with *bbdev_vector_null.data* helps in measuring the
+Executing bbdev null device with *bbdev_null.data* helps in measuring the
 overhead introduced by the bbdev framework.
 .. code-block:: console
-  ./test-bbdev.py -e="--vdev=bbdev_null0"
-   -v ./test_vectors/bbdev_vector_null.data
+  ./test-bbdev.py -e="--vdev=baseband_null0"
+   -v ./test_vectors/bbdev_null.data
 **Note:**
-bbdev_null device does not have to be defined explicitly as it is created by default.
+baseband_null device does not have to be defined explicitly as it is created by default.
diff --git a/doc/guides/tools/testeventdev.rst b/doc/guides/tools/testeventdev.rst
index 77480ffe..46effd87 100644
--- a/doc/guides/tools/testeventdev.rst
+++ b/doc/guides/tools/testeventdev.rst
@@ -123,6 +123,36 @@ The following are the application command-line options:
        Use ethernet device as producer.
+* ``--prod_type_timerdev``
+
+        Use event timer adapter as producer.
+
+ * ``--prod_type_timerdev_burst``
+
+        Use burst mode event timer adapter as producer.
+
+ * ``--timer_tick_nsec``
+
+        Used to dictate the number of nanoseconds between bucket traversals of the
+        event timer adapter. Refer to `rte_event_timer_adapter_conf`.
+
+ * ``--max_tmo_nsec``
+
+        Used to configure the event timer adapter's max arm timeout in nanoseconds.
+
+ * ``--expiry_nsec``
+
+        Dictates the number of nanoseconds after which the event timer expires.
+
+ * ``--nb_timers``
+
+        Number of event timers each producer core will generate.
+
+ * ``--nb_timer_adptrs``
+
+        Number of event timer adapters to be used. Each adapter is used in a
+        round-robin manner by the producer cores.
+
 Eventdev Tests
 --------------
@@ -347,6 +377,13 @@ Supported application command line options are following::
         --fwd_latency
         --queue_priority
         --prod_type_ethdev
+        --prod_type_timerdev_burst
+        --prod_type_timerdev
+        --timer_tick_nsec
+        --max_tmo_nsec
+        --expiry_nsec
+        --nb_timers
+        --nb_timer_adptrs
 Example
 ^^^^^^^
@@ -365,6 +402,14 @@ Example command to run perf queue test with ethernet ports:
    sudo build/app/dpdk-test-eventdev --vdev=event_sw0 -- \
         --test=perf_queue --plcores=2 --wlcore=3 --stlist=p --prod_type_ethdev
+Example command to run perf queue test with event timer adapter:
+
+..
code-block:: console + + sudo build/app/dpdk-test-eventdev --vdev="event_octeontx" -- \ + --wlcores 4 --plcores 12 --test perf_queue --stlist=a \ + --prod_type_timerdev --fwd_latency + PERF_ATQ Test ~~~~~~~~~~~~~~~ @@ -431,6 +476,13 @@ Supported application command line options are following:: --worker_deq_depth --fwd_latency --prod_type_ethdev + --prod_type_timerdev_burst + --prod_type_timerdev + --timer_tick_nsec + --max_tmo_nsec + --expiry_nsec + --nb_timers + --nb_timer_adptrs Example ^^^^^^^ @@ -442,6 +494,14 @@ Example command to run perf ``all types queue`` test: sudo build/app/dpdk-test-eventdev --vdev=event_octeontx -- \ --test=perf_atq --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0 +Example command to run perf ``all types queue`` test with event timer adapter: + +.. code-block:: console + + sudo build/app/dpdk-test-eventdev --vdev="event_octeontx" -- \ + --wlcores 4 --plcores 12 --test perf_atq --verbose 20 \ + --stlist=a --prod_type_timerdev --fwd_latency + PIPELINE_QUEUE Test ~~~~~~~~~~~~~~~~~~~ diff --git a/drivers/Makefile b/drivers/Makefile index ee65c87b..75660765 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -3,18 +3,23 @@ include $(RTE_SDK)/mk/rte.vars.mk +DIRS-y += common DIRS-y += bus DIRS-y += mempool -DEPDIRS-mempool := bus +DEPDIRS-mempool := common bus DIRS-y += net -DEPDIRS-net := bus mempool -DIRS-$(CONFIG_RTE_LIBRTE_BBDEV) += bbdev -DEPDIRS-bbdev := bus mempool +DEPDIRS-net := common bus mempool +DIRS-$(CONFIG_RTE_LIBRTE_BBDEV) += baseband +DEPDIRS-baseband := common bus mempool DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto -DEPDIRS-crypto := bus mempool +DEPDIRS-crypto := common bus mempool +DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += common/qat +DEPDIRS-common/qat := bus mempool +DIRS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += compress +DEPDIRS-compress := bus mempool DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event -DEPDIRS-event := bus mempool net +DEPDIRS-event := common bus mempool net DIRS-$(CONFIG_RTE_LIBRTE_RAWDEV) += raw -DEPDIRS-raw := bus mempool net event +DEPDIRS-raw := common bus mempool net event include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/drivers/baseband/Makefile b/drivers/baseband/Makefile new file mode 100644 index 00000000..4ec83b0a --- /dev/null +++ b/drivers/baseband/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +core-libs := librte_eal librte_mbuf librte_mempool librte_ring +core-libs += librte_bbdev librte_kvargs librte_cfgfile + +DIRS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL) += null +DEPDIRS-null = $(core-libs) +DIRS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW) += turbo_sw +DEPDIRS-turbo_sw = $(core-libs) + +include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/drivers/baseband/null/Makefile b/drivers/baseband/null/Makefile new file mode 100644 index 00000000..f885a97b --- /dev/null +++ b/drivers/baseband/null/Makefile @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk +# library name +LIB = librte_pmd_bbdev_null.a + +# build flags +CFLAGS += -DALLOW_EXPERIMENTAL_API +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -lrte_kvargs +LDLIBS += -lrte_bbdev +LDLIBS += -lrte_bus_vdev + +# versioning export map +EXPORT_MAP := rte_pmd_bbdev_null_version.map + +# library version +LIBABIVER := 1 + +# library source files +SRCS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL) += bbdev_null.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git 
a/drivers/baseband/null/bbdev_null.c b/drivers/baseband/null/bbdev_null.c
new file mode 100644
index 00000000..2f251510
--- /dev/null
+++ b/drivers/baseband/null/bbdev_null.c
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_kvargs.h>
+
+#include <rte_bbdev.h>
+#include <rte_bbdev_pmd.h>
+
+#define DRIVER_NAME baseband_null
+
+/* NULL BBDev logging ID */
+static int bbdev_null_logtype;
+
+/* Helper macro for logging */
+#define rte_bbdev_log(level, fmt, ...) \
+        rte_log(RTE_LOG_ ## level, bbdev_null_logtype, fmt "\n", ##__VA_ARGS__)
+
+#define rte_bbdev_log_debug(fmt, ...) \
+        rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
+                ##__VA_ARGS__)
+
+/* Initialisation params structure that can be used by null BBDEV driver */
+struct bbdev_null_params {
+        int socket_id;  /*< Null BBDEV socket */
+        uint16_t queues_num;  /*< Null BBDEV queues number */
+};
+
+/* Acceptable params for null BBDEV devices */
+#define BBDEV_NULL_MAX_NB_QUEUES_ARG  "max_nb_queues"
+#define BBDEV_NULL_SOCKET_ID_ARG      "socket_id"
+
+static const char * const bbdev_null_valid_params[] = {
+        BBDEV_NULL_MAX_NB_QUEUES_ARG,
+        BBDEV_NULL_SOCKET_ID_ARG
+};
+
+/* private data structure */
+struct bbdev_private {
+        unsigned int max_nb_queues;  /**< Max number of queues */
+};
+
+/* queue */
+struct bbdev_queue {
+        struct rte_ring *processed_pkts;  /* Ring for processed packets */
+} __rte_cache_aligned;
+
+/* Get device info */
+static void
+info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
+{
+        struct bbdev_private *internals = dev->data->dev_private;
+
+        static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
+                RTE_BBDEV_END_OF_CAPABILITIES_LIST(),
+        };
+
+        static struct rte_bbdev_queue_conf default_queue_conf = {
+                .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
+        };
+
+        default_queue_conf.socket = dev->data->socket_id;
+
+        dev_info->driver_name = RTE_STR(DRIVER_NAME);
+        dev_info->max_num_queues = internals->max_nb_queues;
+        dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
+        dev_info->hardware_accelerated = false;
+        dev_info->max_dl_queue_priority = 0;
+        dev_info->max_ul_queue_priority = 0;
+        dev_info->default_queue_conf = default_queue_conf;
+        dev_info->capabilities = bbdev_capabilities;
+        dev_info->cpu_flag_reqs = NULL;
+        dev_info->min_alignment = 0;
+
+        rte_bbdev_log_debug("got device info from %u", dev->data->dev_id);
+}
+
+/* Release queue */
+static int
+q_release(struct rte_bbdev *dev, uint16_t q_id)
+{
+        struct bbdev_queue *q = dev->data->queues[q_id].queue_private;
+
+        if (q != NULL) {
+                rte_ring_free(q->processed_pkts);
+                rte_free(q);
+                dev->data->queues[q_id].queue_private = NULL;
+        }
+
+        rte_bbdev_log_debug("released device queue %u:%u",
+                        dev->data->dev_id, q_id);
+        return 0;
+}
+
+/* Setup a queue */
+static int
+q_setup(struct rte_bbdev *dev, uint16_t q_id,
+                const struct rte_bbdev_queue_conf *queue_conf)
+{
+        struct bbdev_queue *q;
+        char ring_name[RTE_RING_NAMESIZE];
+        snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME) "%u:%u",
+                        dev->data->dev_id, q_id);
+
+        /* Allocate the queue data structure.
*/ + q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q), + RTE_CACHE_LINE_SIZE, queue_conf->socket); + if (q == NULL) { + rte_bbdev_log(ERR, "Failed to allocate queue memory"); + return -ENOMEM; + } + + q->processed_pkts = rte_ring_create(ring_name, queue_conf->queue_size, + queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ); + if (q->processed_pkts == NULL) { + rte_bbdev_log(ERR, "Failed to create ring"); + goto free_q; + } + + dev->data->queues[q_id].queue_private = q; + rte_bbdev_log_debug("setup device queue %s", ring_name); + return 0; + +free_q: + rte_free(q); + return -EFAULT; +} + +static const struct rte_bbdev_ops pmd_ops = { + .info_get = info_get, + .queue_setup = q_setup, + .queue_release = q_release +}; + +/* Enqueue decode burst */ +static uint16_t +enqueue_dec_ops(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_dec_op **ops, uint16_t nb_ops) +{ + struct bbdev_queue *q = q_data->queue_private; + uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts, + (void **)ops, nb_ops, NULL); + + q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued; + q_data->queue_stats.enqueued_count += nb_enqueued; + + return nb_enqueued; +} + +/* Enqueue encode burst */ +static uint16_t +enqueue_enc_ops(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_enc_op **ops, uint16_t nb_ops) +{ + struct bbdev_queue *q = q_data->queue_private; + uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts, + (void **)ops, nb_ops, NULL); + + q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued; + q_data->queue_stats.enqueued_count += nb_enqueued; + + return nb_enqueued; +} + +/* Dequeue decode burst */ +static uint16_t +dequeue_dec_ops(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_dec_op **ops, uint16_t nb_ops) +{ + struct bbdev_queue *q = q_data->queue_private; + uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts, + (void **)ops, nb_ops, NULL); + q_data->queue_stats.dequeued_count += nb_dequeued; + + return nb_dequeued; +} + +/* Dequeue encode burst */ +static uint16_t +dequeue_enc_ops(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_enc_op **ops, uint16_t nb_ops) +{ + struct bbdev_queue *q = q_data->queue_private; + uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts, + (void **)ops, nb_ops, NULL); + q_data->queue_stats.dequeued_count += nb_dequeued; + + return nb_dequeued; +} + +/* Parse 16bit integer from string argument */ +static inline int +parse_u16_arg(const char *key, const char *value, void *extra_args) +{ + uint16_t *u16 = extra_args; + unsigned int long result; + + if ((value == NULL) || (extra_args == NULL)) + return -EINVAL; + errno = 0; + result = strtoul(value, NULL, 0); + if ((result >= (1 << 16)) || (errno != 0)) { + rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key); + return -ERANGE; + } + *u16 = (uint16_t)result; + return 0; +} + +/* Parse parameters used to create device */ +static int +parse_bbdev_null_params(struct bbdev_null_params *params, + const char *input_args) +{ + struct rte_kvargs *kvlist = NULL; + int ret = 0; + + if (params == NULL) + return -EINVAL; + if (input_args) { + kvlist = rte_kvargs_parse(input_args, bbdev_null_valid_params); + if (kvlist == NULL) + return -EFAULT; + + ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[0], + &parse_u16_arg, ¶ms->queues_num); + if (ret < 0) + goto exit; + + ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[1], + &parse_u16_arg, ¶ms->socket_id); + if (ret < 0) + goto exit; + + if (params->socket_id >= RTE_MAX_NUMA_NODES) { + 
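+                        /* Note: ret still holds 0 from the successful kvargs
+                         * processing above, so an out-of-range socket_id is
+                         * only logged here; probing continues and the later
+                         * rte_zmalloc_socket() on the bogus socket fails
+                         * instead.
+                         */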
rte_bbdev_log(ERR, "Invalid socket, must be < %u", + RTE_MAX_NUMA_NODES); + goto exit; + } + } + +exit: + if (kvlist) + rte_kvargs_free(kvlist); + return ret; +} + +/* Create device */ +static int +null_bbdev_create(struct rte_vdev_device *vdev, + struct bbdev_null_params *init_params) +{ + struct rte_bbdev *bbdev; + const char *name = rte_vdev_device_name(vdev); + + bbdev = rte_bbdev_allocate(name); + if (bbdev == NULL) + return -ENODEV; + + bbdev->data->dev_private = rte_zmalloc_socket(name, + sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE, + init_params->socket_id); + if (bbdev->data->dev_private == NULL) { + rte_bbdev_release(bbdev); + return -ENOMEM; + } + + bbdev->dev_ops = &pmd_ops; + bbdev->device = &vdev->device; + bbdev->data->socket_id = init_params->socket_id; + bbdev->intr_handle = NULL; + + /* register rx/tx burst functions for data path */ + bbdev->dequeue_enc_ops = dequeue_enc_ops; + bbdev->dequeue_dec_ops = dequeue_dec_ops; + bbdev->enqueue_enc_ops = enqueue_enc_ops; + bbdev->enqueue_dec_ops = enqueue_dec_ops; + ((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues = + init_params->queues_num; + + return 0; +} + +/* Initialise device */ +static int +null_bbdev_probe(struct rte_vdev_device *vdev) +{ + struct bbdev_null_params init_params = { + rte_socket_id(), + RTE_BBDEV_DEFAULT_MAX_NB_QUEUES + }; + const char *name; + const char *input_args; + + if (vdev == NULL) + return -EINVAL; + + name = rte_vdev_device_name(vdev); + if (name == NULL) + return -EINVAL; + + input_args = rte_vdev_device_args(vdev); + parse_bbdev_null_params(&init_params, input_args); + + rte_bbdev_log_debug("Init %s on NUMA node %d with max queues: %d", + name, init_params.socket_id, init_params.queues_num); + + return null_bbdev_create(vdev, &init_params); +} + +/* Uninitialise device */ +static int +null_bbdev_remove(struct rte_vdev_device *vdev) +{ + struct rte_bbdev *bbdev; + const char *name; + + if (vdev == NULL) + return -EINVAL; + + name = rte_vdev_device_name(vdev); + if (name == NULL) + return -EINVAL; + + bbdev = rte_bbdev_get_named_dev(name); + if (bbdev == NULL) + return -EINVAL; + + rte_free(bbdev->data->dev_private); + + return rte_bbdev_release(bbdev); +} + +static struct rte_vdev_driver bbdev_null_pmd_drv = { + .probe = null_bbdev_probe, + .remove = null_bbdev_remove +}; + +RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_null_pmd_drv); +RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME, + BBDEV_NULL_MAX_NB_QUEUES_ARG"= " + BBDEV_NULL_SOCKET_ID_ARG"="); +RTE_PMD_REGISTER_ALIAS(DRIVER_NAME, bbdev_null); + +RTE_INIT(null_bbdev_init_log) +{ + bbdev_null_logtype = rte_log_register("pmd.bb.null"); + if (bbdev_null_logtype >= 0) + rte_log_set_level(bbdev_null_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/baseband/null/rte_pmd_bbdev_null_version.map b/drivers/baseband/null/rte_pmd_bbdev_null_version.map new file mode 100644 index 00000000..58b94270 --- /dev/null +++ b/drivers/baseband/null/rte_pmd_bbdev_null_version.map @@ -0,0 +1,3 @@ +DPDK_18.02 { + local: *; +}; diff --git a/drivers/baseband/turbo_sw/Makefile b/drivers/baseband/turbo_sw/Makefile new file mode 100644 index 00000000..79eb5547 --- /dev/null +++ b/drivers/baseband/turbo_sw/Makefile @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +ifeq ($(FLEXRAN_SDK),) +$(error "Please define FLEXRAN_SDK environment variable") +endif + +# library name +LIB = librte_pmd_bbdev_turbo_sw.a + +# build flags +CFLAGS += -DALLOW_EXPERIMENTAL_API +CFLAGS += -O3 
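+# Note: the FLEXRAN_SDK check above guards the external include/library
+# paths added below; this PMD can only be built when the (proprietary)
+# FlexRAN SDK is installed.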
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -lrte_kvargs
+LDLIBS += -lrte_bbdev
+LDLIBS += -lrte_bus_vdev
+
+# versioning export map
+EXPORT_MAP := rte_pmd_bbdev_turbo_sw_version.map
+
+# external library dependencies
+CFLAGS += -I$(FLEXRAN_SDK)/lib_common
+CFLAGS += -I$(FLEXRAN_SDK)/lib_turbo
+CFLAGS += -I$(FLEXRAN_SDK)/lib_crc
+CFLAGS += -I$(FLEXRAN_SDK)/lib_rate_matching
+
+LDLIBS += -L$(FLEXRAN_SDK)/lib_crc -lcrc
+LDLIBS += -L$(FLEXRAN_SDK)/lib_turbo -lturbo
+LDLIBS += -L$(FLEXRAN_SDK)/lib_rate_matching -lrate_matching
+LDLIBS += -L$(FLEXRAN_SDK)/lib_common -lcommon
+LDLIBS += -lstdc++ -lirc -limf -lipps
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW) += bbdev_turbo_software.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/baseband/turbo_sw/bbdev_turbo_software.c b/drivers/baseband/turbo_sw/bbdev_turbo_software.c
new file mode 100644
index 00000000..8ceb2769
--- /dev/null
+++ b/drivers/baseband/turbo_sw/bbdev_turbo_software.c
@@ -0,0 +1,1307 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_kvargs.h>
+#include <rte_cycles.h>
+
+#include <rte_bbdev.h>
+#include <rte_bbdev_pmd.h>
+
+#include <phy_turbo.h>
+#include <phy_crc.h>
+#include <phy_rate_match.h>
+#include <divide.h>
+
+#define DRIVER_NAME baseband_turbo_sw
+
+/* Turbo SW PMD logging ID */
+static int bbdev_turbo_sw_logtype;
+
+/* Helper macro for logging */
+#define rte_bbdev_log(level, fmt, ...) \
+        rte_log(RTE_LOG_ ## level, bbdev_turbo_sw_logtype, fmt "\n", \
+                ##__VA_ARGS__)
+
+#define rte_bbdev_log_debug(fmt, ...) \
+        rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
+                ##__VA_ARGS__)
+
+#define DEINT_INPUT_BUF_SIZE (((RTE_BBDEV_MAX_CB_SIZE >> 3) + 1) * 48)
+#define DEINT_OUTPUT_BUF_SIZE (DEINT_INPUT_BUF_SIZE * 6)
+#define ADAPTER_OUTPUT_BUF_SIZE ((RTE_BBDEV_MAX_CB_SIZE + 4) * 48)
+
+/* private data structure */
+struct bbdev_private {
+        unsigned int max_nb_queues;  /**< Max number of queues */
+};
+
+/* Initialisation params structure that can be used by Turbo SW driver */
+struct turbo_sw_params {
+        int socket_id;  /*< Turbo SW device socket */
+        uint16_t queues_num;  /*< Turbo SW device queues number */
+};
+
+/* Acceptable params for Turbo SW devices */
+#define TURBO_SW_MAX_NB_QUEUES_ARG  "max_nb_queues"
+#define TURBO_SW_SOCKET_ID_ARG      "socket_id"
+
+static const char * const turbo_sw_valid_params[] = {
+        TURBO_SW_MAX_NB_QUEUES_ARG,
+        TURBO_SW_SOCKET_ID_ARG
+};
+
+/* queue */
+struct turbo_sw_queue {
+        /* Ring for processed (encoded/decoded) operations which are ready to
+         * be dequeued.
+ */ + struct rte_ring *processed_pkts; + /* Stores input for turbo encoder (used when CRC attachment is + * performed + */ + uint8_t *enc_in; + /* Stores output from turbo encoder */ + uint8_t *enc_out; + /* Alpha gamma buf for bblib_turbo_decoder() function */ + int8_t *ag; + /* Temp buf for bblib_turbo_decoder() function */ + uint16_t *code_block; + /* Input buf for bblib_rate_dematching_lte() function */ + uint8_t *deint_input; + /* Output buf for bblib_rate_dematching_lte() function */ + uint8_t *deint_output; + /* Output buf for bblib_turbodec_adapter_lte() function */ + uint8_t *adapter_output; + /* Operation type of this queue */ + enum rte_bbdev_op_type type; +} __rte_cache_aligned; + +/* Calculate index based on Table 5.1.3-3 from TS34.212 */ +static inline int32_t +compute_idx(uint16_t k) +{ + int32_t result = 0; + + if (k < RTE_BBDEV_MIN_CB_SIZE || k > RTE_BBDEV_MAX_CB_SIZE) + return -1; + + if (k > 2048) { + if ((k - 2048) % 64 != 0) + result = -1; + + result = 124 + (k - 2048) / 64; + } else if (k <= 512) { + if ((k - 40) % 8 != 0) + result = -1; + + result = (k - 40) / 8 + 1; + } else if (k <= 1024) { + if ((k - 512) % 16 != 0) + result = -1; + + result = 60 + (k - 512) / 16; + } else { /* 1024 < k <= 2048 */ + if ((k - 1024) % 32 != 0) + result = -1; + + result = 92 + (k - 1024) / 32; + } + + return result; +} + +/* Read flag value 0/1 from bitmap */ +static inline bool +check_bit(uint32_t bitmap, uint32_t bitmask) +{ + return bitmap & bitmask; +} + +/* Get device info */ +static void +info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info) +{ + struct bbdev_private *internals = dev->data->dev_private; + + static const struct rte_bbdev_op_cap bbdev_capabilities[] = { + { + .type = RTE_BBDEV_OP_TURBO_DEC, + .cap.turbo_dec = { + .capability_flags = + RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE | + RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN | + RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN | + RTE_BBDEV_TURBO_CRC_TYPE_24B | + RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP | + RTE_BBDEV_TURBO_EARLY_TERMINATION, + .max_llr_modulus = 16, + .num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS, + .num_buffers_hard_out = + RTE_BBDEV_MAX_CODE_BLOCKS, + .num_buffers_soft_out = 0, + } + }, + { + .type = RTE_BBDEV_OP_TURBO_ENC, + .cap.turbo_enc = { + .capability_flags = + RTE_BBDEV_TURBO_CRC_24B_ATTACH | + RTE_BBDEV_TURBO_CRC_24A_ATTACH | + RTE_BBDEV_TURBO_RATE_MATCH | + RTE_BBDEV_TURBO_RV_INDEX_BYPASS, + .num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS, + .num_buffers_dst = RTE_BBDEV_MAX_CODE_BLOCKS, + } + }, + RTE_BBDEV_END_OF_CAPABILITIES_LIST() + }; + + static struct rte_bbdev_queue_conf default_queue_conf = { + .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT, + }; + + static const enum rte_cpu_flag_t cpu_flag = RTE_CPUFLAG_SSE4_2; + + default_queue_conf.socket = dev->data->socket_id; + + dev_info->driver_name = RTE_STR(DRIVER_NAME); + dev_info->max_num_queues = internals->max_nb_queues; + dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT; + dev_info->hardware_accelerated = false; + dev_info->max_dl_queue_priority = 0; + dev_info->max_ul_queue_priority = 0; + dev_info->default_queue_conf = default_queue_conf; + dev_info->capabilities = bbdev_capabilities; + dev_info->cpu_flag_reqs = &cpu_flag; + dev_info->min_alignment = 64; + + rte_bbdev_log_debug("got device info from %u\n", dev->data->dev_id); +} + +/* Release queue */ +static int +q_release(struct rte_bbdev *dev, uint16_t q_id) +{ + struct turbo_sw_queue *q = dev->data->queues[q_id].queue_private; + + if (q != NULL) { + rte_ring_free(q->processed_pkts); + 
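+                /* Every per-queue scratch buffer below was allocated in
+                 * q_setup(); both rte_ring_free() and rte_free() tolerate
+                 * NULL, so they can all be freed unconditionally.
+                 */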
rte_free(q->enc_out); + rte_free(q->enc_in); + rte_free(q->ag); + rte_free(q->code_block); + rte_free(q->deint_input); + rte_free(q->deint_output); + rte_free(q->adapter_output); + rte_free(q); + dev->data->queues[q_id].queue_private = NULL; + } + + rte_bbdev_log_debug("released device queue %u:%u", + dev->data->dev_id, q_id); + return 0; +} + +/* Setup a queue */ +static int +q_setup(struct rte_bbdev *dev, uint16_t q_id, + const struct rte_bbdev_queue_conf *queue_conf) +{ + int ret; + struct turbo_sw_queue *q; + char name[RTE_RING_NAMESIZE]; + + /* Allocate the queue data structure. */ + q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q), + RTE_CACHE_LINE_SIZE, queue_conf->socket); + if (q == NULL) { + rte_bbdev_log(ERR, "Failed to allocate queue memory"); + return -ENOMEM; + } + + /* Allocate memory for encoder output. */ + ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_enc_o%u:%u", + dev->data->dev_id, q_id); + if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { + rte_bbdev_log(ERR, + "Creating queue name for device %u queue %u failed", + dev->data->dev_id, q_id); + return -ENAMETOOLONG; + } + q->enc_out = rte_zmalloc_socket(name, + ((RTE_BBDEV_MAX_TB_SIZE >> 3) + 3) * + sizeof(*q->enc_out) * 3, + RTE_CACHE_LINE_SIZE, queue_conf->socket); + if (q->enc_out == NULL) { + rte_bbdev_log(ERR, + "Failed to allocate queue memory for %s", name); + goto free_q; + } + + /* Allocate memory for rate matching output. */ + ret = snprintf(name, RTE_RING_NAMESIZE, + RTE_STR(DRIVER_NAME)"_enc_i%u:%u", dev->data->dev_id, + q_id); + if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { + rte_bbdev_log(ERR, + "Creating queue name for device %u queue %u failed", + dev->data->dev_id, q_id); + return -ENAMETOOLONG; + } + q->enc_in = rte_zmalloc_socket(name, + (RTE_BBDEV_MAX_CB_SIZE >> 3) * sizeof(*q->enc_in), + RTE_CACHE_LINE_SIZE, queue_conf->socket); + if (q->enc_in == NULL) { + rte_bbdev_log(ERR, + "Failed to allocate queue memory for %s", name); + goto free_q; + } + + /* Allocate memory for Aplha Gamma temp buffer. */ + ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_ag%u:%u", + dev->data->dev_id, q_id); + if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { + rte_bbdev_log(ERR, + "Creating queue name for device %u queue %u failed", + dev->data->dev_id, q_id); + return -ENAMETOOLONG; + } + q->ag = rte_zmalloc_socket(name, + RTE_BBDEV_MAX_CB_SIZE * 10 * sizeof(*q->ag), + RTE_CACHE_LINE_SIZE, queue_conf->socket); + if (q->ag == NULL) { + rte_bbdev_log(ERR, + "Failed to allocate queue memory for %s", name); + goto free_q; + } + + /* Allocate memory for code block temp buffer. */ + ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_cb%u:%u", + dev->data->dev_id, q_id); + if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { + rte_bbdev_log(ERR, + "Creating queue name for device %u queue %u failed", + dev->data->dev_id, q_id); + return -ENAMETOOLONG; + } + q->code_block = rte_zmalloc_socket(name, + RTE_BBDEV_MAX_CB_SIZE * sizeof(*q->code_block), + RTE_CACHE_LINE_SIZE, queue_conf->socket); + if (q->code_block == NULL) { + rte_bbdev_log(ERR, + "Failed to allocate queue memory for %s", name); + goto free_q; + } + + /* Allocate memory for Deinterleaver input. 
*/ + ret = snprintf(name, RTE_RING_NAMESIZE, + RTE_STR(DRIVER_NAME)"_de_i%u:%u", + dev->data->dev_id, q_id); + if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { + rte_bbdev_log(ERR, + "Creating queue name for device %u queue %u failed", + dev->data->dev_id, q_id); + return -ENAMETOOLONG; + } + q->deint_input = rte_zmalloc_socket(name, + DEINT_INPUT_BUF_SIZE * sizeof(*q->deint_input), + RTE_CACHE_LINE_SIZE, queue_conf->socket); + if (q->deint_input == NULL) { + rte_bbdev_log(ERR, + "Failed to allocate queue memory for %s", name); + goto free_q; + } + + /* Allocate memory for Deinterleaver output. */ + ret = snprintf(name, RTE_RING_NAMESIZE, + RTE_STR(DRIVER_NAME)"_de_o%u:%u", + dev->data->dev_id, q_id); + if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { + rte_bbdev_log(ERR, + "Creating queue name for device %u queue %u failed", + dev->data->dev_id, q_id); + return -ENAMETOOLONG; + } + q->deint_output = rte_zmalloc_socket(NULL, + DEINT_OUTPUT_BUF_SIZE * sizeof(*q->deint_output), + RTE_CACHE_LINE_SIZE, queue_conf->socket); + if (q->deint_output == NULL) { + rte_bbdev_log(ERR, + "Failed to allocate queue memory for %s", name); + goto free_q; + } + + /* Allocate memory for Adapter output. */ + ret = snprintf(name, RTE_RING_NAMESIZE, + RTE_STR(DRIVER_NAME)"_ada_o%u:%u", + dev->data->dev_id, q_id); + if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { + rte_bbdev_log(ERR, + "Creating queue name for device %u queue %u failed", + dev->data->dev_id, q_id); + return -ENAMETOOLONG; + } + q->adapter_output = rte_zmalloc_socket(NULL, + ADAPTER_OUTPUT_BUF_SIZE * sizeof(*q->adapter_output), + RTE_CACHE_LINE_SIZE, queue_conf->socket); + if (q->adapter_output == NULL) { + rte_bbdev_log(ERR, + "Failed to allocate queue memory for %s", name); + goto free_q; + } + + /* Create ring for packets awaiting to be dequeued. */ + ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"%u:%u", + dev->data->dev_id, q_id); + if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { + rte_bbdev_log(ERR, + "Creating queue name for device %u queue %u failed", + dev->data->dev_id, q_id); + return -ENAMETOOLONG; + } + q->processed_pkts = rte_ring_create(name, queue_conf->queue_size, + queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ); + if (q->processed_pkts == NULL) { + rte_bbdev_log(ERR, "Failed to create ring for %s", name); + goto free_q; + } + + q->type = queue_conf->op_type; + + dev->data->queues[q_id].queue_private = q; + rte_bbdev_log_debug("setup device queue %s", name); + return 0; + +free_q: + rte_ring_free(q->processed_pkts); + rte_free(q->enc_out); + rte_free(q->enc_in); + rte_free(q->ag); + rte_free(q->code_block); + rte_free(q->deint_input); + rte_free(q->deint_output); + rte_free(q->adapter_output); + rte_free(q); + return -EFAULT; +} + +static const struct rte_bbdev_ops pmd_ops = { + .info_get = info_get, + .queue_setup = q_setup, + .queue_release = q_release +}; + +/* Checks if the encoder input buffer is correct. + * Returns 0 if it's valid, -1 otherwise. + */ +static inline int +is_enc_input_valid(const uint16_t k, const int32_t k_idx, + const uint16_t in_length) +{ + if (k_idx < 0) { + rte_bbdev_log(ERR, "K Index is invalid"); + return -1; + } + + if (in_length - (k >> 3) < 0) { + rte_bbdev_log(ERR, + "Mismatch between input length (%u bytes) and K (%u bits)", + in_length, k); + return -1; + } + + if (k > RTE_BBDEV_MAX_CB_SIZE) { + rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d", + k, RTE_BBDEV_MAX_CB_SIZE); + return -1; + } + + return 0; +} + +/* Checks if the decoder input buffer is correct. 
+ * Returns 0 if it's valid, -1 otherwise. + */ +static inline int +is_dec_input_valid(int32_t k_idx, int16_t kw, int16_t in_length) +{ + if (k_idx < 0) { + rte_bbdev_log(ERR, "K index is invalid"); + return -1; + } + + if (in_length - kw < 0) { + rte_bbdev_log(ERR, + "Mismatch between input length (%u) and kw (%u)", + in_length, kw); + return -1; + } + + if (kw > RTE_BBDEV_MAX_KW) { + rte_bbdev_log(ERR, "Input length (%u) is too big, max: %d", + kw, RTE_BBDEV_MAX_KW); + return -1; + } + + return 0; +} + +static inline void +process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op, + uint8_t r, uint8_t c, uint16_t k, uint16_t ncb, + uint32_t e, struct rte_mbuf *m_in, struct rte_mbuf *m_out, + uint16_t in_offset, uint16_t out_offset, uint16_t total_left, + struct rte_bbdev_stats *q_stats) +{ + int ret; + int16_t k_idx; + uint16_t m; + uint8_t *in, *out0, *out1, *out2, *tmp_out, *rm_out; + uint64_t first_3_bytes = 0; + struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc; + struct bblib_crc_request crc_req; + struct bblib_crc_response crc_resp; + struct bblib_turbo_encoder_request turbo_req; + struct bblib_turbo_encoder_response turbo_resp; + struct bblib_rate_match_dl_request rm_req; + struct bblib_rate_match_dl_response rm_resp; +#ifdef RTE_BBDEV_OFFLOAD_COST + uint64_t start_time; +#else + RTE_SET_USED(q_stats); +#endif + + k_idx = compute_idx(k); + in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset); + + /* CRC24A (for TB) */ + if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH) && + (enc->code_block_mode == 1)) { + ret = is_enc_input_valid(k - 24, k_idx, total_left); + if (ret != 0) { + op->status |= 1 << RTE_BBDEV_DATA_ERROR; + return; + } + crc_req.data = in; + crc_req.len = k - 24; + /* Check if there is a room for CRC bits if not use + * the temporary buffer. + */ + if (rte_pktmbuf_append(m_in, 3) == NULL) { + rte_memcpy(q->enc_in, in, (k - 24) >> 3); + in = q->enc_in; + } else { + /* Store 3 first bytes of next CB as they will be + * overwritten by CRC bytes. If it is the last CB then + * there is no point to store 3 next bytes and this + * if..else branch will be omitted. + */ + first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]); + } + + crc_resp.data = in; +#ifdef RTE_BBDEV_OFFLOAD_COST + start_time = rte_rdtsc_precise(); +#endif + bblib_lte_crc24a_gen(&crc_req, &crc_resp); +#ifdef RTE_BBDEV_OFFLOAD_COST + q_stats->offload_time += rte_rdtsc_precise() - start_time; +#endif + } else if (enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) { + /* CRC24B */ + ret = is_enc_input_valid(k - 24, k_idx, total_left); + if (ret != 0) { + op->status |= 1 << RTE_BBDEV_DATA_ERROR; + return; + } + crc_req.data = in; + crc_req.len = k - 24; + /* Check if there is a room for CRC bits if this is the last + * CB in TB. If not use temporary buffer. + */ + if ((c - r == 1) && (rte_pktmbuf_append(m_in, 3) == NULL)) { + rte_memcpy(q->enc_in, in, (k - 24) >> 3); + in = q->enc_in; + } else if (c - r > 1) { + /* Store 3 first bytes of next CB as they will be + * overwritten by CRC bytes. If it is the last CB then + * there is no point to store 3 next bytes and this + * if..else branch will be omitted. 
+ */ + first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]); + } + + crc_resp.data = in; +#ifdef RTE_BBDEV_OFFLOAD_COST + start_time = rte_rdtsc_precise(); +#endif + bblib_lte_crc24b_gen(&crc_req, &crc_resp); +#ifdef RTE_BBDEV_OFFLOAD_COST + q_stats->offload_time += rte_rdtsc_precise() - start_time; +#endif + } else { + ret = is_enc_input_valid(k, k_idx, total_left); + if (ret != 0) { + op->status |= 1 << RTE_BBDEV_DATA_ERROR; + return; + } + } + + /* Turbo encoder */ + + /* Each bit layer output from turbo encoder is (k+4) bits long, i.e. + * input length + 4 tail bits. That's (k/8) + 1 bytes after rounding up. + * So dst_data's length should be 3*(k/8) + 3 bytes. + * In Rate-matching bypass case outputs pointers passed to encoder + * (out0, out1 and out2) can directly point to addresses of output from + * turbo_enc entity. + */ + if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) { + out0 = q->enc_out; + out1 = RTE_PTR_ADD(out0, (k >> 3) + 1); + out2 = RTE_PTR_ADD(out1, (k >> 3) + 1); + } else { + out0 = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3) * 3 + 2); + if (out0 == NULL) { + op->status |= 1 << RTE_BBDEV_DATA_ERROR; + rte_bbdev_log(ERR, + "Too little space in output mbuf"); + return; + } + enc->output.length += (k >> 3) * 3 + 2; + /* rte_bbdev_op_data.offset can be different than the + * offset of the appended bytes + */ + out0 = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset); + out1 = rte_pktmbuf_mtod_offset(m_out, uint8_t *, + out_offset + (k >> 3) + 1); + out2 = rte_pktmbuf_mtod_offset(m_out, uint8_t *, + out_offset + 2 * ((k >> 3) + 1)); + } + + turbo_req.case_id = k_idx; + turbo_req.input_win = in; + turbo_req.length = k >> 3; + turbo_resp.output_win_0 = out0; + turbo_resp.output_win_1 = out1; + turbo_resp.output_win_2 = out2; + +#ifdef RTE_BBDEV_OFFLOAD_COST + start_time = rte_rdtsc_precise(); +#endif + + if (bblib_turbo_encoder(&turbo_req, &turbo_resp) != 0) { + op->status |= 1 << RTE_BBDEV_DRV_ERROR; + rte_bbdev_log(ERR, "Turbo Encoder failed"); + return; + } + +#ifdef RTE_BBDEV_OFFLOAD_COST + q_stats->offload_time += rte_rdtsc_precise() - start_time; +#endif + + /* Restore 3 first bytes of next CB if they were overwritten by CRC*/ + if (first_3_bytes != 0) + *((uint64_t *)&in[(k - 32) >> 3]) = first_3_bytes; + + /* Rate-matching */ + if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) { + uint8_t mask_id; + /* Integer round up division by 8 */ + uint16_t out_len = (e + 7) >> 3; + /* The mask array is indexed using E%8. E is an even number so + * there are only 4 possible values. + */ + const uint8_t mask_out[] = {0xFF, 0xC0, 0xF0, 0xFC}; + + /* get output data starting address */ + rm_out = (uint8_t *)rte_pktmbuf_append(m_out, out_len); + if (rm_out == NULL) { + op->status |= 1 << RTE_BBDEV_DATA_ERROR; + rte_bbdev_log(ERR, + "Too little space in output mbuf"); + return; + } + /* rte_bbdev_op_data.offset can be different than the offset + * of the appended bytes + */ + rm_out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset); + + /* index of current code block */ + rm_req.r = r; + /* total number of code block */ + rm_req.C = c; + /* For DL - 1, UL - 0 */ + rm_req.direction = 1; + /* According to 3ggp 36.212 Spec 5.1.4.1.2 section Nsoft, KMIMO + * and MDL_HARQ are used for Ncb calculation. As Ncb is already + * known we can adjust those parameters + */ + rm_req.Nsoft = ncb * rm_req.C; + rm_req.KMIMO = 1; + rm_req.MDL_HARQ = 1; + /* According to 3ggp 36.212 Spec 5.1.4.1.2 section Nl, Qm and G + * are used for E calculation. 
As E is already known we can + * adjust those parameters + */ + rm_req.NL = e; + rm_req.Qm = 1; + rm_req.G = rm_req.NL * rm_req.Qm * rm_req.C; + + rm_req.rvidx = enc->rv_index; + rm_req.Kidx = k_idx - 1; + rm_req.nLen = k + 4; + rm_req.tin0 = out0; + rm_req.tin1 = out1; + rm_req.tin2 = out2; + rm_resp.output = rm_out; + rm_resp.OutputLen = out_len; + if (enc->op_flags & RTE_BBDEV_TURBO_RV_INDEX_BYPASS) + rm_req.bypass_rvidx = 1; + else + rm_req.bypass_rvidx = 0; + +#ifdef RTE_BBDEV_OFFLOAD_COST + start_time = rte_rdtsc_precise(); +#endif + + if (bblib_rate_match_dl(&rm_req, &rm_resp) != 0) { + op->status |= 1 << RTE_BBDEV_DRV_ERROR; + rte_bbdev_log(ERR, "Rate matching failed"); + return; + } + + /* SW fills an entire last byte even if E%8 != 0. Clear the + * superfluous data bits for consistency with HW device. + */ + mask_id = (e & 7) >> 1; + rm_out[out_len - 1] &= mask_out[mask_id]; + +#ifdef RTE_BBDEV_OFFLOAD_COST + q_stats->offload_time += rte_rdtsc_precise() - start_time; +#endif + + enc->output.length += rm_resp.OutputLen; + } else { + /* Rate matching is bypassed */ + + /* Completing last byte of out0 (where 4 tail bits are stored) + * by moving first 4 bits from out1 + */ + tmp_out = (uint8_t *) --out1; + *tmp_out = *tmp_out | ((*(tmp_out + 1) & 0xF0) >> 4); + tmp_out++; + /* Shifting out1 data by 4 bits to the left */ + for (m = 0; m < k >> 3; ++m) { + uint8_t *first = tmp_out; + uint8_t second = *(tmp_out + 1); + *first = (*first << 4) | ((second & 0xF0) >> 4); + tmp_out++; + } + /* Shifting out2 data by 8 bits to the left */ + for (m = 0; m < (k >> 3) + 1; ++m) { + *tmp_out = *(tmp_out + 1); + tmp_out++; + } + *tmp_out = 0; + } +} + +static inline void +enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op, + struct rte_bbdev_stats *queue_stats) +{ + uint8_t c, r, crc24_bits = 0; + uint16_t k, ncb; + uint32_t e; + struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc; + uint16_t in_offset = enc->input.offset; + uint16_t out_offset = enc->output.offset; + struct rte_mbuf *m_in = enc->input.data; + struct rte_mbuf *m_out = enc->output.data; + uint16_t total_left = enc->input.length; + + /* Clear op status */ + op->status = 0; + + if (total_left > RTE_BBDEV_MAX_TB_SIZE >> 3) { + rte_bbdev_log(ERR, "TB size (%u) is too big, max: %d", + total_left, RTE_BBDEV_MAX_TB_SIZE); + op->status = 1 << RTE_BBDEV_DATA_ERROR; + return; + } + + if (m_in == NULL || m_out == NULL) { + rte_bbdev_log(ERR, "Invalid mbuf pointer"); + op->status = 1 << RTE_BBDEV_DATA_ERROR; + return; + } + + if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) || + (enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH)) + crc24_bits = 24; + + if (enc->code_block_mode == 0) { /* For Transport Block mode */ + c = enc->tb_params.c; + r = enc->tb_params.r; + } else {/* For Code Block mode */ + c = 1; + r = 0; + } + + while (total_left > 0 && r < c) { + if (enc->code_block_mode == 0) { + k = (r < enc->tb_params.c_neg) ? + enc->tb_params.k_neg : enc->tb_params.k_pos; + ncb = (r < enc->tb_params.c_neg) ? + enc->tb_params.ncb_neg : enc->tb_params.ncb_pos; + e = (r < enc->tb_params.cab) ? 
+ enc->tb_params.ea : enc->tb_params.eb; + } else { + k = enc->cb_params.k; + ncb = enc->cb_params.ncb; + e = enc->cb_params.e; + } + + process_enc_cb(q, op, r, c, k, ncb, e, m_in, + m_out, in_offset, out_offset, total_left, + queue_stats); + /* Update total_left */ + total_left -= (k - crc24_bits) >> 3; + /* Update offsets for next CBs (if exist) */ + in_offset += (k - crc24_bits) >> 3; + if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) + out_offset += e >> 3; + else + out_offset += (k >> 3) * 3 + 2; + r++; + } + + /* check if all input data was processed */ + if (total_left != 0) { + op->status |= 1 << RTE_BBDEV_DATA_ERROR; + rte_bbdev_log(ERR, + "Mismatch between mbuf length and included CBs sizes"); + } +} + +static inline uint16_t +enqueue_enc_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_enc_op **ops, + uint16_t nb_ops, struct rte_bbdev_stats *queue_stats) +{ + uint16_t i; +#ifdef RTE_BBDEV_OFFLOAD_COST + queue_stats->offload_time = 0; +#endif + + for (i = 0; i < nb_ops; ++i) + enqueue_enc_one_op(q, ops[i], queue_stats); + + return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops, + NULL); +} + +/* Remove the padding bytes from a cyclic buffer. + * The input buffer is a data stream wk as described in 3GPP TS 36.212 section + * 5.1.4.1.2 starting from w0 and with length Ncb bytes. + * The output buffer is a data stream wk with pruned padding bytes. It's length + * is 3*D bytes and the order of non-padding bytes is preserved. + */ +static inline void +remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k, + uint16_t ncb) +{ + uint32_t in_idx, out_idx, c_idx; + const uint32_t d = k + 4; + const uint32_t kw = (ncb / 3); + const uint32_t nd = kw - d; + const uint32_t r_subblock = kw / RTE_BBDEV_C_SUBBLOCK; + /* Inter-column permutation pattern */ + const uint32_t P[RTE_BBDEV_C_SUBBLOCK] = {0, 16, 8, 24, 4, 20, 12, 28, + 2, 18, 10, 26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13, + 29, 3, 19, 11, 27, 7, 23, 15, 31}; + in_idx = 0; + out_idx = 0; + + /* The padding bytes are at the first Nd positions in the first row. */ + for (c_idx = 0; in_idx < kw; in_idx += r_subblock, ++c_idx) { + if (P[c_idx] < nd) { + rte_memcpy(&out[out_idx], &in[in_idx + 1], + r_subblock - 1); + out_idx += r_subblock - 1; + } else { + rte_memcpy(&out[out_idx], &in[in_idx], r_subblock); + out_idx += r_subblock; + } + } + + /* First and second parity bits sub-blocks are interlaced. */ + for (c_idx = 0; in_idx < ncb - 2 * r_subblock; + in_idx += 2 * r_subblock, ++c_idx) { + uint32_t second_block_c_idx = P[c_idx]; + uint32_t third_block_c_idx = P[c_idx] + 1; + + if (second_block_c_idx < nd && third_block_c_idx < nd) { + rte_memcpy(&out[out_idx], &in[in_idx + 2], + 2 * r_subblock - 2); + out_idx += 2 * r_subblock - 2; + } else if (second_block_c_idx >= nd && + third_block_c_idx >= nd) { + rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock); + out_idx += 2 * r_subblock; + } else if (second_block_c_idx < nd) { + out[out_idx++] = in[in_idx]; + rte_memcpy(&out[out_idx], &in[in_idx + 2], + 2 * r_subblock - 2); + out_idx += 2 * r_subblock - 2; + } else { + rte_memcpy(&out[out_idx], &in[in_idx + 1], + 2 * r_subblock - 1); + out_idx += 2 * r_subblock - 1; + } + } + + /* Last interlaced row is different - its last byte is the only padding + * byte. We can have from 4 up to 28 padding bytes (Nd) per sub-block. + * After interlacing the 1st and 2nd parity sub-blocks we can have 0, 1 + * or 2 padding bytes each time we make a step of 2 * R_SUBBLOCK bytes + * (moving to another column). 
2nd parity sub-block uses the same + * inter-column permutation pattern as the systematic and 1st parity + * sub-blocks but it adds '1' to the resulting index and calculates the + * modulus of the result and Kw. Last column is mapped to itself (id 31) + * so the first byte taken from the 2nd parity sub-block will be the + * 32nd (31+1) byte, then 64th etc. (step is C_SUBBLOCK == 32) and the + * last byte will be the first byte from the sub-block: + * (32 + 32 * (R_SUBBLOCK-1)) % Kw == Kw % Kw == 0. Nd can't be smaller + * than 4 so we know that bytes with ids 0, 1, 2 and 3 must be the + * padding bytes. The bytes from the 1st parity sub-block are the bytes + * from the 31st column - Nd can't be greater than 28 so we are sure + * that there are no padding bytes in 31st column. + */ + rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock - 1); +} + +static inline void +move_padding_bytes(const uint8_t *in, uint8_t *out, uint16_t k, + uint16_t ncb) +{ + uint16_t d = k + 4; + uint16_t kpi = ncb / 3; + uint16_t nd = kpi - d; + + rte_memcpy(&out[nd], in, d); + rte_memcpy(&out[nd + kpi + 64], &in[kpi], d); + rte_memcpy(&out[(nd - 1) + 2 * (kpi + 64)], &in[2 * kpi], d); +} + +static inline void +process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op, + uint8_t c, uint16_t k, uint16_t kw, struct rte_mbuf *m_in, + struct rte_mbuf *m_out, uint16_t in_offset, uint16_t out_offset, + bool check_crc_24b, uint16_t crc24_overlap, uint16_t total_left) +{ + int ret; + int32_t k_idx; + int32_t iter_cnt; + uint8_t *in, *out, *adapter_input; + int32_t ncb, ncb_without_null; + struct bblib_turbo_adapter_ul_response adapter_resp; + struct bblib_turbo_adapter_ul_request adapter_req; + struct bblib_turbo_decoder_request turbo_req; + struct bblib_turbo_decoder_response turbo_resp; + struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec; + + k_idx = compute_idx(k); + + ret = is_dec_input_valid(k_idx, kw, total_left); + if (ret != 0) { + op->status |= 1 << RTE_BBDEV_DATA_ERROR; + return; + } + + in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset); + ncb = kw; + ncb_without_null = (k + 4) * 3; + + if (check_bit(dec->op_flags, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE)) { + struct bblib_deinterleave_ul_request deint_req; + struct bblib_deinterleave_ul_response deint_resp; + + /* SW decoder accepts only a circular buffer without NULL bytes + * so the input needs to be converted. 
+ */ + remove_nulls_from_circular_buf(in, q->deint_input, k, ncb); + + deint_req.pharqbuffer = q->deint_input; + deint_req.ncb = ncb_without_null; + deint_resp.pinteleavebuffer = q->deint_output; + bblib_deinterleave_ul(&deint_req, &deint_resp); + } else + move_padding_bytes(in, q->deint_output, k, ncb); + + adapter_input = q->deint_output; + + if (dec->op_flags & RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN) + adapter_req.isinverted = 1; + else if (dec->op_flags & RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN) + adapter_req.isinverted = 0; + else { + op->status |= 1 << RTE_BBDEV_DRV_ERROR; + rte_bbdev_log(ERR, "LLR format wasn't specified"); + return; + } + + adapter_req.ncb = ncb_without_null; + adapter_req.pinteleavebuffer = adapter_input; + adapter_resp.pharqout = q->adapter_output; + bblib_turbo_adapter_ul(&adapter_req, &adapter_resp); + + out = (uint8_t *)rte_pktmbuf_append(m_out, ((k - crc24_overlap) >> 3)); + if (out == NULL) { + op->status |= 1 << RTE_BBDEV_DATA_ERROR; + rte_bbdev_log(ERR, "Too little space in output mbuf"); + return; + } + /* rte_bbdev_op_data.offset can be different than the offset of the + * appended bytes + */ + out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset); + if (check_crc_24b) + turbo_req.c = c + 1; + else + turbo_req.c = c; + turbo_req.input = (int8_t *)q->adapter_output; + turbo_req.k = k; + turbo_req.k_idx = k_idx; + turbo_req.max_iter_num = dec->iter_max; + turbo_req.early_term_disable = !check_bit(dec->op_flags, + RTE_BBDEV_TURBO_EARLY_TERMINATION); + turbo_resp.ag_buf = q->ag; + turbo_resp.cb_buf = q->code_block; + turbo_resp.output = out; + iter_cnt = bblib_turbo_decoder(&turbo_req, &turbo_resp); + dec->hard_output.length += (k >> 3); + + if (iter_cnt > 0) { + /* Temporary solution for returned iter_count from SDK */ + iter_cnt = (iter_cnt - 1) / 2; + dec->iter_count = RTE_MAX(iter_cnt, dec->iter_count); + } else { + op->status |= 1 << RTE_BBDEV_DATA_ERROR; + rte_bbdev_log(ERR, "Turbo Decoder failed"); + return; + } +} + +static inline void +enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op) +{ + uint8_t c, r = 0; + uint16_t kw, k = 0; + uint16_t crc24_overlap = 0; + struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec; + struct rte_mbuf *m_in = dec->input.data; + struct rte_mbuf *m_out = dec->hard_output.data; + uint16_t in_offset = dec->input.offset; + uint16_t total_left = dec->input.length; + uint16_t out_offset = dec->hard_output.offset; + + /* Clear op status */ + op->status = 0; + + if (m_in == NULL || m_out == NULL) { + rte_bbdev_log(ERR, "Invalid mbuf pointer"); + op->status = 1 << RTE_BBDEV_DATA_ERROR; + return; + } + + if (dec->code_block_mode == 0) { /* For Transport Block mode */ + c = dec->tb_params.c; + } else { /* For Code Block mode */ + k = dec->cb_params.k; + c = 1; + } + + if ((c > 1) && !check_bit(dec->op_flags, + RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP)) + crc24_overlap = 24; + + while (total_left > 0) { + if (dec->code_block_mode == 0) + k = (r < dec->tb_params.c_neg) ? + dec->tb_params.k_neg : dec->tb_params.k_pos; + + /* Calculates circular buffer size (Kw). + * According to 3gpp 36.212 section 5.1.4.2 + * Kw = 3 * Kpi, + * where: + * Kpi = nCol * nRow + * where nCol is 32 and nRow can be calculated from: + * D =< nCol * nRow + * where D is the size of each output from turbo encoder block + * (k + 4). 
+ */ + kw = RTE_ALIGN_CEIL(k + 4, RTE_BBDEV_C_SUBBLOCK) * 3; + + process_dec_cb(q, op, c, k, kw, m_in, m_out, in_offset, + out_offset, check_bit(dec->op_flags, + RTE_BBDEV_TURBO_CRC_TYPE_24B), crc24_overlap, + total_left); + /* To keep CRC24 attached to end of Code block, use + * RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP flag as it + * removed by default once verified. + */ + + /* Update total_left */ + total_left -= kw; + /* Update offsets for next CBs (if exist) */ + in_offset += kw; + out_offset += ((k - crc24_overlap) >> 3); + r++; + } + if (total_left != 0) { + op->status |= 1 << RTE_BBDEV_DATA_ERROR; + rte_bbdev_log(ERR, + "Mismatch between mbuf length and included Circular buffer sizes"); + } +} + +static inline uint16_t +enqueue_dec_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_dec_op **ops, + uint16_t nb_ops) +{ + uint16_t i; + + for (i = 0; i < nb_ops; ++i) + enqueue_dec_one_op(q, ops[i]); + + return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops, + NULL); +} + +/* Enqueue burst */ +static uint16_t +enqueue_enc_ops(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_enc_op **ops, uint16_t nb_ops) +{ + void *queue = q_data->queue_private; + struct turbo_sw_queue *q = queue; + uint16_t nb_enqueued = 0; + + nb_enqueued = enqueue_enc_all_ops(q, ops, nb_ops, &q_data->queue_stats); + + q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued; + q_data->queue_stats.enqueued_count += nb_enqueued; + + return nb_enqueued; +} + +/* Enqueue burst */ +static uint16_t +enqueue_dec_ops(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_dec_op **ops, uint16_t nb_ops) +{ + void *queue = q_data->queue_private; + struct turbo_sw_queue *q = queue; + uint16_t nb_enqueued = 0; + + nb_enqueued = enqueue_dec_all_ops(q, ops, nb_ops); + + q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued; + q_data->queue_stats.enqueued_count += nb_enqueued; + + return nb_enqueued; +} + +/* Dequeue decode burst */ +static uint16_t +dequeue_dec_ops(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_dec_op **ops, uint16_t nb_ops) +{ + struct turbo_sw_queue *q = q_data->queue_private; + uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts, + (void **)ops, nb_ops, NULL); + q_data->queue_stats.dequeued_count += nb_dequeued; + + return nb_dequeued; +} + +/* Dequeue encode burst */ +static uint16_t +dequeue_enc_ops(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_enc_op **ops, uint16_t nb_ops) +{ + struct turbo_sw_queue *q = q_data->queue_private; + uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts, + (void **)ops, nb_ops, NULL); + q_data->queue_stats.dequeued_count += nb_dequeued; + + return nb_dequeued; +} + +/* Parse 16bit integer from string argument */ +static inline int +parse_u16_arg(const char *key, const char *value, void *extra_args) +{ + uint16_t *u16 = extra_args; + unsigned int long result; + + if ((value == NULL) || (extra_args == NULL)) + return -EINVAL; + errno = 0; + result = strtoul(value, NULL, 0); + if ((result >= (1 << 16)) || (errno != 0)) { + rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key); + return -ERANGE; + } + *u16 = (uint16_t)result; + return 0; +} + +/* Parse parameters used to create device */ +static int +parse_turbo_sw_params(struct turbo_sw_params *params, const char *input_args) +{ + struct rte_kvargs *kvlist = NULL; + int ret = 0; + + if (params == NULL) + return -EINVAL; + if (input_args) { + kvlist = rte_kvargs_parse(input_args, turbo_sw_valid_params); + if (kvlist == NULL) + return -EFAULT; + + ret = 
rte_kvargs_process(kvlist, turbo_sw_valid_params[0], + &parse_u16_arg, ¶ms->queues_num); + if (ret < 0) + goto exit; + + ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[1], + &parse_u16_arg, ¶ms->socket_id); + if (ret < 0) + goto exit; + + if (params->socket_id >= RTE_MAX_NUMA_NODES) { + rte_bbdev_log(ERR, "Invalid socket, must be < %u", + RTE_MAX_NUMA_NODES); + goto exit; + } + } + +exit: + if (kvlist) + rte_kvargs_free(kvlist); + return ret; +} + +/* Create device */ +static int +turbo_sw_bbdev_create(struct rte_vdev_device *vdev, + struct turbo_sw_params *init_params) +{ + struct rte_bbdev *bbdev; + const char *name = rte_vdev_device_name(vdev); + + bbdev = rte_bbdev_allocate(name); + if (bbdev == NULL) + return -ENODEV; + + bbdev->data->dev_private = rte_zmalloc_socket(name, + sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE, + init_params->socket_id); + if (bbdev->data->dev_private == NULL) { + rte_bbdev_release(bbdev); + return -ENOMEM; + } + + bbdev->dev_ops = &pmd_ops; + bbdev->device = &vdev->device; + bbdev->data->socket_id = init_params->socket_id; + bbdev->intr_handle = NULL; + + /* register rx/tx burst functions for data path */ + bbdev->dequeue_enc_ops = dequeue_enc_ops; + bbdev->dequeue_dec_ops = dequeue_dec_ops; + bbdev->enqueue_enc_ops = enqueue_enc_ops; + bbdev->enqueue_dec_ops = enqueue_dec_ops; + ((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues = + init_params->queues_num; + + return 0; +} + +/* Initialise device */ +static int +turbo_sw_bbdev_probe(struct rte_vdev_device *vdev) +{ + struct turbo_sw_params init_params = { + rte_socket_id(), + RTE_BBDEV_DEFAULT_MAX_NB_QUEUES + }; + const char *name; + const char *input_args; + + if (vdev == NULL) + return -EINVAL; + + name = rte_vdev_device_name(vdev); + if (name == NULL) + return -EINVAL; + input_args = rte_vdev_device_args(vdev); + parse_turbo_sw_params(&init_params, input_args); + + rte_bbdev_log_debug( + "Initialising %s on NUMA node %d with max queues: %d\n", + name, init_params.socket_id, init_params.queues_num); + + return turbo_sw_bbdev_create(vdev, &init_params); +} + +/* Uninitialise device */ +static int +turbo_sw_bbdev_remove(struct rte_vdev_device *vdev) +{ + struct rte_bbdev *bbdev; + const char *name; + + if (vdev == NULL) + return -EINVAL; + + name = rte_vdev_device_name(vdev); + if (name == NULL) + return -EINVAL; + + bbdev = rte_bbdev_get_named_dev(name); + if (bbdev == NULL) + return -EINVAL; + + rte_free(bbdev->data->dev_private); + + return rte_bbdev_release(bbdev); +} + +static struct rte_vdev_driver bbdev_turbo_sw_pmd_drv = { + .probe = turbo_sw_bbdev_probe, + .remove = turbo_sw_bbdev_remove +}; + +RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_turbo_sw_pmd_drv); +RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME, + TURBO_SW_MAX_NB_QUEUES_ARG"= " + TURBO_SW_SOCKET_ID_ARG"="); +RTE_PMD_REGISTER_ALIAS(DRIVER_NAME, turbo_sw); + +RTE_INIT(turbo_sw_bbdev_init_log) +{ + bbdev_turbo_sw_logtype = rte_log_register("pmd.bb.turbo_sw"); + if (bbdev_turbo_sw_logtype >= 0) + rte_log_set_level(bbdev_turbo_sw_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/baseband/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map b/drivers/baseband/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map new file mode 100644 index 00000000..58b94270 --- /dev/null +++ b/drivers/baseband/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map @@ -0,0 +1,3 @@ +DPDK_18.02 { + local: *; +}; diff --git a/drivers/bbdev/Makefile b/drivers/bbdev/Makefile deleted file mode 100644 index 4ec83b0a..00000000 --- a/drivers/bbdev/Makefile +++ /dev/null @@ -1,14 
+0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2017 Intel Corporation - -include $(RTE_SDK)/mk/rte.vars.mk - -core-libs := librte_eal librte_mbuf librte_mempool librte_ring -core-libs += librte_bbdev librte_kvargs librte_cfgfile - -DIRS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL) += null -DEPDIRS-null = $(core-libs) -DIRS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW) += turbo_sw -DEPDIRS-turbo_sw = $(core-libs) - -include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/drivers/bbdev/null/Makefile b/drivers/bbdev/null/Makefile deleted file mode 100644 index f885a97b..00000000 --- a/drivers/bbdev/null/Makefile +++ /dev/null @@ -1,25 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2017 Intel Corporation - -include $(RTE_SDK)/mk/rte.vars.mk -# library name -LIB = librte_pmd_bbdev_null.a - -# build flags -CFLAGS += -DALLOW_EXPERIMENTAL_API -CFLAGS += -O3 -CFLAGS += $(WERROR_FLAGS) -LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -lrte_kvargs -LDLIBS += -lrte_bbdev -LDLIBS += -lrte_bus_vdev - -# versioning export map -EXPORT_MAP := rte_pmd_bbdev_null_version.map - -# library version -LIBABIVER := 1 - -# library source files -SRCS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL) += bbdev_null.c - -include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/bbdev/null/bbdev_null.c b/drivers/bbdev/null/bbdev_null.c deleted file mode 100644 index 6bc84917..00000000 --- a/drivers/bbdev/null/bbdev_null.c +++ /dev/null @@ -1,356 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017 Intel Corporation - */ - -#include - -#include -#include -#include -#include -#include - -#include -#include - -#define DRIVER_NAME bbdev_null - -/* NULL BBDev logging ID */ -static int bbdev_null_logtype; - -/* Helper macro for logging */ -#define rte_bbdev_log(level, fmt, ...) \ - rte_log(RTE_LOG_ ## level, bbdev_null_logtype, fmt "\n", ##__VA_ARGS__) - -#define rte_bbdev_log_debug(fmt, ...) 
\ - rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \ - ##__VA_ARGS__) - -/* Initialisation params structure that can be used by null BBDEV driver */ -struct bbdev_null_params { - int socket_id; /*< Null BBDEV socket */ - uint16_t queues_num; /*< Null BBDEV queues number */ -}; - -/* Accecptable params for null BBDEV devices */ -#define BBDEV_NULL_MAX_NB_QUEUES_ARG "max_nb_queues" -#define BBDEV_NULL_SOCKET_ID_ARG "socket_id" - -static const char * const bbdev_null_valid_params[] = { - BBDEV_NULL_MAX_NB_QUEUES_ARG, - BBDEV_NULL_SOCKET_ID_ARG -}; - -/* private data structure */ -struct bbdev_private { - unsigned int max_nb_queues; /**< Max number of queues */ -}; - -/* queue */ -struct bbdev_queue { - struct rte_ring *processed_pkts; /* Ring for processed packets */ -} __rte_cache_aligned; - -/* Get device info */ -static void -info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info) -{ - struct bbdev_private *internals = dev->data->dev_private; - - static const struct rte_bbdev_op_cap bbdev_capabilities[] = { - RTE_BBDEV_END_OF_CAPABILITIES_LIST(), - }; - - static struct rte_bbdev_queue_conf default_queue_conf = { - .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT, - }; - - default_queue_conf.socket = dev->data->socket_id; - - dev_info->driver_name = RTE_STR(DRIVER_NAME); - dev_info->max_num_queues = internals->max_nb_queues; - dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT; - dev_info->hardware_accelerated = false; - dev_info->max_queue_priority = 0; - dev_info->default_queue_conf = default_queue_conf; - dev_info->capabilities = bbdev_capabilities; - dev_info->cpu_flag_reqs = NULL; - dev_info->min_alignment = 0; - - rte_bbdev_log_debug("got device info from %u", dev->data->dev_id); -} - -/* Release queue */ -static int -q_release(struct rte_bbdev *dev, uint16_t q_id) -{ - struct bbdev_queue *q = dev->data->queues[q_id].queue_private; - - if (q != NULL) { - rte_ring_free(q->processed_pkts); - rte_free(q); - dev->data->queues[q_id].queue_private = NULL; - } - - rte_bbdev_log_debug("released device queue %u:%u", - dev->data->dev_id, q_id); - return 0; -} - -/* Setup a queue */ -static int -q_setup(struct rte_bbdev *dev, uint16_t q_id, - const struct rte_bbdev_queue_conf *queue_conf) -{ - struct bbdev_queue *q; - char ring_name[RTE_RING_NAMESIZE]; - snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME) "%u:%u", - dev->data->dev_id, q_id); - - /* Allocate the queue data structure. 
*/ - q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q), - RTE_CACHE_LINE_SIZE, queue_conf->socket); - if (q == NULL) { - rte_bbdev_log(ERR, "Failed to allocate queue memory"); - return -ENOMEM; - } - - q->processed_pkts = rte_ring_create(ring_name, queue_conf->queue_size, - queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ); - if (q->processed_pkts == NULL) { - rte_bbdev_log(ERR, "Failed to create ring"); - goto free_q; - } - - dev->data->queues[q_id].queue_private = q; - rte_bbdev_log_debug("setup device queue %s", ring_name); - return 0; - -free_q: - rte_free(q); - return -EFAULT; -} - -static const struct rte_bbdev_ops pmd_ops = { - .info_get = info_get, - .queue_setup = q_setup, - .queue_release = q_release -}; - -/* Enqueue decode burst */ -static uint16_t -enqueue_dec_ops(struct rte_bbdev_queue_data *q_data, - struct rte_bbdev_dec_op **ops, uint16_t nb_ops) -{ - struct bbdev_queue *q = q_data->queue_private; - uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts, - (void **)ops, nb_ops, NULL); - - q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued; - q_data->queue_stats.enqueued_count += nb_enqueued; - - return nb_enqueued; -} - -/* Enqueue encode burst */ -static uint16_t -enqueue_enc_ops(struct rte_bbdev_queue_data *q_data, - struct rte_bbdev_enc_op **ops, uint16_t nb_ops) -{ - struct bbdev_queue *q = q_data->queue_private; - uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts, - (void **)ops, nb_ops, NULL); - - q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued; - q_data->queue_stats.enqueued_count += nb_enqueued; - - return nb_enqueued; -} - -/* Dequeue decode burst */ -static uint16_t -dequeue_dec_ops(struct rte_bbdev_queue_data *q_data, - struct rte_bbdev_dec_op **ops, uint16_t nb_ops) -{ - struct bbdev_queue *q = q_data->queue_private; - uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts, - (void **)ops, nb_ops, NULL); - q_data->queue_stats.dequeued_count += nb_dequeued; - - return nb_dequeued; -} - -/* Dequeue encode burst */ -static uint16_t -dequeue_enc_ops(struct rte_bbdev_queue_data *q_data, - struct rte_bbdev_enc_op **ops, uint16_t nb_ops) -{ - struct bbdev_queue *q = q_data->queue_private; - uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts, - (void **)ops, nb_ops, NULL); - q_data->queue_stats.dequeued_count += nb_dequeued; - - return nb_dequeued; -} - -/* Parse 16bit integer from string argument */ -static inline int -parse_u16_arg(const char *key, const char *value, void *extra_args) -{ - uint16_t *u16 = extra_args; - unsigned int long result; - - if ((value == NULL) || (extra_args == NULL)) - return -EINVAL; - errno = 0; - result = strtoul(value, NULL, 0); - if ((result >= (1 << 16)) || (errno != 0)) { - rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key); - return -ERANGE; - } - *u16 = (uint16_t)result; - return 0; -} - -/* Parse parameters used to create device */ -static int -parse_bbdev_null_params(struct bbdev_null_params *params, - const char *input_args) -{ - struct rte_kvargs *kvlist = NULL; - int ret = 0; - - if (params == NULL) - return -EINVAL; - if (input_args) { - kvlist = rte_kvargs_parse(input_args, bbdev_null_valid_params); - if (kvlist == NULL) - return -EFAULT; - - ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[0], - &parse_u16_arg, ¶ms->queues_num); - if (ret < 0) - goto exit; - - ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[1], - &parse_u16_arg, ¶ms->socket_id); - if (ret < 0) - goto exit; - - if (params->socket_id >= RTE_MAX_NUMA_NODES) { - 
rte_bbdev_log(ERR, "Invalid socket, must be < %u", - RTE_MAX_NUMA_NODES); - goto exit; - } - } - -exit: - if (kvlist) - rte_kvargs_free(kvlist); - return ret; -} - -/* Create device */ -static int -null_bbdev_create(struct rte_vdev_device *vdev, - struct bbdev_null_params *init_params) -{ - struct rte_bbdev *bbdev; - const char *name = rte_vdev_device_name(vdev); - - bbdev = rte_bbdev_allocate(name); - if (bbdev == NULL) - return -ENODEV; - - bbdev->data->dev_private = rte_zmalloc_socket(name, - sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE, - init_params->socket_id); - if (bbdev->data->dev_private == NULL) { - rte_bbdev_release(bbdev); - return -ENOMEM; - } - - bbdev->dev_ops = &pmd_ops; - bbdev->device = &vdev->device; - bbdev->data->socket_id = init_params->socket_id; - bbdev->intr_handle = NULL; - - /* register rx/tx burst functions for data path */ - bbdev->dequeue_enc_ops = dequeue_enc_ops; - bbdev->dequeue_dec_ops = dequeue_dec_ops; - bbdev->enqueue_enc_ops = enqueue_enc_ops; - bbdev->enqueue_dec_ops = enqueue_dec_ops; - ((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues = - init_params->queues_num; - - return 0; -} - -/* Initialise device */ -static int -null_bbdev_probe(struct rte_vdev_device *vdev) -{ - struct bbdev_null_params init_params = { - rte_socket_id(), - RTE_BBDEV_DEFAULT_MAX_NB_QUEUES - }; - const char *name; - const char *input_args; - - if (vdev == NULL) - return -EINVAL; - - name = rte_vdev_device_name(vdev); - if (name == NULL) - return -EINVAL; - - input_args = rte_vdev_device_args(vdev); - parse_bbdev_null_params(&init_params, input_args); - - rte_bbdev_log_debug("Init %s on NUMA node %d with max queues: %d", - name, init_params.socket_id, init_params.queues_num); - - return null_bbdev_create(vdev, &init_params); -} - -/* Uninitialise device */ -static int -null_bbdev_remove(struct rte_vdev_device *vdev) -{ - struct rte_bbdev *bbdev; - const char *name; - - if (vdev == NULL) - return -EINVAL; - - name = rte_vdev_device_name(vdev); - if (name == NULL) - return -EINVAL; - - bbdev = rte_bbdev_get_named_dev(name); - if (bbdev == NULL) - return -EINVAL; - - rte_free(bbdev->data->dev_private); - - return rte_bbdev_release(bbdev); -} - -static struct rte_vdev_driver bbdev_null_pmd_drv = { - .probe = null_bbdev_probe, - .remove = null_bbdev_remove -}; - -RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_null_pmd_drv); -RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME, - BBDEV_NULL_MAX_NB_QUEUES_ARG"= " - BBDEV_NULL_SOCKET_ID_ARG"="); - -RTE_INIT(null_bbdev_init_log); -static void -null_bbdev_init_log(void) -{ - bbdev_null_logtype = rte_log_register("pmd.bb.null"); - if (bbdev_null_logtype >= 0) - rte_log_set_level(bbdev_null_logtype, RTE_LOG_NOTICE); -} diff --git a/drivers/bbdev/null/rte_pmd_bbdev_null_version.map b/drivers/bbdev/null/rte_pmd_bbdev_null_version.map deleted file mode 100644 index 58b94270..00000000 --- a/drivers/bbdev/null/rte_pmd_bbdev_null_version.map +++ /dev/null @@ -1,3 +0,0 @@ -DPDK_18.02 { - local: *; -}; diff --git a/drivers/bbdev/turbo_sw/Makefile b/drivers/bbdev/turbo_sw/Makefile deleted file mode 100644 index 79eb5547..00000000 --- a/drivers/bbdev/turbo_sw/Makefile +++ /dev/null @@ -1,42 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2017 Intel Corporation - -include $(RTE_SDK)/mk/rte.vars.mk - -ifeq ($(FLEXRAN_SDK),) -$(error "Please define FLEXRAN_SDK environment variable") -endif - -# library name -LIB = librte_pmd_bbdev_turbo_sw.a - -# build flags -CFLAGS += -DALLOW_EXPERIMENTAL_API -CFLAGS += -O3 -CFLAGS += 
$(WERROR_FLAGS) -LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -lrte_kvargs -LDLIBS += -lrte_bbdev -LDLIBS += -lrte_bus_vdev - -# versioning export map -EXPORT_MAP := rte_pmd_bbdev_turbo_sw_version.map - -# external library dependencies -CFLAGS += -I$(FLEXRAN_SDK)/lib_common -CFLAGS += -I$(FLEXRAN_SDK)/lib_turbo -CFLAGS += -I$(FLEXRAN_SDK)/lib_crc -CFLAGS += -I$(FLEXRAN_SDK)/lib_rate_matching - -LDLIBS += -L$(FLEXRAN_SDK)/lib_crc -lcrc -LDLIBS += -L$(FLEXRAN_SDK)/lib_turbo -lturbo -LDLIBS += -L$(FLEXRAN_SDK)/lib_rate_matching -lrate_matching -LDLIBS += -L$(FLEXRAN_SDK)/lib_common -lcommon -LDLIBS += -lstdc++ -lirc -limf -lipps - -# library version -LIBABIVER := 1 - -# library source files -SRCS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW) += bbdev_turbo_software.c - -include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/bbdev/turbo_sw/bbdev_turbo_software.c b/drivers/bbdev/turbo_sw/bbdev_turbo_software.c deleted file mode 100644 index 302abf5c..00000000 --- a/drivers/bbdev/turbo_sw/bbdev_turbo_software.c +++ /dev/null @@ -1,1217 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017 Intel Corporation - */ - -#include - -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include - -#define DRIVER_NAME turbo_sw - -/* Turbo SW PMD logging ID */ -static int bbdev_turbo_sw_logtype; - -/* Helper macro for logging */ -#define rte_bbdev_log(level, fmt, ...) \ - rte_log(RTE_LOG_ ## level, bbdev_turbo_sw_logtype, fmt "\n", \ - ##__VA_ARGS__) - -#define rte_bbdev_log_debug(fmt, ...) \ - rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \ - ##__VA_ARGS__) - -/* Number of columns in sub-block interleaver (36.212, section 5.1.4.1.1) */ -#define C_SUBBLOCK (32) -#define MAX_TB_SIZE (391656) -#define MAX_CB_SIZE (6144) -#define MAX_KW (18528) - -/* private data structure */ -struct bbdev_private { - unsigned int max_nb_queues; /**< Max number of queues */ -}; - -/* Initialisation params structure that can be used by Turbo SW driver */ -struct turbo_sw_params { - int socket_id; /**< Turbo SW device socket */ - uint16_t queues_num; /**< Turbo SW device queues number */ -}; - -/* Acceptable params for Turbo SW devices */ -#define TURBO_SW_MAX_NB_QUEUES_ARG "max_nb_queues" -#define TURBO_SW_SOCKET_ID_ARG "socket_id" - -static const char * const turbo_sw_valid_params[] = { - TURBO_SW_MAX_NB_QUEUES_ARG, - TURBO_SW_SOCKET_ID_ARG -}; - -/* queue */ -struct turbo_sw_queue { - /* Ring for processed (encoded/decoded) operations which are ready to - * be dequeued.
- */ - struct rte_ring *processed_pkts; - /* Stores input for turbo encoder (used when CRC attachment is - * performed) - */ - uint8_t *enc_in; - /* Stores output from turbo encoder */ - uint8_t *enc_out; - /* Alpha gamma buf for bblib_turbo_decoder() function */ - int8_t *ag; - /* Temp buf for bblib_turbo_decoder() function */ - uint16_t *code_block; - /* Input buf for bblib_rate_dematching_lte() function */ - uint8_t *deint_input; - /* Output buf for bblib_rate_dematching_lte() function */ - uint8_t *deint_output; - /* Output buf for bblib_turbodec_adapter_lte() function */ - uint8_t *adapter_output; - /* Operation type of this queue */ - enum rte_bbdev_op_type type; -} __rte_cache_aligned; - -/* Calculate index based on Table 5.1.3-3 from TS 36.212 */ -static inline int32_t -compute_idx(uint16_t k) -{ - int32_t result = 0; - - if (k < 40 || k > MAX_CB_SIZE) - return -1; - - if (k > 2048) { - if ((k - 2048) % 64 != 0) - result = -1; - - result = 124 + (k - 2048) / 64; - } else if (k <= 512) { - if ((k - 40) % 8 != 0) - result = -1; - - result = (k - 40) / 8 + 1; - } else if (k <= 1024) { - if ((k - 512) % 16 != 0) - result = -1; - - result = 60 + (k - 512) / 16; - } else { /* 1024 < k <= 2048 */ - if ((k - 1024) % 32 != 0) - result = -1; - - result = 92 + (k - 1024) / 32; - } - - return result; -}
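/*
 * Illustrative worked example of the compute_idx() helper above (values
 * follow Table 5.1.3-3 of 3GPP TS 36.212, which the helper encodes as four
 * piecewise ranges; not part of the driver itself):
 *
 *   compute_idx(40)   == 1     ((40 - 40) / 8 + 1, smallest CB size)
 *   compute_idx(512)  == 60    (end of the 8-bit-step range)
 *   compute_idx(1024) == 92    (60 + (1024 - 512) / 16)
 *   compute_idx(2048) == 124   (92 + (2048 - 1024) / 32)
 *   compute_idx(6144) == 188   (124 + (6144 - 2048) / 64, largest CB size)
 *   compute_idx(39)   == -1    (below the 40-bit minimum, rejected)
 */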
 - -/* Read flag value 0/1 from bitmap */ -static inline bool -check_bit(uint32_t bitmap, uint32_t bitmask) -{ - return bitmap & bitmask; -} - -/* Get device info */ -static void -info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info) -{ - struct bbdev_private *internals = dev->data->dev_private; - - static const struct rte_bbdev_op_cap bbdev_capabilities[] = { - { - .type = RTE_BBDEV_OP_TURBO_DEC, - .cap.turbo_dec = { - .capability_flags = - RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE | - RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN | - RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN | - RTE_BBDEV_TURBO_CRC_TYPE_24B | - RTE_BBDEV_TURBO_EARLY_TERMINATION, - .num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS, - .num_buffers_hard_out = - RTE_BBDEV_MAX_CODE_BLOCKS, - .num_buffers_soft_out = 0, - } - }, - { - .type = RTE_BBDEV_OP_TURBO_ENC, - .cap.turbo_enc = { - .capability_flags = - RTE_BBDEV_TURBO_CRC_24B_ATTACH | - RTE_BBDEV_TURBO_CRC_24A_ATTACH | - RTE_BBDEV_TURBO_RATE_MATCH | - RTE_BBDEV_TURBO_RV_INDEX_BYPASS, - .num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS, - .num_buffers_dst = RTE_BBDEV_MAX_CODE_BLOCKS, - } - }, - RTE_BBDEV_END_OF_CAPABILITIES_LIST() - }; - - static struct rte_bbdev_queue_conf default_queue_conf = { - .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT, - }; - - static const enum rte_cpu_flag_t cpu_flag = RTE_CPUFLAG_SSE4_2; - - default_queue_conf.socket = dev->data->socket_id; - - dev_info->driver_name = RTE_STR(DRIVER_NAME); - dev_info->max_num_queues = internals->max_nb_queues; - dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT; - dev_info->hardware_accelerated = false; - dev_info->max_queue_priority = 0; - dev_info->default_queue_conf = default_queue_conf; - dev_info->capabilities = bbdev_capabilities; - dev_info->cpu_flag_reqs = &cpu_flag; - dev_info->min_alignment = 64; - - rte_bbdev_log_debug("got device info from %u\n", dev->data->dev_id); -} - -/* Release queue */ -static int -q_release(struct rte_bbdev *dev, uint16_t q_id) -{ - struct turbo_sw_queue *q = dev->data->queues[q_id].queue_private; - - if (q != NULL) { - rte_ring_free(q->processed_pkts); - rte_free(q->enc_out); - rte_free(q->enc_in); - rte_free(q->ag); - rte_free(q->code_block); - rte_free(q->deint_input); - rte_free(q->deint_output); - rte_free(q->adapter_output); - rte_free(q); - dev->data->queues[q_id].queue_private = NULL; - } - - rte_bbdev_log_debug("released device queue %u:%u", - dev->data->dev_id, q_id); - return 0; -} - -/* Setup a queue */ -static int -q_setup(struct rte_bbdev *dev, uint16_t q_id, - const struct rte_bbdev_queue_conf *queue_conf) -{ - int ret; - struct turbo_sw_queue *q; - char name[RTE_RING_NAMESIZE]; - - /* Allocate the queue data structure. */ - q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q), - RTE_CACHE_LINE_SIZE, queue_conf->socket); - if (q == NULL) { - rte_bbdev_log(ERR, "Failed to allocate queue memory"); - return -ENOMEM; - } - - /* Allocate memory for encoder output. */ - ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_enc_out%u:%u", - dev->data->dev_id, q_id); - if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { - rte_bbdev_log(ERR, - "Creating queue name for device %u queue %u failed", - dev->data->dev_id, q_id); - return -ENAMETOOLONG; - } - q->enc_out = rte_zmalloc_socket(name, - ((MAX_TB_SIZE >> 3) + 3) * sizeof(*q->enc_out) * 3, - RTE_CACHE_LINE_SIZE, queue_conf->socket); - if (q->enc_out == NULL) { - rte_bbdev_log(ERR, - "Failed to allocate queue memory for %s", name); - goto free_q; - } - - /* Allocate memory for rate matching output. */ - ret = snprintf(name, RTE_RING_NAMESIZE, - RTE_STR(DRIVER_NAME)"_enc_in%u:%u", dev->data->dev_id, - q_id); - if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { - rte_bbdev_log(ERR, - "Creating queue name for device %u queue %u failed", - dev->data->dev_id, q_id); - return -ENAMETOOLONG; - } - q->enc_in = rte_zmalloc_socket(name, - (MAX_CB_SIZE >> 3) * sizeof(*q->enc_in), - RTE_CACHE_LINE_SIZE, queue_conf->socket); - if (q->enc_in == NULL) { - rte_bbdev_log(ERR, - "Failed to allocate queue memory for %s", name); - goto free_q; - } - - /* Allocate memory for Alpha Gamma temp buffer. */ - ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_ag%u:%u", - dev->data->dev_id, q_id); - if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { - rte_bbdev_log(ERR, - "Creating queue name for device %u queue %u failed", - dev->data->dev_id, q_id); - return -ENAMETOOLONG; - } - q->ag = rte_zmalloc_socket(name, - MAX_CB_SIZE * 10 * sizeof(*q->ag), - RTE_CACHE_LINE_SIZE, queue_conf->socket); - if (q->ag == NULL) { - rte_bbdev_log(ERR, - "Failed to allocate queue memory for %s", name); - goto free_q; - } - - /* Allocate memory for code block temp buffer. */ - ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_cb%u:%u", - dev->data->dev_id, q_id); - if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { - rte_bbdev_log(ERR, - "Creating queue name for device %u queue %u failed", - dev->data->dev_id, q_id); - return -ENAMETOOLONG; - } - q->code_block = rte_zmalloc_socket(name, - (6144 >> 3) * sizeof(*q->code_block), - RTE_CACHE_LINE_SIZE, queue_conf->socket); - if (q->code_block == NULL) { - rte_bbdev_log(ERR, - "Failed to allocate queue memory for %s", name); - goto free_q; - } - - /* Allocate memory for Deinterleaver input.
*/ - ret = snprintf(name, RTE_RING_NAMESIZE, - RTE_STR(DRIVER_NAME)"_deint_input%u:%u", - dev->data->dev_id, q_id); - if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { - rte_bbdev_log(ERR, - "Creating queue name for device %u queue %u failed", - dev->data->dev_id, q_id); - return -ENAMETOOLONG; - } - q->deint_input = rte_zmalloc_socket(name, - MAX_KW * sizeof(*q->deint_input), - RTE_CACHE_LINE_SIZE, queue_conf->socket); - if (q->deint_input == NULL) { - rte_bbdev_log(ERR, - "Failed to allocate queue memory for %s", name); - goto free_q; - } - - /* Allocate memory for Deinterleaver output. */ - ret = snprintf(name, RTE_RING_NAMESIZE, - RTE_STR(DRIVER_NAME)"_deint_output%u:%u", - dev->data->dev_id, q_id); - if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { - rte_bbdev_log(ERR, - "Creating queue name for device %u queue %u failed", - dev->data->dev_id, q_id); - return -ENAMETOOLONG; - } - q->deint_output = rte_zmalloc_socket(NULL, - MAX_KW * sizeof(*q->deint_output), - RTE_CACHE_LINE_SIZE, queue_conf->socket); - if (q->deint_output == NULL) { - rte_bbdev_log(ERR, - "Failed to allocate queue memory for %s", name); - goto free_q; - } - - /* Allocate memory for Adapter output. */ - ret = snprintf(name, RTE_RING_NAMESIZE, - RTE_STR(DRIVER_NAME)"_adapter_output%u:%u", - dev->data->dev_id, q_id); - if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { - rte_bbdev_log(ERR, - "Creating queue name for device %u queue %u failed", - dev->data->dev_id, q_id); - return -ENAMETOOLONG; - } - q->adapter_output = rte_zmalloc_socket(NULL, - MAX_CB_SIZE * 6 * sizeof(*q->adapter_output), - RTE_CACHE_LINE_SIZE, queue_conf->socket); - if (q->adapter_output == NULL) { - rte_bbdev_log(ERR, - "Failed to allocate queue memory for %s", name); - goto free_q; - } - - /* Create ring for packets awaiting to be dequeued. */ - ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"%u:%u", - dev->data->dev_id, q_id); - if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) { - rte_bbdev_log(ERR, - "Creating queue name for device %u queue %u failed", - dev->data->dev_id, q_id); - return -ENAMETOOLONG; - } - q->processed_pkts = rte_ring_create(name, queue_conf->queue_size, - queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ); - if (q->processed_pkts == NULL) { - rte_bbdev_log(ERR, "Failed to create ring for %s", name); - goto free_q; - } - - q->type = queue_conf->op_type; - - dev->data->queues[q_id].queue_private = q; - rte_bbdev_log_debug("setup device queue %s", name); - return 0; - -free_q: - rte_ring_free(q->processed_pkts); - rte_free(q->enc_out); - rte_free(q->enc_in); - rte_free(q->ag); - rte_free(q->code_block); - rte_free(q->deint_input); - rte_free(q->deint_output); - rte_free(q->adapter_output); - rte_free(q); - return -EFAULT; -} - -static const struct rte_bbdev_ops pmd_ops = { - .info_get = info_get, - .queue_setup = q_setup, - .queue_release = q_release -}; - -/* Checks if the encoder input buffer is correct. - * Returns 0 if it's valid, -1 otherwise. - */ -static inline int -is_enc_input_valid(const uint16_t k, const int32_t k_idx, - const uint16_t in_length) -{ - if (k_idx < 0) { - rte_bbdev_log(ERR, "K Index is invalid"); - return -1; - } - - if (in_length - (k >> 3) < 0) { - rte_bbdev_log(ERR, - "Mismatch between input length (%u bytes) and K (%u bits)", - in_length, k); - return -1; - } - - if (k > MAX_CB_SIZE) { - rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d", - k, MAX_CB_SIZE); - return -1; - } - - return 0; -} - -/* Checks if the decoder input buffer is correct. 
- * Returns 0 if it's valid, -1 otherwise. - */ -static inline int -is_dec_input_valid(int32_t k_idx, int16_t kw, int16_t in_length) -{ - if (k_idx < 0) { - rte_bbdev_log(ERR, "K index is invalid"); - return -1; - } - - if (in_length - kw < 0) { - rte_bbdev_log(ERR, - "Mismatch between input length (%u) and kw (%u)", - in_length, kw); - return -1; - } - - if (kw > MAX_KW) { - rte_bbdev_log(ERR, "Input length (%u) is too big, max: %d", - kw, MAX_KW); - return -1; - } - - return 0; -} - -static inline void -process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op, - uint8_t cb_idx, uint8_t c, uint16_t k, uint16_t ncb, - uint32_t e, struct rte_mbuf *m_in, struct rte_mbuf *m_out, - uint16_t in_offset, uint16_t out_offset, uint16_t total_left) -{ - int ret; - int16_t k_idx; - uint16_t m; - uint8_t *in, *out0, *out1, *out2, *tmp_out, *rm_out; - struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc; - struct bblib_crc_request crc_req; - struct bblib_turbo_encoder_request turbo_req; - struct bblib_turbo_encoder_response turbo_resp; - struct bblib_rate_match_dl_request rm_req; - struct bblib_rate_match_dl_response rm_resp; - - k_idx = compute_idx(k); - in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset); - - /* CRC24A (for TB) */ - if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH) && - (enc->code_block_mode == 1)) { - ret = is_enc_input_valid(k - 24, k_idx, total_left); - if (ret != 0) { - op->status |= 1 << RTE_BBDEV_DATA_ERROR; - return; - } - /* copy the input to the temporary buffer to be able to extend - * it by 3 CRC bytes - */ - rte_memcpy(q->enc_in, in, (k - 24) >> 3); - crc_req.data = q->enc_in; - crc_req.len = (k - 24) >> 3; - if (bblib_lte_crc24a_gen(&crc_req) == -1) { - op->status |= 1 << RTE_BBDEV_CRC_ERROR; - rte_bbdev_log(ERR, "CRC24a generation failed"); - return; - } - in = q->enc_in; - } else if (enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) { - /* CRC24B */ - ret = is_enc_input_valid(k - 24, k_idx, total_left); - if (ret != 0) { - op->status |= 1 << RTE_BBDEV_DATA_ERROR; - return; - } - /* copy the input to the temporary buffer to be able to extend - * it by 3 CRC bytes - */ - rte_memcpy(q->enc_in, in, (k - 24) >> 3); - crc_req.data = q->enc_in; - crc_req.len = (k - 24) >> 3; - if (bblib_lte_crc24b_gen(&crc_req) == -1) { - op->status |= 1 << RTE_BBDEV_CRC_ERROR; - rte_bbdev_log(ERR, "CRC24b generation failed"); - return; - } - in = q->enc_in; - } else { - ret = is_enc_input_valid(k, k_idx, total_left); - if (ret != 0) { - op->status |= 1 << RTE_BBDEV_DATA_ERROR; - return; - } - } - - /* Turbo encoder */ - - /* Each bit layer output from turbo encoder is (k+4) bits long, i.e. - * input length + 4 tail bits. That's (k/8) + 1 bytes after rounding up. - * So dst_data's length should be 3*(k/8) + 3 bytes. 
- */ - out0 = q->enc_out; - out1 = RTE_PTR_ADD(out0, (k >> 3) + 1); - out2 = RTE_PTR_ADD(out1, (k >> 3) + 1); - - turbo_req.case_id = k_idx; - turbo_req.input_win = in; - turbo_req.length = k >> 3; - turbo_resp.output_win_0 = out0; - turbo_resp.output_win_1 = out1; - turbo_resp.output_win_2 = out2; - if (bblib_turbo_encoder(&turbo_req, &turbo_resp) != 0) { - op->status |= 1 << RTE_BBDEV_DRV_ERROR; - rte_bbdev_log(ERR, "Turbo Encoder failed"); - return; - } - - /* Rate-matching */ - if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) { - /* get output data starting address */ - rm_out = (uint8_t *)rte_pktmbuf_append(m_out, (e >> 3)); - if (rm_out == NULL) { - op->status |= 1 << RTE_BBDEV_DATA_ERROR; - rte_bbdev_log(ERR, - "Too little space in output mbuf"); - return; - } - /* rte_bbdev_op_data.offset can be different than the offset - * of the appended bytes - */ - rm_out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset); - - /* index of current code block */ - rm_req.r = cb_idx; - /* total number of code blocks */ - rm_req.C = c; - /* For DL - 1, UL - 0 */ - rm_req.direction = 1; - /* According to 3gpp 36.212 Spec section 5.1.4.1.2, Nsoft, KMIMO - * and MDL_HARQ are used for Ncb calculation. As Ncb is already - * known we can adjust those parameters - */ - rm_req.Nsoft = ncb * rm_req.C; - rm_req.KMIMO = 1; - rm_req.MDL_HARQ = 1; - /* According to 3gpp 36.212 Spec section 5.1.4.1.2, Nl, Qm and G - * are used for E calculation. As E is already known we can - * adjust those parameters - */ - rm_req.NL = e; - rm_req.Qm = 1; - rm_req.G = rm_req.NL * rm_req.Qm * rm_req.C; - - rm_req.rvidx = enc->rv_index; - rm_req.Kidx = k_idx - 1; - rm_req.nLen = k + 4; - rm_req.tin0 = out0; - rm_req.tin1 = out1; - rm_req.tin2 = out2; - rm_resp.output = rm_out; - rm_resp.OutputLen = (e >> 3); - if (enc->op_flags & RTE_BBDEV_TURBO_RV_INDEX_BYPASS) - rm_req.bypass_rvidx = 1; - else - rm_req.bypass_rvidx = 0; - - if (bblib_rate_match_dl(&rm_req, &rm_resp) != 0) { - op->status |= 1 << RTE_BBDEV_DRV_ERROR; - rte_bbdev_log(ERR, "Rate matching failed"); - return; - } - enc->output.length += rm_resp.OutputLen; - } else { - /* Rate matching is bypassed */ - - /* Completing last byte of out0 (where 4 tail bits are stored) - * by moving first 4 bits from out1 - */ - tmp_out = (uint8_t *) --out1; - *tmp_out = *tmp_out | ((*(tmp_out + 1) & 0xF0) >> 4); - tmp_out++; - /* Shifting out1 data by 4 bits to the left */ - for (m = 0; m < k >> 3; ++m) { - uint8_t *first = tmp_out; - uint8_t second = *(tmp_out + 1); - *first = (*first << 4) | ((second & 0xF0) >> 4); - tmp_out++; - } - /* Shifting out2 data by 8 bits to the left */ - for (m = 0; m < (k >> 3) + 1; ++m) { - *tmp_out = *(tmp_out + 1); - tmp_out++; - } - *tmp_out = 0; - - /* copy shifted output to turbo_enc entity */ - out0 = (uint8_t *)rte_pktmbuf_append(m_out, - (k >> 3) * 3 + 2); - if (out0 == NULL) { - op->status |= 1 << RTE_BBDEV_DATA_ERROR; - rte_bbdev_log(ERR, - "Too little space in output mbuf"); - return; - } - enc->output.length += (k >> 3) * 3 + 2; - /* rte_bbdev_op_data.offset can be different than the - * offset of the appended bytes - */ - out0 = rte_pktmbuf_mtod_offset(m_out, uint8_t *, - out_offset); - rte_memcpy(out0, q->enc_out, (k >> 3) * 3 + 2); - } -}
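/*
 * A sketch (not from the original source) of the arithmetic behind the
 * back-computed rate-matching parameters in process_enc_cb() above,
 * assuming the formulas of 3GPP TS 36.212 section 5.1.4.1.2 with K_C = 1
 * and M_limit >= 1:
 *
 *   NIR = floor(Nsoft / (K_C * KMIMO * min(MDL_HARQ, M_limit)))
 *       = ncb * C                      (Nsoft = ncb * C, KMIMO = MDL_HARQ = 1)
 *   Ncb = min(floor(NIR / C), Kw) = min(ncb, Kw) = ncb
 *
 *   G' = G / (NL * Qm) = C             (G = NL * Qm * C, NL = e, Qm = 1)
 *   gamma = G' mod C = 0, so every code block r takes the floor branch:
 *   E = NL * Qm * floor(G' / C) = e
 *
 * i.e. the SDK's internal Ncb and E calculations reproduce the ncb and e
 * values already supplied with the operation.
 */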
 - -static inline void -enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op) -{ - uint8_t c, r, crc24_bits = 0; - uint16_t k, ncb; - uint32_t e; - struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc; - uint16_t in_offset = enc->input.offset; - uint16_t out_offset = enc->output.offset; - struct rte_mbuf *m_in = enc->input.data; - struct rte_mbuf *m_out = enc->output.data; - uint16_t total_left = enc->input.length; - - /* Clear op status */ - op->status = 0; - - if (total_left > MAX_TB_SIZE >> 3) { - rte_bbdev_log(ERR, "TB size (%u) is too big, max: %d", - total_left, MAX_TB_SIZE); - op->status = 1 << RTE_BBDEV_DATA_ERROR; - return; - } - - if (m_in == NULL || m_out == NULL) { - rte_bbdev_log(ERR, "Invalid mbuf pointer"); - op->status = 1 << RTE_BBDEV_DATA_ERROR; - return; - } - - if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) || - (enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH)) - crc24_bits = 24; - - if (enc->code_block_mode == 0) { /* For Transport Block mode */ - c = enc->tb_params.c; - r = enc->tb_params.r; - } else { /* For Code Block mode */ - c = 1; - r = 0; - } - - while (total_left > 0 && r < c) { - if (enc->code_block_mode == 0) { - k = (r < enc->tb_params.c_neg) ? - enc->tb_params.k_neg : enc->tb_params.k_pos; - ncb = (r < enc->tb_params.c_neg) ? - enc->tb_params.ncb_neg : enc->tb_params.ncb_pos; - e = (r < enc->tb_params.cab) ? - enc->tb_params.ea : enc->tb_params.eb; - } else { - k = enc->cb_params.k; - ncb = enc->cb_params.ncb; - e = enc->cb_params.e; - } - - process_enc_cb(q, op, r, c, k, ncb, e, m_in, - m_out, in_offset, out_offset, total_left); - /* Update total_left */ - total_left -= (k - crc24_bits) >> 3; - /* Update offsets for next CBs (if exist) */ - in_offset += (k - crc24_bits) >> 3; - if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) - out_offset += e >> 3; - else - out_offset += (k >> 3) * 3 + 2; - r++; - } - - /* check if all input data was processed */ - if (total_left != 0) { - op->status |= 1 << RTE_BBDEV_DATA_ERROR; - rte_bbdev_log(ERR, - "Mismatch between mbuf length and included CB sizes"); - } -} - -static inline uint16_t -enqueue_enc_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_enc_op **ops, - uint16_t nb_ops) -{ - uint16_t i; - - for (i = 0; i < nb_ops; ++i) - enqueue_enc_one_op(q, ops[i]); - - return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops, - NULL); -} - -/* Remove the padding bytes from a cyclic buffer. - * The input buffer is a data stream wk as described in 3GPP TS 36.212 section - * 5.1.4.1.2 starting from w0 and with length Ncb bytes. - * The output buffer is a data stream wk with pruned padding bytes. Its length - * is 3*D bytes and the order of non-padding bytes is preserved. - */ -static inline void -remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k, - uint16_t ncb) -{ - uint32_t in_idx, out_idx, c_idx; - const uint32_t d = k + 4; - const uint32_t kw = (ncb / 3); - const uint32_t nd = kw - d; - const uint32_t r_subblock = kw / C_SUBBLOCK; - /* Inter-column permutation pattern */ - const uint32_t P[C_SUBBLOCK] = {0, 16, 8, 24, 4, 20, 12, 28, 2, 18, 10, - 26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13, 29, 3, 19, - 11, 27, 7, 23, 15, 31}; - in_idx = 0; - out_idx = 0; - - /* The padding bytes are at the first Nd positions in the first row. */ - for (c_idx = 0; in_idx < kw; in_idx += r_subblock, ++c_idx) { - if (P[c_idx] < nd) { - rte_memcpy(&out[out_idx], &in[in_idx + 1], - r_subblock - 1); - out_idx += r_subblock - 1; - } else { - rte_memcpy(&out[out_idx], &in[in_idx], r_subblock); - out_idx += r_subblock; - } - } - - /* First and second parity bits sub-blocks are interlaced.
*/ - for (c_idx = 0; in_idx < ncb - 2 * r_subblock; - in_idx += 2 * r_subblock, ++c_idx) { - uint32_t second_block_c_idx = P[c_idx]; - uint32_t third_block_c_idx = P[c_idx] + 1; - - if (second_block_c_idx < nd && third_block_c_idx < nd) { - rte_memcpy(&out[out_idx], &in[in_idx + 2], - 2 * r_subblock - 2); - out_idx += 2 * r_subblock - 2; - } else if (second_block_c_idx >= nd && - third_block_c_idx >= nd) { - rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock); - out_idx += 2 * r_subblock; - } else if (second_block_c_idx < nd) { - out[out_idx++] = in[in_idx]; - rte_memcpy(&out[out_idx], &in[in_idx + 2], - 2 * r_subblock - 2); - out_idx += 2 * r_subblock - 2; - } else { - rte_memcpy(&out[out_idx], &in[in_idx + 1], - 2 * r_subblock - 1); - out_idx += 2 * r_subblock - 1; - } - } - - /* Last interlaced row is different - its last byte is the only padding - * byte. We can have from 2 up to 26 padding bytes (Nd) per sub-block. - * After interlacing the 1st and 2nd parity sub-blocks we can have 0, 1 - * or 2 padding bytes each time we make a step of 2 * R_SUBBLOCK bytes - * (moving to another column). 2nd parity sub-block uses the same - * inter-column permutation pattern as the systematic and 1st parity - * sub-blocks but it adds '1' to the resulting index and calculates the - * modulus of the result and Kw. Last column is mapped to itself (id 31) - * so the first byte taken from the 2nd parity sub-block will be the - * 32nd (31+1) byte, then 64th etc. (step is C_SUBBLOCK == 32) and the - * last byte will be the first byte from the sub-block: - * (32 + 32 * (R_SUBBLOCK-1)) % Kw == Kw % Kw == 0. Nd can't be smaller - * than 2 so we know that bytes with ids 0 and 1 must be the padding - * bytes. The bytes from the 1st parity sub-block are the bytes from the - * 31st column - Nd can't be greater than 26 so we are sure that there - * are no padding bytes in 31st column. - */ - rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock - 1); -} - -static inline void -move_padding_bytes(const uint8_t *in, uint8_t *out, uint16_t k, - uint16_t ncb) -{ - uint16_t d = k + 4; - uint16_t kpi = ncb / 3; - uint16_t nd = kpi - d; - - rte_memcpy(&out[nd], in, d); - rte_memcpy(&out[nd + kpi + 64], &in[kpi], d); - rte_memcpy(&out[nd + 2 * (kpi + 64)], &in[2 * kpi], d); -} - -static inline void -process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op, - uint8_t c, uint16_t k, uint16_t kw, struct rte_mbuf *m_in, - struct rte_mbuf *m_out, uint16_t in_offset, uint16_t out_offset, - bool check_crc_24b, uint16_t total_left) -{ - int ret; - int32_t k_idx; - int32_t iter_cnt; - uint8_t *in, *out, *adapter_input; - int32_t ncb, ncb_without_null; - struct bblib_turbo_adapter_ul_response adapter_resp; - struct bblib_turbo_adapter_ul_request adapter_req; - struct bblib_turbo_decoder_request turbo_req; - struct bblib_turbo_decoder_response turbo_resp; - struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec; - - k_idx = compute_idx(k); - - ret = is_dec_input_valid(k_idx, kw, total_left); - if (ret != 0) { - op->status |= 1 << RTE_BBDEV_DATA_ERROR; - return; - } - - in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset); - ncb = kw; - ncb_without_null = (k + 4) * 3; - - if (check_bit(dec->op_flags, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE)) { - struct bblib_deinterleave_ul_request deint_req; - struct bblib_deinterleave_ul_response deint_resp; - - /* SW decoder accepts only a circular buffer without NULL bytes - * so the input needs to be converted. 
- */ - remove_nulls_from_circular_buf(in, q->deint_input, k, ncb); - - deint_req.pharqbuffer = q->deint_input; - deint_req.ncb = ncb_without_null; - deint_resp.pinteleavebuffer = q->deint_output; - bblib_deinterleave_ul(&deint_req, &deint_resp); - } else - move_padding_bytes(in, q->deint_output, k, ncb); - - adapter_input = q->deint_output; - - if (dec->op_flags & RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN) - adapter_req.isinverted = 1; - else if (dec->op_flags & RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN) - adapter_req.isinverted = 0; - else { - op->status |= 1 << RTE_BBDEV_DRV_ERROR; - rte_bbdev_log(ERR, "LLR format wasn't specified"); - return; - } - - adapter_req.ncb = ncb_without_null; - adapter_req.pinteleavebuffer = adapter_input; - adapter_resp.pharqout = q->adapter_output; - bblib_turbo_adapter_ul(&adapter_req, &adapter_resp); - - out = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3)); - if (out == NULL) { - op->status |= 1 << RTE_BBDEV_DATA_ERROR; - rte_bbdev_log(ERR, "Too little space in output mbuf"); - return; - } - /* rte_bbdev_op_data.offset can be different than the offset of the - * appended bytes - */ - out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset); - if (check_crc_24b) - turbo_req.c = c + 1; - else - turbo_req.c = c; - turbo_req.input = (int8_t *)q->adapter_output; - turbo_req.k = k; - turbo_req.k_idx = k_idx; - turbo_req.max_iter_num = dec->iter_max; - turbo_resp.ag_buf = q->ag; - turbo_resp.cb_buf = q->code_block; - turbo_resp.output = out; - iter_cnt = bblib_turbo_decoder(&turbo_req, &turbo_resp); - dec->hard_output.length += (k >> 3); - - if (iter_cnt > 0) { - /* Temporary solution for returned iter_count from SDK */ - iter_cnt = (iter_cnt - 1) / 2; - dec->iter_count = RTE_MAX(iter_cnt, dec->iter_count); - } else { - op->status |= 1 << RTE_BBDEV_DATA_ERROR; - rte_bbdev_log(ERR, "Turbo Decoder failed"); - return; - } -} - -static inline void -enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op) -{ - uint8_t c, r = 0; - uint16_t kw, k = 0; - struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec; - struct rte_mbuf *m_in = dec->input.data; - struct rte_mbuf *m_out = dec->hard_output.data; - uint16_t in_offset = dec->input.offset; - uint16_t total_left = dec->input.length; - uint16_t out_offset = dec->hard_output.offset; - - /* Clear op status */ - op->status = 0; - - if (m_in == NULL || m_out == NULL) { - rte_bbdev_log(ERR, "Invalid mbuf pointer"); - op->status = 1 << RTE_BBDEV_DATA_ERROR; - return; - } - - if (dec->code_block_mode == 0) { /* For Transport Block mode */ - c = dec->tb_params.c; - } else { /* For Code Block mode */ - k = dec->cb_params.k; - c = 1; - } - - while (total_left > 0) { - if (dec->code_block_mode == 0) - k = (r < dec->tb_params.c_neg) ? - dec->tb_params.k_neg : dec->tb_params.k_pos; - - /* Calculates circular buffer size (Kw). - * According to 3gpp 36.212 section 5.1.4.2 - * Kw = 3 * Kpi, - * where: - * Kpi = nCol * nRow - * where nCol is 32 and nRow can be calculated from: - * D =< nCol * nRow - * where D is the size of each output from turbo encoder block - * (k + 4). - */ - kw = RTE_ALIGN_CEIL(k + 4, C_SUBBLOCK) * 3; - - process_dec_cb(q, op, c, k, kw, m_in, m_out, in_offset, - out_offset, check_bit(dec->op_flags, - RTE_BBDEV_TURBO_CRC_TYPE_24B), total_left); - /* As a result of decoding we get Code Block with included - * decoded CRC24 at the end of Code Block. Type of CRC24 is - * specified by flag. 
- */ - - /* Update total_left */ - total_left -= kw; - /* Update offsets for next CBs (if exist) */ - in_offset += kw; - out_offset += (k >> 3); - r++; - } - if (total_left != 0) { - op->status |= 1 << RTE_BBDEV_DATA_ERROR; - rte_bbdev_log(ERR, - "Mismatch between mbuf length and included Circular buffer sizes"); - } -} - -static inline uint16_t -enqueue_dec_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_dec_op **ops, - uint16_t nb_ops) -{ - uint16_t i; - - for (i = 0; i < nb_ops; ++i) - enqueue_dec_one_op(q, ops[i]); - - return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops, - NULL); -} - -/* Enqueue burst */ -static uint16_t -enqueue_enc_ops(struct rte_bbdev_queue_data *q_data, - struct rte_bbdev_enc_op **ops, uint16_t nb_ops) -{ - void *queue = q_data->queue_private; - struct turbo_sw_queue *q = queue; - uint16_t nb_enqueued = 0; - - nb_enqueued = enqueue_enc_all_ops(q, ops, nb_ops); - - q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued; - q_data->queue_stats.enqueued_count += nb_enqueued; - - return nb_enqueued; -} - -/* Enqueue burst */ -static uint16_t -enqueue_dec_ops(struct rte_bbdev_queue_data *q_data, - struct rte_bbdev_dec_op **ops, uint16_t nb_ops) -{ - void *queue = q_data->queue_private; - struct turbo_sw_queue *q = queue; - uint16_t nb_enqueued = 0; - - nb_enqueued = enqueue_dec_all_ops(q, ops, nb_ops); - - q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued; - q_data->queue_stats.enqueued_count += nb_enqueued; - - return nb_enqueued; -} - -/* Dequeue decode burst */ -static uint16_t -dequeue_dec_ops(struct rte_bbdev_queue_data *q_data, - struct rte_bbdev_dec_op **ops, uint16_t nb_ops) -{ - struct turbo_sw_queue *q = q_data->queue_private; - uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts, - (void **)ops, nb_ops, NULL); - q_data->queue_stats.dequeued_count += nb_dequeued; - - return nb_dequeued; -} - -/* Dequeue encode burst */ -static uint16_t -dequeue_enc_ops(struct rte_bbdev_queue_data *q_data, - struct rte_bbdev_enc_op **ops, uint16_t nb_ops) -{ - struct turbo_sw_queue *q = q_data->queue_private; - uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts, - (void **)ops, nb_ops, NULL); - q_data->queue_stats.dequeued_count += nb_dequeued; - - return nb_dequeued; -} - -/* Parse 16bit integer from string argument */ -static inline int -parse_u16_arg(const char *key, const char *value, void *extra_args) -{ - uint16_t *u16 = extra_args; - unsigned int long result; - - if ((value == NULL) || (extra_args == NULL)) - return -EINVAL; - errno = 0; - result = strtoul(value, NULL, 0); - if ((result >= (1 << 16)) || (errno != 0)) { - rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key); - return -ERANGE; - } - *u16 = (uint16_t)result; - return 0; -} - -/* Parse parameters used to create device */ -static int -parse_turbo_sw_params(struct turbo_sw_params *params, const char *input_args) -{ - struct rte_kvargs *kvlist = NULL; - int ret = 0; - - if (params == NULL) - return -EINVAL; - if (input_args) { - kvlist = rte_kvargs_parse(input_args, turbo_sw_valid_params); - if (kvlist == NULL) - return -EFAULT; - - ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[0], - &parse_u16_arg, ¶ms->queues_num); - if (ret < 0) - goto exit; - - ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[1], - &parse_u16_arg, ¶ms->socket_id); - if (ret < 0) - goto exit; - - if (params->socket_id >= RTE_MAX_NUMA_NODES) { - rte_bbdev_log(ERR, "Invalid socket, must be < %u", - RTE_MAX_NUMA_NODES); - goto exit; - } - } - -exit: - if 
(kvlist) - rte_kvargs_free(kvlist); - return ret; -} - -/* Create device */ -static int -turbo_sw_bbdev_create(struct rte_vdev_device *vdev, - struct turbo_sw_params *init_params) -{ - struct rte_bbdev *bbdev; - const char *name = rte_vdev_device_name(vdev); - - bbdev = rte_bbdev_allocate(name); - if (bbdev == NULL) - return -ENODEV; - - bbdev->data->dev_private = rte_zmalloc_socket(name, - sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE, - init_params->socket_id); - if (bbdev->data->dev_private == NULL) { - rte_bbdev_release(bbdev); - return -ENOMEM; - } - - bbdev->dev_ops = &pmd_ops; - bbdev->device = &vdev->device; - bbdev->data->socket_id = init_params->socket_id; - bbdev->intr_handle = NULL; - - /* register rx/tx burst functions for data path */ - bbdev->dequeue_enc_ops = dequeue_enc_ops; - bbdev->dequeue_dec_ops = dequeue_dec_ops; - bbdev->enqueue_enc_ops = enqueue_enc_ops; - bbdev->enqueue_dec_ops = enqueue_dec_ops; - ((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues = - init_params->queues_num; - - return 0; -} - -/* Initialise device */ -static int -turbo_sw_bbdev_probe(struct rte_vdev_device *vdev) -{ - struct turbo_sw_params init_params = { - rte_socket_id(), - RTE_BBDEV_DEFAULT_MAX_NB_QUEUES - }; - const char *name; - const char *input_args; - - if (vdev == NULL) - return -EINVAL; - - name = rte_vdev_device_name(vdev); - if (name == NULL) - return -EINVAL; - input_args = rte_vdev_device_args(vdev); - parse_turbo_sw_params(&init_params, input_args); - - rte_bbdev_log_debug( - "Initialising %s on NUMA node %d with max queues: %d\n", - name, init_params.socket_id, init_params.queues_num); - - return turbo_sw_bbdev_create(vdev, &init_params); -} - -/* Uninitialise device */ -static int -turbo_sw_bbdev_remove(struct rte_vdev_device *vdev) -{ - struct rte_bbdev *bbdev; - const char *name; - - if (vdev == NULL) - return -EINVAL; - - name = rte_vdev_device_name(vdev); - if (name == NULL) - return -EINVAL; - - bbdev = rte_bbdev_get_named_dev(name); - if (bbdev == NULL) - return -EINVAL; - - rte_free(bbdev->data->dev_private); - - return rte_bbdev_release(bbdev); -} - -static struct rte_vdev_driver bbdev_turbo_sw_pmd_drv = { - .probe = turbo_sw_bbdev_probe, - .remove = turbo_sw_bbdev_remove -}; - -RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_turbo_sw_pmd_drv); -RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME, - TURBO_SW_MAX_NB_QUEUES_ARG"= " - TURBO_SW_SOCKET_ID_ARG"="); - -RTE_INIT(null_bbdev_init_log); -static void -null_bbdev_init_log(void) -{ - bbdev_turbo_sw_logtype = rte_log_register("pmd.bb.turbo_sw"); - if (bbdev_turbo_sw_logtype >= 0) - rte_log_set_level(bbdev_turbo_sw_logtype, RTE_LOG_NOTICE); -} diff --git a/drivers/bbdev/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map b/drivers/bbdev/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map deleted file mode 100644 index 58b94270..00000000 --- a/drivers/bbdev/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map +++ /dev/null @@ -1,3 +0,0 @@ -DPDK_18.02 { - local: *; -}; diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index 7ef25939..cea3b55e 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -4,8 +4,12 @@ include $(RTE_SDK)/mk/rte.vars.mk DIRS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += dpaa +ifeq ($(CONFIG_RTE_EAL_VFIO),y) DIRS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc +endif +DIRS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga DIRS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci DIRS-$(CONFIG_RTE_LIBRTE_VDEV_BUS) += vdev +DIRS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/drivers/bus/dpaa/base/fman/fman.c 
b/drivers/bus/dpaa/base/fman/fman.c index bda62e01..bdb70042 100644 --- a/drivers/bus/dpaa/base/fman/fman.c +++ b/drivers/bus/dpaa/base/fman/fman.c @@ -300,7 +300,7 @@ fman_if_init(const struct device_node *dpa_node) _errno = fman_get_mac_index(regs_addr_host, &__if->__if.mac_idx); if (_errno) { - FMAN_ERR(-EINVAL, "Invalid register address: %lu", + FMAN_ERR(-EINVAL, "Invalid register address: %" PRIx64, regs_addr_host); goto err; } @@ -442,6 +442,7 @@ fman_if_init(const struct device_node *dpa_node) if (!pool_node) { FMAN_ERR(-ENXIO, "%s: bad fsl,bman-buffer-pools\n", dname); + free(bpool); goto err; } pname = pool_node->full_name; @@ -449,6 +450,7 @@ fman_if_init(const struct device_node *dpa_node) prop = of_get_property(pool_node, "fsl,bpid", &proplen); if (!prop) { FMAN_ERR(-EINVAL, "%s: no fsl,bpid\n", pname); + free(bpool); goto err; } assert(proplen == sizeof(*prop)); @@ -502,7 +504,7 @@ fman_if_init(const struct device_node *dpa_node) /* Parsing of the network interface is complete, add it to the list */ DPAA_BUS_LOG(DEBUG, "Found %s, Tx Channel = %x, FMAN = %x," - "Port ID = %x\n", + "Port ID = %x", dname, __if->__if.tx_channel_id, __if->__if.fman_idx, __if->__if.mac_idx); diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c index 0148b98e..4ebbc3d3 100644 --- a/drivers/bus/dpaa/base/fman/fman_hw.c +++ b/drivers/bus/dpaa/base/fman/fman_hw.c @@ -16,6 +16,9 @@ #include #include +#define FMAN_SP_SG_DISABLE 0x80000000 +#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16 + /* Instantiate the global variable that the inline CRC64 implementation (in * ) depends on. */ @@ -422,20 +425,16 @@ fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta) int fman_if_get_fdoff(struct fman_if *fm_if) { - u32 fmbm_ricp; + u32 fmbm_rebm; int fdoff; - int iceof_mask = 0x001f0000; - int icsz_mask = 0x0000001f; struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if); assert(fman_ccsr_map_fd != -1); - fmbm_ricp = - in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp); - /*iceof + icsz*/ - fdoff = ((fmbm_ricp & iceof_mask) >> 16) * 16 + - (fmbm_ricp & icsz_mask) * 16; + fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm); + + fdoff = (fmbm_rebm >> FMAN_SP_EXT_BUF_MARG_START_SHIFT) & 0x1ff; return fdoff; } @@ -502,12 +501,16 @@ fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset) { struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if); unsigned int *fmbm_rebm; + int val = 0; + int fmbm_mask = 0x01ff0000; + + val = fd_offset << FMAN_SP_EXT_BUF_MARG_START_SHIFT; assert(fman_ccsr_map_fd != -1); fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm; - out_be32(fmbm_rebm, in_be32(fmbm_rebm) | (fd_offset << 16)); + out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val); } void @@ -536,6 +539,47 @@ fman_if_get_maxfrm(struct fman_if *fm_if) return (in_be32(reg_maxfrm) | 0x0000FFFF); } +/* MSB in fmbm_rebm register + * 0 - If BMI cannot store the frame in a single buffer it may select a buffer + * of smaller size and store the frame in scatter gather (S/G) buffers + * 1 - Scatter gather format is not enabled for frame storage. If BMI cannot + * store the frame in a single buffer, the frame is discarded. + */ + +int +fman_if_get_sg_enable(struct fman_if *fm_if) +{ + u32 fmbm_rebm; + + struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if); + + assert(fman_ccsr_map_fd != -1); + + fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm); + + return (fmbm_rebm & FMAN_SP_SG_DISABLE) ? 
0 : 1; +} + +void +fman_if_set_sg(struct fman_if *fm_if, int enable) +{ + struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if); + unsigned int *fmbm_rebm; + int val; + int fmbm_mask = FMAN_SP_SG_DISABLE; + + if (enable) + val = 0; + else + val = FMAN_SP_SG_DISABLE; + + assert(fman_ccsr_map_fd != -1); + + fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm; + + out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val); +} + void fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia) { diff --git a/drivers/bus/dpaa/base/fman/netcfg_layer.c b/drivers/bus/dpaa/base/fman/netcfg_layer.c index 3e956ce1..031c6f1a 100644 --- a/drivers/bus/dpaa/base/fman/netcfg_layer.c +++ b/drivers/bus/dpaa/base/fman/netcfg_layer.c @@ -18,11 +18,6 @@ #include #include -/* Structure contains information about all the interfaces given by user - * on command line. - */ -struct netcfg_interface *netcfg_interface; - /* This data structure contains all configuration information * related to usages of DPA devices. */ diff --git a/drivers/bus/dpaa/base/fman/of.c b/drivers/bus/dpaa/base/fman/of.c index 1b2dbe26..a7f3174e 100644 --- a/drivers/bus/dpaa/base/fman/of.c +++ b/drivers/bus/dpaa/base/fman/of.c @@ -182,6 +182,11 @@ linear_dir(struct dt_dir *d) DPAA_BUS_LOG(DEBUG, "Duplicate lphandle in %s", d->node.node.full_name); d->lphandle = f; + } else if (!strcmp(f->node.node.name, "phandle")) { + if (d->lphandle) + DPAA_BUS_LOG(DEBUG, "Duplicate lphandle in %s", + d->node.node.full_name); + d->lphandle = f; } else if (!strcmp(f->node.node.name, "#address-cells")) { if (d->a_cells) DPAA_BUS_LOG(DEBUG, "Duplicate a_cells in %s", @@ -541,3 +546,42 @@ of_device_is_compatible(const struct device_node *dev_node, return true; return false; } + +static const void *of_get_mac_addr(const struct device_node *np, + const char *name) +{ + return of_get_property(np, name, NULL); +} + +/** + * Search the device tree for the best MAC address to use. 'mac-address' is + * checked first, because that is supposed to contain the "most recent" MAC + * address. If that isn't set, then 'local-mac-address' is checked next, + * because that is the default address. If that isn't set, then the obsolete + * 'address' is checked, just in case we're using an old device tree. + * + * Note that the 'address' property is supposed to contain a virtual address of + * the register set, but some DTS files have redefined that property to be the + * MAC address. + * + * All-zero MAC addresses are rejected, because those could be properties that + * exist in the device tree, but were not set by U-Boot. For example, the + * DTS could define 'mac-address' and 'local-mac-address', with zero MAC + * addresses. Some older U-Boots only initialized 'local-mac-address'. In + * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists + * but is all zeros.
+ */ +const void *of_get_mac_address(const struct device_node *np) +{ + const void *addr; + + addr = of_get_mac_addr(np, "mac-address"); + if (addr) + return addr; + + addr = of_get_mac_addr(np, "local-mac-address"); + if (addr) + return addr; + + return of_get_mac_addr(np, "address"); +} diff --git a/drivers/bus/dpaa/base/qbman/bman_driver.c b/drivers/bus/dpaa/base/qbman/bman_driver.c index a1ef3921..b14b5905 100644 --- a/drivers/bus/dpaa/base/qbman/bman_driver.c +++ b/drivers/bus/dpaa/base/qbman/bman_driver.c @@ -15,9 +15,9 @@ /* * Global variables of the max portal/pool number this bman version supported */ -u16 bman_ip_rev; +static u16 bman_ip_rev; u16 bman_pool_max; -void *bman_ccsr_map; +static void *bman_ccsr_map; /*****************/ /* Portal driver */ @@ -161,7 +161,7 @@ int bman_init_ccsr(const struct device_node *node) PROT_WRITE, MAP_SHARED, ccsr_map_fd, phys_addr); if (bman_ccsr_map == MAP_FAILED) { pr_err("Can not map BMan CCSR base Bman: " - "0x%x Phys: 0x%lx size 0x%lx", + "0x%x Phys: 0x%" PRIx64 " size 0x%" PRIu64, *bman_addr, phys_addr, regs_size); return -EINVAL; } diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c index 2b97671b..7c17027f 100644 --- a/drivers/bus/dpaa/base/qbman/qman.c +++ b/drivers/bus/dpaa/base/qbman/qman.c @@ -314,9 +314,9 @@ loop: if (!msg) return 0; } - if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) { + if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) { /* We aren't draining anything but FQRNIs */ - pr_err("Found verb 0x%x in MR\n", msg->verb); + pr_err("Found verb 0x%x in MR\n", msg->ern.verb); return -1; } qm_mr_next(p); @@ -483,7 +483,7 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal) /* when accessing 'verb', use __raw_readb() to ensure that compiler * inlining doesn't try to optimise out "excess reads". 
*/ - if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) { + if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) { mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); if (!mr->pi) mr->vbit ^= QM_MR_VERB_VBIT; @@ -625,7 +625,7 @@ fail_eqcr: #define MAX_GLOBAL_PORTALS 8 static struct qman_portal global_portals[MAX_GLOBAL_PORTALS]; -rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS]; +static rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS]; static struct qman_portal * qman_alloc_global_portal(void) @@ -832,7 +832,7 @@ mr_loop: goto mr_done; swapped_msg = *msg; hw_fd_to_cpu(&swapped_msg.ern.fd); - verb = msg->verb & QM_MR_VERB_TYPE_MASK; + verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK; /* The message is a software ERN iff the 0x20 bit is set */ if (verb & 0x20) { switch (verb) { @@ -1058,7 +1058,7 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit, struct qm_portal *portal = &p->p; register struct qm_dqrr *dqrr = &portal->dqrr; struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE]; - struct qman_fq *fq[QM_DQRR_SIZE]; + struct qman_fq *fq; unsigned int limit = 0, rx_number = 0; uint32_t consume = 0; @@ -1087,18 +1087,18 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit, shadow[rx_number]->fd.opaque = be32_to_cpu(dq[rx_number]->fd.opaque); #else - shadow = dq; + shadow[rx_number] = dq[rx_number]; #endif /* SDQCR: context_b points to the FQ */ #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP - fq[rx_number] = qman_fq_lookup_table[be32_to_cpu( - dq[rx_number]->contextB)]; + fq = qman_fq_lookup_table[be32_to_cpu(dq[rx_number]->contextB)]; #else - fq[rx_number] = (void *)(uintptr_t)be32_to_cpu(dq->contextB); + fq = (void *)be32_to_cpu(dq[rx_number]->contextB); #endif - fq[rx_number]->cb.dqrr_prepare(shadow[rx_number], - &bufs[rx_number]); + if (fq->cb.dqrr_prepare) + fq->cb.dqrr_prepare(shadow[rx_number], + &bufs[rx_number]); consume |= (1 << (31 - DQRR_PTR2IDX(shadow[rx_number]))); rx_number++; @@ -1106,7 +1106,7 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit, } while (++limit < poll_limit); if (rx_number) - fq[0]->cb.dqrr_dpdk_pull_cb(fq, shadow, bufs, rx_number); + fq->cb.dqrr_dpdk_pull_cb(&fq, shadow, bufs, rx_number); /* Consume all the DQRR entries together */ qm_out(DQRR_DCAP, (1 << 8) | consume); @@ -1665,7 +1665,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags) */ struct qm_mr_entry msg; - msg.verb = QM_MR_VERB_FQRNI; + msg.ern.verb = QM_MR_VERB_FQRNI; msg.fq.fqs = mcr->alterfq.fqs; msg.fq.fqid = fq->fqid; #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP @@ -2002,13 +2002,13 @@ int qman_query_congestion(struct qm_mcr_querycongestion *congestion) return 0; } -int qman_set_vdq(struct qman_fq *fq, u16 num) +int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags) { struct qman_portal *p = get_affine_portal(); uint32_t vdqcr; int ret = -EBUSY; - vdqcr = QM_VDQCR_EXACT; + vdqcr = vdqcr_flags; vdqcr |= QM_VDQCR_NUMFRAMES_SET(num); if ((fq->state != qman_fq_state_parked) && @@ -2642,7 +2642,7 @@ int qman_shutdown_fq(u32 fqid) qm_mr_pvb_update(low_p); msg = qm_mr_current(low_p); while (msg) { - if ((msg->verb & + if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) == QM_MR_VERB_FQRN) found_fqrn = 1; @@ -2710,7 +2710,7 @@ int qman_shutdown_fq(u32 fqid) qm_mr_pvb_update(low_p); msg = qm_mr_current(low_p); while (msg) { - if ((msg->verb & QM_MR_VERB_TYPE_MASK) == + if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) == QM_MR_VERB_FQRL) orl_empty = 1; qm_mr_next(low_p); diff --git a/drivers/bus/dpaa/base/qbman/qman_driver.c b/drivers/bus/dpaa/base/qbman/qman_driver.c index
7cfa8ee4..f6ecd6b2 100644 --- a/drivers/bus/dpaa/base/qbman/qman_driver.c +++ b/drivers/bus/dpaa/base/qbman/qman_driver.c @@ -20,9 +20,9 @@ u16 qm_channel_caam = QMAN_CHANNEL_CAAM; u16 qm_channel_pme = QMAN_CHANNEL_PME; /* Ccsr map address to access ccsr-based register */ -void *qman_ccsr_map; +static void *qman_ccsr_map; /* The qman clock frequency */ -u32 qman_clk; +static u32 qman_clk; static __thread int qmfd = -1; static __thread struct qm_portal_config qpcfg; @@ -160,6 +160,7 @@ struct qman_portal *fsl_qman_portal_create(void) &cpuset); if (ret) { error(0, ret, "pthread_getaffinity_np()"); + kfree(q_pcfg); return NULL; } @@ -168,12 +169,14 @@ struct qman_portal *fsl_qman_portal_create(void) if (CPU_ISSET(loop, &cpuset)) { if (q_pcfg->cpu != -1) { pr_err("Thread is not affine to 1 cpu\n"); + kfree(q_pcfg); return NULL; } q_pcfg->cpu = loop; } if (q_pcfg->cpu == -1) { pr_err("Bug in getaffinity handling!\n"); + kfree(q_pcfg); return NULL; } @@ -183,6 +186,7 @@ struct qman_portal *fsl_qman_portal_create(void) ret = process_portal_map(&q_map); if (ret) { error(0, ret, "process_portal_map()"); + kfree(q_pcfg); return NULL; } q_pcfg->channel = q_map.channel; @@ -217,6 +221,7 @@ err2: close(q_fd); err1: process_portal_unmap(&q_map.addr); + kfree(q_pcfg); return NULL; } @@ -246,7 +251,6 @@ int fsl_qman_portal_destroy(struct qman_portal *qp) int qman_global_init(void) { const struct device_node *dt_node; - int ret = 0; size_t lenp; const u32 *chanid; static int ccsr_map_fd; @@ -352,9 +356,7 @@ int qman_global_init(void) qman_clk = be32_to_cpu(*clk); #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP - ret = qman_setup_fq_lookup_table(CONFIG_FSL_QMAN_FQ_LOOKUP_MAX); - if (ret) - return ret; + return qman_setup_fq_lookup_table(CONFIG_FSL_QMAN_FQ_LOOKUP_MAX); #endif return 0; } diff --git a/drivers/bus/dpaa/base/qbman/qman_priv.h b/drivers/bus/dpaa/base/qbman/qman_priv.h index 9e4471e6..02f6301f 100644 --- a/drivers/bus/dpaa/base/qbman/qman_priv.h +++ b/drivers/bus/dpaa/base/qbman/qman_priv.h @@ -139,7 +139,6 @@ struct qm_portal_config { #define QMAN_REV31 0x0301 #define QMAN_REV32 0x0302 extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */ -extern u32 qman_clk; int qm_set_wpm(int wpm); int qm_get_wpm(int *wpm); diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c index f2bb3b15..16fabd1b 100644 --- a/drivers/bus/dpaa/dpaa_bus.c +++ b/drivers/bus/dpaa/dpaa_bus.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -51,10 +50,12 @@ struct rte_dpaa_bus rte_dpaa_bus; struct netcfg_info *dpaa_netcfg; /* define a variable to hold the portal_key, once created. */ -pthread_key_t dpaa_portal_key; +static pthread_key_t dpaa_portal_key; unsigned int dpaa_svr_family; +#define FSL_DPAA_BUS_NAME dpaa_bus + RTE_DEFINE_PER_LCORE(bool, dpaa_io); RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs); @@ -130,6 +131,22 @@ dpaa_sec_available(void) static void dpaa_clean_device_list(void); +static struct rte_devargs * +dpaa_devargs_lookup(struct rte_dpaa_device *dev) +{ + struct rte_devargs *devargs; + char dev_name[32]; + + RTE_EAL_DEVARGS_FOREACH("dpaa_bus", devargs) { + devargs->bus->parse(devargs->name, &dev_name); + if (strcmp(dev_name, dev->device.name) == 0) { + DPAA_BUS_INFO("**Devargs matched %s", dev_name); + return devargs; + } + } + return NULL; +} + static int dpaa_create_device_list(void) { @@ -161,8 +178,9 @@ dpaa_create_device_list(void) memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN); sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
fman_intf->mac_idx); - DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name); + DPAA_BUS_LOG(INFO, "%s netdev added", dev->name); dev->device.name = dev->name; + dev->device.devargs = dpaa_devargs_lookup(dev); dpaa_add_to_device_list(dev); } @@ -198,7 +216,9 @@ dpaa_create_device_list(void) */ memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN); sprintf(dev->name, "dpaa-sec%d", i); - DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name); + DPAA_BUS_LOG(INFO, "%s cryptodev added", dev->name); + dev->device.name = dev->name; + dev->device.devargs = dpaa_devargs_lookup(dev); dpaa_add_to_device_list(dev); } @@ -235,7 +255,7 @@ int rte_dpaa_portal_init(void *arg) BUS_INIT_FUNC_TRACE(); - if ((uint64_t)arg == 1 || cpu == LCORE_ID_ANY) + if ((size_t)arg == 1 || cpu == LCORE_ID_ANY) cpu = rte_get_master_lcore(); /* if the core id is not supported */ else @@ -309,9 +329,15 @@ rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq) /* Affine above created portal with channel*/ u32 sdqcr; struct qman_portal *qp; + int ret; - if (unlikely(!RTE_PER_LCORE(dpaa_io))) - rte_dpaa_portal_init(arg); + if (unlikely(!RTE_PER_LCORE(dpaa_io))) { + ret = rte_dpaa_portal_init(arg); + if (ret < 0) { + DPAA_BUS_LOG(ERR, "portal initialization failure"); + return ret; + } + } /* Initialise qman specific portals */ qp = fsl_qman_portal_create(); @@ -352,6 +378,51 @@ dpaa_portal_finish(void *arg) RTE_PER_LCORE(dpaa_io) = false; } +static int +rte_dpaa_bus_parse(const char *name, void *out_name) +{ + int i, j; + int max_fman = 2, max_macs = 16; + char *sep = strchr(name, ':'); + + if (strncmp(name, RTE_STR(FSL_DPAA_BUS_NAME), + strlen(RTE_STR(FSL_DPAA_BUS_NAME)))) { + return -EINVAL; + } + + if (!sep) { + DPAA_BUS_ERR("Incorrect device name observed"); + return -EINVAL; + } + + sep = (char *) (sep + 1); + + for (i = 0; i < max_fman; i++) { + for (j = 0; j < max_macs; j++) { + char fm_name[16]; + snprintf(fm_name, 16, "fm%d-mac%d", i, j); + if (strcmp(fm_name, sep) == 0) { + if (out_name) + strcpy(out_name, sep); + return 0; + } + } + } + + for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) { + char sec_name[16]; + + snprintf(sec_name, 16, "dpaa-sec%d", i); + if (strcmp(sec_name, sep) == 0) { + if (out_name) + strcpy(out_name, sep); + return 0; + } + } + + return -EINVAL; +} + #define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa" #define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa" @@ -390,14 +461,11 @@ rte_dpaa_bus_scan(void) return 0; } - DPAA_BUS_LOG(DEBUG, "Bus: Address of netcfg=%p, Ethports=%d", - dpaa_netcfg, dpaa_netcfg->num_ethports); - #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER dump_netcfg(dpaa_netcfg); #endif - DPAA_BUS_LOG(DEBUG, "Number of devices = %d\n", + DPAA_BUS_LOG(DEBUG, "Number of ethernet devices = %d", dpaa_netcfg->num_ethports); ret = dpaa_create_device_list(); if (ret) { @@ -415,9 +483,6 @@ rte_dpaa_bus_scan(void) return ret; } - DPAA_BUS_LOG(DEBUG, "dpaa_portal_key=%u, ret=%d\n", - (unsigned int)dpaa_portal_key, ret); - return 0; } @@ -453,22 +518,15 @@ static int rte_dpaa_device_match(struct rte_dpaa_driver *drv, struct rte_dpaa_device *dev) { - int ret = -1; - - BUS_INIT_FUNC_TRACE(); - if (!drv || !dev) { DPAA_BUS_DEBUG("Invalid drv or dev received."); - return ret; + return -1; } - if (drv->drv_type == dev->device_type) { - DPAA_BUS_INFO("Device: %s matches for driver: %s", - dev->name, drv->driver.name); - ret = 0; /* Found a match */ - } + if (drv->drv_type == dev->device_type) + return 0; - return ret; + return -1; } static int @@ -479,8 +537,14 @@ rte_dpaa_bus_probe(void) struct rte_dpaa_driver 
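The new rte_dpaa_bus_parse()/dpaa_devargs_lookup() pair hinges on a simple naming scheme: the bus prefix, a colon, then the synthesized device name ("fm<fman>-mac<idx>" for ethernet, "dpaa-sec<n>" for crypto). A self-contained sketch of that matching logic, assuming the FSL_DPAA_BUS_NAME macro expands to "dpaa_bus":

```c
#include <stdio.h>
#include <string.h>

/* Does a devargs string such as "dpaa_bus:fm1-mac3" name this device? */
static int dpaa_name_matches(const char *devargs, const char *dev_name)
{
	const char *sep = strchr(devargs, ':');

	if (!sep || strncmp(devargs, "dpaa_bus", strlen("dpaa_bus")))
		return 0;
	return strcmp(sep + 1, dev_name) == 0;
}

int main(void)
{
	printf("%d\n", dpaa_name_matches("dpaa_bus:fm1-mac3", "fm1-mac3"));  /* 1 */
	printf("%d\n", dpaa_name_matches("dpaa_bus:fm1-mac3", "dpaa-sec0")); /* 0 */
	return 0;
}
```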
*drv; FILE *svr_file = NULL; unsigned int svr_ver; + int probe_all = rte_dpaa_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST; - BUS_INIT_FUNC_TRACE(); + svr_file = fopen(DPAA_SOC_ID_FILE, "r"); + if (svr_file) { + if (fscanf(svr_file, "svr:%x", &svr_ver) > 0) + dpaa_svr_family = svr_ver & SVR_MASK; + fclose(svr_file); + } /* For each registered driver, and device, call the driver->probe */ TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) { @@ -489,13 +553,19 @@ rte_dpaa_bus_probe(void) if (ret) continue; - if (!drv->probe) + if (!drv->probe || + (dev->device.devargs && + dev->device.devargs->policy == RTE_DEV_BLACKLISTED)) continue; - ret = drv->probe(drv, dev); - if (ret) - DPAA_BUS_ERR("Unable to probe.\n"); - + if (probe_all || + (dev->device.devargs && + dev->device.devargs->policy == + RTE_DEV_WHITELISTED)) { + ret = drv->probe(drv, dev); + if (ret) + DPAA_BUS_ERR("Unable to probe.\n"); + } break; } } @@ -506,13 +576,6 @@ rte_dpaa_bus_probe(void) if (!TAILQ_EMPTY(&rte_dpaa_bus.device_list)) rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME); - svr_file = fopen(DPAA_SOC_ID_FILE, "r"); - if (svr_file) { - if (fscanf(svr_file, "svr:%x", &svr_ver) > 0) - dpaa_svr_family = svr_ver & SVR_MASK; - fclose(svr_file); - } - return 0; } @@ -552,6 +615,7 @@ struct rte_dpaa_bus rte_dpaa_bus = { .bus = { .scan = rte_dpaa_bus_scan, .probe = rte_dpaa_bus_probe, + .parse = rte_dpaa_bus_parse, .find_device = rte_dpaa_find_device, .get_iommu_class = rte_dpaa_get_iommu_class, }, @@ -562,9 +626,7 @@ struct rte_dpaa_bus rte_dpaa_bus = { RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus); -RTE_INIT(dpaa_init_log); -static void -dpaa_init_log(void) +RTE_INIT(dpaa_init_log) { dpaa_logtype_bus = rte_log_register("bus.dpaa"); if (dpaa_logtype_bus >= 0) @@ -574,11 +636,11 @@ dpaa_init_log(void) if (dpaa_logtype_mempool >= 0) rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE); - dpaa_logtype_pmd = rte_log_register("pmd.dpaa"); + dpaa_logtype_pmd = rte_log_register("pmd.net.dpaa"); if (dpaa_logtype_pmd >= 0) rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE); - dpaa_logtype_eventdev = rte_log_register("eventdev.dpaa"); + dpaa_logtype_eventdev = rte_log_register("pmd.event.dpaa"); if (dpaa_logtype_eventdev >= 0) rte_log_set_level(dpaa_logtype_eventdev, RTE_LOG_NOTICE); } diff --git a/drivers/bus/dpaa/include/compat.h b/drivers/bus/dpaa/include/compat.h index 53707bb4..92241d23 100644 --- a/drivers/bus/dpaa/include/compat.h +++ b/drivers/bus/dpaa/include/compat.h @@ -39,6 +39,7 @@ #include #include #include +#include /* The following definitions are primarily to allow the single-source driver * interfaces to be included by arbitrary program code. Ie. 
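Both DPAA buses now gate drv->probe() on the bus scan mode and the per-device devargs policy. A hedged distillation of that decision, with the helper name invented for illustration:

```c
/* Illustrative predicate mirroring the probe gating above: blacklisted
 * devices are always skipped; otherwise a device is probed when no
 * whitelist is in force, or when it is explicitly whitelisted.
 */
static int should_probe(int probe_all, const struct rte_devargs *da)
{
	if (da && da->policy == RTE_DEV_BLACKLISTED)
		return 0;
	if (probe_all)
		return 1;
	return da && da->policy == RTE_DEV_WHITELISTED;
}
```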
for interfaces that @@ -47,9 +48,15 @@ */ /* Required compiler attributes */ +#ifndef __maybe_unused #define __maybe_unused __rte_unused +#endif +#ifndef __always_unused #define __always_unused __rte_unused +#endif +#ifndef __packed #define __packed __rte_packed +#endif #define noinline __attribute__((noinline)) #define L1_CACHE_BYTES 64 @@ -127,13 +134,15 @@ static inline void out_be32(volatile void *__p, u32 val) *p = rte_cpu_to_be_32(val); } +#define hwsync() rte_rmb() +#define lwsync() rte_wmb() + #define dcbt_ro(p) __builtin_prefetch(p, 0) #define dcbt_rw(p) __builtin_prefetch(p, 1) +#if defined(RTE_ARCH_ARM64) #define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); } #define dcbz_64(p) dcbz(p) -#define hwsync() rte_rmb() -#define lwsync() rte_wmb() #define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); } #define dcbf_64(p) dcbf(p) #define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); } @@ -144,9 +153,27 @@ static inline void out_be32(volatile void *__p, u32 val) asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); \ } while (0) +#elif defined(RTE_ARCH_ARM) +#define dcbz(p) memset((p), 0, 32) +#define dcbz_64(p) memset((p), 0, 64) +#define dcbf(p) RTE_SET_USED(p) +#define dcbf_64(p) dcbf(p) +#define dccivac(p) RTE_SET_USED(p) +#define dcbit_ro(p) RTE_SET_USED(p) + +#else +#define dcbz(p) RTE_SET_USED(p) +#define dcbz_64(p) dcbz(p) +#define dcbf(p) RTE_SET_USED(p) +#define dcbf_64(p) dcbf(p) +#define dccivac(p) RTE_SET_USED(p) +#define dcbit_ro(p) RTE_SET_USED(p) +#endif + #define barrier() { asm volatile ("" : : : "memory"); } #define cpu_relax barrier +#if defined(RTE_ARCH_ARM64) static inline uint64_t mfatb(void) { uint64_t ret, ret_new, timeout = 200; @@ -160,6 +187,11 @@ static inline uint64_t mfatb(void) DPAA_BUG_ON(!timeout && (ret != ret_new)); return ret * 64; } +#else + +#define mfatb rte_rdtsc + +#endif /* Spin for a few cycles without bothering the bus */ static inline void cpu_spin(int cycles) diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h index c0ef1bff..1d1ce867 100644 --- a/drivers/bus/dpaa/include/fsl_fman.h +++ b/drivers/bus/dpaa/include/fsl_fman.h @@ -108,6 +108,12 @@ int fman_if_get_fdoff(struct fman_if *fm_if); /* Set interface fd->offset value */ void fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset); +/* Get interface SG enable status value */ +int fman_if_get_sg_enable(struct fman_if *fm_if); + +/* Set interface SG support mode */ +void fman_if_set_sg(struct fman_if *fm_if, int enable); + /* Get interface Max Frame length (MTU) */ uint16_t fman_if_get_maxfrm(struct fman_if *fm_if); diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h index e9793f30..b18cf037 100644 --- a/drivers/bus/dpaa/include/fsl_qman.h +++ b/drivers/bus/dpaa/include/fsl_qman.h @@ -284,20 +284,20 @@ static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg) } while (0) /* See 1.5.8.1: "Enqueue Command" */ -struct qm_eqcr_entry { +struct __rte_aligned(8) qm_eqcr_entry { u8 __dont_write_directly__verb; u8 dca; u16 seqnum; u32 orp; /* 24-bit */ u32 fqid; /* 24-bit */ u32 tag; - struct qm_fd fd; + struct qm_fd fd; /* this has alignment 8 */ u8 __reserved3[32]; } __packed; /* "Frame Dequeue Response" */ -struct qm_dqrr_entry { +struct __rte_aligned(8) qm_dqrr_entry { u8 verb; u8 stat; u16 seqnum; /* 15-bit */ @@ -305,7 +305,7 @@ struct qm_dqrr_entry { u8 __reserved2[3]; u32 fqid; /* 24-bit */ u32 contextB; - struct qm_fd fd; + struct qm_fd fd; /* this has 
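The compat.h hunks above split the cache-maintenance macros by architecture instead of assuming ARM64. A self-contained illustration of the same pattern (names are stand-ins, not the driver's macros): real instructions where the ISA provides them, a memset fallback on ARMv7 for zeroing, and harmless no-ops elsewhere, so common driver code never needs its own #ifdefs.

```c
#include <string.h>

#if defined(__aarch64__)
#define cacheline_zero(p) asm volatile("dc zva, %0" : : "r"(p) : "memory")
#elif defined(__arm__)
#define cacheline_zero(p) memset((p), 0, 32)  /* no 'dc zva' on ARMv7 */
#else
#define cacheline_zero(p) ((void)(p))         /* portable no-op */
#endif
```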
alignment 8 */ u8 __reserved4[32]; }; @@ -323,18 +323,19 @@ struct qm_dqrr_entry { /* "ERN Message Response" */ /* "FQ State Change Notification" */ struct qm_mr_entry { - u8 verb; union { struct { + u8 verb; u8 dca; u16 seqnum; u8 rc; /* Rejection Code */ u32 orp:24; u32 fqid; /* 24-bit */ u32 tag; - struct qm_fd fd; - } __packed ern; + struct qm_fd fd; /* this has alignment 8 */ + } __packed __rte_aligned(8) ern; struct { + u8 verb; #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */ u8 __reserved1:4; @@ -349,18 +350,19 @@ struct qm_mr_entry { u32 __reserved3:24; u32 fqid; /* 24-bit */ u32 tag; - struct qm_fd fd; - } __packed dcern; + struct qm_fd fd; /* this has alignment 8 */ + } __packed __rte_aligned(8) dcern; struct { + u8 verb; u8 fqs; /* Frame Queue Status */ u8 __reserved1[6]; u32 fqid; /* 24-bit */ u32 contextB; u8 __reserved2[16]; - } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */ + } __packed __rte_aligned(8) fq; /* FQRN/FQRNI/FQRL/FQPN */ }; u8 __reserved2[32]; -} __packed; +} __packed __rte_aligned(8); #define QM_MR_VERB_VBIT 0x80 /* * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb @@ -1330,10 +1332,11 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit, * qman_set_vdq - Issue a volatile dequeue command * @fq: Frame Queue on which the volatile dequeue command is issued * @num: Number of Frames requested for volatile dequeue + * @vdqcr_flags: QM_VDQCR_EXACT flag to for VDQCR command * * This function will issue a volatile dequeue command to the QMAN. */ -int qman_set_vdq(struct qman_fq *fq, u16 num); +int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags); /** * qman_dequeue - Get the DQRR entry after volatile dequeue command diff --git a/drivers/bus/dpaa/include/of.h b/drivers/bus/dpaa/include/of.h index 151be5a3..7ea7608f 100644 --- a/drivers/bus/dpaa/include/of.h +++ b/drivers/bus/dpaa/include/of.h @@ -109,6 +109,8 @@ const struct device_node *of_get_parent(const struct device_node *dev_node); const struct device_node *of_get_next_child(const struct device_node *dev_node, const struct device_node *prev); +const void *of_get_mac_address(const struct device_node *np); + #define for_each_child_node(parent, child) \ for (child = of_get_next_child(parent, NULL); child != NULL; \ child = of_get_next_child(parent, child)) diff --git a/drivers/bus/dpaa/meson.build b/drivers/bus/dpaa/meson.build new file mode 100644 index 00000000..d10b62c0 --- /dev/null +++ b/drivers/bus/dpaa/meson.build @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if host_machine.system() != 'linux' + build = false +endif + +deps += ['eventdev'] +sources = files('base/fman/fman.c', + 'base/fman/fman_hw.c', + 'base/fman/netcfg_layer.c', + 'base/fman/of.c', + 'base/qbman/bman.c', + 'base/qbman/bman_driver.c', + 'base/qbman/dpaa_alloc.c', + 'base/qbman/dpaa_sys.c', + 'base/qbman/process.c', + 'base/qbman/qman.c', + 'base/qbman/qman_driver.c', + 'dpaa_bus.c') + +allow_experimental_apis = true + +if cc.has_argument('-Wno-cast-qual') + cflags += '-Wno-cast-qual' +endif + +includes += include_directories('include', 'base/qbman') +cflags += ['-D_GNU_SOURCE'] diff --git a/drivers/bus/dpaa/rte_bus_dpaa_version.map b/drivers/bus/dpaa/rte_bus_dpaa_version.map index 8d902854..7d6d6243 100644 --- a/drivers/bus/dpaa/rte_bus_dpaa_version.map +++ b/drivers/bus/dpaa/rte_bus_dpaa_version.map @@ -92,3 +92,13 @@ DPDK_18.02 { local: *; } DPDK_17.11; + +DPDK_18.08 { + global: + + fman_if_get_sg_enable; + fman_if_set_sg; + 
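The qm_mr_entry rework above duplicates the verb byte inside every union member instead of keeping a single copy ahead of the union; with the members now 8-byte aligned, a leading byte would otherwise be followed by padding. A toy analogue showing that all views still alias the same first byte:

```c
#include <stddef.h>
#include <stdint.h>

/* Toy analogue of the reworked message-ring entry: the verb byte lives
 * inside each union member, so msg.ern.verb and msg.fq.verb address the
 * same storage (a common initial sequence). */
struct toy_mr {
	union {
		struct { uint8_t verb; uint64_t fd; } ern;
		struct { uint8_t verb; uint32_t fqid; } fq;
	};
};

_Static_assert(offsetof(struct toy_mr, ern.verb) ==
	       offsetof(struct toy_mr, fq.verb),
	       "verb must alias across all message views");
```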
of_get_mac_address; + + local: *; +} DPDK_18.02; diff --git a/drivers/bus/dpaa/rte_dpaa_bus.h b/drivers/bus/dpaa/rte_dpaa_bus.h index 718701b1..15dc6a4a 100644 --- a/drivers/bus/dpaa/rte_dpaa_bus.h +++ b/drivers/bus/dpaa/rte_dpaa_bus.h @@ -15,8 +15,6 @@ #include #include -#define FSL_DPAA_BUS_NAME "FSL_DPAA_BUS" - #define DPAA_MEMPOOL_OPS_NAME "dpaa" #define DEV_TO_DPAA_DEVICE(ptr) \ @@ -95,20 +93,35 @@ struct dpaa_portal { uint64_t tid;/**< Parent Thread id for this portal */ }; -/* TODO - this is costly, need to write a fast coversion routine */ +/* Various structures representing contiguous memory maps */ +struct dpaa_memseg { + TAILQ_ENTRY(dpaa_memseg) next; + char *vaddr; + rte_iova_t iova; + size_t len; +}; + +TAILQ_HEAD(dpaa_memseg_list, dpaa_memseg); +extern struct dpaa_memseg_list rte_dpaa_memsegs; + +/* Either iterate over the list of internal memseg references or fallback to + * EAL memseg based iova2virt. + */ static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr) { - const struct rte_memseg *memseg = rte_eal_get_physmem_layout(); - int i; - - for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr != NULL; i++) { - if (paddr >= memseg[i].iova && paddr < - memseg[i].iova + memseg[i].len) - return (uint8_t *)(memseg[i].addr) + - (paddr - memseg[i].iova); + struct dpaa_memseg *ms; + + /* Check if the address is already part of the memseg list internally + * maintained by the dpaa driver. + */ + TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) { + if (paddr >= ms->iova && paddr < + ms->iova + ms->len) + return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova)); } - return NULL; + /* If not, Fallback to full memseg list searching */ + return rte_mem_iova2virt(paddr); } /** @@ -151,8 +164,7 @@ void dpaa_portal_finish(void *arg); /** Helper for DPAA device registration from driver (eth, crypto) instance */ #define RTE_PMD_REGISTER_DPAA(nm, dpaa_drv) \ -RTE_INIT(dpaainitfn_ ##nm); \ -static void dpaainitfn_ ##nm(void) \ +RTE_INIT(dpaainitfn_ ##nm) \ {\ (dpaa_drv).driver.name = RTE_STR(nm);\ rte_dpaa_driver_register(&dpaa_drv); \ diff --git a/drivers/bus/dpaa/rte_dpaa_logs.h b/drivers/bus/dpaa/rte_dpaa_logs.h index 0fd70bbf..e4143543 100644 --- a/drivers/bus/dpaa/rte_dpaa_logs.h +++ b/drivers/bus/dpaa/rte_dpaa_logs.h @@ -15,10 +15,7 @@ extern int dpaa_logtype_pmd; extern int dpaa_logtype_eventdev; #define DPAA_BUS_LOG(level, fmt, args...) \ - rte_log(RTE_LOG_ ## level, dpaa_logtype_bus, "%s(): " fmt "\n", \ - __func__, ##args) - -#define BUS_INIT_FUNC_TRACE() DPAA_BUS_LOG(DEBUG, " >>") + rte_log(RTE_LOG_ ## level, dpaa_logtype_bus, "dpaa: " fmt "\n", ##args) #ifdef RTE_LIBRTE_DPAA_DEBUG_BUS #define DPAA_BUS_HWWARN(cond, fmt, args...) \ @@ -31,7 +28,11 @@ extern int dpaa_logtype_eventdev; #endif #define DPAA_BUS_DEBUG(fmt, args...) \ - DPAA_BUS_LOG(DEBUG, fmt, ## args) + rte_log(RTE_LOG_DEBUG, dpaa_logtype_bus, "dpaa: %s(): " fmt "\n", \ + __func__, ##args) + +#define BUS_INIT_FUNC_TRACE() DPAA_BUS_DEBUG(" >>") + #define DPAA_BUS_INFO(fmt, args...) \ DPAA_BUS_LOG(INFO, fmt, ## args) #define DPAA_BUS_ERR(fmt, args...) 
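rte_dpaa_mem_ptov() above now tries a short driver-local segment list before falling back to the EAL's generic rte_mem_iova2virt(). A sketch of that two-level translation with stand-in types (the function pointer models the slow EAL fallback):

```c
#include <stddef.h>
#include <stdint.h>

struct seg { uint64_t iova; size_t len; void *vaddr; };

/* First try the driver-maintained fast list; only on a miss fall back
 * to the slower global lookup, as rte_dpaa_mem_ptov() does. */
static void *ptov(struct seg *fast, int n, uint64_t paddr,
		  void *(*slow)(uint64_t))
{
	for (int i = 0; i < n; i++)
		if (paddr >= fast[i].iova && paddr < fast[i].iova + fast[i].len)
			return (char *)fast[i].vaddr + (paddr - fast[i].iova);
	return slow(paddr); /* full memseg walk in the real driver */
}
```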
\ diff --git a/drivers/bus/fslmc/Makefile b/drivers/bus/fslmc/Makefile index de237f0f..515d0f53 100644 --- a/drivers/bus/fslmc/Makefile +++ b/drivers/bus/fslmc/Makefile @@ -9,23 +9,13 @@ include $(RTE_SDK)/mk/rte.vars.mk # LIB = librte_bus_fslmc.a -ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y) -CONFIG_RTE_LIBRTE_FSLMC_BUS = $(CONFIG_RTE_LIBRTE_DPAA2_PMD) -endif - CFLAGS += -DALLOW_EXPERIMENTAL_API -ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y) -CFLAGS += -O0 -g -CFLAGS += "-Wno-error" -else CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) -endif CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include -CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev @@ -42,11 +32,12 @@ SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \ SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \ mc/dpmng.c \ - mc/dpbp.c \ - mc/dpio.c \ - mc/mc_sys.c \ + mc/dpbp.c \ + mc/dpio.c \ + mc/mc_sys.c \ mc/dpcon.c \ - mc/dpci.c + mc/dpci.c \ + mc/dpdmai.c SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpio.c SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpbp.c diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c index 5ee0beb8..d2900edc 100644 --- a/drivers/bus/fslmc/fslmc_bus.c +++ b/drivers/bus/fslmc/fslmc_bus.c @@ -18,11 +18,12 @@ #include #include +#include "fslmc_logs.h" -#define FSLMC_BUS_LOG(level, fmt, args...) \ - RTE_LOG(level, EAL, fmt "\n", ##args) +int dpaa2_logtype_bus; #define VFIO_IOMMU_GROUP_PATH "/sys/kernel/iommu_groups" +#define FSLMC_BUS_NAME fslmc struct rte_fslmc_bus rte_fslmc_bus; uint8_t dpaa2_virt_mode; @@ -93,6 +94,41 @@ insert_in_device_list(struct rte_dpaa2_device *newdev) TAILQ_INSERT_TAIL(&rte_fslmc_bus.device_list, newdev, next); } +static struct rte_devargs * +fslmc_devargs_lookup(struct rte_dpaa2_device *dev) +{ + struct rte_devargs *devargs; + char dev_name[32]; + + RTE_EAL_DEVARGS_FOREACH("fslmc", devargs) { + devargs->bus->parse(devargs->name, &dev_name); + if (strcmp(dev_name, dev->device.name) == 0) { + DPAA2_BUS_INFO("**Devargs matched %s", dev_name); + return devargs; + } + } + return NULL; +} + +static void +dump_device_list(void) +{ + struct rte_dpaa2_device *dev; + uint32_t global_log_level; + int local_log_level; + + /* Only if the log level has been set to Debugging, print list */ + global_log_level = rte_log_get_global_level(); + local_log_level = rte_log_get_level(dpaa2_logtype_bus); + if (global_log_level == RTE_LOG_DEBUG || + local_log_level == RTE_LOG_DEBUG) { + DPAA2_BUS_LOG(DEBUG, "List of devices scanned on bus:"); + TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) { + DPAA2_BUS_LOG(DEBUG, "\t\t%s", dev->device.name); + } + } +} + static int scan_one_fslmc_device(char *dev_name) { @@ -109,7 +145,7 @@ scan_one_fslmc_device(char *dev_name) /* Creating a temporary copy to perform cut-parse over string */ dup_dev_name = strdup(dev_name); if (!dup_dev_name) { - FSLMC_BUS_LOG(ERR, "Out of memory."); + DPAA2_BUS_ERR("Unable to allocate device name memory"); return -ENOMEM; } @@ -120,7 +156,7 @@ scan_one_fslmc_device(char *dev_name) */ dev = calloc(1, sizeof(struct rte_dpaa2_device)); if (!dev) { - FSLMC_BUS_LOG(ERR, "Out of memory."); + DPAA2_BUS_ERR("Unable to allocate device object"); free(dup_dev_name); return -ENOMEM; } @@ -128,7 +164,7 @@ scan_one_fslmc_device(char *dev_name) /* Parse the device name and ID */ t_ptr = strtok(dup_dev_name, "."); if (!t_ptr) { 
- FSLMC_BUS_LOG(ERR, "Incorrect device string observed."); + DPAA2_BUS_ERR("Incorrect device name observed"); goto cleanup; } if (!strncmp("dpni", t_ptr, 4)) @@ -145,6 +181,8 @@ scan_one_fslmc_device(char *dev_name) dev->dev_type = DPAA2_CI; else if (!strncmp("dpmcp", t_ptr, 5)) dev->dev_type = DPAA2_MPORTAL; + else if (!strncmp("dpdmai", t_ptr, 6)) + dev->dev_type = DPAA2_QDMA; else dev->dev_type = DPAA2_UNKNOWN; @@ -153,17 +191,17 @@ scan_one_fslmc_device(char *dev_name) t_ptr = strtok(NULL, "."); if (!t_ptr) { - FSLMC_BUS_LOG(ERR, "Incorrect device string observed (%s).", - t_ptr); + DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr); goto cleanup; } sscanf(t_ptr, "%hu", &dev->object_id); dev->device.name = strdup(dev_name); if (!dev->device.name) { - FSLMC_BUS_LOG(ERR, "Out of memory."); + DPAA2_BUS_ERR("Unable to clone device name. Out of memory"); goto cleanup; } + dev->device.devargs = fslmc_devargs_lookup(dev); /* Add device in the fslmc device list */ insert_in_device_list(dev); @@ -181,6 +219,54 @@ cleanup: return -1; } +static int +rte_fslmc_parse(const char *name, void *addr) +{ + uint16_t dev_id; + char *t_ptr; + char *sep = strchr(name, ':'); + + if (strncmp(name, RTE_STR(FSLMC_BUS_NAME), + strlen(RTE_STR(FSLMC_BUS_NAME)))) { + return -EINVAL; + } + + if (!sep) { + DPAA2_BUS_ERR("Incorrect device name observed"); + return -EINVAL; + } + + t_ptr = (char *)(sep + 1); + + if (strncmp("dpni", t_ptr, 4) && + strncmp("dpseci", t_ptr, 6) && + strncmp("dpcon", t_ptr, 5) && + strncmp("dpbp", t_ptr, 4) && + strncmp("dpio", t_ptr, 4) && + strncmp("dpci", t_ptr, 4) && + strncmp("dpmcp", t_ptr, 5) && + strncmp("dpdmai", t_ptr, 6)) { + DPAA2_BUS_ERR("Unknown or unsupported device"); + return -EINVAL; + } + + t_ptr = strchr(name, '.'); + if (!t_ptr) { + DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr); + return -EINVAL; + } + + t_ptr = (char *)(t_ptr + 1); + if (sscanf(t_ptr, "%hu", &dev_id) <= 0) { + DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr); + return -EINVAL; + } + + if (addr) + strcpy(addr, (char *)(sep + 1)); + return 0; +} + static int rte_fslmc_scan(void) { @@ -193,8 +279,7 @@ rte_fslmc_scan(void) int groupid; if (process_once) { - FSLMC_BUS_LOG(DEBUG, - "Fslmc bus already scanned. Not rescanning"); + DPAA2_BUS_DEBUG("Fslmc bus already scanned. Not rescanning"); return 0; } process_once = 1; @@ -208,7 +293,7 @@ rte_fslmc_scan(void) groupid); dir = opendir(fslmc_dirpath); if (!dir) { - FSLMC_BUS_LOG(ERR, "Unable to open VFIO group dir."); + DPAA2_BUS_ERR("Unable to open VFIO group directory"); goto scan_fail; } @@ -224,9 +309,12 @@ rte_fslmc_scan(void) device_count += 1; } - FSLMC_BUS_LOG(INFO, "fslmc: Bus scan completed"); - closedir(dir); + + DPAA2_BUS_INFO("FSLMC Bus scan completed"); + /* If debugging is enabled, device list is dumped to log output */ + dump_device_list(); + return 0; scan_fail_cleanup: @@ -235,7 +323,7 @@ scan_fail_cleanup: /* Remove all devices in the list */ cleanup_fslmc_device_list(); scan_fail: - FSLMC_BUS_LOG(DEBUG, "FSLMC Bus Not Available. Skipping."); + DPAA2_BUS_DEBUG("FSLMC Bus Not Available. 
Skipping"); /* Irrespective of failure, scan only return success */ return 0; } @@ -254,6 +342,8 @@ static int rte_fslmc_probe(void) { int ret = 0; + int probe_all; + struct rte_dpaa2_device *dev; struct rte_dpaa2_driver *drv; @@ -262,16 +352,29 @@ rte_fslmc_probe(void) ret = fslmc_vfio_setup_group(); if (ret) { - FSLMC_BUS_LOG(ERR, "Unable to setup VFIO %d", ret); + DPAA2_BUS_ERR("Unable to setup VFIO %d", ret); + return 0; + } + + /* Map existing segments as well as, in case of hotpluggable memory, + * install callback handler. + */ + ret = rte_fslmc_vfio_dmamap(); + if (ret) { + DPAA2_BUS_ERR("Unable to DMA map existing VAs: (%d)", ret); + /* Not continuing ahead */ + DPAA2_BUS_ERR("FSLMC VFIO Mapping failed"); return 0; } ret = fslmc_vfio_process_group(); if (ret) { - FSLMC_BUS_LOG(ERR, "Unable to setup devices %d", ret); + DPAA2_BUS_ERR("Unable to setup devices %d", ret); return 0; } + probe_all = rte_fslmc_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST; + TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) { TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) { ret = rte_fslmc_match(drv, dev); @@ -281,9 +384,21 @@ rte_fslmc_probe(void) if (!drv->probe) continue; - ret = drv->probe(drv, dev); - if (ret) - FSLMC_BUS_LOG(ERR, "Unable to probe.\n"); + if (dev->device.devargs && + dev->device.devargs->policy == RTE_DEV_BLACKLISTED) { + DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping", + dev->device.name); + continue; + } + + if (probe_all || + (dev->device.devargs && + dev->device.devargs->policy == + RTE_DEV_WHITELISTED)) { + ret = drv->probe(drv, dev); + if (ret) + DPAA2_BUS_ERR("Unable to probe"); + } break; } } @@ -298,16 +413,19 @@ static struct rte_device * rte_fslmc_find_device(const struct rte_device *start, rte_dev_cmp_t cmp, const void *data) { + const struct rte_dpaa2_device *dstart; struct rte_dpaa2_device *dev; - TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) { - if (start && &dev->device == start) { - start = NULL; /* starting point found */ - continue; - } - + if (start != NULL) { + dstart = RTE_DEV_TO_FSLMC_CONST(start); + dev = TAILQ_NEXT(dstart, next); + } else { + dev = TAILQ_FIRST(&rte_fslmc_bus.device_list); + } + while (dev != NULL) { if (cmp(&dev->device, data) == 0) return &dev->device; + dev = TAILQ_NEXT(dev, next); } return NULL; @@ -390,6 +508,7 @@ struct rte_fslmc_bus rte_fslmc_bus = { .bus = { .scan = rte_fslmc_scan, .probe = rte_fslmc_probe, + .parse = rte_fslmc_parse, .find_device = rte_fslmc_find_device, .get_iommu_class = rte_dpaa2_get_iommu_class, }, @@ -398,4 +517,12 @@ struct rte_fslmc_bus rte_fslmc_bus = { .device_count = {0}, }; -RTE_REGISTER_BUS(fslmc, rte_fslmc_bus.bus); +RTE_REGISTER_BUS(FSLMC_BUS_NAME, rte_fslmc_bus.bus); + +RTE_INIT(fslmc_init_log) +{ + /* Bus level logs */ + dpaa2_logtype_bus = rte_log_register("bus.fslmc"); + if (dpaa2_logtype_bus >= 0) + rte_log_set_level(dpaa2_logtype_bus, RTE_LOG_NOTICE); +} diff --git a/drivers/bus/fslmc/fslmc_logs.h b/drivers/bus/fslmc/fslmc_logs.h index d87b6386..dd74cb7d 100644 --- a/drivers/bus/fslmc/fslmc_logs.h +++ b/drivers/bus/fslmc/fslmc_logs.h @@ -7,44 +7,35 @@ #ifndef _FSLMC_LOGS_H_ #define _FSLMC_LOGS_H_ -#define PMD_INIT_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args) - -#ifdef RTE_LIBRTE_DPAA2_DEBUG_INIT -#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") -#else -#define PMD_INIT_FUNC_TRACE() do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA2_DEBUG_RX -#define PMD_RX_LOG(level, fmt, args...) 
\ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_RX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX -#define PMD_TX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_TX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX_FREE -#define PMD_TX_FREE_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER -#define PMD_DRV_LOG_RAW(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) -#else -#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) -#endif - -#define PMD_DRV_LOG(level, fmt, args...) \ - PMD_DRV_LOG_RAW(level, fmt "\n", ## args) +extern int dpaa2_logtype_bus; + +#define DPAA2_BUS_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, dpaa2_logtype_bus, "fslmc: " fmt "\n", \ + ##args) + +/* Debug logs are with Function names */ +#define DPAA2_BUS_DEBUG(fmt, args...) \ + rte_log(RTE_LOG_DEBUG, dpaa2_logtype_bus, "fslmc: %s(): " fmt "\n", \ + __func__, ##args) + +#define BUS_INIT_FUNC_TRACE() DPAA2_BUS_DEBUG(" >>") + +#define DPAA2_BUS_INFO(fmt, args...) \ + DPAA2_BUS_LOG(INFO, fmt, ## args) +#define DPAA2_BUS_ERR(fmt, args...) \ + DPAA2_BUS_LOG(ERR, fmt, ## args) +#define DPAA2_BUS_WARN(fmt, args...) \ + DPAA2_BUS_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define DPAA2_BUS_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#define DPAA2_BUS_DP_DEBUG(fmt, args...) \ + DPAA2_BUS_DP_LOG(DEBUG, fmt, ## args) +#define DPAA2_BUS_DP_INFO(fmt, args...) \ + DPAA2_BUS_DP_LOG(INFO, fmt, ## args) +#define DPAA2_BUS_DP_WARN(fmt, args...) \ + DPAA2_BUS_DP_LOG(WARNING, fmt, ## args) #endif /* _FSLMC_LOGS_H_ */ diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c index 1241295b..4c2cd2a8 100644 --- a/drivers/bus/fslmc/fslmc_vfio.c +++ b/drivers/bus/fslmc/fslmc_vfio.c @@ -30,17 +30,16 @@ #include #include #include +#include #include "rte_fslmc.h" #include "fslmc_vfio.h" +#include "fslmc_logs.h" #include #include "portal/dpaa2_hw_pvt.h" #include "portal/dpaa2_hw_dpio.h" -#define FSLMC_VFIO_LOG(level, fmt, args...) \ - RTE_LOG(level, EAL, fmt "\n", ##args) - /** Pathname of FSL-MC devices directory. 
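The fslmc_logs.h overhaul replaces the compile-time PMD_* macros with a dynamically registered logtype, the same pattern used for the DPAA bus earlier in this patch. A minimal sketch (the logtype name is illustrative):

```c
#include <rte_common.h>
#include <rte_log.h>

static int example_logtype;

RTE_INIT(example_log_init)
{
	/* Register once at constructor time; default to NOTICE level. */
	example_logtype = rte_log_register("bus.example");
	if (example_logtype >= 0)
		rte_log_set_level(example_logtype, RTE_LOG_NOTICE);
}

#define EXAMPLE_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, example_logtype, "example: " fmt "\n", ##args)
```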
*/ #define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices" @@ -53,7 +52,6 @@ static int container_device_fd; static char *g_container; static uint32_t *msi_intr_vaddr; void *(*rte_mcp_ptr_list); -static int is_dma_done; static struct rte_dpaa2_object_list dpaa2_obj_list = TAILQ_HEAD_INITIALIZER(dpaa2_obj_list); @@ -76,33 +74,32 @@ fslmc_get_container_group(int *groupid) if (!g_container) { container = getenv("DPRC"); if (container == NULL) { - RTE_LOG(WARNING, EAL, "DPAA2: DPRC not available\n"); + DPAA2_BUS_DEBUG("DPAA2: DPRC not available"); return -EINVAL; } if (strlen(container) >= FSLMC_CONTAINER_MAX_LEN) { - FSLMC_VFIO_LOG(ERR, "Invalid container name: %s\n", - container); + DPAA2_BUS_ERR("Invalid container name: %s", container); return -1; } g_container = strdup(container); if (!g_container) { - FSLMC_VFIO_LOG(ERR, "Out of memory."); + DPAA2_BUS_ERR("Mem alloc failure; Container name"); return -ENOMEM; } } /* get group number */ - ret = vfio_get_group_no(SYSFS_FSL_MC_DEVICES, g_container, groupid); + ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES, + g_container, groupid); if (ret <= 0) { - FSLMC_VFIO_LOG(ERR, "Unable to find %s IOMMU group", - g_container); + DPAA2_BUS_ERR("Unable to find %s IOMMU group", g_container); return -1; } - FSLMC_VFIO_LOG(DEBUG, "Container: %s has VFIO iommu group id = %d", - g_container, *groupid); + DPAA2_BUS_DEBUG("Container: %s has VFIO iommu group id = %d", + g_container, *groupid); return 0; } @@ -113,14 +110,14 @@ vfio_connect_container(void) int fd, ret; if (vfio_container.used) { - FSLMC_VFIO_LOG(DEBUG, "No container available."); + DPAA2_BUS_DEBUG("No container available"); return -1; } /* Try connecting to vfio container if already created */ if (!ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &vfio_container.fd)) { - FSLMC_VFIO_LOG(INFO, + DPAA2_BUS_DEBUG( "Container pre-exists with FD[0x%x] for this group", vfio_container.fd); vfio_group.container = &vfio_container; @@ -128,9 +125,9 @@ vfio_connect_container(void) } /* Opens main vfio file descriptor which represents the "container" */ - fd = vfio_get_container_fd(); + fd = rte_vfio_get_container_fd(); if (fd < 0) { - FSLMC_VFIO_LOG(ERR, "Failed to open VFIO container"); + DPAA2_BUS_ERR("Failed to open VFIO container"); return -errno; } @@ -139,19 +136,19 @@ vfio_connect_container(void) /* Connect group to container */ ret = ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &fd); if (ret) { - FSLMC_VFIO_LOG(ERR, "Failed to setup group container"); + DPAA2_BUS_ERR("Failed to setup group container"); close(fd); return -errno; } ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU); if (ret) { - FSLMC_VFIO_LOG(ERR, "Failed to setup VFIO iommu"); + DPAA2_BUS_ERR("Failed to setup VFIO iommu"); close(fd); return -errno; } } else { - FSLMC_VFIO_LOG(ERR, "No supported IOMMU available"); + DPAA2_BUS_ERR("No supported IOMMU available"); close(fd); return -EINVAL; } @@ -179,7 +176,7 @@ static int vfio_map_irq_region(struct fslmc_vfio_group *group) vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE | PROT_READ, MAP_SHARED, container_device_fd, 0x6030000); if (vaddr == MAP_FAILED) { - FSLMC_VFIO_LOG(ERR, "Unable to map region (errno = %d)", errno); + DPAA2_BUS_ERR("Unable to map region (errno = %d)", errno); return -errno; } @@ -189,88 +186,195 @@ static int vfio_map_irq_region(struct fslmc_vfio_group *group) if (ret == 0) return 0; - FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA fails (errno = %d)", errno); + DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)", errno); return -errno; } -int 
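fslmc_get_container_group() above resolves the container from the DPRC environment variable before asking VFIO for its IOMMU group. A trimmed sketch of that first step (the length bound here is illustrative, not the driver's FSLMC_CONTAINER_MAX_LEN):

```c
#include <stdlib.h>
#include <string.h>

/* Resolve the DPRC container name, e.g. "dprc.2"; the real code then
 * feeds it to rte_vfio_get_group_num() to find the IOMMU group id. */
static const char *resolve_dprc(void)
{
	const char *c = getenv("DPRC");

	return (c && strlen(c) < 36) ? c : NULL;
}
```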
rte_fslmc_vfio_dmamap(void) +static int fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len); +static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len); + +static void +fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len, + void *arg __rte_unused) { + struct rte_memseg_list *msl; + struct rte_memseg *ms; + size_t cur_len = 0, map_len = 0; + uint64_t virt_addr; + rte_iova_t iova_addr; int ret; + + msl = rte_mem_virt2memseg_list(addr); + + while (cur_len < len) { + const void *va = RTE_PTR_ADD(addr, cur_len); + + ms = rte_mem_virt2memseg(va, msl); + iova_addr = ms->iova; + virt_addr = ms->addr_64; + map_len = ms->len; + + DPAA2_BUS_DEBUG("Request for %s, va=%p, " + "virt_addr=0x%" PRIx64 ", " + "iova=0x%" PRIx64 ", map_len=%zu", + type == RTE_MEM_EVENT_ALLOC ? + "alloc" : "dealloc", + va, virt_addr, iova_addr, map_len); + + if (type == RTE_MEM_EVENT_ALLOC) + ret = fslmc_map_dma(virt_addr, iova_addr, map_len); + else + ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len); + + if (ret != 0) { + DPAA2_BUS_ERR("DMA Mapping/Unmapping failed. " + "Map=%d, addr=%p, len=%zu, err:(%d)", + type, va, map_len, ret); + return; + } + + cur_len += map_len; + } + + if (type == RTE_MEM_EVENT_ALLOC) + DPAA2_BUS_DEBUG("Total Mapped: addr=%p, len=%zu", + addr, len); + else + DPAA2_BUS_DEBUG("Total Unmapped: addr=%p, len=%zu", + addr, len); +} + +static int +fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr __rte_unused, size_t len) +{ struct fslmc_vfio_group *group; struct vfio_iommu_type1_dma_map dma_map = { .argsz = sizeof(struct vfio_iommu_type1_dma_map), .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, }; + int ret; - int i; - const struct rte_memseg *memseg; + dma_map.size = len; + dma_map.vaddr = vaddr; - if (is_dma_done) - return 0; +#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA + dma_map.iova = iovaddr; +#else + dma_map.iova = dma_map.vaddr; +#endif - memseg = rte_eal_get_physmem_layout(); - if (memseg == NULL) { - FSLMC_VFIO_LOG(ERR, "Cannot get physical layout."); - return -ENODEV; + /* SET DMA MAP for IOMMU */ + group = &vfio_group; + + if (!group->container) { + DPAA2_BUS_ERR("Container is not connected "); + return -1; } - for (i = 0; i < RTE_MAX_MEMSEG; i++) { - if (memseg[i].addr == NULL && memseg[i].len == 0) { - FSLMC_VFIO_LOG(DEBUG, "Total %d segments found.", i); - break; - } + DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"", + (uint64_t)dma_map.vaddr, (uint64_t)dma_map.size); + ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map); + if (ret) { + DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA API(errno = %d)", + errno); + return -1; + } - dma_map.size = memseg[i].len; - dma_map.vaddr = memseg[i].addr_64; -#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA - if (rte_eal_iova_mode() == RTE_IOVA_VA) - dma_map.iova = dma_map.vaddr; - else - dma_map.iova = memseg[i].iova; -#else - dma_map.iova = dma_map.vaddr; -#endif + return 0; +} - /* SET DMA MAP for IOMMU */ - group = &vfio_group; +static int +fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len) +{ + struct fslmc_vfio_group *group; + struct vfio_iommu_type1_dma_unmap dma_unmap = { + .argsz = sizeof(struct vfio_iommu_type1_dma_unmap), + .flags = 0, + }; + int ret; - if (!group->container) { - FSLMC_VFIO_LOG(ERR, "Container is not connected "); - return -1; - } + dma_unmap.size = len; + dma_unmap.iova = vaddr; - FSLMC_VFIO_LOG(DEBUG, "-->Initial SHM Virtual ADDR %llX", - dma_map.vaddr); - FSLMC_VFIO_LOG(DEBUG, "-----> DMA size 0x%llX", dma_map.size); - ret = 
ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, - &dma_map); - if (ret) { - FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA API(errno = %d)", - errno); - return ret; - } + /* SET DMA MAP for IOMMU */ + group = &vfio_group; + + if (!group->container) { + DPAA2_BUS_ERR("Container is not connected "); + return -1; } - /* Verifying that at least single segment is available */ - if (i <= 0) { - FSLMC_VFIO_LOG(ERR, "No Segments found for VFIO Mapping"); + DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"", + (uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size); + ret = ioctl(group->container->fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap); + if (ret) { + DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA API(errno = %d)", + errno); return -1; } + return 0; +} + +static int +fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused, + const struct rte_memseg *ms, void *arg) +{ + int *n_segs = arg; + int ret; + + ret = fslmc_map_dma(ms->addr_64, ms->iova, ms->len); + if (ret) + DPAA2_BUS_ERR("Unable to VFIO map (addr=%p, len=%zu)", + ms->addr, ms->len); + else + (*n_segs)++; + + return ret; +} + +int rte_fslmc_vfio_dmamap(void) +{ + int i = 0, ret; + struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; + rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock; + + /* Lock before parsing and registering callback to memory subsystem */ + rte_rwlock_read_lock(mem_lock); + + if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) { + rte_rwlock_read_unlock(mem_lock); + return -1; + } + + ret = rte_mem_event_callback_register("fslmc_memevent_clb", + fslmc_memevent_cb, NULL); + if (ret && rte_errno == ENOTSUP) + DPAA2_BUS_DEBUG("Memory event callbacks not supported"); + else if (ret) + DPAA2_BUS_DEBUG("Unable to install memory handler"); + else + DPAA2_BUS_DEBUG("Installed memory callback handler"); + + DPAA2_BUS_DEBUG("Total %d segments found.", i); + /* TODO - This is a W.A. as VFIO currently does not add the mapping of * the interrupt region to SMMU. This should be removed once the * support is added in the Kernel. */ - vfio_map_irq_region(group); + vfio_map_irq_region(&vfio_group); - is_dma_done = 1; + /* Existing segments have been mapped and memory callback for hotplug + * has been installed. 
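rte_fslmc_vfio_dmamap() above walks the segments that already exist and then registers a memory-event callback so hotplugged segments are mapped and unmapped as they come and go. A pared-down sketch of that flow, with the VFIO ioctl bodies elided and the names invented for illustration:

```c
#include <rte_common.h>
#include <rte_memory.h>

static int count_seg(const struct rte_memseg_list *msl __rte_unused,
		     const struct rte_memseg *ms __rte_unused, void *arg)
{
	(*(int *)arg)++; /* real code VFIO-maps ms->iova/ms->len here */
	return 0;
}

static void
example_mem_event_cb(enum rte_mem_event type, const void *addr, size_t len,
		     void *arg __rte_unused)
{
	/* Real code maps on RTE_MEM_EVENT_ALLOC and unmaps otherwise. */
	RTE_SET_USED(type);
	RTE_SET_USED(addr);
	RTE_SET_USED(len);
}

static int example_dma_setup(void)
{
	int n = 0;

	if (rte_memseg_walk(count_seg, &n) < 0) /* map what exists now */
		return -1;
	return rte_mem_event_callback_register("example_clb",
					       example_mem_event_cb, NULL);
}
```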
+ */ + rte_rwlock_read_unlock(mem_lock); return 0; } static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj) { - int64_t v_addr = (int64_t)MAP_FAILED; + intptr_t v_addr = (intptr_t)MAP_FAILED; int32_t ret, mc_fd; struct vfio_device_info d_info = { .argsz = sizeof(d_info) }; @@ -279,29 +383,26 @@ static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj) /* getting the mcp object's fd*/ mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj); if (mc_fd < 0) { - FSLMC_VFIO_LOG(ERR, "error in VFIO get dev %s fd from group %d", - mcp_obj, group->fd); + DPAA2_BUS_ERR("Error in VFIO get dev %s fd from group %d", + mcp_obj, group->fd); return v_addr; } /* getting device info*/ ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info); if (ret < 0) { - FSLMC_VFIO_LOG(ERR, "error in VFIO getting DEVICE_INFO"); + DPAA2_BUS_ERR("Error in VFIO getting DEVICE_INFO"); goto MC_FAILURE; } /* getting device region info*/ ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info); if (ret < 0) { - FSLMC_VFIO_LOG(ERR, "error in VFIO getting REGION_INFO"); + DPAA2_BUS_ERR("Error in VFIO getting REGION_INFO"); goto MC_FAILURE; } - FSLMC_VFIO_LOG(DEBUG, "region offset = %llx , region size = %llx", - reg_info.offset, reg_info.size); - - v_addr = (uint64_t)mmap(NULL, reg_info.size, + v_addr = (size_t)mmap(NULL, reg_info.size, PROT_WRITE | PROT_READ, MAP_SHARED, mc_fd, reg_info.offset); @@ -334,8 +435,8 @@ int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle, int index) ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); if (ret) { - RTE_LOG(ERR, EAL, "Error:dpaa2 SET IRQs fd=%d, err = %d(%s)\n", - intr_handle->fd, errno, strerror(errno)); + DPAA2_BUS_ERR("Error:dpaa2 SET IRQs fd=%d, err = %d(%s)", + intr_handle->fd, errno, strerror(errno)); return ret; } @@ -359,8 +460,8 @@ int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index) ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); if (ret) - RTE_LOG(ERR, EAL, - "Error disabling dpaa2 interrupts for fd %d\n", + DPAA2_BUS_ERR( + "Error disabling dpaa2 interrupts for fd %d", intr_handle->fd); return ret; @@ -383,9 +484,8 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle, ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info); if (ret < 0) { - FSLMC_VFIO_LOG(ERR, - "cannot get IRQ(%d) info, error %i (%s)", - i, errno, strerror(errno)); + DPAA2_BUS_ERR("Cannot get IRQ(%d) info, error %i (%s)", + i, errno, strerror(errno)); return -1; } @@ -399,9 +499,8 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle, /* set up an eventfd for interrupts */ fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); if (fd < 0) { - FSLMC_VFIO_LOG(ERR, - "cannot set up eventfd, error %i (%s)\n", - errno, strerror(errno)); + DPAA2_BUS_ERR("Cannot set up eventfd, error %i (%s)", + errno, strerror(errno)); return -1; } @@ -430,13 +529,14 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev) dev_fd = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, dev->device.name); if (dev_fd <= 0) { - FSLMC_VFIO_LOG(ERR, "Unable to obtain device FD for device:%s", - dev->device.name); + DPAA2_BUS_ERR("Unable to obtain device FD for device:%s", + dev->device.name); return -1; } if (ioctl(dev_fd, VFIO_DEVICE_GET_INFO, &device_info)) { - FSLMC_VFIO_LOG(ERR, "DPAA2 VFIO_DEVICE_GET_INFO fail"); + DPAA2_BUS_ERR("Unable to obtain information for device:%s", + dev->device.name); return -1; } @@ -461,61 +561,74 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev) break; } - 
FSLMC_VFIO_LOG(DEBUG, "Device (%s) abstracted from VFIO", - dev->device.name); + DPAA2_BUS_LOG(DEBUG, "Device (%s) abstracted from VFIO", + dev->device.name); return 0; } static int fslmc_process_mcp(struct rte_dpaa2_device *dev) { - int64_t v_addr; - char *dev_name; + int ret; + intptr_t v_addr; + char *dev_name = NULL; struct fsl_mc_io dpmng = {0}; struct mc_version mc_ver_info = {0}; rte_mcp_ptr_list = malloc(sizeof(void *) * 1); if (!rte_mcp_ptr_list) { - FSLMC_VFIO_LOG(ERR, "Out of memory"); - return -ENOMEM; + DPAA2_BUS_ERR("Unable to allocate MC portal memory"); + ret = -ENOMEM; + goto cleanup; } dev_name = strdup(dev->device.name); if (!dev_name) { - FSLMC_VFIO_LOG(ERR, "Out of memory."); - free(rte_mcp_ptr_list); - rte_mcp_ptr_list = NULL; - return -ENOMEM; + DPAA2_BUS_ERR("Unable to allocate MC device name memory"); + ret = -ENOMEM; + goto cleanup; } v_addr = vfio_map_mcp_obj(&vfio_group, dev_name); - if (v_addr == (int64_t)MAP_FAILED) { - FSLMC_VFIO_LOG(ERR, "Error mapping region (errno = %d)", - errno); - free(rte_mcp_ptr_list); - rte_mcp_ptr_list = NULL; - return -1; + if (v_addr == (intptr_t)MAP_FAILED) { + DPAA2_BUS_ERR("Error mapping region (errno = %d)", errno); + ret = -1; + goto cleanup; } /* check the MC version compatibility */ dpmng.regs = (void *)v_addr; - if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info)) - RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n"); + if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info)) { + DPAA2_BUS_ERR("Unable to obtain MC version"); + ret = -1; + goto cleanup; + } if ((mc_ver_info.major != MC_VER_MAJOR) || (mc_ver_info.minor < MC_VER_MINOR)) { - RTE_LOG(ERR, PMD, "DPAA2 MC version not compatible!" - " Expected %d.%d.x, Detected %d.%d.%d\n", - MC_VER_MAJOR, MC_VER_MINOR, - mc_ver_info.major, mc_ver_info.minor, - mc_ver_info.revision); - free(rte_mcp_ptr_list); - rte_mcp_ptr_list = NULL; - return -1; + DPAA2_BUS_ERR("DPAA2 MC version not compatible!" + " Expected %d.%d.x, Detected %d.%d.%d", + MC_VER_MAJOR, MC_VER_MINOR, + mc_ver_info.major, mc_ver_info.minor, + mc_ver_info.revision); + ret = -1; + goto cleanup; } rte_mcp_ptr_list[0] = (void *)v_addr; + free(dev_name); return 0; + +cleanup: + if (dev_name) + free(dev_name); + + if (rte_mcp_ptr_list) { + free(rte_mcp_ptr_list); + rte_mcp_ptr_list = NULL; + } + + return ret; } int @@ -530,7 +643,7 @@ fslmc_vfio_process_group(void) if (dev->dev_type == DPAA2_MPORTAL) { ret = fslmc_process_mcp(dev); if (ret) { - FSLMC_VFIO_LOG(DEBUG, "Unable to map Portal."); + DPAA2_BUS_ERR("Unable to map MC Portal"); return -1; } if (!found_mportal) @@ -547,23 +660,19 @@ fslmc_vfio_process_group(void) /* Cannot continue if there is not even a single mportal */ if (!found_mportal) { - FSLMC_VFIO_LOG(DEBUG, - "No MC Portal device found. Not continuing."); + DPAA2_BUS_ERR("No MC Portal device found. 
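fslmc_process_mcp() above consolidates its error paths into a single cleanup: label instead of repeating free()/reset pairs at every failure site. The idiom in isolation:

```c
#include <stdlib.h>

/* Every failure jumps to one cleanup label; free(NULL) being a no-op
 * keeps that label simple even when only some resources were acquired. */
static int acquire_two(void **a, void **b)
{
	*a = malloc(16);
	if (*a == NULL)
		goto cleanup;
	*b = malloc(16);
	if (*b == NULL)
		goto cleanup;
	return 0;

cleanup:
	free(*a);
	*a = NULL;
	return -1;
}
```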
Not continuing"); return -1; } TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) { - if (!dev) - break; - switch (dev->dev_type) { case DPAA2_ETH: case DPAA2_CRYPTO: + case DPAA2_QDMA: ret = fslmc_process_iodevices(dev); if (ret) { - FSLMC_VFIO_LOG(DEBUG, - "Dev (%s) init failed.", - dev->device.name); + DPAA2_BUS_DEBUG("Dev (%s) init failed", + dev->device.name); return ret; } break; @@ -576,9 +685,8 @@ fslmc_vfio_process_group(void) */ ret = fslmc_process_iodevices(dev); if (ret) { - FSLMC_VFIO_LOG(DEBUG, - "Dev (%s) init failed.", - dev->device.name); + DPAA2_BUS_DEBUG("Dev (%s) init failed", + dev->device.name); return -1; } @@ -592,8 +700,8 @@ fslmc_vfio_process_group(void) case DPAA2_UNKNOWN: default: /* Unknown - ignore */ - FSLMC_VFIO_LOG(DEBUG, "Found unknown device (%s).", - dev->device.name); + DPAA2_BUS_DEBUG("Found unknown device (%s)", + dev->device.name); TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next); free(dev); dev = NULL; @@ -622,12 +730,12 @@ fslmc_vfio_setup_group(void) * processing. */ if (vfio_group.groupid == groupid) { - FSLMC_VFIO_LOG(ERR, "groupid already exists %d", groupid); + DPAA2_BUS_ERR("groupid already exists %d", groupid); return 0; } /* Get the actual group fd */ - ret = vfio_get_group_fd(groupid); + ret = rte_vfio_get_group_fd(groupid); if (ret < 0) return ret; vfio_group.fd = ret; @@ -635,14 +743,14 @@ fslmc_vfio_setup_group(void) /* Check group viability */ ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_STATUS, &status); if (ret) { - FSLMC_VFIO_LOG(ERR, "VFIO error getting group status"); + DPAA2_BUS_ERR("VFIO error getting group status"); close(vfio_group.fd); rte_vfio_clear_group(vfio_group.fd); return ret; } if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { - FSLMC_VFIO_LOG(ERR, "VFIO group not viable"); + DPAA2_BUS_ERR("VFIO group not viable"); close(vfio_group.fd); rte_vfio_clear_group(vfio_group.fd); return -EPERM; @@ -655,7 +763,7 @@ fslmc_vfio_setup_group(void) /* Now connect this IOMMU group to given container */ ret = vfio_connect_container(); if (ret) { - FSLMC_VFIO_LOG(ERR, + DPAA2_BUS_ERR( "Error connecting container with groupid %d", groupid); close(vfio_group.fd); @@ -667,15 +775,15 @@ fslmc_vfio_setup_group(void) /* Get Device information */ ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, g_container); if (ret < 0) { - FSLMC_VFIO_LOG(ERR, "Error getting device %s fd from group %d", - g_container, vfio_group.groupid); + DPAA2_BUS_ERR("Error getting device %s fd from group %d", + g_container, vfio_group.groupid); close(vfio_group.fd); rte_vfio_clear_group(vfio_group.fd); return ret; } container_device_fd = ret; - FSLMC_VFIO_LOG(DEBUG, "VFIO Container FD is [0x%X]", - container_device_fd); + DPAA2_BUS_DEBUG("VFIO Container FD is [0x%X]", + container_device_fd); return 0; } diff --git a/drivers/bus/fslmc/fslmc_vfio.h b/drivers/bus/fslmc/fslmc_vfio.h index e8fb3445..9e2c4fee 100644 --- a/drivers/bus/fslmc/fslmc_vfio.h +++ b/drivers/bus/fslmc/fslmc_vfio.h @@ -10,8 +10,6 @@ #include -#include "eal_vfio.h" - #define DPAA2_MC_DPNI_DEVID 7 #define DPAA2_MC_DPSECI_DEVID 3 #define DPAA2_MC_DPCON_DEVID 5 diff --git a/drivers/bus/fslmc/mc/dpdmai.c b/drivers/bus/fslmc/mc/dpdmai.c new file mode 100644 index 00000000..528889df --- /dev/null +++ b/drivers/bus/fslmc/mc/dpdmai.c @@ -0,0 +1,429 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 NXP + */ + +#include +#include +#include +#include + +/** + * dpdmai_open() - Open a control session for the specified object + * @mc_io: Pointer to MC portal's I/O object + * 
@cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpdmai_id: DPDMAI unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dpdmai_create() function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dpdmai_id, + uint16_t *token) +{ + struct dpdmai_cmd_open *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dpdmai_cmd_open *)cmd.params; + cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpdmai_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE, + cmd_flags, token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_create() - Create the DPDMAI object + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @cfg: Configuration structure + * @obj_id: Returned object id + * + * Create the DPDMAI object, allocate required resources and + * perform required initialization. + * + * The object can be created either by declaring it in the + * DPL file, or by calling this function. + * + * The function accepts an authentication token of a parent + * container that this object should be assigned to. The token + * can be '0' so the object will be assigned to the default container. + * The newly created object can be opened with the returned + * object id and using the container's associated tokens and MC portals. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dpdmai_cfg *cfg, + uint32_t *obj_id) +{ + struct dpdmai_cmd_create *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE, + cmd_flags, + dprc_token); + cmd_params = (struct dpdmai_cmd_create *)cmd.params; + cmd_params->priorities[0] = cfg->priorities[0]; + cmd_params->priorities[1] = cfg->priorities[1]; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *obj_id = mc_cmd_read_object_id(&cmd); + + return 0; +} + +/** + * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources. 
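With dpdmai_open()/dpdmai_close() and the enable/disable calls below in place, a typical control-session bring-up looks like the following hypothetical helper (CMD_PRI_LOW as used elsewhere in this patch):

```c
/* Hypothetical bring-up: open a control session for an existing DPDMAI
 * object, then enable it. The returned token authenticates all further
 * commands; dpdmai_disable()/dpdmai_close() undo this at teardown. */
static int dpdmai_bringup(struct fsl_mc_io *mc_io, int dpdmai_id,
			  uint16_t *token)
{
	int err = dpdmai_open(mc_io, CMD_PRI_LOW, dpdmai_id, token);

	if (err)
		return err;

	err = dpdmai_enable(mc_io, CMD_PRI_LOW, *token);
	if (err)
		dpdmai_close(mc_io, CMD_PRI_LOW, *token);
	return err;
}
```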
+ * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @object_id: The object id; it must be a valid id within the container that + * created this object; + * + * The function accepts the authentication token of the parent container that + * created the object (not the one that currently owns the object). The object + * is searched within parent using the provided 'object_id'. + * All tokens to the object must be closed before calling destroy. + * + * Return: '0' on Success; error code otherwise. + */ +int dpdmai_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id) +{ + struct dpdmai_cmd_destroy *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY, + cmd_flags, + dprc_token); + cmd_params = (struct dpdmai_cmd_destroy *)cmd.params; + cmd_params->dpdmai_id = cpu_to_le32(object_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_is_enabled() - Check if the DPDMAI is enabled. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * @en: Returns '1' if object is enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en) +{ + struct dpdmai_rsp_is_enabled *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmai_rsp_is_enabled *)cmd.params; + *en = dpdmai_get_field(rsp_params->en, ENABLE); + + return 0; +} + +/** + * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpdmai_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_get_attributes() - Retrieve DPDMAI attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * @attr: Returned object's attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmai_attr *attr) +{ + struct dpdmai_rsp_get_attr *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmai_rsp_get_attr *)cmd.params; + attr->id = le32_to_cpu(rsp_params->id); + attr->num_of_priorities = rsp_params->num_of_priorities; + + return 0; +} + +/** + * dpdmai_set_rx_queue() - Set Rx queue configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * @priority: Select the queue relative to number of + * priorities configured at DPDMAI creation; use + * DPDMAI_ALL_QUEUES to configure all Rx queues + * identically. + * @cfg: Rx queue configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t priority, + const struct dpdmai_rx_queue_cfg *cfg) +{ + struct dpdmai_cmd_set_rx_queue *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpdmai_cmd_set_rx_queue *)cmd.params; + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); + cmd_params->dest_priority = cfg->dest_cfg.priority; + cmd_params->priority = priority; + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx); + cmd_params->options = cpu_to_le32(cfg->options); + dpdmai_set_field(cmd_params->dest_type, + DEST_TYPE, + cfg->dest_cfg.dest_type); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_get_rx_queue() - Retrieve Rx queue attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * @priority: Select the queue relative to number of + * priorities configured at DPDMAI creation + * @attr: Returned Rx queue attributes + * + * Return: '0' on Success; Error code otherwise. 
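dpdmai_set_rx_queue() above is the knob that binds Rx queues to a notification target. An illustrative configuration (all numeric IDs are placeholders) that attaches every Rx queue to a DPIO for FQDAN notifications and stamps dequeued frames with a user context:

```c
/* Sketch only: configure all Rx queues identically via DPDMAI_ALL_QUEUES,
 * using the option flags and destination types defined in fsl_dpdmai.h. */
static int dpdmai_rx_setup(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpdmai_rx_queue_cfg cfg = {
		.options  = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
		.user_ctx = 0xdeadbeef,       /* placeholder context */
		.dest_cfg = {
			.dest_type = DPDMAI_DEST_DPIO,
			.dest_id   = 0,       /* placeholder DPIO id */
			.priority  = 0,
		},
	};

	return dpdmai_set_rx_queue(mc_io, CMD_PRI_LOW, token,
				   DPDMAI_ALL_QUEUES, &cfg);
}
```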
+ */ +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t priority, + struct dpdmai_rx_queue_attr *attr) +{ + struct dpdmai_cmd_get_queue *cmd_params; + struct dpdmai_rsp_get_rx_queue *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params; + cmd_params->priority = priority; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmai_rsp_get_rx_queue *)cmd.params; + attr->user_ctx = le64_to_cpu(rsp_params->user_ctx); + attr->fqid = le32_to_cpu(rsp_params->fqid); + attr->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id); + attr->dest_cfg.priority = le32_to_cpu(rsp_params->dest_priority); + attr->dest_cfg.dest_type = dpdmai_get_field(rsp_params->dest_type, + DEST_TYPE); + + return 0; +} + +/** + * dpdmai_get_tx_queue() - Retrieve Tx queue attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * @priority: Select the queue relative to number of + * priorities configured at DPDMAI creation + * @attr: Returned Tx queue attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t priority, + struct dpdmai_tx_queue_attr *attr) +{ + struct dpdmai_cmd_get_queue *cmd_params; + struct dpdmai_rsp_get_tx_queue *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params; + cmd_params->priority = priority; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params; + attr->fqid = le32_to_cpu(rsp_params->fqid); + + return 0; +} diff --git a/drivers/bus/fslmc/mc/fsl_dpdmai.h b/drivers/bus/fslmc/mc/fsl_dpdmai.h new file mode 100644 index 00000000..03e46ec1 --- /dev/null +++ b/drivers/bus/fslmc/mc/fsl_dpdmai.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 NXP + */ + +#ifndef __FSL_DPDMAI_H +#define __FSL_DPDMAI_H + +struct fsl_mc_io; + +/* Data Path DMA Interface API + * Contains initialization APIs and runtime control APIs for DPDMAI + */ + +/* General DPDMAI macros */ + +/** + * Maximum number of Tx/Rx priorities per DPDMAI object + */ +#define DPDMAI_PRIO_NUM 2 + +/** + * All queues considered; see dpdmai_set_rx_queue() + */ +#define DPDMAI_ALL_QUEUES (uint8_t)(-1) + +int dpdmai_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dpdmai_id, + uint16_t *token); + +int dpdmai_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * struct dpdmai_cfg - Structure representing DPDMAI configuration + * @priorities: Priorities for the DMA hardware processing; valid priorities are + * configured with values 1-8; the entry following last valid entry + * should be configured with 0 + */ +struct dpdmai_cfg { + uint8_t priorities[DPDMAI_PRIO_NUM]; +}; + +int dpdmai_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dpdmai_cfg *cfg, + uint32_t *obj_id); + +int dpdmai_destroy(struct fsl_mc_io *mc_io, + 
uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id); + +int dpdmai_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dpdmai_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dpdmai_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en); + +int dpdmai_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * struct dpdmai_attr - Structure representing DPDMAI attributes + * @id: DPDMAI object ID + * @num_of_priorities: number of priorities + */ +struct dpdmai_attr { + int id; + uint8_t num_of_priorities; +}; + +int dpdmai_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmai_attr *attr); + +/** + * enum dpdmai_dest - DPDMAI destination types + * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode + * and does not generate FQDAN notifications; user is expected to dequeue + * from the queue based on polling or other user-defined method + * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN + * notifications to the specified DPIO; user is expected to dequeue + * from the queue only after notification is received + * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate + * FQDAN notifications, but is connected to the specified DPCON object; + * user is expected to dequeue from the DPCON channel + */ +enum dpdmai_dest { + DPDMAI_DEST_NONE = 0, + DPDMAI_DEST_DPIO = 1, + DPDMAI_DEST_DPCON = 2 +}; + +/** + * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters + * @dest_type: Destination type + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type + * @priority: Priority selection within the DPIO or DPCON channel; valid values + * are 0-1 or 0-7, depending on the number of priorities in that + * channel; not relevant for 'DPDMAI_DEST_NONE' option + */ +struct dpdmai_dest_cfg { + enum dpdmai_dest dest_type; + int dest_id; + uint8_t priority; +}; + +/* DPDMAI queue modification options */ + +/** + * Select to modify the user's context associated with the queue + */ +#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001 + +/** + * Select to modify the queue's destination + */ +#define DPDMAI_QUEUE_OPT_DEST 0x00000002 + +/** + * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration + * @options: Flags representing the suggested modifications to the queue; + * Use any combination of 'DPDMAI_QUEUE_OPT_' flags + * @user_ctx: User context value provided in the frame descriptor of each + * dequeued frame; + * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options' + * @dest_cfg: Queue destination parameters; + * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options' + */ +struct dpdmai_rx_queue_cfg { + uint32_t options; + uint64_t user_ctx; + struct dpdmai_dest_cfg dest_cfg; + +}; + +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t priority, + const struct dpdmai_rx_queue_cfg *cfg); + +/** + * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues + * @user_ctx: User context value provided in the frame descriptor of each + * dequeued frame + * @dest_cfg: Queue destination configuration + * @fqid: Virtual FQID value to be used for dequeue operations + */ +struct dpdmai_rx_queue_attr { + uint64_t user_ctx; + struct dpdmai_dest_cfg dest_cfg; + uint32_t fqid; +}; + +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + 
uint16_t token, + uint8_t priority, + struct dpdmai_rx_queue_attr *attr); + +/** + * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues + * @fqid: Virtual FQID to be used for sending frames to DMA hardware + */ + +struct dpdmai_tx_queue_attr { + uint32_t fqid; +}; + +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t priority, + struct dpdmai_tx_queue_attr *attr); + +#endif /* __FSL_DPDMAI_H */ diff --git a/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h b/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h new file mode 100644 index 00000000..618e19ea --- /dev/null +++ b/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 NXP + */ + +#ifndef _FSL_DPDMAI_CMD_H +#define _FSL_DPDMAI_CMD_H + +/* DPDMAI Version */ +#define DPDMAI_VER_MAJOR 3 +#define DPDMAI_VER_MINOR 2 + +/* Command versioning */ +#define DPDMAI_CMD_BASE_VERSION 1 +#define DPDMAI_CMD_ID_OFFSET 4 + +#define DPDMAI_CMD(id) ((id << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) + +/* Command IDs */ +#define DPDMAI_CMDID_CLOSE DPDMAI_CMD(0x800) +#define DPDMAI_CMDID_OPEN DPDMAI_CMD(0x80E) +#define DPDMAI_CMDID_CREATE DPDMAI_CMD(0x90E) +#define DPDMAI_CMDID_DESTROY DPDMAI_CMD(0x98E) +#define DPDMAI_CMDID_GET_API_VERSION DPDMAI_CMD(0xa0E) + +#define DPDMAI_CMDID_ENABLE DPDMAI_CMD(0x002) +#define DPDMAI_CMDID_DISABLE DPDMAI_CMD(0x003) +#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMD(0x004) +#define DPDMAI_CMDID_RESET DPDMAI_CMD(0x005) +#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMD(0x006) + +#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMD(0x1A0) +#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMD(0x1A1) +#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMD(0x1A2) + +/* Macros for accessing command fields smaller than 1byte */ +#define DPDMAI_MASK(field) \ + GENMASK(DPDMAI_##field##_SHIFT + DPDMAI_##field##_SIZE - 1, \ + DPDMAI_##field##_SHIFT) +#define dpdmai_set_field(var, field, val) \ + ((var) |= (((val) << DPDMAI_##field##_SHIFT) & DPDMAI_MASK(field))) +#define dpdmai_get_field(var, field) \ + (((var) & DPDMAI_MASK(field)) >> DPDMAI_##field##_SHIFT) + +#pragma pack(push, 1) +struct dpdmai_cmd_open { + uint32_t dpdmai_id; +}; + +struct dpdmai_cmd_create { + uint8_t pad; + uint8_t priorities[2]; +}; + +struct dpdmai_cmd_destroy { + uint32_t dpdmai_id; +}; + +#define DPDMAI_ENABLE_SHIFT 0 +#define DPDMAI_ENABLE_SIZE 1 + +struct dpdmai_rsp_is_enabled { + /* only the LSB bit */ + uint8_t en; +}; + +struct dpdmai_rsp_get_attr { + uint32_t id; + uint8_t num_of_priorities; +}; + +#define DPDMAI_DEST_TYPE_SHIFT 0 +#define DPDMAI_DEST_TYPE_SIZE 4 + +struct dpdmai_cmd_set_rx_queue { + uint32_t dest_id; + uint8_t dest_priority; + uint8_t priority; + /* from LSB: dest_type:4 */ + uint8_t dest_type; + uint8_t pad; + uint64_t user_ctx; + uint32_t options; +}; + +struct dpdmai_cmd_get_queue { + uint8_t pad[5]; + uint8_t priority; +}; + +struct dpdmai_rsp_get_rx_queue { + uint32_t dest_id; + uint8_t dest_priority; + uint8_t pad1; + /* from LSB: dest_type:4 */ + uint8_t dest_type; + uint8_t pad2; + uint64_t user_ctx; + uint32_t fqid; +}; + +struct dpdmai_rsp_get_tx_queue { + uint64_t pad; + uint32_t fqid; +}; + +#pragma pack(pop) +#endif /* _FSL_DPDMAI_CMD_H */ diff --git a/drivers/bus/fslmc/mc/fsl_mc_cmd.h b/drivers/bus/fslmc/mc/fsl_mc_cmd.h index a3c3e798..ac919610 100644 --- a/drivers/bus/fslmc/mc/fsl_mc_cmd.h +++ b/drivers/bus/fslmc/mc/fsl_mc_cmd.h @@ -27,7 +27,7 @@ #define le32_to_cpu rte_le_to_cpu_32 #define le16_to_cpu rte_le_to_cpu_16 -#define 
BITS_PER_LONG 64 +#define BITS_PER_LONG (__SIZEOF_LONG__ * 8) #define GENMASK(h, l) \ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) diff --git a/drivers/bus/fslmc/meson.build b/drivers/bus/fslmc/meson.build new file mode 100644 index 00000000..22a56a6f --- /dev/null +++ b/drivers/bus/fslmc/meson.build @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if host_machine.system() != 'linux' + build = false +endif + +deps += ['eventdev', 'kvargs'] +sources = files('fslmc_bus.c', + 'fslmc_vfio.c', + 'mc/dpbp.c', + 'mc/dpci.c', + 'mc/dpcon.c', + 'mc/dpdmai.c', + 'mc/dpio.c', + 'mc/dpmng.c', + 'mc/mc_sys.c', + 'portal/dpaa2_hw_dpbp.c', + 'portal/dpaa2_hw_dpci.c', + 'portal/dpaa2_hw_dpio.c', + 'qbman/qbman_portal.c', + 'qbman/qbman_debug.c') + +allow_experimental_apis = true + +includes += include_directories('mc', 'qbman/include', 'portal') +cflags += ['-D_GNU_SOURCE'] diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c index f1f14e29..39c5adf9 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c +++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c @@ -44,7 +44,7 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused, /* Allocate DPAA2 dpbp handle */ dpbp_node = rte_malloc(NULL, sizeof(struct dpaa2_dpbp_dev), 0); if (!dpbp_node) { - PMD_INIT_LOG(ERR, "Memory allocation failed for DPBP Device"); + DPAA2_BUS_ERR("Memory allocation failed for DPBP Device"); return -1; } @@ -53,8 +53,8 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused, ret = dpbp_open(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_id, &dpbp_node->token); if (ret) { - PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d", - ret); + DPAA2_BUS_ERR("Unable to open buffer pool object: err(%d)", + ret); rte_free(dpbp_node); return -1; } @@ -62,8 +62,8 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused, /* Clean the device first */ ret = dpbp_reset(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token); if (ret) { - PMD_INIT_LOG(ERR, "Failure cleaning dpbp device with" - " error code %d\n", ret); + DPAA2_BUS_ERR("Unable to reset buffer pool device. 
err(%d)", + ret); dpbp_close(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token); rte_free(dpbp_node); return -1; @@ -74,8 +74,6 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused, TAILQ_INSERT_TAIL(&dpbp_dev_list, dpbp_node, next); - RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpbp.%d]\n", dpbp_id); - if (!register_once) { rte_mbuf_set_platform_mempool_ops(DPAA2_MEMPOOL_OPS_NAME); register_once = 1; diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c index fb28e497..5ad0374d 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c +++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c @@ -39,13 +39,14 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused, struct dpci_attr attr; struct dpci_rx_queue_cfg rx_queue_cfg; struct dpci_rx_queue_attr rx_attr; + struct dpci_tx_queue_attr tx_attr; int ret, i; /* Allocate DPAA2 dpci handle */ dpci_node = rte_malloc(NULL, sizeof(struct dpaa2_dpci_dev), 0); if (!dpci_node) { - PMD_INIT_LOG(ERR, "Memory allocation failed for DPCI Device"); - return -1; + DPAA2_BUS_ERR("Memory allocation failed for DPCI Device"); + return -ENOMEM; } /* Open the dpci object */ @@ -53,43 +54,57 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused, ret = dpci_open(&dpci_node->dpci, CMD_PRI_LOW, dpci_id, &dpci_node->token); if (ret) { - PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d", - ret); - rte_free(dpci_node); - return -1; + DPAA2_BUS_ERR("Resource alloc failure with err code: %d", ret); + goto err; } /* Get the device attributes */ ret = dpci_get_attributes(&dpci_node->dpci, CMD_PRI_LOW, dpci_node->token, &attr); if (ret != 0) { - PMD_INIT_LOG(ERR, "Reading device failed with err code: %d", - ret); - rte_free(dpci_node); - return -1; + DPAA2_BUS_ERR("Reading device failed with err code: %d", ret); + goto err; } - /* Set up the Rx Queue */ - memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg)); - ret = dpci_set_rx_queue(&dpci_node->dpci, - CMD_PRI_LOW, - dpci_node->token, - 0, &rx_queue_cfg); - if (ret) { - PMD_INIT_LOG(ERR, "Setting Rx queue failed with err code: %d", - ret); - rte_free(dpci_node); - return -1; + for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) { + struct dpaa2_queue *rxq; + + memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg)); + ret = dpci_set_rx_queue(&dpci_node->dpci, + CMD_PRI_LOW, + dpci_node->token, + i, &rx_queue_cfg); + if (ret) { + DPAA2_BUS_ERR("Setting Rx queue failed with err code: %d", + ret); + goto err; + } + + /* Allocate DQ storage for the DPCI Rx queues */ + rxq = &(dpci_node->rx_queue[i]); + rxq->q_storage = rte_malloc("dq_storage", + sizeof(struct queue_storage_info_t), + RTE_CACHE_LINE_SIZE); + if (!rxq->q_storage) { + DPAA2_BUS_ERR("q_storage allocation failed\n"); + ret = -ENOMEM; + goto err; + } + + memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t)); + ret = dpaa2_alloc_dq_storage(rxq->q_storage); + if (ret) { + DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed\n"); + goto err; + } } /* Enable the device */ ret = dpci_enable(&dpci_node->dpci, CMD_PRI_LOW, dpci_node->token); if (ret != 0) { - PMD_INIT_LOG(ERR, "Enabling device failed with err code: %d", - ret); - rte_free(dpci_node); - return -1; + DPAA2_BUS_ERR("Enabling device failed with err code: %d", ret); + goto err; } for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) { @@ -99,14 +114,22 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused, dpci_node->token, i, &rx_attr); if (ret != 0) { - PMD_INIT_LOG(ERR, - "Reading device failed with err code: %d", - ret); - rte_free(dpci_node); - return -1; + DPAA2_BUS_ERR("Rx 
queue fetch failed with err code: %d", + ret); + goto err; } + dpci_node->rx_queue[i].fqid = rx_attr.fqid; - dpci_node->queue[i].fqid = rx_attr.fqid; + ret = dpci_get_tx_queue(&dpci_node->dpci, + CMD_PRI_LOW, + dpci_node->token, i, + &tx_attr); + if (ret != 0) { + DPAA2_BUS_ERR("Reading device failed with err code: %d", + ret); + goto err; + } + dpci_node->tx_queue[i].fqid = tx_attr.fqid; } dpci_node->dpci_id = dpci_id; @@ -114,9 +137,20 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused, TAILQ_INSERT_TAIL(&dpci_dev_list, dpci_node, next); - RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpci.%d]\n", dpci_id); - return 0; + +err: + for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) { + struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]); + + if (rxq->q_storage) { + dpaa2_free_dq_storage(rxq->q_storage); + rte_free(rxq->q_storage); + } + } + rte_free(dpci_node); + + return ret; } struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void) diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c index eefde155..99f70be1 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c +++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c @@ -101,7 +101,7 @@ static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id) snprintf(string, STRING_LEN, "dpio.%d", dpio_id); file = fopen("/proc/interrupts", "r"); if (!file) { - PMD_DRV_LOG(WARNING, "Failed to open /proc/interrupts file\n"); + DPAA2_BUS_WARN("Failed to open /proc/interrupts file"); return; } while (getline(&temp, &len, file) != -1) { @@ -112,8 +112,8 @@ static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id) } if (!token) { - PMD_DRV_LOG(WARNING, "Failed to get interrupt id for dpio.%d\n", - dpio_id); + DPAA2_BUS_WARN("Failed to get interrupt id for dpio.%d", + dpio_id); if (temp) free(temp); fclose(file); @@ -125,10 +125,10 @@ static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id) cpu_mask, token); ret = system(command); if (ret < 0) - PMD_DRV_LOG(WARNING, - "Failed to affine interrupts on respective core\n"); + DPAA2_BUS_WARN( + "Failed to affine interrupts on respective core"); else - PMD_DRV_LOG(WARNING, " %s command is executed\n", command); + DPAA2_BUS_DEBUG(" %s command is executed", command); free(temp); fclose(file); @@ -143,7 +143,7 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev) dpio_epoll_fd = epoll_create(1); ret = rte_dpaa2_intr_enable(&dpio_dev->intr_handle, 0); if (ret) { - PMD_DRV_LOG(ERR, "Interrupt registeration failed\n"); + DPAA2_BUS_ERR("Interrupt registeration failed"); return -1; } @@ -166,7 +166,7 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev) ret = epoll_ctl(dpio_epoll_fd, EPOLL_CTL_ADD, eventfd, &epoll_ev); if (ret < 0) { - PMD_DRV_LOG(ERR, "epoll_ctl failed\n"); + DPAA2_BUS_ERR("epoll_ctl failed"); return -1; } dpio_dev->epoll_fd = dpio_epoll_fd; @@ -185,28 +185,27 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev) dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io)); if (!dpio_dev->dpio) { - PMD_INIT_LOG(ERR, "Memory allocation failure\n"); + DPAA2_BUS_ERR("Memory allocation failure"); return -1; } - PMD_DRV_LOG(DEBUG, "Allocated DPIO Portal[%p]", dpio_dev->dpio); dpio_dev->dpio->regs = dpio_dev->mc_portal; if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id, &dpio_dev->token)) { - PMD_INIT_LOG(ERR, "Failed to allocate IO space\n"); + DPAA2_BUS_ERR("Failed to allocate IO space"); free(dpio_dev->dpio); return -1; } if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) { - PMD_INIT_LOG(ERR, "Failed to 
reset dpio\n"); + DPAA2_BUS_ERR("Failed to reset dpio"); dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); free(dpio_dev->dpio); return -1; } if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) { - PMD_INIT_LOG(ERR, "Failed to Enable dpio\n"); + DPAA2_BUS_ERR("Failed to Enable dpio"); dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); free(dpio_dev->dpio); return -1; @@ -214,7 +213,7 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev) if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token, &attr)) { - PMD_INIT_LOG(ERR, "DPIO Get attribute failed\n"); + DPAA2_BUS_ERR("DPIO Get attribute failed"); dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); free(dpio_dev->dpio); @@ -231,7 +230,7 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev) dpio_dev->sw_portal = qbman_swp_init(&p_des); if (dpio_dev->sw_portal == NULL) { - PMD_DRV_LOG(ERR, " QBMan SW Portal Init failed\n"); + DPAA2_BUS_ERR("QBMan SW Portal Init failed"); dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); free(dpio_dev->dpio); return -1; @@ -249,7 +248,7 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id) if (cpu_id < 0) { cpu_id = rte_get_master_lcore(); if (cpu_id < 0) { - RTE_LOG(ERR, PMD, "\tGetting CPU Index failed\n"); + DPAA2_BUS_ERR("Getting CPU Index failed"); return -1; } } @@ -258,19 +257,19 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id) */ sdest = dpaa2_core_cluster_sdest(cpu_id); - PMD_DRV_LOG(DEBUG, "Portal= %d CPU= %u SDEST= %d", - dpio_dev->index, cpu_id, sdest); + DPAA2_BUS_DEBUG("Portal= %d CPU= %u SDEST= %d", + dpio_dev->index, cpu_id, sdest); ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token, sdest); if (ret) { - PMD_DRV_LOG(ERR, "%d ERROR in SDEST\n", ret); + DPAA2_BUS_ERR("%d ERROR in SDEST", ret); return -1; } #ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV if (dpaa2_dpio_intr_init(dpio_dev)) { - PMD_DRV_LOG(ERR, "Interrupt registration failed for dpio\n"); + DPAA2_BUS_ERR("Interrupt registration failed for dpio"); return -1; } #endif @@ -291,12 +290,12 @@ struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id) if (!dpio_dev) return NULL; - PMD_DRV_LOG(DEBUG, "New Portal=0x%x (%d) affined thread - %lu", - dpio_dev, dpio_dev->index, syscall(SYS_gettid)); + DPAA2_BUS_DEBUG("New Portal %p (%d) affined thread - %lu", + dpio_dev, dpio_dev->index, syscall(SYS_gettid)); ret = dpaa2_configure_stashing(dpio_dev, cpu_id); if (ret) - PMD_DRV_LOG(ERR, "dpaa2_configure_stashing failed"); + DPAA2_BUS_ERR("dpaa2_configure_stashing failed"); return dpio_dev; } @@ -314,8 +313,9 @@ dpaa2_affine_qbman_swp(void) return -1; if (dpaa2_io_portal[lcore_id].dpio_dev) { - PMD_DRV_LOG(INFO, "DPAA Portal=0x%x (%d) is being shared" - " between thread %lu and current %lu", + DPAA2_BUS_DP_INFO("DPAA Portal=%p (%d) is being shared" + " between thread %" PRIu64 " and current " + "%" PRIu64 "\n", dpaa2_io_portal[lcore_id].dpio_dev, dpaa2_io_portal[lcore_id].dpio_dev->index, dpaa2_io_portal[lcore_id].net_tid, @@ -326,7 +326,8 @@ dpaa2_affine_qbman_swp(void) [lcore_id].dpio_dev->ref_count); dpaa2_io_portal[lcore_id].net_tid = tid; - PMD_DRV_LOG(DEBUG, "Old Portal=0x%x (%d) affined thread - %lu", + DPAA2_BUS_DP_DEBUG("Old Portal=%p (%d) affined thread - " + "%" PRIu64 "\n", dpaa2_io_portal[lcore_id].dpio_dev, dpaa2_io_portal[lcore_id].dpio_dev->index, tid); @@ -348,7 +349,7 @@ dpaa2_affine_qbman_swp(void) } int -dpaa2_affine_qbman_swp_sec(void) 
+dpaa2_affine_qbman_ethrx_swp(void) { unsigned int lcore_id = rte_lcore_id(); uint64_t tid = syscall(SYS_gettid); @@ -359,32 +360,36 @@ dpaa2_affine_qbman_swp_sec(void) else if (lcore_id >= RTE_MAX_LCORE) return -1; - if (dpaa2_io_portal[lcore_id].sec_dpio_dev) { - PMD_DRV_LOG(INFO, "DPAA Portal=0x%x (%d) is being shared" - " between thread %lu and current %lu", - dpaa2_io_portal[lcore_id].sec_dpio_dev, - dpaa2_io_portal[lcore_id].sec_dpio_dev->index, - dpaa2_io_portal[lcore_id].sec_tid, - tid); - RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev - = dpaa2_io_portal[lcore_id].sec_dpio_dev; + if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) { + DPAA2_BUS_DP_INFO( + "DPAA Portal=%p (%d) is being shared between thread" + " %" PRIu64 " and current %" PRIu64 "\n", + dpaa2_io_portal[lcore_id].ethrx_dpio_dev, + dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index, + dpaa2_io_portal[lcore_id].sec_tid, + tid); + RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev + = dpaa2_io_portal[lcore_id].ethrx_dpio_dev; rte_atomic16_inc(&dpaa2_io_portal - [lcore_id].sec_dpio_dev->ref_count); + [lcore_id].ethrx_dpio_dev->ref_count); dpaa2_io_portal[lcore_id].sec_tid = tid; - PMD_DRV_LOG(DEBUG, "Old Portal=0x%x (%d) affined thread - %lu", - dpaa2_io_portal[lcore_id].sec_dpio_dev, - dpaa2_io_portal[lcore_id].sec_dpio_dev->index, - tid); + DPAA2_BUS_DP_DEBUG( + "Old Portal=%p (%d) affined thread" + " - %" PRIu64 "\n", + dpaa2_io_portal[lcore_id].ethrx_dpio_dev, + dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index, + tid); return 0; } /* Populate the dpaa2_io_portal structure */ - dpaa2_io_portal[lcore_id].sec_dpio_dev = dpaa2_get_qbman_swp(lcore_id); + dpaa2_io_portal[lcore_id].ethrx_dpio_dev = + dpaa2_get_qbman_swp(lcore_id); - if (dpaa2_io_portal[lcore_id].sec_dpio_dev) { - RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev - = dpaa2_io_portal[lcore_id].sec_dpio_dev; + if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) { + RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev + = dpaa2_io_portal[lcore_id].ethrx_dpio_dev; dpaa2_io_portal[lcore_id].sec_tid = tid; return 0; } else { @@ -401,15 +406,14 @@ dpaa2_create_dpio_device(int vdev_fd, struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)}; if (obj_info->num_regions < NUM_DPIO_REGIONS) { - PMD_INIT_LOG(ERR, "ERROR, Not sufficient number " - "of DPIO regions.\n"); + DPAA2_BUS_ERR("Not sufficient number of DPIO regions"); return -1; } dpio_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpio_dev), RTE_CACHE_LINE_SIZE); if (!dpio_dev) { - PMD_INIT_LOG(ERR, "Memory allocation failed for DPIO Device\n"); + DPAA2_BUS_ERR("Memory allocation failed for DPIO Device"); return -1; } @@ -421,31 +425,31 @@ dpaa2_create_dpio_device(int vdev_fd, reg_info.index = 0; if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info)) { - PMD_INIT_LOG(ERR, "vfio: error getting region info\n"); + DPAA2_BUS_ERR("vfio: error getting region info"); rte_free(dpio_dev); return -1; } dpio_dev->ce_size = reg_info.size; - dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size, + dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size, PROT_WRITE | PROT_READ, MAP_SHARED, vdev_fd, reg_info.offset); reg_info.index = 1; if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info)) { - PMD_INIT_LOG(ERR, "vfio: error getting region info\n"); + DPAA2_BUS_ERR("vfio: error getting region info"); rte_free(dpio_dev); return -1; } dpio_dev->ci_size = reg_info.size; - dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size, + dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size, PROT_WRITE | PROT_READ, MAP_SHARED, vdev_fd, 
reg_info.offset); if (configure_dpio_qbman_swp(dpio_dev)) { - PMD_INIT_LOG(ERR, - "Fail to configure the dpio qbman portal for %d\n", + DPAA2_BUS_ERR( + "Fail to configure the dpio qbman portal for %d", dpio_dev->hw_id); rte_free(dpio_dev); return -1; @@ -455,8 +459,8 @@ dpaa2_create_dpio_device(int vdev_fd, dpio_dev->index = io_space_count; if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) { - PMD_INIT_LOG(ERR, "Fail to setup interrupt for %d\n", - dpio_dev->hw_id); + DPAA2_BUS_ERR("Fail to setup interrupt for %d", + dpio_dev->hw_id); rte_free(dpio_dev); } @@ -466,21 +470,20 @@ dpaa2_create_dpio_device(int vdev_fd, if (mc_get_soc_version(dpio_dev->dpio, CMD_PRI_LOW, &mc_plat_info)) { - PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n"); + DPAA2_BUS_ERR("Unable to get SoC version information"); } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LS1080A) { dpaa2_core_cluster_base = 0x02; dpaa2_cluster_sz = 4; - PMD_INIT_LOG(DEBUG, "\tLS108x (A53) Platform Detected"); + DPAA2_BUS_DEBUG("LS108x (A53) Platform Detected"); } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LX2160A) { dpaa2_core_cluster_base = 0x00; dpaa2_cluster_sz = 2; - PMD_INIT_LOG(DEBUG, "\tLX2160 Platform Detected"); + DPAA2_BUS_DEBUG("LX2160 Platform Detected"); } dpaa2_svr_family = (mc_plat_info.svr & 0xffff0000); } TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next); - RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpio.%d]\n", object_id); return 0; } diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h index c0bd8782..d593eea7 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h +++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h @@ -13,7 +13,7 @@ struct dpaa2_io_portal_t { struct dpaa2_dpio_dev *dpio_dev; - struct dpaa2_dpio_dev *sec_dpio_dev; + struct dpaa2_dpio_dev *ethrx_dpio_dev; uint64_t net_tid; uint64_t sec_tid; void *eventdev; @@ -25,8 +25,8 @@ RTE_DECLARE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io); #define DPAA2_PER_LCORE_DPIO RTE_PER_LCORE(_dpaa2_io).dpio_dev #define DPAA2_PER_LCORE_PORTAL DPAA2_PER_LCORE_DPIO->sw_portal -#define DPAA2_PER_LCORE_SEC_DPIO RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev -#define DPAA2_PER_LCORE_SEC_PORTAL DPAA2_PER_LCORE_SEC_DPIO->sw_portal +#define DPAA2_PER_LCORE_ETHRX_DPIO RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev +#define DPAA2_PER_LCORE_ETHRX_PORTAL DPAA2_PER_LCORE_ETHRX_DPIO->sw_portal /* Variable to store DPAA2 platform type */ extern uint32_t dpaa2_svr_family; @@ -39,7 +39,7 @@ struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id); int dpaa2_affine_qbman_swp(void); /* Affine additional DPIO portal to current crypto processing thread */ -int dpaa2_affine_qbman_swp_sec(void); +int dpaa2_affine_qbman_ethrx_swp(void); /* allocate memory for FQ - dq storage */ int diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h index d421dbf0..82075936 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h +++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h @@ -142,7 +142,8 @@ struct dpaa2_dpci_dev { uint16_t token; rte_atomic16_t in_use; uint32_t dpci_id; /*HW ID for DPCI object */ - struct dpaa2_queue queue[DPAA2_DPCI_MAX_QUEUES]; + struct dpaa2_queue rx_queue[DPAA2_DPCI_MAX_QUEUES]; + struct dpaa2_queue tx_queue[DPAA2_DPCI_MAX_QUEUES]; }; /*! 
Global MCP list */ @@ -174,7 +175,7 @@ enum qbman_fd_format { }; /*Macros to define operations on FD*/ #define DPAA2_SET_FD_ADDR(fd, addr) do { \ - (fd)->simple.addr_lo = lower_32_bits((uint64_t)(addr)); \ + (fd)->simple.addr_lo = lower_32_bits((size_t)(addr)); \ (fd)->simple.addr_hi = upper_32_bits((uint64_t)(addr)); \ } while (0) #define DPAA2_SET_FD_LEN(fd, length) ((fd)->simple.len = length) @@ -188,50 +189,55 @@ enum qbman_fd_format { ((fd)->simple.frc = (0x80000000 | (len))) #define DPAA2_GET_FD_FRC_PARSE_SUM(fd) \ ((uint16_t)(((fd)->simple.frc & 0xffff0000) >> 16)) -#define DPAA2_SET_FD_FRC(fd, frc) ((fd)->simple.frc = frc) +#define DPAA2_SET_FD_FRC(fd, _frc) ((fd)->simple.frc = _frc) #define DPAA2_RESET_FD_CTRL(fd) ((fd)->simple.ctrl = 0) #define DPAA2_SET_FD_ASAL(fd, asal) ((fd)->simple.ctrl |= (asal << 16)) #define DPAA2_SET_FD_FLC(fd, addr) do { \ - (fd)->simple.flc_lo = lower_32_bits((uint64_t)(addr)); \ + (fd)->simple.flc_lo = lower_32_bits((size_t)(addr)); \ (fd)->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \ } while (0) #define DPAA2_SET_FLE_INTERNAL_JD(fle, len) ((fle)->frc = (0x80000000 | (len))) #define DPAA2_GET_FLE_ADDR(fle) \ - (uint64_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo) + (size_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo) #define DPAA2_SET_FLE_ADDR(fle, addr) do { \ - (fle)->addr_lo = lower_32_bits((uint64_t)addr); \ - (fle)->addr_hi = upper_32_bits((uint64_t)addr); \ + (fle)->addr_lo = lower_32_bits((size_t)addr); \ + (fle)->addr_hi = upper_32_bits((uint64_t)addr); \ } while (0) #define DPAA2_GET_FLE_CTXT(fle) \ - (uint64_t)((((uint64_t)((fle)->reserved[1])) << 32) + \ - (fle)->reserved[0]) + ((((uint64_t)((fle)->reserved[1])) << 32) + (fle)->reserved[0]) #define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \ - (fle)->reserved[0] = lower_32_bits((uint64_t)addr); \ - (fle)->reserved[1] = upper_32_bits((uint64_t)addr); \ + (fle)->reserved[0] = lower_32_bits((size_t)addr); \ + (fle)->reserved[1] = upper_32_bits((uint64_t)addr); \ } while (0) #define DPAA2_SET_FLE_OFFSET(fle, offset) \ ((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16) -#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (uint64_t)bpid) +#define DPAA2_SET_FLE_LEN(fle, len) ((fle)->length = len) +#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (size_t)bpid) #define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff) -#define DPAA2_SET_FLE_FIN(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 31) +#define DPAA2_SET_FLE_FIN(fle) ((fle)->fin_bpid_offset |= 1 << 31) #define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000)) +#define DPAA2_SET_FLE_BMT(fle) (((fle)->fin_bpid_offset |= 0x00008000)) #define DPAA2_SET_FD_COMPOUND_FMT(fd) \ ((fd)->simple.bpid_offset |= (uint32_t)1 << 28) #define DPAA2_GET_FD_ADDR(fd) \ -((uint64_t)((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo)) +(((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo)) #define DPAA2_GET_FD_LEN(fd) ((fd)->simple.len) #define DPAA2_GET_FD_BPID(fd) (((fd)->simple.bpid_offset & 0x00003FFF)) #define DPAA2_GET_FD_IVP(fd) (((fd)->simple.bpid_offset & 0x00004000) >> 14) #define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16) +#define DPAA2_GET_FD_FRC(fd) ((fd)->simple.frc) +#define DPAA2_GET_FD_FLC(fd) \ + (((uint64_t)((fd)->simple.flc_hi) << 32) + (fd)->simple.flc_lo) +#define DPAA2_GET_FD_ERR(fd) ((fd)->simple.bpid_offset & 0x000000FF) #define DPAA2_GET_FLE_OFFSET(fle) (((fle)->fin_bpid_offset & 0x0FFF0000) >> 16) 
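The accessor macros above pack and unpack the fields of the hardware frame descriptor; a small read-side sketch shows how they compose when decoding a received FD. It assumes the QBMan 'struct qbman_fd' definition from the headers this file already pulls in, plus <stdio.h> and <inttypes.h>; the helper is illustrative, not part of the patch.

    /* Sketch only: dump the fields of a received frame descriptor
     * using the DPAA2_GET_FD_* getters defined just above.
     */
    static void dpaa2_fd_dump(const struct qbman_fd *fd)
    {
            printf("fd: addr=0x%" PRIx64 " len=%" PRIu32
                   " off=%" PRIu32 " bpid=%" PRIu32 " err=0x%" PRIx32 "\n",
                   DPAA2_GET_FD_ADDR(fd),
                   DPAA2_GET_FD_LEN(fd),
                   DPAA2_GET_FD_OFFSET(fd),
                   DPAA2_GET_FD_BPID(fd),
                   DPAA2_GET_FD_ERR(fd));
    }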
#define DPAA2_SET_FLE_SG_EXT(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 29) #define DPAA2_IS_SET_FLE_SG_EXT(fle) \ (((fle)->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0) #define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \ - ((struct rte_mbuf *)((uint64_t)(buf) - (meta_data_size))) + ((struct rte_mbuf *)((size_t)(buf) - (meta_data_size))) #define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64) @@ -255,47 +261,53 @@ enum qbman_fd_format { */ #define DPAA2_EQ_RESP_ALWAYS 1 +/* Various structures representing contiguous memory maps */ +struct dpaa2_memseg { + TAILQ_ENTRY(dpaa2_memseg) next; + char *vaddr; + rte_iova_t iova; + size_t len; +}; + +TAILQ_HEAD(dpaa2_memseg_list, dpaa2_memseg); +extern struct dpaa2_memseg_list rte_dpaa2_memsegs; + #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA extern uint8_t dpaa2_virt_mode; static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused)); /* todo - this is costly, need to write a fast coversion routine */ static void *dpaa2_mem_ptov(phys_addr_t paddr) { - const struct rte_memseg *memseg; - int i; + struct dpaa2_memseg *ms; if (dpaa2_virt_mode) - return (void *)paddr; - - memseg = rte_eal_get_physmem_layout(); - - for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) { - if (paddr >= memseg[i].iova && - (char *)paddr < (char *)memseg[i].iova + memseg[i].len) - return (void *)(memseg[i].addr_64 - + (paddr - memseg[i].iova)); + return (void *)(size_t)paddr; + + /* Check if the address is already part of the memseg list internally + * maintained by the dpaa2 driver. + */ + TAILQ_FOREACH(ms, &rte_dpaa2_memsegs, next) { + if (paddr >= ms->iova && paddr < + ms->iova + ms->len) + return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova)); } - return NULL; + + /* If not, Fallback to full memseg list searching */ + return rte_mem_iova2virt(paddr); } static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused)); static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) { const struct rte_memseg *memseg; - int i; if (dpaa2_virt_mode) return vaddr; - memseg = rte_eal_get_physmem_layout(); - - for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) { - if (vaddr >= memseg[i].addr_64 && - vaddr < memseg[i].addr_64 + memseg[i].len) - return memseg[i].iova - + (vaddr - memseg[i].addr_64); - } - return (phys_addr_t)(NULL); + memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL); + if (memseg) + return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr); + return (size_t)NULL; } /** @@ -306,28 +318,26 @@ static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) */ #define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova) -#define DPAA2_OP_VADDR_TO_IOVA(op) (op->phys_addr) /** * macro to convert Virtual address to IOVA */ -#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((uint64_t)(_vaddr)) +#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((size_t)(_vaddr)) /** * macro to convert IOVA to Virtual address */ -#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((phys_addr_t)(_iova)) +#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((size_t)(_iova)) /** * macro to convert modify the memory containing IOVA to Virtual address */ #define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \ - {_mem = (_type)(dpaa2_mem_ptov((phys_addr_t)(_mem))); } + {_mem = (_type)(dpaa2_mem_ptov((size_t)(_mem))); } #else /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */ #define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr) -#define DPAA2_OP_VADDR_TO_IOVA(op) (op) #define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr) #define DPAA2_IOVA_TO_VADDR(_iova) (_iova) #define 
DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h index 96269ed4..bb60a98f 100644 --- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h +++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h @@ -6,8 +6,6 @@ #ifndef _FSL_QBMAN_BASE_H #define _FSL_QBMAN_BASE_H -typedef uint64_t dma_addr_t; - /** * DOC: QBMan basic structures * diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c index e2217335..07145005 100644 --- a/drivers/bus/fslmc/qbman/qbman_portal.c +++ b/drivers/bus/fslmc/qbman/qbman_portal.c @@ -122,8 +122,7 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) p->vdq.valid_bit = QB_VALID_BIT; p->dqrr.next_idx = 0; p->dqrr.valid_bit = QB_VALID_BIT; - qman_version = p->desc.qman_version; - if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) { + if ((p->desc.qman_version & 0xFFFF0000) < QMAN_REV_4100) { p->dqrr.dqrr_size = 4; p->dqrr.reset_bug = 1; } else { @@ -553,10 +552,9 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s, /* Flush all the cacheline without load/store in between */ eqcr_pi = s->eqcr.pi; - addr_cena = (uint64_t)s->sys.addr_cena; + addr_cena = (size_t)s->sys.addr_cena; for (i = 0; i < num_enqueued; i++) { - dcbf((uint64_t *)(addr_cena + - QBMAN_CENA_SWP_EQCR(eqcr_pi & 7))); + dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7))); eqcr_pi++; eqcr_pi &= 0xF; } @@ -620,10 +618,9 @@ int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s, /* Flush all the cacheline without load/store in between */ eqcr_pi = s->eqcr.pi; - addr_cena = (uint64_t)s->sys.addr_cena; + addr_cena = (size_t)s->sys.addr_cena; for (i = 0; i < num_enqueued; i++) { - dcbf((uint64_t *)(addr_cena + - QBMAN_CENA_SWP_EQCR(eqcr_pi & 7))); + dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7))); eqcr_pi++; eqcr_pi &= 0xF; } @@ -690,7 +687,7 @@ void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, dma_addr_t storage_phys, int stash) { - d->pull.rsp_addr_virt = (uint64_t)storage; + d->pull.rsp_addr_virt = (size_t)storage; if (!storage) { d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT); @@ -749,7 +746,7 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d) } d->pull.tok = s->sys.idx + 1; - s->vdq.storage = (void *)d->pull.rsp_addr_virt; + s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt; p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR); memcpy(&p[1], &cl[1], 12); diff --git a/drivers/bus/fslmc/qbman/qbman_portal.h b/drivers/bus/fslmc/qbman/qbman_portal.h index 8bff0b4f..dbea22a1 100644 --- a/drivers/bus/fslmc/qbman/qbman_portal.h +++ b/drivers/bus/fslmc/qbman/qbman_portal.h @@ -7,7 +7,6 @@ #include "qbman_sys.h" #include -uint32_t qman_version; #define QMAN_REV_4000 0x04000000 #define QMAN_REV_4100 0x04010000 #define QMAN_REV_4101 0x04010001 diff --git a/drivers/bus/fslmc/qbman/qbman_sys.h b/drivers/bus/fslmc/qbman/qbman_sys.h index 846788ea..2bd33ea5 100644 --- a/drivers/bus/fslmc/qbman/qbman_sys.h +++ b/drivers/bus/fslmc/qbman/qbman_sys.h @@ -20,6 +20,9 @@ #include "qbman_sys_decl.h" +#define CENA_WRITE_ENABLE 0 +#define CINH_WRITE_ENABLE 1 + /* Debugging assists */ static inline void __hexdump(unsigned long start, unsigned long end, unsigned long p, size_t sz, const unsigned char *c) @@ -178,7 +181,11 @@ static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s, s->addr_cena, s->idx, offset); #endif QBMAN_BUG_ON(offset & 63); +#ifdef RTE_ARCH_64 return (s->addr_cena + offset); +#else + 
return (s->addr_cinh + offset); +#endif } static inline void qbman_cena_write_complete(struct qbman_swp_sys *s, @@ -191,11 +198,19 @@ static inline void qbman_cena_write_complete(struct qbman_swp_sys *s, s->addr_cena, s->idx, offset, shadow); hexdump(cmd, 64); #endif +#ifdef RTE_ARCH_64 for (loop = 15; loop >= 1; loop--) __raw_writel(shadow[loop], s->addr_cena + offset + loop * 4); lwsync(); __raw_writel(shadow[0], s->addr_cena + offset); +#else + for (loop = 15; loop >= 1; loop--) + __raw_writel(shadow[loop], s->addr_cinh + + offset + loop * 4); + lwsync(); + __raw_writel(shadow[0], s->addr_cinh + offset); +#endif dcbf(s->addr_cena + offset); } @@ -224,9 +239,15 @@ static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset) s->addr_cena, s->idx, offset, shadow); #endif +#ifdef RTE_ARCH_64 for (loop = 0; loop < 16; loop++) shadow[loop] = __raw_readl(s->addr_cena + offset + loop * 4); +#else + for (loop = 0; loop < 16; loop++) + shadow[loop] = __raw_readl(s->addr_cinh + offset + + loop * 4); +#endif #ifdef QBMAN_CENA_TRACE hexdump(shadow, 64); #endif @@ -313,6 +334,11 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s, uint8_t dqrr_size) { uint32_t reg; +#ifdef RTE_ARCH_64 + uint8_t wn = CENA_WRITE_ENABLE; +#else + uint8_t wn = CINH_WRITE_ENABLE; +#endif s->addr_cena = d->cena_bar; s->addr_cinh = d->cinh_bar; @@ -333,10 +359,10 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s, QBMAN_BUG_ON(reg); #endif if (s->eqcr_mode == qman_eqcr_vb_array) - reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 1, 1, 1, 1, + reg = qbman_set_swp_cfg(dqrr_size, wn, 0, 3, 2, 3, 1, 1, 1, 1, 1, 1); else - reg = qbman_set_swp_cfg(dqrr_size, 0, 1, 3, 2, 2, 1, 1, 1, 1, + reg = qbman_set_swp_cfg(dqrr_size, wn, 1, 3, 2, 2, 1, 1, 1, 1, 1, 1); qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg); reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); diff --git a/drivers/bus/fslmc/qbman/qbman_sys_decl.h b/drivers/bus/fslmc/qbman/qbman_sys_decl.h index f82bb18c..fa6977fe 100644 --- a/drivers/bus/fslmc/qbman/qbman_sys_decl.h +++ b/drivers/bus/fslmc/qbman/qbman_sys_decl.h @@ -15,6 +15,7 @@ /****************/ /* arch assists */ /****************/ +#if defined(RTE_ARCH_ARM64) #define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); } #define lwsync() { asm volatile("dmb st" : : : "memory"); } #define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); } @@ -28,3 +29,25 @@ static inline void prefetch_for_store(void *p) { asm volatile("prfm pstl1keep, [%0, #0]" : : "r" (p)); } +#elif defined(RTE_ARCH_ARM) +#define dcbz(p) memset(p, 0, 64) +#define lwsync() { asm volatile("dmb st" : : : "memory"); } +#define dcbf(p) RTE_SET_USED(p) +#define dccivac(p) RTE_SET_USED(p) +#define prefetch_for_load(p) { asm volatile ("pld [%0]" : : "r" (p)); } +#define prefetch_for_store(p) { asm volatile ("pld [%0]" : : "r" (p)); } + +#else +#define dcbz(p) RTE_SET_USED(p) +#define lwsync() +#define dcbf(p) RTE_SET_USED(p) +#define dccivac(p) RTE_SET_USED(p) +static inline void prefetch_for_load(void *p) +{ + RTE_SET_USED(p); +} +static inline void prefetch_for_store(void *p) +{ + RTE_SET_USED(p); +} +#endif diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map index b7db0741..fe45a113 100644 --- a/drivers/bus/fslmc/rte_bus_fslmc_version.map +++ b/drivers/bus/fslmc/rte_bus_fslmc_version.map @@ -2,7 +2,6 @@ DPDK_17.05 { global: dpaa2_affine_qbman_swp; - dpaa2_affine_qbman_swp_sec; dpaa2_alloc_dpbp_dev; dpaa2_alloc_dq_storage; dpaa2_free_dpbp_dev; @@ 
-101,3 +100,19 @@ DPDK_18.02 { rte_fslmc_get_device_count; } DPDK_17.11; + +DPDK_18.05 { + global: + + dpaa2_affine_qbman_ethrx_swp; + dpdmai_close; + dpdmai_disable; + dpdmai_enable; + dpdmai_get_attributes; + dpdmai_get_rx_queue; + dpdmai_get_tx_queue; + dpdmai_open; + dpdmai_set_rx_queue; + rte_dpaa2_free_dpci_dev; + +} DPDK_18.02; diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h index 69d0fec7..cea5b78f 100644 --- a/drivers/bus/fslmc/rte_fslmc.h +++ b/drivers/bus/fslmc/rte_fslmc.h @@ -31,6 +31,7 @@ extern "C" { #include #include #include +#include #include @@ -49,6 +50,9 @@ struct rte_dpaa2_driver; TAILQ_HEAD(rte_fslmc_device_list, rte_dpaa2_device); TAILQ_HEAD(rte_fslmc_driver_list, rte_dpaa2_driver); +#define RTE_DEV_TO_FSLMC_CONST(ptr) \ + container_of(ptr, const struct rte_dpaa2_device, device) + extern struct rte_fslmc_bus rte_fslmc_bus; enum rte_dpaa2_dev_type { @@ -61,6 +65,7 @@ enum rte_dpaa2_dev_type { DPAA2_IO, /**< DPIO type device */ DPAA2_CI, /**< DPCI type device */ DPAA2_MPORTAL, /**< DPMCP type device */ + DPAA2_QDMA, /**< DPDMAI type device */ /* Unknown device placeholder */ DPAA2_UNKNOWN, DPAA2_DEVTYPE_MAX, @@ -91,6 +96,7 @@ struct rte_dpaa2_device { union { struct rte_eth_dev *eth_dev; /**< ethernet device */ struct rte_cryptodev *cryptodev; /**< Crypto Device */ + struct rte_rawdev *rawdev; /**< Raw Device */ }; enum rte_dpaa2_dev_type dev_type; /**< Device Type */ uint16_t object_id; /**< DPAA2 Object ID */ @@ -167,8 +173,7 @@ void rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver); /** Helper for DPAA2 device registration from driver (eth, crypto) instance */ #define RTE_PMD_REGISTER_DPAA2(nm, dpaa2_drv) \ -RTE_INIT(dpaa2initfn_ ##nm); \ -static void dpaa2initfn_ ##nm(void) \ +RTE_INIT(dpaa2initfn_ ##nm) \ {\ (dpaa2_drv).driver.name = RTE_STR(nm);\ rte_fslmc_driver_register(&dpaa2_drv); \ @@ -197,8 +202,7 @@ uint32_t rte_fslmc_get_device_count(enum rte_dpaa2_dev_type device_type); /** Helper for DPAA2 object registration */ #define RTE_PMD_REGISTER_DPAA2_OBJECT(nm, dpaa2_obj) \ -RTE_INIT(dpaa2objinitfn_ ##nm); \ -static void dpaa2objinitfn_ ##nm(void) \ +RTE_INIT(dpaa2objinitfn_ ##nm) \ {\ (dpaa2_obj).name = RTE_STR(nm);\ rte_fslmc_object_register(&dpaa2_obj); \ diff --git a/drivers/bus/ifpga/Makefile b/drivers/bus/ifpga/Makefile new file mode 100644 index 00000000..3ff3bdb8 --- /dev/null +++ b/drivers/bus/ifpga/Makefile @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_bus_ifpga.a + +CFLAGS += -DALLOW_EXPERIMENTAL_API +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lrte_eal +LDLIBS += -lrte_rawdev +LDLIBS += -lrte_kvargs + +# versioning export map +EXPORT_MAP := rte_bus_ifpga_version.map + +# library version +LIBABIVER := 1 + +SRCS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga_bus.c +SRCS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga_common.c + +# +# Export include files +# +SYMLINK-$(CONFIG_RTE_LIBRTE_IFPGA_BUS)-include += rte_bus_ifpga.h + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/bus/ifpga/ifpga_bus.c b/drivers/bus/ifpga/ifpga_bus.c new file mode 100644 index 00000000..b324872e --- /dev/null +++ b/drivers/bus/ifpga/ifpga_bus.c @@ -0,0 +1,467 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include + +#include "rte_rawdev.h" +#include "rte_rawdev_pmd.h" +#include "rte_bus_ifpga.h" +#include "ifpga_logs.h" +#include "ifpga_common.h" + +int ifpga_bus_logtype; + +/* Forward declaration to access Intel FPGA bus + * on which iFPGA devices are connected + */ +static struct rte_bus rte_ifpga_bus; + +static struct ifpga_afu_dev_list ifpga_afu_dev_list = + TAILQ_HEAD_INITIALIZER(ifpga_afu_dev_list); +static struct ifpga_afu_drv_list ifpga_afu_drv_list = + TAILQ_HEAD_INITIALIZER(ifpga_afu_drv_list); + + +/* register a ifpga bus based driver */ +void rte_ifpga_driver_register(struct rte_afu_driver *driver) +{ + RTE_VERIFY(driver); + + TAILQ_INSERT_TAIL(&ifpga_afu_drv_list, driver, next); +} + +/* un-register a fpga bus based driver */ +void rte_ifpga_driver_unregister(struct rte_afu_driver *driver) +{ + TAILQ_REMOVE(&ifpga_afu_drv_list, driver, next); +} + +static struct rte_afu_device * +ifpga_find_afu_dev(const struct rte_rawdev *rdev, + const struct rte_afu_id *afu_id) +{ + struct rte_afu_device *afu_dev = NULL; + + TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) { + if (afu_dev && + afu_dev->rawdev == rdev && + !ifpga_afu_id_cmp(&afu_dev->id, afu_id)) + return afu_dev; + } + return NULL; +} + +static const char * const valid_args[] = { +#define IFPGA_ARG_NAME "ifpga" + IFPGA_ARG_NAME, +#define IFPGA_ARG_PORT "port" + IFPGA_ARG_PORT, +#define IFPGA_AFU_BTS "afu_bts" + IFPGA_AFU_BTS, + NULL +}; + +/* + * Scan the content of the FPGA bus, and the devices in the devices + * list + */ +static struct rte_afu_device * +ifpga_scan_one(struct rte_rawdev *rawdev, + struct rte_devargs *devargs) +{ + struct rte_kvargs *kvlist = NULL; + struct rte_afu_device *afu_dev = NULL; + struct rte_afu_pr_conf afu_pr_conf; + int ret = 0; + char *path = NULL; + + memset(&afu_pr_conf, 0, sizeof(struct rte_afu_pr_conf)); + + kvlist = rte_kvargs_parse(devargs->args, valid_args); + if (!kvlist) { + IFPGA_BUS_ERR("error when parsing param"); + goto end; + } + + if (rte_kvargs_count(kvlist, IFPGA_ARG_PORT) == 1) { + if (rte_kvargs_process(kvlist, IFPGA_ARG_PORT, + &rte_ifpga_get_integer32_arg, &afu_pr_conf.afu_id.port) < 0) { + IFPGA_BUS_ERR("error to parse %s", + IFPGA_ARG_PORT); + goto end; + } + } else { + IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus", + IFPGA_ARG_PORT); + goto end; + } + + if (rte_kvargs_count(kvlist, IFPGA_AFU_BTS) == 1) { + if (rte_kvargs_process(kvlist, IFPGA_AFU_BTS, + &rte_ifpga_get_string_arg, &path) < 0) { + IFPGA_BUS_ERR("Failed to parse %s", + IFPGA_AFU_BTS); + goto end; + } + } else { + IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus", + IFPGA_AFU_BTS); + goto end; + } + + afu_pr_conf.afu_id.uuid.uuid_low = 0; + afu_pr_conf.afu_id.uuid.uuid_high = 0; + afu_pr_conf.pr_enable = path?1:0; + + if (ifpga_find_afu_dev(rawdev, &afu_pr_conf.afu_id)) + goto end; + + afu_dev = calloc(1, sizeof(*afu_dev)); + if (!afu_dev) + goto end; + + afu_dev->device.devargs = devargs; + afu_dev->device.numa_node = SOCKET_ID_ANY; + afu_dev->device.name = devargs->name; + afu_dev->rawdev = rawdev; + afu_dev->id.uuid.uuid_low = 0; + afu_dev->id.uuid.uuid_high = 0; + afu_dev->id.port = afu_pr_conf.afu_id.port; + + if (rawdev->dev_ops && rawdev->dev_ops->dev_info_get) + rawdev->dev_ops->dev_info_get(rawdev, afu_dev); + + if (rawdev->dev_ops && + rawdev->dev_ops->dev_start && + rawdev->dev_ops->dev_start(rawdev)) + goto end; + + strlcpy(afu_pr_conf.bs_path, path, sizeof(afu_pr_conf.bs_path)); + if (rawdev->dev_ops && + rawdev->dev_ops->firmware_load && + 
rawdev->dev_ops->firmware_load(rawdev, + &afu_pr_conf)){ + IFPGA_BUS_ERR("firmware load error %d\n", ret); + goto end; + } + afu_dev->id.uuid.uuid_low = afu_pr_conf.afu_id.uuid.uuid_low; + afu_dev->id.uuid.uuid_high = afu_pr_conf.afu_id.uuid.uuid_high; + + rte_kvargs_free(kvlist); + free(path); + return afu_dev; + +end: + if (kvlist) + rte_kvargs_free(kvlist); + if (path) + free(path); + if (afu_dev) + free(afu_dev); + + return NULL; +} + +/* + * Scan the content of the FPGA bus, and the devices in the devices + * list + */ +static int +ifpga_scan(void) +{ + struct rte_devargs *devargs; + struct rte_kvargs *kvlist = NULL; + struct rte_rawdev *rawdev = NULL; + char *name = NULL; + char name1[RTE_RAWDEV_NAME_MAX_LEN]; + struct rte_afu_device *afu_dev = NULL; + + /* for FPGA devices we scan the devargs_list populated via cmdline */ + RTE_EAL_DEVARGS_FOREACH(IFPGA_ARG_NAME, devargs) { + if (devargs->bus != &rte_ifpga_bus) + continue; + + kvlist = rte_kvargs_parse(devargs->args, valid_args); + if (!kvlist) { + IFPGA_BUS_ERR("error when parsing param"); + goto end; + } + + if (rte_kvargs_count(kvlist, IFPGA_ARG_NAME) == 1) { + if (rte_kvargs_process(kvlist, IFPGA_ARG_NAME, + &rte_ifpga_get_string_arg, &name) < 0) { + IFPGA_BUS_ERR("error to parse %s", + IFPGA_ARG_NAME); + goto end; + } + } else { + IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus", + IFPGA_ARG_NAME); + goto end; + } + + memset(name1, 0, sizeof(name1)); + snprintf(name1, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%s", name); + + rawdev = rte_rawdev_pmd_get_named_dev(name1); + if (!rawdev) + goto end; + + afu_dev = ifpga_scan_one(rawdev, devargs); + if (afu_dev != NULL) + TAILQ_INSERT_TAIL(&ifpga_afu_dev_list, afu_dev, next); + } + +end: + if (kvlist) + rte_kvargs_free(kvlist); + if (name) + free(name); + + return 0; +} + +/* + * Match the AFU Driver and AFU Device using the ID Table + */ +static int +rte_afu_match(const struct rte_afu_driver *afu_drv, + const struct rte_afu_device *afu_dev) +{ + const struct rte_afu_uuid *id_table; + + for (id_table = afu_drv->id_table; + ((id_table->uuid_low != 0) && (id_table->uuid_high != 0)); + id_table++) { + /* check if device's identifiers match the driver's ones */ + if ((id_table->uuid_low != afu_dev->id.uuid.uuid_low) || + id_table->uuid_high != + afu_dev->id.uuid.uuid_high) + continue; + + return 1; + } + + return 0; +} + +static int +ifpga_probe_one_driver(struct rte_afu_driver *drv, + struct rte_afu_device *afu_dev) +{ + int ret; + + if (!rte_afu_match(drv, afu_dev)) + /* Match of device and driver failed */ + return 1; + + /* reference driver structure */ + afu_dev->driver = drv; + afu_dev->device.driver = &drv->driver; + + /* call the driver probe() function */ + ret = drv->probe(afu_dev); + if (ret) { + afu_dev->driver = NULL; + afu_dev->device.driver = NULL; + } + + return ret; +} + +static int +ifpga_probe_all_drivers(struct rte_afu_device *afu_dev) +{ + struct rte_afu_driver *drv = NULL; + int ret = 0; + + if (afu_dev == NULL) + return -1; + + /* Check if a driver is already loaded */ + if (afu_dev->driver != NULL) + return 0; + + TAILQ_FOREACH(drv, &ifpga_afu_drv_list, next) { + if (ifpga_probe_one_driver(drv, afu_dev)) { + ret = -1; + break; + } + } + return ret; +} + +/* + * Scan the content of the Intel FPGA bus, and call the probe() function for + * all registered drivers that have a matching entry in its id_table + * for discovered devices. 
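rte_afu_match() walks the driver's uuid table until it reaches an entry whose low or high word is zero, so every driver must terminate its table explicitly. A hedged sketch of the driver-side contract this implies is below; the uuid values, names and callback bodies are placeholders, and 'id_table' is assumed to be a pointer to the uuid array as rte_afu_match() suggests.

    /* Illustrative AFU driver skeleton; not part of this patch.
     * Assumes rte_common.h for RTE_SET_USED and RTE_INIT.
     */
    static int demo_probe(struct rte_afu_device *afu_dev)
    {
            IFPGA_BUS_INFO("probed %s", rte_ifpga_device_name(afu_dev));
            return 0;                       /* 0 = device claimed */
    }

    static int demo_remove(struct rte_afu_device *afu_dev)
    {
            RTE_SET_USED(afu_dev);
            return 0;
    }

    static const struct rte_afu_uuid demo_ids[] = {
            { .uuid_low  = 0x1111222233334444ULL,   /* placeholder uuid */
              .uuid_high = 0x5555666677778888ULL },
            { 0, 0 }    /* terminator checked by rte_afu_match() */
    };

    static struct rte_afu_driver demo_drv = {
            .id_table = demo_ids,
            .probe    = demo_probe,
            .remove   = demo_remove,
    };

    RTE_INIT(demo_drv_init)
    {
            demo_drv.driver.name = "demo_afu";
            rte_ifpga_driver_register(&demo_drv);
    }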
+ */ +static int +ifpga_probe(void) +{ + struct rte_afu_device *afu_dev = NULL; + int ret = 0; + + TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) { + if (afu_dev->device.driver) + continue; + + ret = ifpga_probe_all_drivers(afu_dev); + if (ret < 0) + IFPGA_BUS_ERR("failed to initialize %s device\n", + rte_ifpga_device_name(afu_dev)); + } + + return ret; +} + +static int +ifpga_plug(struct rte_device *dev) +{ + return ifpga_probe_all_drivers(RTE_DEV_TO_AFU(dev)); +} + +static int +ifpga_remove_driver(struct rte_afu_device *afu_dev) +{ + const char *name; + const struct rte_afu_driver *driver; + + name = rte_ifpga_device_name(afu_dev); + if (!afu_dev->device.driver) { + IFPGA_BUS_DEBUG("no driver attach to device %s\n", name); + return 1; + } + + driver = RTE_DRV_TO_AFU_CONST(afu_dev->device.driver); + return driver->remove(afu_dev); +} + +static int +ifpga_unplug(struct rte_device *dev) +{ + struct rte_afu_device *afu_dev = NULL; + struct rte_devargs *devargs = NULL; + int ret; + + if (dev == NULL) + return -EINVAL; + + afu_dev = RTE_DEV_TO_AFU(dev); + if (!afu_dev) + return -ENOENT; + + devargs = dev->devargs; + + ret = ifpga_remove_driver(afu_dev); + if (ret) + return ret; + + TAILQ_REMOVE(&ifpga_afu_dev_list, afu_dev, next); + + rte_devargs_remove(devargs->bus->name, devargs->name); + free(afu_dev); + return 0; + +} + +static struct rte_device * +ifpga_find_device(const struct rte_device *start, + rte_dev_cmp_t cmp, const void *data) +{ + struct rte_afu_device *afu_dev; + + TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) { + if (start && &afu_dev->device == start) { + start = NULL; + continue; + } + if (cmp(&afu_dev->device, data) == 0) + return &afu_dev->device; + } + + return NULL; +} +static int +ifpga_parse(const char *name, void *addr) +{ + int *out = addr; + struct rte_rawdev *rawdev = NULL; + char rawdev_name[RTE_RAWDEV_NAME_MAX_LEN]; + char *c1 = NULL; + char *c2 = NULL; + int port = IFPGA_BUS_DEV_PORT_MAX; + char str_port[8]; + int str_port_len = 0; + int ret; + + memset(str_port, 0, 8); + c1 = strchr(name, '|'); + if (c1 != NULL) { + str_port_len = c1 - name; + c2 = c1 + 1; + } + + if (str_port_len < 8 && + str_port_len > 0) { + memcpy(str_port, name, str_port_len); + ret = sscanf(str_port, "%d", &port); + if (ret == -1) + return 0; + } + + memset(rawdev_name, 0, sizeof(rawdev_name)); + snprintf(rawdev_name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%s", c2); + rawdev = rte_rawdev_pmd_get_named_dev(rawdev_name); + + if ((port < IFPGA_BUS_DEV_PORT_MAX) && + rawdev && + (addr != NULL)) + *out = port; + + if ((port < IFPGA_BUS_DEV_PORT_MAX) && + rawdev) + return 0; + else + return 1; +} + +static struct rte_bus rte_ifpga_bus = { + .scan = ifpga_scan, + .probe = ifpga_probe, + .find_device = ifpga_find_device, + .plug = ifpga_plug, + .unplug = ifpga_unplug, + .parse = ifpga_parse, +}; + +RTE_REGISTER_BUS(IFPGA_BUS_NAME, rte_ifpga_bus); + +RTE_INIT(ifpga_init_log) +{ + ifpga_bus_logtype = rte_log_register("bus.ifpga"); + if (ifpga_bus_logtype >= 0) + rte_log_set_level(ifpga_bus_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/bus/ifpga/ifpga_common.c b/drivers/bus/ifpga/ifpga_common.c new file mode 100644 index 00000000..78e2eaee --- /dev/null +++ b/drivers/bus/ifpga/ifpga_common.c @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include 
+#include + +#include "rte_bus_ifpga.h" +#include "ifpga_logs.h" +#include "ifpga_common.h" + +int rte_ifpga_get_string_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + if (!value || !extra_args) + return -EINVAL; + + *(char **)extra_args = strdup(value); + + if (!*(char **)extra_args) + return -ENOMEM; + + return 0; +} +int rte_ifpga_get_integer32_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + if (!value || !extra_args) + return -EINVAL; + + *(int *)extra_args = strtoull(value, NULL, 0); + + return 0; +} +int ifpga_get_integer64_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + if (!value || !extra_args) + return -EINVAL; + + *(uint64_t *)extra_args = strtoull(value, NULL, 0); + + return 0; +} +int ifpga_get_unsigned_long(const char *str, int base) +{ + unsigned long num; + char *end = NULL; + + errno = 0; + + num = strtoul(str, &end, base); + if ((str[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) + return -1; + + return num; +} + +int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0, + const struct rte_afu_id *afu_id1) +{ + if ((afu_id0->uuid.uuid_low == afu_id1->uuid.uuid_low) && + (afu_id0->uuid.uuid_high == afu_id1->uuid.uuid_high) && + (afu_id0->port == afu_id1->port)) { + return 0; + } else + return 1; +} diff --git a/drivers/bus/ifpga/ifpga_common.h b/drivers/bus/ifpga/ifpga_common.h new file mode 100644 index 00000000..f9254b9d --- /dev/null +++ b/drivers/bus/ifpga/ifpga_common.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#ifndef _IFPGA_COMMON_H_ +#define _IFPGA_COMMON_H_ + +int rte_ifpga_get_string_arg(const char *key __rte_unused, + const char *value, void *extra_args); +int rte_ifpga_get_integer32_arg(const char *key __rte_unused, + const char *value, void *extra_args); +int ifpga_get_integer64_arg(const char *key __rte_unused, + const char *value, void *extra_args); +int ifpga_get_unsigned_long(const char *str, int base); +int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0, + const struct rte_afu_id *afu_id1); + +#endif /* _IFPGA_COMMON_H_ */ diff --git a/drivers/bus/ifpga/ifpga_logs.h b/drivers/bus/ifpga/ifpga_logs.h new file mode 100644 index 00000000..873e0a4f --- /dev/null +++ b/drivers/bus/ifpga/ifpga_logs.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#ifndef _IFPGA_LOGS_H_ +#define _IFPGA_LOGS_H_ + +#include + +extern int ifpga_bus_logtype; + +#define IFPGA_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ifpga_bus_logtype, "%s(): " fmt "\n", \ + __func__, ##args) + +#define IFPGA_BUS_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ifpga_bus_logtype, "%s(): " fmt "\n", \ + __func__, ##args) + +#define IFPGA_BUS_FUNC_TRACE() IFPGA_BUS_LOG(DEBUG, ">>") + +#define IFPGA_BUS_DEBUG(fmt, args...) \ + IFPGA_BUS_LOG(DEBUG, fmt, ## args) +#define IFPGA_BUS_INFO(fmt, args...) \ + IFPGA_BUS_LOG(INFO, fmt, ## args) +#define IFPGA_BUS_ERR(fmt, args...) \ + IFPGA_BUS_LOG(ERR, fmt, ## args) +#define IFPGA_BUS_WARN(fmt, args...) 
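ifpga_get_unsigned_long() above shows the full checklist for a safe strtoul(): clear errno first, then reject empty input, trailing characters, and range errors in one test. A usage sketch of the same checks (parse_ul is a local stand-in, not a DPDK API; note the real helper returns the value as int, so very large inputs would not round-trip):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Same validation as ifpga_get_unsigned_long(): empty strings,
 * trailing junk and out-of-range values all come back as -1. */
static long
parse_ul(const char *str, int base)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;
	num = strtoul(str, &end, base);
	if (str[0] == '\0' || end == NULL || *end != '\0' || errno != 0)
		return -1;
	return (long)num;
}

int
main(void)
{
	printf("%ld\n", parse_ul("0x20", 0));	/* 32 */
	printf("%ld\n", parse_ul("32x", 0));	/* -1: trailing junk */
	printf("%ld\n", parse_ul("", 0));	/* -1: empty input */
	return 0;
}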
\ + IFPGA_BUS_LOG(WARNING, fmt, ## args) + +#endif /* _IFPGA_BUS_LOGS_H_ */ diff --git a/drivers/bus/ifpga/meson.build b/drivers/bus/ifpga/meson.build new file mode 100644 index 00000000..c9b08c86 --- /dev/null +++ b/drivers/bus/ifpga/meson.build @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2018 Intel Corporation + +deps += ['pci', 'kvargs', 'rawdev'] +install_headers('rte_bus_ifpga.h') +sources = files('ifpga_common.c', 'ifpga_bus.c') + +allow_experimental_apis = true diff --git a/drivers/bus/ifpga/rte_bus_ifpga.h b/drivers/bus/ifpga/rte_bus_ifpga.h new file mode 100644 index 00000000..51d5ae0d --- /dev/null +++ b/drivers/bus/ifpga/rte_bus_ifpga.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#ifndef _RTE_BUS_IFPGA_H_ +#define _RTE_BUS_IFPGA_H_ + +/** + * @file + * + * RTE Intel FPGA Bus Interface + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +/** Name of Intel FPGA Bus */ +#define IFPGA_BUS_NAME ifpga + +/* Forward declarations */ +struct rte_afu_device; +struct rte_afu_driver; + +/** Double linked list of Intel FPGA AFU device. */ +TAILQ_HEAD(ifpga_afu_dev_list, rte_afu_device); +/** Double linked list of Intel FPGA AFU device drivers. */ +TAILQ_HEAD(ifpga_afu_drv_list, rte_afu_driver); + +#define IFPGA_BUS_BITSTREAM_PATH_MAX_LEN 256 + +struct rte_afu_uuid { + uint64_t uuid_low; + uint64_t uuid_high; +} __attribute__ ((packed)); + +#define IFPGA_BUS_DEV_PORT_MAX 4 + +/** + * A structure describing an ID for a AFU driver. Each driver provides a + * table of these IDs for each device that it supports. + */ +struct rte_afu_id { + struct rte_afu_uuid uuid; + int port; /**< port number */ +} __attribute__ ((packed)); + +/** + * A structure PR (Partial Reconfiguration) configuration AFU driver. + */ + +struct rte_afu_pr_conf { + struct rte_afu_id afu_id; + int pr_enable; + char bs_path[IFPGA_BUS_BITSTREAM_PATH_MAX_LEN]; +}; + +#define AFU_PRI_STR_SIZE (PCI_PRI_STR_SIZE + 8) + +/** + * A structure describing a AFU device. + */ +struct rte_afu_device { + TAILQ_ENTRY(rte_afu_device) next; /**< Next in device list. */ + struct rte_device device; /**< Inherit core device */ + struct rte_rawdev *rawdev; /**< Point Rawdev */ + struct rte_afu_id id; /**< AFU id within FPGA. */ + uint32_t num_region; /**< number of regions found */ + struct rte_mem_resource mem_resource[PCI_MAX_RESOURCE]; + /**< AFU Memory Resource */ + struct rte_intr_handle intr_handle; /**< Interrupt handle */ + struct rte_afu_driver *driver; /**< Associated driver */ + char path[IFPGA_BUS_BITSTREAM_PATH_MAX_LEN]; +} __attribute__ ((packed)); + +/** + * @internal + * Helper macro for drivers that need to convert to struct rte_afu_device. + */ +#define RTE_DEV_TO_AFU(ptr) \ + container_of(ptr, struct rte_afu_device, device) + +#define RTE_DRV_TO_AFU_CONST(ptr) \ + container_of(ptr, const struct rte_afu_driver, driver) + +/** + * Initialization function for the driver called during FPGA BUS probing. + */ +typedef int (afu_probe_t)(struct rte_afu_device *); + +/** + * Uninitialization function for the driver called during hotplugging. + */ +typedef int (afu_remove_t)(struct rte_afu_device *); + +/** + * A structure describing a AFU device. + */ +struct rte_afu_driver { + TAILQ_ENTRY(rte_afu_driver) next; /**< Next afu driver. */ + struct rte_driver driver; /**< Inherit core driver. */ + afu_probe_t *probe; /**< Device Probe function. */ + afu_remove_t *remove; /**< Device Remove function. 
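RTE_DEV_TO_AFU above is the classic container_of pattern: the generic rte_device is embedded inside rte_afu_device, so a pointer to the member can be mapped back to its wrapper by subtracting the member's offset. A self-contained illustration with stand-in types (not the DPDK definitions):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device {
	const char *name;
};

struct afu_device {
	int port;
	struct device device;	/* embedded generic device */
};

int
main(void)
{
	struct afu_device afu = { .port = 2, .device = { "afu0" } };
	struct device *generic = &afu.device;
	/* recover the wrapper from a pointer to the embedded member */
	struct afu_device *back =
		container_of(generic, struct afu_device, device);

	printf("%s on port %d\n", back->device.name, back->port);
	return 0;
}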
*/ + const struct rte_afu_uuid *id_table; /**< AFU uuid within FPGA. */ +}; + +static inline const char * +rte_ifpga_device_name(const struct rte_afu_device *afu) +{ + if (afu && afu->device.name) + return afu->device.name; + return NULL; +} + +/** + * Register a ifpga afu device driver. + * + * @param driver + * A pointer to a rte_afu_driver structure describing the driver + * to be registered. + */ +void rte_ifpga_driver_register(struct rte_afu_driver *driver); + +/** + * Unregister a ifpga afu device driver. + * + * @param driver + * A pointer to a rte_afu_driver structure describing the driver + * to be unregistered. + */ +void rte_ifpga_driver_unregister(struct rte_afu_driver *driver); + +#define RTE_PMD_REGISTER_AFU(nm, afudrv)\ +static const char *afudrvinit_ ## nm ## _alias;\ +RTE_INIT(afudrvinitfn_ ##afudrv)\ +{\ + (afudrv).driver.name = RTE_STR(nm);\ + (afudrv).driver.alias = afudrvinit_ ## nm ## _alias;\ + rte_ifpga_driver_register(&afudrv);\ +} \ +RTE_PMD_EXPORT_NAME(nm, __COUNTER__) + +#define RTE_PMD_REGISTER_AFU_ALIAS(nm, alias)\ +static const char *afudrvinit_ ## nm ## _alias = RTE_STR(alias) + +#endif /* _RTE_BUS_IFPGA_H_ */ diff --git a/drivers/bus/ifpga/rte_bus_ifpga_version.map b/drivers/bus/ifpga/rte_bus_ifpga_version.map new file mode 100644 index 00000000..a0279797 --- /dev/null +++ b/drivers/bus/ifpga/rte_bus_ifpga_version.map @@ -0,0 +1,10 @@ +DPDK_18.05 { + global: + + rte_ifpga_get_integer32_arg; + rte_ifpga_get_string_arg; + rte_ifpga_driver_register; + rte_ifpga_driver_unregister; + + local: *; +}; diff --git a/drivers/bus/meson.build b/drivers/bus/meson.build index c6af500e..80de2d91 100644 --- a/drivers/bus/meson.build +++ b/drivers/bus/meson.build @@ -1,7 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation -drivers = ['pci', 'vdev'] +drivers = ['dpaa', 'fslmc', 'ifpga', 'pci', 'vdev', 'vmbus'] std_deps = ['eal'] config_flag_fmt = 'RTE_LIBRTE_@0@_BUS' driver_name_fmt = 'rte_bus_@0@' diff --git a/drivers/bus/pci/Makefile b/drivers/bus/pci/Makefile index f3df1c4c..cf373068 100644 --- a/drivers/bus/pci/Makefile +++ b/drivers/bus/pci/Makefile @@ -1,33 +1,5 @@ -# BSD LICENSE -# -# Copyright(c) 2017 6WIND S.A. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
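RTE_PMD_REGISTER_AFU above relies on RTE_INIT, which in this release expands to a function carrying the compiler's constructor attribute, so driver registration runs before main() without any explicit call from the application. A stripped-down sketch of the mechanism (register_driver() and the driver name are invented; the attribute is GCC/Clang specific):

#include <stdio.h>

struct driver {
	const char *name;
};

static void
register_driver(struct driver *d)
{
	/* the real bus code links d into its driver list here */
	printf("registered %s\n", d->name);
}

static struct driver example_drv;	/* hypothetical AFU driver */

/* runs before main(), exactly like an RTE_INIT registration hook */
__attribute__((constructor))
static void
example_drv_init(void)
{
	example_drv.name = "afu_example";
	register_driver(&example_drv);
}

int
main(void)
{
	return 0;
}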
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 6WIND S.A. include $(RTE_SDK)/mk/rte.vars.mk @@ -37,6 +9,7 @@ EXPORT_MAP := rte_bus_pci_version.map CFLAGS := -I$(SRCDIR) $(CFLAGS) CFLAGS += -O3 $(WERROR_FLAGS) +CFLAGS += -DALLOW_EXPERIMENTAL_API ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),) SYSTEM := linux @@ -49,6 +22,9 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/pci/$(SYSTEM) CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common CFLAGS += -I$(RTE_SDK)/lib/librte_eal/$(SYSTEM)app/eal +# memseg walk is not part of stable API yet +CFLAGS += -DALLOW_EXPERIMENTAL_API + LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_pci diff --git a/drivers/bus/pci/bsd/Makefile b/drivers/bus/pci/bsd/Makefile index 4450913e..c1b54c05 100644 --- a/drivers/bus/pci/bsd/Makefile +++ b/drivers/bus/pci/bsd/Makefile @@ -1,32 +1,4 @@ -# BSD LICENSE -# -# Copyright(c) 2017 6WIND S.A. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 6WIND S.A. SRCS += pci.c diff --git a/drivers/bus/pci/linux/Makefile b/drivers/bus/pci/linux/Makefile index 77c5f970..96ea1d54 100644 --- a/drivers/bus/pci/linux/Makefile +++ b/drivers/bus/pci/linux/Makefile @@ -1,33 +1,5 @@ -# BSD LICENSE -# -# Copyright(c) 2017 6WIND S.A. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 6WIND S.A. SRCS += pci.c SRCS += pci_uio.c diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c index abde6411..04648ac9 100644 --- a/drivers/bus/pci/linux/pci.c +++ b/drivers/bus/pci/linux/pci.c @@ -15,7 +15,6 @@ #include #include -#include "eal_private.h" #include "eal_filesystem.h" #include "private.h" @@ -33,7 +32,8 @@ extern struct rte_pci_bus rte_pci_bus; static int -pci_get_kernel_driver_by_path(const char *filename, char *dri_name) +pci_get_kernel_driver_by_path(const char *filename, char *dri_name, + size_t len) { int count; char path[PATH_MAX]; @@ -54,7 +54,7 @@ pci_get_kernel_driver_by_path(const char *filename, char *dri_name) name = strrchr(path, '/'); if (name) { - strncpy(dri_name, name + 1, strlen(name + 1) + 1); + strlcpy(dri_name, name + 1, len); return 0; } @@ -116,24 +116,28 @@ rte_pci_unmap_device(struct rte_pci_device *dev) } } -void * -pci_find_max_end_va(void) +static int +find_max_end_va(const struct rte_memseg_list *msl, void *arg) { - const struct rte_memseg *seg = rte_eal_get_physmem_layout(); - const struct rte_memseg *last = seg; - unsigned i = 0; + size_t sz = msl->memseg_arr.len * msl->page_sz; + void *end_va = RTE_PTR_ADD(msl->base_va, sz); + void **max_va = arg; - for (i = 0; i < RTE_MAX_MEMSEG; i++, seg++) { - if (seg->addr == NULL) - break; + if (*max_va < end_va) + *max_va = end_va; + return 0; +} - if (seg->addr > last->addr) - last = seg; +void * +pci_find_max_end_va(void) +{ + void *va = NULL; - } - return RTE_PTR_ADD(last->addr, last->len); + rte_memseg_list_walk(find_max_end_va, &va); + return va; } + /* parse one line of the "resource" sysfs file (note that the 'line' * string is modified) */ @@ -310,7 +314,7 @@ pci_scan_one(const char *dirname, const struct rte_pci_addr *addr) /* parse driver */ snprintf(filename, sizeof(filename), "%s/driver", dirname); - ret = pci_get_kernel_driver_by_path(filename, driver); + ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver)); if (ret < 0) { RTE_LOG(ERR, EAL, 
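The strncpy-to-strlcpy change in pci_get_kernel_driver_by_path() above fixes a classic truncation hazard: bounding strncpy by the source length ignores the destination size and does not guarantee NUL termination. strlcpy always terminates and reports the source length so callers can detect truncation; a portable stand-in is sketched below (DPDK supplies the real one through rte_string_fns.h when libc lacks it):

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy-alike: always NUL-terminates, returns strlen(src)
 * so that a return value >= size signals truncation. */
static size_t
my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t srclen = strlen(src);

	if (size > 0) {
		size_t n = srclen < size - 1 ? srclen : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return srclen;
}

int
main(void)
{
	char drv[8];

	if (my_strlcpy(drv, "a-very-long-driver-name", sizeof(drv)) >=
	    sizeof(drv))
		printf("truncated to \"%s\"\n", drv);
	return 0;
}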
"Fail to get kernel driver\n"); free(dev); diff --git a/drivers/bus/pci/linux/pci_uio.c b/drivers/bus/pci/linux/pci_uio.c index d423e4bb..a7c14421 100644 --- a/drivers/bus/pci/linux/pci_uio.c +++ b/drivers/bus/pci/linux/pci_uio.c @@ -282,22 +282,19 @@ int pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx, struct mapped_pci_resource *uio_res, int map_idx) { - int fd; + int fd = -1; char devname[PATH_MAX]; void *mapaddr; struct rte_pci_addr *loc; struct pci_map *maps; + int wc_activate = 0; + + if (dev->driver != NULL) + wc_activate = dev->driver->drv_flags & RTE_PCI_DRV_WC_ACTIVATE; loc = &dev->addr; maps = uio_res->maps; - /* update devname for mmap */ - snprintf(devname, sizeof(devname), - "%s/" PCI_PRI_FMT "/resource%d", - rte_pci_get_sysfs_path(), - loc->domain, loc->bus, loc->devid, - loc->function, res_idx); - /* allocate memory to keep path */ maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0); if (maps[map_idx].path == NULL) { @@ -309,11 +306,37 @@ pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx, /* * open resource file, to mmap it */ - fd = open(devname, O_RDWR); - if (fd < 0) { - RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", + if (wc_activate) { + /* update devname for mmap */ + snprintf(devname, sizeof(devname), + "%s/" PCI_PRI_FMT "/resource%d_wc", + rte_pci_get_sysfs_path(), + loc->domain, loc->bus, loc->devid, + loc->function, res_idx); + + if (access(devname, R_OK|W_OK) != -1) { + fd = open(devname, O_RDWR); + if (fd < 0) + RTE_LOG(INFO, EAL, "%s cannot be mapped. " + "Fall-back to non prefetchable mode.\n", + devname); + } + } + + if (!wc_activate || fd < 0) { + snprintf(devname, sizeof(devname), + "%s/" PCI_PRI_FMT "/resource%d", + rte_pci_get_sysfs_path(), + loc->domain, loc->bus, loc->devid, + loc->function, res_idx); + + /* then try to map resource file */ + fd = open(devname, O_RDWR); + if (fd < 0) { + RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", devname, strerror(errno)); - goto error; + goto error; + } } /* try mapping somewhere close to the end of hugepages */ diff --git a/drivers/bus/pci/linux/pci_vfio.c b/drivers/bus/pci/linux/pci_vfio.c index aeeaa9ed..686386d6 100644 --- a/drivers/bus/pci/linux/pci_vfio.c +++ b/drivers/bus/pci/linux/pci_vfio.c @@ -584,6 +584,9 @@ pci_vfio_map_resource_secondary(struct rte_pci_device *dev) dev->mem_resource[i].addr = maps[i].addr; } + /* we need save vfio_dev_fd, so it can be used during release */ + dev->intr_handle.vfio_dev_fd = vfio_dev_fd; + return 0; err_vfio_dev_fd: close(vfio_dev_fd); @@ -603,22 +606,58 @@ pci_vfio_map_resource(struct rte_pci_device *dev) return pci_vfio_map_resource_secondary(dev); } -int -pci_vfio_unmap_resource(struct rte_pci_device *dev) +static struct mapped_pci_resource * +find_and_unmap_vfio_resource(struct mapped_pci_res_list *vfio_res_list, + struct rte_pci_device *dev, + const char *pci_addr) +{ + struct mapped_pci_resource *vfio_res = NULL; + struct pci_map *maps; + int i; + + /* Get vfio_res */ + TAILQ_FOREACH(vfio_res, vfio_res_list, next) { + if (rte_pci_addr_cmp(&vfio_res->pci_addr, &dev->addr)) + continue; + break; + } + + if (vfio_res == NULL) + return vfio_res; + + RTE_LOG(INFO, EAL, "Releasing pci mapped resource for %s\n", + pci_addr); + + maps = vfio_res->maps; + for (i = 0; i < (int) vfio_res->nb_maps; i++) { + + /* + * We do not need to be aware of MSI-X table BAR mappings as + * when mapping. 
Just using current maps array is enough + */ + if (maps[i].addr) { + RTE_LOG(INFO, EAL, "Calling pci_unmap_resource for %s at %p\n", + pci_addr, maps[i].addr); + pci_unmap_resource(maps[i].addr, maps[i].size); + } + } + + return vfio_res; +} + +static int +pci_vfio_unmap_resource_primary(struct rte_pci_device *dev) { char pci_addr[PATH_MAX] = {0}; struct rte_pci_addr *loc = &dev->addr; - int i, ret; struct mapped_pci_resource *vfio_res = NULL; struct mapped_pci_res_list *vfio_res_list; - - struct pci_map *maps; + int ret; /* store PCI address string */ snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT, loc->domain, loc->bus, loc->devid, loc->function); - if (close(dev->intr_handle.fd) < 0) { RTE_LOG(INFO, EAL, "Error when closing eventfd file descriptor for %s\n", pci_addr); @@ -639,13 +678,10 @@ pci_vfio_unmap_resource(struct rte_pci_device *dev) return ret; } - vfio_res_list = RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list); - /* Get vfio_res */ - TAILQ_FOREACH(vfio_res, vfio_res_list, next) { - if (memcmp(&vfio_res->pci_addr, &dev->addr, sizeof(dev->addr))) - continue; - break; - } + vfio_res_list = + RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list); + vfio_res = find_and_unmap_vfio_resource(vfio_res_list, dev, pci_addr); + /* if we haven't found our tailq entry, something's wrong */ if (vfio_res == NULL) { RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n", @@ -653,29 +689,55 @@ pci_vfio_unmap_resource(struct rte_pci_device *dev) return -1; } - /* unmap BARs */ - maps = vfio_res->maps; + TAILQ_REMOVE(vfio_res_list, vfio_res, next); - RTE_LOG(INFO, EAL, "Releasing pci mapped resource for %s\n", - pci_addr); - for (i = 0; i < (int) vfio_res->nb_maps; i++) { + return 0; +} - /* - * We do not need to be aware of MSI-X table BAR mappings as - * when mapping. 
Just using current maps array is enough - */ - if (maps[i].addr) { - RTE_LOG(INFO, EAL, "Calling pci_unmap_resource for %s at %p\n", - pci_addr, maps[i].addr); - pci_unmap_resource(maps[i].addr, maps[i].size); - } +static int +pci_vfio_unmap_resource_secondary(struct rte_pci_device *dev) +{ + char pci_addr[PATH_MAX] = {0}; + struct rte_pci_addr *loc = &dev->addr; + struct mapped_pci_resource *vfio_res = NULL; + struct mapped_pci_res_list *vfio_res_list; + int ret; + + /* store PCI address string */ + snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT, + loc->domain, loc->bus, loc->devid, loc->function); + + ret = rte_vfio_release_device(rte_pci_get_sysfs_path(), pci_addr, + dev->intr_handle.vfio_dev_fd); + if (ret < 0) { + RTE_LOG(ERR, EAL, + "%s(): cannot release device\n", __func__); + return ret; } - TAILQ_REMOVE(vfio_res_list, vfio_res, next); + vfio_res_list = + RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list); + vfio_res = find_and_unmap_vfio_resource(vfio_res_list, dev, pci_addr); + + /* if we haven't found our tailq entry, something's wrong */ + if (vfio_res == NULL) { + RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n", + pci_addr); + return -1; + } return 0; } +int +pci_vfio_unmap_resource(struct rte_pci_device *dev) +{ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + return pci_vfio_unmap_resource_primary(dev); + else + return pci_vfio_unmap_resource_secondary(dev); +} + int pci_vfio_ioport_map(struct rte_pci_device *dev, int bar, struct rte_pci_ioport *p) diff --git a/drivers/bus/pci/meson.build b/drivers/bus/pci/meson.build index 12756a4d..72939e59 100644 --- a/drivers/bus/pci/meson.build +++ b/drivers/bus/pci/meson.build @@ -14,3 +14,6 @@ else sources += files('bsd/pci.c') includes += include_directories('bsd') endif + +# memseg walk is not part of stable API yet +allow_experimental_apis = true diff --git a/drivers/bus/pci/pci_common.c b/drivers/bus/pci/pci_common.c index 2a00f365..7736b3f9 100644 --- a/drivers/bus/pci/pci_common.c +++ b/drivers/bus/pci/pci_common.c @@ -26,6 +26,7 @@ #include "private.h" + extern struct rte_pci_bus rte_pci_bus; #define SYSFS_PCI_DEVICES "/sys/bus/pci/devices" @@ -45,12 +46,8 @@ static struct rte_devargs *pci_devargs_lookup(struct rte_pci_device *dev) { struct rte_devargs *devargs; struct rte_pci_addr addr; - struct rte_bus *pbus; - pbus = rte_bus_find_by_name("pci"); - TAILQ_FOREACH(devargs, &devargs_list, next) { - if (devargs->bus != pbus) - continue; + RTE_EAL_DEVARGS_FOREACH("pci", devargs) { devargs->bus->parse(devargs->name, &addr); if (!rte_pci_addr_cmp(&dev->addr, &addr)) return devargs; @@ -73,7 +70,7 @@ pci_name_set(struct rte_pci_device *dev) */ if (devargs != NULL) /* If an rte_devargs exists, the generic rte_device uses the - * given name as its namea + * given name as its name. */ dev->device.name = dev->device.devargs->name; else @@ -159,17 +156,24 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr, RTE_LOG(INFO, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id, dev->id.device_id, dr->driver.name); + /* + * reference driver structure + * This needs to be before rte_pci_map_device(), as it enables to use + * driver flags for adjusting configuration. 
+ */ + dev->driver = dr; + dev->device.driver = &dr->driver; + if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) { /* map resources for devices that use igb_uio */ ret = rte_pci_map_device(dev); - if (ret != 0) + if (ret != 0) { + dev->driver = NULL; + dev->device.driver = NULL; return ret; + } } - /* reference driver structure */ - dev->driver = dr; - dev->device.driver = &dr->driver; - /* call the driver probe() function */ ret = dr->probe(dr, dev); if (ret) { @@ -258,81 +262,6 @@ pci_probe_all_drivers(struct rte_pci_device *dev) return 1; } -/* - * Find the pci device specified by pci address, then invoke probe function of - * the driver of the device. - */ -int -rte_pci_probe_one(const struct rte_pci_addr *addr) -{ - struct rte_pci_device *dev = NULL; - - int ret = 0; - - if (addr == NULL) - return -1; - - /* update current pci device in global list, kernel bindings might have - * changed since last time we looked at it. - */ - if (pci_update_device(addr) < 0) - goto err_return; - - FOREACH_DEVICE_ON_PCIBUS(dev) { - if (rte_pci_addr_cmp(&dev->addr, addr)) - continue; - - ret = pci_probe_all_drivers(dev); - if (ret) - goto err_return; - return 0; - } - return -1; - -err_return: - RTE_LOG(WARNING, EAL, - "Requested device " PCI_PRI_FMT " cannot be used\n", - addr->domain, addr->bus, addr->devid, addr->function); - return -1; -} - -/* - * Detach device specified by its pci address. - */ -int -rte_pci_detach(const struct rte_pci_addr *addr) -{ - struct rte_pci_device *dev = NULL; - int ret = 0; - - if (addr == NULL) - return -1; - - FOREACH_DEVICE_ON_PCIBUS(dev) { - if (rte_pci_addr_cmp(&dev->addr, addr)) - continue; - - ret = rte_pci_detach_dev(dev); - if (ret < 0) - /* negative value is an error */ - goto err_return; - if (ret > 0) - /* positive value means driver doesn't support it */ - continue; - - rte_pci_remove_device(dev); - free(dev); - return 0; - } - return -1; - -err_return: - RTE_LOG(WARNING, EAL, "Requested device " PCI_PRI_FMT - " cannot be used\n", dev->addr.domain, dev->addr.bus, - dev->addr.devid, dev->addr.function); - return -1; -} - /* * Scan the content of the PCI bus, and call the probe() function for * all registered drivers that have a matching entry in its id_table @@ -449,7 +378,7 @@ rte_pci_insert_device(struct rte_pci_device *exist_pci_dev, } /* Remove a device from PCI bus */ -void +static void rte_pci_remove_device(struct rte_pci_device *pci_dev) { TAILQ_REMOVE(&rte_pci_bus.device_list, pci_dev, next); @@ -459,17 +388,20 @@ static struct rte_device * pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp, const void *data) { - struct rte_pci_device *dev; + const struct rte_pci_device *pstart; + struct rte_pci_device *pdev; - FOREACH_DEVICE_ON_PCIBUS(dev) { - if (start && &dev->device == start) { - start = NULL; /* starting point found */ - continue; - } - if (cmp(&dev->device, data) == 0) - return &dev->device; + if (start != NULL) { + pstart = RTE_DEV_TO_PCI_CONST(start); + pdev = TAILQ_NEXT(pstart, next); + } else { + pdev = TAILQ_FIRST(&rte_pci_bus.device_list); + } + while (pdev != NULL) { + if (cmp(&pdev->device, data) == 0) + return &pdev->device; + pdev = TAILQ_NEXT(pdev, next); } - return NULL; } diff --git a/drivers/bus/pci/private.h b/drivers/bus/pci/private.h index 88fa587e..8ddd03e1 100644 --- a/drivers/bus/pci/private.h +++ b/drivers/bus/pci/private.h @@ -32,36 +32,6 @@ rte_pci_probe(void); */ int rte_pci_scan(void); -/** - * Probe the single PCI device. 
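The rewritten pci_find_device() above resumes from TAILQ_NEXT(start) instead of walking from the head and flag-checking for the starting point, which makes "find the next match after this one" iteration natural. A generic sketch of the same resumable lookup (the list element type and cmp_id are invented for the example):

#include <stdio.h>
#include <sys/queue.h>

struct dev {
	TAILQ_ENTRY(dev) next;
	int id;
};
TAILQ_HEAD(dev_list, dev);

/* When start is non-NULL, scanning continues after it, so repeated
 * calls enumerate every matching element exactly once. */
static struct dev *
find_dev(struct dev_list *list, struct dev *start,
	 int (*cmp)(const struct dev *, const void *), const void *data)
{
	struct dev *d = start ? TAILQ_NEXT(start, next) : TAILQ_FIRST(list);

	for (; d != NULL; d = TAILQ_NEXT(d, next))
		if (cmp(d, data) == 0)
			return d;
	return NULL;
}

static int
cmp_id(const struct dev *d, const void *want)
{
	return d->id != *(const int *)want;
}

int
main(void)
{
	struct dev_list list = TAILQ_HEAD_INITIALIZER(list);
	struct dev a = { .id = 1 }, b = { .id = 1 };
	struct dev *it = NULL;
	int want = 1;

	TAILQ_INSERT_TAIL(&list, &a, next);
	TAILQ_INSERT_TAIL(&list, &b, next);
	while ((it = find_dev(&list, it, cmp_id, &want)) != NULL)
		printf("match at %p\n", (void *)it);
	return 0;
}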
- * - * Scan the content of the PCI bus, and find the pci device specified by pci - * address, then call the probe() function for registered driver that has a - * matching entry in its id_table for discovered device. - * - * @param addr - * The PCI Bus-Device-Function address to probe. - * @return - * - 0 on success. - * - Negative on error. - */ -int rte_pci_probe_one(const struct rte_pci_addr *addr); - -/** - * Close the single PCI device. - * - * Scan the content of the PCI bus, and find the pci device specified by pci - * address, then call the remove() function for registered driver that has a - * matching entry in its id_table for discovered device. - * - * @param addr - * The PCI Bus-Device-Function address to close. - * @return - * - 0 on success. - * - Negative on error. - */ -int rte_pci_detach(const struct rte_pci_addr *addr); - /** * Find the name of a PCI device. */ @@ -93,16 +63,6 @@ void rte_pci_add_device(struct rte_pci_device *pci_dev); void rte_pci_insert_device(struct rte_pci_device *exist_pci_dev, struct rte_pci_device *new_pci_dev); -/** - * Remove a PCI device from the PCI Bus. This sets to NULL the bus references - * in the PCI device object as well as the generic device object. - * - * @param pci_device - * PCI device to be removed from PCI Bus - * @return void - */ -void rte_pci_remove_device(struct rte_pci_device *pci_device); - /** * Update a pci device object by asking the kernel for the latest information. * @@ -116,16 +76,6 @@ void rte_pci_remove_device(struct rte_pci_device *pci_device); */ int pci_update_device(const struct rte_pci_addr *addr); -/** - * Unbind kernel driver for this device - * - * This function is private to EAL. - * - * @return - * 0 on success, negative on error - */ -int pci_unbind_kernel_driver(struct rte_pci_device *dev); - /** * Map the PCI resource of a PCI device in virtual memory * diff --git a/drivers/bus/pci/rte_bus_pci.h b/drivers/bus/pci/rte_bus_pci.h index 357afb91..0d1955ff 100644 --- a/drivers/bus/pci/rte_bus_pci.h +++ b/drivers/bus/pci/rte_bus_pci.h @@ -74,6 +74,9 @@ struct rte_pci_device { */ #define RTE_DEV_TO_PCI(ptr) container_of(ptr, struct rte_pci_device, device) +#define RTE_DEV_TO_PCI_CONST(ptr) \ + container_of(ptr, const struct rte_pci_device, device) + #define RTE_ETH_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device) /** Any PCI device identifier (vendor, device, ...) 
*/ @@ -132,6 +135,8 @@ struct rte_pci_bus { /** Device needs PCI BAR mapping (done with either IGB_UIO or VFIO) */ #define RTE_PCI_DRV_NEED_MAPPING 0x0001 +/** Device needs PCI BAR mapping with enabled write combining (wc) */ +#define RTE_PCI_DRV_WC_ACTIVATE 0x0002 /** Device driver supports link state interrupt */ #define RTE_PCI_DRV_INTR_LSC 0x0008 /** Device driver supports device removal interrupt */ @@ -186,8 +191,7 @@ void rte_pci_register(struct rte_pci_driver *driver); /** Helper for PCI device registration from driver (eth, crypto) instance */ #define RTE_PMD_REGISTER_PCI(nm, pci_drv) \ -RTE_INIT(pciinitfn_ ##nm); \ -static void pciinitfn_ ##nm(void) \ +RTE_INIT(pciinitfn_ ##nm) \ {\ (pci_drv).driver.name = RTE_STR(nm);\ rte_pci_register(&pci_drv); \ diff --git a/drivers/bus/vdev/Makefile b/drivers/bus/vdev/Makefile index 24d424a3..bd0bb895 100644 --- a/drivers/bus/vdev/Makefile +++ b/drivers/bus/vdev/Makefile @@ -10,6 +10,7 @@ LIB = librte_bus_vdev.a CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) +CFLAGS += -DALLOW_EXPERIMENTAL_API # versioning export map EXPORT_MAP := rte_bus_vdev_version.map diff --git a/drivers/bus/vdev/meson.build b/drivers/bus/vdev/meson.build index 03cb87eb..2ee648b4 100644 --- a/drivers/bus/vdev/meson.build +++ b/drivers/bus/vdev/meson.build @@ -3,3 +3,5 @@ sources = files('vdev.c') install_headers('rte_bus_vdev.h') + +allow_experimental_apis = true diff --git a/drivers/bus/vdev/rte_bus_vdev.h b/drivers/bus/vdev/rte_bus_vdev.h index f9d8a238..9ae3eaae 100644 --- a/drivers/bus/vdev/rte_bus_vdev.h +++ b/drivers/bus/vdev/rte_bus_vdev.h @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016 RehiveTech. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of RehiveTech nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 RehiveTech. All rights reserved. 
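RTE_PCI_DRV_WC_ACTIVATE above is the flag consumed by the UIO mapping change earlier in this patch; a PMD opts in by setting it alongside RTE_PCI_DRV_NEED_MAPPING in drv_flags. A toy declaration-side sketch (flag values mirror the header above, the driver itself is made up):

#include <stdint.h>
#include <stdio.h>

#define DRV_NEED_MAPPING 0x0001	/* mirrors RTE_PCI_DRV_NEED_MAPPING */
#define DRV_WC_ACTIVATE  0x0002	/* mirrors RTE_PCI_DRV_WC_ACTIVATE */

struct drv {
	const char *name;
	uint32_t drv_flags;
};

static struct drv example = {
	.name = "net_example",
	.drv_flags = DRV_NEED_MAPPING | DRV_WC_ACTIVATE,
};

int
main(void)
{
	/* the bus tests this bit before choosing resource%d_wc */
	if (example.drv_flags & DRV_WC_ACTIVATE)
		printf("%s requests write-combined BARs\n", example.name);
	return 0;
}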
*/ #ifndef RTE_VDEV_H @@ -53,6 +25,9 @@ struct rte_vdev_device { #define RTE_DEV_TO_VDEV(ptr) \ container_of(ptr, struct rte_vdev_device, device) +#define RTE_DEV_TO_VDEV_CONST(ptr) \ + container_of(ptr, const struct rte_vdev_device, device) + static inline const char * rte_vdev_device_name(const struct rte_vdev_device *dev) { @@ -111,9 +86,8 @@ void rte_vdev_register(struct rte_vdev_driver *driver); void rte_vdev_unregister(struct rte_vdev_driver *driver); #define RTE_PMD_REGISTER_VDEV(nm, vdrv)\ -RTE_INIT(vdrvinitfn_ ##vdrv);\ static const char *vdrvinit_ ## nm ## _alias;\ -static void vdrvinitfn_ ##vdrv(void)\ +RTE_INIT(vdrvinitfn_ ##vdrv)\ {\ (vdrv).driver.name = RTE_STR(nm);\ (vdrv).driver.alias = vdrvinit_ ## nm ## _alias;\ diff --git a/drivers/bus/vdev/vdev.c b/drivers/bus/vdev/vdev.c index e4bc7246..6139dd55 100644 --- a/drivers/bus/vdev/vdev.c +++ b/drivers/bus/vdev/vdev.c @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016 RehiveTech. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of RehiveTech nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 RehiveTech. All rights reserved. */ #include @@ -46,11 +18,14 @@ #include #include #include +#include #include #include "rte_bus_vdev.h" #include "vdev_logs.h" +#define VDEV_MP_KEY "bus_vdev_mp" + int vdev_logtype_bus; /* Forward declare to access virtual bus name */ @@ -61,6 +36,10 @@ TAILQ_HEAD(vdev_device_list, rte_vdev_device); static struct vdev_device_list vdev_device_list = TAILQ_HEAD_INITIALIZER(vdev_device_list); +/* The lock needs to be recursive because a vdev can manage another vdev. 
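The find_vdev() change from strncmp() to strcmp() above is a correctness fix, not cosmetics: bounding the comparison by strlen(name) made it a prefix match, so looking up one device could return another whose name merely begins the same way. A short demonstration with invented names:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *devname = "net_null10";	/* device sitting in the list */
	const char *name = "net_null1";		/* device being looked up */

	/* old test: prefix match, wrongly reports a hit */
	printf("strncmp: %s\n",
	       strncmp(devname, name, strlen(name)) == 0 ? "match" : "no match");

	/* new test: exact match, correctly reports a miss */
	printf("strcmp:  %s\n",
	       strcmp(devname, name) == 0 ? "match" : "no match");
	return 0;
}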
*/ +static rte_spinlock_recursive_t vdev_device_list_lock = + RTE_SPINLOCK_RECURSIVE_INITIALIZER; + struct vdev_driver_list vdev_driver_list = TAILQ_HEAD_INITIALIZER(vdev_driver_list); @@ -165,7 +144,7 @@ vdev_probe_all_drivers(struct rte_vdev_device *dev) name = rte_vdev_device_name(dev); - VDEV_LOG(DEBUG, "Search driver %s to probe device %s\n", name, + VDEV_LOG(DEBUG, "Search driver %s to probe device %s", name, rte_vdev_device_name(dev)); if (vdev_parse(name, &driver)) @@ -177,6 +156,7 @@ vdev_probe_all_drivers(struct rte_vdev_device *dev) return ret; } +/* The caller shall be responsible for thread-safe */ static struct rte_vdev_device * find_vdev(const char *name) { @@ -188,7 +168,7 @@ find_vdev(const char *name) TAILQ_FOREACH(dev, &vdev_device_list, next) { const char *devname = rte_vdev_device_name(dev); - if (!strncmp(devname, name, strlen(name))) + if (!strcmp(devname, name)) return dev; } @@ -221,8 +201,8 @@ alloc_devargs(const char *name, const char *args) return devargs; } -int -rte_vdev_init(const char *name, const char *args) +static int +insert_vdev(const char *name, const char *args, struct rte_vdev_device **p_dev) { struct rte_vdev_device *dev; struct rte_devargs *devargs; @@ -231,10 +211,6 @@ rte_vdev_init(const char *name, const char *args) if (name == NULL) return -EINVAL; - dev = find_vdev(name); - if (dev) - return -EEXIST; - devargs = alloc_devargs(name, args); if (!devargs) return -ENOMEM; @@ -249,18 +225,18 @@ rte_vdev_init(const char *name, const char *args) dev->device.numa_node = SOCKET_ID_ANY; dev->device.name = devargs->name; - ret = vdev_probe_all_drivers(dev); - if (ret) { - if (ret > 0) - VDEV_LOG(ERR, "no driver found for %s\n", name); + if (find_vdev(name)) { + ret = -EEXIST; goto fail; } - TAILQ_INSERT_TAIL(&devargs_list, devargs, next); - TAILQ_INSERT_TAIL(&vdev_device_list, dev, next); - return 0; + rte_devargs_insert(devargs); + + if (p_dev) + *p_dev = dev; + return 0; fail: free(devargs->args); free(devargs); @@ -268,6 +244,31 @@ fail: return ret; } +int +rte_vdev_init(const char *name, const char *args) +{ + struct rte_vdev_device *dev; + struct rte_devargs *devargs; + int ret; + + rte_spinlock_recursive_lock(&vdev_device_list_lock); + ret = insert_vdev(name, args, &dev); + if (ret == 0) { + ret = vdev_probe_all_drivers(dev); + if (ret) { + if (ret > 0) + VDEV_LOG(ERR, "no driver found for %s", name); + /* If fails, remove it from vdev list */ + devargs = dev->device.devargs; + TAILQ_REMOVE(&vdev_device_list, dev, next); + rte_devargs_remove(devargs->bus->name, devargs->name); + free(dev); + } + } + rte_spinlock_recursive_unlock(&vdev_device_list_lock); + return ret; +} + static int vdev_remove_driver(struct rte_vdev_device *dev) { @@ -275,7 +276,7 @@ vdev_remove_driver(struct rte_vdev_device *dev) const struct rte_vdev_driver *driver; if (!dev->device.driver) { - VDEV_LOG(DEBUG, "no driver attach to device %s\n", name); + VDEV_LOG(DEBUG, "no driver attach to device %s", name); return 1; } @@ -294,23 +295,98 @@ rte_vdev_uninit(const char *name) if (name == NULL) return -EINVAL; - dev = find_vdev(name); - if (!dev) - return -ENOENT; + rte_spinlock_recursive_lock(&vdev_device_list_lock); - devargs = dev->device.devargs; + dev = find_vdev(name); + if (!dev) { + ret = -ENOENT; + goto unlock; + } ret = vdev_remove_driver(dev); if (ret) - return ret; + goto unlock; TAILQ_REMOVE(&vdev_device_list, dev, next); + devargs = dev->device.devargs; + rte_devargs_remove(devargs->bus->name, devargs->name); + free(dev); - TAILQ_REMOVE(&devargs_list, devargs, next); 
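The rte_vdev_uninit() rework here funnels every exit through a single unlock label, which keeps the newly introduced recursive lock balanced on all error paths. The shape of that pattern, sketched with a pthread mutex standing in for rte_spinlock_recursive_t:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int
uninit(int present)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!present) {
		ret = -ENOENT;
		goto unlock;	/* never return with the lock held */
	}
	/* ... remove from the list and free resources ... */
unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int
main(void)
{
	printf("%d %d\n", uninit(1), uninit(0));
	return 0;
}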
+unlock: + rte_spinlock_recursive_unlock(&vdev_device_list_lock); + return ret; +} + +struct vdev_param { +#define VDEV_SCAN_REQ 1 +#define VDEV_SCAN_ONE 2 +#define VDEV_SCAN_REP 3 + int type; + int num; + char name[RTE_DEV_NAME_MAX_LEN]; +}; + +static int vdev_plug(struct rte_device *dev); + +/** + * This function works as the action for both primary and secondary process + * for static vdev discovery when a secondary process is booting. + * + * step 1, secondary process sends a sync request to ask for vdev in primary; + * step 2, primary process receives the request, and send vdevs one by one; + * step 3, primary process sends back reply, which indicates how many vdevs + * are sent. + */ +static int +vdev_action(const struct rte_mp_msg *mp_msg, const void *peer) +{ + struct rte_vdev_device *dev; + struct rte_mp_msg mp_resp; + struct vdev_param *ou = (struct vdev_param *)&mp_resp.param; + const struct vdev_param *in = (const struct vdev_param *)mp_msg->param; + const char *devname; + int num; + + strlcpy(mp_resp.name, VDEV_MP_KEY, sizeof(mp_resp.name)); + mp_resp.len_param = sizeof(*ou); + mp_resp.num_fds = 0; + + switch (in->type) { + case VDEV_SCAN_REQ: + ou->type = VDEV_SCAN_ONE; + ou->num = 1; + num = 0; + + rte_spinlock_recursive_lock(&vdev_device_list_lock); + TAILQ_FOREACH(dev, &vdev_device_list, next) { + devname = rte_vdev_device_name(dev); + if (strlen(devname) == 0) { + VDEV_LOG(INFO, "vdev with no name is not sent"); + continue; + } + VDEV_LOG(INFO, "send vdev, %s", devname); + strlcpy(ou->name, devname, RTE_DEV_NAME_MAX_LEN); + if (rte_mp_sendmsg(&mp_resp) < 0) + VDEV_LOG(ERR, "send vdev, %s, failed, %s", + devname, strerror(rte_errno)); + num++; + } + rte_spinlock_recursive_unlock(&vdev_device_list_lock); + + ou->type = VDEV_SCAN_REP; + ou->num = num; + if (rte_mp_reply(&mp_resp, peer) < 0) + VDEV_LOG(ERR, "Failed to reply a scan request"); + break; + case VDEV_SCAN_ONE: + VDEV_LOG(INFO, "receive vdev, %s", in->name); + if (insert_vdev(in->name, NULL, NULL) < 0) + VDEV_LOG(ERR, "failed to add vdev, %s", in->name); + break; + default: + VDEV_LOG(ERR, "vdev cannot recognize this message"); + } - free(devargs->args); - free(devargs); - free(dev); return 0; } @@ -321,13 +397,41 @@ vdev_scan(void) struct rte_devargs *devargs; struct vdev_custom_scan *custom_scan; + if (rte_mp_action_register(VDEV_MP_KEY, vdev_action) < 0 && + rte_errno != EEXIST) { + VDEV_LOG(ERR, "Failed to add vdev mp action"); + return -1; + } + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + struct rte_mp_msg mp_req, *mp_rep; + struct rte_mp_reply mp_reply; + struct timespec ts = {.tv_sec = 5, .tv_nsec = 0}; + struct vdev_param *req = (struct vdev_param *)mp_req.param; + struct vdev_param *resp; + + strlcpy(mp_req.name, VDEV_MP_KEY, sizeof(mp_req.name)); + mp_req.len_param = sizeof(*req); + mp_req.num_fds = 0; + req->type = VDEV_SCAN_REQ; + if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 && + mp_reply.nb_received == 1) { + mp_rep = &mp_reply.msgs[0]; + resp = (struct vdev_param *)mp_rep->param; + VDEV_LOG(INFO, "Received %d vdevs", resp->num); + } else + VDEV_LOG(ERR, "Failed to request vdev from primary"); + + /* Fall through to allow private vdevs in secondary process */ + } + /* call custom scan callbacks if any */ rte_spinlock_lock(&vdev_custom_scan_lock); TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) { if (custom_scan->callback != NULL) /* * the callback should update devargs list - * by calling rte_eal_devargs_insert() with + * by calling rte_devargs_insert() with * devargs.bus = 
rte_bus_find_by_name("vdev"); * devargs.type = RTE_DEVTYPE_VIRTUAL; * devargs.policy = RTE_DEV_WHITELISTED; @@ -337,24 +441,27 @@ vdev_scan(void) rte_spinlock_unlock(&vdev_custom_scan_lock); /* for virtual devices we scan the devargs_list populated via cmdline */ - TAILQ_FOREACH(devargs, &devargs_list, next) { - - if (devargs->bus != &rte_vdev_bus) - continue; - - dev = find_vdev(devargs->name); - if (dev) - continue; + RTE_EAL_DEVARGS_FOREACH("vdev", devargs) { dev = calloc(1, sizeof(*dev)); if (!dev) return -1; + rte_spinlock_recursive_lock(&vdev_device_list_lock); + + if (find_vdev(devargs->name)) { + rte_spinlock_recursive_unlock(&vdev_device_list_lock); + free(dev); + continue; + } + dev->device.devargs = devargs; dev->device.numa_node = SOCKET_ID_ANY; dev->device.name = devargs->name; TAILQ_INSERT_TAIL(&vdev_device_list, dev, next); + + rte_spinlock_recursive_unlock(&vdev_device_list_lock); } return 0; @@ -368,12 +475,16 @@ vdev_probe(void) /* call the init function for each virtual device */ TAILQ_FOREACH(dev, &vdev_device_list, next) { + /* we don't use the vdev lock here, as it's only used in DPDK + * initialization; and we don't want to hold such a lock when + * we call each driver probe. + */ if (dev->device.driver) continue; if (vdev_probe_all_drivers(dev)) { - VDEV_LOG(ERR, "failed to initialize %s device\n", + VDEV_LOG(ERR, "failed to initialize %s device", rte_vdev_device_name(dev)); ret = -1; } @@ -386,17 +497,24 @@ static struct rte_device * vdev_find_device(const struct rte_device *start, rte_dev_cmp_t cmp, const void *data) { + const struct rte_vdev_device *vstart; struct rte_vdev_device *dev; - TAILQ_FOREACH(dev, &vdev_device_list, next) { - if (start && &dev->device == start) { - start = NULL; - continue; - } + rte_spinlock_recursive_lock(&vdev_device_list_lock); + if (start != NULL) { + vstart = RTE_DEV_TO_VDEV_CONST(start); + dev = TAILQ_NEXT(vstart, next); + } else { + dev = TAILQ_FIRST(&vdev_device_list); + } + while (dev != NULL) { if (cmp(&dev->device, data) == 0) - return &dev->device; + break; + dev = TAILQ_NEXT(dev, next); } - return NULL; + rte_spinlock_recursive_unlock(&vdev_device_list_lock); + + return dev ? 
&dev->device : NULL; } static int diff --git a/drivers/bus/vmbus/Makefile b/drivers/bus/vmbus/Makefile new file mode 100644 index 00000000..deee9dd1 --- /dev/null +++ b/drivers/bus/vmbus/Makefile @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: BSD-3-Clause + +include $(RTE_SDK)/mk/rte.vars.mk + +LIB = librte_bus_vmbus.a +LIBABIVER := 1 +EXPORT_MAP := rte_bus_vmbus_version.map + +CFLAGS += -I$(SRCDIR) +CFLAGS += -O3 $(WERROR_FLAGS) +CFLAGS += -DALLOW_EXPERIMENTAL_API + +ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),) +SYSTEM := linux +endif +ifneq ($(CONFIG_RTE_EXEC_ENV_BSDAPP),) +$(error "VMBUS not implemented for BSD yet") +endif + +CFLAGS += -I$(RTE_SDK)/drivers/bus/vmbus/$(SYSTEM) +CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common +CFLAGS += -I$(RTE_SDK)/lib/librte_eal/$(SYSTEM)app/eal + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev + +include $(RTE_SDK)/drivers/bus/vmbus/$(SYSTEM)/Makefile +SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) := $(addprefix $(SYSTEM)/,$(SRCS)) +SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_common.c +SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_channel.c vmbus_bufring.c +SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_common_uio.c + +SYMLINK-$(CONFIG_RTE_LIBRTE_VMBUS)-include += rte_bus_vmbus.h +SYMLINK-$(CONFIG_RTE_LIBRTE_VMBUS)-include += rte_vmbus_reg.h + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/bus/vmbus/linux/Makefile b/drivers/bus/vmbus/linux/Makefile new file mode 100644 index 00000000..ef0d30b2 --- /dev/null +++ b/drivers/bus/vmbus/linux/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: BSD-3-Clause + +SRCS += vmbus_bus.c vmbus_uio.c diff --git a/drivers/bus/vmbus/linux/vmbus_bus.c b/drivers/bus/vmbus/linux/vmbus_bus.c new file mode 100644 index 00000000..52d6a3c0 --- /dev/null +++ b/drivers/bus/vmbus/linux/vmbus_bus.c @@ -0,0 +1,355 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018, Microsoft Corporation. + * All Rights Reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eal_filesystem.h" +#include "private.h" + +/** Pathname of VMBUS devices directory. 
*/ +#define SYSFS_VMBUS_DEVICES "/sys/bus/vmbus/devices" + +extern struct rte_vmbus_bus rte_vmbus_bus; + +/* Read sysfs file to get UUID */ +static int +parse_sysfs_uuid(const char *filename, rte_uuid_t uu) +{ + char buf[BUFSIZ]; + char *cp, *in = buf; + FILE *f; + + f = fopen(filename, "r"); + if (f == NULL) { + VMBUS_LOG(ERR, "cannot open sysfs value %s: %s", + filename, strerror(errno)); + return -1; + } + + if (fgets(buf, sizeof(buf), f) == NULL) { + VMBUS_LOG(ERR, "cannot read sysfs value %s", + filename); + fclose(f); + return -1; + } + fclose(f); + + cp = strchr(buf, '\n'); + if (cp) + *cp = '\0'; + + /* strip { } notation */ + if (buf[0] == '{') { + in = buf + 1; + cp = strchr(in, '}'); + if (cp) + *cp = '\0'; + } + + if (rte_uuid_parse(in, uu) < 0) { + VMBUS_LOG(ERR, "%s %s not a valid UUID", + filename, buf); + return -1; + } + + return 0; +} + +static int +get_sysfs_string(const char *filename, char *buf, size_t buflen) +{ + char *cp; + FILE *f; + + f = fopen(filename, "r"); + if (f == NULL) { + VMBUS_LOG(ERR, "cannot open sysfs value %s:%s", + filename, strerror(errno)); + return -1; + } + + if (fgets(buf, buflen, f) == NULL) { + VMBUS_LOG(ERR, "cannot read sysfs value %s", + filename); + fclose(f); + return -1; + } + fclose(f); + + /* remove trailing newline */ + cp = memchr(buf, '\n', buflen); + if (cp) + *cp = '\0'; + + return 0; +} + +static int +vmbus_get_uio_dev(const struct rte_vmbus_device *dev, + char *dstbuf, size_t buflen) +{ + char dirname[PATH_MAX]; + unsigned int uio_num; + struct dirent *e; + DIR *dir; + + /* Assume recent kernel where uio is in uio/uioX */ + snprintf(dirname, sizeof(dirname), + SYSFS_VMBUS_DEVICES "/%s/uio", dev->device.name); + + dir = opendir(dirname); + if (dir == NULL) + return -1; /* Not a UIO device */ + + /* take the first file starting with "uio" */ + while ((e = readdir(dir)) != NULL) { + const int prefix_len = 3; + char *endptr; + + if (strncmp(e->d_name, "uio", prefix_len) != 0) + continue; + + /* try uio%d */ + errno = 0; + uio_num = strtoull(e->d_name + prefix_len, &endptr, 10); + if (errno == 0 && endptr != (e->d_name + prefix_len)) { + snprintf(dstbuf, buflen, "%s/uio%u", dirname, uio_num); + break; + } + } + closedir(dir); + + if (e == NULL) + return -1; + + return uio_num; +} + +/* Check map names with kernel names */ +static const char *map_names[VMBUS_MAX_RESOURCE] = { + [HV_TXRX_RING_MAP] = "txrx_rings", + [HV_INT_PAGE_MAP] = "int_page", + [HV_MON_PAGE_MAP] = "monitor_page", + [HV_RECV_BUF_MAP] = "recv:", + [HV_SEND_BUF_MAP] = "send:", +}; + + +/* map the resources of a vmbus device in virtual memory */ +int +rte_vmbus_map_device(struct rte_vmbus_device *dev) +{ + char uioname[PATH_MAX], filename[PATH_MAX]; + char dirname[PATH_MAX], mapname[64]; + int i; + + dev->uio_num = vmbus_get_uio_dev(dev, uioname, sizeof(uioname)); + if (dev->uio_num < 0) { + VMBUS_LOG(DEBUG, "Not managed by UIO driver, skipped"); + return 1; + } + + /* Extract resource value */ + for (i = 0; i < VMBUS_MAX_RESOURCE; i++) { + struct rte_mem_resource *res = &dev->resource[i]; + unsigned long len, gpad = 0; + char *cp; + + snprintf(dirname, sizeof(dirname), + "%s/maps/map%d", uioname, i); + + snprintf(filename, sizeof(filename), + "%s/name", dirname); + + if (get_sysfs_string(filename, mapname, sizeof(mapname)) < 0) { + VMBUS_LOG(ERR, "could not read %s", filename); + return -1; + } + + if (strncmp(map_names[i], mapname, strlen(map_names[i])) != 0) { + VMBUS_LOG(ERR, + "unexpected resource %s (expected %s)", + mapname, map_names[i]); + return -1; + } + + 
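parse_sysfs_uuid() above has to cope with two sysfs spellings of the device UUID, bare and brace-wrapped, both newline-terminated. The normalization step in isolation (normalize_uuid is a helper invented for the sketch, fed a sample UUID):

#include <stdio.h>
#include <string.h>

/* Trim the trailing newline and an optional { } wrapper, leaving a
 * pointer to the bare UUID text inside buf. */
static const char *
normalize_uuid(char *buf)
{
	char *out = buf;
	char *cp = strchr(buf, '\n');

	if (cp)
		*cp = '\0';

	if (buf[0] == '{') {
		out = buf + 1;
		cp = strchr(out, '}');
		if (cp)
			*cp = '\0';
	}
	return out;
}

int
main(void)
{
	char line[] = "{7a08391f-f5a0-4ac0-9802-d13fd964f8df}\n";

	printf("%s\n", normalize_uuid(line));
	return 0;
}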
snprintf(filename, sizeof(filename), + "%s/size", dirname); + if (eal_parse_sysfs_value(filename, &len) < 0) { + VMBUS_LOG(ERR, + "could not read %s", filename); + return -1; + } + res->len = len; + + /* both send and receive buffers have gpad in name */ + cp = memchr(mapname, ':', sizeof(mapname)); + if (cp) + gpad = strtoul(cp+1, NULL, 0); + + /* put the GPAD value in physical address */ + res->phys_addr = gpad; + } + + return vmbus_uio_map_resource(dev); +} + +void +rte_vmbus_unmap_device(struct rte_vmbus_device *dev) +{ + vmbus_uio_unmap_resource(dev); +} + +/* Scan one vmbus sysfs entry, and fill the devices list from it. */ +static int +vmbus_scan_one(const char *name) +{ + struct rte_vmbus_device *dev, *dev2; + char filename[PATH_MAX]; + char dirname[PATH_MAX]; + unsigned long tmp; + + dev = calloc(1, sizeof(*dev)); + if (dev == NULL) + return -1; + + dev->device.name = strdup(name); + if (!dev->device.name) + goto error; + + /* sysfs base directory + * /sys/bus/vmbus/devices/7a08391f-f5a0-4ac0-9802-d13fd964f8df + * or on older kernel + * /sys/bus/vmbus/devices/vmbus_1 + */ + snprintf(dirname, sizeof(dirname), "%s/%s", + SYSFS_VMBUS_DEVICES, name); + + /* get device id */ + snprintf(filename, sizeof(filename), "%s/device_id", dirname); + if (parse_sysfs_uuid(filename, dev->device_id) < 0) + goto error; + + /* get device class */ + snprintf(filename, sizeof(filename), "%s/class_id", dirname); + if (parse_sysfs_uuid(filename, dev->class_id) < 0) + goto error; + + /* get relid */ + snprintf(filename, sizeof(filename), "%s/id", dirname); + if (eal_parse_sysfs_value(filename, &tmp) < 0) + goto error; + dev->relid = tmp; + + /* get monitor id */ + snprintf(filename, sizeof(filename), "%s/monitor_id", dirname); + if (eal_parse_sysfs_value(filename, &tmp) < 0) + goto error; + dev->monitor_id = tmp; + + /* get numa node (if present) */ + snprintf(filename, sizeof(filename), "%s/numa_node", + dirname); + + if (access(filename, R_OK) == 0) { + if (eal_parse_sysfs_value(filename, &tmp) < 0) + goto error; + dev->device.numa_node = tmp; + } else { + /* if no NUMA support, set default to 0 */ + dev->device.numa_node = SOCKET_ID_ANY; + } + + /* device is valid, add in list (sorted) */ + VMBUS_LOG(DEBUG, "Adding vmbus device %s", name); + + TAILQ_FOREACH(dev2, &rte_vmbus_bus.device_list, next) { + int ret; + + ret = rte_uuid_compare(dev->device_id, dev2->device_id); + if (ret > 0) + continue; + + if (ret < 0) { + vmbus_insert_device(dev2, dev); + } else { /* already registered */ + VMBUS_LOG(NOTICE, + "%s already registered", name); + free(dev); + } + return 0; + } + + vmbus_add_device(dev); + return 0; +error: + VMBUS_LOG(DEBUG, "failed"); + + free(dev); + return -1; +} + +/* + * Scan the content of the vmbus, and the devices in the devices list + */ +int +rte_vmbus_scan(void) +{ + struct dirent *e; + DIR *dir; + + dir = opendir(SYSFS_VMBUS_DEVICES); + if (dir == NULL) { + if (errno == ENOENT) + return 0; + + VMBUS_LOG(ERR, "opendir %s failed: %s", + SYSFS_VMBUS_DEVICES, strerror(errno)); + return -1; + } + + while ((e = readdir(dir)) != NULL) { + if (e->d_name[0] == '.') + continue; + + if (vmbus_scan_one(e->d_name) < 0) + goto error; + } + closedir(dir); + return 0; + +error: + closedir(dir); + return -1; +} + +void rte_vmbus_irq_mask(struct rte_vmbus_device *device) +{ + vmbus_uio_irq_control(device, 1); +} + +void rte_vmbus_irq_unmask(struct rte_vmbus_device *device) +{ + vmbus_uio_irq_control(device, 0); +} + +int rte_vmbus_irq_read(struct rte_vmbus_device *device) +{ + return 
vmbus_uio_irq_read(device); +} diff --git a/drivers/bus/vmbus/linux/vmbus_uio.c b/drivers/bus/vmbus/linux/vmbus_uio.c new file mode 100644 index 00000000..856c6d66 --- /dev/null +++ b/drivers/bus/vmbus/linux/vmbus_uio.c @@ -0,0 +1,398 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018, Microsoft Corporation. + * All Rights Reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "private.h" + +/** Pathname of VMBUS devices directory. */ +#define SYSFS_VMBUS_DEVICES "/sys/bus/vmbus/devices" + +static void *vmbus_map_addr; + +/* Control interrupts */ +void vmbus_uio_irq_control(struct rte_vmbus_device *dev, int32_t onoff) +{ + if (write(dev->intr_handle.fd, &onoff, sizeof(onoff)) < 0) { + VMBUS_LOG(ERR, "cannot write to %d:%s", + dev->intr_handle.fd, strerror(errno)); + } +} + +int vmbus_uio_irq_read(struct rte_vmbus_device *dev) +{ + int32_t count; + int cc; + + cc = read(dev->intr_handle.fd, &count, sizeof(count)); + if (cc < (int)sizeof(count)) { + if (cc < 0) { + VMBUS_LOG(ERR, "IRQ read failed %s", + strerror(errno)); + return -errno; + } + VMBUS_LOG(ERR, "can't read IRQ count"); + return -EINVAL; + } + + return count; +} + +void +vmbus_uio_free_resource(struct rte_vmbus_device *dev, + struct mapped_vmbus_resource *uio_res) +{ + rte_free(uio_res); + + if (dev->intr_handle.uio_cfg_fd >= 0) { + close(dev->intr_handle.uio_cfg_fd); + dev->intr_handle.uio_cfg_fd = -1; + } + + if (dev->intr_handle.fd >= 0) { + close(dev->intr_handle.fd); + dev->intr_handle.fd = -1; + dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; + } +} + +int +vmbus_uio_alloc_resource(struct rte_vmbus_device *dev, + struct mapped_vmbus_resource **uio_res) +{ + char devname[PATH_MAX]; /* contains the /dev/uioX */ + + /* save fd if in primary process */ + snprintf(devname, sizeof(devname), "/dev/uio%u", dev->uio_num); + dev->intr_handle.fd = open(devname, O_RDWR); + if (dev->intr_handle.fd < 0) { + VMBUS_LOG(ERR, "Cannot open %s: %s", + devname, strerror(errno)); + goto error; + } + dev->intr_handle.type = RTE_INTR_HANDLE_UIO_INTX; + + /* allocate the mapping details for secondary processes*/ + *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0); + if (*uio_res == NULL) { + VMBUS_LOG(ERR, "cannot store uio mmap details"); + goto error; + } + + strlcpy((*uio_res)->path, devname, PATH_MAX); + rte_uuid_copy((*uio_res)->id, dev->device_id); + + return 0; + +error: + vmbus_uio_free_resource(dev, *uio_res); + return -1; +} + +static int +find_max_end_va(const struct rte_memseg_list *msl, void *arg) +{ + size_t sz = msl->memseg_arr.len * msl->page_sz; + void *end_va = RTE_PTR_ADD(msl->base_va, sz); + void **max_va = arg; + + if (*max_va < end_va) + *max_va = end_va; + return 0; +} + +/* + * TODO: this should be part of memseg api. + * code is duplicated from PCI. 
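vmbus_uio_irq_control() and vmbus_uio_irq_read() above follow the standard UIO convention: writing a 32-bit 1 or 0 to the /dev/uioX file descriptor unmasks or masks the interrupt, and each read() blocks until an event arrives, returning the running event count. A minimal consumer sketch (the device node is illustrative, and a bound UIO device is needed to actually run it):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int32_t unmask = 1, count;
	int fd = open("/dev/uio0", O_RDWR);	/* placeholder node */

	if (fd < 0)
		return 1;

	/* enable interrupts, then block until the next one fires */
	if (write(fd, &unmask, sizeof(unmask)) != sizeof(unmask))
		return 1;
	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("interrupt; events so far: %d\n", count);

	close(fd);
	return 0;
}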
+ */ +static void * +vmbus_find_max_end_va(void) +{ + void *va = NULL; + + rte_memseg_list_walk(find_max_end_va, &va); + return va; +} + +int +vmbus_uio_map_resource_by_index(struct rte_vmbus_device *dev, int idx, + struct mapped_vmbus_resource *uio_res, + int flags) +{ + size_t size = dev->resource[idx].len; + struct vmbus_map *maps = uio_res->maps; + void *mapaddr; + off_t offset; + int fd; + + /* devname for mmap */ + fd = open(uio_res->path, O_RDWR); + if (fd < 0) { + VMBUS_LOG(ERR, "Cannot open %s: %s", + uio_res->path, strerror(errno)); + return -1; + } + + /* try mapping somewhere close to the end of hugepages */ + if (vmbus_map_addr == NULL) + vmbus_map_addr = vmbus_find_max_end_va(); + + /* offset is special in uio; it indicates which resource */ + offset = idx * PAGE_SIZE; + + mapaddr = vmbus_map_resource(vmbus_map_addr, fd, offset, size, flags); + close(fd); + + if (mapaddr == MAP_FAILED) + return -1; + + dev->resource[idx].addr = mapaddr; + vmbus_map_addr = RTE_PTR_ADD(mapaddr, size); + + /* Record result of successful mapping for use by secondary */ + maps[idx].addr = mapaddr; + maps[idx].size = size; + + return 0; +} + +static int vmbus_uio_map_primary(struct vmbus_channel *chan, + void **ring_buf, uint32_t *ring_size) +{ + struct mapped_vmbus_resource *uio_res; + + uio_res = vmbus_uio_find_resource(chan->device); + if (!uio_res) { + VMBUS_LOG(ERR, "cannot find resources!"); + return -ENOMEM; + } + + if (uio_res->nb_maps < VMBUS_MAX_RESOURCE) { + VMBUS_LOG(ERR, "VMBUS: only %u resources found!", + uio_res->nb_maps); + return -EINVAL; + } + + *ring_size = uio_res->maps[HV_TXRX_RING_MAP].size / 2; + *ring_buf = uio_res->maps[HV_TXRX_RING_MAP].addr; + return 0; +} + +static int vmbus_uio_map_subchan(const struct rte_vmbus_device *dev, + const struct vmbus_channel *chan, + void **ring_buf, uint32_t *ring_size) +{ + char ring_path[PATH_MAX]; + size_t file_size; + struct stat sb; + int fd; + + snprintf(ring_path, sizeof(ring_path), + "%s/%s/channels/%u/ring", + SYSFS_VMBUS_DEVICES, dev->device.name, + chan->relid); + + fd = open(ring_path, O_RDWR); + if (fd < 0) { + VMBUS_LOG(ERR, "Cannot open %s: %s", + ring_path, strerror(errno)); + return -errno; + } + + if (fstat(fd, &sb) < 0) { + VMBUS_LOG(ERR, "Cannot stat %s: %s", + ring_path, strerror(errno)); + close(fd); + return -errno; + } + file_size = sb.st_size; + + if (file_size == 0 || (file_size & (PAGE_SIZE - 1))) { + VMBUS_LOG(ERR, "incorrect size %s: %zu", + ring_path, file_size); + + close(fd); + return -EINVAL; + } + + *ring_size = file_size / 2; + *ring_buf = vmbus_map_resource(vmbus_map_addr, fd, + 0, sb.st_size, 0); + close(fd); + + if (*ring_buf == MAP_FAILED) + return -EIO; + + vmbus_map_addr = RTE_PTR_ADD(*ring_buf, file_size); + return 0; +} + +int vmbus_uio_map_rings(struct vmbus_channel *chan) +{ + const struct rte_vmbus_device *dev = chan->device; + uint32_t ring_size; + void *ring_buf; + int ret; + + /* Primary channel */ + if (chan->subchannel_id == 0) + ret = vmbus_uio_map_primary(chan, &ring_buf, &ring_size); + else + ret = vmbus_uio_map_subchan(dev, chan, &ring_buf, &ring_size); + + if (ret) + return ret; + + vmbus_br_setup(&chan->txbr, ring_buf, ring_size); + vmbus_br_setup(&chan->rxbr, (char *)ring_buf + ring_size, ring_size); + return 0; +} + +static int vmbus_uio_sysfs_read(const char *dir, const char *name, + unsigned long *val, unsigned long max_range) +{ + char path[PATH_MAX]; + FILE *f; + int ret; + + snprintf(path, sizeof(path), "%s/%s", dir, name); + f = fopen(path, "r"); + if (!f) { + VMBUS_LOG(ERR,
"can't open %s:%s", + path, strerror(errno)); + return -errno; + } + + if (fscanf(f, "%lu", val) != 1) + ret = -EIO; + else if (*val > max_range) + ret = -ERANGE; + else + ret = 0; + fclose(f); + + return ret; +} + +static bool vmbus_uio_ring_present(const struct rte_vmbus_device *dev, + uint32_t relid) +{ + char ring_path[PATH_MAX]; + + /* Check if kernel has subchannel sysfs files */ + snprintf(ring_path, sizeof(ring_path), + "%s/%s/channels/%u/ring", + SYSFS_VMBUS_DEVICES, dev->device.name, relid); + + return access(ring_path, R_OK|W_OK) == 0; +} + +bool vmbus_uio_subchannels_supported(const struct rte_vmbus_device *dev, + const struct vmbus_channel *chan) +{ + return vmbus_uio_ring_present(dev, chan->relid); +} + +static bool vmbus_isnew_subchannel(struct vmbus_channel *primary, + unsigned long id) +{ + const struct vmbus_channel *c; + + STAILQ_FOREACH(c, &primary->subchannel_list, next) { + if (c->relid == id) + return false; + } + return true; +} + +int vmbus_uio_get_subchan(struct vmbus_channel *primary, + struct vmbus_channel **subchan) +{ + const struct rte_vmbus_device *dev = primary->device; + char chan_path[PATH_MAX], subchan_path[PATH_MAX]; + struct dirent *ent; + DIR *chan_dir; + + snprintf(chan_path, sizeof(chan_path), + "%s/%s/channels", + SYSFS_VMBUS_DEVICES, dev->device.name); + + chan_dir = opendir(chan_path); + if (!chan_dir) { + VMBUS_LOG(ERR, "cannot open %s: %s", + chan_path, strerror(errno)); + return -errno; + } + + while ((ent = readdir(chan_dir))) { + unsigned long relid, subid, monid; + char *endp; + int err; + + if (ent->d_name[0] == '.') + continue; + + errno = 0; + relid = strtoul(ent->d_name, &endp, 0); + if (*endp || errno != 0 || relid > UINT16_MAX) { + VMBUS_LOG(NOTICE, "not a valid channel relid: %s", + ent->d_name); + continue; + } + + snprintf(subchan_path, sizeof(subchan_path), "%s/%lu", + chan_path, relid); + err = vmbus_uio_sysfs_read(subchan_path, "subchannel_id", + &subid, UINT16_MAX); + if (err) { + VMBUS_LOG(NOTICE, "invalid subchannel id %lu", + subid); + closedir(chan_dir); + return err; + } + + if (subid == 0) + continue; /* skip primary channel */ + + if (!vmbus_isnew_subchannel(primary, relid)) + continue; + + if (!vmbus_uio_ring_present(dev, relid)) + continue; /* Ring may not be ready yet */ + + err = vmbus_uio_sysfs_read(subchan_path, "monitor_id", + &monid, UINT8_MAX); + if (err) { + VMBUS_LOG(NOTICE, "invalid monitor id %lu", + monid); + return err; + } + + err = vmbus_chan_create(dev, relid, subid, monid, subchan); + if (err) { + VMBUS_LOG(NOTICE, "subchannel setup failed"); + return err; + } + break; + } + closedir(chan_dir); + + return (ent == NULL) ? -ENOENT : 0; +} diff --git a/drivers/bus/vmbus/meson.build b/drivers/bus/vmbus/meson.build new file mode 100644 index 00000000..18daabec --- /dev/null +++ b/drivers/bus/vmbus/meson.build @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: BSD-3-Clause + +allow_experimental_apis = true + +install_headers('rte_bus_vmbus.h','rte_vmbus_reg.h') + +sources = files('vmbus_common.c', + 'vmbus_channel.c', + 'vmbus_bufring.c', + 'vmbus_common_uio.c') + +if host_machine.system() == 'linux' + sources += files('linux/vmbus_bus.c', + 'linux/vmbus_uio.c') + includes += include_directories('linux') +else + build = false +endif diff --git a/drivers/bus/vmbus/private.h b/drivers/bus/vmbus/private.h new file mode 100644 index 00000000..9964fc42 --- /dev/null +++ b/drivers/bus/vmbus/private.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018, Microsoft Corporation. 
+ * All Rights Reserved. + */ + +#ifndef _VMBUS_PRIVATE_H_ +#define _VMBUS_PRIVATE_H_ + +#include +#include +#include +#include + +#ifndef PAGE_SIZE +#define PAGE_SIZE 4096 +#endif + +extern int vmbus_logtype_bus; +#define VMBUS_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, vmbus_logtype_bus, "%s(): " fmt "\n", \ + __func__, ##args) + +struct vmbus_br { + struct vmbus_bufring *vbr; + uint32_t dsize; + uint32_t windex; /* next available location */ +}; + +#define UIO_NAME_MAX 64 + +struct vmbus_map { + void *addr; /* user mmap of resource */ + uint64_t size; /* length */ +}; + +/* + * For multi-process we need to reproduce all vmbus mappings in secondary + * processes, so save them in a tailq. + */ +struct mapped_vmbus_resource { + TAILQ_ENTRY(mapped_vmbus_resource) next; + + rte_uuid_t id; + int nb_maps; + struct vmbus_map maps[VMBUS_MAX_RESOURCE]; + char path[PATH_MAX]; +}; + +TAILQ_HEAD(mapped_vmbus_res_list, mapped_vmbus_resource); + +#define HV_MON_TRIG_LEN 32 +#define HV_MON_TRIG_MAX 4 + +struct vmbus_channel { + STAILQ_HEAD(, vmbus_channel) subchannel_list; + STAILQ_ENTRY(vmbus_channel) next; + const struct rte_vmbus_device *device; + + struct vmbus_br rxbr; + struct vmbus_br txbr; + + uint16_t relid; + uint16_t subchannel_id; + uint8_t monitor_id; +}; + +#define VMBUS_MAX_CHANNELS 64 + +int vmbus_chan_create(const struct rte_vmbus_device *device, + uint16_t relid, uint16_t subid, uint8_t monitor_id, + struct vmbus_channel **new_chan); + +void vmbus_add_device(struct rte_vmbus_device *vmbus_dev); +void vmbus_insert_device(struct rte_vmbus_device *exist_vmbus_dev, + struct rte_vmbus_device *new_vmbus_dev); +void vmbus_remove_device(struct rte_vmbus_device *vmbus_device); + +void vmbus_uio_irq_control(struct rte_vmbus_device *dev, int32_t onoff); +int vmbus_uio_irq_read(struct rte_vmbus_device *dev); + +int vmbus_uio_map_resource(struct rte_vmbus_device *dev); +void vmbus_uio_unmap_resource(struct rte_vmbus_device *dev); + +int vmbus_uio_alloc_resource(struct rte_vmbus_device *dev, + struct mapped_vmbus_resource **uio_res); +void vmbus_uio_free_resource(struct rte_vmbus_device *dev, + struct mapped_vmbus_resource *uio_res); + +struct mapped_vmbus_resource * +vmbus_uio_find_resource(const struct rte_vmbus_device *dev); +int vmbus_uio_map_resource_by_index(struct rte_vmbus_device *dev, int res_idx, + struct mapped_vmbus_resource *uio_res, + int flags); + +void *vmbus_map_resource(void *requested_addr, int fd, off_t offset, + size_t size, int additional_flags); +void vmbus_unmap_resource(void *requested_addr, size_t size); + +bool vmbus_uio_subchannels_supported(const struct rte_vmbus_device *dev, + const struct vmbus_channel *chan); +int vmbus_uio_get_subchan(struct vmbus_channel *primary, + struct vmbus_channel **subchan); +int vmbus_uio_map_rings(struct vmbus_channel *chan); + +void vmbus_br_setup(struct vmbus_br *br, void *buf, unsigned int blen); + +/* Amount of space available for write */ +static inline uint32_t +vmbus_br_availwrite(const struct vmbus_br *br, uint32_t windex) +{ + uint32_t rindex = br->vbr->rindex; + + if (windex >= rindex) + return br->dsize - (windex - rindex); + else + return rindex - windex; +} + +static inline uint32_t +vmbus_br_availread(const struct vmbus_br *br) +{ + return br->dsize - vmbus_br_availwrite(br, br->vbr->windex); +} + +int vmbus_txbr_write(struct vmbus_br *tbr, const struct iovec iov[], int iovlen, + bool *need_sig); + +int vmbus_rxbr_peek(const struct vmbus_br *rbr, void *data, size_t dlen); + +int vmbus_rxbr_read(struct vmbus_br 
*rbr, void *data, size_t dlen, size_t hlen); + +#endif /* _VMBUS_PRIVATE_H_ */ diff --git a/drivers/bus/vmbus/rte_bus_vmbus.h b/drivers/bus/vmbus/rte_bus_vmbus.h new file mode 100644 index 00000000..4a2c1f6f --- /dev/null +++ b/drivers/bus/vmbus/rte_bus_vmbus.h @@ -0,0 +1,407 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018, Microsoft Corporation. + * All Rights Reserved. + */ + +#ifndef _VMBUS_H_ +#define _VMBUS_H_ + +/** + * @file + * + * VMBUS Interface + */ +#ifdef __cplusplus extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* Forward declarations */ +struct rte_vmbus_device; +struct rte_vmbus_driver; +struct rte_vmbus_bus; +struct vmbus_channel; +struct vmbus_mon_page; + +TAILQ_HEAD(rte_vmbus_device_list, rte_vmbus_device); +TAILQ_HEAD(rte_vmbus_driver_list, rte_vmbus_driver); + +/* VMBus iterators */ +#define FOREACH_DEVICE_ON_VMBUS(p) \ + TAILQ_FOREACH(p, &(rte_vmbus_bus.device_list), next) + +#define FOREACH_DRIVER_ON_VMBUS(p) \ + TAILQ_FOREACH(p, &(rte_vmbus_bus.driver_list), next) + +/** Resource index mapping for UIO. */ +enum hv_uio_map { + HV_TXRX_RING_MAP = 0, + HV_INT_PAGE_MAP, + HV_MON_PAGE_MAP, + HV_RECV_BUF_MAP, + HV_SEND_BUF_MAP +}; +/** Maximum number of VMBUS resources. */ +#define VMBUS_MAX_RESOURCE 5 + +/** + * A structure describing a VMBUS device. + */ +struct rte_vmbus_device { + TAILQ_ENTRY(rte_vmbus_device) next; /**< Next probed VMBUS device */ + const struct rte_vmbus_driver *driver; /**< Associated driver */ + struct rte_device device; /**< Inherit core device */ + rte_uuid_t device_id; /**< VMBUS device id */ + rte_uuid_t class_id; /**< VMBUS device type */ + uint32_t relid; /**< id for primary */ + uint8_t monitor_id; /**< monitor page */ + int uio_num; /**< UIO device number */ + uint32_t *int_page; /**< VMBUS interrupt page */ + struct vmbus_channel *primary; /**< VMBUS primary channel */ + struct vmbus_mon_page *monitor_page; /**< VMBUS monitor page */ + + struct rte_intr_handle intr_handle; /**< Interrupt handle */ + struct rte_mem_resource resource[VMBUS_MAX_RESOURCE]; +}; + +/** + * Initialization function for the driver called during VMBUS probing. + */ typedef int (vmbus_probe_t)(struct rte_vmbus_driver *, + struct rte_vmbus_device *); + +/** + * Removal function for the driver, called during hot unplugging. + */ typedef int (vmbus_remove_t)(struct rte_vmbus_device *); + +/** + * A structure describing a VMBUS driver. + */ +struct rte_vmbus_driver { + TAILQ_ENTRY(rte_vmbus_driver) next; /**< Next in list. */ + struct rte_driver driver; + struct rte_vmbus_bus *bus; /**< VM bus reference. */ + vmbus_probe_t *probe; /**< Device Probe function. */ + vmbus_remove_t *remove; /**< Device Remove function. */ + + const rte_uuid_t *id_table; /**< ID table. */ +}; + + +/** + * Structure describing the VM bus + */ +struct rte_vmbus_bus { + struct rte_bus bus; /**< Inherit the generic class */ + struct rte_vmbus_device_list device_list; /**< List of devices */ + struct rte_vmbus_driver_list driver_list; /**< List of drivers */ +}; + +/** + * Scan the content of the VMBUS bus, and fill the devices list + * + * @return + * 0 on success, negative on error + */ +int rte_vmbus_scan(void); + +/** + * Probe the VMBUS bus + * + * @return + * - 0 on success. + * - !0 on error.
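+ * + * A minimal usage sketch (hypothetical application code, not part of + * this patch; normally rte_eal_init() drives both calls through the + * bus framework): + * + * if (rte_vmbus_scan() < 0 || rte_vmbus_probe() < 0) + * rte_exit(EXIT_FAILURE, "vmbus scan/probe failed\n");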
+ */ +int rte_vmbus_probe(void); + +/** + * Map the VMBUS device resources into user space virtual memory + * + * @param dev + * A pointer to a rte_vmbus_device structure describing the device + * to use + * + * @return + * 0 on success, negative on error and positive if no driver + * is found for the device. + */ +int rte_vmbus_map_device(struct rte_vmbus_device *dev); + +/** + * Unmap this device + * + * @param dev + * A pointer to a rte_vmbus_device structure describing the device + * to use + */ +void rte_vmbus_unmap_device(struct rte_vmbus_device *dev); + +/** + * Get connection to primary VMBUS channel + * + * @param device + * A pointer to a rte_vmbus_device structure describing the device + * @param chan + * A pointer to a VMBUS channel pointer that will be filled. + * @return + * - 0 Success; channel opened. + * - -ENOMEM: Not enough memory available. + * - -EINVAL: Regions could not be mapped. + */ +int rte_vmbus_chan_open(struct rte_vmbus_device *device, + struct vmbus_channel **chan); + +/** + * Free connection to VMBUS channel + * + * @param chan + * VMBUS channel + */ +void rte_vmbus_chan_close(struct vmbus_channel *chan); + +/** + * Get the maximum number of channels supported by the device + * + * @param device + * A pointer to a rte_vmbus_device structure describing the device + * @return + * Number of channels available. + */ +int rte_vmbus_max_channels(const struct rte_vmbus_device *device); + +/** + * Get a connection to a new secondary vmbus channel + * + * @param primary + * A pointer to primary VMBUS channel + * @param new_chan + * A pointer to a secondary VMBUS channel pointer that will be filled. + * @return + * - 0 Success; channel opened. + * - -ENOMEM: Not enough memory available. + * - -EINVAL: Regions could not be mapped. + */ +int rte_vmbus_subchan_open(struct vmbus_channel *primary, + struct vmbus_channel **new_chan); + +/** + * Disable IRQ for device + * + * @param device + * VMBUS device + */ +void rte_vmbus_irq_mask(struct rte_vmbus_device *device); + +/** + * Enable IRQ for device + * + * @param device + * VMBUS device + */ +void rte_vmbus_irq_unmask(struct rte_vmbus_device *device); + +/** + * Read (and wait) for IRQ + * + * @param device + * VMBUS device + * @return + * Interrupt event count read from the device, negative on error. + */ +int rte_vmbus_irq_read(struct rte_vmbus_device *device); + +/** + * Test if channel is empty + * + * @param channel + * Pointer to vmbus_channel structure. + * @return + * Return true if no data present in incoming ring. + */ +bool rte_vmbus_chan_rx_empty(const struct vmbus_channel *channel); + +/** + * Send the specified buffer on the given channel + * + * @param channel + * Pointer to vmbus_channel structure. + * @param type + * Type of packet that is being sent, e.g. negotiate, time + * packet, etc. + * @param data + * Pointer to the buffer to send + * @param dlen + * Number of bytes of data to send + * @param xact + * Identifier of the request + * @param flags + * Message type inband, rxbuf, gpa + * @param need_sig + * If non-NULL, set to true when the host must be signaled; if NULL, + * the host is signaled internally as needed (optional) + * + * Sends data in buffer directly to hyper-v via the vmbus + */ +int rte_vmbus_chan_send(struct vmbus_channel *channel, uint16_t type, + void *data, uint32_t dlen, + uint64_t xact, uint32_t flags, bool *need_sig); + +/** + * Explicitly signal host that data is available + * + * @param channel + * Pointer to vmbus_channel structure. + * + * Used when batching multiple sends and only signaling host + * after the last send.
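+ * + * A hedged sketch of the batching pattern (hypothetical caller code; + * only the rte_vmbus_* calls and the packet constants from + * rte_vmbus_reg.h are provided by this bus): + * + * bool need_sig = false; + * + * for (i = 0; i < n; i++) + * rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND, + * msg[i], len[i], xact + i, + * VMBUS_CHANPKT_FLAG_RC, &need_sig); + * if (need_sig) + * rte_vmbus_chan_signal_tx(chan);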
+ */ +void rte_vmbus_chan_signal_tx(const struct vmbus_channel *channel); + +/* Structure for scatter/gather I/O */ +struct iova_list { + rte_iova_t addr; + uint32_t len; +}; +#define MAX_PAGE_BUFFER_COUNT 32 + +/** + * Send a scattered buffer on the given channel + * + * @param channel + * Pointer to vmbus_channel structure. + * @param gpa + * Array of buffers to send + * @param gpacnt + * Number of elements in the gpa array + * @param data + * Pointer to additional data to send + * @param dlen + * Number of bytes of additional data to send + * @param xact + * Identifier of the request + * @param need_sig + * If non-NULL, set to true when the host must be signaled; if NULL, + * the host is signaled internally as needed (optional) + * + * Sends data in buffer directly to hyper-v via the vmbus + */ +int rte_vmbus_chan_send_sglist(struct vmbus_channel *channel, + struct vmbus_gpa gpa[], uint32_t gpacnt, + void *data, uint32_t dlen, + uint64_t xact, bool *need_sig); +/** + * Receive response to request on the given channel, + * skipping the channel header. + * + * @param chan + * Pointer to vmbus_channel structure. + * @param data + * Pointer to the buffer you want to receive the data into. + * @param len + * Pointer to size of receive buffer (in/out) + * @param request_id + * Pointer to received transaction id (optional) + * @return + * On success, returns 0 + * On failure, returns negative errno. + */ +int rte_vmbus_chan_recv(struct vmbus_channel *chan, + void *data, uint32_t *len, + uint64_t *request_id); + +/** + * Receive response to request on the given channel, + * including the channel header. + * + * @param chan + * Pointer to vmbus_channel structure. + * @param data + * Pointer to the buffer you want to receive the data into. + * @param len + * Pointer to size of receive buffer (in/out) + * @return + * On success, returns number of bytes read. + * On failure, returns negative errno. + */ +int rte_vmbus_chan_recv_raw(struct vmbus_channel *chan, + void *data, uint32_t *len); + +/** + * Notify host of bytes read (after recv_raw). + * Signals host if required. + * + * @param chan + * Pointer to vmbus_channel structure. + * @param bytes_read + * Number of bytes read since last signal + */ +void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read); + +/** + * Determine sub channel index of the given channel + * + * @param chan + * Pointer to vmbus_channel structure. + * @return + * Sub channel index (0 for primary) + */ +uint16_t rte_vmbus_sub_channel_index(const struct vmbus_channel *chan); + +/** + * Register a VMBUS driver. + * + * @param driver + * A pointer to a rte_vmbus_driver structure describing the driver + * to be registered. + */ +void rte_vmbus_register(struct rte_vmbus_driver *driver); + +/** + * Dump contents of the channel ring buffers, for debugging. + * + * @param f + * File to write the dump to. + * @param chan + * Pointer to vmbus_channel structure. + */ +void rte_vmbus_chan_dump(FILE *f, const struct vmbus_channel *chan); + +/** + * Unregister a VMBUS driver. + * + * @param driver + * A pointer to a rte_vmbus_driver structure describing the driver + * to be unregistered.
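+ * + * For reference, a hypothetical PMD would usually not call + * rte_vmbus_register()/rte_vmbus_unregister() directly, but rely on + * the RTE_PMD_REGISTER_VMBUS() helper defined below, e.g.: + * + * static const rte_uuid_t my_ids[] = { ... }; + * static struct rte_vmbus_driver my_drv = { + * .probe = my_probe, + * .remove = my_remove, + * .id_table = my_ids, + * }; + * RTE_PMD_REGISTER_VMBUS(net_my_example, my_drv);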
+ */ +void rte_vmbus_unregister(struct rte_vmbus_driver *driver); + +/** Helper for VMBUS device registration from driver instance */ +#define RTE_PMD_REGISTER_VMBUS(nm, vmbus_drv) \ + RTE_INIT(vmbusinitfn_ ##nm); \ + static void vmbusinitfn_ ##nm(void) \ + { \ + (vmbus_drv).driver.name = RTE_STR(nm); \ + rte_vmbus_register(&vmbus_drv); \ + } \ + RTE_PMD_EXPORT_NAME(nm, __COUNTER__) + +#ifdef __cplusplus +} +#endif + +#endif /* _VMBUS_H_ */ diff --git a/drivers/bus/vmbus/rte_bus_vmbus_version.map b/drivers/bus/vmbus/rte_bus_vmbus_version.map new file mode 100644 index 00000000..dabb9203 --- /dev/null +++ b/drivers/bus/vmbus/rte_bus_vmbus_version.map @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ + +DPDK_18.08 { + global: + + rte_vmbus_chan_close; + rte_vmbus_chan_open; + rte_vmbus_chan_recv; + rte_vmbus_chan_recv_raw; + rte_vmbus_chan_rx_empty; + rte_vmbus_chan_send; + rte_vmbus_chan_send_sglist; + rte_vmbus_chan_signal_read; + rte_vmbus_chan_signal_tx; + rte_vmbus_irq_mask; + rte_vmbus_irq_read; + rte_vmbus_irq_unmask; + rte_vmbus_map_device; + rte_vmbus_max_channels; + rte_vmbus_probe; + rte_vmbus_register; + rte_vmbus_scan; + rte_vmbus_sub_channel_index; + rte_vmbus_subchan_open; + rte_vmbus_unmap_device; + rte_vmbus_unregister; + + local: *; +}; diff --git a/drivers/bus/vmbus/rte_vmbus_reg.h b/drivers/bus/vmbus/rte_vmbus_reg.h new file mode 100644 index 00000000..f5a0693d --- /dev/null +++ b/drivers/bus/vmbus/rte_vmbus_reg.h @@ -0,0 +1,344 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018, Microsoft Corporation. + * All Rights Reserved. + */ + +#ifndef _VMBUS_REG_H_ +#define _VMBUS_REG_H_ + +/* + * Hyper-V SynIC message format. + */ +#define VMBUS_MSG_DSIZE_MAX 240 +#define VMBUS_MSG_SIZE 256 + +struct vmbus_message { + uint32_t type; /* HYPERV_MSGTYPE_ */ + uint8_t dsize; /* data size */ + uint8_t flags; /* VMBUS_MSGFLAG_ */ + uint16_t rsvd; + uint64_t id; + uint8_t data[VMBUS_MSG_DSIZE_MAX]; +} __rte_packed; + +#define VMBUS_MSGFLAG_PENDING 0x01 + +/* + * Hyper-V Monitor Notification Facility + */ + +struct vmbus_mon_trig { + uint32_t pending; + uint32_t armed; +} __rte_packed; + +#define VMBUS_MONTRIGS_MAX 4 +#define VMBUS_MONTRIG_LEN 32 + +/* + * Hyper-V Monitor Notification Facility + */ +struct hyperv_mon_param { + uint32_t connid; + uint16_t evtflag_ofs; + uint16_t rsvd; +} __rte_packed; + +struct vmbus_mon_page { + uint32_t state; + uint32_t rsvd1; + + struct vmbus_mon_trig trigs[VMBUS_MONTRIGS_MAX]; + uint8_t rsvd2[536]; + + uint16_t lat[VMBUS_MONTRIGS_MAX][VMBUS_MONTRIG_LEN]; + uint8_t rsvd3[256]; + + struct hyperv_mon_param + param[VMBUS_MONTRIGS_MAX][VMBUS_MONTRIG_LEN]; + uint8_t rsvd4[1984]; +} __rte_packed; + +/* + * Buffer ring + */ + +struct vmbus_bufring { + volatile uint32_t windex; + volatile uint32_t rindex; + + /* + * Interrupt mask {0,1} + * + * For TX bufring, host set this to 1, when it is processing + * the TX bufring, so that we can safely skip the TX event + * notification to host. + * + * For RX bufring, once this is set to 1 by us, host will not + * further dispatch interrupts to us, even if there are data + * pending on the RX bufring. This effectively disables the + * interrupt of the channel to which this RX bufring is attached. + */ + volatile uint32_t imask; + + /* + * Win8 uses some of the reserved bits to implement + * interrupt driven flow management. 
On the send side + * we can request that the receiver interrupt the sender + * when the ring transitions from being full to being able + * to handle a message of size "pending_send_sz". + * + * Add necessary state for this enhancement. + */ + volatile uint32_t pending_send; + uint32_t reserved1[12]; + + union { + struct { + uint32_t feat_pending_send_sz:1; + }; + uint32_t value; + } feature_bits; + + /* Pad it to PAGE_SIZE so that data starts on page boundary */ + uint8_t reserved2[4028]; + + /* + * Ring data starts here + RingDataStartOffset + * !!! DO NOT place any fields below this !!! + */ + uint8_t data[0]; +} __rte_packed; + +/* + * Channel packets + */ + +/* Channel packet flags */ +#define VMBUS_CHANPKT_TYPE_INBAND 0x0006 +#define VMBUS_CHANPKT_TYPE_RXBUF 0x0007 +#define VMBUS_CHANPKT_TYPE_GPA 0x0009 +#define VMBUS_CHANPKT_TYPE_COMP 0x000b + +#define VMBUS_CHANPKT_FLAG_NONE 0 +#define VMBUS_CHANPKT_FLAG_RC 0x0001 /* report completion */ + +#define VMBUS_CHANPKT_SIZE_SHIFT 3 +#define VMBUS_CHANPKT_SIZE_ALIGN (1 << VMBUS_CHANPKT_SIZE_SHIFT) +#define VMBUS_CHANPKT_HLEN_MIN \ + (sizeof(struct vmbus_chanpkt_hdr) >> VMBUS_CHANPKT_SIZE_SHIFT) + +static inline uint32_t +vmbus_chanpkt_getlen(uint16_t pktlen) +{ + return (uint32_t)pktlen << VMBUS_CHANPKT_SIZE_SHIFT; +} + +/* + * GPA structures. + */ +struct vmbus_gpa_range { + uint32_t len; + uint32_t ofs; + uint64_t page[0]; +} __rte_packed; + +/* This is actually vmbus_gpa_range.page[1] */ +struct vmbus_gpa { + uint32_t len; + uint32_t ofs; + uint64_t page; +} __rte_packed; + +struct vmbus_chanpkt_hdr { + uint16_t type; /* VMBUS_CHANPKT_TYPE_ */ + uint16_t hlen; /* header len, in 8 bytes */ + uint16_t tlen; /* total len, in 8 bytes */ + uint16_t flags; /* VMBUS_CHANPKT_FLAG_ */ + uint64_t xactid; +} __rte_packed; + +static inline uint32_t +vmbus_chanpkt_datalen(const struct vmbus_chanpkt_hdr *pkt) +{ + return vmbus_chanpkt_getlen(pkt->tlen) + - vmbus_chanpkt_getlen(pkt->hlen); +} + +struct vmbus_chanpkt { + struct vmbus_chanpkt_hdr hdr; +} __rte_packed; + +struct vmbus_rxbuf_desc { + uint32_t len; + uint32_t ofs; +} __rte_packed; + +struct vmbus_chanpkt_rxbuf { + struct vmbus_chanpkt_hdr hdr; + uint16_t rxbuf_id; + uint16_t rsvd; + uint32_t rxbuf_cnt; + struct vmbus_rxbuf_desc rxbuf[]; +} __rte_packed; + +struct vmbus_chanpkt_sglist { + struct vmbus_chanpkt_hdr hdr; + uint32_t rsvd; + uint32_t gpa_cnt; + struct vmbus_gpa gpa[]; +} __rte_packed; + +/* + * Channel messages + * - Embedded in vmbus_message.msg_data, e.g. response and notification. + * - Embedded in hypercall_postmsg_in.hc_data, e.g. request.
+ */ + +#define VMBUS_CHANMSG_TYPE_CHOFFER 1 /* NOTE */ +#define VMBUS_CHANMSG_TYPE_CHRESCIND 2 /* NOTE */ +#define VMBUS_CHANMSG_TYPE_CHREQUEST 3 /* REQ */ +#define VMBUS_CHANMSG_TYPE_CHOFFER_DONE 4 /* NOTE */ +#define VMBUS_CHANMSG_TYPE_CHOPEN 5 /* REQ */ +#define VMBUS_CHANMSG_TYPE_CHOPEN_RESP 6 /* RESP */ +#define VMBUS_CHANMSG_TYPE_CHCLOSE 7 /* REQ */ +#define VMBUS_CHANMSG_TYPE_GPADL_CONN 8 /* REQ */ +#define VMBUS_CHANMSG_TYPE_GPADL_SUBCONN 9 /* REQ */ +#define VMBUS_CHANMSG_TYPE_GPADL_CONNRESP 10 /* RESP */ +#define VMBUS_CHANMSG_TYPE_GPADL_DISCONN 11 /* REQ */ +#define VMBUS_CHANMSG_TYPE_GPADL_DISCONNRESP 12 /* RESP */ +#define VMBUS_CHANMSG_TYPE_CHFREE 13 /* REQ */ +#define VMBUS_CHANMSG_TYPE_CONNECT 14 /* REQ */ +#define VMBUS_CHANMSG_TYPE_CONNECT_RESP 15 /* RESP */ +#define VMBUS_CHANMSG_TYPE_DISCONNECT 16 /* REQ */ +#define VMBUS_CHANMSG_TYPE_MAX 22 + +struct vmbus_chanmsg_hdr { + uint32_t type; /* VMBUS_CHANMSG_TYPE_ */ + uint32_t rsvd; +} __rte_packed; + +/* VMBUS_CHANMSG_TYPE_CONNECT */ +struct vmbus_chanmsg_connect { + struct vmbus_chanmsg_hdr hdr; + uint32_t ver; + uint32_t rsvd; + uint64_t evtflags; + uint64_t mnf1; + uint64_t mnf2; +} __rte_packed; + +/* VMBUS_CHANMSG_TYPE_CONNECT_RESP */ +struct vmbus_chanmsg_connect_resp { + struct vmbus_chanmsg_hdr hdr; + uint8_t done; +} __rte_packed; + +/* VMBUS_CHANMSG_TYPE_CHREQUEST */ +struct vmbus_chanmsg_chrequest { + struct vmbus_chanmsg_hdr hdr; +} __rte_packed; + +/* VMBUS_CHANMSG_TYPE_DISCONNECT */ +struct vmbus_chanmsg_disconnect { + struct vmbus_chanmsg_hdr hdr; +} __rte_packed; + +/* VMBUS_CHANMSG_TYPE_CHOPEN */ +struct vmbus_chanmsg_chopen { + struct vmbus_chanmsg_hdr hdr; + uint32_t chanid; + uint32_t openid; + uint32_t gpadl; + uint32_t vcpuid; + uint32_t txbr_pgcnt; +#define VMBUS_CHANMSG_CHOPEN_UDATA_SIZE 120 + uint8_t udata[VMBUS_CHANMSG_CHOPEN_UDATA_SIZE]; +} __rte_packed; + +/* VMBUS_CHANMSG_TYPE_CHOPEN_RESP */ +struct vmbus_chanmsg_chopen_resp { + struct vmbus_chanmsg_hdr hdr; + uint32_t chanid; + uint32_t openid; + uint32_t status; +} __rte_packed; + +/* VMBUS_CHANMSG_TYPE_GPADL_CONN */ +struct vmbus_chanmsg_gpadl_conn { + struct vmbus_chanmsg_hdr hdr; + uint32_t chanid; + uint32_t gpadl; + uint16_t range_len; + uint16_t range_cnt; + struct vmbus_gpa_range range; +} __rte_packed; + +#define VMBUS_CHANMSG_GPADL_CONN_PGMAX 26 + +/* VMBUS_CHANMSG_TYPE_GPADL_SUBCONN */ +struct vmbus_chanmsg_gpadl_subconn { + struct vmbus_chanmsg_hdr hdr; + uint32_t msgno; + uint32_t gpadl; + uint64_t gpa_page[]; +} __rte_packed; + +#define VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX 28 + +/* VMBUS_CHANMSG_TYPE_GPADL_CONNRESP */ +struct vmbus_chanmsg_gpadl_connresp { + struct vmbus_chanmsg_hdr hdr; + uint32_t chanid; + uint32_t gpadl; + uint32_t status; +} __rte_packed; + +/* VMBUS_CHANMSG_TYPE_CHCLOSE */ +struct vmbus_chanmsg_chclose { + struct vmbus_chanmsg_hdr hdr; + uint32_t chanid; +} __rte_packed; + +/* VMBUS_CHANMSG_TYPE_GPADL_DISCONN */ +struct vmbus_chanmsg_gpadl_disconn { + struct vmbus_chanmsg_hdr hdr; + uint32_t chanid; + uint32_t gpadl; +} __rte_packed; + +/* VMBUS_CHANMSG_TYPE_CHFREE */ +struct vmbus_chanmsg_chfree { + struct vmbus_chanmsg_hdr hdr; + uint32_t chanid; +} __rte_packed; + +/* VMBUS_CHANMSG_TYPE_CHRESCIND */ +struct vmbus_chanmsg_chrescind { + struct vmbus_chanmsg_hdr hdr; + uint32_t chanid; +} __rte_packed; + +/* VMBUS_CHANMSG_TYPE_CHOFFER */ +struct vmbus_chanmsg_choffer { + struct vmbus_chanmsg_hdr hdr; + rte_uuid_t chtype; + rte_uuid_t chinst; + uint64_t chlat; /* unit: 100ns */ + uint32_t chrev; + uint32_t 
svrctx_sz; + uint16_t chflags; + uint16_t mmio_sz; /* unit: MB */ + uint8_t udata[120]; + uint16_t subidx; + uint16_t rsvd; + uint32_t chanid; + uint8_t montrig; + uint8_t flags1; /* VMBUS_CHOFFER_FLAG1_ */ + uint16_t flags2; + uint32_t connid; +} __rte_packed; + +#define VMBUS_CHOFFER_FLAG1_HASMNF 0x01 + +#endif /* !_VMBUS_REG_H_ */ diff --git a/drivers/bus/vmbus/vmbus_bufring.c b/drivers/bus/vmbus/vmbus_bufring.c new file mode 100644 index 00000000..c8800160 --- /dev/null +++ b/drivers/bus/vmbus/vmbus_bufring.c @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2009-2012,2016 Microsoft Corp. + * Copyright (c) 2012 NetApp Inc. + * Copyright (c) 2012 Citrix Inc. + * All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "private.h" + +/* Increase bufring index by inc with wraparound */ +static inline uint32_t vmbus_br_idxinc(uint32_t idx, uint32_t inc, uint32_t sz) +{ + idx += inc; + if (idx >= sz) + idx -= sz; + + return idx; +} + +void vmbus_br_setup(struct vmbus_br *br, void *buf, unsigned int blen) +{ + br->vbr = buf; + br->windex = br->vbr->windex; + br->dsize = blen - sizeof(struct vmbus_bufring); +} + +/* + * When we write to the ring buffer, check if the host needs to be + * signaled. + * + * The contract: + * - The host guarantees that while it is draining the TX bufring, + * it will set the br_imask to indicate it does not need to be + * interrupted when new data are added. + * - The host guarantees that it will completely drain the TX bufring + * before exiting the read loop. Further, once the TX bufring is + * empty, it will clear the br_imask and re-check to see if new + * data have arrived. + */ +static inline bool +vmbus_txbr_need_signal(const struct vmbus_br *tbr, uint32_t old_windex) +{ + rte_smp_mb(); + if (tbr->vbr->imask) + return false; + + rte_smp_rmb(); + + /* + * This is the only case we need to signal when the + * ring transitions from being empty to non-empty. + */ + return old_windex == tbr->vbr->rindex; +} + +static inline uint32_t +vmbus_txbr_copyto(const struct vmbus_br *tbr, uint32_t windex, + const void *src0, uint32_t cplen) +{ + uint8_t *br_data = tbr->vbr->data; + uint32_t br_dsize = tbr->dsize; + const uint8_t *src = src0; + + /* XXX use double mapping like Linux kernel? */ + if (cplen > br_dsize - windex) { + uint32_t fraglen = br_dsize - windex; + + /* Wrap-around detected */ + memcpy(br_data + windex, src, fraglen); + memcpy(br_data, src + fraglen, cplen - fraglen); + } else { + memcpy(br_data + windex, src, cplen); + } + + return vmbus_br_idxinc(windex, cplen, br_dsize); +} + +/* + * Write scattered channel packet to TX bufring. + * + * The offset of this channel packet is written as a 64bits value + * immediately after this channel packet. + * + * The write goes through three stages: + * 1. Reserve space in ring buffer for the new data. + * Writer atomically moves priv_write_index. + * 2. Copy the new data into the ring. + * 3. Update the tail of the ring (visible to host) that indicates + * next read location. 
Writer updates write_index + */ +int +vmbus_txbr_write(struct vmbus_br *tbr, const struct iovec iov[], int iovlen, + bool *need_sig) +{ + struct vmbus_bufring *vbr = tbr->vbr; + uint32_t ring_size = tbr->dsize; + uint32_t old_windex, next_windex, windex, total; + uint64_t save_windex; + int i; + + total = 0; + for (i = 0; i < iovlen; i++) + total += iov[i].iov_len; + total += sizeof(save_windex); + + /* Reserve space in ring */ + do { + uint32_t avail; + + /* Get current free location */ + old_windex = tbr->windex; + + /* Prevent compiler reordering this with calculation */ + rte_compiler_barrier(); + + avail = vmbus_br_availwrite(tbr, old_windex); + + /* If not enough space in ring, then tell caller. */ + if (avail <= total) + return -EAGAIN; + + next_windex = vmbus_br_idxinc(old_windex, total, ring_size); + + /* Atomic update of next write_index for other threads */ + } while (!rte_atomic32_cmpset(&tbr->windex, old_windex, next_windex)); + + /* Space from old..new is now reserved */ + windex = old_windex; + for (i = 0; i < iovlen; i++) { + windex = vmbus_txbr_copyto(tbr, windex, + iov[i].iov_base, iov[i].iov_len); + } + + /* Set the offset of the current channel packet. */ + save_windex = ((uint64_t)old_windex) << 32; + windex = vmbus_txbr_copyto(tbr, windex, &save_windex, + sizeof(save_windex)); + + /* The region reserved should match region used */ + RTE_ASSERT(windex == next_windex); + + /* Ensure that data is available before updating host index */ + rte_smp_wmb(); + + /* Check in our reservation and wait for our turn to update the host */ + while (!rte_atomic32_cmpset(&vbr->windex, old_windex, next_windex)) + rte_pause(); + + /* If the host had read all data before this, then we need to signal */ + *need_sig |= vmbus_txbr_need_signal(tbr, old_windex); + return 0; +} + +static inline uint32_t +vmbus_rxbr_copyfrom(const struct vmbus_br *rbr, uint32_t rindex, + void *dst0, size_t cplen) +{ + const uint8_t *br_data = rbr->vbr->data; + uint32_t br_dsize = rbr->dsize; + uint8_t *dst = dst0; + + if (cplen > br_dsize - rindex) { + uint32_t fraglen = br_dsize - rindex; + + /* Wrap-around detected. */ + memcpy(dst, br_data + rindex, fraglen); + memcpy(dst + fraglen, br_data, cplen - fraglen); + } else { + memcpy(dst, br_data + rindex, cplen); + } + + return vmbus_br_idxinc(rindex, cplen, br_dsize); +} + +/* Copy data from receive ring but don't change index */ +int +vmbus_rxbr_peek(const struct vmbus_br *rbr, void *data, size_t dlen) +{ + uint32_t avail; + + /* + * The requested data and the 64-bit channel packet + * offset should be there at least. + */ + avail = vmbus_br_availread(rbr); + if (avail < dlen + sizeof(uint64_t)) + return -EAGAIN; + + vmbus_rxbr_copyfrom(rbr, rbr->vbr->rindex, data, dlen); + return 0; +} + +/* + * Copy data from receive ring and change index + * NOTE: + * We assume (dlen + skip) == sizeof(channel packet). + */ +int +vmbus_rxbr_read(struct vmbus_br *rbr, void *data, size_t dlen, size_t skip) +{ + struct vmbus_bufring *vbr = rbr->vbr; + uint32_t br_dsize = rbr->dsize; + uint32_t rindex; + + if (vmbus_br_availread(rbr) < dlen + skip + sizeof(uint64_t)) + return -EAGAIN; + + /* Record where host was when we started read (for debug) */ + rbr->windex = rbr->vbr->windex; + + /* + * Copy channel packet from RX bufring. + */ + rindex = vmbus_br_idxinc(rbr->vbr->rindex, skip, br_dsize); + rindex = vmbus_rxbr_copyfrom(rbr, rindex, data, dlen); + + /* + * Discard this channel packet's 64-bit offset, which is useless to us.
+ */ + rindex = vmbus_br_idxinc(rindex, sizeof(uint64_t), br_dsize); + + /* Update the read index _after_ the channel packet is fetched. */ + rte_compiler_barrier(); + + vbr->rindex = rindex; + + return 0; +} diff --git a/drivers/bus/vmbus/vmbus_channel.c b/drivers/bus/vmbus/vmbus_channel.c new file mode 100644 index 00000000..cc5f3e83 --- /dev/null +++ b/drivers/bus/vmbus/vmbus_channel.c @@ -0,0 +1,405 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018, Microsoft Corporation. + * All Rights Reserved. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "private.h" + +static inline void +vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask) +{ + /* Use GCC builtin which does an atomic OR operation */ + __sync_or_and_fetch(addr, mask); +} + +static inline void +vmbus_send_interrupt(const struct rte_vmbus_device *dev, uint32_t relid) +{ + uint32_t *int_addr; + uint32_t int_mask; + + int_addr = dev->int_page + relid / 32; + int_mask = 1u << (relid % 32); + + vmbus_sync_set_bit(int_addr, int_mask); +} + +static inline void +vmbus_set_monitor(const struct rte_vmbus_device *dev, uint32_t monitor_id) +{ + uint32_t *monitor_addr, monitor_mask; + unsigned int trigger_index; + + trigger_index = monitor_id / HV_MON_TRIG_LEN; + monitor_mask = 1u << (monitor_id % HV_MON_TRIG_LEN); + + monitor_addr = &dev->monitor_page->trigs[trigger_index].pending; + vmbus_sync_set_bit(monitor_addr, monitor_mask); +} + +static void +vmbus_set_event(const struct rte_vmbus_device *dev, + const struct vmbus_channel *chan) +{ + vmbus_send_interrupt(dev, chan->relid); + vmbus_set_monitor(dev, chan->monitor_id); +} + +/* + * Notify host that there are data pending on our TX bufring. + * + * Since this is in userspace, rely on the monitor page. + * Can't do a hypercall from userspace. + */ +void +rte_vmbus_chan_signal_tx(const struct vmbus_channel *chan) +{ + const struct rte_vmbus_device *dev = chan->device; + const struct vmbus_br *tbr = &chan->txbr; + + /* Make sure all updates are done before signaling host */ + rte_smp_wmb(); + + /* If the host is ignoring interrupts, don't signal */ + if (tbr->vbr->imask) + return; + + vmbus_set_event(dev, chan); +} + + +/* Do a simple send directly using transmit ring. */ +int rte_vmbus_chan_send(struct vmbus_channel *chan, uint16_t type, + void *data, uint32_t dlen, + uint64_t xactid, uint32_t flags, bool *need_sig) +{ + struct vmbus_chanpkt pkt; + unsigned int pktlen, pad_pktlen; + const uint32_t hlen = sizeof(pkt); + bool send_evt = false; + uint64_t pad = 0; + struct iovec iov[3]; + int error; + + pktlen = hlen + dlen; + pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t)); + + pkt.hdr.type = type; + pkt.hdr.flags = flags; + pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT; + pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT; + pkt.hdr.xactid = xactid; + + iov[0].iov_base = &pkt; + iov[0].iov_len = hlen; + iov[1].iov_base = data; + iov[1].iov_len = dlen; + iov[2].iov_base = &pad; + iov[2].iov_len = pad_pktlen - pktlen; + + error = vmbus_txbr_write(&chan->txbr, iov, 3, &send_evt); + + /* + * The caller sets need_sig to non-NULL if it will handle + * any required signaling later. + * If need_sig is NULL, signal now if needed. + */ + if (need_sig) + *need_sig |= send_evt; + else if (error == 0 && send_evt) + rte_vmbus_chan_signal_tx(chan); + return error; +} + +/* Do a scatter/gather send where the descriptor points to data.
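+ * The packet placed on the ring is laid out as: the vmbus_chanpkt_sglist + * header, then gpacnt vmbus_gpa descriptors for the payload pages, then + * dlen bytes of inline data, padded to an 8-byte boundary (a summary of + * the iov construction below).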
*/ +int rte_vmbus_chan_send_sglist(struct vmbus_channel *chan, + struct vmbus_gpa sg[], uint32_t sglen, + void *data, uint32_t dlen, + uint64_t xactid, bool *need_sig) +{ + struct vmbus_chanpkt_sglist pkt; + unsigned int pktlen, pad_pktlen, hlen; + bool send_evt = false; + struct iovec iov[4]; + uint64_t pad = 0; + int error; + + hlen = offsetof(struct vmbus_chanpkt_sglist, gpa[sglen]); + pktlen = hlen + dlen; + pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t)); + + pkt.hdr.type = VMBUS_CHANPKT_TYPE_GPA; + pkt.hdr.flags = VMBUS_CHANPKT_FLAG_RC; + pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT; + pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT; + pkt.hdr.xactid = xactid; + pkt.rsvd = 0; + pkt.gpa_cnt = sglen; + + iov[0].iov_base = &pkt; + iov[0].iov_len = sizeof(pkt); + iov[1].iov_base = sg; + iov[1].iov_len = sizeof(struct vmbus_gpa) * sglen; + iov[2].iov_base = data; + iov[2].iov_len = dlen; + iov[3].iov_base = &pad; + iov[3].iov_len = pad_pktlen - pktlen; + + error = vmbus_txbr_write(&chan->txbr, iov, 4, &send_evt); + + /* if caller is batching, just propagate the status */ + if (need_sig) + *need_sig |= send_evt; + else if (error == 0 && send_evt) + rte_vmbus_chan_signal_tx(chan); + return error; +} + +bool rte_vmbus_chan_rx_empty(const struct vmbus_channel *channel) +{ + const struct vmbus_br *br = &channel->rxbr; + + return br->vbr->rindex == br->vbr->windex; +} + +/* Signal host after reading N bytes */ +void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read) +{ + struct vmbus_br *rbr = &chan->rxbr; + uint32_t write_sz, pending_sz; + + /* No need for signaling on older versions */ + if (!rbr->vbr->feature_bits.feat_pending_send_sz) + return; + + /* Make sure reading of pending happens after new read index */ + rte_mb(); + + pending_sz = rbr->vbr->pending_send; + if (!pending_sz) + return; + + rte_smp_rmb(); + write_sz = vmbus_br_availwrite(rbr, rbr->vbr->windex); + + /* If there was space before then host was not blocked */ + if (write_sz - bytes_read > pending_sz) + return; + + /* If pending write will not fit */ + if (write_sz <= pending_sz) + return; + + vmbus_set_event(chan->device, chan); +} + +int rte_vmbus_chan_recv(struct vmbus_channel *chan, void *data, uint32_t *len, + uint64_t *request_id) +{ + struct vmbus_chanpkt_hdr pkt; + uint32_t dlen, hlen, bufferlen = *len; + int error; + + *len = 0; + + error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt)); + if (error) + return error; + + if (unlikely(pkt.hlen < VMBUS_CHANPKT_HLEN_MIN)) { + VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u", pkt.hlen); + /* XXX this channel is dead actually. 
*/ + return -EIO; + } + + if (unlikely(pkt.hlen > pkt.tlen)) { + VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u and tlen %u", + pkt.hlen, pkt.tlen); + return -EIO; + } + + /* Lengths are in quad words */ + hlen = pkt.hlen << VMBUS_CHANPKT_SIZE_SHIFT; + dlen = (pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT) - hlen; + *len = dlen; + + /* If caller buffer is not large enough */ + if (unlikely(dlen > bufferlen)) + return -ENOBUFS; + + if (request_id) + *request_id = pkt.xactid; + + /* Read data and skip packet header */ + error = vmbus_rxbr_read(&chan->rxbr, data, dlen, hlen); + if (error) + return error; + + rte_vmbus_chan_signal_read(chan, dlen + hlen + sizeof(uint64_t)); + return 0; +} + +/* TODO: replace this with inplace ring buffer (no copy) */ +int rte_vmbus_chan_recv_raw(struct vmbus_channel *chan, + void *data, uint32_t *len) +{ + struct vmbus_chanpkt_hdr pkt; + uint32_t dlen, bufferlen = *len; + int error; + + error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt)); + if (error) + return error; + + if (unlikely(pkt.hlen < VMBUS_CHANPKT_HLEN_MIN)) { + VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u", pkt.hlen); + /* XXX this channel is dead actually. */ + return -EIO; + } + + if (unlikely(pkt.hlen > pkt.tlen)) { + VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u and tlen %u", + pkt.hlen, pkt.tlen); + return -EIO; + } + + /* Lengths are in quad words */ + dlen = pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT; + *len = dlen; + + /* If caller buffer is not large enough */ + if (unlikely(dlen > bufferlen)) + return -ENOBUFS; + + /* Read data and skip packet header */ + error = vmbus_rxbr_read(&chan->rxbr, data, dlen, 0); + if (error) + return error; + + /* Return the number of bytes read */ + return dlen + sizeof(uint64_t); +} + +int vmbus_chan_create(const struct rte_vmbus_device *device, + uint16_t relid, uint16_t subid, uint8_t monitor_id, + struct vmbus_channel **new_chan) +{ + struct vmbus_channel *chan; + int err; + + chan = rte_zmalloc_socket("VMBUS", sizeof(*chan), RTE_CACHE_LINE_SIZE, + device->device.numa_node); + if (!chan) + return -ENOMEM; + + STAILQ_INIT(&chan->subchannel_list); + chan->device = device; + chan->subchannel_id = subid; + chan->relid = relid; + chan->monitor_id = monitor_id; + *new_chan = chan; + + err = vmbus_uio_map_rings(chan); + if (err) { + rte_free(chan); + return err; + } + + return 0; +} + +/* Setup the primary channel */ +int rte_vmbus_chan_open(struct rte_vmbus_device *device, + struct vmbus_channel **new_chan) +{ + int err; + + err = vmbus_chan_create(device, device->relid, 0, + device->monitor_id, new_chan); + if (!err) + device->primary = *new_chan; + + return err; +} + +int rte_vmbus_max_channels(const struct rte_vmbus_device *device) +{ + if (vmbus_uio_subchannels_supported(device, device->primary)) + return VMBUS_MAX_CHANNELS; + else + return 1; +} + +/* Setup secondary channel */ +int rte_vmbus_subchan_open(struct vmbus_channel *primary, + struct vmbus_channel **new_chan) +{ + struct vmbus_channel *chan; + int err; + + err = vmbus_uio_get_subchan(primary, &chan); + if (err) + return err; + + STAILQ_INSERT_TAIL(&primary->subchannel_list, chan, next); + *new_chan = chan; + return 0; +} + +uint16_t rte_vmbus_sub_channel_index(const struct vmbus_channel *chan) +{ + return chan->subchannel_id; +} + +void rte_vmbus_chan_close(struct vmbus_channel *chan) +{ + const struct rte_vmbus_device *device = chan->device; + struct vmbus_channel *primary = device->primary; + + if (chan != primary) + STAILQ_REMOVE(&primary->subchannel_list, chan, + vmbus_channel, next); + + rte_free(chan); +} + 
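+/* + * Typical channel lifecycle, as a hedged sketch (hypothetical driver + * code; only the rte_vmbus_* calls are provided by this file): + * + * struct vmbus_channel *primary, *sub; + * + * rte_vmbus_chan_open(dev, &primary); + * if (rte_vmbus_max_channels(dev) > 1) + * rte_vmbus_subchan_open(primary, &sub); + * ... + * rte_vmbus_chan_close(sub); + * rte_vmbus_chan_close(primary); + */ + 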
+static void vmbus_dump_ring(FILE *f, const char *id, const struct vmbus_br *br) +{ + const struct vmbus_bufring *vbr = br->vbr; + struct vmbus_chanpkt_hdr pkt; + + fprintf(f, "%s windex=%u rindex=%u mask=%u pending=%u feature=%#x\n", + id, vbr->windex, vbr->rindex, vbr->imask, + vbr->pending_send, vbr->feature_bits.value); + fprintf(f, " size=%u avail write=%u read=%u\n", + br->dsize, vmbus_br_availwrite(br, vbr->windex), + vmbus_br_availread(br)); + + if (vmbus_rxbr_peek(br, &pkt, sizeof(pkt)) == 0) + fprintf(f, " pkt type %#x len %u flags %#x xactid %#"PRIx64"\n", + pkt.type, + pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT, + pkt.flags, pkt.xactid); +} + +void rte_vmbus_chan_dump(FILE *f, const struct vmbus_channel *chan) +{ + fprintf(f, "channel[%u] relid=%u monitor=%u\n", + chan->subchannel_id, chan->relid, chan->monitor_id); + vmbus_dump_ring(f, "rxbr", &chan->rxbr); + vmbus_dump_ring(f, "txbr", &chan->txbr); +} diff --git a/drivers/bus/vmbus/vmbus_common.c b/drivers/bus/vmbus/vmbus_common.c new file mode 100644 index 00000000..c7165ad5 --- /dev/null +++ b/drivers/bus/vmbus/vmbus_common.c @@ -0,0 +1,286 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018, Microsoft Corporation. + * All Rights Reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "private.h" + +int vmbus_logtype_bus; +extern struct rte_vmbus_bus rte_vmbus_bus; + +/* map a particular resource from a file */ +void * +vmbus_map_resource(void *requested_addr, int fd, off_t offset, size_t size, + int flags) +{ + void *mapaddr; + + /* Map the memory resource of device */ + mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE, + MAP_SHARED | flags, fd, offset); + if (mapaddr == MAP_FAILED) { + VMBUS_LOG(ERR, + "mmap(%d, %p, %zu, %ld) failed: %s", + fd, requested_addr, size, (long)offset, + strerror(errno)); + } + return mapaddr; +} + +/* unmap a particular resource */ +void +vmbus_unmap_resource(void *requested_addr, size_t size) +{ + if (requested_addr == NULL) + return; + + /* Unmap the VMBUS memory resource of device */ + if (munmap(requested_addr, size)) { + VMBUS_LOG(ERR, "munmap(%p, 0x%lx) failed: %s", + requested_addr, (unsigned long)size, + strerror(errno)); + } else + VMBUS_LOG(DEBUG, " VMBUS memory unmapped at %p", + requested_addr); +} + +/** + * Match the VMBUS driver and device using UUID table + * + * @param dr + * VMBUS driver from which ID table would be extracted + * @param dev + * VMBUS device to match against the driver + * @return + * true for successful match + * false for unsuccessful match + */ +static bool +vmbus_match(const struct rte_vmbus_driver *dr, + const struct rte_vmbus_device *dev) +{ + const rte_uuid_t *id_table; + + for (id_table = dr->id_table; !rte_uuid_is_null(*id_table); ++id_table) { + if (rte_uuid_compare(*id_table, dev->class_id) == 0) + return true; + } + + return false; +} + +/* + * If the device IDs match, call the probe() function of the driver.
+ */ +static int +vmbus_probe_one_driver(struct rte_vmbus_driver *dr, + struct rte_vmbus_device *dev) +{ + char guid[RTE_UUID_STRLEN]; + int ret; + + if (!vmbus_match(dr, dev)) + return 1; /* not supported */ + + rte_uuid_unparse(dev->device_id, guid, sizeof(guid)); + VMBUS_LOG(INFO, "VMBUS device %s on NUMA socket %i", + guid, dev->device.numa_node); + + /* TODO add blacklisted */ + + /* map resources for device */ + ret = rte_vmbus_map_device(dev); + if (ret != 0) + return ret; + + /* reference driver structure */ + dev->driver = dr; + dev->device.driver = &dr->driver; + + if (dev->device.numa_node < 0) { + VMBUS_LOG(WARNING, " Invalid NUMA socket, default to 0"); + dev->device.numa_node = 0; + } + + /* call the driver probe() function */ + VMBUS_LOG(INFO, " probe driver: %s", dr->driver.name); + ret = dr->probe(dr, dev); + if (ret) { + dev->driver = NULL; + rte_vmbus_unmap_device(dev); + } + + return ret; +} + +/* + * If the device class GUID matches, call the probe function of + * all registered drivers for the vmbus device. + * Return -1 if initialization failed, + * and 1 if no driver was found for this device. + */ +static int +vmbus_probe_all_drivers(struct rte_vmbus_device *dev) +{ + struct rte_vmbus_driver *dr; + int rc; + + /* Check if a driver is already loaded */ + if (dev->driver != NULL) { + VMBUS_LOG(DEBUG, "VMBUS driver already loaded"); + return 0; + } + + FOREACH_DRIVER_ON_VMBUS(dr) { + rc = vmbus_probe_one_driver(dr, dev); + if (rc < 0) /* negative is an error */ + return -1; + + if (rc > 0) /* positive: driver doesn't support it */ + continue; + + return 0; + } + return 1; +} + +/* + * Scan the vmbus, and call the probe() function for + * all registered drivers that have a matching entry in their id_table + * for discovered devices. + */ +int +rte_vmbus_probe(void) +{ + struct rte_vmbus_device *dev; + size_t probed = 0, failed = 0; + char ubuf[RTE_UUID_STRLEN]; + + FOREACH_DEVICE_ON_VMBUS(dev) { + probed++; + + rte_uuid_unparse(dev->device_id, ubuf, sizeof(ubuf)); + + /* TODO: add whitelist/blacklist */ + + if (vmbus_probe_all_drivers(dev) < 0) { + VMBUS_LOG(NOTICE, + "Requested device %s cannot be used", ubuf); + rte_errno = errno; + failed++; + } + } + + return (probed && probed == failed) ?
-1 : 0; +} + +static int +vmbus_parse(const char *name, void *addr) +{ + rte_uuid_t guid; + int ret; + + ret = rte_uuid_parse(name, guid); + if (ret == 0 && addr) + memcpy(addr, &guid, sizeof(guid)); + + return ret; +} + +/* register vmbus driver */ +void +rte_vmbus_register(struct rte_vmbus_driver *driver) +{ + VMBUS_LOG(DEBUG, + "Registered driver %s", driver->driver.name); + + TAILQ_INSERT_TAIL(&rte_vmbus_bus.driver_list, driver, next); + driver->bus = &rte_vmbus_bus; +} + +/* unregister vmbus driver */ +void +rte_vmbus_unregister(struct rte_vmbus_driver *driver) +{ + TAILQ_REMOVE(&rte_vmbus_bus.driver_list, driver, next); + driver->bus = NULL; +} + +/* Add a device to VMBUS bus */ +void +vmbus_add_device(struct rte_vmbus_device *vmbus_dev) +{ + TAILQ_INSERT_TAIL(&rte_vmbus_bus.device_list, vmbus_dev, next); +} + +/* Insert a device into a predefined position in VMBUS bus */ +void +vmbus_insert_device(struct rte_vmbus_device *exist_vmbus_dev, + struct rte_vmbus_device *new_vmbus_dev) +{ + TAILQ_INSERT_BEFORE(exist_vmbus_dev, new_vmbus_dev, next); +} + +/* Remove a device from VMBUS bus */ +void +vmbus_remove_device(struct rte_vmbus_device *vmbus_dev) +{ + TAILQ_REMOVE(&rte_vmbus_bus.device_list, vmbus_dev, next); +} + +/* VMBUS doesn't support hotplug */ +static struct rte_device * +vmbus_find_device(const struct rte_device *start, rte_dev_cmp_t cmp, + const void *data) +{ + struct rte_vmbus_device *dev; + + FOREACH_DEVICE_ON_VMBUS(dev) { + if (start && &dev->device == start) { + start = NULL; + continue; + } + if (cmp(&dev->device, data) == 0) + return &dev->device; + } + + return NULL; +} + + +struct rte_vmbus_bus rte_vmbus_bus = { + .bus = { + .scan = rte_vmbus_scan, + .probe = rte_vmbus_probe, + .find_device = vmbus_find_device, + .parse = vmbus_parse, + }, + .device_list = TAILQ_HEAD_INITIALIZER(rte_vmbus_bus.device_list), + .driver_list = TAILQ_HEAD_INITIALIZER(rte_vmbus_bus.driver_list), +}; + +RTE_REGISTER_BUS(vmbus, rte_vmbus_bus.bus); + +RTE_INIT(vmbus_init_log) +{ + vmbus_logtype_bus = rte_log_register("bus.vmbus"); + if (vmbus_logtype_bus >= 0) + rte_log_set_level(vmbus_logtype_bus, RTE_LOG_NOTICE); +} diff --git a/drivers/bus/vmbus/vmbus_common_uio.c b/drivers/bus/vmbus/vmbus_common_uio.c new file mode 100644 index 00000000..5ddd36ab --- /dev/null +++ b/drivers/bus/vmbus/vmbus_common_uio.c @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018, Microsoft Corporation. + * All Rights Reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "private.h" + +static struct rte_tailq_elem vmbus_tailq = { + .name = "VMBUS_RESOURCE_LIST", +}; +EAL_REGISTER_TAILQ(vmbus_tailq) + +static int +vmbus_uio_map_secondary(struct rte_vmbus_device *dev) +{ + int fd, i; + struct mapped_vmbus_resource *uio_res; + struct mapped_vmbus_res_list *uio_res_list + = RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list); + + TAILQ_FOREACH(uio_res, uio_res_list, next) { + + /* skip this element if it doesn't match our UUID */ + if (rte_uuid_compare(uio_res->id, dev->device_id) != 0) + continue; + + /* open /dev/uioX */ + fd = open(uio_res->path, O_RDWR); + if (fd < 0) { + VMBUS_LOG(ERR, "Cannot open %s: %s", + uio_res->path, strerror(errno)); + return -1; + } + + for (i = 0; i != uio_res->nb_maps; i++) { + void *mapaddr; + + mapaddr = vmbus_map_resource(uio_res->maps[i].addr, + fd, 0, + uio_res->maps[i].size, 0); + + if (mapaddr == uio_res->maps[i].addr) + continue; + + VMBUS_LOG(ERR, + "Cannot mmap device resource file %s to address: %p", + uio_res->path, uio_res->maps[i].addr); + + if (mapaddr != MAP_FAILED) + /* unmap addr wrongly mapped */ + vmbus_unmap_resource(mapaddr, + (size_t)uio_res->maps[i].size); + + /* unmap addrs correctly mapped */ + while (--i >= 0) + vmbus_unmap_resource(uio_res->maps[i].addr, + (size_t)uio_res->maps[i].size); + + close(fd); + return -1; + } + + /* fd is not needed in secondary process, close it */ + close(fd); + return 0; + } + + VMBUS_LOG(ERR, "Cannot find resource for device"); + return 1; +} + +static int +vmbus_uio_map_primary(struct rte_vmbus_device *dev) +{ + int i, ret; + struct mapped_vmbus_resource *uio_res = NULL; + struct mapped_vmbus_res_list *uio_res_list = + RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list); + + /* allocate uio resource */ + ret = vmbus_uio_alloc_resource(dev, &uio_res); + if (ret) + return ret; + + /* Map the resources */ + for (i = 0; i < VMBUS_MAX_RESOURCE; i++) { + /* skip empty resource */ + if (dev->resource[i].len == 0) + continue; + + ret = vmbus_uio_map_resource_by_index(dev, i, uio_res, 0); + if (ret) + goto error; + } + + uio_res->nb_maps = i; + + TAILQ_INSERT_TAIL(uio_res_list, uio_res, next); + + return 0; +error: + while (--i >= 0) { + vmbus_unmap_resource(uio_res->maps[i].addr, + (size_t)uio_res->maps[i].size); + } + vmbus_uio_free_resource(dev, uio_res); + return -1; +} + + +struct mapped_vmbus_resource * +vmbus_uio_find_resource(const struct rte_vmbus_device *dev) +{ + struct mapped_vmbus_resource *uio_res; + struct mapped_vmbus_res_list *uio_res_list = + RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list); + + if (dev == NULL) + return NULL; + + TAILQ_FOREACH(uio_res, uio_res_list, next) { + /* skip this element if it doesn't match our VMBUS address */ + if (rte_uuid_compare(uio_res->id, dev->device_id) == 0) + return uio_res; + } + return NULL; +} + +/* map the VMBUS resource of a VMBUS device in virtual memory */ +int +vmbus_uio_map_resource(struct rte_vmbus_device *dev) +{ + struct mapped_vmbus_resource *uio_res; + int ret; + + /* TODO: handle rescind */ + dev->intr_handle.fd = -1; + dev->intr_handle.uio_cfg_fd = -1; + dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; + + /* secondary processes - use already recorded details */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + ret = vmbus_uio_map_secondary(dev); + else + ret = vmbus_uio_map_primary(dev); + + if (ret != 0) + return ret; + + uio_res = vmbus_uio_find_resource(dev); + if
(!uio_res) { + VMBUS_LOG(ERR, "can not find resources!"); + return -EIO; + } + + if (uio_res->nb_maps <= HV_MON_PAGE_MAP) { + VMBUS_LOG(ERR, "VMBUS: only %u resources found!", + uio_res->nb_maps); + return -EINVAL; + } + + dev->int_page = (uint32_t *)((char *)uio_res->maps[HV_INT_PAGE_MAP].addr + + (PAGE_SIZE >> 1)); + dev->monitor_page = uio_res->maps[HV_MON_PAGE_MAP].addr; + return 0; +} + +static void +vmbus_uio_unmap(struct mapped_vmbus_resource *uio_res) +{ + int i; + + if (uio_res == NULL) + return; + + for (i = 0; i != uio_res->nb_maps; i++) { + vmbus_unmap_resource(uio_res->maps[i].addr, + (size_t)uio_res->maps[i].size); + } +} + +/* unmap the VMBUS resource of a VMBUS device in virtual memory */ +void +vmbus_uio_unmap_resource(struct rte_vmbus_device *dev) +{ + struct mapped_vmbus_resource *uio_res; + struct mapped_vmbus_res_list *uio_res_list = + RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list); + + if (dev == NULL) + return; + + /* find an entry for the device */ + uio_res = vmbus_uio_find_resource(dev); + if (uio_res == NULL) + return; + + /* secondary processes - just free maps */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return vmbus_uio_unmap(uio_res); + + TAILQ_REMOVE(uio_res_list, uio_res, next); + + /* unmap all resources */ + vmbus_uio_unmap(uio_res); + + /* free uio resource */ + rte_free(uio_res); + + /* close fd if in primary process */ + close(dev->intr_handle.fd); + if (dev->intr_handle.uio_cfg_fd >= 0) { + close(dev->intr_handle.uio_cfg_fd); + dev->intr_handle.uio_cfg_fd = -1; + } + + dev->intr_handle.fd = -1; + dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; +} diff --git a/drivers/common/Makefile b/drivers/common/Makefile new file mode 100644 index 00000000..0fd22376 --- /dev/null +++ b/drivers/common/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium, Inc +# + +include $(RTE_SDK)/mk/rte.vars.mk + +ifeq ($(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF)$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL),yy) +DIRS-y += octeontx +endif + +include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/drivers/common/meson.build b/drivers/common/meson.build new file mode 100644 index 00000000..d7b7d8cf --- /dev/null +++ b/drivers/common/meson.build @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium, Inc + +std_deps = ['eal'] +drivers = ['octeontx', 'qat'] +config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON' +driver_name_fmt = 'rte_common_@0@' diff --git a/drivers/common/octeontx/Makefile b/drivers/common/octeontx/Makefile new file mode 100644 index 00000000..dfdb9f19 --- /dev/null +++ b/drivers/common/octeontx/Makefile @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium, Inc +# + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_common_octeontx.a + +CFLAGS += $(WERROR_FLAGS) +EXPORT_MAP := rte_common_octeontx_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-y += octeontx_mbox.c + +LDLIBS += -lrte_eal + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/common/octeontx/meson.build b/drivers/common/octeontx/meson.build new file mode 100644 index 00000000..203d1ef4 --- /dev/null +++ b/drivers/common/octeontx/meson.build @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium, Inc +# + +sources = files('octeontx_mbox.c') diff --git a/drivers/common/octeontx/octeontx_mbox.c b/drivers/common/octeontx/octeontx_mbox.c new file mode 100644 index 00000000..880f8a40 --- /dev/null +++ 
b/drivers/common/octeontx/octeontx_mbox.c
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_spinlock.h>
+
+#include "octeontx_mbox.h"
+
+/* Mbox operation timeout in seconds */
+#define MBOX_WAIT_TIME_SEC 3
+#define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */)
+
+/* Mbox channel state */
+enum {
+	MBOX_CHAN_STATE_REQ = 1,
+	MBOX_CHAN_STATE_RES = 0,
+};
+
+/* Response messages */
+enum {
+	MBOX_RET_SUCCESS,
+	MBOX_RET_INVALID,
+	MBOX_RET_INTERNAL_ERR,
+};
+
+struct mbox {
+	int init_once;
+	uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
+	uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
+	uint16_t tag_own; /* Last tag which was written to own channel */
+	rte_spinlock_t lock;
+};
+
+static struct mbox octeontx_mbox;
+
+/*
+ * Structure used for mbox synchronization
+ * This structure sits at the beginning of the mbox RAM and is used as
+ * the main synchronization point for channel communication
+ */
+struct mbox_ram_hdr {
+	union {
+		uint64_t u64;
+		struct {
+			uint8_t chan_state : 1;
+			uint8_t coproc : 7;
+			uint8_t msg;
+			uint8_t vfid;
+			uint8_t res_code;
+			uint16_t tag;
+			uint16_t len;
+		};
+	};
+};
+
+int octeontx_logtype_mbox;
+
+RTE_INIT(otx_init_log)
+{
+	octeontx_logtype_mbox = rte_log_register("pmd.octeontx.mbox");
+	if (octeontx_logtype_mbox >= 0)
+		rte_log_set_level(octeontx_logtype_mbox, RTE_LOG_NOTICE);
+}
+
+static inline void
+mbox_msgcpy(volatile uint8_t *d, volatile const uint8_t *s, uint16_t size)
+{
+	uint16_t i;
+
+	for (i = 0; i < size; i++)
+		d[i] = s[i];
+}
+
+static inline void
+mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
+			const void *txmsg, uint16_t txsize)
+{
+	struct mbox_ram_hdr old_hdr;
+	struct mbox_ram_hdr new_hdr = { {0} };
+	uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
+	uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
+
+	/*
+	 * Initialize the channel with the tag left by the last send.
+	 * On successful mbox send completion, the PF increments the tag
+	 * by one.
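+	 * (Editor's illustration, not part of the original patch: if the
+	 * header last read back carries tag 5, tag_own becomes 6, the next
+	 * even number. The request goes out tagged 6, the PF replies with
+	 * tag 7, and mbox_wait_response() increments tag_own to 7 before
+	 * comparing, so a lost or stale exchange shows up as a tag
+	 * mismatch.)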
+ * The sender can validate integrity of PF message with this scheme + */ + old_hdr.u64 = rte_read64(ram_mbox_hdr); + m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */ + + /* Copy msg body */ + if (txmsg) + mbox_msgcpy(ram_mbox_msg, txmsg, txsize); + + /* Prepare new hdr */ + new_hdr.chan_state = MBOX_CHAN_STATE_REQ; + new_hdr.coproc = hdr->coproc; + new_hdr.msg = hdr->msg; + new_hdr.vfid = hdr->vfid; + new_hdr.tag = m->tag_own; + new_hdr.len = txsize; + + /* Write the msg header */ + rte_write64(new_hdr.u64, ram_mbox_hdr); + rte_smp_wmb(); + /* Notify PF about the new msg - write to MBOX reg generates PF IRQ */ + rte_write64(0, m->reg); +} + +static inline int +mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr, + void *rxmsg, uint16_t rxsize) +{ + int res = 0, wait; + uint16_t len; + struct mbox_ram_hdr rx_hdr; + uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base; + uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr); + + /* Wait for response */ + wait = MBOX_WAIT_TIME_SEC * 1000 * 10; + while (wait > 0) { + rte_delay_us(100); + rx_hdr.u64 = rte_read64(ram_mbox_hdr); + if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES) + break; + --wait; + } + + hdr->res_code = rx_hdr.res_code; + m->tag_own++; + + /* Timeout */ + if (wait <= 0) { + res = -ETIMEDOUT; + goto error; + } + + /* Tag mismatch */ + if (m->tag_own != rx_hdr.tag) { + res = -EINVAL; + goto error; + } + + /* PF nacked the msg */ + if (rx_hdr.res_code != MBOX_RET_SUCCESS) { + res = -EBADMSG; + goto error; + } + + len = RTE_MIN(rx_hdr.len, rxsize); + if (rxmsg) + mbox_msgcpy(rxmsg, ram_mbox_msg, len); + + return len; + +error: + mbox_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)", + m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res, + hdr->res_code); + return res; +} + +static inline int +mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg, + uint16_t txsize, void *rxmsg, uint16_t rxsize) +{ + int res = -EINVAL; + + if (m->init_once == 0 || hdr == NULL || + txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) { + mbox_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d", + m->init_once, hdr, txsize, rxsize); + return res; + } + + rte_spinlock_lock(&m->lock); + + mbox_send_request(m, hdr, txmsg, txsize); + res = mbox_wait_response(m, hdr, rxmsg, rxsize); + + rte_spinlock_unlock(&m->lock); + return res; +} + +int +octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base) +{ + struct mbox *m = &octeontx_mbox; + + if (m->init_once) + return -EALREADY; + + if (ram_mbox_base == NULL) { + mbox_log_err("Invalid ram_mbox_base=%p", ram_mbox_base); + return -EINVAL; + } + + m->ram_mbox_base = ram_mbox_base; + + if (m->reg != NULL) { + rte_spinlock_init(&m->lock); + m->init_once = 1; + } + + return 0; +} + +int +octeontx_mbox_set_reg(uint8_t *reg) +{ + struct mbox *m = &octeontx_mbox; + + if (m->init_once) + return -EALREADY; + + if (reg == NULL) { + mbox_log_err("Invalid reg=%p", reg); + return -EINVAL; + } + + m->reg = reg; + + if (m->ram_mbox_base != NULL) { + rte_spinlock_init(&m->lock); + m->init_once = 1; + } + + return 0; +} + +int +octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata, + uint16_t txlen, void *rxdata, uint16_t rxlen) +{ + struct mbox *m = &octeontx_mbox; + + RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EINVAL; + + return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen); +} diff --git a/drivers/common/octeontx/octeontx_mbox.h 
b/drivers/common/octeontx/octeontx_mbox.h new file mode 100644 index 00000000..43fbda28 --- /dev/null +++ b/drivers/common/octeontx/octeontx_mbox.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef __OCTEONTX_MBOX_H__ +#define __OCTEONTX_MBOX_H__ + +#include +#include + +#define SSOW_BAR4_LEN (64 * 1024) +#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3)) + +#define MBOX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, octeontx_logtype_mbox,\ + "%s() line %u: " fmt "\n", __func__, __LINE__, ## args) + +#define mbox_log_info(fmt, ...) MBOX_LOG(INFO, fmt, ##__VA_ARGS__) +#define mbox_log_dbg(fmt, ...) MBOX_LOG(DEBUG, fmt, ##__VA_ARGS__) +#define mbox_log_err(fmt, ...) MBOX_LOG(ERR, fmt, ##__VA_ARGS__) +#define mbox_func_trace mbox_log_dbg + +extern int octeontx_logtype_mbox; + +struct octeontx_mbox_hdr { + uint16_t vfid; /* VF index or pf resource index local to the domain */ + uint8_t coproc; /* Coprocessor id */ + uint8_t msg; /* Message id */ + uint8_t res_code; /* Functional layer response code */ +}; + +int octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base); +int octeontx_mbox_set_reg(uint8_t *reg); +int octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, + void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen); + +#endif /* __OCTEONTX_MBOX_H__ */ diff --git a/drivers/common/octeontx/rte_common_octeontx_version.map b/drivers/common/octeontx/rte_common_octeontx_version.map new file mode 100644 index 00000000..f04b3b7f --- /dev/null +++ b/drivers/common/octeontx/rte_common_octeontx_version.map @@ -0,0 +1,7 @@ +DPDK_18.05 { + global: + + octeontx_mbox_set_ram_mbox_base; + octeontx_mbox_set_reg; + octeontx_mbox_send; +}; diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile new file mode 100644 index 00000000..c68a032a --- /dev/null +++ b/drivers/common/qat/Makefile @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2015-2018 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# build directories +QAT_CRYPTO_DIR := $(RTE_SDK)/drivers/crypto/qat +QAT_COMPRESS_DIR := $(RTE_SDK)/drivers/compress/qat +VPATH=$(QAT_CRYPTO_DIR):$(QAT_COMPRESS_DIR) + +# external library include paths +CFLAGS += -I$(SRCDIR)/qat_adf +CFLAGS += -I$(SRCDIR) +CFLAGS += -I$(QAT_CRYPTO_DIR) +CFLAGS += -I$(QAT_COMPRESS_DIR) + + +ifeq ($(CONFIG_RTE_LIBRTE_COMPRESSDEV),y) + CFLAGS += -DALLOW_EXPERIMENTAL_API + LDLIBS += -lrte_compressdev + SRCS-y += qat_comp.c + SRCS-y += qat_comp_pmd.c + build_qat = yes +endif + +# library symmetric crypto source files +ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y) +ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y) + LDLIBS += -lrte_cryptodev + LDLIBS += -lcrypto + CFLAGS += -DBUILD_QAT_SYM + SRCS-y += qat_sym.c + SRCS-y += qat_sym_session.c + SRCS-y += qat_sym_pmd.c + build_qat = yes +endif +endif + +ifdef build_qat + + # library name + LIB = librte_pmd_qat.a + + # library version + LIBABIVER := 1 + # build flags + CFLAGS += $(WERROR_FLAGS) + CFLAGS += -O3 + + # library common source files + SRCS-y += qat_device.c + SRCS-y += qat_common.c + SRCS-y += qat_logs.c + SRCS-y += qat_qp.c + + LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool + LDLIBS += -lrte_pci -lrte_bus_pci + + # export include files + SYMLINK-y-include += + + # versioning export map + EXPORT_MAP := ../../compress/qat/rte_pmd_qat_version.map +endif + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build new file mode 100644 index 00000000..80b6b25a --- /dev/null +++ 
b/drivers/common/qat/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+# This does not build a driver, but instead holds common files for
+# the crypto and compression drivers.
+build = false
+qat_deps = ['bus_pci']
+qat_sources = files('qat_common.c',
+	'qat_qp.c',
+	'qat_device.c',
+	'qat_logs.c')
+qat_includes = [include_directories('.', 'qat_adf')]
+qat_ext_deps = []
+qat_cflags = []
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
new file mode 100644
index 00000000..1eef5513
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include <rte_io.h>
+
+/* CSR write macro */
+#define ADF_CSR_WR(csrAddr, csrOffset, val) \
+	rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
+
+/* CSR read macro */
+#define ADF_CSR_RD(csrAddr, csrOffset) \
+	rte_read32((((uint8_t *)csrAddr) + csrOffset))
+
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+#define ADF_RING_EMPTY_SIG_BYTE 0x7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Maximum number of qps on a device for any service type */
+#define ADF_MAX_QPS_ON_ANY_SERVICE 2
+#define ADF_RING_DIR_TX 0
+#define ADF_RING_DIR_RX 1
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
+	ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+	SIZE) & ~0x4)
+/* Max outstanding requests */
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+	((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
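+/* Editor's worked example (not part of the original patch): a ring
+ * created with ADF_RING_SIZE_16K (0x08) spans
+ * ADF_SIZE_TO_RING_SIZE_IN_BYTES(0x08) = (1 << 7) << 7 = 16384 bytes.
+ * With 64-byte messages (ADF_MSG_SIZE_64, 0x02), ADF_SIZE_TO_POW()
+ * yields 2, so ADF_MAX_INFLIGHTS(0x08, 0x02) =
+ * (((1 << 7) << 3) >> 2) - 1 = 255: the ring holds 256 messages and
+ * one slot is kept back so a full ring stays distinguishable from an
+ * empty one.
+ */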
+#define BUILD_RING_CONFIG(size) \
+	((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+	| (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+	| size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+	((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
+	| (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+	| size)
+#define BUILD_RING_BASE_ADDR(addr, size) \
+	((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_RING_HEAD + (ring << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_RING_TAIL + (ring << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
+} while (0)
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_RING_HEAD + (ring << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_RING_TAIL + (ring << 2), value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+	ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+	ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_INT_COL_CTL, \
+			ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_INT_FLAG_AND_COL, value)
+
+#endif /* ADF_TRANSPORT_ACCESS_MACROS_H */
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
new file mode 100644
index 00000000..8f7cb37b
--- /dev/null
+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos,
mask) \ +{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \ + (((val) & (mask)) << (bitpos))) ; } + +#define QAT_FIELD_GET(flags, bitpos, mask) \ + (((flags) >> (bitpos)) & (mask)) + +#define ICP_QAT_FW_REQ_DEFAULT_SZ 128 +#define ICP_QAT_FW_RESP_DEFAULT_SZ 32 +#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8 +#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF +#define ICP_QAT_FW_NUM_LONGWORDS_1 1 +#define ICP_QAT_FW_NUM_LONGWORDS_2 2 +#define ICP_QAT_FW_NUM_LONGWORDS_3 3 +#define ICP_QAT_FW_NUM_LONGWORDS_4 4 +#define ICP_QAT_FW_NUM_LONGWORDS_5 5 +#define ICP_QAT_FW_NUM_LONGWORDS_6 6 +#define ICP_QAT_FW_NUM_LONGWORDS_7 7 +#define ICP_QAT_FW_NUM_LONGWORDS_10 10 +#define ICP_QAT_FW_NUM_LONGWORDS_13 13 +#define ICP_QAT_FW_NULL_REQ_SERV_ID 1 + +enum icp_qat_fw_comn_resp_serv_id { + ICP_QAT_FW_COMN_RESP_SERV_NULL, + ICP_QAT_FW_COMN_RESP_SERV_CPM_FW, + ICP_QAT_FW_COMN_RESP_SERV_DELIMITER +}; + +enum icp_qat_fw_comn_request_id { + ICP_QAT_FW_COMN_REQ_NULL = 0, + ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3, + ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4, + ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7, + ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9, + ICP_QAT_FW_COMN_REQ_DELIMITER +}; + +struct icp_qat_fw_comn_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t serv_specif_fields[4]; + } s1; + } u; +}; + +struct icp_qat_fw_comn_req_mid { + uint64_t opaque_data; + uint64_t src_data_addr; + uint64_t dest_data_addr; + uint32_t src_length; + uint32_t dst_length; +}; + +struct icp_qat_fw_comn_req_cd_ctrl { + uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; +}; + +struct icp_qat_fw_comn_req_hdr { + uint8_t resrvd1; + uint8_t service_cmd_id; + uint8_t service_type; + uint8_t hdr_flags; + uint16_t serv_specif_flags; + uint16_t comn_req_flags; +}; + +struct icp_qat_fw_comn_req_rqpars { + uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; +}; + +struct icp_qat_fw_comn_req { + struct icp_qat_fw_comn_req_hdr comn_hdr; + struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; + struct icp_qat_fw_comn_req_mid comn_mid; + struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; + struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; +}; + +struct icp_qat_fw_comn_error { + uint8_t xlat_err_code; + uint8_t cmp_err_code; +}; + +struct icp_qat_fw_comn_resp_hdr { + uint8_t resrvd1; + uint8_t service_id; + uint8_t response_type; + uint8_t hdr_flags; + struct icp_qat_fw_comn_error comn_error; + uint8_t comn_status; + uint8_t cmd_id; +}; + +struct icp_qat_fw_comn_resp { + struct icp_qat_fw_comn_resp_hdr comn_hdr; + uint64_t opaque_data; + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; +}; + +#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1 +#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0 +#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7 +#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1 +#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F +#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6 +#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1 +#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5 +#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1 + +#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \ + icp_qat_fw_comn_req_hdr_t.service_type + +#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \ + icp_qat_fw_comn_req_hdr_t.service_type = val + +#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \ + icp_qat_fw_comn_req_hdr_t.service_cmd_id + +#define 
ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \ + icp_qat_fw_comn_req_hdr_t.service_cmd_id = val + +#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \ + ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags) + +#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_CNVNR_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_CNV_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \ + ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) + +#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_VALID_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \ + (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK) + +#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \ + QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_VALID_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \ + (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) + +#define QAT_COMN_PTR_TYPE_BITPOS 0 +#define QAT_COMN_PTR_TYPE_MASK 0x1 +#define QAT_COMN_CD_FLD_TYPE_BITPOS 1 +#define QAT_COMN_CD_FLD_TYPE_MASK 0x1 +#define QAT_COMN_PTR_TYPE_FLAT 0x0 +#define QAT_COMN_PTR_TYPE_SGL 0x1 +#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0 +#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1 + +#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \ + ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \ + | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS)) + +#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK) + +#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \ + QAT_COMN_CD_FLD_TYPE_MASK) + +#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \ + QAT_COMN_PTR_TYPE_MASK) + +#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \ + QAT_COMN_CD_FLD_TYPE_MASK) + +#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4 +#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0 +#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0 +#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F + +#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ + >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ + { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)); } + +#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \ + { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); } + +#define ICP_QAT_FW_COMN_NEXT_ID_SET_2(next_curr_id, val) \ + do { \ + (next_curr_id) = \ + (((next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + (((val) << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) & \ + ICP_QAT_FW_COMN_NEXT_ID_MASK)) \ + } while (0) + +#define ICP_QAT_FW_COMN_CURR_ID_SET_2(next_curr_id, val) \ + do { \ + (next_curr_id) = \ + 
(((next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) \ + } while (0) + +#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 +#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 +#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6 +#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1 +#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 +#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 +#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 +#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1 +#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3 +#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1 +#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2 +#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1 +#define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0 +#define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1 + +#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \ + QAT_COMN_RESP_CRYPTO_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_PKE_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_PKE_STATUS_BITPOS, \ + QAT_COMN_RESP_PKE_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \ + QAT_COMN_RESP_CMP_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \ + QAT_COMN_RESP_XLAT_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_XLT_WA_APPLIED_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS, \ + QAT_COMN_RESP_XLT_WA_APPLIED_MASK) + +#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \ + QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) + +#define ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \ + QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK) + +#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0 +#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1 +#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0 +#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1 +#define ERR_CODE_NO_ERROR 0 +#define ERR_CODE_INVALID_BLOCK_TYPE -1 +#define ERR_CODE_NO_MATCH_ONES_COMP -2 +#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3 +#define ERR_CODE_INCOMPLETE_LEN -4 +#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5 +#define ERR_CODE_RPT_GT_SPEC_LEN -6 +#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7 +#define ERR_CODE_INV_DIS_CODE_LEN -8 +#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9 +#define ERR_CODE_DIS_TOO_FAR_BACK -10 +#define ERR_CODE_OVERFLOW_ERROR -11 +#define ERR_CODE_SOFT_ERROR -12 +#define ERR_CODE_FATAL_ERROR -13 +#define ERR_CODE_COMP_OUTPUT_CORRUPTION -14 +#define ERR_CODE_HW_INCOMPLETE_FILE -15 +#define ERR_CODE_SSM_ERROR -16 +#define ERR_CODE_ENDPOINT_ERROR -17 +#define ERR_CODE_CNV_ERROR -18 +#define ERR_CODE_EMPTY_DYM_BLOCK -19 +#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_HANDLE -20 +#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_HMAC_FAILED -21 +#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_WRAPPING_ALGO -22 +#define ERR_CODE_KPT_DRNG_SEED_NOT_LOAD -23 + +enum icp_qat_fw_slice { + ICP_QAT_FW_SLICE_NULL = 0, + ICP_QAT_FW_SLICE_CIPHER = 1, + ICP_QAT_FW_SLICE_AUTH = 2, + ICP_QAT_FW_SLICE_DRAM_RD = 3, + ICP_QAT_FW_SLICE_DRAM_WR = 4, + ICP_QAT_FW_SLICE_COMP = 5, + ICP_QAT_FW_SLICE_XLAT = 6, + ICP_QAT_FW_SLICE_DELIMITER +}; +#endif diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_comp.h b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h new file mode 100644 index 00000000..81381772 --- /dev/null +++ 
b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_FW_COMP_H_
+#define _ICP_QAT_FW_COMP_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_comp_cmd_id {
+	ICP_QAT_FW_COMP_CMD_STATIC = 0,
+	/*!< Static Compress Request */
+
+	ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
+	/*!< Dynamic Compress Request */
+
+	ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
+	/*!< Decompress Request */
+
+	ICP_QAT_FW_COMP_CMD_DELIMITER
+	/**< Delimiter type */
+};
+
+/**< Flag usage */
+
+#define ICP_QAT_FW_COMP_STATELESS_SESSION 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that the session is stateless
+ */
+
+#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that the session is stateful
+ */
+
+#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that autoselectbest is NOT used
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that enhanced autoselectbest is NOT used
+ */
+
+#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that enhanced autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that type0 header data return is NOT disabled
+ * under enhanced autoselectbest
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that type0 header data return is disabled
+ * under enhanced autoselectbest
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that use of secure RAM as an intermediate
+ * buffer is DISABLED.
+ */
+
+#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that use of secure RAM as an intermediate
+ * buffer is ENABLED.
+ */
+
+/**< Flag mask & bit position */
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the session type
+ */
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine the session type
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for auto select best
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for auto select best
+ */
+
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for enhanced auto select best
+ */
+
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for enhanced auto select best
+ */
+
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for disabling type zero header write back
+ * when enhanced autoselect best is enabled. If set, firmware does
+ * not return the type0 store block header and only copies src to dest
+ * (if the best output is type0).
+ */
+
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the disable type0 header write back flag
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the flag used to disable secure RAM from
+ * being used as an intermediate buffer.
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the flag disabling use of secure RAM as an
+ * intermediate buffer.
+ */
+
+#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
+	ret_uncomp, secure_ram) \
+	((((sesstype)&ICP_QAT_FW_COMP_SESSION_TYPE_MASK) \
+	<< ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \
+	(((autoselect)&ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) \
+	<< ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \
+	(((enhanced_asb)&ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) \
+	<< ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \
+	(((ret_uncomp)&ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) \
+	<< ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \
+	(((secure_ram)&ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) \
+	<< ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))
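+/* Editor's illustration (not part of the original patch): a stateless
+ * session with every optional feature left off builds a zero flag
+ * word, since each constant passed here is 0:
+ *
+ *   ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
+ *	ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+ *	ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+ *	ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+ *	ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF) == 0
+ *
+ * Passing ICP_QAT_FW_COMP_STATEFUL_SESSION instead would set bit 2,
+ * giving 0x4.
+ */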
+union icp_qat_fw_comp_req_hdr_cd_pars {
+	/**< LWs 2-5 */
+	struct {
+		uint64_t content_desc_addr;
+		/**< Address of the content descriptor */
+
+		uint16_t content_desc_resrvd1;
+		/**< Content descriptor reserved field */
+
+		uint8_t content_desc_params_sz;
+		/**< Size of the content descriptor parameters in quad words.
+		 * These parameters describe the session setup configuration
+		 * info for the slices that this request relies upon i.e.
+		 * the configuration word and cipher key needed by the cipher
+		 * slice if there is a request for cipher processing.
+		 */
+
+		uint8_t content_desc_hdr_resrvd2;
+		/**< Content descriptor reserved field */
+
+		uint32_t content_desc_resrvd3;
+		/**< Content descriptor reserved field */
+	} s;
+
+	struct {
+		uint32_t comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2];
+		/* Compression Slice Config Word */
+
+		uint32_t content_desc_resrvd4;
+		/**< Content descriptor reserved field */
+
+	} sl;
+
+};
+
+struct icp_qat_fw_comp_req_params {
+	/**< LW 14 */
+	uint32_t comp_len;
+	/**< Size of input to process in bytes. Note: only EOP requests can
+	 * be odd-sized for decompression; IA must set the LSB to zero for
+	 * odd-sized intermediate inputs.
+	 */
+
+	/**< LW 15 */
+	uint32_t out_buffer_sz;
+	/**< Size of output buffer in bytes */
+
+	/**< LW 16 */
+	uint32_t initial_crc32;
+	/**< CRC of previously processed bytes */
+
+	/**< LW 17 */
+	uint32_t initial_adler;
+	/**< Adler of previously processed bytes */
+
+	/**< LW 18 */
+	uint32_t req_par_flags;
+
+	/**< LW 19 */
+	uint32_t rsrvd;
+};
+
+#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr) \
+	((((sop)&ICP_QAT_FW_COMP_SOP_MASK) << ICP_QAT_FW_COMP_SOP_BITPOS) | \
+	(((eop)&ICP_QAT_FW_COMP_EOP_MASK) << ICP_QAT_FW_COMP_EOP_BITPOS) | \
+	(((bfinal)&ICP_QAT_FW_COMP_BFINAL_MASK) \
+	<< ICP_QAT_FW_COMP_BFINAL_BITPOS) | \
+	((cnv & ICP_QAT_FW_COMP_CNV_MASK) << ICP_QAT_FW_COMP_CNV_BITPOS) | \
+	((cnvnr & ICP_QAT_FW_COMP_CNV_RECOVERY_MASK) \
+	<< ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS))
+
+#define ICP_QAT_FW_COMP_NOT_SOP 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request is NOT Start of Packet
+ */
+
+#define ICP_QAT_FW_COMP_SOP 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request IS Start of Packet
+ */
+
+#define ICP_QAT_FW_COMP_NOT_EOP 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request is NOT End of Packet
+ */
+
+#define ICP_QAT_FW_COMP_EOP 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request IS End of Packet
+ */
+
+#define ICP_QAT_FW_COMP_NOT_BFINAL 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating to firmware that this is NOT the last block
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating to firmware that this IS the last block
+ */
+
+#define ICP_QAT_FW_COMP_NO_CNV 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that NO cnv check is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_CNV 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that a cnv check IS to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that NO cnv recovery is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that a cnv recovery is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_SOP_BITPOS 0
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for SOP
+ */
+
+#define ICP_QAT_FW_COMP_SOP_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine SOP
+ */
+
+#define ICP_QAT_FW_COMP_EOP_BITPOS 1
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for EOP
+ */
+
+#define ICP_QAT_FW_COMP_EOP_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine EOP
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the bfinal bit
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the bfinal bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the CNV bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_BITPOS 16
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the CNV bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the CNV Recovery bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS 17
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the CNV Recovery bit
+ */
+
+struct icp_qat_fw_xlt_req_params {
+	/**< LWs 20-21 */
+	uint64_t inter_buff_ptr;
+	/**< This
field specifies the physical address of an intermediate + * buffer SGL array. The array contains a pair of 64-bit + * intermediate buffer pointers to SGL buffer descriptors, one pair + * per CPM. Please refer to the CPM1.6 Firmware Interface HLD + * specification for more details. + */ +}; + + +struct icp_qat_fw_comp_cd_hdr { + /**< LW 24 */ + uint16_t ram_bank_flags; + /**< Flags to show which ram banks to access */ + + uint8_t comp_cfg_offset; + /**< Quad word offset from the content descriptor parameters address + * to the parameters for the compression processing + */ + + uint8_t next_curr_id; + /**< This field combines the next and current id (each four bits) - + * the next id is the most significant nibble. + * Next Id: Set to the next slice to pass the compressed data through. + * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through + * anymore slices after compression + * Current Id: Initialised with the compression slice type + */ + + /**< LW 25 */ + uint32_t resrvd; + /**< LWs 26-27 */ + + uint64_t comp_state_addr; + /**< Pointer to compression state */ + + /**< LWs 28-29 */ + uint64_t ram_banks_addr; + /**< Pointer to banks */ + +}; + + +struct icp_qat_fw_xlt_cd_hdr { + /**< LW 30 */ + uint16_t resrvd1; + /**< Reserved field and assumed set to 0 */ + + uint8_t resrvd2; + /**< Reserved field and assumed set to 0 */ + + uint8_t next_curr_id; + /**< This field combines the next and current id (each four bits) - + * the next id is the most significant nibble. + * Next Id: Set to the next slice to pass the translated data through. + * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through + * any more slices after compression + * Current Id: Initialised with the translation slice type + */ + + /**< LW 31 */ + uint32_t resrvd3; + /**< Reserved and should be set to zero, needed for quadword + * alignment + */ +}; + +struct icp_qat_fw_comp_req { + /**< LWs 0-1 */ + struct icp_qat_fw_comn_req_hdr comn_hdr; + /**< Common request header - for Service Command Id, + * use service-specific Compression Command Id. + * Service Specific Flags - use Compression Command Flags + */ + + /**< LWs 2-5 */ + union icp_qat_fw_comp_req_hdr_cd_pars cd_pars; + /**< Compression service-specific content descriptor field which points + * either to a content descriptor parameter block or contains the + * compression slice config word. + */ + + /**< LWs 6-13 */ + struct icp_qat_fw_comn_req_mid comn_mid; + /**< Common request middle section */ + + /**< LWs 14-19 */ + struct icp_qat_fw_comp_req_params comp_pars; + /**< Compression request Parameters block */ + + /**< LWs 20-21 */ + union { + struct icp_qat_fw_xlt_req_params xlt_pars; + /**< Translation request Parameters block */ + uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2]; + /**< Reserved if not used for translation */ + + } u1; + + /**< LWs 22-23 */ + union { + uint32_t resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2]; + /**< Reserved - not used if Batch and Pack is disabled.*/ + + uint64_t bnp_res_table_addr; + /**< A generic pointer to the unbounded list of + * icp_qat_fw_resp_comp_pars members. This pointer is only + * used when the Batch and Pack is enabled. 
+ */ + } u3; + + /**< LWs 24-29 */ + struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl; + /**< Compression request content descriptor control block header */ + + /**< LWs 30-31 */ + union { + struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl; + /**< Translation request content descriptor + * control block header + */ + + uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2]; + /**< Reserved if not used for translation */ + } u2; +}; + +struct icp_qat_fw_resp_comp_pars { + /**< LW 4 */ + uint32_t input_byte_counter; + /**< Input byte counter */ + + /**< LW 5 */ + uint32_t output_byte_counter; + /**< Output byte counter */ + + /**< LW 6 & 7*/ + union { + uint64_t curr_chksum; + struct { + /**< LW 6 */ + uint32_t curr_crc32; + /**< LW 7 */ + uint32_t curr_adler_32; + }; + }; +}; + +struct icp_qat_fw_comp_resp { + /**< LWs 0-1 */ + struct icp_qat_fw_comn_resp_hdr comn_resp; + /**< Common interface response format see icp_qat_fw.h */ + + /**< LWs 2-3 */ + uint64_t opaque_data; + /**< Opaque data passed from the request to the response message */ + + /**< LWs 4-7 */ + struct icp_qat_fw_resp_comp_pars comp_resp_pars; + /**< Common response params (checksums and byte counts) */ +}; + +#endif diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h new file mode 100644 index 00000000..c33bc3fe --- /dev/null +++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h @@ -0,0 +1,361 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2015-2018 Intel Corporation + */ +#ifndef _ICP_QAT_FW_LA_H_ +#define _ICP_QAT_FW_LA_H_ +#include "icp_qat_fw.h" + +enum icp_qat_fw_la_cmd_id { + ICP_QAT_FW_LA_CMD_CIPHER = 0, + ICP_QAT_FW_LA_CMD_AUTH = 1, + ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2, + ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3, + ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4, + ICP_QAT_FW_LA_CMD_TRNG_TEST = 5, + ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6, + ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7, + ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8, + ICP_QAT_FW_LA_CMD_MGF1 = 9, + ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10, + ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11, + ICP_QAT_FW_LA_CMD_DELIMITER = 12 +}; + +#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK +#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR +#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK +#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR + +struct icp_qat_fw_la_bulk_req { + struct icp_qat_fw_comn_req_hdr comn_hdr; + struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; + struct icp_qat_fw_comn_req_mid comn_mid; + struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; + struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; +}; + +#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1 +#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0 +#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12 +#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1 +#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1 +#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11 +#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1 +#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1 +#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0 +#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10 +#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1 +#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4 +#define ICP_QAT_FW_LA_GCM_PROTO 2 +#define ICP_QAT_FW_LA_CCM_PROTO 1 +#define ICP_QAT_FW_LA_NO_PROTO 0 +#define QAT_LA_PROTO_BITPOS 7 +#define QAT_LA_PROTO_MASK 0x7 +#define ICP_QAT_FW_LA_CMP_AUTH_RES 1 +#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0 +#define QAT_LA_CMP_AUTH_RES_BITPOS 6 +#define QAT_LA_CMP_AUTH_RES_MASK 0x1 +#define 
ICP_QAT_FW_LA_RET_AUTH_RES 1 +#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0 +#define QAT_LA_RET_AUTH_RES_BITPOS 5 +#define QAT_LA_RET_AUTH_RES_MASK 0x1 +#define ICP_QAT_FW_LA_UPDATE_STATE 1 +#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0 +#define QAT_LA_UPDATE_STATE_BITPOS 4 +#define QAT_LA_UPDATE_STATE_MASK 0x1 +#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0 +#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1 +#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3 +#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1 +#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0 +#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1 +#define QAT_LA_CIPH_IV_FLD_BITPOS 2 +#define QAT_LA_CIPH_IV_FLD_MASK 0x1 +#define ICP_QAT_FW_LA_PARTIAL_NONE 0 +#define ICP_QAT_FW_LA_PARTIAL_START 1 +#define ICP_QAT_FW_LA_PARTIAL_MID 3 +#define ICP_QAT_FW_LA_PARTIAL_END 2 +#define QAT_LA_PARTIAL_BITPOS 0 +#define QAT_LA_PARTIAL_MASK 0x3 +#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \ + cmp_auth, ret_auth, update_state, \ + ciph_iv, ciphcfg, partial) \ + (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \ + QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \ + ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \ + QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \ + ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \ + QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \ + ((proto & QAT_LA_PROTO_MASK) << \ + QAT_LA_PROTO_BITPOS) | \ + ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \ + QAT_LA_CMP_AUTH_RES_BITPOS) | \ + ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \ + QAT_LA_RET_AUTH_RES_BITPOS) | \ + ((update_state & QAT_LA_UPDATE_STATE_MASK) << \ + QAT_LA_UPDATE_STATE_BITPOS) | \ + ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \ + QAT_LA_CIPH_IV_FLD_BITPOS) | \ + ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \ + QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \ + ((partial & QAT_LA_PARTIAL_MASK) << \ + QAT_LA_PARTIAL_BITPOS)) + +#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \ + QAT_LA_CIPH_IV_FLD_MASK) + +#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ + QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) + +#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ + QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) + +#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ + QAT_LA_GCM_IV_LEN_FLAG_MASK) + +#define ICP_QAT_FW_LA_PROTO_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK) + +#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \ + QAT_LA_CMP_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \ + QAT_LA_RET_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ + QAT_LA_DIGEST_IN_BUFFER_MASK) + +#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \ + QAT_LA_UPDATE_STATE_MASK) + +#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \ + QAT_LA_PARTIAL_MASK) + +#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \ + QAT_LA_CIPH_IV_FLD_MASK) + +#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ + QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) + +#define 
ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ + QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) + +#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ + QAT_LA_GCM_IV_LEN_FLAG_MASK) + +#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \ + QAT_LA_PROTO_MASK) + +#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \ + QAT_LA_CMP_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \ + QAT_LA_RET_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ + QAT_LA_DIGEST_IN_BUFFER_MASK) + +#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \ + QAT_LA_UPDATE_STATE_MASK) + +#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \ + QAT_LA_PARTIAL_MASK) + +struct icp_qat_fw_cipher_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + } s1; + } u; +}; + +struct icp_qat_fw_cipher_auth_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + } sl; + } u; +}; + +struct icp_qat_fw_cipher_cd_ctrl_hdr { + uint8_t cipher_state_sz; + uint8_t cipher_key_sz; + uint8_t cipher_cfg_offset; + uint8_t next_curr_id; + uint8_t cipher_padding_sz; + uint8_t resrvd1; + uint16_t resrvd2; + uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; +}; + +struct icp_qat_fw_auth_cd_ctrl_hdr { + uint32_t resrvd1; + uint8_t resrvd2; + uint8_t hash_flags; + uint8_t hash_cfg_offset; + uint8_t next_curr_id; + uint8_t resrvd3; + uint8_t outer_prefix_sz; + uint8_t final_sz; + uint8_t inner_res_sz; + uint8_t resrvd4; + uint8_t inner_state1_sz; + uint8_t inner_state2_offset; + uint8_t inner_state2_sz; + uint8_t outer_config_offset; + uint8_t outer_state1_sz; + uint8_t outer_res_sz; + uint8_t outer_prefix_offset; +}; + +struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { + uint8_t cipher_state_sz; + uint8_t cipher_key_sz; + uint8_t cipher_cfg_offset; + uint8_t next_curr_id_cipher; + uint8_t cipher_padding_sz; + uint8_t hash_flags; + uint8_t hash_cfg_offset; + uint8_t next_curr_id_auth; + uint8_t resrvd1; + uint8_t outer_prefix_sz; + uint8_t final_sz; + uint8_t inner_res_sz; + uint8_t resrvd2; + uint8_t inner_state1_sz; + uint8_t inner_state2_offset; + uint8_t inner_state2_sz; + uint8_t outer_config_offset; + uint8_t outer_state1_sz; + uint8_t outer_res_sz; + uint8_t outer_prefix_offset; +}; + +#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1 +#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0 +#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240 +#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \ + (sizeof(struct icp_qat_fw_la_cipher_req_params_t)) +#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0) + +struct icp_qat_fw_la_cipher_req_params { + uint32_t cipher_offset; + uint32_t cipher_length; + union { + uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + 
struct { + uint64_t cipher_IV_ptr; + uint64_t resrvd1; + } s; + } u; +}; + +struct icp_qat_fw_la_auth_req_params { + uint32_t auth_off; + uint32_t auth_len; + union { + uint64_t auth_partial_st_prefix; + uint64_t aad_adr; + } u1; + uint64_t auth_res_addr; + union { + uint8_t inner_prefix_sz; + uint8_t aad_sz; + } u2; + uint8_t resrvd1; + uint8_t hash_state_sz; + uint8_t auth_res_sz; +} __rte_packed; + +struct icp_qat_fw_la_auth_req_params_resrvd_flds { + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; + union { + uint8_t inner_prefix_sz; + uint8_t aad_sz; + } u2; + uint8_t resrvd1; + uint16_t resrvd2; +}; + +struct icp_qat_fw_la_resp { + struct icp_qat_fw_comn_resp_hdr comn_resp; + uint64_t opaque_data; + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; +}; + +#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \ + ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } + +#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } + +#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ + >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_auth = \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } + +#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_auth = \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } + +#endif diff --git a/drivers/common/qat/qat_adf/icp_qat_hw.h b/drivers/common/qat/qat_adf/icp_qat_hw.h new file mode 100644 index 00000000..e7961dba --- /dev/null +++ b/drivers/common/qat/qat_adf/icp_qat_hw.h @@ -0,0 +1,386 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2015-2018 Intel Corporation + */ +#ifndef _ICP_QAT_HW_H_ +#define _ICP_QAT_HW_H_ + +enum icp_qat_hw_ae_id { + ICP_QAT_HW_AE_0 = 0, + ICP_QAT_HW_AE_1 = 1, + ICP_QAT_HW_AE_2 = 2, + ICP_QAT_HW_AE_3 = 3, + ICP_QAT_HW_AE_4 = 4, + ICP_QAT_HW_AE_5 = 5, + ICP_QAT_HW_AE_6 = 6, + ICP_QAT_HW_AE_7 = 7, + ICP_QAT_HW_AE_8 = 8, + ICP_QAT_HW_AE_9 = 9, + ICP_QAT_HW_AE_10 = 10, + ICP_QAT_HW_AE_11 = 11, + ICP_QAT_HW_AE_DELIMITER = 12 +}; + +enum icp_qat_hw_qat_id { + ICP_QAT_HW_QAT_0 = 0, + ICP_QAT_HW_QAT_1 = 1, + ICP_QAT_HW_QAT_2 = 2, + ICP_QAT_HW_QAT_3 = 3, + ICP_QAT_HW_QAT_4 = 4, + ICP_QAT_HW_QAT_5 = 5, + ICP_QAT_HW_QAT_DELIMITER = 6 +}; + +enum icp_qat_hw_auth_algo { + ICP_QAT_HW_AUTH_ALGO_NULL = 0, + ICP_QAT_HW_AUTH_ALGO_SHA1 = 1, + ICP_QAT_HW_AUTH_ALGO_MD5 = 2, + ICP_QAT_HW_AUTH_ALGO_SHA224 = 3, + ICP_QAT_HW_AUTH_ALGO_SHA256 = 4, + ICP_QAT_HW_AUTH_ALGO_SHA384 = 5, + 
ICP_QAT_HW_AUTH_ALGO_SHA512 = 6, + ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7, + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8, + ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9, + ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10, + ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11, + ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12, + ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13, + ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14, + ICP_QAT_HW_AUTH_RESERVED_1 = 15, + ICP_QAT_HW_AUTH_RESERVED_2 = 16, + ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17, + ICP_QAT_HW_AUTH_RESERVED_3 = 18, + ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19, + ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20 +}; + +enum icp_qat_hw_auth_mode { + ICP_QAT_HW_AUTH_MODE0 = 0, + ICP_QAT_HW_AUTH_MODE1 = 1, + ICP_QAT_HW_AUTH_MODE2 = 2, + ICP_QAT_HW_AUTH_MODE_DELIMITER = 3 +}; + +struct icp_qat_hw_auth_config { + uint32_t config; + uint32_t reserved; +}; + +#define QAT_AUTH_MODE_BITPOS 4 +#define QAT_AUTH_MODE_MASK 0xF +#define QAT_AUTH_ALGO_BITPOS 0 +#define QAT_AUTH_ALGO_MASK 0xF +#define QAT_AUTH_CMP_BITPOS 8 +#define QAT_AUTH_CMP_MASK 0x7F +#define QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS 16 +#define QAT_AUTH_SHA3_PADDING_DISABLE_MASK 0x1 +#define QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS 17 +#define QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK 0x1 +#define QAT_AUTH_ALGO_SHA3_BITPOS 22 +#define QAT_AUTH_ALGO_SHA3_MASK 0x3 +#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS 16 +#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK 0xF +#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS 24 +#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK 0xFF +#define QAT_AUTH_SHA3_HW_PADDING_ENABLE 0 +#define QAT_AUTH_SHA3_HW_PADDING_DISABLE 1 +#define QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT 0 +#define QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT 0 +#define QAT_AUTH_SHA3_PADDING_OVERRIDE_PROGRAMMABLE 1 +#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED 0 +#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED 0 + +#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \ + ((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \ + (((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \ + (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) \ + << QAT_AUTH_ALGO_SHA3_BITPOS) | \ + (((QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT) & \ + QAT_AUTH_SHA3_PADDING_DISABLE_MASK) \ + << QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS) | \ + (((QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT) & \ + QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK) \ + << QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS) | \ + (((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS)) + +#define ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER \ + ((((QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED) & \ + QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK) \ + << QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS) | \ + (((QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED) & \ + QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK) \ + << QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS)) + +struct icp_qat_hw_auth_counter { + uint32_t counter; + uint32_t reserved; +}; + +#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF +#define QAT_AUTH_COUNT_BITPOS 0 +#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \ + (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS) + +struct icp_qat_hw_auth_setup { + struct icp_qat_hw_auth_config auth_config; + struct icp_qat_hw_auth_counter auth_counter; +}; + +#define QAT_HW_DEFAULT_ALIGNMENT 8 +#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1))) +#define ICP_QAT_HW_NULL_STATE1_SZ 32 +#define ICP_QAT_HW_MD5_STATE1_SZ 16 +#define ICP_QAT_HW_SHA1_STATE1_SZ 20 +#define ICP_QAT_HW_SHA224_STATE1_SZ 32 +#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28 +#define ICP_QAT_HW_SHA256_STATE1_SZ 32 +#define 
ICP_QAT_HW_SHA3_256_STATE1_SZ 32 +#define ICP_QAT_HW_SHA384_STATE1_SZ 64 +#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48 +#define ICP_QAT_HW_SHA512_STATE1_SZ 64 +#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64 +#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16 +#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16 +#define ICP_QAT_HW_AES_F9_STATE1_SZ 32 +#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16 +#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16 +#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8 +#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8 + +#define ICP_QAT_HW_NULL_STATE2_SZ 32 +#define ICP_QAT_HW_MD5_STATE2_SZ 16 +#define ICP_QAT_HW_SHA1_STATE2_SZ 20 +#define ICP_QAT_HW_SHA224_STATE2_SZ 32 +#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0 +#define ICP_QAT_HW_SHA256_STATE2_SZ 32 +#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0 +#define ICP_QAT_HW_SHA384_STATE2_SZ 64 +#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0 +#define ICP_QAT_HW_SHA512_STATE2_SZ 64 +#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0 +#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48 +#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16 +#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16 +#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16 +#define ICP_QAT_HW_F9_IK_SZ 16 +#define ICP_QAT_HW_F9_FK_SZ 16 +#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \ + ICP_QAT_HW_F9_FK_SZ) +#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ +#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24 +#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32 +#define ICP_QAT_HW_GALOIS_H_SZ 16 +#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8 +#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16 + +struct icp_qat_hw_auth_sha512 { + struct icp_qat_hw_auth_setup inner_setup; + uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ]; + struct icp_qat_hw_auth_setup outer_setup; + uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ]; +}; + +struct icp_qat_hw_auth_sha3_512 { + struct icp_qat_hw_auth_setup inner_setup; + uint8_t state1[ICP_QAT_HW_SHA3_512_STATE1_SZ]; + struct icp_qat_hw_auth_setup outer_setup; +}; + +struct icp_qat_hw_auth_algo_blk { + struct icp_qat_hw_auth_sha512 sha; +}; + +#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0 +#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF + +enum icp_qat_hw_cipher_algo { + ICP_QAT_HW_CIPHER_ALGO_NULL = 0, + ICP_QAT_HW_CIPHER_ALGO_DES = 1, + ICP_QAT_HW_CIPHER_ALGO_3DES = 2, + ICP_QAT_HW_CIPHER_ALGO_AES128 = 3, + ICP_QAT_HW_CIPHER_ALGO_AES192 = 4, + ICP_QAT_HW_CIPHER_ALGO_AES256 = 5, + ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6, + ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7, + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8, + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9, + ICP_QAT_HW_CIPHER_DELIMITER = 10 +}; + +enum icp_qat_hw_cipher_mode { + ICP_QAT_HW_CIPHER_ECB_MODE = 0, + ICP_QAT_HW_CIPHER_CBC_MODE = 1, + ICP_QAT_HW_CIPHER_CTR_MODE = 2, + ICP_QAT_HW_CIPHER_F8_MODE = 3, + ICP_QAT_HW_CIPHER_XTS_MODE = 6, + ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7 +}; + +struct icp_qat_hw_cipher_config { + uint32_t val; + uint32_t reserved; +}; + +enum icp_qat_hw_cipher_dir { + ICP_QAT_HW_CIPHER_ENCRYPT = 0, + ICP_QAT_HW_CIPHER_DECRYPT = 1, +}; + +enum icp_qat_hw_auth_op { + ICP_QAT_HW_AUTH_VERIFY = 0, + ICP_QAT_HW_AUTH_GENERATE = 1, +}; + +enum icp_qat_hw_cipher_convert { + ICP_QAT_HW_CIPHER_NO_CONVERT = 0, + ICP_QAT_HW_CIPHER_KEY_CONVERT = 1, +}; + +#define QAT_CIPHER_MODE_BITPOS 4 +#define QAT_CIPHER_MODE_MASK 0xF +#define QAT_CIPHER_ALGO_BITPOS 0 +#define QAT_CIPHER_ALGO_MASK 0xF +#define QAT_CIPHER_CONVERT_BITPOS 9 +#define QAT_CIPHER_CONVERT_MASK 0x1 +#define QAT_CIPHER_DIR_BITPOS 8 +#define QAT_CIPHER_DIR_MASK 0x1 +#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2 +#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 
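2 + +/* Editor's sketch (not part of the upstream patch): typical use of the + * ICP_QAT_HW_CIPHER_CONFIG_BUILD macro defined just below; the values are + * illustrative only. + * + * struct icp_qat_hw_cipher_config cfg = { + * .val = ICP_QAT_HW_CIPHER_CONFIG_BUILD( + * ICP_QAT_HW_CIPHER_CBC_MODE, + * ICP_QAT_HW_CIPHER_ALGO_AES128, + * ICP_QAT_HW_CIPHER_KEY_CONVERT, + * ICP_QAT_HW_CIPHER_ENCRYPT), + * .reserved = 0, + * }; + */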
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \ + (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \ + ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \ + ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \ + ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS)) +#define ICP_QAT_HW_DES_BLK_SZ 8 +#define ICP_QAT_HW_3DES_BLK_SZ 8 +#define ICP_QAT_HW_NULL_BLK_SZ 8 +#define ICP_QAT_HW_AES_BLK_SZ 16 +#define ICP_QAT_HW_KASUMI_BLK_SZ 8 +#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8 +#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8 +#define ICP_QAT_HW_NULL_KEY_SZ 256 +#define ICP_QAT_HW_DES_KEY_SZ 8 +#define ICP_QAT_HW_3DES_KEY_SZ 24 +#define ICP_QAT_HW_AES_128_KEY_SZ 16 +#define ICP_QAT_HW_AES_192_KEY_SZ 24 +#define ICP_QAT_HW_AES_256_KEY_SZ 32 +#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ + QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ + QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) +#define ICP_QAT_HW_KASUMI_KEY_SZ 16 +#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_ARC4_KEY_SZ 256 +#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16 +#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16 +#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16 +#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16 +#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2 + +#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ + +/* These defines describe position of the bit-fields + * in the flags byte in B0 + */ +#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT 6 +#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT 3 + +#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q) \ + ((((Adata) > 0 ? 
1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \ + | ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \ + | ((q) - 1)) + +#define ICP_QAT_HW_CCM_NQ_CONST 15 +#define ICP_QAT_HW_CCM_AAD_B0_LEN 16 +#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2 +#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \ + ICP_QAT_HW_CCM_AAD_LEN_INFO) +#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16 +#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4 +#define ICP_QAT_HW_CCM_NONCE_OFFSET 1 + +struct icp_qat_hw_cipher_algo_blk { + struct icp_qat_hw_cipher_config cipher_config; + uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ]; +} __rte_cache_aligned; + +/* ========================================================================= */ +/* COMPRESSION SLICE */ +/* ========================================================================= */ + +enum icp_qat_hw_compression_direction { + ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0, + ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1, + ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2 +}; + +enum icp_qat_hw_compression_delayed_match { + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0, + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1, + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2 +}; + +enum icp_qat_hw_compression_algo { + ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0, + ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1, + ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2 +}; + + +enum icp_qat_hw_compression_depth { + ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0, + ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1, + ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2, + ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3, + ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 4 +}; + +enum icp_qat_hw_compression_file_type { + ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5 +}; + +struct icp_qat_hw_compression_config { + uint32_t val; + uint32_t reserved; +}; + +#define QAT_COMPRESSION_DIR_BITPOS 4 +#define QAT_COMPRESSION_DIR_MASK 0x7 +#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16 +#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1 +#define QAT_COMPRESSION_ALGO_BITPOS 31 +#define QAT_COMPRESSION_ALGO_MASK 0x1 +#define QAT_COMPRESSION_DEPTH_BITPOS 28 +#define QAT_COMPRESSION_DEPTH_MASK 0x7 +#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24 +#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF + +#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD( \ + dir, delayed, algo, depth, filetype) \ + ((((dir) & QAT_COMPRESSION_DIR_MASK) << QAT_COMPRESSION_DIR_BITPOS) | \ + (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) \ + << QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \ + (((algo) & QAT_COMPRESSION_ALGO_MASK) \ + << QAT_COMPRESSION_ALGO_BITPOS) | \ + (((depth) & QAT_COMPRESSION_DEPTH_MASK) \ + << QAT_COMPRESSION_DEPTH_BITPOS) | \ + (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) \ + << QAT_COMPRESSION_FILE_TYPE_BITPOS)) + +#endif diff --git a/drivers/common/qat/qat_common.c b/drivers/common/qat/qat_common.c new file mode 100644 index 00000000..47538669 --- /dev/null +++ b/drivers/common/qat/qat_common.c @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include "qat_common.h" +#include "qat_device.h" +#include "qat_logs.h" + +int +qat_sgl_fill_array(struct rte_mbuf *buf, int64_t offset, + void *list_in, uint32_t data_len, + const uint16_t max_segs) +{ + int res = -EINVAL; + uint32_t buf_len, nr; + struct qat_sgl *list = (struct qat_sgl *)list_in; +#if 
RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + uint8_t *virt_addr[max_segs]; +#endif + + for (nr = buf_len = 0; buf && nr < max_segs; buf = buf->next) { + if (offset >= rte_pktmbuf_data_len(buf)) { + offset -= rte_pktmbuf_data_len(buf); + continue; + } + + list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset; + list->buffers[nr].resrvd = 0; + list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + virt_addr[nr] = rte_pktmbuf_mtod_offset(buf, uint8_t*, offset); +#endif + offset = 0; + buf_len += list->buffers[nr].len; + + if (buf_len >= data_len) { + list->buffers[nr].len -= buf_len - data_len; + res = 0; + break; + } + ++nr; + } + + if (unlikely(res != 0)) { + if (nr == max_segs) { + QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)", + max_segs); + } else { + QAT_DP_LOG(ERR, "Mbuf chain is too short"); + } + } else { + + list->num_bufs = ++nr; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs); + for (nr = 0; nr < list->num_bufs; nr++) { + QAT_DP_LOG(INFO, + "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64, + nr, list->buffers[nr].len, + list->buffers[nr].addr); + QAT_DP_HEXDUMP_LOG(DEBUG, "qat SGL", + virt_addr[nr], + list->buffers[nr].len); + } +#endif + } + + return res; +} + +void qat_stats_get(struct qat_pci_device *dev, + struct qat_common_stats *stats, + enum qat_service_type service) +{ + int i; + struct qat_qp **qp; + + if (stats == NULL || dev == NULL || service >= QAT_SERVICE_INVALID) { + QAT_LOG(ERR, "invalid param: stats %p, dev %p, service %d", + stats, dev, service); + return; + } + + qp = dev->qps_in_use[service]; + for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) { + if (qp[i] == NULL) { + QAT_LOG(DEBUG, "Service %d Uninitialised qp %d", + service, i); + continue; + } + + stats->enqueued_count += qp[i]->stats.enqueued_count; + stats->dequeued_count += qp[i]->stats.dequeued_count; + stats->enqueue_err_count += qp[i]->stats.enqueue_err_count; + stats->dequeue_err_count += qp[i]->stats.dequeue_err_count; + } +} + +void qat_stats_reset(struct qat_pci_device *dev, + enum qat_service_type service) +{ + int i; + struct qat_qp **qp; + + if (dev == NULL || service >= QAT_SERVICE_INVALID) { + QAT_LOG(ERR, "invalid param: dev %p, service %d", + dev, service); + return; + } + + qp = dev->qps_in_use[service]; + for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) { + if (qp[i] == NULL) { + QAT_LOG(DEBUG, "Service %d Uninitialised qp %d", + service, i); + continue; + } + memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats)); + } + + QAT_LOG(DEBUG, "QAT: %d stats cleared", service); +} diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h new file mode 100644 index 00000000..d4bef539 --- /dev/null +++ b/drivers/common/qat/qat_common.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ +#ifndef _QAT_COMMON_H_ +#define _QAT_COMMON_H_ + +#include <stdint.h> + +#include <rte_mbuf.h> + +/**< Intel(R) QAT device name for PCI registration */ +#define QAT_PCI_NAME qat +#define QAT_64_BTYE_ALIGN_MASK (~0x3f) + +/* Intel(R) QuickAssist Technology device generations are enumerated + * from 1, matching the hardware generation of the device. + */ +enum qat_device_gen { + QAT_GEN1 = 1, + QAT_GEN2 +}; + +enum qat_service_type { + QAT_SERVICE_ASYMMETRIC = 0, + QAT_SERVICE_SYMMETRIC, + QAT_SERVICE_COMPRESSION, + QAT_SERVICE_INVALID +}; + +#define QAT_MAX_SERVICES (QAT_SERVICE_INVALID) + +
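+/* Editor's sketch (not part of the upstream patch): the flexible struct + * qat_sgl below cannot be embedded directly in an op cookie, so a service + * PMD typically declares a sized variant and fills it with + * qat_sgl_fill_array(); QAT_PMD_MAX_SEGS and the cookie layout here are + * hypothetical names. + * + * #define QAT_PMD_MAX_SEGS 16 + * struct qat_pmd_sgl { + * qat_sgl_hdr; + * struct qat_flat_buf buffers[QAT_PMD_MAX_SEGS]; + * } __rte_packed __rte_cache_aligned; + * + * int ret = qat_sgl_fill_array(op->m_src, op->src.offset, &cookie->sgl, + * op->src.length, QAT_PMD_MAX_SEGS); + */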
+/**< Common struct for scatter-gather list operations */ +struct qat_flat_buf { + uint32_t len; + uint32_t resrvd; + uint64_t addr; +} __rte_packed; + +#define qat_sgl_hdr struct { \ + uint64_t resrvd; \ + uint32_t num_bufs; \ + uint32_t num_mapped_bufs; \ +} + +__extension__ +struct qat_sgl { + qat_sgl_hdr; + /* flexible array of flat buffers */ + struct qat_flat_buf buffers[0]; +} __rte_packed __rte_cache_aligned; + +/** Common, i.e. not service-specific, statistics */ +struct qat_common_stats { + uint64_t enqueued_count; + /**< Count of all operations enqueued */ + uint64_t dequeued_count; + /**< Count of all operations dequeued */ + + uint64_t enqueue_err_count; + /**< Total error count on operations enqueued */ + uint64_t dequeue_err_count; + /**< Total error count on operations dequeued */ +}; + +struct qat_pci_device; + +int +qat_sgl_fill_array(struct rte_mbuf *buf, int64_t offset, + void *list_in, uint32_t data_len, + const uint16_t max_segs); +void +qat_stats_get(struct qat_pci_device *dev, + struct qat_common_stats *stats, + enum qat_service_type service); +void +qat_stats_reset(struct qat_pci_device *dev, + enum qat_service_type service); + +#endif /* _QAT_COMMON_H_ */ diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c new file mode 100644 index 00000000..f32d7235 --- /dev/null +++ b/drivers/common/qat/qat_device.c @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include <rte_string_fns.h> + +#include "qat_device.h" +#include "adf_transport_access_macros.h" +#include "qat_sym_pmd.h" + +/* Hardware device information per generation */ +__extension__ +struct qat_gen_hw_data qat_gen_config[] = { + [QAT_GEN1] = { + .dev_gen = QAT_GEN1, + .qp_hw_data = qat_gen1_qps, + }, + [QAT_GEN2] = { + .dev_gen = QAT_GEN2, + .qp_hw_data = qat_gen1_qps, + /* gen2 has same ring layout as gen1 */ + }, +}; + + +static struct qat_pci_device qat_pci_devices[RTE_PMD_QAT_MAX_PCI_DEVICES]; +static int qat_nb_pci_devices; + +/* + * The set of PCI devices this driver supports + */ + +static const struct rte_pci_id pci_id_qat_map[] = { + { + RTE_PCI_DEVICE(0x8086, 0x0443), + }, + { + RTE_PCI_DEVICE(0x8086, 0x37c9), + }, + { + RTE_PCI_DEVICE(0x8086, 0x19e3), + }, + { + RTE_PCI_DEVICE(0x8086, 0x6f55), + }, + {.device_id = 0}, +}; + + +static struct qat_pci_device * +qat_pci_get_dev(uint8_t dev_id) +{ + return &qat_pci_devices[dev_id]; +} + +static struct qat_pci_device * +qat_pci_get_named_dev(const char *name) +{ + struct qat_pci_device *dev; + unsigned int i; + + if (name == NULL) + return NULL; + + for (i = 0; i < RTE_PMD_QAT_MAX_PCI_DEVICES; i++) { + dev = &qat_pci_devices[i]; + + if ((dev->attached == QAT_ATTACHED) && + (strcmp(dev->name, name) == 0)) + return dev; + } + + return NULL; +} + +static uint8_t +qat_pci_find_free_device_index(void) +{ + uint8_t dev_id; + + for (dev_id = 0; dev_id < RTE_PMD_QAT_MAX_PCI_DEVICES; dev_id++) { + if (qat_pci_devices[dev_id].attached == QAT_DETACHED) + break; + } + return dev_id; +} + +struct qat_pci_device * +qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev) +{ + char name[QAT_DEV_NAME_MAX_LEN]; + + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + + return qat_pci_get_named_dev(name); +} + +struct qat_pci_device * +qat_pci_device_allocate(struct rte_pci_device *pci_dev) +{ + struct qat_pci_device *qat_dev; + uint8_t qat_dev_id; + char name[QAT_DEV_NAME_MAX_LEN]; + + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat"); + if (qat_pci_get_named_dev(name) != NULL) { + QAT_LOG(ERR, "QAT 
device with name %s already allocated!", + name); + return NULL; + } + + qat_dev_id = qat_pci_find_free_device_index(); + if (qat_dev_id == RTE_PMD_QAT_MAX_PCI_DEVICES) { + QAT_LOG(ERR, "Reached maximum number of QAT devices"); + return NULL; + } + + qat_dev = qat_pci_get_dev(qat_dev_id); + memset(qat_dev, 0, sizeof(*qat_dev)); + strlcpy(qat_dev->name, name, QAT_DEV_NAME_MAX_LEN); + qat_dev->qat_dev_id = qat_dev_id; + qat_dev->pci_dev = pci_dev; + switch (qat_dev->pci_dev->id.device_id) { + case 0x0443: + qat_dev->qat_dev_gen = QAT_GEN1; + break; + case 0x37c9: + case 0x19e3: + case 0x6f55: + qat_dev->qat_dev_gen = QAT_GEN2; + break; + default: + QAT_LOG(ERR, "Invalid dev_id, can't determine generation"); + return NULL; + } + + rte_spinlock_init(&qat_dev->arb_csr_lock); + + qat_dev->attached = QAT_ATTACHED; + + qat_nb_pci_devices++; + + QAT_LOG(DEBUG, "QAT device %d allocated, name %s, total QATs %d", + qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices); + + return qat_dev; +} + +int +qat_pci_device_release(struct rte_pci_device *pci_dev) +{ + struct qat_pci_device *qat_dev; + char name[QAT_DEV_NAME_MAX_LEN]; + + if (pci_dev == NULL) + return -EINVAL; + + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat"); + qat_dev = qat_pci_get_named_dev(name); + if (qat_dev != NULL) { + + /* Check that there are no service devs still on pci device */ + if (qat_dev->sym_dev != NULL) + return -EBUSY; + + qat_dev->attached = QAT_DETACHED; + qat_nb_pci_devices--; + } + QAT_LOG(DEBUG, "QAT device %s released, total QATs %d", + name, qat_nb_pci_devices); + return 0; +} + +static int +qat_pci_dev_destroy(struct qat_pci_device *qat_pci_dev, + struct rte_pci_device *pci_dev) +{ + qat_sym_dev_destroy(qat_pci_dev); + qat_comp_dev_destroy(qat_pci_dev); + qat_asym_dev_destroy(qat_pci_dev); + return qat_pci_device_release(pci_dev); +} + +static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + int ret = 0; + struct qat_pci_device *qat_pci_dev; + + QAT_LOG(DEBUG, "Found QAT device at %02x:%02x.%x", + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); + + qat_pci_dev = qat_pci_device_allocate(pci_dev); + if (qat_pci_dev == NULL) + return -ENODEV; + + ret = qat_sym_dev_create(qat_pci_dev); + if (ret != 0) + goto error_out; + + ret = qat_comp_dev_create(qat_pci_dev); + if (ret != 0) + goto error_out; + + ret = qat_asym_dev_create(qat_pci_dev); + if (ret != 0) + goto error_out; + + return 0; + +error_out: + qat_pci_dev_destroy(qat_pci_dev, pci_dev); + return ret; + +} + +static int qat_pci_remove(struct rte_pci_device *pci_dev) +{ + struct qat_pci_device *qat_pci_dev; + + if (pci_dev == NULL) + return -EINVAL; + + qat_pci_dev = qat_get_qat_dev_from_pci_dev(pci_dev); + if (qat_pci_dev == NULL) + return 0; + + return qat_pci_dev_destroy(qat_pci_dev, pci_dev); +} + +static struct rte_pci_driver rte_qat_pmd = { + .id_table = pci_id_qat_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = qat_pci_probe, + .remove = qat_pci_remove +}; + +__attribute__((weak)) int +qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +__attribute__((weak)) int +qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +__attribute__((weak)) int +qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +__attribute__((weak)) int +qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev 
__rte_unused) +{ + return 0; +} + +__attribute__((weak)) int +qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +__attribute__((weak)) int +qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +RTE_PMD_REGISTER_PCI(QAT_PCI_NAME, rte_qat_pmd); +RTE_PMD_REGISTER_PCI_TABLE(QAT_PCI_NAME, pci_id_qat_map); diff --git a/drivers/common/qat/qat_device.h b/drivers/common/qat/qat_device.h new file mode 100644 index 00000000..9599fc59 --- /dev/null +++ b/drivers/common/qat/qat_device.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ +#ifndef _QAT_DEVICE_H_ +#define _QAT_DEVICE_H_ + +#include <rte_bus_pci.h> + +#include "qat_common.h" +#include "qat_logs.h" +#include "adf_transport_access_macros.h" +#include "qat_qp.h" + +#define QAT_DETACHED (0) +#define QAT_ATTACHED (1) + +#define QAT_DEV_NAME_MAX_LEN 64 + +/* + * This struct holds all the data about a QAT pci device + * including data about all services it supports. + * It contains + * - hw_data + * - config data + * - runtime data + */ +struct qat_sym_dev_private; +struct qat_comp_dev_private; + +struct qat_pci_device { + + /* Data used by all services */ + char name[QAT_DEV_NAME_MAX_LEN]; + /**< Name of qat pci device */ + uint8_t qat_dev_id; + /**< Device instance for this qat pci device */ + struct rte_pci_device *pci_dev; + /**< PCI information. */ + enum qat_device_gen qat_dev_gen; + /**< QAT device generation */ + rte_spinlock_t arb_csr_lock; + /**< lock to protect accesses to the arbiter CSR */ + __extension__ + uint8_t attached : 1; + /**< Flag indicating the device is attached */ + + struct qat_qp *qps_in_use[QAT_MAX_SERVICES][ADF_MAX_QPS_ON_ANY_SERVICE]; + /**< links to qps set up for each service, index same as on API */ + + /* Data relating to symmetric crypto service */ + struct qat_sym_dev_private *sym_dev; + /**< link back to cryptodev private data */ + struct rte_device sym_rte_dev; + /**< This represents the crypto subset of this pci device. 
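+ * (Editor's note: this is an rte_device embedded in the qat_pci_device, + * not a separately probed bus device.)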
+ * Register with this rather than with the one in + * pci_dev so that its driver can have a crypto-specific name + */ + + /* Data relating to compression service */ + struct qat_comp_dev_private *comp_dev; + /**< link back to compressdev private data */ + + /* Data relating to asymmetric crypto service */ + +}; + +struct qat_gen_hw_data { + enum qat_device_gen dev_gen; + const struct qat_qp_hw_data (*qp_hw_data)[ADF_MAX_QPS_ON_ANY_SERVICE]; +}; + +extern struct qat_gen_hw_data qat_gen_config[]; + +struct qat_pci_device * +qat_pci_device_allocate(struct rte_pci_device *pci_dev); + +int +qat_pci_device_release(struct rte_pci_device *pci_dev); + +struct qat_pci_device * +qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev); + +/* declaration needed for weak functions */ +int +qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused); + +#endif /* _QAT_DEVICE_H_ */ diff --git a/drivers/common/qat/qat_logs.c b/drivers/common/qat/qat_logs.c new file mode 100644 index 00000000..7a861709 --- /dev/null +++ b/drivers/common/qat/qat_logs.c @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include <rte_log.h> +#include <rte_hexdump.h> + +#include "qat_logs.h" + +int qat_gen_logtype; +int qat_dp_logtype; + +int +qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title, + const void *buf, unsigned int len) +{ + if (level > rte_log_get_global_level()) + return 0; + if (level > (uint32_t)(rte_log_get_level(logtype))) + return 0; + + rte_hexdump(rte_logs.file == NULL ? stderr : rte_logs.file, + title, buf, len); + return 0; +} + +RTE_INIT(qat_pci_init_log) +{ + /* Non-data-path logging for pci device and all services */ + qat_gen_logtype = rte_log_register("pmd.qat_general"); + if (qat_gen_logtype >= 0) + rte_log_set_level(qat_gen_logtype, RTE_LOG_NOTICE); + + /* data-path logging for all services */ + qat_dp_logtype = rte_log_register("pmd.qat_dp"); + if (qat_dp_logtype >= 0) + rte_log_set_level(qat_dp_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/common/qat/qat_logs.h b/drivers/common/qat/qat_logs.h new file mode 100644 index 00000000..4baea12c --- /dev/null +++ b/drivers/common/qat/qat_logs.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2018 Intel Corporation + */ + +#ifndef _QAT_LOGS_H_ +#define _QAT_LOGS_H_ + +extern int qat_gen_logtype; +extern int qat_dp_logtype; + +#define QAT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, qat_gen_logtype, \ + "%s(): " fmt "\n", __func__, ## args) + +#define QAT_DP_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, qat_dp_logtype, \ + "%s(): " fmt "\n", __func__, ## args) + +#define QAT_DP_HEXDUMP_LOG(level, title, buf, len) \ + qat_hexdump_log(RTE_LOG_ ## level, qat_dp_logtype, title, buf, len) + +/** + * qat_hexdump_log - Dump out memory in a special hex dump format. + * + * Dump out the message buffer in a special hex dump output format with + * characters printed for each line of 16 hex values. The message will be sent + * to the stream defined by rte_logs.file, or to stderr if rte_logs.file + * is undefined. + */ +int +qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title, + const void *buf, unsigned int len); + +#endif /* _QAT_LOGS_H_ */
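Editor's note (not part of the upstream patch): a minimal sketch of how a service PMD is expected to use the logging helpers declared above; the function and buffer here are illustrative only. static void example_qat_log_usage(void) { uint8_t req[64] = {0}; /* stand-in for a firmware message */ QAT_LOG(INFO, "example: device ready"); QAT_DP_LOG(DEBUG, "example: %u ops", 4u); QAT_DP_HEXDUMP_LOG(DEBUG, "example req:", req, sizeof(req)); }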
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c new file mode 100644 index 00000000..7ca7a45e --- /dev/null +++ b/drivers/common/qat/qat_qp.c @@ -0,0 +1,642 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2018 Intel Corporation + */ + +#include <rte_common.h> +#include <rte_dev.h> +#include <rte_malloc.h> +#include <rte_memzone.h> +#include <rte_pci.h> +#include <rte_bus_pci.h> +#include <rte_atomic.h> +#include <rte_prefetch.h> + +#include "qat_logs.h" +#include "qat_device.h" +#include "qat_qp.h" +#include "qat_sym.h" +#include "qat_comp.h" +#include "adf_transport_access_macros.h" + + +#define ADF_MAX_DESC 4096 +#define ADF_MIN_DESC 128 + +#define ADF_ARB_REG_SLOT 0x1000 +#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C + +#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ + (ADF_ARB_REG_SLOT * index), value) + +__extension__ +const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES] + [ADF_MAX_QPS_ON_ANY_SERVICE] = { + /* queue pairs which provide an asymmetric crypto service */ + [QAT_SERVICE_ASYMMETRIC] = { + { + .service_type = QAT_SERVICE_ASYMMETRIC, + .hw_bundle_num = 0, + .tx_ring_num = 0, + .rx_ring_num = 8, + .tx_msg_size = 64, + .rx_msg_size = 32, + + }, { + .service_type = QAT_SERVICE_ASYMMETRIC, + .hw_bundle_num = 0, + .tx_ring_num = 1, + .rx_ring_num = 9, + .tx_msg_size = 64, + .rx_msg_size = 32, + } + }, + /* queue pairs which provide a symmetric crypto service */ + [QAT_SERVICE_SYMMETRIC] = { + { + .service_type = QAT_SERVICE_SYMMETRIC, + .hw_bundle_num = 0, + .tx_ring_num = 2, + .rx_ring_num = 10, + .tx_msg_size = 128, + .rx_msg_size = 32, + }, + { + .service_type = QAT_SERVICE_SYMMETRIC, + .hw_bundle_num = 0, + .tx_ring_num = 3, + .rx_ring_num = 11, + .tx_msg_size = 128, + .rx_msg_size = 32, + } + }, + /* queue pairs which provide a compression service */ + [QAT_SERVICE_COMPRESSION] = { + { + .service_type = QAT_SERVICE_COMPRESSION, + .hw_bundle_num = 0, + .tx_ring_num = 6, + .rx_ring_num = 14, + .tx_msg_size = 128, + .rx_msg_size = 32, + }, { + .service_type = QAT_SERVICE_COMPRESSION, + .hw_bundle_num = 0, + .tx_ring_num = 7, + .rx_ring_num = 15, + .tx_msg_size = 128, + .rx_msg_size = 32, + } + } +}; + +static int qat_qp_check_queue_alignment(uint64_t phys_addr, + uint32_t queue_size_bytes); +static void qat_queue_delete(struct qat_queue *queue); +static int qat_queue_create(struct qat_pci_device *qat_dev, + struct qat_queue *queue, struct qat_qp_config *, uint8_t dir); +static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num, + uint32_t *queue_size_for_csr); +static void adf_configure_queues(struct qat_qp *queue); +static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr, + rte_spinlock_t *lock); +static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr, + rte_spinlock_t *lock); + + +int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data, + enum qat_service_type service) +{ + int i, count; + + for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) + if (qp_hw_data[i].service_type == service) + count++; + return count; +} + +static const struct rte_memzone * +queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size, + int socket_id) +{ + const struct rte_memzone *mz; + + mz = rte_memzone_lookup(queue_name); + if (mz != 0) { + if (((size_t)queue_size <= mz->len) && + ((socket_id == SOCKET_ID_ANY) || + (socket_id == mz->socket_id))) { + QAT_LOG(DEBUG, "re-use memzone already " 
+ "allocated for %s", queue_name); + return mz; + } + + QAT_LOG(ERR, "Incompatible memzone already " + "allocated %s, size %u, socket %d. " + "Requested size %u, socket %u", + queue_name, (uint32_t)mz->len, + mz->socket_id, queue_size, socket_id); + return NULL; + } + + QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u", + queue_name, queue_size, socket_id); + return rte_memzone_reserve_aligned(queue_name, queue_size, + socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size); +} + +int qat_qp_setup(struct qat_pci_device *qat_dev, + struct qat_qp **qp_addr, + uint16_t queue_pair_id, + struct qat_qp_config *qat_qp_conf) + +{ + struct qat_qp *qp; + struct rte_pci_device *pci_dev = qat_dev->pci_dev; + char op_cookie_pool_name[RTE_RING_NAMESIZE]; + uint32_t i; + + QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d", + queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen); + + if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) || + (qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) { + QAT_LOG(ERR, "Can't create qp for %u descriptors", + qat_qp_conf->nb_descriptors); + return -EINVAL; + } + + if (pci_dev->mem_resource[0].addr == NULL) { + QAT_LOG(ERR, "Could not find VF config space " + "(UIO driver attached?)."); + return -EINVAL; + } + + /* Allocate the queue pair data structure. */ + qp = rte_zmalloc("qat PMD qp metadata", + sizeof(*qp), RTE_CACHE_LINE_SIZE); + if (qp == NULL) { + QAT_LOG(ERR, "Failed to alloc mem for qp struct"); + return -ENOMEM; + } + qp->nb_descriptors = qat_qp_conf->nb_descriptors; + qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer", + qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies), + RTE_CACHE_LINE_SIZE); + if (qp->op_cookies == NULL) { + QAT_LOG(ERR, "Failed to alloc mem for cookie"); + rte_free(qp); + return -ENOMEM; + } + + qp->mmap_bar_addr = pci_dev->mem_resource[0].addr; + qp->inflights16 = 0; + + if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf, + ADF_RING_DIR_TX) != 0) { + QAT_LOG(ERR, "Tx queue create failed " + "queue_pair_id=%u", queue_pair_id); + goto create_err; + } + + if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf, + ADF_RING_DIR_RX) != 0) { + QAT_LOG(ERR, "Rx queue create failed " + "queue_pair_id=%hu", queue_pair_id); + qat_queue_delete(&(qp->tx_q)); + goto create_err; + } + + adf_configure_queues(qp); + adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr, + &qat_dev->arb_csr_lock); + + snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, + "%s%d_cookies_%s_qp%hu", + pci_dev->driver->driver.name, qat_dev->qat_dev_id, + qat_qp_conf->service_str, queue_pair_id); + + QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name); + qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name); + if (qp->op_cookie_pool == NULL) + qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name, + qp->nb_descriptors, + qat_qp_conf->cookie_size, 64, 0, + NULL, NULL, NULL, NULL, qat_qp_conf->socket_id, + 0); + if (!qp->op_cookie_pool) { + QAT_LOG(ERR, "QAT PMD Cannot create" + " op mempool"); + goto create_err; + } + + for (i = 0; i < qp->nb_descriptors; i++) { + if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) { + QAT_LOG(ERR, "QAT PMD Cannot get op_cookie"); + goto create_err; + } + } + + qp->qat_dev_gen = qat_dev->qat_dev_gen; + qp->build_request = qat_qp_conf->build_request; + qp->service_type = qat_qp_conf->hw->service_type; + qp->qat_dev = qat_dev; + + QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s", + queue_pair_id, op_cookie_pool_name); + + *qp_addr = qp; + return 0; + +create_err: + if (qp->op_cookie_pool) + 
rte_mempool_free(qp->op_cookie_pool); + rte_free(qp->op_cookies); + rte_free(qp); + return -EFAULT; +} + +int qat_qp_release(struct qat_qp **qp_addr) +{ + struct qat_qp *qp = *qp_addr; + uint32_t i; + + if (qp == NULL) { + QAT_LOG(DEBUG, "qp already freed"); + return 0; + } + + QAT_LOG(DEBUG, "Free qp on qat_pci device %d", + qp->qat_dev->qat_dev_id); + + /* Don't free memory if there are still responses to be processed */ + if (qp->inflights16 == 0) { + qat_queue_delete(&(qp->tx_q)); + qat_queue_delete(&(qp->rx_q)); + } else { + return -EAGAIN; + } + + adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr, + &qp->qat_dev->arb_csr_lock); + + for (i = 0; i < qp->nb_descriptors; i++) + rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]); + + if (qp->op_cookie_pool) + rte_mempool_free(qp->op_cookie_pool); + + rte_free(qp->op_cookies); + rte_free(qp); + *qp_addr = NULL; + return 0; +} + + +static void qat_queue_delete(struct qat_queue *queue) +{ + const struct rte_memzone *mz; + int status = 0; + + if (queue == NULL) { + QAT_LOG(DEBUG, "Invalid queue"); + return; + } + QAT_LOG(DEBUG, "Free ring %d, memzone: %s", + queue->hw_queue_number, queue->memz_name); + + mz = rte_memzone_lookup(queue->memz_name); + if (mz != NULL) { + /* Write an unused pattern to the queue memory. */ + memset(queue->base_addr, 0x7F, queue->queue_size); + status = rte_memzone_free(mz); + if (status != 0) + QAT_LOG(ERR, "Error %d on freeing queue %s", + status, queue->memz_name); + } else { + QAT_LOG(DEBUG, "queue %s doesn't exist", + queue->memz_name); + } +} + +static int +qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue, + struct qat_qp_config *qp_conf, uint8_t dir) +{ + uint64_t queue_base; + void *io_addr; + const struct rte_memzone *qp_mz; + struct rte_pci_device *pci_dev = qat_dev->pci_dev; + int ret = 0; + uint16_t desc_size = (dir == ADF_RING_DIR_TX ? + qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size); + uint32_t queue_size_bytes = (qp_conf->nb_descriptors)*(desc_size); + + queue->hw_bundle_number = qp_conf->hw->hw_bundle_num; + queue->hw_queue_number = (dir == ADF_RING_DIR_TX ? + qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num); + + if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) { + QAT_LOG(ERR, "Invalid descriptor size %d", desc_size); + return -EINVAL; + } + + /* + * Allocate a memzone for the queue - create a unique name. 
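+ * The name built below encodes the driver name, device id, service string + * and ring numbers, so queue_dma_zone_reserve() can find and re-use a zone + * that was already reserved for this ring. 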
+ */ + snprintf(queue->memz_name, sizeof(queue->memz_name), + "%s_%d_%s_%s_%d_%d", + pci_dev->driver->driver.name, qat_dev->qat_dev_id, + qp_conf->service_str, "qp_mem", + queue->hw_bundle_number, queue->hw_queue_number); + qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes, + qp_conf->socket_id); + if (qp_mz == NULL) { + QAT_LOG(ERR, "Failed to allocate ring memzone"); + return -ENOMEM; + } + + queue->base_addr = (char *)qp_mz->addr; + queue->base_phys_addr = qp_mz->iova; + if (qat_qp_check_queue_alignment(queue->base_phys_addr, + queue_size_bytes)) { + QAT_LOG(ERR, "Invalid alignment on queue create " + " 0x%"PRIx64"\n", + queue->base_phys_addr); + ret = -EFAULT; + goto queue_create_err; + } + + if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors, + &(queue->queue_size)) != 0) { + QAT_LOG(ERR, "Invalid num inflights"); + ret = -EINVAL; + goto queue_create_err; + } + + queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size, + ADF_BYTES_TO_MSG_SIZE(desc_size)); + queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1; + + if (queue->max_inflights < 2) { + QAT_LOG(ERR, "Invalid num inflights"); + ret = -EINVAL; + goto queue_create_err; + } + queue->head = 0; + queue->tail = 0; + queue->msg_size = desc_size; + + /* + * Write an unused pattern to the queue memory. + */ + memset(queue->base_addr, 0x7F, queue_size_bytes); + + queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr, + queue->queue_size); + + io_addr = pci_dev->mem_resource[0].addr; + + WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number, + queue->hw_queue_number, queue_base); + + QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u," + " nb msgs %u, msg_size %u, max_inflights %u modulo mask %u", + queue->memz_name, + queue->queue_size, queue_size_bytes, + qp_conf->nb_descriptors, desc_size, + queue->max_inflights, queue->modulo_mask); + + return 0; + +queue_create_err: + rte_memzone_free(qp_mz); + return ret; +} + +static int qat_qp_check_queue_alignment(uint64_t phys_addr, + uint32_t queue_size_bytes) +{ + if (((queue_size_bytes - 1) & phys_addr) != 0) + return -EINVAL; + return 0; +} + +static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num, + uint32_t *p_queue_size_for_csr) +{ + uint8_t i = ADF_MIN_RING_SIZE; + + for (; i <= ADF_MAX_RING_SIZE; i++) + if ((msg_size * msg_num) == + (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) { + *p_queue_size_for_csr = i; + return 0; + } + QAT_LOG(ERR, "Invalid ring size %d", msg_size * msg_num); + return -EINVAL; +} + +static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr, + rte_spinlock_t *lock) +{ + uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + + (ADF_ARB_REG_SLOT * + txq->hw_bundle_number); + uint32_t value; + + rte_spinlock_lock(lock); + value = ADF_CSR_RD(base_addr, arb_csr_offset); + value |= (0x01 << txq->hw_queue_number); + ADF_CSR_WR(base_addr, arb_csr_offset, value); + rte_spinlock_unlock(lock); +} + +static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr, + rte_spinlock_t *lock) +{ + uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + + (ADF_ARB_REG_SLOT * + txq->hw_bundle_number); + uint32_t value; + + rte_spinlock_lock(lock); + value = ADF_CSR_RD(base_addr, arb_csr_offset); + value &= ~(0x01 << txq->hw_queue_number); + ADF_CSR_WR(base_addr, arb_csr_offset, value); + rte_spinlock_unlock(lock); +} + +static void adf_configure_queues(struct qat_qp *qp) +{ + uint32_t queue_config; + struct qat_queue *queue = &qp->tx_q; + + queue_config = 
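/* Editor's note: the tx ring takes a plain config word, while the rx ring below is also programmed with near-watermark thresholds. */ 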
BUILD_RING_CONFIG(queue->queue_size); + + WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number, + queue->hw_queue_number, queue_config); + + queue = &qp->rx_q; + queue_config = + BUILD_RESP_RING_CONFIG(queue->queue_size, + ADF_RING_NEAR_WATERMARK_512, + ADF_RING_NEAR_WATERMARK_0); + + WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number, + queue->hw_queue_number, queue_config); +} + +static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask) +{ + return data & modulo_mask; +} + +static inline void +txq_write_tail(struct qat_qp *qp, struct qat_queue *q) { + WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number, + q->hw_queue_number, q->tail); + q->nb_pending_requests = 0; + q->csr_tail = q->tail; +} + +static inline +void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q) +{ + uint32_t old_head, new_head; + uint32_t max_head; + + old_head = q->csr_head; + new_head = q->head; + max_head = qp->nb_descriptors * q->msg_size; + + /* write out free descriptors */ + void *cur_desc = (uint8_t *)q->base_addr + old_head; + + if (new_head < old_head) { + memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head); + memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head); + } else { + memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head); + } + q->nb_processed_responses = 0; + q->csr_head = new_head; + + /* write current head to CSR */ + WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number, + q->hw_queue_number, new_head); +} + +uint16_t +qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops) +{ + register struct qat_queue *queue; + struct qat_qp *tmp_qp = (struct qat_qp *)qp; + register uint32_t nb_ops_sent = 0; + register int ret; + uint16_t nb_ops_possible = nb_ops; + register uint8_t *base_addr; + register uint32_t tail; + int overflow; + + if (unlikely(nb_ops == 0)) + return 0; + + /* read params used a lot in main loop into registers */ + queue = &(tmp_qp->tx_q); + base_addr = (uint8_t *)queue->base_addr; + tail = queue->tail; + + /* Find how many can actually fit on the ring */ + tmp_qp->inflights16 += nb_ops; + overflow = tmp_qp->inflights16 - queue->max_inflights; + if (overflow > 0) { + tmp_qp->inflights16 -= overflow; + nb_ops_possible = nb_ops - overflow; + if (nb_ops_possible == 0) + return 0; + } + + while (nb_ops_sent != nb_ops_possible) { + ret = tmp_qp->build_request(*ops, base_addr + tail, + tmp_qp->op_cookies[tail / queue->msg_size], + tmp_qp->qat_dev_gen); + if (ret != 0) { + tmp_qp->stats.enqueue_err_count++; + /* + * This message cannot be enqueued, + * decrease the in-flight count by the number + * of ops that will not be sent + */ + tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent; + if (nb_ops_sent == 0) + return 0; + goto kick_tail; + } + + tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask); + ops++; + nb_ops_sent++; + } +kick_tail: + queue->tail = tail; + tmp_qp->stats.enqueued_count += nb_ops_sent; + queue->nb_pending_requests += nb_ops_sent; + if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH || + queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) { + txq_write_tail(tmp_qp, queue); + } + return nb_ops_sent; +} + 
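+/* Editor's sketch (not part of the upstream patch): a service PMD wires + * these helpers directly into its burst API, e.g. for cryptodev: + * + * static uint16_t + * pmd_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb) + * { + * return qat_enqueue_op_burst(qp, (void **)ops, nb); + * } + * + * qat_dequeue_op_burst() below is hooked up the same way. + */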
+uint16_t +qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops) +{ + struct qat_queue *rx_queue, *tx_queue; + struct qat_qp *tmp_qp = (struct qat_qp *)qp; + uint32_t head; + uint32_t resp_counter = 0; + uint8_t *resp_msg; + + rx_queue = &(tmp_qp->rx_q); + tx_queue = &(tmp_qp->tx_q); + head = rx_queue->head; + resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head; + + while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG && + resp_counter != nb_ops) { + + if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) + qat_sym_process_response(ops, resp_msg); + else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) + qat_comp_process_response(ops, resp_msg); + + head = adf_modulo(head + rx_queue->msg_size, + rx_queue->modulo_mask); + + resp_msg = (uint8_t *)rx_queue->base_addr + head; + ops++; + resp_counter++; + } + if (resp_counter > 0) { + rx_queue->head = head; + tmp_qp->stats.dequeued_count += resp_counter; + rx_queue->nb_processed_responses += resp_counter; + tmp_qp->inflights16 -= resp_counter; + + if (rx_queue->nb_processed_responses > + QAT_CSR_HEAD_WRITE_THRESH) + rxq_free_desc(tmp_qp, rx_queue); + } + /* also check if tail needs to be advanced */ + if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH && + tx_queue->tail != tx_queue->csr_tail) { + txq_write_tail(tmp_qp, tx_queue); + } + return resp_counter; +} + +__attribute__((weak)) int +qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused) +{ + return 0; +} diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h new file mode 100644 index 00000000..69f8a613 --- /dev/null +++ b/drivers/common/qat/qat_qp.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ +#ifndef _QAT_QP_H_ +#define _QAT_QP_H_ + +#include "qat_common.h" +#include "adf_transport_access_macros.h" + +struct qat_pci_device; + +#define QAT_CSR_HEAD_WRITE_THRESH 32U +/* number of requests to accumulate before writing head CSR */ +#define QAT_CSR_TAIL_WRITE_THRESH 32U +/* number of requests to accumulate before writing tail CSR */ +#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U +/* number of inflights below which no tail write coalescing should occur */ + +typedef int (*build_request_t)(void *op, + uint8_t *req, void *op_cookie, + enum qat_device_gen qat_dev_gen); +/**< Build a request from an op. */ + +/** + * Structure with the hardware ring configuration of a queue pair. + */ +struct qat_qp_hw_data { + enum qat_service_type service_type; + uint8_t hw_bundle_num; + uint8_t tx_ring_num; + uint8_t rx_ring_num; + uint16_t tx_msg_size; + uint16_t rx_msg_size; +}; +/** + * Structure with data needed for creation of a queue pair. + */ +struct qat_qp_config { + const struct qat_qp_hw_data *hw; + uint32_t nb_descriptors; + uint32_t cookie_size; + int socket_id; + build_request_t build_request; + const char *service_str; +}; + +/** + * Structure associated with each queue. 
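+ * (Editor's note: one qat_queue describes a single hardware ring together + * with its software shadow state; struct qat_qp below pairs a tx ring with + * an rx ring.)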
+ */ +struct qat_queue { + char memz_name[RTE_MEMZONE_NAMESIZE]; + void *base_addr; /* Base address */ + rte_iova_t base_phys_addr; /* Queue physical address */ + uint32_t head; /* Shadow copy of the head */ + uint32_t tail; /* Shadow copy of the tail */ + uint32_t modulo_mask; + uint32_t msg_size; + uint16_t max_inflights; + uint32_t queue_size; + uint8_t hw_bundle_number; + uint8_t hw_queue_number; + /* HW queue aka ring offset on bundle */ + uint32_t csr_head; /* last written head value */ + uint32_t csr_tail; /* last written tail value */ + uint16_t nb_processed_responses; + /* number of responses processed since last CSR head write */ + uint16_t nb_pending_requests; + /* number of requests pending since last CSR tail write */ +}; + +struct qat_qp { + void *mmap_bar_addr; + uint16_t inflights16; + struct qat_queue tx_q; + struct qat_queue rx_q; + struct qat_common_stats stats; + struct rte_mempool *op_cookie_pool; + void **op_cookies; + uint32_t nb_descriptors; + enum qat_device_gen qat_dev_gen; + build_request_t build_request; + enum qat_service_type service_type; + struct qat_pci_device *qat_dev; + /**< qat device this qp is on */ +} __rte_cache_aligned; + +extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_ON_ANY_SERVICE]; + +uint16_t +qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops); + +uint16_t +qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops); + +int +qat_qp_release(struct qat_qp **qp_addr); + +int +qat_qp_setup(struct qat_pci_device *qat_dev, + struct qat_qp **qp_addr, uint16_t queue_pair_id, + struct qat_qp_config *qat_qp_conf); + +int +qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data, + enum qat_service_type service); + +/* Needed for weak function*/ +int +qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused); + +#endif /* _QAT_QP_H_ */ diff --git a/drivers/compress/Makefile b/drivers/compress/Makefile new file mode 100644 index 00000000..286ea6ee --- /dev/null +++ b/drivers/compress/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +DIRS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal +DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += octeontx +DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib + +include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/drivers/compress/isal/Makefile b/drivers/compress/isal/Makefile new file mode 100644 index 00000000..95904f64 --- /dev/null +++ b/drivers/compress/isal/Makefile @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_pmd_isal_comp.a + +# build flags +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -DALLOW_EXPERIMENTAL_API + +# external library dependencies +LDLIBS += -lisal +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_compressdev +LDLIBS += -lrte_bus_vdev + +# library version +LIBABIVER := 1 + +# versioning export map +EXPORT_MAP := rte_pmd_isal_version.map + +# library source files +SRCS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal_compress_pmd.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal_compress_pmd_ops.c + +# export include files +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/compress/isal/isal_compress_pmd.c b/drivers/compress/isal/isal_compress_pmd.c new file mode 100644 index 00000000..e943336b --- /dev/null +++ b/drivers/compress/isal/isal_compress_pmd.c @@ -0,0 +1,694 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel 
Corporation + */ +#include <isa-l.h> + +#include <rte_bus_vdev.h> +#include <rte_common.h> +#include <rte_malloc.h> +#include <rte_mbuf.h> +#include <rte_compressdev_pmd.h> + +#include "isal_compress_pmd_private.h" + +#define RTE_COMP_ISAL_WINDOW_SIZE 15 +#define RTE_COMP_ISAL_LEVEL_ZERO 0 /* ISA-L Level 0 used for fixed Huffman */ +#define RTE_COMP_ISAL_LEVEL_ONE 1 +#define RTE_COMP_ISAL_LEVEL_TWO 2 +#define RTE_COMP_ISAL_LEVEL_THREE 3 /* Optimised for AVX512 & AVX2 only */ + +int isal_logtype_driver; + +/* Verify and set private xform parameters */ +int +isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform, + const struct rte_comp_xform *xform) +{ + if (xform == NULL) + return -EINVAL; + + /* Set compression private xform variables */ + if (xform->type == RTE_COMP_COMPRESS) { + /* Set private xform type - COMPRESS/DECOMPRESS */ + priv_xform->type = RTE_COMP_COMPRESS; + + /* Set private xform algorithm */ + if (xform->compress.algo != RTE_COMP_ALGO_DEFLATE) { + if (xform->compress.algo == RTE_COMP_ALGO_NULL) { + ISAL_PMD_LOG(ERR, "By-pass not supported\n"); + return -ENOTSUP; + } + ISAL_PMD_LOG(ERR, "Algorithm not supported\n"); + return -ENOTSUP; + } + priv_xform->compress.algo = RTE_COMP_ALGO_DEFLATE; + + /* Set private xform checksum - raw deflate by default */ + if (xform->compress.chksum != RTE_COMP_CHECKSUM_NONE) { + ISAL_PMD_LOG(ERR, "Checksum not supported\n"); + return -ENOTSUP; + } + + /* Set private xform window size, 32K supported */ + if (xform->compress.window_size == RTE_COMP_ISAL_WINDOW_SIZE) + priv_xform->compress.window_size = + RTE_COMP_ISAL_WINDOW_SIZE; + else { + ISAL_PMD_LOG(ERR, "Window size not supported\n"); + return -ENOTSUP; + } + + /* Set private xform huffman type */ + switch (xform->compress.deflate.huffman) { + case(RTE_COMP_HUFFMAN_DEFAULT): + priv_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + break; + case(RTE_COMP_HUFFMAN_FIXED): + priv_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_FIXED; + break; + case(RTE_COMP_HUFFMAN_DYNAMIC): + priv_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + break; + default: + ISAL_PMD_LOG(ERR, "Huffman code not supported\n"); + return -ENOTSUP; + } + 
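+ /* Editor's summary of the level mapping implemented below, assuming + * the ISAL_DEF_LVL*_DEFAULT buffer sizes shipped with ISA-L: + * API level 1 or PMD default -> ISA-L level 1, ISAL_DEF_LVL1_DEFAULT + * API level 2 -> ISA-L level 2, ISAL_DEF_LVL2_DEFAULT + * API level 3..9 -> ISA-L level 3 with AVX512/AVX2, else level 2 + * fixed Huffman, any level -> ISA-L level 0, ISAL_DEF_LVL0_DEFAULT + */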
+ /* Set private xform level. + * Checking compliance with compressdev API, -1 <= level <= 9 + */ + if (xform->compress.level < RTE_COMP_LEVEL_PMD_DEFAULT || + xform->compress.level > RTE_COMP_LEVEL_MAX) { + ISAL_PMD_LOG(ERR, "Compression level out of range\n"); + return -EINVAL; + } + /* Check for Compressdev API level 0, No compression + * not supported in ISA-L + */ + else if (xform->compress.level == RTE_COMP_LEVEL_NONE) { + ISAL_PMD_LOG(ERR, "No Compression not supported\n"); + return -ENOTSUP; + } + /* If using fixed huffman code, level must be 0 */ + else if (priv_xform->compress.deflate.huffman == + RTE_COMP_HUFFMAN_FIXED) { + ISAL_PMD_LOG(DEBUG, "ISA-L level 0 used due to a" + " fixed huffman code\n"); + priv_xform->compress.level = RTE_COMP_ISAL_LEVEL_ZERO; + priv_xform->level_buffer_size = + ISAL_DEF_LVL0_DEFAULT; + } else { + /* Mapping API levels to ISA-L levels 1,2 & 3 */ + switch (xform->compress.level) { + case RTE_COMP_LEVEL_PMD_DEFAULT: + /* Default is 1 if not using fixed huffman */ + priv_xform->compress.level = + RTE_COMP_ISAL_LEVEL_ONE; + priv_xform->level_buffer_size = + ISAL_DEF_LVL1_DEFAULT; + break; + case RTE_COMP_LEVEL_MIN: + priv_xform->compress.level = + RTE_COMP_ISAL_LEVEL_ONE; + priv_xform->level_buffer_size = + ISAL_DEF_LVL1_DEFAULT; + break; + case RTE_COMP_ISAL_LEVEL_TWO: + priv_xform->compress.level = + RTE_COMP_ISAL_LEVEL_TWO; + priv_xform->level_buffer_size = + ISAL_DEF_LVL2_DEFAULT; + break; + /* Level 3 or higher requested */ + default: + /* Check for AVX512, to use ISA-L level 3 */ + if (rte_cpu_get_flag_enabled( + RTE_CPUFLAG_AVX512F)) { + priv_xform->compress.level = + RTE_COMP_ISAL_LEVEL_THREE; + priv_xform->level_buffer_size = + ISAL_DEF_LVL3_DEFAULT; + } + /* Check for AVX2, to use ISA-L level 3 */ + else if (rte_cpu_get_flag_enabled( + RTE_CPUFLAG_AVX2)) { + priv_xform->compress.level = + RTE_COMP_ISAL_LEVEL_THREE; + priv_xform->level_buffer_size = + ISAL_DEF_LVL3_DEFAULT; + } else { + ISAL_PMD_LOG(DEBUG, "Requested ISA-L level" + " 3 or above; Level 3 optimized" + " for AVX512 & AVX2 only." 
+ " level changed to 2.\n"); + priv_xform->compress.level = + RTE_COMP_ISAL_LEVEL_TWO; + priv_xform->level_buffer_size = + ISAL_DEF_LVL2_DEFAULT; + } + } + } + } + + /* Set decompression private xform variables */ + else if (xform->type == RTE_COMP_DECOMPRESS) { + + /* Set private xform type - COMPRESS/DECOMPRESS */ + priv_xform->type = RTE_COMP_DECOMPRESS; + + /* Set private xform algorithm */ + if (xform->decompress.algo != RTE_COMP_ALGO_DEFLATE) { + if (xform->decompress.algo == RTE_COMP_ALGO_NULL) { + ISAL_PMD_LOG(ERR, "By-pass not supported\n"); + return -ENOTSUP; + } + ISAL_PMD_LOG(ERR, "Algorithm not supported\n"); + return -ENOTSUP; + } + priv_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE; + + /* Set private xform checksum - raw deflate by default */ + if (xform->decompress.chksum != RTE_COMP_CHECKSUM_NONE) { + ISAL_PMD_LOG(ERR, "Checksum not supported\n"); + return -ENOTSUP; + } + + /* Set private xform window size, 32K supported */ + if (xform->decompress.window_size == RTE_COMP_ISAL_WINDOW_SIZE) + priv_xform->decompress.window_size = + RTE_COMP_ISAL_WINDOW_SIZE; + else { + ISAL_PMD_LOG(ERR, "Window size not supported\n"); + return -ENOTSUP; + } + } + return 0; +} + +/* Compression using chained mbufs for input/output data */ +static int +chained_mbuf_compression(struct rte_comp_op *op, struct isal_comp_qp *qp) +{ + int ret; + uint32_t remaining_offset; + uint32_t remaining_data = op->src.length; + struct rte_mbuf *src = op->m_src; + struct rte_mbuf *dst = op->m_dst; + + /* check for source/destination offset passing multiple segments + * and point compression stream to input/output buffer. + */ + remaining_offset = op->src.offset; + while (remaining_offset >= src->data_len) { + remaining_offset -= src->data_len; + src = src->next; + } + qp->stream->avail_in = RTE_MIN(src->data_len - remaining_offset, + op->src.length); + qp->stream->next_in = rte_pktmbuf_mtod_offset(src, uint8_t *, + remaining_offset); + + remaining_offset = op->dst.offset; + while (remaining_offset >= dst->data_len) { + remaining_offset -= dst->data_len; + dst = dst->next; + } + qp->stream->avail_out = dst->data_len - remaining_offset; + qp->stream->next_out = rte_pktmbuf_mtod_offset(dst, uint8_t *, + remaining_offset); + + if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) { + ISAL_PMD_LOG(ERR, "Invalid source or destination buffer\n"); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } + + while (qp->stream->internal_state.state != ZSTATE_END) { + /* Last segment of data */ + if (remaining_data <= src->data_len) + qp->stream->end_of_stream = 1; + + /* Execute compression operation */ + ret = isal_deflate(qp->stream); + + remaining_data = op->src.length - qp->stream->total_in; + + if (ret != COMP_OK) { + ISAL_PMD_LOG(ERR, "Compression operation failed\n"); + op->status = RTE_COMP_OP_STATUS_ERROR; + return ret; + } + + if (qp->stream->avail_in == 0 && + qp->stream->total_in != op->src.length) { + if (src->next != NULL) { + src = src->next; + qp->stream->next_in = + rte_pktmbuf_mtod(src, uint8_t *); + qp->stream->avail_in = + RTE_MIN(remaining_data, src->data_len); + } else { + ISAL_PMD_LOG(ERR, + "Not enough input buffer segments\n"); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } + } + + if (qp->stream->avail_out == 0 && + qp->stream->internal_state.state != ZSTATE_END) { + if (dst->next != NULL) { + dst = dst->next; + qp->stream->next_out = + rte_pktmbuf_mtod(dst, uint8_t *); + qp->stream->avail_out = dst->data_len; + } else { + ISAL_PMD_LOG(ERR, + "Not enough output 
buffer segments\n"); + op->status = + RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; + return -1; + } + } + } + + return 0; +} + +/* Decompression using chained mbufs for input/output data */ +static int +chained_mbuf_decompression(struct rte_comp_op *op, struct isal_comp_qp *qp) +{ + int ret; + uint32_t consumed_data, src_remaining_offset, dst_remaining_offset; + uint32_t remaining_data = op->src.length; + struct rte_mbuf *src = op->m_src; + struct rte_mbuf *dst = op->m_dst; + + /* check for offset passing multiple segments + * and point decompression state to input/output buffer + */ + src_remaining_offset = op->src.offset; + while (src_remaining_offset >= src->data_len) { + src_remaining_offset -= src->data_len; + src = src->next; + } + qp->state->avail_in = RTE_MIN(src->data_len - src_remaining_offset, + op->src.length); + qp->state->next_in = rte_pktmbuf_mtod_offset(src, uint8_t *, + src_remaining_offset); + + dst_remaining_offset = op->dst.offset; + while (dst_remaining_offset >= dst->data_len) { + dst_remaining_offset -= dst->data_len; + dst = dst->next; + } + qp->state->avail_out = dst->data_len - dst_remaining_offset; + qp->state->next_out = rte_pktmbuf_mtod_offset(dst, uint8_t *, + dst_remaining_offset); + + while (qp->state->block_state != ISAL_BLOCK_FINISH) { + + ret = isal_inflate(qp->state); + + /* Check for first segment, offset needs to be accounted for */ + if (remaining_data == op->src.length) { + consumed_data = src->data_len - qp->state->avail_in - + src_remaining_offset; + } else + consumed_data = src->data_len - qp->state->avail_in; + + op->consumed += consumed_data; + remaining_data -= consumed_data; + + if (ret != ISAL_DECOMP_OK) { + ISAL_PMD_LOG(ERR, "Decompression operation failed\n"); + op->status = RTE_COMP_OP_STATUS_ERROR; + return ret; + } + + if (qp->state->avail_in == 0 + && op->consumed != op->src.length) { + if (src->next != NULL) { + src = src->next; + qp->state->next_in = + rte_pktmbuf_mtod(src, uint8_t *); + qp->state->avail_in = + RTE_MIN(remaining_data, src->data_len); + } + } + + if (qp->state->avail_out == 0 && + qp->state->block_state != ISAL_BLOCK_FINISH) { + if (dst->next != NULL) { + dst = dst->next; + qp->state->next_out = + rte_pktmbuf_mtod(dst, uint8_t *); + qp->state->avail_out = dst->data_len; + } else { + ISAL_PMD_LOG(ERR, + "Not enough output buffer segments\n"); + op->status = + RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; + return -1; + } + } + } + + return 0; +} + +/* Stateless Compression Function */ +static int +process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp, + struct isal_priv_xform *priv_xform) +{ + int ret = 0; + op->status = RTE_COMP_OP_STATUS_SUCCESS; + + /* Required due to init clearing level_buf */ + uint8_t *temp_level_buf = qp->stream->level_buf; + + /* Initialize compression stream */ + isal_deflate_stateless_init(qp->stream); + + qp->stream->level_buf = temp_level_buf; + + /* Stateless operation, input will be consumed in one go */ + qp->stream->flush = NO_FLUSH; + + /* set compression level & intermediate level buffer size */ + qp->stream->level = priv_xform->compress.level; + qp->stream->level_buf_size = priv_xform->level_buffer_size; + + /* Set op huffman code */ + if (priv_xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED) + isal_deflate_set_hufftables(qp->stream, NULL, + IGZIP_HUFFTABLE_STATIC); + else if (priv_xform->compress.deflate.huffman == + RTE_COMP_HUFFMAN_DEFAULT) + isal_deflate_set_hufftables(qp->stream, NULL, + IGZIP_HUFFTABLE_DEFAULT); + /* Dynamically change the huffman code to 
suit the input data */ + else if (priv_xform->compress.deflate.huffman == + RTE_COMP_HUFFMAN_DYNAMIC) + isal_deflate_set_hufftables(qp->stream, NULL, + IGZIP_HUFFTABLE_DEFAULT); + + if (op->m_src->pkt_len < (op->src.length + op->src.offset)) { + ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough.\n"); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } + + if (op->dst.offset >= op->m_dst->pkt_len) { + ISAL_PMD_LOG(ERR, "Output mbuf(s) not big enough" + " for offset provided.\n"); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } + + /* Chained mbufs */ + if (op->m_src->nb_segs > 1 || op->m_dst->nb_segs > 1) { + ret = chained_mbuf_compression(op, qp); + if (ret < 0) + return ret; + } else { + /* Linear buffer */ + qp->stream->end_of_stream = 1; /* All input consumed in one */ + /* Point compression stream to input buffer */ + qp->stream->avail_in = op->src.length; + qp->stream->next_in = rte_pktmbuf_mtod_offset(op->m_src, + uint8_t *, op->src.offset); + + /* Point compression stream to output buffer */ + qp->stream->avail_out = op->m_dst->data_len - op->dst.offset; + qp->stream->next_out = rte_pktmbuf_mtod_offset(op->m_dst, + uint8_t *, op->dst.offset); + + if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) { + ISAL_PMD_LOG(ERR, "Invalid source or destination" + " buffers\n"); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } + + /* Execute compression operation */ + ret = isal_deflate_stateless(qp->stream); + + /* Check that output buffer did not run out of space */ + if (ret == STATELESS_OVERFLOW) { + ISAL_PMD_LOG(ERR, "Output buffer not big enough\n"); + op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; + return ret; + } + + /* Check that input buffer has been fully consumed */ + if (qp->stream->avail_in != (uint32_t)0) { + ISAL_PMD_LOG(ERR, "Input buffer could not be read" + " entirely\n"); + op->status = RTE_COMP_OP_STATUS_ERROR; + return -1; + } + + if (ret != COMP_OK) { + ISAL_PMD_LOG(ERR, "Compression operation failed\n"); + op->status = RTE_COMP_OP_STATUS_ERROR; + return ret; + } + } + op->consumed = qp->stream->total_in; + op->produced = qp->stream->total_out; + + return ret; +} + +/* Stateless Decompression Function */ +static int +process_isal_inflate(struct rte_comp_op *op, struct isal_comp_qp *qp) +{ + int ret = 0; + + op->status = RTE_COMP_OP_STATUS_SUCCESS; + + /* Initialize decompression state */ + isal_inflate_init(qp->state); + + if (op->m_src->pkt_len < (op->src.length + op->src.offset)) { + ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough.\n"); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } + + if (op->dst.offset >= op->m_dst->pkt_len) { + ISAL_PMD_LOG(ERR, "Output mbuf not big enough for " + "offset provided.\n"); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } + + /* Chained mbufs */ + if (op->m_src->nb_segs > 1 || op->m_dst->nb_segs > 1) { + ret = chained_mbuf_decompression(op, qp); + if (ret != 0) + return ret; + } else { + /* Linear buffer */ + /* Point decompression state to input buffer */ + qp->state->avail_in = op->src.length; + qp->state->next_in = rte_pktmbuf_mtod_offset(op->m_src, + uint8_t *, op->src.offset); + + /* Point decompression state to output buffer */ + qp->state->avail_out = op->m_dst->data_len - op->dst.offset; + qp->state->next_out = rte_pktmbuf_mtod_offset(op->m_dst, + uint8_t *, op->dst.offset); + + if (unlikely(!qp->state->next_in || !qp->state->next_out)) { + ISAL_PMD_LOG(ERR, "Invalid source or destination" + " buffers\n"); + op->status = 
RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } + + /* Execute decompression operation */ + ret = isal_inflate_stateless(qp->state); + + if (ret == ISAL_OUT_OVERFLOW) { + ISAL_PMD_LOG(ERR, "Output buffer not big enough\n"); + op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; + return ret; + } + + /* Check that input buffer has been fully consumed */ + if (qp->state->avail_in != (uint32_t)0) { + ISAL_PMD_LOG(ERR, "Input buffer could not be read" + " entirely\n"); + op->status = RTE_COMP_OP_STATUS_ERROR; + return -1; + } + + if (ret != ISAL_DECOMP_OK) { + op->status = RTE_COMP_OP_STATUS_ERROR; + return ret; + } + op->consumed = op->src.length - qp->state->avail_in; + } + op->produced = qp->state->total_out; + + return ret; +} + +/* Process compression/decompression operation */ +static int +process_op(struct isal_comp_qp *qp, struct rte_comp_op *op, + struct isal_priv_xform *priv_xform) +{ + switch (priv_xform->type) { + case RTE_COMP_COMPRESS: + process_isal_deflate(op, qp, priv_xform); + break; + case RTE_COMP_DECOMPRESS: + process_isal_inflate(op, qp); + break; + default: + ISAL_PMD_LOG(ERR, "Operation Not Supported\n"); + return -ENOTSUP; + } + return 0; +} + +/* Enqueue burst */ +static uint16_t +isal_comp_pmd_enqueue_burst(void *queue_pair, struct rte_comp_op **ops, + uint16_t nb_ops) +{ + struct isal_comp_qp *qp = queue_pair; + uint16_t i; + int retval; + int16_t num_enq = RTE_MIN(qp->num_free_elements, nb_ops); + + for (i = 0; i < num_enq; i++) { + if (unlikely(ops[i]->op_type != RTE_COMP_OP_STATELESS)) { + ops[i]->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + ISAL_PMD_LOG(ERR, "Stateful operation not Supported\n"); + qp->qp_stats.enqueue_err_count++; + continue; + } + retval = process_op(qp, ops[i], ops[i]->private_xform); + if (unlikely(retval < 0) || + ops[i]->status != RTE_COMP_OP_STATUS_SUCCESS) { + qp->qp_stats.enqueue_err_count++; + } + } + + retval = rte_ring_enqueue_burst(qp->processed_pkts, (void *)ops, + num_enq, NULL); + qp->num_free_elements -= retval; + qp->qp_stats.enqueued_count += retval; + + return retval; +} + +/* Dequeue burst */ +static uint16_t +isal_comp_pmd_dequeue_burst(void *queue_pair, struct rte_comp_op **ops, + uint16_t nb_ops) +{ + struct isal_comp_qp *qp = queue_pair; + uint16_t nb_dequeued; + + nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts, (void **)ops, + nb_ops, NULL); + qp->num_free_elements += nb_dequeued; + qp->qp_stats.dequeued_count += nb_dequeued; + + return nb_dequeued; +} + +/* Create ISA-L compression device */ +static int +compdev_isal_create(const char *name, struct rte_vdev_device *vdev, + struct rte_compressdev_pmd_init_params *init_params) +{ + struct rte_compressdev *dev; + + dev = rte_compressdev_pmd_create(name, &vdev->device, + sizeof(struct isal_comp_private), init_params); + if (dev == NULL) { + ISAL_PMD_LOG(ERR, "failed to create compressdev vdev"); + return -EFAULT; + } + + dev->dev_ops = isal_compress_pmd_ops; + + /* register rx/tx burst functions for data path */ + dev->dequeue_burst = isal_comp_pmd_dequeue_burst; + dev->enqueue_burst = isal_comp_pmd_enqueue_burst; + + return 0; +} + +/** Remove compression device */ +static int +compdev_isal_remove_dev(struct rte_vdev_device *vdev) +{ + struct rte_compressdev *compdev; + const char *name; + + name = rte_vdev_device_name(vdev); + if (name == NULL) + return -EINVAL; + + compdev = rte_compressdev_pmd_get_named_dev(name); + if (compdev == NULL) + return -ENODEV; + + return rte_compressdev_pmd_destroy(compdev); +} + +/** Initialise ISA-L compression device */ 
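+/*
+ * Illustrative usage: the device is created from an EAL --vdev argument;
+ * "socket_id" is the only devarg parsed below (via
+ * rte_compressdev_pmd_parse_input_args()). The application name and core
+ * list in this sketch are placeholders:
+ *
+ *   ./dpdk-app -l 0-1 --vdev="compress_isal,socket_id=0"
+ */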
+static int +compdev_isal_probe(struct rte_vdev_device *dev) +{ + struct rte_compressdev_pmd_init_params init_params = { + "", + rte_socket_id(), + }; + const char *name, *args; + int retval; + + name = rte_vdev_device_name(dev); + if (name == NULL) + return -EINVAL; + + args = rte_vdev_device_args(dev); + + retval = rte_compressdev_pmd_parse_input_args(&init_params, args); + if (retval) { + ISAL_PMD_LOG(ERR, + "Failed to parse initialisation arguments[%s]\n", args); + return -EINVAL; + } + + return compdev_isal_create(name, dev, &init_params); +} + +static struct rte_vdev_driver compdev_isal_pmd_drv = { + .probe = compdev_isal_probe, + .remove = compdev_isal_remove_dev, +}; + +RTE_PMD_REGISTER_VDEV(COMPDEV_NAME_ISAL_PMD, compdev_isal_pmd_drv); +RTE_PMD_REGISTER_PARAM_STRING(COMPDEV_NAME_ISAL_PMD, + "socket_id="); + +RTE_INIT(isal_init_log) +{ + isal_logtype_driver = rte_log_register("pmd.compress.isal"); + if (isal_logtype_driver >= 0) + rte_log_set_level(isal_logtype_driver, RTE_LOG_INFO); +} diff --git a/drivers/compress/isal/isal_compress_pmd_ops.c b/drivers/compress/isal/isal_compress_pmd_ops.c new file mode 100644 index 00000000..41cade87 --- /dev/null +++ b/drivers/compress/isal/isal_compress_pmd_ops.c @@ -0,0 +1,351 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ +#include + +#include +#include +#include + +#include "isal_compress_pmd_private.h" + +static const struct rte_compressdev_capabilities isal_pmd_capabilities[] = { + { + .algo = RTE_COMP_ALGO_DEFLATE, + .comp_feature_flags = RTE_COMP_FF_OOP_SGL_IN_SGL_OUT | + RTE_COMP_FF_OOP_SGL_IN_LB_OUT | + RTE_COMP_FF_OOP_LB_IN_SGL_OUT | + RTE_COMP_FF_SHAREABLE_PRIV_XFORM | + RTE_COMP_FF_HUFFMAN_FIXED | + RTE_COMP_FF_HUFFMAN_DYNAMIC, + .window_size = { + .min = 15, + .max = 15, + .increment = 0 + }, + }, + RTE_COMP_END_OF_CAPABILITIES_LIST() +}; + +/** Configure device */ +static int +isal_comp_pmd_config(struct rte_compressdev *dev, + struct rte_compressdev_config *config) +{ + int ret = 0; + unsigned int n; + char mp_name[RTE_COMPRESSDEV_NAME_MAX_LEN]; + unsigned int elt_size = sizeof(struct isal_priv_xform); + struct isal_comp_private *internals = dev->data->dev_private; + + n = snprintf(mp_name, sizeof(mp_name), "compdev_%d_xform_mp", + dev->data->dev_id); + if (n > sizeof(mp_name)) { + ISAL_PMD_LOG(ERR, + "Unable to create unique name for xform mempool"); + return -ENOMEM; + } + + internals->priv_xform_mp = rte_mempool_lookup(mp_name); + + if (internals->priv_xform_mp != NULL) { + if (((internals->priv_xform_mp)->elt_size != elt_size) || + ((internals->priv_xform_mp)->size < + config->max_nb_priv_xforms)) { + + ISAL_PMD_LOG(ERR, "%s mempool already exists with different" + " initialization parameters", mp_name); + internals->priv_xform_mp = NULL; + return -ENOMEM; + } + } else { /* First time configuration */ + internals->priv_xform_mp = rte_mempool_create( + mp_name, /* mempool name */ + /* number of elements*/ + config->max_nb_priv_xforms, + elt_size, /* element size*/ + 0, /* Cache size*/ + 0, /* private data size */ + NULL, /* obj initialization constructor */ + NULL, /* obj initialization constructor arg */ + NULL, /**< obj constructor*/ + NULL, /* obj constructor arg */ + config->socket_id, /* socket id */ + 0); /* flags */ + } + + if (internals->priv_xform_mp == NULL) { + ISAL_PMD_LOG(ERR, "%s mempool allocation failed", mp_name); + return -ENOMEM; + } + + dev->data->dev_private = internals; + + return ret; +} + +/** Start device */ +static int +isal_comp_pmd_start(__rte_unused 
struct rte_compressdev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+static void
+isal_comp_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+}
+
+/** Close device */
+static int
+isal_comp_pmd_close(struct rte_compressdev *dev)
+{
+	/* Free private data */
+	struct isal_comp_private *internals = dev->data->dev_private;
+
+	rte_mempool_free(internals->priv_xform_mp);
+	return 0;
+}
+
+/** Get device statistics */
+static void
+isal_comp_pmd_stats_get(struct rte_compressdev *dev,
+		struct rte_compressdev_stats *stats)
+{
+	uint16_t qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+/** Get device info */
+static void
+isal_comp_pmd_info_get(struct rte_compressdev *dev __rte_unused,
+		struct rte_compressdev_info *dev_info)
+{
+	if (dev_info != NULL) {
+		dev_info->capabilities = isal_pmd_capabilities;
+		dev_info->feature_flags = RTE_COMPDEV_FF_CPU_AVX512 |
+				RTE_COMPDEV_FF_CPU_AVX2 |
+				RTE_COMPDEV_FF_CPU_AVX |
+				RTE_COMPDEV_FF_CPU_SSE;
+	}
+}
+
+/** Reset device statistics */
+static void
+isal_comp_pmd_stats_reset(struct rte_compressdev *dev)
+{
+	uint16_t qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+/** Release queue pair */
+static int
+isal_comp_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+	struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+
+	if (qp == NULL)
+		return -EINVAL;
+
+	/* Free the level buffer before the stream that owns it;
+	 * rte_free(NULL) is a safe no-op, so no extra checks are needed.
+	 */
+	if (qp->stream != NULL) {
+		rte_free(qp->stream->level_buf);
+		rte_free(qp->stream);
+	}
+
+	if (qp->state != NULL)
+		rte_free(qp->state);
+
+	if (qp->processed_pkts != NULL)
+		rte_ring_free(qp->processed_pkts);
+
+	rte_free(qp);
+	dev->data->queue_pairs[qp_id] = NULL;
+
+	return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+isal_comp_pmd_qp_create_processed_pkts_ring(struct isal_comp_qp *qp,
+		unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (rte_ring_get_size(r) >= ring_size) {
+			ISAL_PMD_LOG(DEBUG,
+				"Reusing existing ring %s for processed packets",
+				qp->name);
+			return r;
+		}
+
+		ISAL_PMD_LOG(ERR,
+			"Unable to reuse existing ring %s"
+			" for processed packets",
+			qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Set a unique name for the queue pair based on the device name, dev_id
+ * and qp_id
+ */
+static int
+isal_comp_pmd_qp_set_unique_name(struct rte_compressdev *dev,
+		struct isal_comp_qp *qp)
+{
+	unsigned int n = snprintf(qp->name, sizeof(qp->name),
+			"isal_compression_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+/* Setup a queue pair */
+static int
+isal_comp_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+		uint32_t max_inflight_ops, int socket_id)
+{
+	struct isal_comp_qp *qp = NULL;
+	int retval;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		isal_comp_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure.
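+	 * Note: the stream, level buffer and inflate state allocated below
+	 * are used without NULL checks; a hardened setup path would verify
+	 * each rte_zmalloc_socket() return value before qp->stream is
+	 * dereferenced.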
*/ + qp = rte_zmalloc_socket("Isa-l compression PMD Queue Pair", sizeof(*qp), + RTE_CACHE_LINE_SIZE, socket_id); + if (qp == NULL) { + ISAL_PMD_LOG(ERR, "Failed to allocate queue pair memory"); + return (-ENOMEM); + } + + /* Initialize memory for compression stream structure */ + qp->stream = rte_zmalloc_socket("Isa-l compression stream ", + sizeof(struct isal_zstream), RTE_CACHE_LINE_SIZE, + socket_id); + + /* Initialize memory for compression level buffer */ + qp->stream->level_buf = rte_zmalloc_socket("Isa-l compression lev_buf", + ISAL_DEF_LVL3_DEFAULT, RTE_CACHE_LINE_SIZE, + socket_id); + + /* Initialize memory for decompression state structure */ + qp->state = rte_zmalloc_socket("Isa-l decompression state", + sizeof(struct inflate_state), RTE_CACHE_LINE_SIZE, + socket_id); + + qp->id = qp_id; + dev->data->queue_pairs[qp_id] = qp; + + retval = isal_comp_pmd_qp_set_unique_name(dev, qp); + if (retval) { + ISAL_PMD_LOG(ERR, "Failed to create unique name for isal " + "compression device"); + goto qp_setup_cleanup; + } + + qp->processed_pkts = isal_comp_pmd_qp_create_processed_pkts_ring(qp, + max_inflight_ops, socket_id); + if (qp->processed_pkts == NULL) { + ISAL_PMD_LOG(ERR, "Failed to create unique name for isal " + "compression device"); + goto qp_setup_cleanup; + } + + qp->num_free_elements = rte_ring_free_count(qp->processed_pkts); + + memset(&qp->qp_stats, 0, sizeof(qp->qp_stats)); + return 0; + +qp_setup_cleanup: + if (qp) + rte_free(qp); + + return -1; +} + +/** Set private xform data*/ +static int +isal_comp_pmd_priv_xform_create(struct rte_compressdev *dev, + const struct rte_comp_xform *xform, void **priv_xform) +{ + int ret; + struct isal_comp_private *internals = dev->data->dev_private; + + if (xform == NULL) { + ISAL_PMD_LOG(ERR, "Invalid Xform struct"); + return -EINVAL; + } + + if (rte_mempool_get(internals->priv_xform_mp, priv_xform)) { + ISAL_PMD_LOG(ERR, + "Couldn't get object from private xform mempool"); + return -ENOMEM; + } + + ret = isal_comp_set_priv_xform_parameters(*priv_xform, xform); + if (ret != 0) { + ISAL_PMD_LOG(ERR, "Failed to configure private xform parameters"); + + /* Return private xform to mempool */ + rte_mempool_put(internals->priv_xform_mp, priv_xform); + return ret; + } + return 0; +} + +/** Clear memory of the private xform so it doesn't leave key material behind */ +static int +isal_comp_pmd_priv_xform_free(struct rte_compressdev *dev, void *priv_xform) +{ + struct isal_comp_private *internals = dev->data->dev_private; + + /* Zero out the whole structure */ + if (priv_xform) { + memset(priv_xform, 0, sizeof(struct isal_priv_xform)); + rte_mempool_put(internals->priv_xform_mp, priv_xform); + } + return 0; +} + +struct rte_compressdev_ops isal_pmd_ops = { + .dev_configure = isal_comp_pmd_config, + .dev_start = isal_comp_pmd_start, + .dev_stop = isal_comp_pmd_stop, + .dev_close = isal_comp_pmd_close, + + .stats_get = isal_comp_pmd_stats_get, + .stats_reset = isal_comp_pmd_stats_reset, + + .dev_infos_get = isal_comp_pmd_info_get, + + .queue_pair_setup = isal_comp_pmd_qp_setup, + .queue_pair_release = isal_comp_pmd_qp_release, + + .private_xform_create = isal_comp_pmd_priv_xform_create, + .private_xform_free = isal_comp_pmd_priv_xform_free, +}; + +struct rte_compressdev_ops *isal_compress_pmd_ops = &isal_pmd_ops; diff --git a/drivers/compress/isal/isal_compress_pmd_private.h b/drivers/compress/isal/isal_compress_pmd_private.h new file mode 100644 index 00000000..46e9fcfa --- /dev/null +++ b/drivers/compress/isal/isal_compress_pmd_private.h @@ -0,0 
+1,57 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#ifndef _ISAL_COMP_PMD_PRIVATE_H_ +#define _ISAL_COMP_PMD_PRIVATE_H_ + +#define COMPDEV_NAME_ISAL_PMD compress_isal +/**< ISA-L comp PMD device name */ + +extern int isal_logtype_driver; +#define ISAL_PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, isal_logtype_driver, "%s(): "fmt "\n", \ + __func__, ##args) + +/* private data structure for each ISA-L compression device */ +struct isal_comp_private { + struct rte_mempool *priv_xform_mp; +}; + +/** ISA-L queue pair */ +struct isal_comp_qp { + /* Queue Pair Identifier */ + uint16_t id; + /* Unique Queue Pair Name */ + char name[RTE_COMPRESSDEV_NAME_MAX_LEN]; + /* Ring for placing process packets */ + struct rte_ring *processed_pkts; + /* Queue pair statistics */ + struct rte_compressdev_stats qp_stats; + /* Compression stream information*/ + struct isal_zstream *stream; + /* Decompression state information*/ + struct inflate_state *state; + /* Number of free elements on ring */ + uint16_t num_free_elements; +} __rte_cache_aligned; + +/** ISA-L private xform structure */ +struct isal_priv_xform { + enum rte_comp_xform_type type; + union { + struct rte_comp_compress_xform compress; + struct rte_comp_decompress_xform decompress; + }; + uint32_t level_buffer_size; +} __rte_cache_aligned; + +/** Set and validate NULL comp private xform parameters */ +extern int +isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform, + const struct rte_comp_xform *xform); + +/** device specific operations function pointer structure */ +extern struct rte_compressdev_ops *isal_compress_pmd_ops; + +#endif /* _ISAL_COMP_PMD_PRIVATE_H_ */ diff --git a/drivers/compress/isal/meson.build b/drivers/compress/isal/meson.build new file mode 100644 index 00000000..94c10fd6 --- /dev/null +++ b/drivers/compress/isal/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 Intel Corporation + +dep = dependency('libisal', required: false) +if not dep.found() + build =false +endif + +deps += 'bus_vdev' +sources = files('isal_compress_pmd.c', 'isal_compress_pmd_ops.c') +ext_deps += dep +pkgconfig_extra_libs += '-lisal' + +allow_experimental_apis = true diff --git a/drivers/compress/isal/rte_pmd_isal_version.map b/drivers/compress/isal/rte_pmd_isal_version.map new file mode 100644 index 00000000..de8e412f --- /dev/null +++ b/drivers/compress/isal/rte_pmd_isal_version.map @@ -0,0 +1,3 @@ +DPDK_18.05 { + local: *; +}; diff --git a/drivers/compress/meson.build b/drivers/compress/meson.build new file mode 100644 index 00000000..817ef3be --- /dev/null +++ b/drivers/compress/meson.build @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +drivers = ['isal', 'octeontx', 'qat', 'zlib'] + +std_deps = ['compressdev'] # compressdev pulls in all other needed deps +config_flag_fmt = 'RTE_LIBRTE_@0@_PMD' +driver_name_fmt = 'rte_pmd_@0@' diff --git a/drivers/compress/octeontx/Makefile b/drivers/compress/octeontx/Makefile new file mode 100644 index 00000000..f34424c8 --- /dev/null +++ b/drivers/compress/octeontx/Makefile @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium, Inc + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_pmd_octeontx_zip.a + +# library version +LIBABIVER := 1 + +# build flags +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -O3 +CFLAGS += -DALLOW_EXPERIMENTAL_API +CFLAGS += -I$(RTE_SDK)/drivers/compress/octeontx/include + +# external 
library include paths +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_compressdev +LDLIBS += -lrte_pci -lrte_bus_pci + +# library source files +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += otx_zip_pmd.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += otx_zip.c + +# versioning export map +EXPORT_MAP := rte_pmd_octeontx_compress_version.map + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/compress/octeontx/include/zip_regs.h b/drivers/compress/octeontx/include/zip_regs.h new file mode 100644 index 00000000..1e74db43 --- /dev/null +++ b/drivers/compress/octeontx/include/zip_regs.h @@ -0,0 +1,711 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#ifndef _RTE_OCTEONTX_ZIP_REGS_H_ +#define _RTE_OCTEONTX_ZIP_REGS_H_ + + +/** + * Enumeration zip_cc + * + * ZIP compression coding Enumeration + * Enumerates ZIP_INST_S[CC]. + */ +enum { + ZIP_CC_DEFAULT = 0, + ZIP_CC_DYN_HUFF, + ZIP_CC_FIXED_HUFF, + ZIP_CC_LZS +} zip_cc; + +/** + * Register (NCB) zip_vq#_ena + * + * ZIP VF Queue Enable Register + * If a queue is disabled, ZIP CTL stops fetching instructions from the queue. + */ +typedef union { + uint64_t u; + struct zip_vqx_ena_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */ + uint64_t reserved_1_63 : 63; + uint64_t ena : 1; +#else /* Word 0 - Little Endian */ + uint64_t ena : 1; + uint64_t reserved_1_63 : 63; +#endif /* Word 0 - End */ + } s; + /* struct zip_vqx_ena_s cn; */ +} zip_vqx_ena_t; + +/** + * Register (NCB) zip_vq#_sbuf_addr + * + * ZIP VF Queue Starting Buffer Address Registers + * These registers set the buffer parameters for the instruction queues. + * When quiescent (i.e. + * outstanding doorbell count is 0), it is safe to rewrite this register + * to effectively reset the + * command buffer state machine. + * These registers must be programmed after software programs the + * corresponding ZIP_QUE()_SBUF_CTL. + */ +typedef union { + uint64_t u; + struct zip_vqx_sbuf_addr_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */ + uint64_t reserved_49_63 : 15; + uint64_t ptr : 42; + uint64_t off : 7; +#else /* Word 0 - Little Endian */ + uint64_t off : 7; + uint64_t ptr : 42; + uint64_t reserved_49_63 : 15; +#endif /* Word 0 - End */ + } s; + /* struct zip_vqx_sbuf_addr_s cn; */ +} zip_vqx_sbuf_addr_t; + +/** + * Register (NCB) zip_que#_doorbell + * + * ZIP Queue Doorbell Registers + * Doorbells for the ZIP instruction queues. + */ +typedef union { + uint64_t u; + struct zip_quex_doorbell_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */ + uint64_t reserved_20_63 : 44; + uint64_t dbell_cnt : 20; +#else /* Word 0 - Little Endian */ + uint64_t dbell_cnt : 20; + uint64_t reserved_20_63 : 44; +#endif /* Word 0 - End */ + } s; + /* struct zip_quex_doorbell_s cn; */ +} zip_quex_doorbell_t; + +/** + * Structure zip_nptr_s + * + * ZIP Instruction Next-Chunk-Buffer Pointer (NPTR) Structure + * This structure is used to chain all the ZIP instruction buffers + * together. ZIP instruction buffers are managed + * (allocated and released) by software. 
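+ *
+ * Illustrative chaining step (mirroring zipvf_push_command() in
+ * otx_zip.c): when the last slot of a command chunk is reached, software
+ * writes a next-chunk pointer which, for this PMD's single circular
+ * chunk, wraps back to the queue base:
+ *
+ *	union zip_nptr_s ncp;
+ *	ncp.u = 0ull;
+ *	ncp.s.addr = cmdq->iova;
+ *	*ncb_ptr = ncp.u;
+ *
+ * cmdq and ncb_ptr here name the PMD's command-queue bookkeeping.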
+ */ +union zip_nptr_s { + uint64_t u; + struct zip_nptr_s_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */ + uint64_t addr : 64; +#else /* Word 0 - Little Endian */ + uint64_t addr : 64; +#endif /* Word 0 - End */ + } s; + /* struct zip_nptr_s_s cn83xx; */ +}; + +/** + * generic ptr address + */ +union zip_zptr_addr_s { + /** This field can be used to set/clear all bits, or do bitwise + * operations over the entire structure. + */ + uint64_t u; + /** generic ptr address */ + struct { +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */ + uint64_t addr : 64; +#else /* Word 0 - Little Endian */ + uint64_t addr : 64; +#endif /* Word 0 - End */ + } s; +}; + +/** + * generic ptr ctl + */ +union zip_zptr_ctl_s { + /** This field can be used to set/clear all bits, or do bitwise + * operations over the entire structure. + */ + uint64_t u; + /** generic ptr ctl */ + struct { +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */ + uint64_t reserved_112_127 : 16; + uint64_t length : 16; + uint64_t reserved_67_95 : 29; + uint64_t fw : 1; + uint64_t nc : 1; + uint64_t data_be : 1; +#else /* Word 1 - Little Endian */ + uint64_t data_be : 1; + uint64_t nc : 1; + uint64_t fw : 1; + uint64_t reserved_67_95 : 29; + uint64_t length : 16; + uint64_t reserved_112_127 : 16; +#endif /* Word 1 - End */ + } s; + +}; + +/** + * Structure zip_inst_s + * + * ZIP Instruction Structure + * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15 + * within the structure). + */ +union zip_inst_s { + /** This field can be used to set/clear all bits, or do bitwise + * operations over the entire structure. + */ + uint64_t u[16]; + /** ZIP Instruction Structure */ + struct zip_inst_s_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */ + /** Done interrupt */ + uint64_t doneint : 1; + /** reserved */ + uint64_t reserved_56_62 : 7; + /** Total output length */ + uint64_t totaloutputlength : 24; + /** reserved */ + uint64_t reserved_27_31 : 5; + /** EXNUM */ + uint64_t exn : 3; + /** HASH IV */ + uint64_t iv : 1; + /** EXBITS */ + uint64_t exbits : 7; + /** Hash more-in-file */ + uint64_t hmif : 1; + /** Hash Algorithm and enable */ + uint64_t halg : 3; + /** Sync flush*/ + uint64_t sf : 1; + /** Compression speed/storage */ + uint64_t ss : 2; + /** Compression coding */ + uint64_t cc : 2; + /** End of input data */ + uint64_t ef : 1; + /** Beginning of file */ + uint64_t bf : 1; + // uint64_t reserved_3_4 : 2; + /** Comp/decomp operation */ + uint64_t op : 2; + /** Data sactter */ + uint64_t ds : 1; + /** Data gather */ + uint64_t dg : 1; + /** History gather */ + uint64_t hg : 1; +#else /* Word 0 - Little Endian */ + uint64_t hg : 1; + uint64_t dg : 1; + uint64_t ds : 1; + //uint64_t reserved_3_4 : 2; + uint64_t op : 2; + uint64_t bf : 1; + uint64_t ef : 1; + uint64_t cc : 2; + uint64_t ss : 2; + uint64_t sf : 1; + uint64_t halg : 3; + uint64_t hmif : 1; + uint64_t exbits : 7; + uint64_t iv : 1; + uint64_t exn : 3; + uint64_t reserved_27_31 : 5; + uint64_t totaloutputlength : 24; + uint64_t reserved_56_62 : 7; + uint64_t doneint : 1; + +#endif /* Word 0 - End */ + +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */ + /** History length */ + uint64_t historylength : 16; + /** reserved */ + uint64_t reserved_96_111 : 16; + /** adler/crc32 checksum*/ + uint64_t adlercrc32 : 32; +#else /* Word 1 - Little Endian */ + uint64_t adlercrc32 : 32; + uint64_t reserved_96_111 : 16; + uint64_t historylength : 16; +#endif /* Word 1 - End */ + +#if 
defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */ + /** Decompression Context Pointer Address */ + union zip_zptr_addr_s ctx_ptr_addr; +#else /* Word 2 - Little Endian */ + union zip_zptr_addr_s ctx_ptr_addr; +#endif /* Word 2 - End */ + +#if defined(__BIG_ENDIAN_BITFIELD) + /** Decompression Context Pointer Control */ + union zip_zptr_ctl_s ctx_ptr_ctl; +#else /* Word 3 - Little Endian */ + union zip_zptr_ctl_s ctx_ptr_ctl; +#endif /* Word 3 - End */ + +#if defined(__BIG_ENDIAN_BITFIELD) + /** Decompression history pointer address */ + union zip_zptr_addr_s his_ptr_addr; +#else /* Word 4 - Little Endian */ + union zip_zptr_addr_s his_ptr_addr; +#endif /* Word 4 - End */ + +#if defined(__BIG_ENDIAN_BITFIELD) + /** Decompression history pointer control */ + union zip_zptr_ctl_s his_ptr_ctl; +#else /* Word 5 - Little Endian */ + union zip_zptr_ctl_s his_ptr_ctl; +#endif /* Word 5 - End */ + +#if defined(__BIG_ENDIAN_BITFIELD) + /** Input and compression history pointer address */ + union zip_zptr_addr_s inp_ptr_addr; +#else /* Word 6 - Little Endian */ + union zip_zptr_addr_s inp_ptr_addr; +#endif /* Word 6 - End */ + +#if defined(__BIG_ENDIAN_BITFIELD) + /** Input and compression history pointer control */ + union zip_zptr_ctl_s inp_ptr_ctl; +#else /* Word 7 - Little Endian */ + union zip_zptr_ctl_s inp_ptr_ctl; +#endif /* Word 7 - End */ + +#if defined(__BIG_ENDIAN_BITFIELD) + /** Output pointer address */ + union zip_zptr_addr_s out_ptr_addr; +#else /* Word 8 - Little Endian */ + union zip_zptr_addr_s out_ptr_addr; +#endif /* Word 8 - End */ + +#if defined(__BIG_ENDIAN_BITFIELD) + /** Output pointer control */ + union zip_zptr_ctl_s out_ptr_ctl; +#else /* Word 9 - Little Endian */ + union zip_zptr_ctl_s out_ptr_ctl; +#endif /* Word 9 - End */ + +#if defined(__BIG_ENDIAN_BITFIELD) + /** Result pointer address */ + union zip_zptr_addr_s res_ptr_addr; +#else /* Word 10 - Little Endian */ + union zip_zptr_addr_s res_ptr_addr; +#endif /* Word 10 - End */ + +#if defined(__BIG_ENDIAN_BITFIELD) + /** Result pointer control */ + union zip_zptr_ctl_s res_ptr_ctl; +#else /* Word 11 - Little Endian */ + union zip_zptr_ctl_s res_ptr_ctl; +#endif /* Word 11 - End */ + +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 12 - Big Endian */ + /** reserved */ + uint64_t reserved_812_831 : 20; + /** SSO guest group */ + uint64_t ggrp : 10; + /** SSO tag type */ + uint64_t tt : 2; + /** SSO tag */ + uint64_t tag : 32; +#else /* Word 12 - Little Endian */ + uint64_t tag : 32; + uint64_t tt : 2; + uint64_t ggrp : 10; + uint64_t reserved_812_831 : 20; +#endif /* Word 12 - End */ + +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 13 - Big Endian */ + /** Work queue entry pointer */ + uint64_t wq_ptr : 64; +#else /* Word 13 - Little Endian */ + uint64_t wq_ptr : 64; +#endif /* Word 13 - End */ + +#if defined(__BIG_ENDIAN_BITFIELD) + /** reserved */ + uint64_t reserved_896_959 : 64; +#else /* Word 14 - Little Endian */ + uint64_t reserved_896_959 : 64; +#endif /* Word 14 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) + /** Hash structure pointer */ + uint64_t hash_ptr : 64; +#else /* Word 15 - Little Endian */ + uint64_t hash_ptr : 64; +#endif /* Word 15 - End */ + } /** ZIP 88xx Instruction Structure */zip88xx; + + /** ZIP Instruction Structure */ + struct zip_inst_s_cn83xx { +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */ + /** Done interrupt */ + uint64_t doneint : 1; + /** reserved */ + uint64_t reserved_56_62 : 7; + /** Total output length */ + uint64_t totaloutputlength : 24; + /** reserved */ + 
uint64_t reserved_27_31 : 5; + /** EXNUM */ + uint64_t exn : 3; + /** HASH IV */ + uint64_t iv : 1; + /** EXBITS */ + uint64_t exbits : 7; + /** Hash more-in-file */ + uint64_t hmif : 1; + /** Hash Algorithm and enable */ + uint64_t halg : 3; + /** Sync flush*/ + uint64_t sf : 1; + /** Compression speed/storage */ + uint64_t ss : 2; + /** Compression coding */ + uint64_t cc : 2; + /** End of input data */ + uint64_t ef : 1; + /** Beginning of file */ + uint64_t bf : 1; + /** Comp/decomp operation */ + uint64_t op : 2; + /** Data sactter */ + uint64_t ds : 1; + /** Data gather */ + uint64_t dg : 1; + /** History gather */ + uint64_t hg : 1; +#else /* Word 0 - Little Endian */ + uint64_t hg : 1; + uint64_t dg : 1; + uint64_t ds : 1; + uint64_t op : 2; + uint64_t bf : 1; + uint64_t ef : 1; + uint64_t cc : 2; + uint64_t ss : 2; + uint64_t sf : 1; + uint64_t halg : 3; + uint64_t hmif : 1; + uint64_t exbits : 7; + uint64_t iv : 1; + uint64_t exn : 3; + uint64_t reserved_27_31 : 5; + uint64_t totaloutputlength : 24; + uint64_t reserved_56_62 : 7; + uint64_t doneint : 1; +#endif /* Word 0 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */ + /** History length */ + uint64_t historylength : 16; + /** reserved */ + uint64_t reserved_96_111 : 16; + /** adler/crc32 checksum*/ + uint64_t adlercrc32 : 32; +#else /* Word 1 - Little Endian */ + uint64_t adlercrc32 : 32; + uint64_t reserved_96_111 : 16; + uint64_t historylength : 16; +#endif /* Word 1 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */ + /** Decompression Context Pointer Address */ + union zip_zptr_addr_s ctx_ptr_addr; +#else /* Word 2 - Little Endian */ + union zip_zptr_addr_s ctx_ptr_addr; +#endif /* Word 2 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 3 - Big Endian */ + /** Decompression Context Pointer Control */ + union zip_zptr_ctl_s ctx_ptr_ctl; +#else /* Word 3 - Little Endian */ + union zip_zptr_ctl_s ctx_ptr_ctl; +#endif /* Word 3 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 4 - Big Endian */ + /** Decompression history pointer address */ + union zip_zptr_addr_s his_ptr_addr; +#else /* Word 4 - Little Endian */ + union zip_zptr_addr_s his_ptr_addr; +#endif /* Word 4 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 5 - Big Endian */ + /** Decompression history pointer control */ + union zip_zptr_ctl_s his_ptr_ctl; +#else /* Word 5 - Little Endian */ + union zip_zptr_ctl_s his_ptr_ctl; +#endif /* Word 5 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 6 - Big Endian */ + /** Input and compression history pointer address */ + union zip_zptr_addr_s inp_ptr_addr; +#else /* Word 6 - Little Endian */ + union zip_zptr_addr_s inp_ptr_addr; +#endif /* Word 6 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 7 - Big Endian */ + /** Input and compression history pointer control */ + union zip_zptr_ctl_s inp_ptr_ctl; +#else /* Word 7 - Little Endian */ + union zip_zptr_ctl_s inp_ptr_ctl; +#endif /* Word 7 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 8 - Big Endian */ + /** Output pointer address */ + union zip_zptr_addr_s out_ptr_addr; +#else /* Word 8 - Little Endian */ + union zip_zptr_addr_s out_ptr_addr; +#endif /* Word 8 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 9 - Big Endian */ + /** Output pointer control */ + union zip_zptr_ctl_s out_ptr_ctl; +#else /* Word 9 - Little Endian */ + union zip_zptr_ctl_s out_ptr_ctl; +#endif /* Word 9 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 10 - Big Endian */ + /** Result pointer address */ + union 
zip_zptr_addr_s res_ptr_addr; +#else /* Word 10 - Little Endian */ + union zip_zptr_addr_s res_ptr_addr; +#endif /* Word 10 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 11 - Big Endian */ + /** Result pointer control */ + union zip_zptr_ctl_s res_ptr_ctl; +#else /* Word 11 - Little Endian */ + union zip_zptr_ctl_s res_ptr_ctl; +#endif /* Word 11 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 12 - Big Endian */ + /** reserved */ + uint64_t reserved_812_831 : 20; + /** SSO guest group */ + uint64_t ggrp : 10; + /** SSO tag type */ + uint64_t tt : 2; + /** SSO tag */ + uint64_t tag : 32; +#else /* Word 12 - Little Endian */ + uint64_t tag : 32; + uint64_t tt : 2; + uint64_t ggrp : 10; + uint64_t reserved_812_831 : 20; +#endif /* Word 12 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 13 - Big Endian */ + /** Work queue entry pointer */ + uint64_t wq_ptr : 64; +#else /* Word 13 - Little Endian */ + uint64_t wq_ptr : 64; +#endif /* Word 13 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 14 - Big Endian */ + /** reserved */ + uint64_t reserved_896_959 : 64; +#else /* Word 14 - Little Endian */ + uint64_t reserved_896_959 : 64; +#endif /* Word 14 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 15 - Big Endian */ + /** Hash structure pointer */ + uint64_t hash_ptr : 64; +#else /* Word 15 - Little Endian */ + uint64_t hash_ptr : 64; +#endif /* Word 15 - End */ + } /** ZIP 83xx Instruction Structure */s; +}; + +/** + * Structure zip_zres_s + * + * ZIP Result Structure + * The ZIP coprocessor writes the result structure after it completes the + * invocation. The result structure is exactly 24 bytes, and each invocation + * of the ZIP coprocessor produces exactly one result structure. + */ +union zip_zres_s { + /** This field can be used to set/clear all bits, or do bitwise + * operations over the entire structure. 
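+ *
+ * A minimal completion-poll sketch, mirroring zip_process_op() in
+ * otx_zip_pmd.c (ZIP_COMP_E_NOTDONE is 0, so a zeroed [COMPCODE] means
+ * "still running"):
+ *
+ *	volatile union zip_zres_s *zresult = res_buf;
+ *	zresult->s.compcode = ZIP_COMP_E_NOTDONE;
+ *	zipvf_push_command(qp, inst);
+ *	while (!zresult->s.compcode)
+ *		;
+ *
+ * res_buf, qp and inst are placeholders for the stream's RES_BUF,
+ * queue pair and prepared instruction.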
+ */ + uint64_t u[8]; + /** ZIP Result Structure */ + struct zip_zres_s_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */ + /** crc32 checksum of uncompressed stream */ + uint64_t crc32 : 32; + /** adler32 checksum of uncompressed stream*/ + uint64_t adler32 : 32; +#else /* Word 0 - Little Endian */ + uint64_t adler32 : 32; + uint64_t crc32 : 32; +#endif /* Word 0 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */ + /** Total numer of Bytes produced in output stream */ + uint64_t totalbyteswritten : 32; + /** Total number of bytes processed from the input stream */ + uint64_t totalbytesread : 32; +#else /* Word 1 - Little Endian */ + uint64_t totalbytesread : 32; + uint64_t totalbyteswritten : 32; +#endif /* Word 1 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */ + /** Total number of compressed input bits + * consumed to decompress all blocks in the file + */ + uint64_t totalbitsprocessed : 32; + /** Done interrupt*/ + uint64_t doneint : 1; + /** reserved */ + uint64_t reserved_155_158 : 4; + /** EXNUM */ + uint64_t exn : 3; + /** reserved */ + uint64_t reserved_151 : 1; + /** EXBITS */ + uint64_t exbits : 7; + /** reserved */ + uint64_t reserved_137_143 : 7; + /** End of file */ + uint64_t ef : 1; + /** Completion/error code */ + uint64_t compcode : 8; +#else /* Word 2 - Little Endian */ + uint64_t compcode : 8; + uint64_t ef : 1; + uint64_t reserved_137_143 : 7; + uint64_t exbits : 7; + uint64_t reserved_151 : 1; + uint64_t exn : 3; + uint64_t reserved_155_158 : 4; + uint64_t doneint : 1; + uint64_t totalbitsprocessed : 32; +#endif /* Word 2 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 3 - Big Endian */ + /** reserved */ + uint64_t reserved_253_255 : 3; + /** Hash length in bytes */ + uint64_t hshlen : 61; +#else /* Word 3 - Little Endian */ + uint64_t hshlen : 61; + uint64_t reserved_253_255 : 3; +#endif /* Word 3 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 4 - Big Endian */ + /** Double-word 0 of computed hash */ + uint64_t hash0 : 64; +#else /* Word 4 - Little Endian */ + uint64_t hash0 : 64; +#endif /* Word 4 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 5 - Big Endian */ + /** Double-word 1 of computed hash */ + uint64_t hash1 : 64; +#else /* Word 5 - Little Endian */ + uint64_t hash1 : 64; +#endif /* Word 5 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 6 - Big Endian */ + /** Double-word 2 of computed hash */ + uint64_t hash2 : 64; +#else /* Word 6 - Little Endian */ + uint64_t hash2 : 64; +#endif /* Word 6 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 7 - Big Endian */ + /** Double-word 3 of computed hash */ + uint64_t hash3 : 64; +#else /* Word 7 - Little Endian */ + uint64_t hash3 : 64; +#endif /* Word 7 - End */ + } /** ZIP Result Structure */s; + + /* struct zip_zres_s_s cn83xx; */ +}; + +/** + * Structure zip_zptr_s + * + * ZIP Generic Pointer Structure + * This structure is the generic format of pointers in ZIP_INST_S. + */ +union zip_zptr_s { + /** This field can be used to set/clear all bits, or do bitwise + * operations over the entire structure. 
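+ *
+ * Illustrative direct (non-gather) pointer fill, as done for the input
+ * buffer in zipvf_prepare_in_buf() (otx_zip.h):
+ *
+ *	inst->s.dg = 0;
+ *	inst->s.inp_ptr_addr.s.addr = rte_pktmbuf_iova_offset(m_src, offset);
+ *	inst->s.inp_ptr_ctl.s.length = inlen;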
+ */ + uint64_t u[2]; + /** ZIP Generic Pointer Structure */ + struct zip_zptr_s_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */ + /** Pointer to Data or scatter-gather list */ + uint64_t addr : 64; +#else /* Word 0 - Little Endian */ + uint64_t addr : 64; +#endif /* Word 0 - End */ +#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */ + /** reserved */ + uint64_t reserved_112_127 : 16; + /** Length of Data or scatter-gather list*/ + uint64_t length : 16; + /** reserved */ + uint64_t reserved_67_95 : 29; + /** Full-block write */ + uint64_t fw : 1; + /** No cache allocation */ + uint64_t nc : 1; + /** reserved */ + uint64_t data_be : 1; +#else /* Word 1 - Little Endian */ + uint64_t data_be : 1; + uint64_t nc : 1; + uint64_t fw : 1; + uint64_t reserved_67_95 : 29; + uint64_t length : 16; + uint64_t reserved_112_127 : 16; +#endif /* Word 1 - End */ + } /** ZIP Generic Pointer Structure */s; +}; + +/** + * Enumeration zip_comp_e + * + * ZIP Completion Enumeration + * Enumerates the values of ZIP_ZRES_S[COMPCODE]. + */ +#define ZIP_COMP_E_NOTDONE (0) +#define ZIP_COMP_E_SUCCESS (1) +#define ZIP_COMP_E_DTRUNC (2) +#define ZIP_COMP_E_DSTOP (3) +#define ZIP_COMP_E_ITRUNC (4) +#define ZIP_COMP_E_RBLOCK (5) +#define ZIP_COMP_E_NLEN (6) +#define ZIP_COMP_E_BADCODE (7) +#define ZIP_COMP_E_BADCODE2 (8) +#define ZIP_COMP_E_ZERO_LEN (9) +#define ZIP_COMP_E_PARITY (0xa) +#define ZIP_COMP_E_FATAL (0xb) +#define ZIP_COMP_E_TIMEOUT (0xc) +#define ZIP_COMP_E_INSTR_ERR (0xd) +#define ZIP_COMP_E_HCTX_ERR (0xe) +#define ZIP_COMP_E_STOP (3) + +/** + * Enumeration zip_op_e + * + * ZIP Operation Enumeration + * Enumerates ZIP_INST_S[OP]. + * Internal: + */ +#define ZIP_OP_E_DECOMP (0) +#define ZIP_OP_E_NOCOMP (1) +#define ZIP_OP_E_COMP (2) + +/** + * Enumeration zip compression levels + * + * ZIP Compression Level Enumeration + * Enumerates ZIP_INST_S[SS]. 
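+ * RTE_COMP_LEVEL_MIN maps to ZIP_COMP_E_LEVEL_MIN and RTE_COMP_LEVEL_MAX
+ * to ZIP_COMP_E_LEVEL_MAX in zip_set_stream_parameters(); any level in
+ * between falls back to the PMD default, ZIP_COMP_E_LEVEL_MED.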
+ * Internal: + */ +#define ZIP_COMP_E_LEVEL_MAX (0) +#define ZIP_COMP_E_LEVEL_MED (1) +#define ZIP_COMP_E_LEVEL_LOW (2) +#define ZIP_COMP_E_LEVEL_MIN (3) + +#endif /* _RTE_ZIP_REGS_H_ */ diff --git a/drivers/compress/octeontx/meson.build b/drivers/compress/octeontx/meson.build new file mode 100644 index 00000000..7cd202d0 --- /dev/null +++ b/drivers/compress/octeontx/meson.build @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium, Inc + +name = 'octeontx_compress' +sources = files('otx_zip.c', 'otx_zip_pmd.c') +allow_experimental_apis = true +includes += include_directories('include') +deps += ['mempool_octeontx', 'bus_pci'] +ext_deps += dep diff --git a/drivers/compress/octeontx/otx_zip.c b/drivers/compress/octeontx/otx_zip.c new file mode 100644 index 00000000..a9046ff3 --- /dev/null +++ b/drivers/compress/octeontx/otx_zip.c @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#include "otx_zip.h" + +uint64_t +zip_reg_read64(uint8_t *hw_addr, uint64_t offset) +{ + uint8_t *base = hw_addr; + return *(volatile uint64_t *)(base + offset); +} + +void +zip_reg_write64(uint8_t *hw_addr, uint64_t offset, uint64_t val) +{ + uint8_t *base = hw_addr; + *(uint64_t *)(base + offset) = val; +} + +static void +zip_q_enable(struct zipvf_qp *qp) +{ + zip_vqx_ena_t que_ena; + + /*ZIP VFx command queue init*/ + que_ena.u = 0ull; + que_ena.s.ena = 1; + + zip_reg_write64(qp->vf->vbar0, ZIP_VQ_ENA, que_ena.u); + rte_wmb(); +} + +/* initialize given qp on zip device */ +int +zipvf_q_init(struct zipvf_qp *qp) +{ + zip_vqx_sbuf_addr_t que_sbuf_addr; + + uint64_t size; + void *cmdq_addr; + uint64_t iova; + struct zipvf_cmdq *cmdq = &qp->cmdq; + struct zip_vf *vf = qp->vf; + + /* allocate and setup instruction queue */ + size = ZIP_MAX_CMDQ_SIZE; + size = ZIP_ALIGN_ROUNDUP(size, ZIP_CMDQ_ALIGN); + + cmdq_addr = rte_zmalloc(qp->name, size, ZIP_CMDQ_ALIGN); + if (cmdq_addr == NULL) + return -1; + + cmdq->sw_head = (uint64_t *)cmdq_addr; + cmdq->va = (uint8_t *)cmdq_addr; + iova = rte_mem_virt2iova(cmdq_addr); + + cmdq->iova = iova; + + que_sbuf_addr.u = 0ull; + que_sbuf_addr.s.ptr = (cmdq->iova >> 7); + zip_reg_write64(vf->vbar0, ZIP_VQ_SBUF_ADDR, que_sbuf_addr.u); + + zip_q_enable(qp); + + memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE); + rte_spinlock_init(&cmdq->qlock); + + return 0; +} + +int +zipvf_q_term(struct zipvf_qp *qp) +{ + struct zipvf_cmdq *cmdq = &qp->cmdq; + zip_vqx_ena_t que_ena; + struct zip_vf *vf = qp->vf; + + if (cmdq->va != NULL) { + memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE); + rte_free(cmdq->va); + } + + /*Disabling the ZIP queue*/ + que_ena.u = 0ull; + zip_reg_write64(vf->vbar0, ZIP_VQ_ENA, que_ena.u); + + return 0; +} + +void +zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *cmd) +{ + zip_quex_doorbell_t dbell; + union zip_nptr_s ncp; + uint64_t *ncb_ptr; + struct zipvf_cmdq *cmdq = &qp->cmdq; + void *reg_base = qp->vf->vbar0; + + /*Held queue lock*/ + rte_spinlock_lock(&(cmdq->qlock)); + + /* Check space availability in zip cmd queue */ + if ((((cmdq->sw_head - (uint64_t *)cmdq->va) * sizeof(uint64_t *)) + + ZIP_CMD_SIZE) == (ZIP_MAX_CMDQ_SIZE - ZIP_MAX_NCBP_SIZE)) { + /*Last buffer of the command queue*/ + memcpy((uint8_t *)cmdq->sw_head, + (uint8_t *)cmd, + sizeof(union zip_inst_s)); + /* move pointer to next loc in unit of 64-bit word */ + cmdq->sw_head += ZIP_CMD_SIZE_WORDS; + + /* now, point the "Next-Chunk Buffer Ptr" to sw_head */ + ncb_ptr = cmdq->sw_head; + /* Pointing head again to cmdqueue base*/ + 
cmdq->sw_head = (uint64_t *)cmdq->va; + + ncp.u = 0ull; + ncp.s.addr = cmdq->iova; + *ncb_ptr = ncp.u; + } else { + /*Enough buffers available in the command queue*/ + memcpy((uint8_t *)cmdq->sw_head, + (uint8_t *)cmd, + sizeof(union zip_inst_s)); + cmdq->sw_head += ZIP_CMD_SIZE_WORDS; + } + + rte_wmb(); + + /* Ringing ZIP VF doorbell */ + dbell.u = 0ull; + dbell.s.dbell_cnt = 1; + zip_reg_write64(reg_base, ZIP_VQ_DOORBELL, dbell.u); + + rte_spinlock_unlock(&(cmdq->qlock)); +} + +int +zipvf_create(struct rte_compressdev *compressdev) +{ + struct rte_pci_device *pdev = RTE_DEV_TO_PCI(compressdev->device); + struct zip_vf *zipvf = NULL; + char *dev_name = compressdev->data->name; + void *vbar0; + uint64_t reg; + + if (pdev->mem_resource[0].phys_addr == 0ULL) + return -EIO; + + vbar0 = pdev->mem_resource[0].addr; + if (!vbar0) { + ZIP_PMD_ERR("Failed to map BAR0 of %s", dev_name); + return -ENODEV; + } + + zipvf = (struct zip_vf *)(compressdev->data->dev_private); + + if (!zipvf) + return -ENOMEM; + + zipvf->vbar0 = vbar0; + reg = zip_reg_read64(zipvf->vbar0, ZIP_VF_PF_MBOXX(0)); + /* Storing domain in local to ZIP VF */ + zipvf->dom_sdom = reg; + zipvf->pdev = pdev; + zipvf->max_nb_queue_pairs = ZIP_MAX_VF_QUEUE; + return 0; +} + +int +zipvf_destroy(struct rte_compressdev *compressdev) +{ + struct zip_vf *vf = (struct zip_vf *)(compressdev->data->dev_private); + + /* Rewriting the domain_id in ZIP_VF_MBOX for app rerun */ + zip_reg_write64(vf->vbar0, ZIP_VF_PF_MBOXX(0), vf->dom_sdom); + + return 0; +} diff --git a/drivers/compress/octeontx/otx_zip.h b/drivers/compress/octeontx/otx_zip.h new file mode 100644 index 00000000..99a38d00 --- /dev/null +++ b/drivers/compress/octeontx/otx_zip.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#ifndef _RTE_OCTEONTX_ZIP_VF_H_ +#define _RTE_OCTEONTX_ZIP_VF_H_ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +int octtx_zip_logtype_driver; + +/* ZIP VF Control/Status registers (CSRs): */ +/* VF_BAR0: */ +#define ZIP_VQ_ENA (0x10) +#define ZIP_VQ_SBUF_ADDR (0x20) +#define ZIP_VF_PF_MBOXX(x) (0x400 | (x)<<3) +#define ZIP_VQ_DOORBELL (0x1000) + +/**< Vendor ID */ +#define PCI_VENDOR_ID_CAVIUM 0x177D +/**< PCI device id of ZIP VF */ +#define PCI_DEVICE_ID_OCTEONTX_ZIPVF 0xA037 + +/* maxmum number of zip vf devices */ +#define ZIP_MAX_VFS 8 + +/* max size of one chunk */ +#define ZIP_MAX_CHUNK_SIZE 8192 + +/* each instruction is fixed 128 bytes */ +#define ZIP_CMD_SIZE 128 + +#define ZIP_CMD_SIZE_WORDS (ZIP_CMD_SIZE >> 3) /* 16 64_bit words */ + +/* size of next chunk buffer pointer */ +#define ZIP_MAX_NCBP_SIZE 8 + +/* size of instruction queue in units of instruction size */ +#define ZIP_MAX_NUM_CMDS ((ZIP_MAX_CHUNK_SIZE - ZIP_MAX_NCBP_SIZE) / \ + ZIP_CMD_SIZE) /* 63 */ + +/* size of instruct queue in bytes */ +#define ZIP_MAX_CMDQ_SIZE ((ZIP_MAX_NUM_CMDS * ZIP_CMD_SIZE) + \ + ZIP_MAX_NCBP_SIZE)/* ~8072ull */ + +#define ZIP_BUF_SIZE 256 + +#define ZIP_SGPTR_ALIGN 16 +#define ZIP_CMDQ_ALIGN 128 +#define MAX_SG_LEN ((ZIP_BUF_SIZE - ZIP_SGPTR_ALIGN) / sizeof(void *)) + +/**< ZIP PMD specified queue pairs */ +#define ZIP_MAX_VF_QUEUE 1 + +#define ZIP_ALIGN_ROUNDUP(x, _align) \ + ((_align) * (((x) + (_align) - 1) / (_align))) + +/**< ZIP PMD device name */ +#define COMPRESSDEV_NAME_ZIP_PMD compress_octeonx + +#define ZIP_PMD_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, \ + octtx_zip_logtype_driver, "%s(): "fmt "\n", \ + __func__, ##args) + +#define ZIP_PMD_INFO(fmt, args...) \ + ZIP_PMD_LOG(INFO, fmt, ## args) +#define ZIP_PMD_ERR(fmt, args...) \ + ZIP_PMD_LOG(ERR, fmt, ## args) + +/* resources required to process stream */ +enum { + RES_BUF = 0, + CMD_BUF, + HASH_CTX_BUF, + DECOMP_CTX_BUF, + IN_DATA_BUF, + OUT_DATA_BUF, + HISTORY_DATA_BUF, + MAX_BUFS_PER_STREAM +} NUM_BUFS_PER_STREAM; + +struct zip_stream; +struct zipvf_qp; + +/* Algorithm handler function prototype */ +typedef int (*comp_func_t)(struct rte_comp_op *op, + struct zipvf_qp *qp, struct zip_stream *zstrm); + +/** + * ZIP private stream structure + */ +struct zip_stream { + union zip_inst_s *inst; + /* zip instruction pointer */ + comp_func_t func; + /* function to process comp operation */ + void *bufs[MAX_BUFS_PER_STREAM]; +} _rte_cache_aligned; + + +/** + * ZIP instruction Queue + */ +struct zipvf_cmdq { + rte_spinlock_t qlock; + /* queue lock */ + uint64_t *sw_head; + /* pointer to start of 8-byte word length queue-head */ + uint8_t *va; + /* pointer to instruction queue virtual address */ + rte_iova_t iova; + /* iova addr of cmdq head*/ +}; + +/** + * ZIP device queue structure + */ +struct zipvf_qp { + struct zipvf_cmdq cmdq; + /* Hardware instruction queue structure */ + struct rte_ring *processed_pkts; + /* Ring for placing processed packets */ + struct rte_compressdev_stats qp_stats; + /* Queue pair statistics */ + uint16_t id; + /* Queue Pair Identifier */ + const char *name; + /* Unique Queue Pair Name */ + struct zip_vf *vf; + /* pointer to device, queue belongs to */ +} __rte_cache_aligned; + +/** + * ZIP VF device structure. + */ +struct zip_vf { + int vfid; + /* vf index */ + struct rte_pci_device *pdev; + /* pci device */ + void *vbar0; + /* CSR base address for underlying BAR0 VF.*/ + uint64_t dom_sdom; + /* Storing mbox domain and subdomain id for app rerun*/ + uint32_t max_nb_queue_pairs; + /* pointer to device qps */ + struct rte_mempool *zip_mp; + /* pointer to pools */ +} __rte_cache_aligned; + + +static inline void +zipvf_prepare_in_buf(struct zip_stream *zstrm, struct rte_comp_op *op) +{ + uint32_t offset, inlen; + struct rte_mbuf *m_src; + union zip_inst_s *inst = zstrm->inst; + + inlen = op->src.length; + offset = op->src.offset; + m_src = op->m_src; + + /* Prepare direct input data pointer */ + inst->s.dg = 0; + inst->s.inp_ptr_addr.s.addr = + rte_pktmbuf_iova_offset(m_src, offset); + inst->s.inp_ptr_ctl.s.length = inlen; +} + +static inline void +zipvf_prepare_out_buf(struct zip_stream *zstrm, struct rte_comp_op *op) +{ + uint32_t offset; + struct rte_mbuf *m_dst; + union zip_inst_s *inst = zstrm->inst; + + offset = op->dst.offset; + m_dst = op->m_dst; + + /* Prepare direct input data pointer */ + inst->s.ds = 0; + inst->s.out_ptr_addr.s.addr = + rte_pktmbuf_iova_offset(m_dst, offset); + inst->s.totaloutputlength = rte_pktmbuf_pkt_len(m_dst) - + op->dst.offset; + inst->s.out_ptr_ctl.s.length = inst->s.totaloutputlength; +} + +static inline void +zipvf_prepare_cmd_stateless(struct rte_comp_op *op, struct zip_stream *zstrm) +{ + union zip_inst_s *inst = zstrm->inst; + + /* set flush flag to always 1*/ + inst->s.ef = 1; + + if (inst->s.op == ZIP_OP_E_DECOMP) + inst->s.sf = 1; + else + inst->s.sf = 0; + + /* Set input checksum */ + inst->s.adlercrc32 = op->input_chksum; + + /* Prepare gather buffers */ + zipvf_prepare_in_buf(zstrm, op); + zipvf_prepare_out_buf(zstrm, op); +} + +#ifdef ZIP_DBG +static inline void 
+zip_dump_instruction(void *inst) +{ + union zip_inst_s *cmd83 = (union zip_inst_s *)inst; + printf("####### START ########\n"); + printf("doneint:%d totaloutputlength:%d\n", cmd83->s.doneint, + cmd83->s.totaloutputlength); + printf("exnum:%d iv:%d exbits:%d hmif:%d halg:%d\n", cmd83->s.exn, + cmd83->s.iv, cmd83->s.exbits, cmd83->s.hmif, cmd83->s.halg); + printf("flush:%d speed:%d cc:%d\n", cmd83->s.sf, + cmd83->s.ss, cmd83->s.cc); + printf("eof:%d bof:%d op:%d dscatter:%d dgather:%d hgather:%d\n", + cmd83->s.ef, cmd83->s.bf, cmd83->s.op, cmd83->s.ds, + cmd83->s.dg, cmd83->s.hg); + printf("historylength:%d adler32:%d\n", cmd83->s.historylength, + cmd83->s.adlercrc32); + printf("ctx_ptr.addr:0x%"PRIx64"\n", cmd83->s.ctx_ptr_addr.s.addr); + printf("ctx_ptr.len:%d\n", cmd83->s.ctx_ptr_ctl.s.length); + printf("history_ptr.addr:0x%"PRIx64"\n", cmd83->s.his_ptr_addr.s.addr); + printf("history_ptr.len:%d\n", cmd83->s.his_ptr_ctl.s.length); + printf("inp_ptr.addr:0x%"PRIx64"\n", cmd83->s.inp_ptr_addr.s.addr); + printf("inp_ptr.len:%d\n", cmd83->s.inp_ptr_ctl.s.length); + printf("out_ptr.addr:0x%"PRIx64"\n", cmd83->s.out_ptr_addr.s.addr); + printf("out_ptr.len:%d\n", cmd83->s.out_ptr_ctl.s.length); + printf("result_ptr.len:%d\n", cmd83->s.res_ptr_ctl.s.length); + printf("####### END ########\n"); +} +#endif + +int +zipvf_create(struct rte_compressdev *compressdev); + +int +zipvf_destroy(struct rte_compressdev *compressdev); + +int +zipvf_q_init(struct zipvf_qp *qp); + +int +zipvf_q_term(struct zipvf_qp *qp); + +void +zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *zcmd); + +int +zip_process_op(struct rte_comp_op *op, + struct zipvf_qp *qp, + struct zip_stream *zstrm); + +uint64_t +zip_reg_read64(uint8_t *hw_addr, uint64_t offset); + +void +zip_reg_write64(uint8_t *hw_addr, uint64_t offset, uint64_t val); + +#endif /* _RTE_ZIP_VF_H_ */ diff --git a/drivers/compress/octeontx/otx_zip_pmd.c b/drivers/compress/octeontx/otx_zip_pmd.c new file mode 100644 index 00000000..9d13f933 --- /dev/null +++ b/drivers/compress/octeontx/otx_zip_pmd.c @@ -0,0 +1,658 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#include + +#include +#include +#include +#include + +#include "otx_zip.h" + +static const struct rte_compressdev_capabilities + octtx_zip_pmd_capabilities[] = { + { .algo = RTE_COMP_ALGO_DEFLATE, + /* Deflate */ + .comp_feature_flags = RTE_COMP_FF_HUFFMAN_FIXED | + RTE_COMP_FF_HUFFMAN_DYNAMIC, + /* Non sharable Priv XFORM and Stateless */ + .window_size = { + .min = 1, + .max = 14, + .increment = 1 + /* size supported 2^1 to 2^14 */ + }, + }, + RTE_COMP_END_OF_CAPABILITIES_LIST() +}; + +/* + * Reset session to default state for next set of stateless operation + */ +static inline void +reset_stream(struct zip_stream *z_stream) +{ + union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst); + + inst->s.bf = 1; + inst->s.ef = 0; +} + +int +zip_process_op(struct rte_comp_op *op, + struct zipvf_qp *qp, + struct zip_stream *zstrm) +{ + union zip_inst_s *inst = zstrm->inst; + volatile union zip_zres_s *zresult = NULL; + + + if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) || + (op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) || + (op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) { + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + ZIP_PMD_ERR("Segmented packet is not supported\n"); + return 0; + } + + zipvf_prepare_cmd_stateless(op, zstrm); + + zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF]; + zresult->s.compcode = 0; + +#ifdef ZIP_DBG + 
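+	/* Debug builds only: dump the prepared instruction fields before
+	 * submission.
+	 */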
zip_dump_instruction(inst); +#endif + + /* Submit zip command */ + zipvf_push_command(qp, (void *)inst); + + /* Check and Process results in sync mode */ + do { + } while (!zresult->s.compcode); + + if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) { + op->status = RTE_COMP_OP_STATUS_SUCCESS; + } else { + /* FATAL error cannot do anything */ + ZIP_PMD_ERR("operation failed with error code:%d\n", + zresult->s.compcode); + if (zresult->s.compcode == ZIP_COMP_E_DSTOP) + op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; + else + op->status = RTE_COMP_OP_STATUS_ERROR; + } + + ZIP_PMD_INFO("written %d\n", zresult->s.totalbyteswritten); + + /* Update op stats */ + switch (op->status) { + case RTE_COMP_OP_STATUS_SUCCESS: + op->consumed = zresult->s.totalbytesread; + /* Fall-through */ + case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED: + op->produced = zresult->s.totalbyteswritten; + break; + default: + ZIP_PMD_ERR("stats not updated for status:%d\n", + op->status); + break; + } + /* zstream is reset irrespective of result */ + reset_stream(zstrm); + + zresult->s.compcode = ZIP_COMP_E_NOTDONE; + return 0; +} + +/** Parse xform parameters and setup a stream */ +static int +zip_set_stream_parameters(struct rte_compressdev *dev, + const struct rte_comp_xform *xform, + struct zip_stream *z_stream) +{ + int ret; + union zip_inst_s *inst; + struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private; + void *res; + + /* Allocate resources required by a stream */ + ret = rte_mempool_get_bulk(vf->zip_mp, + z_stream->bufs, MAX_BUFS_PER_STREAM); + if (ret < 0) + return -1; + + /* get one command buffer from pool and set up */ + inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF]; + res = z_stream->bufs[RES_BUF]; + + memset(inst->u, 0, sizeof(inst->u)); + + /* set bf for only first ops of stream */ + inst->s.bf = 1; + + if (xform->type == RTE_COMP_COMPRESS) { + inst->s.op = ZIP_OP_E_COMP; + + switch (xform->compress.deflate.huffman) { + case RTE_COMP_HUFFMAN_DEFAULT: + inst->s.cc = ZIP_CC_DEFAULT; + break; + case RTE_COMP_HUFFMAN_FIXED: + inst->s.cc = ZIP_CC_FIXED_HUFF; + break; + case RTE_COMP_HUFFMAN_DYNAMIC: + inst->s.cc = ZIP_CC_DYN_HUFF; + break; + default: + ret = -1; + goto err; + } + + switch (xform->compress.level) { + case RTE_COMP_LEVEL_MIN: + inst->s.ss = ZIP_COMP_E_LEVEL_MIN; + break; + case RTE_COMP_LEVEL_MAX: + inst->s.ss = ZIP_COMP_E_LEVEL_MAX; + break; + case RTE_COMP_LEVEL_NONE: + ZIP_PMD_ERR("Compression level not supported"); + ret = -1; + goto err; + default: + /* for any value between min and max , choose + * PMD default. + */ + inst->s.ss = ZIP_COMP_E_LEVEL_MED; /** PMD default **/ + break; + } + } else if (xform->type == RTE_COMP_DECOMPRESS) { + inst->s.op = ZIP_OP_E_DECOMP; + /* from HRM, + * For DEFLATE decompression, [CC] must be 0x0. + * For decompression, [SS] must be 0x0 + */ + inst->s.cc = 0; + /* Speed bit should not be set for decompression */ + inst->s.ss = 0; + /* decompression context is supported only for STATEFUL + * operations. 
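+	 * (A stateful context would let the engine carry decompression
+	 * history across the ops of one stream.)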
Currently we support STATELESS ONLY so + * skip setting of ctx pointer + */ + + } else { + ZIP_PMD_ERR("\nxform type not supported"); + ret = -1; + goto err; + } + + inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res); + inst->s.res_ptr_ctl.s.length = 0; + + z_stream->inst = inst; + z_stream->func = zip_process_op; + + return 0; + +err: + rte_mempool_put_bulk(vf->zip_mp, + (void *)&(z_stream->bufs[0]), + MAX_BUFS_PER_STREAM); + + return ret; +} + +/** Configure device */ +static int +zip_pmd_config(struct rte_compressdev *dev, + struct rte_compressdev_config *config) +{ + int nb_streams; + char res_pool[RTE_MEMZONE_NAMESIZE]; + struct zip_vf *vf; + struct rte_mempool *zip_buf_mp; + + if (!config || !dev) + return -EIO; + + vf = (struct zip_vf *)(dev->data->dev_private); + + /* create pool with maximum numbers of resources + * required by streams + */ + + /* use common pool for non-shareable priv_xform and stream */ + nb_streams = config->max_nb_priv_xforms + config->max_nb_streams; + + snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u", + dev->data->dev_id); + + /** TBD Should we use the per core object cache for stream resources */ + zip_buf_mp = rte_mempool_create( + res_pool, + nb_streams * MAX_BUFS_PER_STREAM, + ZIP_BUF_SIZE, + 0, + 0, + NULL, + NULL, + NULL, + NULL, + SOCKET_ID_ANY, + 0); + + if (zip_buf_mp == NULL) { + ZIP_PMD_ERR( + "Failed to create buf mempool octtx_zip_res_pool%u", + dev->data->dev_id); + return -1; + } + + vf->zip_mp = zip_buf_mp; + + return 0; +} + +/** Start device */ +static int +zip_pmd_start(__rte_unused struct rte_compressdev *dev) +{ + return 0; +} + +/** Stop device */ +static void +zip_pmd_stop(__rte_unused struct rte_compressdev *dev) +{ + +} + +/** Close device */ +static int +zip_pmd_close(struct rte_compressdev *dev) +{ + if (dev == NULL) + return -1; + + struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private; + rte_mempool_free(vf->zip_mp); + + return 0; +} + +/** Get device statistics */ +static void +zip_pmd_stats_get(struct rte_compressdev *dev, + struct rte_compressdev_stats *stats) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { + struct zipvf_qp *qp = dev->data->queue_pairs[qp_id]; + + stats->enqueued_count += qp->qp_stats.enqueued_count; + stats->dequeued_count += qp->qp_stats.dequeued_count; + + stats->enqueue_err_count += qp->qp_stats.enqueue_err_count; + stats->dequeue_err_count += qp->qp_stats.dequeue_err_count; + } +} + +/** Reset device statistics */ +static void +zip_pmd_stats_reset(struct rte_compressdev *dev) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { + struct zipvf_qp *qp = dev->data->queue_pairs[qp_id]; + memset(&qp->qp_stats, 0, sizeof(qp->qp_stats)); + } +} + +/** Get device info */ +static void +zip_pmd_info_get(struct rte_compressdev *dev, + struct rte_compressdev_info *dev_info) +{ + struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private; + + if (dev_info != NULL) { + dev_info->driver_name = dev->device->driver->name; + dev_info->feature_flags = dev->feature_flags; + dev_info->capabilities = octtx_zip_pmd_capabilities; + dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs; + } +} + +/** Release queue pair */ +static int +zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id) +{ + struct zipvf_qp *qp = dev->data->queue_pairs[qp_id]; + + if (qp != NULL) { + zipvf_q_term(qp); + + if (qp->processed_pkts) + rte_ring_free(qp->processed_pkts); + + rte_free(qp); + dev->data->queue_pairs[qp_id] = NULL; + } + return 0; +} + 
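+/* These driver ops are not called directly; applications reach them
+ * through the compressdev API.  A minimal sketch of the calling
+ * sequence (dev_id, xform, op and max_inflight are placeholders;
+ * error handling omitted), assuming one queue pair and a stateless
+ * DEFLATE xform:
+ *
+ *	struct rte_compressdev_config cfg = {
+ *		.socket_id = rte_socket_id(),
+ *		.nb_queue_pairs = 1,
+ *		.max_nb_priv_xforms = 1,
+ *		.max_nb_streams = 0,
+ *	};
+ *	void *priv;
+ *
+ *	rte_compressdev_configure(dev_id, &cfg);
+ *	rte_compressdev_queue_pair_setup(dev_id, 0, max_inflight,
+ *			rte_socket_id());
+ *	rte_compressdev_start(dev_id);
+ *	rte_compressdev_private_xform_create(dev_id, &xform, &priv);
+ *	op->private_xform = priv;
+ *	rte_compressdev_enqueue_burst(dev_id, 0, &op, 1);
+ *	while (rte_compressdev_dequeue_burst(dev_id, 0, &op, 1) == 0)
+ *		;
+ */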
+/** Create a ring to place process packets on */ +static struct rte_ring * +zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp, + unsigned int ring_size, int socket_id) +{ + struct rte_ring *r; + + r = rte_ring_lookup(qp->name); + if (r) { + if (rte_ring_get_size(r) >= ring_size) { + ZIP_PMD_INFO("Reusing existing ring %s for processed" + " packets", qp->name); + return r; + } + + ZIP_PMD_ERR("Unable to reuse existing ring %s for processed" + " packets", qp->name); + return NULL; + } + + return rte_ring_create(qp->name, ring_size, socket_id, + RING_F_EXACT_SZ); +} + +/** Setup a queue pair */ +static int +zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id, + uint32_t max_inflight_ops, int socket_id) +{ + struct zipvf_qp *qp = NULL; + struct zip_vf *vf; + char *name; + int ret; + + if (!dev) + return -1; + + vf = (struct zip_vf *) (dev->data->dev_private); + + /* Free memory prior to re-allocation if needed. */ + if (dev->data->queue_pairs[qp_id] != NULL) { + ZIP_PMD_INFO("Using existing queue pair %d ", qp_id); + return 0; + } + + name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0); + snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, + "zip_pmd_%u_qp_%u", + dev->data->dev_id, qp_id); + + /* Allocate the queue pair data structure. */ + qp = rte_zmalloc_socket(name, sizeof(*qp), + RTE_CACHE_LINE_SIZE, socket_id); + if (qp == NULL) + return (-ENOMEM); + + qp->name = name; + + /* Create completion queue upto max_inflight_ops */ + qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp, + max_inflight_ops, socket_id); + if (qp->processed_pkts == NULL) + goto qp_setup_cleanup; + + qp->id = qp_id; + qp->vf = vf; + + ret = zipvf_q_init(qp); + if (ret < 0) + goto qp_setup_cleanup; + + dev->data->queue_pairs[qp_id] = qp; + + memset(&qp->qp_stats, 0, sizeof(qp->qp_stats)); + return 0; + +qp_setup_cleanup: + if (qp->processed_pkts) + rte_ring_free(qp->processed_pkts); + if (qp) + rte_free(qp); + return -1; +} + +static int +zip_pmd_stream_create(struct rte_compressdev *dev, + const struct rte_comp_xform *xform, void **stream) +{ + int ret; + struct zip_stream *strm = NULL; + + strm = rte_malloc(NULL, + sizeof(struct zip_stream), 0); + + if (strm == NULL) + return (-ENOMEM); + + ret = zip_set_stream_parameters(dev, xform, strm); + if (ret < 0) { + ZIP_PMD_ERR("failed configure xform parameters"); + rte_free(strm); + return ret; + } + *stream = strm; + return 0; +} + +static int +zip_pmd_stream_free(struct rte_compressdev *dev, void *stream) +{ + struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private); + struct zip_stream *z_stream; + + if (stream == NULL) + return 0; + + z_stream = (struct zip_stream *)stream; + + /* Free resources back to pool */ + rte_mempool_put_bulk(vf->zip_mp, + (void *)&(z_stream->bufs[0]), + MAX_BUFS_PER_STREAM); + + /* Zero out the whole structure */ + memset(stream, 0, sizeof(struct zip_stream)); + rte_free(stream); + + return 0; +} + + +static uint16_t +zip_pmd_enqueue_burst_sync(void *queue_pair, + struct rte_comp_op **ops, uint16_t nb_ops) +{ + struct zipvf_qp *qp = queue_pair; + struct rte_comp_op *op; + struct zip_stream *zstrm; + int i, ret = 0; + uint16_t enqd = 0; + + for (i = 0; i < nb_ops; i++) { + op = ops[i]; + + if (op->op_type == RTE_COMP_OP_STATEFUL) { + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + } else { + /* process stateless ops */ + zstrm = (struct zip_stream *)op->private_xform; + if (unlikely(zstrm == NULL)) + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + else + ret = zstrm->func(op, qp, zstrm); + } + + /* Whatever is 
out of op, put it into completion queue with + * its status + */ + if (!ret) + ret = rte_ring_enqueue(qp->processed_pkts, (void *)op); + + if (unlikely(ret < 0)) { + /* increment count if failed to enqueue op*/ + qp->qp_stats.enqueue_err_count++; + } else { + qp->qp_stats.enqueued_count++; + enqd++; + } + } + return enqd; +} + +static uint16_t +zip_pmd_dequeue_burst_sync(void *queue_pair, + struct rte_comp_op **ops, uint16_t nb_ops) +{ + struct zipvf_qp *qp = queue_pair; + + unsigned int nb_dequeued = 0; + + nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts, + (void **)ops, nb_ops, NULL); + qp->qp_stats.dequeued_count += nb_dequeued; + + return nb_dequeued; +} + +struct rte_compressdev_ops octtx_zip_pmd_ops = { + .dev_configure = zip_pmd_config, + .dev_start = zip_pmd_start, + .dev_stop = zip_pmd_stop, + .dev_close = zip_pmd_close, + + .stats_get = zip_pmd_stats_get, + .stats_reset = zip_pmd_stats_reset, + + .dev_infos_get = zip_pmd_info_get, + + .queue_pair_setup = zip_pmd_qp_setup, + .queue_pair_release = zip_pmd_qp_release, + + .private_xform_create = zip_pmd_stream_create, + .private_xform_free = zip_pmd_stream_free, + .stream_create = NULL, + .stream_free = NULL +}; + +static int +zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + int ret = 0; + char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN]; + struct rte_compressdev *compressdev; + struct rte_compressdev_pmd_init_params init_params = { + "", + rte_socket_id(), + }; + + ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x", + (unsigned int)pci_dev->id.vendor_id, + (unsigned int)pci_dev->id.device_id); + + rte_pci_device_name(&pci_dev->addr, compressdev_name, + sizeof(compressdev_name)); + + compressdev = rte_compressdev_pmd_create(compressdev_name, + &pci_dev->device, sizeof(struct zip_vf), &init_params); + if (compressdev == NULL) { + ZIP_PMD_ERR("driver %s: create failed", init_params.name); + return -ENODEV; + } + + /* + * create only if proc_type is primary. 
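+	 * A secondary process attaches to the device the primary
+	 * created; it skips zipvf_create() and only re-registers
+	 * dev_ops and the burst functions below.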
+ */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + /* create vf dev with given pmd dev id */ + ret = zipvf_create(compressdev); + if (ret < 0) { + ZIP_PMD_ERR("Device creation failed"); + rte_compressdev_pmd_destroy(compressdev); + return ret; + } + } + + compressdev->dev_ops = &octtx_zip_pmd_ops; + /* register rx/tx burst functions for data path */ + compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync; + compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync; + compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED; + return ret; +} + +static int +zip_pci_remove(struct rte_pci_device *pci_dev) +{ + struct rte_compressdev *compressdev; + char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN]; + + if (pci_dev == NULL) { + ZIP_PMD_ERR(" Invalid PCI Device\n"); + return -EINVAL; + } + rte_pci_device_name(&pci_dev->addr, compressdev_name, + sizeof(compressdev_name)); + + compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name); + if (compressdev == NULL) + return -ENODEV; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + if (zipvf_destroy(compressdev) < 0) + return -ENODEV; + } + return rte_compressdev_pmd_destroy(compressdev); +} + +static struct rte_pci_id pci_id_octtx_zipvf_table[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_OCTEONTX_ZIPVF), + }, + { + .device_id = 0 + }, +}; + +/** + * Structure that represents a PCI driver + */ +static struct rte_pci_driver octtx_zip_pmd = { + .id_table = pci_id_octtx_zipvf_table, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = zip_pci_probe, + .remove = zip_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd); +RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table); + +RTE_INIT(octtx_zip_init_log); + +static void +octtx_zip_init_log(void) +{ + octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx"); + if (octtx_zip_logtype_driver >= 0) + rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO); +} diff --git a/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map b/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map new file mode 100644 index 00000000..ad6e191e --- /dev/null +++ b/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map @@ -0,0 +1,3 @@ +DPDK_18.08 { + local: *; +}; diff --git a/drivers/compress/qat/meson.build b/drivers/compress/qat/meson.build new file mode 100644 index 00000000..9d15076d --- /dev/null +++ b/drivers/compress/qat/meson.build @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017-2018 Intel Corporation + + +# Add our sources files to the list +allow_experimental_apis = true +qat_sources += files('qat_comp_pmd.c', + 'qat_comp.c') +qat_includes += include_directories('.') +qat_deps += 'compressdev' +qat_ext_deps += dep + +# build the whole driver +sources += qat_sources +cflags += qat_cflags +deps += qat_deps +ext_deps += qat_ext_deps +includes += qat_includes diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c new file mode 100644 index 00000000..38c8a5b8 --- /dev/null +++ b/drivers/compress/qat/qat_comp.c @@ -0,0 +1,393 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qat_logs.h" +#include "qat_comp.h" +#include "qat_comp_pmd.h" + + +int +qat_comp_build_request(void *in_op, uint8_t *out_msg, + void *op_cookie, + enum qat_device_gen qat_dev_gen __rte_unused) +{ + 
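/* Translate one rte_comp_op into a firmware request: copy the
+	 * request template built at xform-create time, then patch in the
+	 * per-op fields (lengths, flat or SGL buffer pointers).  The op
+	 * pointer is stashed in opaque_data so qat_comp_process_response()
+	 * can recover it on dequeue.
+	 */
+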
struct rte_comp_op *op = in_op; + struct qat_comp_op_cookie *cookie = + (struct qat_comp_op_cookie *)op_cookie; + struct qat_comp_xform *qat_xform = op->private_xform; + const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl; + struct icp_qat_fw_comp_req *comp_req = + (struct icp_qat_fw_comp_req *)out_msg; + + if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) { + QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression " + "operation requests, op (%p) is not a " + "stateless operation.", op); + return -EINVAL; + } + + rte_mov128(out_msg, tmpl); + comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op; + + /* common for sgl and flat buffers */ + comp_req->comp_pars.comp_len = op->src.length; + comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) - + op->dst.offset; + + if (op->m_src->next != NULL || op->m_dst->next != NULL) { + /* sgl */ + int ret = 0; + + ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags, + QAT_COMN_PTR_TYPE_SGL); + + ret = qat_sgl_fill_array(op->m_src, + op->src.offset, + &cookie->qat_sgl_src, + op->src.length, + RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS); + if (ret) { + QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array"); + return ret; + } + + ret = qat_sgl_fill_array(op->m_dst, + op->dst.offset, + &cookie->qat_sgl_dst, + comp_req->comp_pars.out_buffer_sz, + RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS); + if (ret) { + QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array"); + return ret; + } + + comp_req->comn_mid.src_data_addr = + cookie->qat_sgl_src_phys_addr; + comp_req->comn_mid.dest_data_addr = + cookie->qat_sgl_dst_phys_addr; + comp_req->comn_mid.src_length = 0; + comp_req->comn_mid.dst_length = 0; + + } else { + /* flat aka linear buffer */ + ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags, + QAT_COMN_PTR_TYPE_FLAT); + comp_req->comn_mid.src_length = op->src.length; + comp_req->comn_mid.dst_length = + comp_req->comp_pars.out_buffer_sz; + + comp_req->comn_mid.src_data_addr = + rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset); + comp_req->comn_mid.dest_data_addr = + rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset); + } + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_LOG(DEBUG, "Direction: %s", + qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ? + "decompression" : "compression"); + QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req, + sizeof(struct icp_qat_fw_comp_req)); +#endif + return 0; +} + +int +qat_comp_process_response(void **op, uint8_t *resp) +{ + struct icp_qat_fw_comp_resp *resp_msg = + (struct icp_qat_fw_comp_resp *)resp; + struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t) + (resp_msg->opaque_data); + struct qat_comp_xform *qat_xform = (struct qat_comp_xform *) + (rx_op->private_xform); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_LOG(DEBUG, "Direction: %s", + qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ? 
+ "decompression" : "compression"); + QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg, + sizeof(struct icp_qat_fw_comp_resp)); +#endif + + if (likely(qat_xform->qat_comp_request_type + != QAT_COMP_REQUEST_DECOMPRESS)) { + if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET( + resp_msg->comn_resp.hdr_flags) + == ICP_QAT_FW_COMP_NO_CNV)) { + rx_op->status = RTE_COMP_OP_STATUS_ERROR; + rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW; + *op = (void *)rx_op; + QAT_DP_LOG(ERR, "QAT has wrong firmware"); + return 0; + } + } + + if ((ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(resp_msg->comn_resp.comn_status) + | ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET( + resp_msg->comn_resp.comn_status)) != + ICP_QAT_FW_COMN_STATUS_FLAG_OK) { + + rx_op->status = RTE_COMP_OP_STATUS_ERROR; + rx_op->debug_status = + *((uint16_t *)(&resp_msg->comn_resp.comn_error)); + } else { + struct qat_comp_xform *qat_xform = rx_op->private_xform; + struct icp_qat_fw_resp_comp_pars *comp_resp = + (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars; + + rx_op->status = RTE_COMP_OP_STATUS_SUCCESS; + rx_op->consumed = comp_resp->input_byte_counter; + rx_op->produced = comp_resp->output_byte_counter; + + if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) { + if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32) + rx_op->output_chksum = comp_resp->curr_crc32; + else if (qat_xform->checksum_type == + RTE_COMP_CHECKSUM_ADLER32) + rx_op->output_chksum = comp_resp->curr_adler_32; + else + rx_op->output_chksum = comp_resp->curr_chksum; + } + } + *op = (void *)rx_op; + + return 0; +} + +unsigned int +qat_comp_xform_size(void) +{ + return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8); +} + +static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header, + enum qat_comp_request_type request) +{ + if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS) + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC; + else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC; + else if (request == QAT_COMP_REQUEST_DECOMPRESS) + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; + + header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP; + header->hdr_flags = + ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); + + header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD( + QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT); +} + +static int qat_comp_create_templates(struct qat_comp_xform *qat_xform, + const struct rte_memzone *interm_buff_mz __rte_unused, + const struct rte_comp_xform *xform) +{ + struct icp_qat_fw_comp_req *comp_req; + int comp_level, algo; + uint32_t req_par_flags; + int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS; + + if (unlikely(qat_xform == NULL)) { + QAT_LOG(ERR, "Session was not created for this device"); + return -EINVAL; + } + + if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) { + direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS; + comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1; + req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD( + ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP, + ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_NO_CNV, + ICP_QAT_FW_COMP_NO_CNV_RECOVERY); + + } else { + if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT) + comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8; + else if (xform->compress.level == 1) + comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1; + else if (xform->compress.level == 2) + comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4; + else if (xform->compress.level == 3) + comp_level = 
ICP_QAT_HW_COMPRESSION_DEPTH_8; + else if (xform->compress.level >= 4 && + xform->compress.level <= 9) + comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16; + else { + QAT_LOG(ERR, "compression level not supported"); + return -EINVAL; + } + req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD( + ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP, + ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV, + ICP_QAT_FW_COMP_CNV_RECOVERY); + } + + switch (xform->compress.algo) { + case RTE_COMP_ALGO_DEFLATE: + algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE; + break; + case RTE_COMP_ALGO_LZS: + default: + /* RTE_COMP_NULL */ + QAT_LOG(ERR, "compression algorithm not supported"); + return -EINVAL; + } + + comp_req = &qat_xform->qat_comp_req_tmpl; + + /* Initialize header */ + qat_comp_create_req_hdr(&comp_req->comn_hdr, + qat_xform->qat_comp_request_type); + + comp_req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD( + ICP_QAT_FW_COMP_STATELESS_SESSION, + ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST, + ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST, + ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST, + ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF); + + comp_req->cd_pars.sl.comp_slice_cfg_word[0] = + ICP_QAT_HW_COMPRESSION_CONFIG_BUILD( + direction, + /* In CPM 1.6 only valid mode ! */ + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo, + /* Translate level to depth */ + comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0); + + comp_req->comp_pars.initial_adler = 1; + comp_req->comp_pars.initial_crc32 = 0; + comp_req->comp_pars.req_par_flags = req_par_flags; + + + if (qat_xform->qat_comp_request_type == + QAT_COMP_REQUEST_FIXED_COMP_STATELESS || + qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) { + ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl, + ICP_QAT_FW_SLICE_DRAM_WR); + ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl, + ICP_QAT_FW_SLICE_COMP); + } else if (qat_xform->qat_comp_request_type == + QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) { + + QAT_LOG(ERR, "Dynamic huffman encoding not supported"); + return -EINVAL; + } + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req, + sizeof(struct icp_qat_fw_comp_req)); +#endif + return 0; +} + +/** + * Create driver private_xform data. + * + * @param dev + * Compressdev device + * @param xform + * xform data from application + * @param private_xform + * ptr where handle of pmd's private_xform data should be stored + * @return + * - if successful returns 0 + * and valid private_xform handle + * - <0 in error cases + * - Returns -EINVAL if input parameters are invalid. + * - Returns -ENOTSUP if comp device does not support the comp transform. + * - Returns -ENOMEM if the private_xform could not be allocated. 
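+ *
+ * The handle written to private_xform is shareable
+ * (RTE_COMP_FF_SHAREABLE_PRIV_XFORM is advertised in the capability
+ * table), so one xform may be attached to ops submitted on any queue
+ * pair of the device.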
+ */ +int +qat_comp_private_xform_create(struct rte_compressdev *dev, + const struct rte_comp_xform *xform, + void **private_xform) +{ + struct qat_comp_dev_private *qat = dev->data->dev_private; + + if (unlikely(private_xform == NULL)) { + QAT_LOG(ERR, "QAT: private_xform parameter is NULL"); + return -EINVAL; + } + if (unlikely(qat->xformpool == NULL)) { + QAT_LOG(ERR, "QAT device has no private_xform mempool"); + return -ENOMEM; + } + if (rte_mempool_get(qat->xformpool, private_xform)) { + QAT_LOG(ERR, "Couldn't get object from qat xform mempool"); + return -ENOMEM; + } + + struct qat_comp_xform *qat_xform = + (struct qat_comp_xform *)*private_xform; + + if (xform->type == RTE_COMP_COMPRESS) { + if (xform->compress.deflate.huffman == + RTE_COMP_HUFFMAN_DYNAMIC) { + QAT_LOG(ERR, + "QAT device doesn't support dynamic compression"); + return -ENOTSUP; + } + + if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED || + ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT) + && qat->interm_buff_mz == NULL)) + + qat_xform->qat_comp_request_type = + QAT_COMP_REQUEST_FIXED_COMP_STATELESS; + + + } else { + qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS; + } + + qat_xform->checksum_type = xform->compress.chksum; + + if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) { + QAT_LOG(ERR, "QAT: Problem with setting compression"); + return -EINVAL; + } + return 0; +} + +/** + * Free driver private_xform data. + * + * @param dev + * Compressdev device + * @param private_xform + * handle of pmd's private_xform data + * @return + * - 0 if successful + * - <0 in error cases + * - Returns -EINVAL if input parameters are invalid. + */ +int +qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused, + void *private_xform) +{ + struct qat_comp_xform *qat_xform = + (struct qat_comp_xform *)private_xform; + + if (qat_xform) { + memset(qat_xform, 0, qat_comp_xform_size()); + struct rte_mempool *mp = rte_mempool_from_obj(qat_xform); + + rte_mempool_put(mp, qat_xform); + return 0; + } + return -EINVAL; +} diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h new file mode 100644 index 00000000..8d315efb --- /dev/null +++ b/drivers/compress/qat/qat_comp.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2018 Intel Corporation + */ + +#ifndef _QAT_COMP_H_ +#define _QAT_COMP_H_ + +#ifdef RTE_LIBRTE_COMPRESSDEV + +#include +#include + +#include "qat_common.h" +#include "icp_qat_hw.h" +#include "icp_qat_fw_comp.h" +#include "icp_qat_fw_la.h" + +#define ERR_CODE_QAT_COMP_WRONG_FW -99 + +enum qat_comp_request_type { + QAT_COMP_REQUEST_FIXED_COMP_STATELESS, + QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS, + QAT_COMP_REQUEST_DECOMPRESS, + REQ_COMP_END +}; + +struct qat_comp_sgl { + qat_sgl_hdr; + struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS]; +} __rte_packed __rte_cache_aligned; + +struct qat_comp_op_cookie { + struct qat_comp_sgl qat_sgl_src; + struct qat_comp_sgl qat_sgl_dst; + phys_addr_t qat_sgl_src_phys_addr; + phys_addr_t qat_sgl_dst_phys_addr; +}; + +struct qat_comp_xform { + struct icp_qat_fw_comp_req qat_comp_req_tmpl; + enum qat_comp_request_type qat_comp_request_type; + enum rte_comp_checksum_type checksum_type; +}; + +int +qat_comp_build_request(void *in_op, uint8_t *out_msg, void *op_cookie, + enum qat_device_gen qat_dev_gen __rte_unused); + +int +qat_comp_process_response(void **op, uint8_t *resp); + + +int +qat_comp_private_xform_create(struct rte_compressdev *dev, + const struct 
rte_comp_xform *xform, + void **private_xform); + +int +qat_comp_private_xform_free(struct rte_compressdev *dev, void *private_xform); + +unsigned int +qat_comp_xform_size(void); + +#endif +#endif diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c new file mode 100644 index 00000000..b89975fc --- /dev/null +++ b/drivers/compress/qat/qat_comp_pmd.c @@ -0,0 +1,429 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2018 Intel Corporation + */ + +#include "qat_comp.h" +#include "qat_comp_pmd.h" + +static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = { + {/* COMPRESSION - deflate */ + .algo = RTE_COMP_ALGO_DEFLATE, + .comp_feature_flags = RTE_COMP_FF_MULTI_PKT_CHECKSUM | + RTE_COMP_FF_CRC32_CHECKSUM | + RTE_COMP_FF_ADLER32_CHECKSUM | + RTE_COMP_FF_CRC32_ADLER32_CHECKSUM | + RTE_COMP_FF_SHAREABLE_PRIV_XFORM | + RTE_COMP_FF_HUFFMAN_FIXED | + RTE_COMP_FF_OOP_SGL_IN_SGL_OUT | + RTE_COMP_FF_OOP_SGL_IN_LB_OUT | + RTE_COMP_FF_OOP_LB_IN_SGL_OUT, + .window_size = {.min = 15, .max = 15, .increment = 0} }, + {RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } }; + +static void +qat_comp_stats_get(struct rte_compressdev *dev, + struct rte_compressdev_stats *stats) +{ + struct qat_common_stats qat_stats = {0}; + struct qat_comp_dev_private *qat_priv; + + if (stats == NULL || dev == NULL) { + QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev); + return; + } + qat_priv = dev->data->dev_private; + + qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION); + stats->enqueued_count = qat_stats.enqueued_count; + stats->dequeued_count = qat_stats.dequeued_count; + stats->enqueue_err_count = qat_stats.enqueue_err_count; + stats->dequeue_err_count = qat_stats.dequeue_err_count; +} + +static void +qat_comp_stats_reset(struct rte_compressdev *dev) +{ + struct qat_comp_dev_private *qat_priv; + + if (dev == NULL) { + QAT_LOG(ERR, "invalid compressdev ptr %p", dev); + return; + } + qat_priv = dev->data->dev_private; + + qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION); + +} + +static int +qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id) +{ + struct qat_comp_dev_private *qat_private = dev->data->dev_private; + + QAT_LOG(DEBUG, "Release comp qp %u on device %d", + queue_pair_id, dev->data->dev_id); + + qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id] + = NULL; + + return qat_qp_release((struct qat_qp **) + &(dev->data->queue_pairs[queue_pair_id])); +} + +static int +qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id, + uint32_t max_inflight_ops, int socket_id) +{ + struct qat_qp *qp; + int ret = 0; + uint32_t i; + struct qat_qp_config qat_qp_conf; + + struct qat_qp **qp_addr = + (struct qat_qp **)&(dev->data->queue_pairs[qp_id]); + struct qat_comp_dev_private *qat_private = dev->data->dev_private; + const struct qat_qp_hw_data *comp_hw_qps = + qat_gen_config[qat_private->qat_dev->qat_dev_gen] + .qp_hw_data[QAT_SERVICE_COMPRESSION]; + const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id; + + /* If qp is already in use free ring memory and qp metadata. 
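Re-running setup on an in-use qp_id is therefore safe: the old ring and op cookies are freed before the qp is rebuilt with the new size.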
*/ + if (*qp_addr != NULL) { + ret = qat_comp_qp_release(dev, qp_id); + if (ret < 0) + return ret; + } + if (qp_id >= qat_qps_per_service(comp_hw_qps, + QAT_SERVICE_COMPRESSION)) { + QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id); + return -EINVAL; + } + + qat_qp_conf.hw = qp_hw_data; + qat_qp_conf.build_request = qat_comp_build_request; + qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie); + qat_qp_conf.nb_descriptors = max_inflight_ops; + qat_qp_conf.socket_id = socket_id; + qat_qp_conf.service_str = "comp"; + + ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf); + if (ret != 0) + return ret; + + /* store a link to the qp in the qat_pci_device */ + qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id] + = *qp_addr; + + qp = (struct qat_qp *)*qp_addr; + + for (i = 0; i < qp->nb_descriptors; i++) { + + struct qat_comp_op_cookie *cookie = + qp->op_cookies[i]; + + cookie->qat_sgl_src_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_comp_op_cookie, + qat_sgl_src); + + cookie->qat_sgl_dst_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_comp_op_cookie, + qat_sgl_dst); + } + + return ret; +} + +static struct rte_mempool * +qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev, + uint32_t num_elements) +{ + char xform_pool_name[RTE_MEMPOOL_NAMESIZE]; + struct rte_mempool *mp; + + snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE, + "%s_xforms", comp_dev->qat_dev->name); + + QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name); + mp = rte_mempool_lookup(xform_pool_name); + + if (mp != NULL) { + QAT_LOG(DEBUG, "xformpool already created"); + if (mp->size != num_elements) { + QAT_LOG(DEBUG, "xformpool wrong size - delete it"); + rte_mempool_free(mp); + mp = NULL; + comp_dev->xformpool = NULL; + } + } + + if (mp == NULL) + mp = rte_mempool_create(xform_pool_name, + num_elements, + qat_comp_xform_size(), 0, 0, + NULL, NULL, NULL, NULL, rte_socket_id(), + 0); + if (mp == NULL) { + QAT_LOG(ERR, "Err creating mempool %s w %d elements of size %d", + xform_pool_name, num_elements, qat_comp_xform_size()); + return NULL; + } + + return mp; +} + +static void +_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev) +{ + /* Free private_xform pool */ + if (comp_dev->xformpool) { + /* Free internal mempool for private xforms */ + rte_mempool_free(comp_dev->xformpool); + comp_dev->xformpool = NULL; + } +} + +static int +qat_comp_dev_config(struct rte_compressdev *dev, + struct rte_compressdev_config *config) +{ + struct qat_comp_dev_private *comp_dev = dev->data->dev_private; + int ret = 0; + + if (config->max_nb_streams != 0) { + QAT_LOG(ERR, + "QAT device does not support STATEFUL so max_nb_streams must be 0"); + return -EINVAL; + } + + comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev, + config->max_nb_priv_xforms); + if (comp_dev->xformpool == NULL) { + + ret = -ENOMEM; + goto error_out; + } + return 0; + +error_out: + _qat_comp_dev_config_clear(comp_dev); + return ret; +} + +static int +qat_comp_dev_start(struct rte_compressdev *dev __rte_unused) +{ + return 0; +} + +static void +qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused) +{ + +} + +static int +qat_comp_dev_close(struct rte_compressdev *dev) +{ + int i; + int ret = 0; + struct qat_comp_dev_private *comp_dev = dev->data->dev_private; + + for (i = 0; i < dev->data->nb_queue_pairs; i++) { + ret = qat_comp_qp_release(dev, i); + if (ret < 0) + return ret; + } + + _qat_comp_dev_config_clear(comp_dev); + + return ret; +} + + +static void 
+qat_comp_dev_info_get(struct rte_compressdev *dev, + struct rte_compressdev_info *info) +{ + struct qat_comp_dev_private *comp_dev = dev->data->dev_private; + const struct qat_qp_hw_data *comp_hw_qps = + qat_gen_config[comp_dev->qat_dev->qat_dev_gen] + .qp_hw_data[QAT_SERVICE_COMPRESSION]; + + if (info != NULL) { + info->max_nb_queue_pairs = + qat_qps_per_service(comp_hw_qps, + QAT_SERVICE_COMPRESSION); + info->feature_flags = dev->feature_flags; + info->capabilities = comp_dev->qat_dev_capabilities; + } +} + +static uint16_t +qat_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops, + uint16_t nb_ops) +{ + return qat_enqueue_op_burst(qp, (void **)ops, nb_ops); +} + +static uint16_t +qat_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops, + uint16_t nb_ops) +{ + return qat_dequeue_op_burst(qp, (void **)ops, nb_ops); +} + +static uint16_t +qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused, + struct rte_comp_op **ops __rte_unused, + uint16_t nb_ops __rte_unused) +{ + QAT_DP_LOG(ERR, "QAT PMD detected wrong FW version !"); + return 0; +} + +static struct rte_compressdev_ops compress_qat_dummy_ops = { + + /* Device related operations */ + .dev_configure = NULL, + .dev_start = NULL, + .dev_stop = qat_comp_dev_stop, + .dev_close = qat_comp_dev_close, + .dev_infos_get = NULL, + + .stats_get = NULL, + .stats_reset = qat_comp_stats_reset, + .queue_pair_setup = NULL, + .queue_pair_release = qat_comp_qp_release, + + /* Compression related operations */ + .private_xform_create = NULL, + .private_xform_free = qat_comp_private_xform_free +}; + +static uint16_t +qat_comp_pmd_dequeue_frst_op_burst(void *qp, struct rte_comp_op **ops, + uint16_t nb_ops) +{ + uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops); + struct qat_qp *tmp_qp = (struct qat_qp *)qp; + + if (ret) { + if ((*ops)->debug_status == + (uint64_t)ERR_CODE_QAT_COMP_WRONG_FW) { + tmp_qp->qat_dev->comp_dev->compressdev->enqueue_burst = + qat_comp_pmd_enq_deq_dummy_op_burst; + tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst = + qat_comp_pmd_enq_deq_dummy_op_burst; + + tmp_qp->qat_dev->comp_dev->compressdev->dev_ops = + &compress_qat_dummy_ops; + QAT_LOG(ERR, "QAT PMD detected wrong FW version !"); + + } else { + tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst = + qat_comp_pmd_dequeue_op_burst; + } + } + return ret; +} + +static struct rte_compressdev_ops compress_qat_ops = { + + /* Device related operations */ + .dev_configure = qat_comp_dev_config, + .dev_start = qat_comp_dev_start, + .dev_stop = qat_comp_dev_stop, + .dev_close = qat_comp_dev_close, + .dev_infos_get = qat_comp_dev_info_get, + + .stats_get = qat_comp_stats_get, + .stats_reset = qat_comp_stats_reset, + .queue_pair_setup = qat_comp_qp_setup, + .queue_pair_release = qat_comp_qp_release, + + /* Compression related operations */ + .private_xform_create = qat_comp_private_xform_create, + .private_xform_free = qat_comp_private_xform_free +}; + +int +qat_comp_dev_create(struct qat_pci_device *qat_pci_dev) +{ + if (qat_pci_dev->qat_dev_gen == QAT_GEN1) { + QAT_LOG(ERR, "Compression PMD not supported on QAT dh895xcc"); + return 0; + } + + struct rte_compressdev_pmd_init_params init_params = { + .name = "", + .socket_id = qat_pci_dev->pci_dev->device.numa_node, + }; + char name[RTE_COMPRESSDEV_NAME_MAX_LEN]; + struct rte_compressdev *compressdev; + struct qat_comp_dev_private *comp_dev; + + snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s", + qat_pci_dev->name, "comp"); + QAT_LOG(DEBUG, "Creating QAT COMP device %s", name); + + 
compressdev = rte_compressdev_pmd_create(name,
+			&qat_pci_dev->pci_dev->device,
+			sizeof(struct qat_comp_dev_private),
+			&init_params);
+
+	if (compressdev == NULL)
+		return -ENODEV;
+
+	compressdev->dev_ops = &compress_qat_ops;
+
+	compressdev->enqueue_burst = qat_comp_pmd_enqueue_op_burst;
+	compressdev->dequeue_burst = qat_comp_pmd_dequeue_frst_op_burst;
+
+	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+
+	comp_dev = compressdev->data->dev_private;
+	comp_dev->qat_dev = qat_pci_dev;
+	comp_dev->compressdev = compressdev;
+	qat_pci_dev->comp_dev = comp_dev;
+
+	switch (qat_pci_dev->qat_dev_gen) {
+	case QAT_GEN1:
+	case QAT_GEN2:
+		comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
+		break;
+	default:
+		comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
+		QAT_LOG(DEBUG,
+			"QAT gen %d capabilities unknown, defaulting to GEN1",
+			qat_pci_dev->qat_dev_gen);
+		break;
+	}
+
+	QAT_LOG(DEBUG,
+			"Created QAT COMP device %s as compressdev instance %d",
+			name, compressdev->data->dev_id);
+	return 0;
+}
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct qat_comp_dev_private *comp_dev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+
+	comp_dev = qat_pci_dev->comp_dev;
+	if (comp_dev == NULL)
+		return 0;
+
+	/* clean up any resources used by the device */
+	qat_comp_dev_close(comp_dev->compressdev);
+
+	rte_compressdev_pmd_destroy(comp_dev->compressdev);
+	qat_pci_dev->comp_dev = NULL;
+
+	return 0;
+} diff --git a/drivers/compress/qat/qat_comp_pmd.h b/drivers/compress/qat/qat_comp_pmd.h new file mode 100644 index 00000000..9ad2a283 --- /dev/null +++ b/drivers/compress/qat/qat_comp_pmd.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_COMP_PMD_H_
+#define _QAT_COMP_PMD_H_
+
+#ifdef RTE_LIBRTE_COMPRESSDEV
+
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#include "qat_device.h"
+
+/** Private data structure for a QAT compression device.
+ * This QAT device offers only a compression service; there can be one
+ * of these on each qat_pci_device (VF).
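+ * The symmetric crypto service on the same VF keeps its own
+ * equivalent structure; both hang off the shared qat_pci_device.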
+ */ +struct qat_comp_dev_private { + struct qat_pci_device *qat_dev; + /**< The qat pci device hosting the service */ + struct rte_compressdev *compressdev; + /**< The pointer to this compression device structure */ + const struct rte_compressdev_capabilities *qat_dev_capabilities; + /* QAT device compression capabilities */ + const struct rte_memzone *interm_buff_mz; + /**< The device's memory for intermediate buffers */ + struct rte_mempool *xformpool; + /**< The device's pool for qat_comp_xforms */ +}; + +int +qat_comp_dev_create(struct qat_pci_device *qat_pci_dev); + +int +qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev); + +#endif +#endif /* _QAT_COMP_PMD_H_ */ diff --git a/drivers/compress/qat/rte_pmd_qat_version.map b/drivers/compress/qat/rte_pmd_qat_version.map new file mode 100644 index 00000000..ad6e191e --- /dev/null +++ b/drivers/compress/qat/rte_pmd_qat_version.map @@ -0,0 +1,3 @@ +DPDK_18.08 { + local: *; +}; diff --git a/drivers/compress/zlib/Makefile b/drivers/compress/zlib/Makefile new file mode 100644 index 00000000..5cf8de6f --- /dev/null +++ b/drivers/compress/zlib/Makefile @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium Networks + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_pmd_zlib.a + +# build flags +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -DALLOW_EXPERIMENTAL_API + +# library version +LIBABIVER := 1 + +# versioning export map +EXPORT_MAP := rte_pmd_zlib_version.map + +# external library dependencies +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -lz +LDLIBS += -lrte_compressdev +LDLIBS += -lrte_bus_vdev + +# library source files +SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib_pmd.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib_pmd_ops.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/compress/zlib/meson.build b/drivers/compress/zlib/meson.build new file mode 100644 index 00000000..7748de2d --- /dev/null +++ b/drivers/compress/zlib/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium Networks + +dep = dependency('zlib', required: false) +if not dep.found() + build = false +endif + +deps += 'bus_vdev' +sources = files('zlib_pmd.c', 'zlib_pmd_ops.c') +ext_deps += dep +pkgconfig_extra_libs += '-lz' + +allow_experimental_apis = true diff --git a/drivers/compress/zlib/rte_pmd_zlib_version.map b/drivers/compress/zlib/rte_pmd_zlib_version.map new file mode 100644 index 00000000..ad6e191e --- /dev/null +++ b/drivers/compress/zlib/rte_pmd_zlib_version.map @@ -0,0 +1,3 @@ +DPDK_18.08 { + local: *; +}; diff --git a/drivers/compress/zlib/zlib_pmd.c b/drivers/compress/zlib/zlib_pmd.c new file mode 100644 index 00000000..7d6871b1 --- /dev/null +++ b/drivers/compress/zlib/zlib_pmd.c @@ -0,0 +1,436 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium Networks + */ + +#include +#include + +#include "zlib_pmd_private.h" + +/** Compute next mbuf in the list, assign data buffer and length, + * returns 0 if mbuf is NULL + */ +#define COMPUTE_BUF(mbuf, data, len) \ + ((mbuf = mbuf->next) ? 
\ + (data = rte_pktmbuf_mtod(mbuf, uint8_t *)), \ + (len = rte_pktmbuf_data_len(mbuf)) : 0) + +static void +process_zlib_deflate(struct rte_comp_op *op, z_stream *strm) +{ + int ret, flush, fin_flush; + struct rte_mbuf *mbuf_src = op->m_src; + struct rte_mbuf *mbuf_dst = op->m_dst; + + switch (op->flush_flag) { + case RTE_COMP_FLUSH_FULL: + case RTE_COMP_FLUSH_FINAL: + fin_flush = Z_FINISH; + break; + default: + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + ZLIB_PMD_ERR("Invalid flush value\n"); + } + + if (unlikely(!strm)) { + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + ZLIB_PMD_ERR("Invalid z_stream\n"); + return; + } + /* Update z_stream with the inputs provided by application */ + strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *, + op->src.offset); + + strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset; + + strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *, + op->dst.offset); + + strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset; + + /* Set flush value to NO_FLUSH unless it is last mbuf */ + flush = Z_NO_FLUSH; + /* Initialize status to SUCCESS */ + op->status = RTE_COMP_OP_STATUS_SUCCESS; + + do { + /* Set flush value to Z_FINISH for last block */ + if ((op->src.length - strm->total_in) <= strm->avail_in) { + strm->avail_in = (op->src.length - strm->total_in); + flush = fin_flush; + } + do { + ret = deflate(strm, flush); + if (unlikely(ret == Z_STREAM_ERROR)) { + /* error return, do not process further */ + op->status = RTE_COMP_OP_STATUS_ERROR; + goto def_end; + } + /* Break if Z_STREAM_END is encountered */ + if (ret == Z_STREAM_END) + goto def_end; + + /* Keep looping until input mbuf is consumed. + * Exit if destination mbuf gets exhausted. + */ + } while ((strm->avail_out == 0) && + COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out)); + + if (!strm->avail_out) { + /* there is no space for compressed output */ + op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; + break; + } + + /* Update source buffer to next mbuf + * Exit if input buffers are fully consumed + */ + } while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in)); + +def_end: + /* Update op stats */ + switch (op->status) { + case RTE_COMP_OP_STATUS_SUCCESS: + op->consumed += strm->total_in; + /* Fall-through */ + case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED: + op->produced += strm->total_out; + break; + default: + ZLIB_PMD_ERR("stats not updated for status:%d\n", + op->status); + } + + deflateReset(strm); +} + +static void +process_zlib_inflate(struct rte_comp_op *op, z_stream *strm) +{ + int ret, flush; + struct rte_mbuf *mbuf_src = op->m_src; + struct rte_mbuf *mbuf_dst = op->m_dst; + + if (unlikely(!strm)) { + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + ZLIB_PMD_ERR("Invalid z_stream\n"); + return; + } + strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *, + op->src.offset); + + strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset; + + strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *, + op->dst.offset); + + strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset; + + /** Ignoring flush value provided from application for decompression */ + flush = Z_NO_FLUSH; + /* initialize status to SUCCESS */ + op->status = RTE_COMP_OP_STATUS_SUCCESS; + + do { + do { + ret = inflate(strm, flush); + + switch (ret) { + /* Fall-through */ + case Z_NEED_DICT: + ret = Z_DATA_ERROR; + /* Fall-through */ + case Z_DATA_ERROR: + /* Fall-through */ + case Z_MEM_ERROR: + /* Fall-through */ + case Z_STREAM_ERROR: + 
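+				/* Z_NEED_DICT was already downgraded to
+				 * Z_DATA_ERROR above (no preset dictionary
+				 * support); all four zlib failures land here
+				 * with a single ERROR status, then flow into
+				 * the Z_STREAM_END exit.
+				 */
+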
op->status = RTE_COMP_OP_STATUS_ERROR; + /* Fall-through */ + case Z_STREAM_END: + /* no further computation needed if + * Z_STREAM_END is encountered + */ + goto inf_end; + default: + /* success */ + break; + + } + /* Keep looping until input mbuf is consumed. + * Exit if destination mbuf gets exhausted. + */ + } while ((strm->avail_out == 0) && + COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out)); + + if (!strm->avail_out) { + /* there is no more space for decompressed output */ + op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; + break; + } + /* Read next input buffer to be processed, exit if compressed + * blocks are fully read + */ + } while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in)); + +inf_end: + /* Update op stats */ + switch (op->status) { + case RTE_COMP_OP_STATUS_SUCCESS: + op->consumed += strm->total_in; + /* Fall-through */ + case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED: + op->produced += strm->total_out; + break; + default: + ZLIB_PMD_ERR("stats not produced for status:%d\n", + op->status); + } + + inflateReset(strm); +} + +/** Process comp operation for mbuf */ +static inline int +process_zlib_op(struct zlib_qp *qp, struct rte_comp_op *op) +{ + struct zlib_stream *stream; + struct zlib_priv_xform *private_xform; + + if ((op->op_type == RTE_COMP_OP_STATEFUL) || + (op->src.offset > rte_pktmbuf_data_len(op->m_src)) || + (op->dst.offset > rte_pktmbuf_data_len(op->m_dst))) { + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + ZLIB_PMD_ERR("Invalid source or destination buffers or " + "invalid Operation requested\n"); + } else { + private_xform = (struct zlib_priv_xform *)op->private_xform; + stream = &private_xform->stream; + stream->comp(op, &stream->strm); + } + /* whatever is out of op, put it into completion queue with + * its status + */ + return rte_ring_enqueue(qp->processed_pkts, (void *)op); +} + +/** Parse comp xform and set private xform/Stream parameters */ +int +zlib_set_stream_parameters(const struct rte_comp_xform *xform, + struct zlib_stream *stream) +{ + int strategy, level, wbits; + z_stream *strm = &stream->strm; + + /* allocate deflate state */ + strm->zalloc = Z_NULL; + strm->zfree = Z_NULL; + strm->opaque = Z_NULL; + + switch (xform->type) { + case RTE_COMP_COMPRESS: + stream->comp = process_zlib_deflate; + stream->free = deflateEnd; + /** Compression window bits */ + switch (xform->compress.algo) { + case RTE_COMP_ALGO_DEFLATE: + wbits = -(xform->compress.window_size); + break; + default: + ZLIB_PMD_ERR("Compression algorithm not supported\n"); + return -1; + } + /** Compression Level */ + switch (xform->compress.level) { + case RTE_COMP_LEVEL_PMD_DEFAULT: + level = Z_DEFAULT_COMPRESSION; + break; + case RTE_COMP_LEVEL_NONE: + level = Z_NO_COMPRESSION; + break; + case RTE_COMP_LEVEL_MIN: + level = Z_BEST_SPEED; + break; + case RTE_COMP_LEVEL_MAX: + level = Z_BEST_COMPRESSION; + break; + default: + level = xform->compress.level; + if (level < RTE_COMP_LEVEL_MIN || + level > RTE_COMP_LEVEL_MAX) { + ZLIB_PMD_ERR("Compression level %d " + "not supported\n", + level); + return -1; + } + break; + } + /** Compression strategy */ + switch (xform->compress.deflate.huffman) { + case RTE_COMP_HUFFMAN_DEFAULT: + strategy = Z_DEFAULT_STRATEGY; + break; + case RTE_COMP_HUFFMAN_FIXED: + strategy = Z_FIXED; + break; + case RTE_COMP_HUFFMAN_DYNAMIC: + strategy = Z_DEFAULT_STRATEGY; + break; + default: + ZLIB_PMD_ERR("Compression strategy not supported\n"); + return -1; + } + if (deflateInit2(strm, level, + Z_DEFLATED, wbits, + DEF_MEM_LEVEL, 
strategy) != Z_OK) { + ZLIB_PMD_ERR("Deflate init failed\n"); + return -1; + } + break; + + case RTE_COMP_DECOMPRESS: + stream->comp = process_zlib_inflate; + stream->free = inflateEnd; + /** window bits */ + switch (xform->decompress.algo) { + case RTE_COMP_ALGO_DEFLATE: + wbits = -(xform->decompress.window_size); + break; + default: + ZLIB_PMD_ERR("Compression algorithm not supported\n"); + return -1; + } + + if (inflateInit2(strm, wbits) != Z_OK) { + ZLIB_PMD_ERR("Inflate init failed\n"); + return -1; + } + break; + default: + return -1; + } + return 0; +} + +static uint16_t +zlib_pmd_enqueue_burst(void *queue_pair, + struct rte_comp_op **ops, uint16_t nb_ops) +{ + struct zlib_qp *qp = queue_pair; + int ret; + uint16_t i; + uint16_t enqd = 0; + for (i = 0; i < nb_ops; i++) { + ret = process_zlib_op(qp, ops[i]); + if (unlikely(ret < 0)) { + /* increment count if failed to push to completion + * queue + */ + qp->qp_stats.enqueue_err_count++; + } else { + qp->qp_stats.enqueued_count++; + enqd++; + } + } + return enqd; +} + +static uint16_t +zlib_pmd_dequeue_burst(void *queue_pair, + struct rte_comp_op **ops, uint16_t nb_ops) +{ + struct zlib_qp *qp = queue_pair; + + unsigned int nb_dequeued = 0; + + nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts, + (void **)ops, nb_ops, NULL); + qp->qp_stats.dequeued_count += nb_dequeued; + + return nb_dequeued; +} + +static int +zlib_create(const char *name, + struct rte_vdev_device *vdev, + struct rte_compressdev_pmd_init_params *init_params) +{ + struct rte_compressdev *dev; + + dev = rte_compressdev_pmd_create(name, &vdev->device, + sizeof(struct zlib_private), init_params); + if (dev == NULL) { + ZLIB_PMD_ERR("driver %s: create failed", init_params->name); + return -ENODEV; + } + + dev->dev_ops = rte_zlib_pmd_ops; + + /* register rx/tx burst functions for data path */ + dev->dequeue_burst = zlib_pmd_dequeue_burst; + dev->enqueue_burst = zlib_pmd_enqueue_burst; + + return 0; +} + +static int +zlib_probe(struct rte_vdev_device *vdev) +{ + struct rte_compressdev_pmd_init_params init_params = { + "", + rte_socket_id() + }; + const char *name; + const char *input_args; + int retval; + + name = rte_vdev_device_name(vdev); + + if (name == NULL) + return -EINVAL; + + input_args = rte_vdev_device_args(vdev); + + retval = rte_compressdev_pmd_parse_input_args(&init_params, input_args); + if (retval < 0) { + ZLIB_PMD_LOG(ERR, + "Failed to parse initialisation arguments[%s]\n", + input_args); + return -EINVAL; + } + + return zlib_create(name, vdev, &init_params); +} + +static int +zlib_remove(struct rte_vdev_device *vdev) +{ + struct rte_compressdev *compressdev; + const char *name; + + name = rte_vdev_device_name(vdev); + if (name == NULL) + return -EINVAL; + + compressdev = rte_compressdev_pmd_get_named_dev(name); + if (compressdev == NULL) + return -ENODEV; + + return rte_compressdev_pmd_destroy(compressdev); +} + +static struct rte_vdev_driver zlib_pmd_drv = { + .probe = zlib_probe, + .remove = zlib_remove +}; + +RTE_PMD_REGISTER_VDEV(COMPRESSDEV_NAME_ZLIB_PMD, zlib_pmd_drv); +RTE_INIT(zlib_init_log); + +static void +zlib_init_log(void) +{ + zlib_logtype_driver = rte_log_register("pmd.compress.zlib"); + if (zlib_logtype_driver >= 0) + rte_log_set_level(zlib_logtype_driver, RTE_LOG_INFO); +} diff --git a/drivers/compress/zlib/zlib_pmd_ops.c b/drivers/compress/zlib/zlib_pmd_ops.c new file mode 100644 index 00000000..0a73aed9 --- /dev/null +++ b/drivers/compress/zlib/zlib_pmd_ops.c @@ -0,0 +1,307 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * 
Copyright(c) 2018 Cavium Networks + */ + +#include + +#include +#include + +#include "zlib_pmd_private.h" + +static const struct rte_compressdev_capabilities zlib_pmd_capabilities[] = { + { /* Deflate */ + .algo = RTE_COMP_ALGO_DEFLATE, + .comp_feature_flags = (RTE_COMP_FF_NONCOMPRESSED_BLOCKS | + RTE_COMP_FF_HUFFMAN_FIXED | + RTE_COMP_FF_HUFFMAN_DYNAMIC), + .window_size = { + .min = 8, + .max = 15, + .increment = 1 + }, + }, + + RTE_COMP_END_OF_CAPABILITIES_LIST() + +}; + +/** Configure device */ +static int +zlib_pmd_config(struct rte_compressdev *dev, + struct rte_compressdev_config *config) +{ + struct rte_mempool *mp; + char mp_name[RTE_MEMPOOL_NAMESIZE]; + struct zlib_private *internals = dev->data->dev_private; + + snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, + "stream_mp_%u", dev->data->dev_id); + mp = internals->mp; + if (mp == NULL) { + mp = rte_mempool_create(mp_name, + config->max_nb_priv_xforms + + config->max_nb_streams, + sizeof(struct zlib_priv_xform), + 0, 0, NULL, NULL, NULL, + NULL, config->socket_id, + 0); + if (mp == NULL) { + ZLIB_PMD_ERR("Cannot create private xform pool on " + "socket %d\n", config->socket_id); + return -ENOMEM; + } + internals->mp = mp; + } + return 0; +} + +/** Start device */ +static int +zlib_pmd_start(__rte_unused struct rte_compressdev *dev) +{ + return 0; +} + +/** Stop device */ +static void +zlib_pmd_stop(__rte_unused struct rte_compressdev *dev) +{ +} + +/** Close device */ +static int +zlib_pmd_close(struct rte_compressdev *dev) +{ + struct zlib_private *internals = dev->data->dev_private; + rte_mempool_free(internals->mp); + internals->mp = NULL; + return 0; +} + +/** Get device statistics */ +static void +zlib_pmd_stats_get(struct rte_compressdev *dev, + struct rte_compressdev_stats *stats) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { + struct zlib_qp *qp = dev->data->queue_pairs[qp_id]; + + stats->enqueued_count += qp->qp_stats.enqueued_count; + stats->dequeued_count += qp->qp_stats.dequeued_count; + + stats->enqueue_err_count += qp->qp_stats.enqueue_err_count; + stats->dequeue_err_count += qp->qp_stats.dequeue_err_count; + } +} + +/** Reset device statistics */ +static void +zlib_pmd_stats_reset(struct rte_compressdev *dev) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { + struct zlib_qp *qp = dev->data->queue_pairs[qp_id]; + + memset(&qp->qp_stats, 0, sizeof(qp->qp_stats)); + } +} + +/** Get device info */ +static void +zlib_pmd_info_get(struct rte_compressdev *dev, + struct rte_compressdev_info *dev_info) +{ + if (dev_info != NULL) { + dev_info->driver_name = dev->device->name; + dev_info->feature_flags = dev->feature_flags; + dev_info->capabilities = zlib_pmd_capabilities; + } +} + +/** Release queue pair */ +static int +zlib_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id) +{ + struct zlib_qp *qp = dev->data->queue_pairs[qp_id]; + + if (qp != NULL) { + rte_ring_free(qp->processed_pkts); + rte_free(qp); + dev->data->queue_pairs[qp_id] = NULL; + } + return 0; +} + +/** set a unique name for the queue pair based on its name, dev_id and qp_id */ +static int +zlib_pmd_qp_set_unique_name(struct rte_compressdev *dev, + struct zlib_qp *qp) +{ + unsigned int n = snprintf(qp->name, sizeof(qp->name), + "zlib_pmd_%u_qp_%u", + dev->data->dev_id, qp->id); + + if (n >= sizeof(qp->name)) + return -1; + + return 0; +} + +/** Create a ring to place process packets on */ +static struct rte_ring * +zlib_pmd_qp_create_processed_pkts_ring(struct zlib_qp *qp, + unsigned int 
ring_size, int socket_id) +{ + struct rte_ring *r = qp->processed_pkts; + + if (r) { + if (rte_ring_get_size(r) >= ring_size) { + ZLIB_PMD_INFO("Reusing existing ring %s for processed" + " packets", qp->name); + return r; + } + + ZLIB_PMD_ERR("Unable to reuse existing ring %s for processed" + " packets", qp->name); + return NULL; + } + + return rte_ring_create(qp->name, ring_size, socket_id, + RING_F_EXACT_SZ); +} + +/** Setup a queue pair */ +static int +zlib_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id, + uint32_t max_inflight_ops, int socket_id) +{ + struct zlib_qp *qp = NULL; + + /* Free memory prior to re-allocation if needed. */ + if (dev->data->queue_pairs[qp_id] != NULL) + zlib_pmd_qp_release(dev, qp_id); + + /* Allocate the queue pair data structure. */ + qp = rte_zmalloc_socket("ZLIB PMD Queue Pair", sizeof(*qp), + RTE_CACHE_LINE_SIZE, socket_id); + if (qp == NULL) + return (-ENOMEM); + + qp->id = qp_id; + dev->data->queue_pairs[qp_id] = qp; + + if (zlib_pmd_qp_set_unique_name(dev, qp)) + goto qp_setup_cleanup; + + qp->processed_pkts = zlib_pmd_qp_create_processed_pkts_ring(qp, + max_inflight_ops, socket_id); + if (qp->processed_pkts == NULL) + goto qp_setup_cleanup; + + memset(&qp->qp_stats, 0, sizeof(qp->qp_stats)); + return 0; + +qp_setup_cleanup: + if (qp) { + rte_free(qp); + qp = NULL; + } + return -1; +} + +/** Configure stream */ +static int +zlib_pmd_stream_create(struct rte_compressdev *dev, + const struct rte_comp_xform *xform, + void **zstream) +{ + int ret = 0; + struct zlib_stream *stream; + struct zlib_private *internals = dev->data->dev_private; + + if (xform == NULL) { + ZLIB_PMD_ERR("invalid xform struct"); + return -EINVAL; + } + + if (rte_mempool_get(internals->mp, zstream)) { + ZLIB_PMD_ERR("Couldn't get object from session mempool"); + return -ENOMEM; + } + stream = *((struct zlib_stream **)zstream); + + ret = zlib_set_stream_parameters(xform, stream); + + if (ret < 0) { + ZLIB_PMD_ERR("failed configure session parameters"); + + memset(stream, 0, sizeof(struct zlib_stream)); + /* Return session to mempool */ + rte_mempool_put(internals->mp, stream); + return ret; + } + + return 0; +} + +/** Configure private xform */ +static int +zlib_pmd_private_xform_create(struct rte_compressdev *dev, + const struct rte_comp_xform *xform, + void **private_xform) +{ + return zlib_pmd_stream_create(dev, xform, private_xform); +} + +/** Clear the memory of stream so it doesn't leave key material behind */ +static int +zlib_pmd_stream_free(__rte_unused struct rte_compressdev *dev, + void *zstream) +{ + struct zlib_stream *stream = (struct zlib_stream *)zstream; + if (!stream) + return -EINVAL; + + stream->free(&stream->strm); + /* Zero out the whole structure */ + memset(stream, 0, sizeof(struct zlib_stream)); + struct rte_mempool *mp = rte_mempool_from_obj(stream); + rte_mempool_put(mp, stream); + + return 0; +} + +/** Clear the memory of stream so it doesn't leave key material behind */ +static int +zlib_pmd_private_xform_free(struct rte_compressdev *dev, + void *private_xform) +{ + return zlib_pmd_stream_free(dev, private_xform); +} + +struct rte_compressdev_ops zlib_pmd_ops = { + .dev_configure = zlib_pmd_config, + .dev_start = zlib_pmd_start, + .dev_stop = zlib_pmd_stop, + .dev_close = zlib_pmd_close, + + .stats_get = zlib_pmd_stats_get, + .stats_reset = zlib_pmd_stats_reset, + + .dev_infos_get = zlib_pmd_info_get, + + .queue_pair_setup = zlib_pmd_qp_setup, + .queue_pair_release = zlib_pmd_qp_release, + + .private_xform_create = zlib_pmd_private_xform_create, 
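+	/* Stateful streams are not supported: each private_xform embeds
+	 * its own z_stream, and the stream_create/stream_free hooks below
+	 * stay NULL.
+	 */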
+	.private_xform_free	= zlib_pmd_private_xform_free,
+
+	.stream_create	= NULL,
+	.stream_free	= NULL
+};
+
+struct rte_compressdev_ops *rte_zlib_pmd_ops = &zlib_pmd_ops;
diff --git a/drivers/compress/zlib/zlib_pmd_private.h b/drivers/compress/zlib/zlib_pmd_private.h
new file mode 100644
index 00000000..2c6e83d4
--- /dev/null
+++ b/drivers/compress/zlib/zlib_pmd_private.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef _RTE_ZLIB_PMD_PRIVATE_H_
+#define _RTE_ZLIB_PMD_PRIVATE_H_
+
+#include <zlib.h>
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#define COMPRESSDEV_NAME_ZLIB_PMD	compress_zlib
+/**< ZLIB PMD device name */
+
+#define DEF_MEM_LEVEL	8
+
+int zlib_logtype_driver;
+#define ZLIB_PMD_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, zlib_logtype_driver, "%s(): "fmt "\n", \
+			__func__, ##args)
+
+#define ZLIB_PMD_INFO(fmt, args...) \
+	ZLIB_PMD_LOG(INFO, fmt, ## args)
+#define ZLIB_PMD_ERR(fmt, args...) \
+	ZLIB_PMD_LOG(ERR, fmt, ## args)
+#define ZLIB_PMD_WARN(fmt, args...) \
+	ZLIB_PMD_LOG(WARNING, fmt, ## args)
+
+struct zlib_private {
+	struct rte_mempool *mp;
+};
+
+struct zlib_qp {
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing processed packets */
+	struct rte_compressdev_stats qp_stats;
+	/**< Queue pair statistics */
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+	/**< Unique Queue Pair Name */
+} __rte_cache_aligned;
+
+/* Algorithm handler function prototype */
+typedef void (*comp_func_t)(struct rte_comp_op *op, z_stream *strm);
+
+typedef int (*comp_free_t)(z_stream *strm);
+
+/** ZLIB Stream structure */
+struct zlib_stream {
+	z_stream strm;
+	/**< zlib stream structure */
+	comp_func_t comp;
+	/**< Operation (compression/decompression) */
+	comp_free_t free;
+	/**< Free Operation (compression/decompression) */
+} __rte_cache_aligned;
+
+/** ZLIB private xform structure */
+struct zlib_priv_xform {
+	struct zlib_stream stream;
+} __rte_cache_aligned;
+
+int
+zlib_set_stream_parameters(const struct rte_comp_xform *xform,
+		struct zlib_stream *stream);
+
+/** Device specific operations function pointer structure */
+extern struct rte_compressdev_ops *rte_zlib_pmd_ops;
+
+#endif /* _RTE_ZLIB_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 628bd142..c480cbd3 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -6,15 +6,20 @@ include $(RTE_SDK)/mk/rte.vars.mk
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
-DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
-DIRS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += mrvl
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += mvsam
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
+endif
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
index d06c5444..0a5c1a87 100644
--- a/drivers/crypto/aesni_gcm/Makefile
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -3,12 +3,6 @@
 include $(RTE_SDK)/mk/rte.vars.mk
 
-ifneq ($(MAKECMDGOALS),clean)
-ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
-$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
-endif
-endif
-
 # library name
 LIB = librte_pmd_aesni_gcm.a
@@ -23,9 +17,7 @@ LIBABIVER := 1
 EXPORT_MAP := rte_pmd_aesni_gcm_version.map
 
 # external library dependencies
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
-LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+LDLIBS += -lIPSec_MB
 LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
 LDLIBS += -lrte_cryptodev
 LDLIBS += -lrte_bus_vdev
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index 59e504ee..45061669 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -9,8 +9,7 @@
 #define LINUX
 #endif
 
-#include <gcm_defines.h>
-#include <aux_funcs.h>
+#include <intel-ipsec-mb.h>
 
 /** Supported vector modes */
 enum aesni_gcm_vector_mode {
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 83e54480..752e0cd6 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -31,8 +31,8 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		auth_xform = xform;
 		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
-			GCM_LOG_ERR("Only AES GMAC is supported as an "
-					"authentication only algorithm");
+			AESNI_GCM_LOG(ERR, "Only AES GMAC is supported as an "
+					"authentication only algorithm");
 			return -ENOTSUP;
 		}
 		/* Set IV parameters */
@@ -54,7 +54,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
 		aead_xform = xform;
 
 		if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
-			GCM_LOG_ERR("The only combined operation "
+			AESNI_GCM_LOG(ERR, "The only combined operation "
 					"supported is AES GCM");
 			return -ENOTSUP;
 		}
@@ -75,7 +75,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
 		sess->aad_length = aead_xform->aead.aad_length;
 		digest_length = aead_xform->aead.digest_length;
 	} else {
-		GCM_LOG_ERR("Wrong xform type, has to be AEAD or authentication");
+		AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication");
 		return -ENOTSUP;
 	}
 
@@ -83,7 +83,7 @@
 	/* IV check */
 	if (sess->iv.length != 16 && sess->iv.length != 12 &&
 			sess->iv.length != 0) {
-		GCM_LOG_ERR("Wrong IV length");
+		AESNI_GCM_LOG(ERR, "Wrong IV length");
 		return -EINVAL;
 	}
 
@@ -99,7 +99,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
 		sess->key = AESNI_GCM_KEY_256;
 		break;
 	default:
-		GCM_LOG_ERR("Invalid key length");
+		AESNI_GCM_LOG(ERR, "Invalid key length");
 		return -EINVAL;
 	}
 
@@ -109,7 +109,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
 	if (digest_length != 16 &&
 			digest_length != 12 &&
 			digest_length != 8) {
-		GCM_LOG_ERR("digest");
+		AESNI_GCM_LOG(ERR, "Invalid digest length");
 		return -EINVAL;
 	}
 	sess->digest_length = digest_length;
@@ -127,7 +127,7 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
 		if (likely(sym_op->session != NULL))
 			sess = (struct aesni_gcm_session *)
-					get_session_private_data(
+					get_sym_session_private_data(
 					sym_op->session,
 					cryptodev_driver_id);
 	} else {
@@ -149,8 +149,8 @@
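/*
 * Editorial sketch: a change repeated across every crypto driver in this
 * patch is the rename of the session accessors to their sym_-prefixed
 * forms (get_sym_session_private_data / set_sym_session_private_data).
 * A minimal illustration of the lookup side as a PMD uses it after the
 * rename; struct my_session and my_driver_id are placeholders, not DPDK
 * symbols.
 */
#include <rte_cryptodev_pmd.h>

struct my_session { uint8_t expanded_key[32]; };
static uint8_t my_driver_id;

static struct my_session *
lookup_session(struct rte_crypto_op *op)
{
    if (op->sess_type != RTE_CRYPTO_OP_WITH_SESSION ||
            op->sym->session == NULL)
        return NULL;
    /* Per-driver private data hangs off the generic session object,
     * indexed by the driver id. */
    return (struct my_session *)get_sym_session_private_data(
            op->sym->session, my_driver_id);
}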
aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op) sess = NULL; } sym_op->session = (struct rte_cryptodev_sym_session *)_sess; - set_session_private_data(sym_op->session, cryptodev_driver_id, - _sess_private_data); + set_sym_session_private_data(sym_op->session, + cryptodev_driver_id, _sess_private_data); } if (unlikely(sess == NULL)) @@ -352,7 +352,7 @@ post_process_gcm_crypto_op(struct aesni_gcm_qp *qp, session->op == AESNI_GMAC_OP_VERIFY) { uint8_t *digest; - uint8_t *tag = (uint8_t *)&qp->temp_digest; + uint8_t *tag = qp->temp_digest; if (session->op == AESNI_GMAC_OP_VERIFY) digest = op->sym->auth.digest.data; @@ -392,7 +392,7 @@ handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp, if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { memset(sess, 0, sizeof(struct aesni_gcm_session)); memset(op->sym->session, 0, - rte_cryptodev_get_header_session_size()); + rte_cryptodev_sym_get_header_session_size()); rte_mempool_put(qp->sess_mp, sess); rte_mempool_put(qp->sess_mp, op->sym->session); op->sym->session = NULL; @@ -464,13 +464,13 @@ aesni_gcm_create(const char *name, /* Check CPU for support for AES instruction set */ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) { - GCM_LOG_ERR("AES instructions not supported by CPU"); + AESNI_GCM_LOG(ERR, "AES instructions not supported by CPU"); return -EFAULT; } - dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); if (dev == NULL) { - GCM_LOG_ERR("driver %s: create failed", init_params->name); + AESNI_GCM_LOG(ERR, "driver %s: create failed", + init_params->name); return -ENODEV; } @@ -492,7 +492,8 @@ aesni_gcm_create(const char *name, dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | RTE_CRYPTODEV_FF_CPU_AESNI | - RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER; + RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | + RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; switch (vector_mode) { case RTE_AESNI_GCM_SSE: @@ -513,7 +514,13 @@ aesni_gcm_create(const char *name, internals->vector_mode = vector_mode; internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; + +#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0) + AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: %s\n", + imb_get_version_str()); +#else + AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n"); +#endif return 0; } @@ -525,8 +532,7 @@ aesni_gcm_probe(struct rte_vdev_device *vdev) "", sizeof(struct aesni_gcm_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name; const char *input_args; @@ -568,7 +574,12 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv); RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id="); -RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv, +RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver, cryptodev_driver_id); + + +RTE_INIT(aesni_gcm_init_log) +{ + aesni_gcm_logtype_driver = rte_log_register("pmd.crypto.aesni_gcm"); +} diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c index 6f542137..b6b4dd02 100644 --- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c +++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c @@ -143,7 +143,8 @@ 
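/*
 * Editorial sketch: the RTE_INIT constructor just added is the idiom that
 * replaces the old compile-time GCM_LOG_* macros -- register a named
 * logtype once at startup, then route every driver message through it so
 * verbosity can be tuned at run time via the EAL --log-level option. The
 * logtype name pmd.crypto.mypmd and macro name are assumptions for
 * illustration.
 */
#include <rte_common.h>
#include <rte_log.h>

static int my_pmd_logtype;

#define MY_PMD_LOG(level, fmt, ...) \
    rte_log(RTE_LOG_ ## level, my_pmd_logtype, \
        "%s(): " fmt "\n", __func__, ## __VA_ARGS__)

RTE_INIT(my_pmd_init_log)
{
    /* Runs as a constructor before main(); the returned id identifies
     * this driver's messages to the logging subsystem. */
    my_pmd_logtype = rte_log_register("pmd.crypto.mypmd");
    if (my_pmd_logtype >= 0)
        rte_log_set_level(my_pmd_logtype, RTE_LOG_INFO);
}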
aesni_gcm_pmd_info_get(struct rte_cryptodev *dev, dev_info->capabilities = aesni_gcm_pmd_capabilities; dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; } } @@ -183,12 +184,11 @@ aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp, r = rte_ring_lookup(qp->name); if (r) { if (rte_ring_get_size(r) >= ring_size) { - GCM_LOG_INFO("Reusing existing ring %s for processed" - " packets", qp->name); + AESNI_GCM_LOG(INFO, "Reusing existing ring %s for processed" + " packets", qp->name); return r; } - - GCM_LOG_ERR("Unable to reuse existing ring %s for processed" + AESNI_GCM_LOG(ERR, "Unable to reuse existing ring %s for processed" " packets", qp->name); return NULL; } @@ -242,22 +242,6 @@ qp_setup_cleanup: return -1; } -/** Start queue pair */ -static int -aesni_gcm_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -aesni_gcm_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - /** Return the number of allocated queue pairs */ static uint32_t aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev) @@ -267,14 +251,14 @@ aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev) /** Returns the size of the aesni gcm session structure */ static unsigned -aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +aesni_gcm_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct aesni_gcm_session); } /** Configure a aesni gcm session from a crypto xform chain */ static int -aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, +aesni_gcm_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -284,26 +268,26 @@ aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, struct aesni_gcm_private *internals = dev->data->dev_private; if (unlikely(sess == NULL)) { - GCM_LOG_ERR("invalid session struct"); + AESNI_GCM_LOG(ERR, "invalid session struct"); return -EINVAL; } if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + AESNI_GCM_LOG(ERR, + "Couldn't get object from session mempool"); return -ENOMEM; } ret = aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode], sess_private_data, xform); if (ret != 0) { - GCM_LOG_ERR("failed configure session parameters"); + AESNI_GCM_LOG(ERR, "failed configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -311,17 +295,17 @@ aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, /** Clear the memory of session so it doesn't leave key material behind */ static void -aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev, +aesni_gcm_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { memset(sess_priv, 0, sizeof(struct aesni_gcm_session)); 
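/*
 * Editorial sketch: the aesni_gcm ring function above and the zlib one
 * earlier follow the same reuse-or-create discipline -- keep an existing
 * ring only if it is at least as large as requested, otherwise fail
 * rather than silently shrink. A standalone illustration assuming only
 * the rte_ring API; the function name is illustrative. RING_F_EXACT_SZ
 * is the flag the zlib PMD passes so usable capacity is exactly `size`
 * entries rather than the default power-of-two sizing.
 */
#include <stdint.h>
#include <rte_ring.h>

static struct rte_ring *
qp_ring_reuse_or_create(struct rte_ring *existing, const char *name,
        uint32_t size, int socket_id)
{
    if (existing != NULL) {
        /* Reuse only when the old ring can hold the new depth. */
        if (rte_ring_get_size(existing) >= size)
            return existing;
        return NULL; /* too small: the caller treats this as an error */
    }
    return rte_ring_create(name, size, socket_id, RING_F_EXACT_SZ);
}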
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -339,13 +323,11 @@ struct rte_cryptodev_ops aesni_gcm_pmd_ops = { .queue_pair_setup = aesni_gcm_pmd_qp_setup, .queue_pair_release = aesni_gcm_pmd_qp_release, - .queue_pair_start = aesni_gcm_pmd_qp_start, - .queue_pair_stop = aesni_gcm_pmd_qp_stop, .queue_pair_count = aesni_gcm_pmd_qp_count, - .session_get_size = aesni_gcm_pmd_session_get_size, - .session_configure = aesni_gcm_pmd_session_configure, - .session_clear = aesni_gcm_pmd_session_clear + .sym_session_get_size = aesni_gcm_pmd_sym_session_get_size, + .sym_session_configure = aesni_gcm_pmd_sym_session_configure, + .sym_session_clear = aesni_gcm_pmd_sym_session_clear }; struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops; diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h index 3d60583b..c13a12a5 100644 --- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h +++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h @@ -7,28 +7,24 @@ #include "aesni_gcm_ops.h" +/* + * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50, + * so if macro is not defined, it means that the version is 0.49. + */ +#if !defined(IMB_VERSION_NUM) +#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0) +#endif + #define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm /**< AES-NI GCM PMD device name */ -#define GCM_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \ - __func__, __LINE__, ## args) - -#ifdef RTE_LIBRTE_AESNI_MB_DEBUG -#define GCM_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \ - __func__, __LINE__, ## args) - -#define GCM_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \ - __func__, __LINE__, ## args) -#else -#define GCM_LOG_INFO(fmt, args...) -#define GCM_LOG_DBG(fmt, args...) -#endif +/** AES-NI GCM PMD LOGTYPE DRIVER */ +int aesni_gcm_logtype_driver; +#define AESNI_GCM_LOG(level, fmt, ...) 
\
+	rte_log(RTE_LOG_ ## level, aesni_gcm_logtype_driver, \
+			"%s() line %u: "fmt "\n", __func__, __LINE__, \
+					## __VA_ARGS__)
 
 /* Maximum length for digest */
 #define DIGEST_LENGTH_MAX 16
@@ -39,8 +35,6 @@ struct aesni_gcm_private {
 	/**< Vector mode */
 	unsigned max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
-	unsigned max_nb_sessions;
-	/**< Max number of sessions supported by device */
 };
 
 struct aesni_gcm_qp {
diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index d9f8fb98..806a95eb 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -3,12 +3,6 @@
 include $(RTE_SDK)/mk/rte.vars.mk
 
-ifneq ($(MAKECMDGOALS),clean)
-ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
-$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
-endif
-endif
-
 # library name
 LIB = librte_pmd_aesni_mb.a
@@ -23,9 +17,7 @@ LIBABIVER := 1
 EXPORT_MAP := rte_pmd_aesni_mb_version.map
 
 # external library dependencies
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
-LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+LDLIBS += -lIPSec_MB
 LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
 LDLIBS += -lrte_cryptodev
 LDLIBS += -lrte_bus_vdev
diff --git a/drivers/crypto/aesni_mb/aesni_mb_ops.h b/drivers/crypto/aesni_mb/aesni_mb_ops.h
index 4d596e85..5a1cba6c 100644
--- a/drivers/crypto/aesni_mb/aesni_mb_ops.h
+++ b/drivers/crypto/aesni_mb/aesni_mb_ops.h
@@ -9,8 +9,7 @@
 #define LINUX
 #endif
 
-#include <mb_mgr.h>
-#include <aux_funcs.h>
+#include <intel-ipsec-mb.h>
 
 enum aesni_mb_vector_mode {
 	RTE_AESNI_MB_NOT_SUPPORTED = 0,
@@ -34,9 +33,12 @@
 typedef void (*aes_keyexp_192_t)
 		(const void *key, void *enc_exp_keys, void *dec_exp_keys);
 typedef void (*aes_keyexp_256_t)
 		(const void *key, void *enc_exp_keys, void *dec_exp_keys);
-
 typedef void (*aes_xcbc_expand_key_t)
 		(const void *key, void *exp_k1, void *k2, void *k3);
+typedef void (*aes_cmac_sub_key_gen_t)
+		(const void *exp_key, void *k2, void *k3);
+typedef void (*aes_cmac_keyexp_t)
+		(const void *key, void *keyexp);
 
 /** Multi-buffer library function pointer table */
 struct aesni_mb_op_fns {
@@ -78,9 +80,12 @@ struct aesni_mb_op_fns {
 			/**< AES192 key expansions */
 			aes_keyexp_256_t aes256;
 			/**< AES256 key expansions */
-
 			aes_xcbc_expand_key_t aes_xcbc;
-			/**< AES XCBC key expansions */
+			/**< AES XCBC key epansions */
+			aes_cmac_sub_key_gen_t aes_cmac_subkey;
+			/**< AES CMAC subkey expansions */
+			aes_cmac_keyexp_t aes_cmac_expkey;
+			/**< AES CMAC key expansions */
 		} keyexp;
 		/**< Key expansion functions */
 	} aux;
@@ -123,7 +128,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
 				aes_keyexp_128_sse,
 				aes_keyexp_192_sse,
 				aes_keyexp_256_sse,
-				aes_xcbc_expand_key_sse
+				aes_xcbc_expand_key_sse,
+				aes_cmac_subkey_gen_sse,
+				aes_keyexp_128_enc_sse
 			}
 		}
 	},
@@ -148,7 +155,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
 				aes_keyexp_128_avx,
 				aes_keyexp_192_avx,
 				aes_keyexp_256_avx,
-				aes_xcbc_expand_key_avx
+				aes_xcbc_expand_key_avx,
+				aes_cmac_subkey_gen_avx,
+				aes_keyexp_128_enc_avx
 			}
 		}
 	},
@@ -173,7 +182,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
 				aes_keyexp_128_avx2,
 				aes_keyexp_192_avx2,
 				aes_keyexp_256_avx2,
-				aes_xcbc_expand_key_avx2
+				aes_xcbc_expand_key_avx2,
+				aes_cmac_subkey_gen_avx2,
+				aes_keyexp_128_enc_avx2
 			}
 		}
 	},
@@ -198,7 +209,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
 				aes_keyexp_128_avx512,
 				aes_keyexp_192_avx512,
 				aes_keyexp_256_avx512,
-				aes_xcbc_expand_key_avx512
+				aes_xcbc_expand_key_avx512,
+				aes_cmac_subkey_gen_avx512,
+
aes_keyexp_128_enc_avx512 } } } diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c index 636c6c37..93dc7a44 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c @@ -2,7 +2,7 @@ * Copyright(c) 2015-2017 Intel Corporation */ -#include +#include #include #include @@ -108,7 +108,7 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops, } if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) { - MB_LOG_ERR("Crypto xform struct not of type auth"); + AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth"); return -1; } @@ -124,6 +124,17 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops, return 0; } + if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) { + sess->auth.algo = AES_CMAC; + (*mb_ops->aux.keyexp.aes_cmac_expkey)(xform->auth.key.data, + sess->auth.cmac.expkey); + + (*mb_ops->aux.keyexp.aes_cmac_subkey)(sess->auth.cmac.expkey, + sess->auth.cmac.skey1, sess->auth.cmac.skey2); + return 0; + } + + switch (xform->auth.algo) { case RTE_CRYPTO_AUTH_MD5_HMAC: sess->auth.algo = MD5; @@ -150,7 +161,7 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops, hash_oneblock_fn = mb_ops->aux.one_block.sha512; break; default: - MB_LOG_ERR("Unsupported authentication algorithm selection"); + AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection"); return -ENOTSUP; } @@ -171,6 +182,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops, const struct rte_crypto_sym_xform *xform) { uint8_t is_aes = 0; + uint8_t is_3DES = 0; aes_keyexp_t aes_keyexp_fn; if (xform == NULL) { @@ -179,7 +191,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops, } if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) { - MB_LOG_ERR("Crypto xform struct not of type cipher"); + AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher"); return -EINVAL; } @@ -192,7 +204,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops, sess->cipher.direction = DECRYPT; break; default: - MB_LOG_ERR("Invalid cipher operation parameter"); + AESNI_MB_LOG(ERR, "Invalid cipher operation parameter"); return -EINVAL; } @@ -216,8 +228,12 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops, case RTE_CRYPTO_CIPHER_DES_DOCSISBPI: sess->cipher.mode = DOCSIS_DES; break; + case RTE_CRYPTO_CIPHER_3DES_CBC: + sess->cipher.mode = DES3; + is_3DES = 1; + break; default: - MB_LOG_ERR("Unsupported cipher mode parameter"); + AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter"); return -ENOTSUP; } @@ -241,7 +257,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops, aes_keyexp_fn = mb_ops->aux.keyexp.aes256; break; default: - MB_LOG_ERR("Invalid cipher key length"); + AESNI_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } @@ -250,9 +266,52 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops, sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); + } else if (is_3DES) { + uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0], + sess->cipher.exp_3des_keys.key[1], + sess->cipher.exp_3des_keys.key[2]}; + + switch (xform->cipher.key.length) { + case 24: + des_key_schedule(keys[0], xform->cipher.key.data); + des_key_schedule(keys[1], xform->cipher.key.data+8); + des_key_schedule(keys[2], xform->cipher.key.data+16); + + /* Initialize keys - 24 bytes: [K1-K2-K3] */ + sess->cipher.exp_3des_keys.ks_ptr[0] = 
keys[0]; + sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1]; + sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2]; + break; + case 16: + des_key_schedule(keys[0], xform->cipher.key.data); + des_key_schedule(keys[1], xform->cipher.key.data+8); + + /* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */ + sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0]; + sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1]; + sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0]; + break; + case 8: + des_key_schedule(keys[0], xform->cipher.key.data); + + /* Initialize keys - 8 bytes: [K1 = K2 = K3] */ + sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0]; + sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0]; + sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0]; + break; + default: + AESNI_MB_LOG(ERR, "Invalid cipher key length"); + return -EINVAL; + } + +#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0) + sess->cipher.key_length_in_bytes = 24; +#else + sess->cipher.key_length_in_bytes = 8; +#endif } else { if (xform->cipher.key.length != 8) { - MB_LOG_ERR("Invalid cipher key length"); + AESNI_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } sess->cipher.key_length_in_bytes = 8; @@ -283,7 +342,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops, sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY; break; default: - MB_LOG_ERR("Invalid aead operation parameter"); + AESNI_MB_LOG(ERR, "Invalid aead operation parameter"); return -EINVAL; } @@ -293,7 +352,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops, sess->auth.algo = AES_CCM; break; default: - MB_LOG_ERR("Unsupported aead mode parameter"); + AESNI_MB_LOG(ERR, "Unsupported aead mode parameter"); return -ENOTSUP; } @@ -309,7 +368,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops, aes_keyexp_fn = mb_ops->aux.keyexp.aes128; break; default: - MB_LOG_ERR("Invalid cipher key length"); + AESNI_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } @@ -338,16 +397,19 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops, sess->chain_order = HASH_CIPHER; auth_xform = xform; cipher_xform = xform->next; + sess->auth.digest_len = xform->auth.digest_length; break; case AESNI_MB_OP_CIPHER_HASH: sess->chain_order = CIPHER_HASH; auth_xform = xform->next; cipher_xform = xform; + sess->auth.digest_len = xform->auth.digest_length; break; case AESNI_MB_OP_HASH_ONLY: sess->chain_order = HASH_CIPHER; auth_xform = xform; cipher_xform = NULL; + sess->auth.digest_len = xform->auth.digest_length; break; case AESNI_MB_OP_CIPHER_ONLY: /* @@ -366,18 +428,18 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops, case AESNI_MB_OP_AEAD_CIPHER_HASH: sess->chain_order = CIPHER_HASH; sess->aead.aad_len = xform->aead.aad_length; - sess->aead.digest_len = xform->aead.digest_length; + sess->auth.digest_len = xform->aead.digest_length; aead_xform = xform; break; case AESNI_MB_OP_AEAD_HASH_CIPHER: sess->chain_order = HASH_CIPHER; sess->aead.aad_len = xform->aead.aad_length; - sess->aead.digest_len = xform->aead.digest_length; + sess->auth.digest_len = xform->aead.digest_length; aead_xform = xform; break; case AESNI_MB_OP_NOT_SUPPORTED: default: - MB_LOG_ERR("Unsupported operation chain order parameter"); + AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter"); return -ENOTSUP; } @@ -386,14 +448,14 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops, ret = aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform); if (ret != 0) { - MB_LOG_ERR("Invalid/unsupported 
authentication parameters"); + AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters"); return ret; } ret = aesni_mb_set_session_cipher_parameters(mb_ops, sess, cipher_xform); if (ret != 0) { - MB_LOG_ERR("Invalid/unsupported cipher parameters"); + AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters"); return ret; } @@ -401,7 +463,7 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops, ret = aesni_mb_set_session_aead_parameters(mb_ops, sess, aead_xform); if (ret != 0) { - MB_LOG_ERR("Invalid/unsupported aead parameters"); + AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters"); return ret; } } @@ -444,7 +506,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op) if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { if (likely(op->sym->session != NULL)) sess = (struct aesni_mb_session *) - get_session_private_data( + get_sym_session_private_data( op->sym->session, cryptodev_driver_id); } else { @@ -466,8 +528,8 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op) sess = NULL; } op->sym->session = (struct rte_cryptodev_sym_session *)_sess; - set_session_private_data(op->sym->session, cryptodev_driver_id, - _sess_private_data); + set_sym_session_private_data(op->sym->session, + cryptodev_driver_id, _sess_private_data); } if (unlikely(sess == NULL)) @@ -510,22 +572,39 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp, job->cipher_mode = session->cipher.mode; job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes; - job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode; - job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode; + + if (job->cipher_mode == DES3) { + job->aes_enc_key_expanded = + session->cipher.exp_3des_keys.ks_ptr; + job->aes_dec_key_expanded = + session->cipher.exp_3des_keys.ks_ptr; + } else { + job->aes_enc_key_expanded = + session->cipher.expanded_aes_keys.encode; + job->aes_dec_key_expanded = + session->cipher.expanded_aes_keys.decode; + } + + /* Set authentication parameters */ job->hash_alg = session->auth.algo; if (job->hash_alg == AES_XCBC) { - job->_k1_expanded = session->auth.xcbc.k1_expanded; - job->_k2 = session->auth.xcbc.k2; - job->_k3 = session->auth.xcbc.k3; + job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded; + job->u.XCBC._k2 = session->auth.xcbc.k2; + job->u.XCBC._k3 = session->auth.xcbc.k3; } else if (job->hash_alg == AES_CCM) { job->u.CCM.aad = op->sym->aead.aad.data + 18; job->u.CCM.aad_len_in_bytes = session->aead.aad_len; + } else if (job->hash_alg == AES_CMAC) { + job->u.CMAC._key_expanded = session->auth.cmac.expkey; + job->u.CMAC._skey1 = session->auth.cmac.skey1; + job->u.CMAC._skey2 = session->auth.cmac.skey2; + } else { - job->hashed_auth_key_xor_ipad = session->auth.pads.inner; - job->hashed_auth_key_xor_opad = session->auth.pads.outer; + job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner; + job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer; } /* Mutable crypto operation parameters */ @@ -536,7 +615,7 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp, char *odata = rte_pktmbuf_append(m_dst, rte_pktmbuf_data_len(op->sym->m_src)); if (odata == NULL) { - MB_LOG_ERR("failed to allocate space in destination " + AESNI_MB_LOG(ERR, "failed to allocate space in destination " "mbuf for source data"); op->status = RTE_CRYPTO_OP_STATUS_ERROR; return -1; @@ -568,11 +647,11 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp, * Multi-buffer library current only support returning a truncated * 
digest length as specified in the relevant IPsec RFCs */ - if (job->hash_alg != AES_CCM) + if (job->hash_alg != AES_CCM && job->hash_alg != AES_CMAC) job->auth_tag_output_len_in_bytes = get_truncated_digest_byte_length(job->hash_alg); else - job->auth_tag_output_len_in_bytes = session->aead.digest_len; + job->auth_tag_output_len_in_bytes = session->auth.digest_len; /* Set IV parameters */ @@ -639,7 +718,7 @@ static inline struct rte_crypto_op * post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job) { struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data; - struct aesni_mb_session *sess = get_session_private_data( + struct aesni_mb_session *sess = get_sym_session_private_data( op->sym->session, cryptodev_driver_id); @@ -663,7 +742,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job) if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { memset(sess, 0, sizeof(struct aesni_mb_session)); memset(op->sym->session, 0, - rte_cryptodev_get_header_session_size()); + rte_cryptodev_sym_get_header_session_size()); rte_mempool_put(qp->sess_mp, sess); rte_mempool_put(qp->sess_mp, op->sym->session); op->sym->session = NULL; @@ -702,7 +781,7 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job, if (processed_jobs == nb_ops) break; - job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr); + job = (*qp->op_fns->job.get_completed_job)(qp->mb_mgr); } return processed_jobs; @@ -715,7 +794,7 @@ flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops, int processed_ops = 0; /* Flush the remaining jobs */ - JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr); + JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(qp->mb_mgr); if (job) processed_ops += handle_completed_jobs(qp, job, @@ -760,14 +839,14 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, break; /* Get next free mb job struct from mb manager */ - job = (*qp->op_fns->job.get_next)(&qp->mb_mgr); + job = (*qp->op_fns->job.get_next)(qp->mb_mgr); if (unlikely(job == NULL)) { /* if no free mb job structs we need to flush mb_mgr */ processed_jobs += flush_mb_mgr(qp, &ops[processed_jobs], (nb_ops - processed_jobs) - 1); - job = (*qp->op_fns->job.get_next)(&qp->mb_mgr); + job = (*qp->op_fns->job.get_next)(qp->mb_mgr); } retval = set_mb_job_params(job, qp, op, &digest_idx); @@ -777,7 +856,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, } /* Submit job to multi-buffer for processing */ - job = (*qp->op_fns->job.submit)(&qp->mb_mgr); + job = (*qp->op_fns->job.submit)(qp->mb_mgr); /* * If submit returns a processed job then handle it, @@ -813,13 +892,13 @@ cryptodev_aesni_mb_create(const char *name, /* Check CPU for support for AES instruction set */ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) { - MB_LOG_ERR("AES instructions not supported by CPU"); + AESNI_MB_LOG(ERR, "AES instructions not supported by CPU"); return -EFAULT; } dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); if (dev == NULL) { - MB_LOG_ERR("failed to create cryptodev vdev"); + AESNI_MB_LOG(ERR, "failed to create cryptodev vdev"); return -ENODEV; } @@ -866,7 +945,13 @@ cryptodev_aesni_mb_create(const char *name, internals->vector_mode = vector_mode; internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; + +#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0) + AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n", + imb_get_version_str()); +#else + AESNI_MB_LOG(INFO, "IPSec Multi-buffer 
library version used: 0.49.0\n"); +#endif return 0; } @@ -878,8 +963,7 @@ cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev) "", sizeof(struct aesni_mb_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name, *args; int retval; @@ -892,7 +976,7 @@ cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev) retval = rte_cryptodev_pmd_parse_input_args(&init_params, args); if (retval) { - MB_LOG_ERR("Failed to parse initialisation arguments[%s]\n", + AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]", args); return -EINVAL; } @@ -928,8 +1012,12 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv); RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id="); RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv, - cryptodev_aesni_mb_pmd_drv, + cryptodev_aesni_mb_pmd_drv.driver, cryptodev_driver_id); + +RTE_INIT(aesni_mb_init_log) +{ + aesni_mb_logtype_driver = rte_log_register("pmd.crypto.aesni_mb"); +} diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c index 9d685a09..ab26e5ae 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c @@ -239,6 +239,26 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = { }, } }, } }, + { /* 3DES CBC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_3DES_CBC, + .block_size = 8, + .key_size = { + .min = 8, + .max = 24, + .increment = 8 + }, + .iv_size = { + .min = 8, + .max = 8, + .increment = 0 + } + }, } + }, } + }, { /* DES DOCSIS BPI */ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, {.sym = { @@ -289,8 +309,27 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = { }, } }, } }, - - + { /* AES CMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_AES_CMAC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .digest_size = { + .min = 12, + .max = 16, + .increment = 4 + }, + .iv_size = { 0 } + }, } + }, } + }, RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() }; @@ -368,7 +407,8 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev, dev_info->feature_flags = dev->feature_flags; dev_info->capabilities = aesni_mb_pmd_capabilities; dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; } } @@ -383,6 +423,8 @@ aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id) r = rte_ring_lookup(qp->name); if (r) rte_ring_free(r); + if (qp->mb_mgr) + free_mb_mgr(qp->mb_mgr); rte_free(qp); dev->data->queue_pairs[qp_id] = NULL; } @@ -422,12 +464,12 @@ aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp, r = rte_ring_lookup(ring_name); if (r) { if (rte_ring_get_size(r) >= ring_size) { - MB_LOG_INFO("Reusing existing ring %s for processed ops", + AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops", ring_name); return r; } - MB_LOG_ERR("Unable to reuse existing ring %s for processed ops", + AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed 
ops",
				ring_name);
		return NULL;
	}
@@ -444,6 +486,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 {
 	struct aesni_mb_qp *qp = NULL;
 	struct aesni_mb_private *internals = dev->data->dev_private;
+	int ret = -1;
 
 	/* Free memory prior to re-allocation if needed. */
 	if (dev->data->queue_pairs[qp_id] != NULL)
@@ -462,12 +505,20 @@
 		goto qp_setup_cleanup;
 
+	qp->mb_mgr = alloc_mb_mgr(0);
+	if (qp->mb_mgr == NULL) {
+		ret = -ENOMEM;
+		goto qp_setup_cleanup;
+	}
+
 	qp->op_fns = &job_ops[internals->vector_mode];
 
 	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
 			"ingress", qp_conf->nb_descriptors, socket_id);
-	if (qp->ingress_queue == NULL)
+	if (qp->ingress_queue == NULL) {
+		ret = -1;
 		goto qp_setup_cleanup;
+	}
 
 	qp->sess_mp = session_pool;
 
@@ -479,30 +530,17 @@
 			"digest_mp_%u_%u", dev->data->dev_id, qp_id);
 
 	/* Initialise multi-buffer manager */
-	(*qp->op_fns->job.init_mgr)(&qp->mb_mgr);
+	(*qp->op_fns->job.init_mgr)(qp->mb_mgr);
 	return 0;
 
 qp_setup_cleanup:
-	if (qp)
+	if (qp) {
+		if (qp->mb_mgr != NULL)
+			free_mb_mgr(qp->mb_mgr);
 		rte_free(qp);
+	}
 
-	return -1;
-}
-
-/** Start queue pair */
-static int
-aesni_mb_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint16_t queue_pair_id)
-{
-	return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-aesni_mb_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint16_t queue_pair_id)
-{
-	return -ENOTSUP;
+	return ret;
 }
 
 /** Return the number of allocated queue pairs */
 static uint32_t
 aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
@@ -514,14 +552,14 @@ aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
 
 /** Returns the size of the aesni multi-buffer session structure */
 static unsigned
-aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
 {
 	return sizeof(struct aesni_mb_session);
 }
 
 /** Configure an aesni multi-buffer session from a crypto xform chain */
 static int
-aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
+aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform,
 		struct rte_cryptodev_sym_session *sess,
 		struct rte_mempool *mempool)
@@ -531,27 +569,27 @@ aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
 	int ret;
 
 	if (unlikely(sess == NULL)) {
-		MB_LOG_ERR("invalid session struct");
+		AESNI_MB_LOG(ERR, "invalid session struct");
 		return -EINVAL;
 	}
 
 	if (rte_mempool_get(mempool, &sess_private_data)) {
-		CDEV_LOG_ERR(
-			"Couldn't get object from session mempool");
+		AESNI_MB_LOG(ERR,
+				"Couldn't get object from session mempool");
 		return -ENOMEM;
 	}
 
 	ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
 			sess_private_data, xform);
 	if (ret != 0) {
-		MB_LOG_ERR("failed configure session parameters");
+		AESNI_MB_LOG(ERR, "failed configure session parameters");
 
 		/* Return session to mempool */
 		rte_mempool_put(mempool, sess_private_data);
 		return ret;
 	}
 
-	set_session_private_data(sess, dev->driver_id,
+	set_sym_session_private_data(sess, dev->driver_id,
 			sess_private_data);
 
 	return 0;
@@ -559,17 +597,17 @@ aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
 
 /** Clear the memory of session so it doesn't leave key material behind */
 static void
-aesni_mb_pmd_session_clear(struct rte_cryptodev *dev,
+aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_sym_session *sess)
 {
 	uint8_t index = dev->driver_id;
-	void *sess_priv =
get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { memset(sess_priv, 0, sizeof(struct aesni_mb_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -587,13 +625,11 @@ struct rte_cryptodev_ops aesni_mb_pmd_ops = { .queue_pair_setup = aesni_mb_pmd_qp_setup, .queue_pair_release = aesni_mb_pmd_qp_release, - .queue_pair_start = aesni_mb_pmd_qp_start, - .queue_pair_stop = aesni_mb_pmd_qp_stop, .queue_pair_count = aesni_mb_pmd_qp_count, - .session_get_size = aesni_mb_pmd_session_get_size, - .session_configure = aesni_mb_pmd_session_configure, - .session_clear = aesni_mb_pmd_session_clear + .sym_session_get_size = aesni_mb_pmd_sym_session_get_size, + .sym_session_configure = aesni_mb_pmd_sym_session_configure, + .sym_session_clear = aesni_mb_pmd_sym_session_clear }; struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops; diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h index 948e091c..70e9d18e 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h @@ -7,28 +7,26 @@ #include "aesni_mb_ops.h" +/* + * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50, + * so if macro is not defined, it means that the version is 0.49. + */ +#if !defined(IMB_VERSION_NUM) +#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0) +#endif + #define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb /**< AES-NI Multi buffer PMD device name */ -#define MB_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), \ - __func__, __LINE__, ## args) - -#ifdef RTE_LIBRTE_AESNI_MB_DEBUG -#define MB_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_AESNI_MB_PMD, \ - __func__, __LINE__, ## args) - -#define MB_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_AESNI_MB_PMD, \ - __func__, __LINE__, ## args) -#else -#define MB_LOG_INFO(fmt, args...) -#define MB_LOG_DBG(fmt, args...) -#endif +/** AESNI_MB PMD LOGTYPE DRIVER */ +int aesni_mb_logtype_driver; + +#define AESNI_MB_LOG(level, fmt, ...) 
\ + rte_log(RTE_LOG_ ## level, aesni_mb_logtype_driver, \ + "%s() line %u: " fmt "\n", __func__, __LINE__, \ + ## __VA_ARGS__) + #define HMAC_IPAD_VALUE (0x36) #define HMAC_OPAD_VALUE (0x5C) @@ -66,8 +64,9 @@ static const unsigned auth_truncated_digest_byte_lengths[] = { [SHA_384] = 24, [SHA_512] = 32, [AES_XCBC] = 12, + [AES_CMAC] = 16, [AES_CCM] = 8, - [NULL_HASH] = 0 + [NULL_HASH] = 0 }; /** @@ -91,7 +90,8 @@ static const unsigned auth_digest_byte_lengths[] = { [SHA_384] = 48, [SHA_512] = 64, [AES_XCBC] = 16, - [NULL_HASH] = 0 + [AES_CMAC] = 16, + [NULL_HASH] = 0 }; /** @@ -122,8 +122,6 @@ struct aesni_mb_private { /**< CPU vector instruction set mode */ unsigned max_nb_queue_pairs; /**< Max number of queue pairs supported by device */ - unsigned max_nb_sessions; - /**< Max number of sessions supported by device */ }; /** AESNI Multi buffer queue pair */ @@ -134,7 +132,7 @@ struct aesni_mb_qp { /**< Unique Queue Pair Name */ const struct aesni_mb_op_fns *op_fns; /**< Vector mode dependent pointer table of the multi-buffer APIs */ - MB_MGR mb_mgr; + MB_MGR *mb_mgr; /**< Multi-buffer instance */ struct rte_ring *ingress_queue; /**< Ring for placing operations ready for processing */ @@ -171,12 +169,18 @@ struct aesni_mb_session { uint64_t key_length_in_bytes; - struct { - uint32_t encode[60] __rte_aligned(16); - /**< encode key */ - uint32_t decode[60] __rte_aligned(16); - /**< decode key */ - } expanded_aes_keys; + union { + struct { + uint32_t encode[60] __rte_aligned(16); + /**< encode key */ + uint32_t decode[60] __rte_aligned(16); + /**< decode key */ + } expanded_aes_keys; + struct { + const void *ks_ptr[3]; + uint64_t key[3][16]; + } exp_3des_keys; + }; /**< Expanded AES keys - Allocating space to * contain the maximum expanded key size which * is 240 bytes for 256 bit AES, calculate by: @@ -211,14 +215,24 @@ struct aesni_mb_session { uint8_t k3[16] __rte_aligned(16); /**< k3. */ } xcbc; + + struct { + uint32_t expkey[60] __rte_aligned(16); + /**< k1 (expanded key). */ + uint32_t skey1[4] __rte_aligned(16); + /**< k2. */ + uint32_t skey2[4] __rte_aligned(16); + /**< k3. 
*/ + } cmac; /**< Expanded XCBC authentication keys */ }; + /** digest size */ + uint16_t digest_len; + } auth; struct { /** AAD data length */ uint16_t aad_len; - /** digest size */ - uint16_t digest_len; } aead; } __rte_cache_aligned; diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c index 59fffcf1..9d15fee5 100644 --- a/drivers/crypto/armv8/rte_armv8_pmd.c +++ b/drivers/crypto/armv8/rte_armv8_pmd.c @@ -502,7 +502,7 @@ get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op) /* get existing session */ if (likely(op->sym->session != NULL)) { sess = (struct armv8_crypto_session *) - get_session_private_data( + get_sym_session_private_data( op->sym->session, cryptodev_driver_id); } @@ -526,8 +526,8 @@ get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op) sess = NULL; } op->sym->session = (struct rte_cryptodev_sym_session *)_sess; - set_session_private_data(op->sym->session, cryptodev_driver_id, - _sess_private_data); + set_sym_session_private_data(op->sym->session, + cryptodev_driver_id, _sess_private_data); } if (unlikely(sess == NULL)) @@ -654,7 +654,7 @@ process_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op, if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { memset(sess, 0, sizeof(struct armv8_crypto_session)); memset(op->sym->session, 0, - rte_cryptodev_get_header_session_size()); + rte_cryptodev_sym_get_header_session_size()); rte_mempool_put(qp->sess_mp, sess); rte_mempool_put(qp->sess_mp, op->sym->session); op->sym->session = NULL; @@ -779,7 +779,6 @@ cryptodev_armv8_crypto_create(const char *name, internals = dev->data->dev_private; internals->max_nb_qpairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; return 0; @@ -800,8 +799,7 @@ cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev) "", sizeof(struct armv8_crypto_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name; const char *input_args; @@ -848,7 +846,6 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_pmd_drv); RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_ARMV8_PMD, cryptodev_armv8_pmd); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id="); -RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv, +RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv.driver, cryptodev_driver_id); diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c index 3817ad7b..ae03117e 100644 --- a/drivers/crypto/armv8/rte_armv8_pmd_ops.c +++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c @@ -27,9 +27,9 @@ static const struct rte_cryptodev_capabilities .increment = 1 }, .digest_size = { - .min = 20, + .min = 1, .max = 20, - .increment = 0 + .increment = 1 }, .iv_size = { 0 } }, } @@ -48,9 +48,9 @@ static const struct rte_cryptodev_capabilities .increment = 1 }, .digest_size = { - .min = 32, + .min = 1, .max = 32, - .increment = 0 + .increment = 1 }, .iv_size = { 0 } }, } @@ -154,7 +154,8 @@ armv8_crypto_pmd_info_get(struct rte_cryptodev *dev, dev_info->feature_flags = dev->feature_flags; dev_info->capabilities = armv8_crypto_pmd_capabilities; dev_info->max_nb_queue_pairs = internals->max_nb_qpairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; } } @@ -257,22 +258,6 @@ 
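/*
 * Editorial sketch: the armv8 digest-size ranges just widened (minimum 1
 * byte, increment 1) matter to applications negotiating truncated
 * digests. Whether a given length is supported can be probed through the
 * standard cryptodev capability API; a minimal illustration for
 * SHA1-HMAC, where dev_id and the 64-byte key size are example values.
 */
#include <stdint.h>
#include <rte_cryptodev.h>

static int
sha1_hmac_digest_ok(uint8_t dev_id, uint16_t digest_len)
{
    const struct rte_cryptodev_symmetric_capability *cap;
    struct rte_cryptodev_sym_capability_idx idx = {
        .type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo.auth = RTE_CRYPTO_AUTH_SHA1_HMAC,
    };

    cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
    if (cap == NULL)
        return 0;
    /* Returns 0 when key, digest and IV sizes all fall in range. */
    return rte_cryptodev_sym_capability_check_auth(cap, 64,
            digest_len, 0) == 0;
}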
qp_setup_cleanup: return -1; } -/** Start queue pair */ -static int -armv8_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -armv8_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - /** Return the number of allocated queue pairs */ static uint32_t armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev) @@ -282,14 +267,14 @@ armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev) /** Returns the size of the session structure */ static unsigned -armv8_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +armv8_crypto_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct armv8_crypto_session); } /** Configure the session from a crypto xform chain */ static int -armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev, +armv8_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -317,7 +302,7 @@ armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev, return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -325,17 +310,17 @@ armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev, /** Clear the memory of session so it doesn't leave key material behind */ static void -armv8_crypto_pmd_session_clear(struct rte_cryptodev *dev, +armv8_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { memset(sess_priv, 0, sizeof(struct armv8_crypto_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -353,13 +338,11 @@ struct rte_cryptodev_ops armv8_crypto_pmd_ops = { .queue_pair_setup = armv8_crypto_pmd_qp_setup, .queue_pair_release = armv8_crypto_pmd_qp_release, - .queue_pair_start = armv8_crypto_pmd_qp_start, - .queue_pair_stop = armv8_crypto_pmd_qp_stop, .queue_pair_count = armv8_crypto_pmd_qp_count, - .session_get_size = armv8_crypto_pmd_session_get_size, - .session_configure = armv8_crypto_pmd_session_configure, - .session_clear = armv8_crypto_pmd_session_clear + .sym_session_get_size = armv8_crypto_pmd_sym_session_get_size, + .sym_session_configure = armv8_crypto_pmd_sym_session_configure, + .sym_session_clear = armv8_crypto_pmd_sym_session_clear }; struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops = &armv8_crypto_pmd_ops; diff --git a/drivers/crypto/armv8/rte_armv8_pmd_private.h b/drivers/crypto/armv8/rte_armv8_pmd_private.h index b8966e93..7feb021d 100644 --- a/drivers/crypto/armv8/rte_armv8_pmd_private.h +++ b/drivers/crypto/armv8/rte_armv8_pmd_private.h @@ -106,8 +106,6 @@ typedef void (*crypto_key_sched_t)(uint8_t *, const uint8_t *); struct armv8_crypto_private { unsigned int max_nb_qpairs; /**< Max number of queue pairs */ - unsigned int max_nb_sessions; - /**< Max number of sessions */ }; /** ARMv8 crypto queue pair */ diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile new file mode 100644 index 00000000..f51d170f --- /dev/null 
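/*
 * Editorial sketch: with queue_pair_start/stop dropped and the session
 * hooks renamed, every software crypto PMD touched by this patch now
 * exposes the same ops-table shape. A skeletal illustration of that
 * shape; every handler is a placeholder a real driver would wire to its
 * own functions.
 */
#include <rte_cryptodev_pmd.h>

static struct rte_cryptodev_ops my_pmd_ops = {
    .dev_configure      = NULL,
    .dev_start          = NULL,
    .dev_stop           = NULL,
    .dev_close          = NULL,

    .stats_get          = NULL,
    .stats_reset        = NULL,

    .dev_infos_get      = NULL,

    .queue_pair_setup   = NULL,
    .queue_pair_release = NULL,
    .queue_pair_count   = NULL,

    /* 18.08: sym_-prefixed session hooks, no per-qp start/stop */
    .sym_session_get_size  = NULL,
    .sym_session_configure = NULL,
    .sym_session_clear     = NULL,
};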
+++ b/drivers/crypto/ccp/Makefile @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_pmd_ccp.a + +# build flags +CFLAGS += -O3 +CFLAGS += -I$(SRCDIR) +CFLAGS += $(WERROR_FLAGS) + +# library version +LIBABIVER := 1 + +# external library include paths +LDLIBS += -lcrypto +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_cryptodev +LDLIBS += -lrte_pci -lrte_bus_pci +LDLIBS += -lrte_bus_vdev +LDLIBS += -lrte_kvargs + +# versioning export map +EXPORT_MAP := rte_pmd_ccp_version.map + +# library source files +SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c new file mode 100644 index 00000000..19ae9153 --- /dev/null +++ b/drivers/crypto/ccp/ccp_crypto.c @@ -0,0 +1,2951 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /*sub key apis*/ +#include /*sub key apis*/ + +#include +#include +#include +#include +#include +#include +#include + +#include "ccp_dev.h" +#include "ccp_crypto.h" +#include "ccp_pci.h" +#include "ccp_pmd_private.h" + +#include +#include +#include + +/* SHA initial context values */ +static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = { + SHA1_H4, SHA1_H3, + SHA1_H2, SHA1_H1, + SHA1_H0, 0x0U, + 0x0U, 0x0U, +}; + +uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = { + SHA224_H7, SHA224_H6, + SHA224_H5, SHA224_H4, + SHA224_H3, SHA224_H2, + SHA224_H1, SHA224_H0, +}; + +uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = { + SHA256_H7, SHA256_H6, + SHA256_H5, SHA256_H4, + SHA256_H3, SHA256_H2, + SHA256_H1, SHA256_H0, +}; + +uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = { + SHA384_H7, SHA384_H6, + SHA384_H5, SHA384_H4, + SHA384_H3, SHA384_H2, + SHA384_H1, SHA384_H0, +}; + +uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = { + SHA512_H7, SHA512_H6, + SHA512_H5, SHA512_H4, + SHA512_H3, SHA512_H2, + SHA512_H1, SHA512_H0, +}; + +#if defined(_MSC_VER) +#define SHA3_CONST(x) x +#else +#define SHA3_CONST(x) x##L +#endif + +/** 'Words' here refers to uint64_t */ +#define SHA3_KECCAK_SPONGE_WORDS \ + (((1600) / 8) / sizeof(uint64_t)) +typedef struct sha3_context_ { + uint64_t saved; + /** + * The portion of the input message that we + * didn't consume yet + */ + union { + uint64_t s[SHA3_KECCAK_SPONGE_WORDS]; + /* Keccak's state */ + uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8]; + /**total 200 ctx size**/ + }; + unsigned int byteIndex; + /** + * 0..7--the next byte after the set one + * (starts from 0; 0--none are buffered) + */ + unsigned int wordIndex; + /** + * 0..24--the next word to integrate input + * (starts from 0) + */ + unsigned int capacityWords; + /** + * the double size of the hash output in + * words (e.g. 
16 for Keccak 512) + */ +} sha3_context; + +#ifndef SHA3_ROTL64 +#define SHA3_ROTL64(x, y) \ + (((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y)))) +#endif + +static const uint64_t keccakf_rndc[24] = { + SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL), + SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL), + SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL), + SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL), + SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL), + SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL), + SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL), + SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL), + SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL), + SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL), + SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL), + SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL) +}; + +static const unsigned int keccakf_rotc[24] = { + 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62, + 18, 39, 61, 20, 44 +}; + +static const unsigned int keccakf_piln[24] = { + 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20, + 14, 22, 9, 6, 1 +}; + +static enum ccp_cmd_order +ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform) +{ + enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED; + + if (xform == NULL) + return res; + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { + if (xform->next == NULL) + return CCP_CMD_AUTH; + else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) + return CCP_CMD_HASH_CIPHER; + } + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { + if (xform->next == NULL) + return CCP_CMD_CIPHER; + else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) + return CCP_CMD_CIPHER_HASH; + } + if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) + return CCP_CMD_COMBINED; + return res; +} + +/* partial hash using openssl */ +static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out) +{ + SHA_CTX ctx; + + if (!SHA1_Init(&ctx)) + return -EFAULT; + SHA1_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH); + return 0; +} + +static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out) +{ + SHA256_CTX ctx; + + if (!SHA224_Init(&ctx)) + return -EFAULT; + SHA256_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, + SHA256_DIGEST_LENGTH); + return 0; +} + +static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out) +{ + SHA256_CTX ctx; + + if (!SHA256_Init(&ctx)) + return -EFAULT; + SHA256_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, + SHA256_DIGEST_LENGTH); + return 0; +} + +static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out) +{ + SHA512_CTX ctx; + + if (!SHA384_Init(&ctx)) + return -EFAULT; + SHA512_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, + SHA512_DIGEST_LENGTH); + return 0; +} + +static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out) +{ + SHA512_CTX ctx; + + if (!SHA512_Init(&ctx)) + return -EFAULT; + SHA512_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, + SHA512_DIGEST_LENGTH); + return 0; +} + +static void +keccakf(uint64_t s[25]) +{ + int i, j, round; + uint64_t t, bc[5]; +#define KECCAK_ROUNDS 24 + + for (round = 0; round < KECCAK_ROUNDS; round++) { + + /* Theta */ + for (i = 0; i < 5; i++) + bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^ + s[i + 20]; + + for (i = 0; i < 5; i++) { + t = bc[(i + 4) % 5] ^ 
SHA3_ROTL64(bc[(i + 1) % 5], 1); + for (j = 0; j < 25; j += 5) + s[j + i] ^= t; + } + + /* Rho Pi */ + t = s[1]; + for (i = 0; i < 24; i++) { + j = keccakf_piln[i]; + bc[0] = s[j]; + s[j] = SHA3_ROTL64(t, keccakf_rotc[i]); + t = bc[0]; + } + + /* Chi */ + for (j = 0; j < 25; j += 5) { + for (i = 0; i < 5; i++) + bc[i] = s[j + i]; + for (i = 0; i < 5; i++) + s[j + i] ^= (~bc[(i + 1) % 5]) & + bc[(i + 2) % 5]; + } + + /* Iota */ + s[0] ^= keccakf_rndc[round]; + } +} + +static void +sha3_Init224(void *priv) +{ + sha3_context *ctx = (sha3_context *) priv; + + memset(ctx, 0, sizeof(*ctx)); + ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t)); +} + +static void +sha3_Init256(void *priv) +{ + sha3_context *ctx = (sha3_context *) priv; + + memset(ctx, 0, sizeof(*ctx)); + ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t)); +} + +static void +sha3_Init384(void *priv) +{ + sha3_context *ctx = (sha3_context *) priv; + + memset(ctx, 0, sizeof(*ctx)); + ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t)); +} + +static void +sha3_Init512(void *priv) +{ + sha3_context *ctx = (sha3_context *) priv; + + memset(ctx, 0, sizeof(*ctx)); + ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t)); +} + + +/* This is simply the 'update' with the padding block. + * The padding block is 0x01 || 0x00* || 0x80. First 0x01 and last 0x80 + * bytes are always present, but they can be the same byte. + */ +static void +sha3_Update(void *priv, void const *bufIn, size_t len) +{ + sha3_context *ctx = (sha3_context *) priv; + unsigned int old_tail = (8 - ctx->byteIndex) & 7; + size_t words; + unsigned int tail; + size_t i; + const uint8_t *buf = bufIn; + + if (len < old_tail) { + while (len--) + ctx->saved |= (uint64_t) (*(buf++)) << + ((ctx->byteIndex++) * 8); + return; + } + + if (old_tail) { + len -= old_tail; + while (old_tail--) + ctx->saved |= (uint64_t) (*(buf++)) << + ((ctx->byteIndex++) * 8); + + ctx->s[ctx->wordIndex] ^= ctx->saved; + ctx->byteIndex = 0; + ctx->saved = 0; + if (++ctx->wordIndex == + (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) { + keccakf(ctx->s); + ctx->wordIndex = 0; + } + } + + words = len / sizeof(uint64_t); + tail = len - words * sizeof(uint64_t); + + for (i = 0; i < words; i++, buf += sizeof(uint64_t)) { + const uint64_t t = (uint64_t) (buf[0]) | + ((uint64_t) (buf[1]) << 8 * 1) | + ((uint64_t) (buf[2]) << 8 * 2) | + ((uint64_t) (buf[3]) << 8 * 3) | + ((uint64_t) (buf[4]) << 8 * 4) | + ((uint64_t) (buf[5]) << 8 * 5) | + ((uint64_t) (buf[6]) << 8 * 6) | + ((uint64_t) (buf[7]) << 8 * 7); + ctx->s[ctx->wordIndex] ^= t; + if (++ctx->wordIndex == + (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) { + keccakf(ctx->s); + ctx->wordIndex = 0; + } + } + + while (tail--) + ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8); +} + +int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out) +{ + sha3_context *ctx; + int i; + + ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0); + if (!ctx) { + CCP_LOG_ERR("sha3-ctx creation failed"); + return -ENOMEM; + } + sha3_Init224(ctx); + sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE); + for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++) + *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1]; + rte_free(ctx); + + return 0; +} + +int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out) +{ + sha3_context *ctx; + int i; + + ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0); + if (!ctx) { + CCP_LOG_ERR("sha3-ctx creation failed"); + return -ENOMEM; + } + sha3_Init256(ctx); + sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE); + for (i = 0; i 
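+	/*
+	 * A *partial* hash, as with the SHA-2 helpers above: exactly one
+	 * block (K ^ ipad or K ^ opad) has been absorbed, and the raw
+	 * sponge state rather than a finalized digest is exported. The
+	 * copy below reverses the byte order, presumably to match the
+	 * layout in which the CCP engine reloads the context.
+	 */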
< CCP_SHA3_CTX_SIZE; i++, data_out++) + *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1]; + rte_free(ctx); + + return 0; +} + +int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out) +{ + sha3_context *ctx; + int i; + + ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0); + if (!ctx) { + CCP_LOG_ERR("sha3-ctx creation failed"); + return -ENOMEM; + } + sha3_Init384(ctx); + sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE); + for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++) + *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1]; + rte_free(ctx); + + return 0; +} + +int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out) +{ + sha3_context *ctx; + int i; + + ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0); + if (!ctx) { + CCP_LOG_ERR("sha3-ctx creation failed"); + return -ENOMEM; + } + sha3_Init512(ctx); + sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE); + for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++) + *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1]; + rte_free(ctx); + + return 0; +} + +static int generate_partial_hash(struct ccp_session *sess) +{ + + uint8_t ipad[sess->auth.block_size]; + uint8_t opad[sess->auth.block_size]; + uint8_t *ipad_t, *opad_t; + uint32_t *hash_value_be32, hash_temp32[8]; + uint64_t *hash_value_be64, hash_temp64[8]; + int i, count; + uint8_t *hash_value_sha3; + + opad_t = ipad_t = (uint8_t *)sess->auth.key; + + hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute); + hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute); + + /* considering key size is always equal to block size of algorithm */ + for (i = 0; i < sess->auth.block_size; i++) { + ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE); + opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE); + } + + switch (sess->auth.algo) { + case CCP_AUTH_ALGO_SHA1_HMAC: + count = SHA1_DIGEST_SIZE >> 2; + + if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32)) + return -1; + for (i = 0; i < count; i++, hash_value_be32++) + *hash_value_be32 = hash_temp32[count - 1 - i]; + + hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha1(opad, (uint8_t *)hash_temp32)) + return -1; + for (i = 0; i < count; i++, hash_value_be32++) + *hash_value_be32 = hash_temp32[count - 1 - i]; + return 0; + case CCP_AUTH_ALGO_SHA224_HMAC: + count = SHA256_DIGEST_SIZE >> 2; + + if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32)) + return -1; + for (i = 0; i < count; i++, hash_value_be32++) + *hash_value_be32 = hash_temp32[count - 1 - i]; + + hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha224(opad, (uint8_t *)hash_temp32)) + return -1; + for (i = 0; i < count; i++, hash_value_be32++) + *hash_value_be32 = hash_temp32[count - 1 - i]; + return 0; + case CCP_AUTH_ALGO_SHA3_224_HMAC: + hash_value_sha3 = sess->auth.pre_compute; + if (partial_hash_sha3_224(ipad, hash_value_sha3)) + return -1; + + hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha3_224(opad, hash_value_sha3)) + return -1; + return 0; + case CCP_AUTH_ALGO_SHA256_HMAC: + count = SHA256_DIGEST_SIZE >> 2; + + if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32)) + return -1; + for (i = 0; i < count; i++, hash_value_be32++) + *hash_value_be32 = hash_temp32[count - 1 - i]; + + hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha256(opad, (uint8_t *)hash_temp32)) + return -1; + for (i = 0; i < count; i++, hash_value_be32++) + *hash_value_be32 = 
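+	/*
+	 * Recall HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)). The two
+	 * values staged into pre_compute are the single-block partial
+	 * hashes PHash1 = H(K ^ ipad) and PHash2 = H(K ^ opad), computed
+	 * once per session so each packet only resumes two hashes instead
+	 * of rehashing the padded key. The word-reversed store converts
+	 * the context into the order the engine loads it from the LSB.
+	 */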
hash_temp32[count - 1 - i]; + return 0; + case CCP_AUTH_ALGO_SHA3_256_HMAC: + hash_value_sha3 = sess->auth.pre_compute; + if (partial_hash_sha3_256(ipad, hash_value_sha3)) + return -1; + + hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha3_256(opad, hash_value_sha3)) + return -1; + return 0; + case CCP_AUTH_ALGO_SHA384_HMAC: + count = SHA512_DIGEST_SIZE >> 3; + + if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64)) + return -1; + for (i = 0; i < count; i++, hash_value_be64++) + *hash_value_be64 = hash_temp64[count - 1 - i]; + + hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha384(opad, (uint8_t *)hash_temp64)) + return -1; + for (i = 0; i < count; i++, hash_value_be64++) + *hash_value_be64 = hash_temp64[count - 1 - i]; + return 0; + case CCP_AUTH_ALGO_SHA3_384_HMAC: + hash_value_sha3 = sess->auth.pre_compute; + if (partial_hash_sha3_384(ipad, hash_value_sha3)) + return -1; + + hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha3_384(opad, hash_value_sha3)) + return -1; + return 0; + case CCP_AUTH_ALGO_SHA512_HMAC: + count = SHA512_DIGEST_SIZE >> 3; + + if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64)) + return -1; + for (i = 0; i < count; i++, hash_value_be64++) + *hash_value_be64 = hash_temp64[count - 1 - i]; + + hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha512(opad, (uint8_t *)hash_temp64)) + return -1; + for (i = 0; i < count; i++, hash_value_be64++) + *hash_value_be64 = hash_temp64[count - 1 - i]; + return 0; + case CCP_AUTH_ALGO_SHA3_512_HMAC: + hash_value_sha3 = sess->auth.pre_compute; + if (partial_hash_sha3_512(ipad, hash_value_sha3)) + return -1; + + hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha3_512(opad, hash_value_sha3)) + return -1; + return 0; + default: + CCP_LOG_ERR("Invalid auth algo"); + return -1; + } +} + +/* prepare temporary keys K1 and K2 */ +static void prepare_key(unsigned char *k, unsigned char *l, int bl) +{ + int i; + /* Shift block to left, including carry */ + for (i = 0; i < bl; i++) { + k[i] = l[i] << 1; + if (i < bl - 1 && l[i + 1] & 0x80) + k[i] |= 1; + } + /* If MSB set fixup with R */ + if (l[0] & 0x80) + k[bl - 1] ^= bl == 16 ? 
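+	/*
+	 * Rb constant from the CMAC spec (NIST SP 800-38B): the subkeys
+	 * are K1 = (L << 1) ^ (msb(L) ? Rb : 0) and K2 derived the same
+	 * way from K1, with Rb = 0x87 for 128-bit blocks (AES) and 0x1b
+	 * for 64-bit blocks; the XOR lands in the last byte when the
+	 * shifted-out MSB was set.
+	 */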
0x87 : 0x1b; +} + +/* subkeys K1 and K2 generation for CMAC */ +static int +generate_cmac_subkeys(struct ccp_session *sess) +{ + const EVP_CIPHER *algo; + EVP_CIPHER_CTX *ctx; + unsigned char *ccp_ctx; + size_t i; + int dstlen, totlen; + unsigned char zero_iv[AES_BLOCK_SIZE] = {0}; + unsigned char dst[2 * AES_BLOCK_SIZE] = {0}; + unsigned char k1[AES_BLOCK_SIZE] = {0}; + unsigned char k2[AES_BLOCK_SIZE] = {0}; + + if (sess->auth.ut.aes_type == CCP_AES_TYPE_128) + algo = EVP_aes_128_cbc(); + else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192) + algo = EVP_aes_192_cbc(); + else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256) + algo = EVP_aes_256_cbc(); + else { + CCP_LOG_ERR("Invalid CMAC type length"); + return -1; + } + + ctx = EVP_CIPHER_CTX_new(); + if (!ctx) { + CCP_LOG_ERR("ctx creation failed"); + return -1; + } + if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key, + (unsigned char *)zero_iv) <= 0) + goto key_generate_err; + if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0) + goto key_generate_err; + if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv, + AES_BLOCK_SIZE) <= 0) + goto key_generate_err; + if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0) + goto key_generate_err; + + memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2); + + ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1); + prepare_key(k1, dst, AES_BLOCK_SIZE); + for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--) + *ccp_ctx = k1[i]; + + ccp_ctx = (unsigned char *)(sess->auth.pre_compute + + (2 * CCP_SB_BYTES) - 1); + prepare_key(k2, k1, AES_BLOCK_SIZE); + for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--) + *ccp_ctx = k2[i]; + + EVP_CIPHER_CTX_free(ctx); + + return 0; + +key_generate_err: + CCP_LOG_ERR("CMAC Init failed"); + return -1; +} + +/* configure session */ +static int +ccp_configure_session_cipher(struct ccp_session *sess, + const struct rte_crypto_sym_xform *xform) +{ + const struct rte_crypto_cipher_xform *cipher_xform = NULL; + size_t i, j, x; + + cipher_xform = &xform->cipher; + + /* set cipher direction */ + if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) + sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT; + else + sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT; + + /* set cipher key */ + sess->cipher.key_length = cipher_xform->key.length; + rte_memcpy(sess->cipher.key, cipher_xform->key.data, + cipher_xform->key.length); + + /* set iv parameters */ + sess->iv.offset = cipher_xform->iv.offset; + sess->iv.length = cipher_xform->iv.length; + + switch (cipher_xform->algo) { + case RTE_CRYPTO_CIPHER_AES_CTR: + sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR; + sess->cipher.um.aes_mode = CCP_AES_MODE_CTR; + sess->cipher.engine = CCP_ENGINE_AES; + break; + case RTE_CRYPTO_CIPHER_AES_ECB: + sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC; + sess->cipher.um.aes_mode = CCP_AES_MODE_ECB; + sess->cipher.engine = CCP_ENGINE_AES; + break; + case RTE_CRYPTO_CIPHER_AES_CBC: + sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC; + sess->cipher.um.aes_mode = CCP_AES_MODE_CBC; + sess->cipher.engine = CCP_ENGINE_AES; + break; + case RTE_CRYPTO_CIPHER_3DES_CBC: + sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC; + sess->cipher.um.des_mode = CCP_DES_MODE_CBC; + sess->cipher.engine = CCP_ENGINE_3DES; + break; + default: + CCP_LOG_ERR("Unsupported cipher algo"); + return -1; + } + + + switch (sess->cipher.engine) { + case CCP_ENGINE_AES: + if (sess->cipher.key_length == 16) + sess->cipher.ut.aes_type = CCP_AES_TYPE_128; + else if (sess->cipher.key_length == 24) + sess->cipher.ut.aes_type = CCP_AES_TYPE_192; + else if 
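+	/*
+	 * The key length alone selects the AES variant: 16, 24 and 32
+	 * bytes map to AES-128/192/256. The loop that follows stores the
+	 * key byte-reversed in key_ccp, the order in which the engine
+	 * fetches key material.
+	 */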
(sess->cipher.key_length == 32) + sess->cipher.ut.aes_type = CCP_AES_TYPE_256; + else { + CCP_LOG_ERR("Invalid cipher key length"); + return -1; + } + for (i = 0; i < sess->cipher.key_length ; i++) + sess->cipher.key_ccp[sess->cipher.key_length - i - 1] = + sess->cipher.key[i]; + break; + case CCP_ENGINE_3DES: + if (sess->cipher.key_length == 16) + sess->cipher.ut.des_type = CCP_DES_TYPE_128; + else if (sess->cipher.key_length == 24) + sess->cipher.ut.des_type = CCP_DES_TYPE_192; + else { + CCP_LOG_ERR("Invalid cipher key length"); + return -1; + } + for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8) + for (i = 0; i < 8; i++) + sess->cipher.key_ccp[(8 + x) - i - 1] = + sess->cipher.key[i + x]; + break; + default: + CCP_LOG_ERR("Invalid CCP Engine"); + return -ENOTSUP; + } + sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce); + sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp); + return 0; +} + +static int +ccp_configure_session_auth(struct ccp_session *sess, + const struct rte_crypto_sym_xform *xform) +{ + const struct rte_crypto_auth_xform *auth_xform = NULL; + size_t i; + + auth_xform = &xform->auth; + + sess->auth.digest_length = auth_xform->digest_length; + if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) + sess->auth.op = CCP_AUTH_OP_GENERATE; + else + sess->auth.op = CCP_AUTH_OP_VERIFY; + switch (auth_xform->algo) { + case RTE_CRYPTO_AUTH_MD5_HMAC: + if (sess->auth_opt) { + sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC; + sess->auth.offset = ((CCP_SB_BYTES << 1) - + MD5_DIGEST_SIZE); + sess->auth.key_length = auth_xform->key.length; + sess->auth.block_size = MD5_BLOCK_SIZE; + memset(sess->auth.key, 0, sess->auth.block_size); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + } else + return -1; /* HMAC MD5 not supported on CCP */ + break; + case RTE_CRYPTO_AUTH_SHA1: + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.algo = CCP_AUTH_ALGO_SHA1; + sess->auth.ut.sha_type = CCP_SHA_TYPE_1; + sess->auth.ctx = (void *)ccp_sha1_init; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA1_HMAC: + if (sess->auth_opt) { + if (auth_xform->key.length > SHA1_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC; + sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; + sess->auth.block_size = SHA1_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + } else { + if (auth_xform->key.length > SHA1_BLOCK_SIZE) + return -1; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC; + sess->auth.ut.sha_type = CCP_SHA_TYPE_1; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; + sess->auth.block_size = SHA1_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, + sess->auth.ctx_len << 1); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + } + break; + case RTE_CRYPTO_AUTH_SHA224: + sess->auth.algo = CCP_AUTH_ALGO_SHA224; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_224; + sess->auth.ctx = (void *)ccp_sha224_init; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA224_HMAC: + if 
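+	/*
+	 * auth_opt is the PMD's auth-on-CPU option: when set, HMAC is
+	 * computed in software via OpenSSL (see cpu_crypto_auth() below),
+	 * so only the key and block size are recorded and no engine
+	 * precompute is generated.
+	 */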
(sess->auth_opt) { + if (auth_xform->key.length > SHA224_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC; + sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; + sess->auth.block_size = SHA224_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + } else { + if (auth_xform->key.length > SHA224_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_224; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; + sess->auth.block_size = SHA224_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, + sess->auth.ctx_len << 1); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + } + break; + case RTE_CRYPTO_AUTH_SHA3_224: + sess->auth.algo = CCP_AUTH_ALGO_SHA3_224; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA3_TYPE_224; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA3_224_HMAC: + if (auth_xform->key.length > SHA3_224_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA3_TYPE_224; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE; + sess->auth.block_size = SHA3_224_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + break; + case RTE_CRYPTO_AUTH_SHA256: + sess->auth.algo = CCP_AUTH_ALGO_SHA256; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_256; + sess->auth.ctx = (void *)ccp_sha256_init; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA256_HMAC: + if (sess->auth_opt) { + if (auth_xform->key.length > SHA256_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC; + sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE; + sess->auth.block_size = SHA256_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + } else { + if (auth_xform->key.length > SHA256_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_256; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE; + sess->auth.block_size = SHA256_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, + sess->auth.ctx_len << 1); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + } + break; + case RTE_CRYPTO_AUTH_SHA3_256: + sess->auth.algo = CCP_AUTH_ALGO_SHA3_256; + sess->auth.engine = CCP_ENGINE_SHA; + 
sess->auth.ut.sha_type = CCP_SHA3_TYPE_256; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA3_256_HMAC: + if (auth_xform->key.length > SHA3_256_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA3_TYPE_256; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE; + sess->auth.block_size = SHA3_256_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + break; + case RTE_CRYPTO_AUTH_SHA384: + sess->auth.algo = CCP_AUTH_ALGO_SHA384; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_384; + sess->auth.ctx = (void *)ccp_sha384_init; + sess->auth.ctx_len = CCP_SB_BYTES << 1; + sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA384_HMAC: + if (sess->auth_opt) { + if (auth_xform->key.length > SHA384_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC; + sess->auth.offset = ((CCP_SB_BYTES << 1) - + SHA384_DIGEST_SIZE); + sess->auth.block_size = SHA384_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + } else { + if (auth_xform->key.length > SHA384_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_384; + sess->auth.ctx_len = CCP_SB_BYTES << 1; + sess->auth.offset = ((CCP_SB_BYTES << 1) - + SHA384_DIGEST_SIZE); + sess->auth.block_size = SHA384_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, + sess->auth.ctx_len << 1); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + } + break; + case RTE_CRYPTO_AUTH_SHA3_384: + sess->auth.algo = CCP_AUTH_ALGO_SHA3_384; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA3_TYPE_384; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA3_384_HMAC: + if (auth_xform->key.length > SHA3_384_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA3_TYPE_384; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE; + sess->auth.block_size = SHA3_384_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + break; + case RTE_CRYPTO_AUTH_SHA512: + sess->auth.algo = CCP_AUTH_ALGO_SHA512; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_512; + sess->auth.ctx = (void *)ccp_sha512_init; + sess->auth.ctx_len = CCP_SB_BYTES << 1; + sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE; + 
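+	/*
+	 * SHA-384/512 keep a 64-byte internal state, so their context
+	 * spans two 32-byte LSB slots (ctx_len = CCP_SB_BYTES << 1) and
+	 * the digest is read from the tail of that double-width context.
+	 */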
break; + case RTE_CRYPTO_AUTH_SHA512_HMAC: + if (sess->auth_opt) { + if (auth_xform->key.length > SHA512_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC; + sess->auth.offset = ((CCP_SB_BYTES << 1) - + SHA512_DIGEST_SIZE); + sess->auth.block_size = SHA512_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + } else { + if (auth_xform->key.length > SHA512_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_512; + sess->auth.ctx_len = CCP_SB_BYTES << 1; + sess->auth.offset = ((CCP_SB_BYTES << 1) - + SHA512_DIGEST_SIZE); + sess->auth.block_size = SHA512_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, + sess->auth.ctx_len << 1); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + } + break; + case RTE_CRYPTO_AUTH_SHA3_512: + sess->auth.algo = CCP_AUTH_ALGO_SHA3_512; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA3_TYPE_512; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA3_512_HMAC: + if (auth_xform->key.length > SHA3_512_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA3_TYPE_512; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE; + sess->auth.block_size = SHA3_512_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + break; + case RTE_CRYPTO_AUTH_AES_CMAC: + sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC; + sess->auth.engine = CCP_ENGINE_AES; + sess->auth.um.aes_mode = CCP_AES_MODE_CMAC; + sess->auth.key_length = auth_xform->key.length; + /* padding and hash result */ + sess->auth.ctx_len = CCP_SB_BYTES << 1; + sess->auth.offset = AES_BLOCK_SIZE; + sess->auth.block_size = AES_BLOCK_SIZE; + if (sess->auth.key_length == 16) + sess->auth.ut.aes_type = CCP_AES_TYPE_128; + else if (sess->auth.key_length == 24) + sess->auth.ut.aes_type = CCP_AES_TYPE_192; + else if (sess->auth.key_length == 32) + sess->auth.ut.aes_type = CCP_AES_TYPE_256; + else { + CCP_LOG_ERR("Invalid CMAC key length"); + return -1; + } + rte_memcpy(sess->auth.key, auth_xform->key.data, + sess->auth.key_length); + for (i = 0; i < sess->auth.key_length; i++) + sess->auth.key_ccp[sess->auth.key_length - i - 1] = + sess->auth.key[i]; + if (generate_cmac_subkeys(sess)) + return -1; + break; + default: + CCP_LOG_ERR("Unsupported hash algo"); + return -ENOTSUP; + } + return 0; +} + +static int +ccp_configure_session_aead(struct ccp_session *sess, + const struct rte_crypto_sym_xform *xform) +{ + const struct rte_crypto_aead_xform *aead_xform = NULL; + size_t i; + + aead_xform = &xform->aead; + + sess->cipher.key_length = aead_xform->key.length; + rte_memcpy(sess->cipher.key, aead_xform->key.data, + aead_xform->key.length); + + if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) { + sess->cipher.dir = 
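+	/*
+	 * For AEAD the two halves are coupled: encrypt implies generating
+	 * the tag and decrypt implies verifying it, so cipher.dir and
+	 * auth.op are always set as a pair.
+	 */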
CCP_CIPHER_DIR_ENCRYPT; + sess->auth.op = CCP_AUTH_OP_GENERATE; + } else { + sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT; + sess->auth.op = CCP_AUTH_OP_VERIFY; + } + sess->aead_algo = aead_xform->algo; + sess->auth.aad_length = aead_xform->aad_length; + sess->auth.digest_length = aead_xform->digest_length; + + /* set iv parameters */ + sess->iv.offset = aead_xform->iv.offset; + sess->iv.length = aead_xform->iv.length; + + switch (aead_xform->algo) { + case RTE_CRYPTO_AEAD_AES_GCM: + sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM; + sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR; + sess->cipher.engine = CCP_ENGINE_AES; + if (sess->cipher.key_length == 16) + sess->cipher.ut.aes_type = CCP_AES_TYPE_128; + else if (sess->cipher.key_length == 24) + sess->cipher.ut.aes_type = CCP_AES_TYPE_192; + else if (sess->cipher.key_length == 32) + sess->cipher.ut.aes_type = CCP_AES_TYPE_256; + else { + CCP_LOG_ERR("Invalid aead key length"); + return -1; + } + for (i = 0; i < sess->cipher.key_length; i++) + sess->cipher.key_ccp[sess->cipher.key_length - i - 1] = + sess->cipher.key[i]; + sess->auth.algo = CCP_AUTH_ALGO_AES_GCM; + sess->auth.engine = CCP_ENGINE_AES; + sess->auth.um.aes_mode = CCP_AES_MODE_GHASH; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = 0; + sess->auth.block_size = AES_BLOCK_SIZE; + sess->cmd_id = CCP_CMD_COMBINED; + break; + default: + CCP_LOG_ERR("Unsupported aead algo"); + return -ENOTSUP; + } + sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce); + sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp); + return 0; +} + +int +ccp_set_session_parameters(struct ccp_session *sess, + const struct rte_crypto_sym_xform *xform, + struct ccp_private *internals) +{ + const struct rte_crypto_sym_xform *cipher_xform = NULL; + const struct rte_crypto_sym_xform *auth_xform = NULL; + const struct rte_crypto_sym_xform *aead_xform = NULL; + int ret = 0; + + sess->auth_opt = internals->auth_opt; + sess->cmd_id = ccp_get_cmd_id(xform); + + switch (sess->cmd_id) { + case CCP_CMD_CIPHER: + cipher_xform = xform; + break; + case CCP_CMD_AUTH: + auth_xform = xform; + break; + case CCP_CMD_CIPHER_HASH: + cipher_xform = xform; + auth_xform = xform->next; + break; + case CCP_CMD_HASH_CIPHER: + auth_xform = xform; + cipher_xform = xform->next; + break; + case CCP_CMD_COMBINED: + aead_xform = xform; + break; + default: + CCP_LOG_ERR("Unsupported cmd_id"); + return -1; + } + + /* Default IV length = 0 */ + sess->iv.length = 0; + if (cipher_xform) { + ret = ccp_configure_session_cipher(sess, cipher_xform); + if (ret != 0) { + CCP_LOG_ERR("Invalid/unsupported cipher parameters"); + return ret; + } + } + if (auth_xform) { + ret = ccp_configure_session_auth(sess, auth_xform); + if (ret != 0) { + CCP_LOG_ERR("Invalid/unsupported auth parameters"); + return ret; + } + } + if (aead_xform) { + ret = ccp_configure_session_aead(sess, aead_xform); + if (ret != 0) { + CCP_LOG_ERR("Invalid/unsupported aead parameters"); + return ret; + } + } + return ret; +} + +/* calculate CCP descriptors requirement */ +static inline int +ccp_cipher_slot(struct ccp_session *session) +{ + int count = 0; + + switch (session->cipher.algo) { + case CCP_CIPHER_ALGO_AES_CBC: + count = 2; + /**< op + passthrough for iv */ + break; + case CCP_CIPHER_ALGO_AES_ECB: + count = 1; + /**< only op */ + break; + case CCP_CIPHER_ALGO_AES_CTR: + count = 2; + /**< op + passthrough for iv */ + break; + case CCP_CIPHER_ALGO_3DES_CBC: + count = 2; + /**< op + passthrough for iv */ + break; + default: + CCP_LOG_ERR("Unsupported cipher algo %d", + session->cipher.algo); + } + return count; +} + +static inline int +ccp_auth_slot(struct ccp_session *session) +{ + int count = 0; + + switch (session->auth.algo) { + case CCP_AUTH_ALGO_SHA1: + case CCP_AUTH_ALGO_SHA224: + case CCP_AUTH_ALGO_SHA256: + 
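+	/*
+	 * Descriptor budgeting: a plain SHA costs three queue entries -
+	 * load the initial context into the LSB, run the SHA op, and copy
+	 * the result back out (the "op + lsb passthrough cpy to/from"
+	 * noted below).
+	 */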
case CCP_AUTH_ALGO_SHA384: + case CCP_AUTH_ALGO_SHA512: + count = 3; + /**< op + lsb passthrough cpy to/from */ + break; + case CCP_AUTH_ALGO_MD5_HMAC: + break; + case CCP_AUTH_ALGO_SHA1_HMAC: + case CCP_AUTH_ALGO_SHA224_HMAC: + case CCP_AUTH_ALGO_SHA256_HMAC: + if (session->auth_opt == 0) + count = 6; + break; + case CCP_AUTH_ALGO_SHA384_HMAC: + case CCP_AUTH_ALGO_SHA512_HMAC: + /** + * 1. Load PHash1 = H(k ^ ipad) to LSB + * 2. Generate IHash = H(hash on message with PHash1 + * as init values) + * 3. Retrieve IHash (2 slots for 384/512) + * 4. Load PHash2 = H(k ^ opad) to LSB + * 5. Generate FHash = H(hash on IHash with PHash2 + * as init value) + * 6. Retrieve HMAC output from LSB to host memory + */ + if (session->auth_opt == 0) + count = 7; + break; + case CCP_AUTH_ALGO_SHA3_224: + case CCP_AUTH_ALGO_SHA3_256: + case CCP_AUTH_ALGO_SHA3_384: + case CCP_AUTH_ALGO_SHA3_512: + count = 1; + /**< only op, ctx and dst in host memory */ + break; + case CCP_AUTH_ALGO_SHA3_224_HMAC: + case CCP_AUTH_ALGO_SHA3_256_HMAC: + count = 3; + break; + case CCP_AUTH_ALGO_SHA3_384_HMAC: + case CCP_AUTH_ALGO_SHA3_512_HMAC: + count = 4; + /** + * 1. Op to perform IHash + * 2. Retrieve result from LSB to host memory + * 3. Perform final hash + */ + break; + case CCP_AUTH_ALGO_AES_CMAC: + count = 4; + /** + * op + * extra descriptor in padding case + * (k1/k2(255:128) with iv(127:0)) + * Retrieve result + */ + break; + default: + CCP_LOG_ERR("Unsupported auth algo %d", + session->auth.algo); + } + + return count; +} + +static int +ccp_aead_slot(struct ccp_session *session) +{ + int count = 0; + + switch (session->aead_algo) { + case RTE_CRYPTO_AEAD_AES_GCM: + break; + default: + CCP_LOG_ERR("Unsupported aead algo %d", + session->aead_algo); + } + switch (session->auth.algo) { + case CCP_AUTH_ALGO_AES_GCM: + count = 5; + /** + * 1. Passthru iv + * 2. Hash AAD + * 3. GCTR + * 4. Reload passthru + * 5. 
Hash Final tag + */ + break; + default: + CCP_LOG_ERR("Unsupported combined auth ALGO %d", + session->auth.algo); + } + return count; +} + +int +ccp_compute_slot_count(struct ccp_session *session) +{ + int count = 0; + + switch (session->cmd_id) { + case CCP_CMD_CIPHER: + count = ccp_cipher_slot(session); + break; + case CCP_CMD_AUTH: + count = ccp_auth_slot(session); + break; + case CCP_CMD_CIPHER_HASH: + case CCP_CMD_HASH_CIPHER: + count = ccp_cipher_slot(session); + count += ccp_auth_slot(session); + break; + case CCP_CMD_COMBINED: + count = ccp_aead_slot(session); + break; + default: + CCP_LOG_ERR("Unsupported cmd_id"); + + } + + return count; +} + +static uint8_t +algo_select(int sessalgo, + const EVP_MD **algo) +{ + int res = 0; + + switch (sessalgo) { + case CCP_AUTH_ALGO_MD5_HMAC: + *algo = EVP_md5(); + break; + case CCP_AUTH_ALGO_SHA1_HMAC: + *algo = EVP_sha1(); + break; + case CCP_AUTH_ALGO_SHA224_HMAC: + *algo = EVP_sha224(); + break; + case CCP_AUTH_ALGO_SHA256_HMAC: + *algo = EVP_sha256(); + break; + case CCP_AUTH_ALGO_SHA384_HMAC: + *algo = EVP_sha384(); + break; + case CCP_AUTH_ALGO_SHA512_HMAC: + *algo = EVP_sha512(); + break; + default: + res = -EINVAL; + break; + } + return res; +} + +static int +process_cpu_auth_hmac(uint8_t *src, uint8_t *dst, + __rte_unused uint8_t *iv, + EVP_PKEY *pkey, + int srclen, + EVP_MD_CTX *ctx, + const EVP_MD *algo, + uint16_t d_len) +{ + size_t dstlen; + unsigned char temp_dst[64]; + + if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0) + goto process_auth_err; + + if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0) + goto process_auth_err; + + if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0) + goto process_auth_err; + + memcpy(dst, temp_dst, d_len); + return 0; +process_auth_err: + CCP_LOG_ERR("Process cpu auth failed"); + return -EINVAL; +} + +static int cpu_crypto_auth(struct ccp_qp *qp, + struct rte_crypto_op *op, + struct ccp_session *sess, + EVP_MD_CTX *ctx) +{ + uint8_t *src, *dst; + int srclen, status; + struct rte_mbuf *mbuf_src, *mbuf_dst; + const EVP_MD *algo = NULL; + EVP_PKEY *pkey; + + algo_select(sess->auth.algo, &algo); + pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key, + sess->auth.key_length); + mbuf_src = op->sym->m_src; + mbuf_dst = op->sym->m_dst ? 
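+	/*
+	 * Per the cryptodev convention, a NULL m_dst means an in-place
+	 * operation, so the source mbuf doubles as the destination.
+	 */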
op->sym->m_dst : op->sym->m_src; + srclen = op->sym->auth.data.length; + src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *, + op->sym->auth.data.offset); + + if (sess->auth.op == CCP_AUTH_OP_VERIFY) { + dst = qp->temp_digest; + } else { + dst = op->sym->auth.digest.data; + if (dst == NULL) { + dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *, + op->sym->auth.data.offset + + sess->auth.digest_length); + } + } + status = process_cpu_auth_hmac(src, dst, NULL, + pkey, srclen, + ctx, + algo, + sess->auth.digest_length); + if (status) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + return status; + } + + if (sess->auth.op == CCP_AUTH_OP_VERIFY) { + if (memcmp(dst, op->sym->auth.digest.data, + sess->auth.digest_length) != 0) { + op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; + } else { + op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + } + } else { + op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + } + EVP_PKEY_free(pkey); + return 0; +} + +static void +ccp_perform_passthru(struct ccp_passthru *pst, + struct ccp_queue *cmd_q) +{ + struct ccp_desc *desc; + union ccp_function function; + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU; + + CCP_CMD_SOC(desc) = 0; + CCP_CMD_IOC(desc) = 0; + CCP_CMD_INIT(desc) = 0; + CCP_CMD_EOM(desc) = 0; + CCP_CMD_PROT(desc) = 0; + + function.raw = 0; + CCP_PT_BYTESWAP(&function) = pst->byte_swap; + CCP_PT_BITWISE(&function) = pst->bit_mod; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = pst->len; + + if (pst->dir) { + CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr); + CCP_CMD_DST_HI(desc) = 0; + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB; + + if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) + CCP_CMD_LSB_ID(desc) = cmd_q->sb_key; + } else { + + CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr); + CCP_CMD_SRC_HI(desc) = 0; + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB; + + CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr); + CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr); + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM; + } + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; +} + +static int +ccp_perform_hmac(struct rte_crypto_op *op, + struct ccp_queue *cmd_q) +{ + + struct ccp_session *session; + union ccp_function function; + struct ccp_desc *desc; + uint32_t tail; + phys_addr_t src_addr, dest_addr, dest_addr_t; + struct ccp_passthru pst; + uint64_t auth_msg_bits; + void *append_ptr; + uint8_t *addr; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + addr = session->auth.pre_compute; + + src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->auth.data.offset); + append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src, + session->auth.ctx_len); + dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr); + dest_addr_t = dest_addr; + + /** Load PHash1 to LSB*/ + pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.len = session->auth.ctx_len; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + ccp_perform_passthru(&pst, cmd_q); + + /**sha engine command descriptor for IntermediateHash*/ + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA; + + CCP_CMD_SOC(desc) = 0; + CCP_CMD_IOC(desc) = 0; + 
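+	/*
+	 * Descriptor control bits, going by their names: SOC stops the
+	 * queue on completion, IOC raises an interrupt, INIT loads the
+	 * context from the LSB before the op, and EOM marks the final
+	 * block so the engine applies message padding. Note auth_msg_bits
+	 * below adds block_size to the data length: the padding must also
+	 * count the already-absorbed K ^ ipad block.
+	 */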
CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + CCP_CMD_PROT(desc) = 0; + + function.raw = 0; + CCP_SHA_TYPE(&function) = session->auth.ut.sha_type; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = op->sym->auth.data.length; + auth_msg_bits = (op->sym->auth.data.length + + session->auth.block_size) * 8; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha; + CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits); + CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits); + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + /* Intermediate Hash value retrieve */ + if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) || + (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) { + + pst.src_addr = + (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES); + pst.dest_addr = dest_addr_t; + pst.len = CCP_SB_BYTES; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.dest_addr = dest_addr_t + CCP_SB_BYTES; + pst.len = CCP_SB_BYTES; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + } else { + pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.dest_addr = dest_addr_t; + pst.len = session->auth.ctx_len; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + } + + /** Load PHash2 to LSB*/ + addr += session->auth.ctx_len; + pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.len = session->auth.ctx_len; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + ccp_perform_passthru(&pst, cmd_q); + + /**sha engine command descriptor for FinalHash*/ + dest_addr_t += session->auth.offset; + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA; + + CCP_CMD_SOC(desc) = 0; + CCP_CMD_IOC(desc) = 0; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + CCP_CMD_PROT(desc) = 0; + + function.raw = 0; + CCP_SHA_TYPE(&function) = session->auth.ut.sha_type; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = (session->auth.ctx_len - + session->auth.offset); + auth_msg_bits = (session->auth.block_size + + session->auth.ctx_len - + session->auth.offset) * 8; + + CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t); + CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha; + CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits); + CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits); + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + /* Retrieve hmac output */ + pst.src_addr = 
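+	/*
+	 * Step 6 of the flow sketched in ccp_auth_slot(): copy the
+	 * finished HMAC from the LSB into the area appended to the mbuf.
+	 */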
(phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.dest_addr = dest_addr; + pst.len = session->auth.ctx_len; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) || + (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + else + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; + +} + +static int +ccp_perform_sha(struct rte_crypto_op *op, + struct ccp_queue *cmd_q) +{ + struct ccp_session *session; + union ccp_function function; + struct ccp_desc *desc; + uint32_t tail; + phys_addr_t src_addr, dest_addr; + struct ccp_passthru pst; + void *append_ptr; + uint64_t auth_msg_bits; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->auth.data.offset); + + append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src, + session->auth.ctx_len); + dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr); + + /** Passthru sha context*/ + + pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) + session->auth.ctx); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.len = session->auth.ctx_len; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + ccp_perform_passthru(&pst, cmd_q); + + /**prepare sha command descriptor*/ + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA; + + CCP_CMD_SOC(desc) = 0; + CCP_CMD_IOC(desc) = 0; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + CCP_CMD_PROT(desc) = 0; + + function.raw = 0; + CCP_SHA_TYPE(&function) = session->auth.ut.sha_type; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = op->sym->auth.data.length; + auth_msg_bits = op->sym->auth.data.length * 8; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha; + CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits); + CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits); + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + /* Hash value retrieve */ + pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.dest_addr = dest_addr; + pst.len = session->auth.ctx_len; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) || + (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + else + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; + +} + +static int +ccp_perform_sha3_hmac(struct rte_crypto_op *op, + struct ccp_queue *cmd_q) +{ + struct ccp_session *session; + struct ccp_passthru pst; + union ccp_function function; + struct ccp_desc *desc; + uint8_t *append_ptr; + uint32_t tail; + phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + src_addr = 
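+	/*
+	 * SHA3 HMAC runs as two SHA3 ops: the first absorbs the message
+	 * starting from the precomputed K ^ ipad state (handed to the
+	 * engine via the KEY pointer), the intermediate digest is pulled
+	 * out of the LSB, and a second op finishes it from the K ^ opad
+	 * state.
+	 */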
rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->auth.data.offset); + append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src, + session->auth.ctx_len); + if (!append_ptr) { + CCP_LOG_ERR("CCP MBUF append failed\n"); + return -1; + } + dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr); + dest_addr_t = dest_addr + (session->auth.ctx_len / 2); + ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void + *)session->auth.pre_compute); + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + /*desc1 for SHA3-Ihash operation */ + CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + + function.raw = 0; + CCP_SHA_TYPE(&function) = session->auth.ut.sha_type; + CCP_CMD_FUNCTION(desc) = function.raw; + CCP_CMD_LEN(desc) = op->sym->auth.data.length; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES); + CCP_CMD_DST_HI(desc) = 0; + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr); + CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + /* Intermediate Hash value retrieve */ + if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) || + (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) { + + pst.src_addr = + (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES); + pst.dest_addr = dest_addr_t; + pst.len = CCP_SB_BYTES; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.dest_addr = dest_addr_t + CCP_SB_BYTES; + pst.len = CCP_SB_BYTES; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + } else { + pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.dest_addr = dest_addr_t; + pst.len = CCP_SB_BYTES; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + } + + /**sha engine command descriptor for FinalHash*/ + ctx_paddr += CCP_SHA3_CTX_SIZE; + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + + function.raw = 0; + CCP_SHA_TYPE(&function) = session->auth.ut.sha_type; + CCP_CMD_FUNCTION(desc) = function.raw; + + if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) { + dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE); + CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE; + } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) { + CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE; + } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) { + dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE); + CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE; + } else { + CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE; + } + + CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t); + CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = 
(uint32_t)dest_addr; + CCP_CMD_DST_HI(desc) = high32_value(dest_addr); + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr); + CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; +} + +static int +ccp_perform_sha3(struct rte_crypto_op *op, + struct ccp_queue *cmd_q) +{ + struct ccp_session *session; + union ccp_function function; + struct ccp_desc *desc; + uint8_t *ctx_addr, *append_ptr; + uint32_t tail; + phys_addr_t src_addr, dest_addr, ctx_paddr; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->auth.data.offset); + append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src, + session->auth.ctx_len); + if (!append_ptr) { + CCP_LOG_ERR("CCP MBUF append failed\n"); + return -1; + } + dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr); + ctx_addr = session->auth.sha3_ctx; + ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr); + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + /* prepare desc for SHA3 operation */ + CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + + function.raw = 0; + CCP_SHA_TYPE(&function) = session->auth.ut.sha_type; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = op->sym->auth.data.length; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr); + CCP_CMD_DST_HI(desc) = high32_value(dest_addr); + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr); + CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; +} + +static int +ccp_perform_aes_cmac(struct rte_crypto_op *op, + struct ccp_queue *cmd_q) +{ + struct ccp_session *session; + union ccp_function function; + struct ccp_passthru pst; + struct ccp_desc *desc; + uint32_t tail; + uint8_t *src_tb, *append_ptr, *ctx_addr; + phys_addr_t src_addr, dest_addr, key_addr; + int length, non_align_len; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + key_addr = rte_mem_virt2phy(session->auth.key_ccp); + + src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->auth.data.offset); + append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src, + session->auth.ctx_len); + dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr); + + function.raw = 0; + CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT; + CCP_AES_MODE(&function) = session->auth.um.aes_mode; + CCP_AES_TYPE(&function) = session->auth.ut.aes_type; + + if 
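+	/*
+	 * Standard CMAC final-block handling: a whole number of blocks is
+	 * masked with subkey K1 and needs a single command, while a
+	 * partial tail is padded with 0x80 0x00... (CMAC_PAD_VALUE below)
+	 * and masked with K2, costing a second command for the padded
+	 * block. Each pre_compute slot pairs the subkey with a zero IV,
+	 * per the "(k1/k2(255:128) with iv(127:0))" layout noted in
+	 * ccp_auth_slot().
+	 */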
(op->sym->auth.data.length % session->auth.block_size == 0) { + + ctx_addr = session->auth.pre_compute; + memset(ctx_addr, 0, AES_BLOCK_SIZE); + pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + ccp_perform_passthru(&pst, cmd_q); + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + /* prepare desc for aes-cmac command */ + CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES; + CCP_CMD_EOM(desc) = 1; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = op->sym->auth.data.length; + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + + tail = + (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + } else { + ctx_addr = session->auth.pre_compute + CCP_SB_BYTES; + memset(ctx_addr, 0, AES_BLOCK_SIZE); + pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + ccp_perform_passthru(&pst, cmd_q); + + length = (op->sym->auth.data.length / AES_BLOCK_SIZE); + length *= AES_BLOCK_SIZE; + non_align_len = op->sym->auth.data.length - length; + /* prepare desc for aes-cmac command */ + /*Command 1*/ + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = length; + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + /*Command 2*/ + append_ptr = append_ptr + CCP_SB_BYTES; + memset(append_ptr, 0, AES_BLOCK_SIZE); + src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src, + uint8_t *, + op->sym->auth.data.offset + + length); + rte_memcpy(append_ptr, src_tb, non_align_len); + append_ptr[non_align_len] = CMAC_PAD_VALUE; + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES; + CCP_CMD_EOM(desc) = 1; + CCP_CMD_FUNCTION(desc) = function.raw; + CCP_CMD_LEN(desc) = AES_BLOCK_SIZE; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES)); + CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + tail = + (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * 
Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + } + /* Retrieve result */ + pst.dest_addr = dest_addr; + pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; +} + +static int +ccp_perform_aes(struct rte_crypto_op *op, + struct ccp_queue *cmd_q, + struct ccp_batch_info *b_info) +{ + struct ccp_session *session; + union ccp_function function; + uint8_t *lsb_buf; + struct ccp_passthru pst = {0}; + struct ccp_desc *desc; + phys_addr_t src_addr, dest_addr, key_addr; + uint8_t *iv; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + function.raw = 0; + + iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset); + if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) { + if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) { + rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, + iv, session->iv.length); + pst.src_addr = (phys_addr_t)session->cipher.nonce_phys; + CCP_AES_SIZE(&function) = 0x1F; + } else { + lsb_buf = + &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]); + rte_memcpy(lsb_buf + + (CCP_SB_BYTES - session->iv.length), + iv, session->iv.length); + pst.src_addr = b_info->lsb_buf_phys + + (b_info->lsb_buf_idx * CCP_SB_BYTES); + b_info->lsb_buf_idx++; + } + + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + } + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + + src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->cipher.data.offset); + if (likely(op->sym->m_dst != NULL)) + dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst, + op->sym->cipher.data.offset); + else + dest_addr = src_addr; + key_addr = session->cipher.key_phys; + + /* prepare desc for aes command */ + CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + + CCP_AES_ENCRYPT(&function) = session->cipher.dir; + CCP_AES_MODE(&function) = session->cipher.um.aes_mode; + CCP_AES_TYPE(&function) = session->cipher.ut.aes_type; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = op->sym->cipher.data.length; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr); + CCP_CMD_DST_HI(desc) = high32_value(dest_addr); + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; +} + +static int +ccp_perform_3des(struct rte_crypto_op *op, + struct ccp_queue *cmd_q, + struct ccp_batch_info *b_info) +{ + struct ccp_session *session; + union ccp_function function; + unsigned char *lsb_buf; + struct ccp_passthru pst; + struct ccp_desc *desc; + uint32_t tail; + uint8_t *iv; + phys_addr_t src_addr, dest_addr, key_addr; 
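+	/*
+	 * The 3DES path mirrors AES-CBC: stage the IV into an LSB slot by
+	 * passthrough, then issue a single 3DES command. Only CBC mode is
+	 * wired up; CFB and ECB are rejected below.
+	 */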
+ + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset); + switch (session->cipher.um.des_mode) { + case CCP_DES_MODE_CBC: + lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]); + b_info->lsb_buf_idx++; + + rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length), + iv, session->iv.length); + + pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + break; + case CCP_DES_MODE_CFB: + case CCP_DES_MODE_ECB: + CCP_LOG_ERR("Unsupported DES cipher mode"); + return -ENOTSUP; + } + + src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->cipher.data.offset); + if (unlikely(op->sym->m_dst != NULL)) + dest_addr = + rte_pktmbuf_mtophys_offset(op->sym->m_dst, + op->sym->cipher.data.offset); + else + dest_addr = src_addr; + + key_addr = rte_mem_virt2phy(session->cipher.key_ccp); + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + + memset(desc, 0, Q_DESC_SIZE); + + /* prepare desc for des command */ + CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES; + + CCP_CMD_SOC(desc) = 0; + CCP_CMD_IOC(desc) = 0; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + CCP_CMD_PROT(desc) = 0; + + function.raw = 0; + CCP_DES_ENCRYPT(&function) = session->cipher.dir; + CCP_DES_MODE(&function) = session->cipher.um.des_mode; + CCP_DES_TYPE(&function) = session->cipher.ut.des_type; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = op->sym->cipher.data.length; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr); + CCP_CMD_DST_HI(desc) = high32_value(dest_addr); + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + if (session->cipher.um.des_mode) + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + + /* Write the new tail address back to the queue register */ + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + /* Turn the queue back on using our cached control register */ + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; +} + +static int +ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q) +{ + struct ccp_session *session; + union ccp_function function; + uint8_t *iv; + struct ccp_passthru pst; + struct ccp_desc *desc; + uint32_t tail; + uint64_t *temp; + phys_addr_t src_addr, dest_addr, key_addr, aad_addr; + phys_addr_t digest_dest_addr; + int length, non_align_len; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset); + key_addr = session->cipher.key_phys; + + src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->aead.data.offset); + if (unlikely(op->sym->m_dst != NULL)) + dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst, + op->sym->aead.data.offset); + else + 
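/* in-place operation: no separate m_dst, so write back over the source */ +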
dest_addr = src_addr; + rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len); + digest_dest_addr = op->sym->aead.digest.phys_addr; + temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE); + *temp++ = rte_bswap64(session->auth.aad_length << 3); + *temp = rte_bswap64(op->sym->aead.data.length << 3); + + non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE; + length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE); + + aad_addr = op->sym->aead.aad.phys_addr; + + /* CMD1 IV Passthru */ + rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv, + session->iv.length); + pst.src_addr = session->cipher.nonce_phys; + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + ccp_perform_passthru(&pst, cmd_q); + + /* CMD2 GHASH-AAD */ + function.raw = 0; + CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD; + CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH; + CCP_AES_TYPE(&function) = session->cipher.ut.aes_type; + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = session->auth.aad_length; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr); + CCP_CMD_SRC_HI(desc) = high32_value(aad_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + rte_wmb(); + + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + /* CMD3 : GCTR Plain text */ + function.raw = 0; + CCP_AES_ENCRYPT(&function) = session->cipher.dir; + CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR; + CCP_AES_TYPE(&function) = session->cipher.ut.aes_type; + if (non_align_len == 0) + CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1; + else + CCP_AES_SIZE(&function) = (non_align_len << 3) - 1; + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES; + CCP_CMD_EOM(desc) = 1; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = length; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr); + CCP_CMD_DST_HI(desc) = high32_value(dest_addr); + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + rte_wmb(); + + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + /* CMD4 : PT to copy IV */ + pst.src_addr = session->cipher.nonce_phys; + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = AES_BLOCK_SIZE; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; +
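/* Recap of the five descriptors this AEAD path submits (names as used + * in this file): CMD1 stages nonce||IV into the queue's sb_iv LSB slot; + * CMD2 runs GHASH over the AAD; CMD3 runs GCTR over the block-aligned + * payload, with CCP_AES_SIZE carrying the final block's bit length minus + * one; CMD4, set up just above, reloads the IV for the final hash; CMD5 + * then hashes the (AAD_len || PT_len) block stored after the digest to + * produce the tag at digest_dest_addr. + */ +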
ccp_perform_passthru(&pst, cmd_q); + + /* CMD5 : GHASH-Final */ + function.raw = 0; + CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL; + CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH; + CCP_AES_TYPE(&function) = session->cipher.ut.aes_type; + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES; + CCP_CMD_FUNCTION(desc) = function.raw; + /* Last block (AAD_len || PT_len) */ + CCP_CMD_LEN(desc) = AES_BLOCK_SIZE; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE); + CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr); + CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr); + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + rte_wmb(); + + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; +} + +static inline int +ccp_crypto_cipher(struct rte_crypto_op *op, + struct ccp_queue *cmd_q, + struct ccp_batch_info *b_info) +{ + int result = 0; + struct ccp_session *session; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + switch (session->cipher.algo) { + case CCP_CIPHER_ALGO_AES_CBC: + result = ccp_perform_aes(op, cmd_q, b_info); + b_info->desccnt += 2; + break; + case CCP_CIPHER_ALGO_AES_CTR: + result = ccp_perform_aes(op, cmd_q, b_info); + b_info->desccnt += 2; + break; + case CCP_CIPHER_ALGO_AES_ECB: + result = ccp_perform_aes(op, cmd_q, b_info); + b_info->desccnt += 1; + break; + case CCP_CIPHER_ALGO_3DES_CBC: + result = ccp_perform_3des(op, cmd_q, b_info); + b_info->desccnt += 2; + break; + default: + CCP_LOG_ERR("Unsupported cipher algo %d", + session->cipher.algo); + return -ENOTSUP; + } + return result; +} + +static inline int +ccp_crypto_auth(struct rte_crypto_op *op, + struct ccp_queue *cmd_q, + struct ccp_batch_info *b_info) +{ + + int result = 0; + struct ccp_session *session; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + switch (session->auth.algo) { + case CCP_AUTH_ALGO_SHA1: + case CCP_AUTH_ALGO_SHA224: + case CCP_AUTH_ALGO_SHA256: + case CCP_AUTH_ALGO_SHA384: + case CCP_AUTH_ALGO_SHA512: + result = ccp_perform_sha(op, cmd_q); + b_info->desccnt += 3; + break; + case CCP_AUTH_ALGO_MD5_HMAC: + if (session->auth_opt == 0) + result = -1; + break; + case CCP_AUTH_ALGO_SHA1_HMAC: + case CCP_AUTH_ALGO_SHA224_HMAC: + case CCP_AUTH_ALGO_SHA256_HMAC: + if (session->auth_opt == 0) { + result = ccp_perform_hmac(op, cmd_q); + b_info->desccnt += 6; + } + break; + case CCP_AUTH_ALGO_SHA384_HMAC: + case CCP_AUTH_ALGO_SHA512_HMAC: + if (session->auth_opt == 0) { + result = ccp_perform_hmac(op, cmd_q); + b_info->desccnt += 7; + } + break; + case CCP_AUTH_ALGO_SHA3_224: + case CCP_AUTH_ALGO_SHA3_256: + case CCP_AUTH_ALGO_SHA3_384: + case CCP_AUTH_ALGO_SHA3_512: + result = ccp_perform_sha3(op, cmd_q); + b_info->desccnt += 1; + break; + case CCP_AUTH_ALGO_SHA3_224_HMAC: + case
CCP_AUTH_ALGO_SHA3_256_HMAC: + result = ccp_perform_sha3_hmac(op, cmd_q); + b_info->desccnt += 3; + break; + case CCP_AUTH_ALGO_SHA3_384_HMAC: + case CCP_AUTH_ALGO_SHA3_512_HMAC: + result = ccp_perform_sha3_hmac(op, cmd_q); + b_info->desccnt += 4; + break; + case CCP_AUTH_ALGO_AES_CMAC: + result = ccp_perform_aes_cmac(op, cmd_q); + b_info->desccnt += 4; + break; + default: + CCP_LOG_ERR("Unsupported auth algo %d", + session->auth.algo); + return -ENOTSUP; + } + + return result; +} + +static inline int +ccp_crypto_aead(struct rte_crypto_op *op, + struct ccp_queue *cmd_q, + struct ccp_batch_info *b_info) +{ + int result = 0; + struct ccp_session *session; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + switch (session->auth.algo) { + case CCP_AUTH_ALGO_AES_GCM: + if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) { + CCP_LOG_ERR("Incorrect chain order"); + return -1; + } + result = ccp_perform_aes_gcm(op, cmd_q); + b_info->desccnt += 5; + break; + default: + CCP_LOG_ERR("Unsupported aead algo %d", + session->aead_algo); + return -ENOTSUP; + } + return result; +} + +int +process_ops_to_enqueue(struct ccp_qp *qp, + struct rte_crypto_op **op, + struct ccp_queue *cmd_q, + uint16_t nb_ops, + int slots_req) +{ + int i, result = 0; + struct ccp_batch_info *b_info; + struct ccp_session *session; + EVP_MD_CTX *auth_ctx = NULL; + + if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) { + CCP_LOG_ERR("batch info allocation failed"); + return 0; + } + + auth_ctx = EVP_MD_CTX_create(); + if (unlikely(!auth_ctx)) { + CCP_LOG_ERR("Unable to create auth ctx"); + rte_mempool_put(qp->batch_mp, (void *)b_info); + return 0; + } + b_info->auth_ctr = 0; + + /* populate batch info necessary for dequeue */ + b_info->op_idx = 0; + b_info->lsb_buf_idx = 0; + b_info->desccnt = 0; + b_info->cmd_q = cmd_q; + b_info->lsb_buf_phys = + (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf); + rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req); + + b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * + Q_DESC_SIZE); + for (i = 0; i < nb_ops; i++) { + session = (struct ccp_session *)get_sym_session_private_data( + op[i]->sym->session, + ccp_cryptodev_driver_id); + switch (session->cmd_id) { + case CCP_CMD_CIPHER: + result = ccp_crypto_cipher(op[i], cmd_q, b_info); + break; + case CCP_CMD_AUTH: + if (session->auth_opt) { + b_info->auth_ctr++; + result = cpu_crypto_auth(qp, op[i], + session, auth_ctx); + } else + result = ccp_crypto_auth(op[i], cmd_q, b_info); + break; + case CCP_CMD_CIPHER_HASH: + result = ccp_crypto_cipher(op[i], cmd_q, b_info); + if (result) + break; + result = ccp_crypto_auth(op[i], cmd_q, b_info); + break; + case CCP_CMD_HASH_CIPHER: + if (session->auth_opt) { + result = cpu_crypto_auth(qp, op[i], + session, auth_ctx); + if (op[i]->status != + RTE_CRYPTO_OP_STATUS_SUCCESS) + continue; + } else + result = ccp_crypto_auth(op[i], cmd_q, b_info); + + if (result) + break; + result = ccp_crypto_cipher(op[i], cmd_q, b_info); + break; + case CCP_CMD_COMBINED: + result = ccp_crypto_aead(op[i], cmd_q, b_info); + break; + default: + CCP_LOG_ERR("Unsupported cmd_id"); + result = -1; + } + if (unlikely(result < 0)) { + rte_atomic64_add(&b_info->cmd_q->free_slots, + (slots_req - b_info->desccnt)); + break; + } + b_info->op[i] = op[i]; + } + + b_info->opcnt = i; + b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * + Q_DESC_SIZE); + + rte_wmb(); + /* Write the new tail address back to the queue register */ + CCP_WRITE_REG(cmd_q->reg_base,
CMD_Q_TAIL_LO_BASE, + b_info->tail_offset); + /* Turn the queue back on using our cached control register */ + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + rte_ring_enqueue(qp->processed_pkts, (void *)b_info); + + EVP_MD_CTX_destroy(auth_ctx); + return i; +} + +static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op) +{ + struct ccp_session *session; + uint8_t *digest_data, *addr; + struct rte_mbuf *m_last; + int offset, digest_offset; + uint8_t digest_le[64]; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + if (session->cmd_id == CCP_CMD_COMBINED) { + digest_data = op->sym->aead.digest.data; + digest_offset = op->sym->aead.data.offset + + op->sym->aead.data.length; + } else { + digest_data = op->sym->auth.digest.data; + digest_offset = op->sym->auth.data.offset + + op->sym->auth.data.length; + } + m_last = rte_pktmbuf_lastseg(op->sym->m_src); + addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off + + m_last->data_len - session->auth.ctx_len); + + rte_mb(); + offset = session->auth.offset; + + if (session->auth.engine == CCP_ENGINE_SHA) + if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) && + (session->auth.ut.sha_type != CCP_SHA_TYPE_224) && + (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) { + /* All other algorithms require byte + * swap done by host + */ + unsigned int i; + + offset = session->auth.ctx_len - + session->auth.offset - 1; + for (i = 0; i < session->auth.digest_length; i++) + digest_le[i] = addr[offset - i]; + offset = 0; + addr = digest_le; + } + + op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + if (session->auth.op == CCP_AUTH_OP_VERIFY) { + if (memcmp(addr + offset, digest_data, + session->auth.digest_length) != 0) + op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; + + } else { + if (unlikely(digest_data == 0)) + digest_data = rte_pktmbuf_mtod_offset( + op->sym->m_dst, uint8_t *, + digest_offset); + rte_memcpy(digest_data, addr + offset, + session->auth.digest_length); + } + /* Trim area used for digest from mbuf. 
*/ + rte_pktmbuf_trim(op->sym->m_src, + session->auth.ctx_len); +} + +static int +ccp_prepare_ops(struct ccp_qp *qp, + struct rte_crypto_op **op_d, + struct ccp_batch_info *b_info, + uint16_t nb_ops) +{ + int i, min_ops; + struct ccp_session *session; + + EVP_MD_CTX *auth_ctx = NULL; + + auth_ctx = EVP_MD_CTX_create(); + if (unlikely(!auth_ctx)) { + CCP_LOG_ERR("Unable to create auth ctx"); + return 0; + } + min_ops = RTE_MIN(nb_ops, b_info->opcnt); + + for (i = 0; i < min_ops; i++) { + op_d[i] = b_info->op[b_info->op_idx++]; + session = (struct ccp_session *)get_sym_session_private_data( + op_d[i]->sym->session, + ccp_cryptodev_driver_id); + switch (session->cmd_id) { + case CCP_CMD_CIPHER: + op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + break; + case CCP_CMD_AUTH: + if (session->auth_opt == 0) + ccp_auth_dq_prepare(op_d[i]); + break; + case CCP_CMD_CIPHER_HASH: + if (session->auth_opt) + cpu_crypto_auth(qp, op_d[i], + session, auth_ctx); + else + ccp_auth_dq_prepare(op_d[i]); + break; + case CCP_CMD_HASH_CIPHER: + if (session->auth_opt) + op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + else + ccp_auth_dq_prepare(op_d[i]); + break; + case CCP_CMD_COMBINED: + ccp_auth_dq_prepare(op_d[i]); + break; + default: + CCP_LOG_ERR("Unsupported cmd_id"); + } + } + + EVP_MD_CTX_destroy(auth_ctx); + b_info->opcnt -= min_ops; + return min_ops; +} + +int +process_ops_to_dequeue(struct ccp_qp *qp, + struct rte_crypto_op **op, + uint16_t nb_ops) +{ + struct ccp_batch_info *b_info; + uint32_t cur_head_offset; + + if (qp->b_info != NULL) { + b_info = qp->b_info; + if (unlikely(b_info->op_idx > 0)) + goto success; + } else if (rte_ring_dequeue(qp->processed_pkts, + (void **)&b_info)) + return 0; + + if (b_info->auth_ctr == b_info->opcnt) + goto success; + cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base, + CMD_Q_HEAD_LO_BASE); + + if (b_info->head_offset < b_info->tail_offset) { + if ((cur_head_offset >= b_info->head_offset) && + (cur_head_offset < b_info->tail_offset)) { + qp->b_info = b_info; + return 0; + } + } else { + if ((cur_head_offset >= b_info->head_offset) || + (cur_head_offset < b_info->tail_offset)) { + qp->b_info = b_info; + return 0; + } + } + + +success: + nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops); + rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt); + b_info->desccnt = 0; + if (b_info->opcnt > 0) { + qp->b_info = b_info; + } else { + rte_mempool_put(qp->batch_mp, (void *)b_info); + qp->b_info = NULL; + } + + return nb_ops; +} diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h new file mode 100644 index 00000000..882b398a --- /dev/null +++ b/drivers/crypto/ccp/ccp_crypto.h @@ -0,0 +1,388 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. 
+ */ + +#ifndef _CCP_CRYPTO_H_ +#define _CCP_CRYPTO_H_ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "ccp_dev.h" + +#define AES_BLOCK_SIZE 16 +#define CMAC_PAD_VALUE 0x80 +#define CTR_NONCE_SIZE 4 +#define CTR_IV_SIZE 8 +#define CCP_SHA3_CTX_SIZE 200 + +/** Macro helpers for CCP command creation */ +#define CCP_AES_SIZE(p) ((p)->aes.size) +#define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt) +#define CCP_AES_MODE(p) ((p)->aes.mode) +#define CCP_AES_TYPE(p) ((p)->aes.type) +#define CCP_DES_ENCRYPT(p) ((p)->des.encrypt) +#define CCP_DES_MODE(p) ((p)->des.mode) +#define CCP_DES_TYPE(p) ((p)->des.type) +#define CCP_SHA_TYPE(p) ((p)->sha.type) +#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap) +#define CCP_PT_BITWISE(p) ((p)->pt.bitwise) + +/* HMAC */ +#define HMAC_IPAD_VALUE 0x36 +#define HMAC_OPAD_VALUE 0x5c + +/* MD5 */ +#define MD5_DIGEST_SIZE 16 +#define MD5_BLOCK_SIZE 64 + +/* SHA */ +#define SHA_COMMON_DIGEST_SIZE 32 +#define SHA1_DIGEST_SIZE 20 +#define SHA1_BLOCK_SIZE 64 + +#define SHA224_DIGEST_SIZE 28 +#define SHA224_BLOCK_SIZE 64 +#define SHA3_224_BLOCK_SIZE 144 + +#define SHA256_DIGEST_SIZE 32 +#define SHA256_BLOCK_SIZE 64 +#define SHA3_256_BLOCK_SIZE 136 + +#define SHA384_DIGEST_SIZE 48 +#define SHA384_BLOCK_SIZE 128 +#define SHA3_384_BLOCK_SIZE 104 + +#define SHA512_DIGEST_SIZE 64 +#define SHA512_BLOCK_SIZE 128 +#define SHA3_512_BLOCK_SIZE 72 + +/* Maximum length for digest */ +#define DIGEST_LENGTH_MAX 64 + +/* SHA LSB initialization values */ + +#define SHA1_H0 0x67452301UL +#define SHA1_H1 0xefcdab89UL +#define SHA1_H2 0x98badcfeUL +#define SHA1_H3 0x10325476UL +#define SHA1_H4 0xc3d2e1f0UL + +#define SHA224_H0 0xc1059ed8UL +#define SHA224_H1 0x367cd507UL +#define SHA224_H2 0x3070dd17UL +#define SHA224_H3 0xf70e5939UL +#define SHA224_H4 0xffc00b31UL +#define SHA224_H5 0x68581511UL +#define SHA224_H6 0x64f98fa7UL +#define SHA224_H7 0xbefa4fa4UL + +#define SHA256_H0 0x6a09e667UL +#define SHA256_H1 0xbb67ae85UL +#define SHA256_H2 0x3c6ef372UL +#define SHA256_H3 0xa54ff53aUL +#define SHA256_H4 0x510e527fUL +#define SHA256_H5 0x9b05688cUL +#define SHA256_H6 0x1f83d9abUL +#define SHA256_H7 0x5be0cd19UL + +#define SHA384_H0 0xcbbb9d5dc1059ed8ULL +#define SHA384_H1 0x629a292a367cd507ULL +#define SHA384_H2 0x9159015a3070dd17ULL +#define SHA384_H3 0x152fecd8f70e5939ULL +#define SHA384_H4 0x67332667ffc00b31ULL +#define SHA384_H5 0x8eb44a8768581511ULL +#define SHA384_H6 0xdb0c2e0d64f98fa7ULL +#define SHA384_H7 0x47b5481dbefa4fa4ULL + +#define SHA512_H0 0x6a09e667f3bcc908ULL +#define SHA512_H1 0xbb67ae8584caa73bULL +#define SHA512_H2 0x3c6ef372fe94f82bULL +#define SHA512_H3 0xa54ff53a5f1d36f1ULL +#define SHA512_H4 0x510e527fade682d1ULL +#define SHA512_H5 0x9b05688c2b3e6c1fULL +#define SHA512_H6 0x1f83d9abfb41bd6bULL +#define SHA512_H7 0x5be0cd19137e2179ULL + +/** + * CCP supported AES modes + */ +enum ccp_aes_mode { + CCP_AES_MODE_ECB = 0, + CCP_AES_MODE_CBC, + CCP_AES_MODE_OFB, + CCP_AES_MODE_CFB, + CCP_AES_MODE_CTR, + CCP_AES_MODE_CMAC, + CCP_AES_MODE_GHASH, + CCP_AES_MODE_GCTR, + CCP_AES_MODE__LAST, +}; + +/** + * CCP AES GHASH mode + */ +enum ccp_aes_ghash_mode { + CCP_AES_MODE_GHASH_AAD = 0, + CCP_AES_MODE_GHASH_FINAL +}; + +/** + * CCP supported AES types + */ +enum ccp_aes_type { + CCP_AES_TYPE_128 = 0, + CCP_AES_TYPE_192, + CCP_AES_TYPE_256, + CCP_AES_TYPE__LAST, +}; + +/***** 3DES engine *****/ + +/** + * CCP supported DES/3DES modes + */ +enum ccp_des_mode { + CCP_DES_MODE_ECB = 0, /* Not supported */ +
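/* CBC is the only DES mode this PMD drives end to end; + * ccp_perform_3des() rejects CFB and ECB with -ENOTSUP. + */ +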
CCP_DES_MODE_CBC, + CCP_DES_MODE_CFB, +}; + +/** + * CCP supported DES types + */ +enum ccp_des_type { + CCP_DES_TYPE_128 = 0, /* 112 + 16 parity */ + CCP_DES_TYPE_192, /* 168 + 24 parity */ + CCP_DES_TYPE__LAST, +}; + +/***** SHA engine *****/ + +/** + * ccp_sha_type - type of SHA operation + * + * @CCP_SHA_TYPE_1: SHA-1 operation + * @CCP_SHA_TYPE_224: SHA-224 operation + * @CCP_SHA_TYPE_256: SHA-256 operation + * @CCP_SHA_TYPE_384: SHA-384 operation + * @CCP_SHA_TYPE_512: SHA-512 operation + */ +enum ccp_sha_type { + CCP_SHA_TYPE_1 = 1, + CCP_SHA_TYPE_224, + CCP_SHA_TYPE_256, + CCP_SHA_TYPE_384, + CCP_SHA_TYPE_512, + CCP_SHA_TYPE_RSVD1, + CCP_SHA_TYPE_RSVD2, + CCP_SHA3_TYPE_224, + CCP_SHA3_TYPE_256, + CCP_SHA3_TYPE_384, + CCP_SHA3_TYPE_512, + CCP_SHA_TYPE__LAST, +}; + +/** + * CCP supported cipher algorithms + */ +enum ccp_cipher_algo { + CCP_CIPHER_ALGO_AES_CBC = 0, + CCP_CIPHER_ALGO_AES_ECB, + CCP_CIPHER_ALGO_AES_CTR, + CCP_CIPHER_ALGO_AES_GCM, + CCP_CIPHER_ALGO_3DES_CBC, +}; + +/** + * CCP cipher operation type + */ +enum ccp_cipher_dir { + CCP_CIPHER_DIR_DECRYPT = 0, + CCP_CIPHER_DIR_ENCRYPT = 1, +}; + +/** + * CCP supported hash algorithms + */ +enum ccp_hash_algo { + CCP_AUTH_ALGO_SHA1 = 0, + CCP_AUTH_ALGO_SHA1_HMAC, + CCP_AUTH_ALGO_SHA224, + CCP_AUTH_ALGO_SHA224_HMAC, + CCP_AUTH_ALGO_SHA3_224, + CCP_AUTH_ALGO_SHA3_224_HMAC, + CCP_AUTH_ALGO_SHA256, + CCP_AUTH_ALGO_SHA256_HMAC, + CCP_AUTH_ALGO_SHA3_256, + CCP_AUTH_ALGO_SHA3_256_HMAC, + CCP_AUTH_ALGO_SHA384, + CCP_AUTH_ALGO_SHA384_HMAC, + CCP_AUTH_ALGO_SHA3_384, + CCP_AUTH_ALGO_SHA3_384_HMAC, + CCP_AUTH_ALGO_SHA512, + CCP_AUTH_ALGO_SHA512_HMAC, + CCP_AUTH_ALGO_SHA3_512, + CCP_AUTH_ALGO_SHA3_512_HMAC, + CCP_AUTH_ALGO_AES_CMAC, + CCP_AUTH_ALGO_AES_GCM, + CCP_AUTH_ALGO_MD5_HMAC, +}; + +/** + * CCP hash operation type + */ +enum ccp_hash_op { + CCP_AUTH_OP_GENERATE = 0, + CCP_AUTH_OP_VERIFY = 1, +}; + +/* CCP crypto private session structure */ +struct ccp_session { + bool auth_opt; + enum ccp_cmd_order cmd_id; + /**< chain order mode */ + struct { + uint16_t length; + uint16_t offset; + } iv; + /**< IV parameters */ + struct { + enum ccp_cipher_algo algo; + enum ccp_engine engine; + union { + enum ccp_aes_mode aes_mode; + enum ccp_des_mode des_mode; + } um; + union { + enum ccp_aes_type aes_type; + enum ccp_des_type des_type; + } ut; + enum ccp_cipher_dir dir; + uint64_t key_length; + /**< max cipher key size 256 bits */ + uint8_t key[32]; + /** ccp key format */ + uint8_t key_ccp[32]; + phys_addr_t key_phys; + /** AES-CTR nonce(4) iv(8) ctr */ + uint8_t nonce[32]; + phys_addr_t nonce_phys; + } cipher; + /**< Cipher Parameters */ + + struct { + enum ccp_hash_algo algo; + enum ccp_engine engine; + union { + enum ccp_aes_mode aes_mode; + } um; + union { + enum ccp_sha_type sha_type; + enum ccp_aes_type aes_type; + } ut; + enum ccp_hash_op op; + uint64_t key_length; + /**< max hash key size 144 bytes (struct capabilities) */ + uint8_t key[144]; + /**< max big-endian key size of AES is 32 */ + uint8_t key_ccp[32]; + phys_addr_t key_phys; + uint64_t digest_length; + void *ctx; + int ctx_len; + int offset; + int block_size; + /**< Buffer to store software-generated precompute values */ + /**< For HMAC H(ipad ^ key) and H(opad ^ key) */ + /**< For CMAC K1 IV and K2 IV */ + uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE]; + /**< SHA3 initial ctx all zeros */ + uint8_t sha3_ctx[200]; + int aad_length; + } auth; + /**< Authentication Parameters */ + enum rte_crypto_aead_algorithm aead_algo; + /**< AEAD Algorithm */ + + uint32_t reserved; +} __rte_cache_aligned; + +extern uint8_t ccp_cryptodev_driver_id; + +struct ccp_qp; +struct ccp_private; + +/**
+ * Set and validate CCP crypto session parameters + * + * @param sess ccp private session + * @param xform crypto xform for this session + * @param internals ccp device private data + * @return 0 on success otherwise -1 + */ +int ccp_set_session_parameters(struct ccp_session *sess, + const struct rte_crypto_sym_xform *xform, + struct ccp_private *internals); + +/** + * Compute the number of command queue slots a session requires + * + * @param session CCP private session + * @return number of slots needed for the session's command sequence + */ +int ccp_compute_slot_count(struct ccp_session *session); + +/** + * process crypto ops to be enqueued + * + * @param qp CCP crypto queue-pair + * @param op crypto ops table + * @param cmd_q CCP cmd queue + * @param nb_ops No. of ops to be submitted + * @param slots_req number of queue slots reserved for these ops + * @return number of crypto ops actually enqueued + */ +int process_ops_to_enqueue(struct ccp_qp *qp, + struct rte_crypto_op **op, + struct ccp_queue *cmd_q, + uint16_t nb_ops, + int slots_req); + +/** + * process crypto ops to be dequeued + * + * @param qp CCP crypto queue-pair + * @param op crypto ops table + * @param nb_ops requested no. of ops + * @return number of crypto ops dequeued + */ +int process_ops_to_dequeue(struct ccp_qp *qp, + struct rte_crypto_op **op, + uint16_t nb_ops); + + +/** + * APIs for SHA3 partial hash generation + * @param data_in buffer pointer on which phash is applied + * @param data_out phash result written in CCP big-endian format + */ +int partial_hash_sha3_224(uint8_t *data_in, + uint8_t *data_out); + +int partial_hash_sha3_256(uint8_t *data_in, + uint8_t *data_out); + +int partial_hash_sha3_384(uint8_t *data_in, + uint8_t *data_out); + +int partial_hash_sha3_512(uint8_t *data_in, + uint8_t *data_out); + +#endif /* _CCP_CRYPTO_H_ */ diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c new file mode 100644 index 00000000..80fe6a45 --- /dev/null +++ b/drivers/crypto/ccp/ccp_dev.c @@ -0,0 +1,810 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "ccp_dev.h" +#include "ccp_pci.h" +#include "ccp_pmd_private.h" + +struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list); +static int ccp_dev_id; + +int +ccp_dev_start(struct rte_cryptodev *dev) +{ + struct ccp_private *priv = dev->data->dev_private; + + priv->last_dev = TAILQ_FIRST(&ccp_list); + return 0; +} + +struct ccp_queue * +ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req) +{ + int i, ret = 0; + struct ccp_device *dev; + struct ccp_private *priv = cdev->data->dev_private; + + dev = TAILQ_NEXT(priv->last_dev, next); + if (unlikely(dev == NULL)) + dev = TAILQ_FIRST(&ccp_list); + priv->last_dev = dev; + if (dev->qidx >= dev->cmd_q_count) + dev->qidx = 0; + ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots); + if (ret >= slot_req) + return &dev->cmd_q[dev->qidx]; + for (i = 0; i < dev->cmd_q_count; i++) { + dev->qidx++; + if (dev->qidx >= dev->cmd_q_count) + dev->qidx = 0; + ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots); + if (ret >= slot_req) + return &dev->cmd_q[dev->qidx]; + } + return NULL; +} + +int +ccp_read_hwrng(uint32_t *value) +{ + struct ccp_device *dev; + + TAILQ_FOREACH(dev, &ccp_list, next) { + void *vaddr = (void *)(dev->pci.mem_resource[2].addr); + + while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) { + *value = CCP_READ_REG(vaddr, TRNG_OUT_REG); + if (*value) { + dev->hwrng_retries = 0; + return 0; + } + } + dev->hwrng_retries = 0; + } + return -1; +} + +static const struct rte_memzone * +ccp_queue_dma_zone_reserve(const char *queue_name, + uint32_t queue_size, + int socket_id) +{ + const struct rte_memzone *mz; + + mz = rte_memzone_lookup(queue_name); + if (mz != 0) { + if (((size_t)queue_size <= mz->len) && + ((socket_id == SOCKET_ID_ANY) || + (socket_id == mz->socket_id))) { + CCP_LOG_INFO("re-use memzone already " + "allocated for %s", queue_name); + return mz; + } + CCP_LOG_ERR("Incompatible memzone already " + "allocated %s, size %u, socket %d. " + "Requested size %u, socket %u", + queue_name, (uint32_t)mz->len, + mz->socket_id, queue_size, socket_id); + return NULL; + } + + CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %u", + queue_name, queue_size, socket_id); + + return rte_memzone_reserve_aligned(queue_name, queue_size, + socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size); +} + +/* bitmap support apis */ +static inline void +ccp_set_bit(unsigned long *bitmap, int n) +{ + __sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n))); +} + +static inline void +ccp_clear_bit(unsigned long *bitmap, int n) +{ + __sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n))); +} + +static inline uint32_t +ccp_get_bit(unsigned long *bitmap, int n) +{ + return ((bitmap[WORD_OFFSET(n)] & (1 << BIT_OFFSET(n))) != 0); +} + + +static inline uint32_t +ccp_ffz(unsigned long word) +{ + unsigned long first_zero; + + first_zero = __builtin_ffsl(~word); + return first_zero ? (first_zero - 1) : + BITS_PER_WORD; +} + +static inline uint32_t +ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit) +{ + uint32_t i; + uint32_t nwords = 0; + + nwords = (limit - 1) / BITS_PER_WORD + 1; + for (i = 0; i < nwords; i++) { + if (addr[i] == 0UL) + return i * BITS_PER_WORD; + if (addr[i] < ~(0UL)) + break; + } + return (i == nwords) ? 
limit : i * BITS_PER_WORD + ccp_ffz(addr[i]); +} + +static void +ccp_bitmap_set(unsigned long *map, unsigned int start, int len) +{ + unsigned long *p = map + WORD_OFFSET(start); + const unsigned int size = start + len; + int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD); + unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start); + + while (len - bits_to_set >= 0) { + *p |= mask_to_set; + len -= bits_to_set; + bits_to_set = BITS_PER_WORD; + mask_to_set = ~0UL; + p++; + } + if (len) { + mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size); + *p |= mask_to_set; + } +} + +static void +ccp_bitmap_clear(unsigned long *map, unsigned int start, int len) +{ + unsigned long *p = map + WORD_OFFSET(start); + const unsigned int size = start + len; + int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD); + unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start); + + while (len - bits_to_clear >= 0) { + *p &= ~mask_to_clear; + len -= bits_to_clear; + bits_to_clear = BITS_PER_WORD; + mask_to_clear = ~0UL; + p++; + } + if (len) { + mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size); + *p &= ~mask_to_clear; + } +} + + +static unsigned long +_ccp_find_next_bit(const unsigned long *addr, + unsigned long nbits, + unsigned long start, + unsigned long invert) +{ + unsigned long tmp; + + if (!nbits || start >= nbits) + return nbits; + + tmp = addr[start / BITS_PER_WORD] ^ invert; + + /* Handle 1st word. */ + tmp &= CCP_BITMAP_FIRST_WORD_MASK(start); + start = ccp_round_down(start, BITS_PER_WORD); + + while (!tmp) { + start += BITS_PER_WORD; + if (start >= nbits) + return nbits; + + tmp = addr[start / BITS_PER_WORD] ^ invert; + } + + return RTE_MIN(start + (ffs(tmp) - 1), nbits); +} + +static unsigned long +ccp_find_next_bit(const unsigned long *addr, + unsigned long size, + unsigned long offset) +{ + return _ccp_find_next_bit(addr, size, offset, 0UL); +} + +static unsigned long +ccp_find_next_zero_bit(const unsigned long *addr, + unsigned long size, + unsigned long offset) +{ + return _ccp_find_next_bit(addr, size, offset, ~0UL); +} + +/** + * bitmap_find_next_zero_area - find a contiguous aligned zero area + * @map: The address to base the search on + * @size: The bitmap size in bits + * @start: The bitnumber to start searching at + * @nr: The number of zeroed bits we're looking for + */ +static unsigned long +ccp_bitmap_find_next_zero_area(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr) +{ + unsigned long index, end, i; + +again: + index = ccp_find_next_zero_bit(map, size, start); + + end = index + nr; + if (end > size) + return end; + i = ccp_find_next_bit(map, end, index); + if (i < end) { + start = i + 1; + goto again; + } + return index; +} + +static uint32_t +ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count) +{ + struct ccp_device *ccp; + int start; + + /* First look at the map for the queue */ + if (cmd_q->lsb >= 0) { + start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap, + LSB_SIZE, 0, + count); + if (start < LSB_SIZE) { + ccp_bitmap_set(cmd_q->lsbmap, start, count); + return start + cmd_q->lsb * LSB_SIZE; + } + } + + /* try to get an entry from the shared blocks */ + ccp = cmd_q->dev; + + rte_spinlock_lock(&ccp->lsb_lock); + + start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap, + MAX_LSB_CNT * LSB_SIZE, + 0, count); + if (start <= MAX_LSB_CNT * LSB_SIZE) { + ccp_bitmap_set(ccp->lsbmap, start, count); + rte_spinlock_unlock(&ccp->lsb_lock); + return start * LSB_ITEM_SIZE; + } + CCP_LOG_ERR("NO LSBs available"); + + 
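/* Usage sketch (illustrative): ccp_add_device() below pre-allocates the + * per-queue slots from this allocator at probe time, e.g. + * cmd_q->sb_key = ccp_lsb_alloc(cmd_q, 1); // cipher key slot + * cmd_q->sb_iv = ccp_lsb_alloc(cmd_q, 1); // IV slot + * cmd_q->sb_sha = ccp_lsb_alloc(cmd_q, 2); // SHA context, two slots + * cmd_q->sb_hmac = ccp_lsb_alloc(cmd_q, 2); // HMAC context, two slots + * A queue tries its private LSB region first and only falls back to the + * shared map, guarded by lsb_lock, when that region is exhausted. + */ +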
rte_spinlock_unlock(&ccp->lsb_lock); + + return 0; +} + +static void __rte_unused +ccp_lsb_free(struct ccp_queue *cmd_q, + unsigned int start, + unsigned int count) +{ + int lsbno = start / LSB_SIZE; + + if (!start) + return; + + if (cmd_q->lsb == lsbno) { + /* An entry from the private LSB */ + ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count); + } else { + /* From the shared LSBs */ + struct ccp_device *ccp = cmd_q->dev; + + rte_spinlock_lock(&ccp->lsb_lock); + ccp_bitmap_clear(ccp->lsbmap, start, count); + rte_spinlock_unlock(&ccp->lsb_lock); + } +} + +static int +ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status) +{ + int q_mask = 1 << cmd_q->id; + int weight = 0; + int j; + + /* Build a bit mask to know which LSBs this queue has access to. + * Don't bother with segment 0 as it has special privileges. + */ + cmd_q->lsbmask = 0; + status >>= LSB_REGION_WIDTH; + for (j = 1; j < MAX_LSB_CNT; j++) { + if (status & q_mask) + ccp_set_bit(&cmd_q->lsbmask, j); + + status >>= LSB_REGION_WIDTH; + } + + for (j = 0; j < MAX_LSB_CNT; j++) + if (ccp_get_bit(&cmd_q->lsbmask, j)) + weight++; + + printf("Queue %d can access %d LSB regions of mask %lu\n", + (int)cmd_q->id, weight, cmd_q->lsbmask); + + return weight ? 0 : -EINVAL; +} + +static int +ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp, + int lsb_cnt, int n_lsbs, + unsigned long *lsb_pub) +{ + unsigned long qlsb = 0; + int bitno = 0; + int qlsb_wgt = 0; + int i, j; + + /* For each queue: + * If the count of potential LSBs available to a queue matches the + * ordinal given to us in lsb_cnt: + * Copy the mask of possible LSBs for this queue into "qlsb"; + * For each bit in qlsb, see if the corresponding bit in the + * aggregation mask is set; if so, we have a match. + * If we have a match, clear the bit in the aggregation to + * mark it as no longer available. + * If there is no match, clear the bit in qlsb and keep looking. + */ + for (i = 0; i < ccp->cmd_q_count; i++) { + struct ccp_queue *cmd_q = &ccp->cmd_q[i]; + + qlsb_wgt = 0; + for (j = 0; j < MAX_LSB_CNT; j++) + if (ccp_get_bit(&cmd_q->lsbmask, j)) + qlsb_wgt++; + + if (qlsb_wgt == lsb_cnt) { + qlsb = cmd_q->lsbmask; + + bitno = ffs(qlsb) - 1; + while (bitno < MAX_LSB_CNT) { + if (ccp_get_bit(lsb_pub, bitno)) { + /* We found an available LSB + * that this queue can access + */ + cmd_q->lsb = bitno; + ccp_clear_bit(lsb_pub, bitno); + break; + } + ccp_clear_bit(&qlsb, bitno); + bitno = ffs(qlsb) - 1; + } + if (bitno >= MAX_LSB_CNT) + return -EINVAL; + n_lsbs--; + } + } + return n_lsbs; +} + +/* For each queue, from the most- to least-constrained: + * find an LSB that can be assigned to the queue. If there are N queues that + * can only use M LSBs, where N > M, fail; otherwise, every queue will get a + * dedicated LSB. Remaining LSB regions become a shared resource. + * If we have fewer LSBs than queues, all LSB regions become shared + * resources. + */ +static int +ccp_assign_lsbs(struct ccp_device *ccp) +{ + unsigned long lsb_pub = 0, qlsb = 0; + int n_lsbs = 0; + int bitno; + int i, lsb_cnt; + int rc = 0; + + rte_spinlock_init(&ccp->lsb_lock); + + /* Create an aggregate bitmap to get a total count of available LSBs */ + for (i = 0; i < ccp->cmd_q_count; i++) + lsb_pub |= ccp->cmd_q[i].lsbmask; + + for (i = 0; i < MAX_LSB_CNT; i++) + if (ccp_get_bit(&lsb_pub, i)) + n_lsbs++; + + if (n_lsbs >= ccp->cmd_q_count) { + /* We have enough LSBs to give every queue a private LSB.
+ * Brute force search to start with the queues that are more + * constrained in LSB choice. When an LSB is privately + * assigned, it is removed from the public mask. + * This is an ugly N squared algorithm with some optimization. + */ + for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT); + lsb_cnt++) { + rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs, + &lsb_pub); + if (rc < 0) + return -EINVAL; + n_lsbs = rc; + } + } + + rc = 0; + /* What's left of the LSBs, according to the public mask, now become + * shared. Any zero bits in the lsb_pub mask represent an LSB region + * that can't be used as a shared resource, so mark the LSB slots for + * them as "in use". + */ + qlsb = lsb_pub; + bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT); + while (bitno < MAX_LSB_CNT) { + ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE); + ccp_set_bit(&qlsb, bitno); + bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT); + } + + return rc; +} + +static int +ccp_add_device(struct ccp_device *dev, int type) +{ + int i; + uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi; + uint64_t status; + struct ccp_queue *cmd_q; + const struct rte_memzone *q_mz; + void *vaddr; + + if (dev == NULL) + return -1; + + dev->id = ccp_dev_id++; + dev->qidx = 0; + vaddr = (void *)(dev->pci.mem_resource[2].addr); + + if (type == CCP_VERSION_5B) { + CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57); + CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003); + for (i = 0; i < 12; i++) { + CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET, + CCP_READ_REG(vaddr, TRNG_OUT_REG)); + } + CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F); + CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D); + CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000); + + CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF); + CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF); + + CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823); + } + CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249); + + /* Copy the private LSB mask to the public registers */ + status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET); + status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET); + CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo); + CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi); + status = ((uint64_t)status_hi<<30) | ((uint64_t)status_lo); + + dev->cmd_q_count = 0; + /* Find available queues */ + qmr = CCP_READ_REG(vaddr, Q_MASK_REG); + for (i = 0; i < MAX_HW_QUEUES; i++) { + if (!(qmr & (1 << i))) + continue; + cmd_q = &dev->cmd_q[dev->cmd_q_count++]; + cmd_q->dev = dev; + cmd_q->id = i; + cmd_q->qidx = 0; + cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); + + cmd_q->reg_base = (uint8_t *)vaddr + + CMD_Q_STATUS_INCR * (i + 1); + + /* CCP queue memory */ + snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name), + "%s_%d_%s_%d_%s", + "ccp_dev", + (int)dev->id, "queue", + (int)cmd_q->id, "mem"); + q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name, + cmd_q->qsize, SOCKET_ID_ANY); + cmd_q->qbase_addr = (void *)q_mz->addr; + cmd_q->qbase_desc = (void *)q_mz->addr; + cmd_q->qbase_phys_addr = q_mz->phys_addr; + + cmd_q->qcontrol = 0; + /* init control reg to zero */ + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol); + + /* Disable the interrupts */ + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00); + CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE); + CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE); + + /* Clear the interrupts */ + CCP_WRITE_REG(cmd_q->reg_base, 
CMD_Q_INTERRUPT_STATUS_BASE, + ALL_INTERRUPTS); + + /* Configure size of each virtual queue accessible to host */ + cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT); + cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT; + + dma_addr_lo = low32_value(cmd_q->qbase_phys_addr); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, + (uint32_t)dma_addr_lo); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE, + (uint32_t)dma_addr_lo); + + dma_addr_hi = high32_value(cmd_q->qbase_phys_addr); + cmd_q->qcontrol |= (dma_addr_hi << 16); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol); + + /* create LSB Mask map */ + if (ccp_find_lsb_regions(cmd_q, status)) + CCP_LOG_ERR("queue doesn't have lsb regions"); + cmd_q->lsb = -1; + + rte_atomic64_init(&cmd_q->free_slots); + rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1)); + /* unused slot barrier b/w H&T */ + } + + if (ccp_assign_lsbs(dev)) + CCP_LOG_ERR("Unable to assign lsb region"); + + /* pre-allocate LSB slots */ + for (i = 0; i < dev->cmd_q_count; i++) { + dev->cmd_q[i].sb_key = + ccp_lsb_alloc(&dev->cmd_q[i], 1); + dev->cmd_q[i].sb_iv = + ccp_lsb_alloc(&dev->cmd_q[i], 1); + dev->cmd_q[i].sb_sha = + ccp_lsb_alloc(&dev->cmd_q[i], 2); + dev->cmd_q[i].sb_hmac = + ccp_lsb_alloc(&dev->cmd_q[i], 2); + } + + TAILQ_INSERT_TAIL(&ccp_list, dev, next); + return 0; +} + +static void +ccp_remove_device(struct ccp_device *dev) +{ + if (dev == NULL) + return; + + TAILQ_REMOVE(&ccp_list, dev, next); +} + +static int +is_ccp_device(const char *dirname, + const struct rte_pci_id *ccp_id, + int *type) +{ + char filename[PATH_MAX]; + const struct rte_pci_id *id; + uint16_t vendor, device_id; + int i; + unsigned long tmp; + + /* get vendor id */ + snprintf(filename, sizeof(filename), "%s/vendor", dirname); + if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0) + return 0; + vendor = (uint16_t)tmp; + + /* get device id */ + snprintf(filename, sizeof(filename), "%s/device", dirname); + if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0) + return 0; + device_id = (uint16_t)tmp; + + for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) { + if (vendor == id->vendor_id && + device_id == id->device_id) { + *type = i; + return 1; /* Matched device */ + } + } + return 0; +} + +static int +ccp_probe_device(const char *dirname, uint16_t domain, + uint8_t bus, uint8_t devid, + uint8_t function, int ccp_type) +{ + struct ccp_device *ccp_dev = NULL; + struct rte_pci_device *pci; + char filename[PATH_MAX]; + unsigned long tmp; + int uio_fd = -1, i, uio_num; + char uio_devname[PATH_MAX]; + void *map_addr; + + ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev), + RTE_CACHE_LINE_SIZE); + if (ccp_dev == NULL) + goto fail; + pci = &(ccp_dev->pci); + + pci->addr.domain = domain; + pci->addr.bus = bus; + pci->addr.devid = devid; + pci->addr.function = function; + + /* get vendor id */ + snprintf(filename, sizeof(filename), "%s/vendor", dirname); + if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0) + goto fail; + pci->id.vendor_id = (uint16_t)tmp; + + /* get device id */ + snprintf(filename, sizeof(filename), "%s/device", dirname); + if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0) + goto fail; + pci->id.device_id = (uint16_t)tmp; + + /* get subsystem_vendor id */ + snprintf(filename, sizeof(filename), "%s/subsystem_vendor", + dirname); + if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0) + goto fail; + pci->id.subsystem_vendor_id = (uint16_t)tmp; + + /* get subsystem_device id */ + snprintf(filename, sizeof(filename), "%s/subsystem_device", + dirname); + 
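/* The attributes read here are the standard PCI sysfs files under the + * device directory, e.g. (path for illustration only) + * /sys/bus/pci/devices/0000:03:00.2/{vendor,device,subsystem_vendor, + * subsystem_device,class,resource}; each holds a single value that + * ccp_pci_parse_sysfs_value() converts with strtoul(). + */ +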
if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0) + goto fail; + pci->id.subsystem_device_id = (uint16_t)tmp; + + /* get class_id */ + snprintf(filename, sizeof(filename), "%s/class", + dirname); + if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0) + goto fail; + /* the least 24 bits are valid: class, subclass, program interface */ + pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID; + + /* parse resources */ + snprintf(filename, sizeof(filename), "%s/resource", dirname); + if (ccp_pci_parse_sysfs_resource(filename, pci) < 0) + goto fail; + + uio_num = ccp_find_uio_devname(dirname); + if (uio_num < 0) { + /* + * It may take time for uio device to appear, + * wait here and try again + */ + usleep(100000); + uio_num = ccp_find_uio_devname(dirname); + if (uio_num < 0) + goto fail; + } + snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num); + + uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK); + if (uio_fd < 0) + goto fail; + if (flock(uio_fd, LOCK_EX | LOCK_NB)) + goto fail; + + /* Map the PCI memory resource of device */ + for (i = 0; i < PCI_MAX_RESOURCE; i++) { + + char devname[PATH_MAX]; + int res_fd; + + if (pci->mem_resource[i].phys_addr == 0) + continue; + snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i); + res_fd = open(devname, O_RDWR); + if (res_fd < 0) + goto fail; + map_addr = mmap(NULL, pci->mem_resource[i].len, + PROT_READ | PROT_WRITE, + MAP_SHARED, res_fd, 0); + if (map_addr == MAP_FAILED) + goto fail; + + pci->mem_resource[i].addr = map_addr; + } + + /* device is valid, add in list */ + if (ccp_add_device(ccp_dev, ccp_type)) { + ccp_remove_device(ccp_dev); + goto fail; + } + + return 0; +fail: + CCP_LOG_ERR("CCP Device probe failed"); + if (uio_fd > 0) + close(uio_fd); + if (ccp_dev) + rte_free(ccp_dev); + return -1; +} + +int +ccp_probe_devices(const struct rte_pci_id *ccp_id) +{ + int dev_cnt = 0; + int ccp_type = 0; + struct dirent *d; + DIR *dir; + int ret = 0; + int module_idx = 0; + uint16_t domain; + uint8_t bus, devid, function; + char dirname[PATH_MAX]; + + module_idx = ccp_check_pci_uio_module(); + if (module_idx < 0) + return -1; + + TAILQ_INIT(&ccp_list); + dir = opendir(SYSFS_PCI_DEVICES); + if (dir == NULL) + return -1; + while ((d = readdir(dir)) != NULL) { + if (d->d_name[0] == '.') + continue; + if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name), + &domain, &bus, &devid, &function) != 0) + continue; + snprintf(dirname, sizeof(dirname), "%s/%s", + SYSFS_PCI_DEVICES, d->d_name); + if (is_ccp_device(dirname, ccp_id, &ccp_type)) { + printf("CCP : Detected CCP device with ID = 0x%x\n", + ccp_id[ccp_type].device_id); + ret = ccp_probe_device(dirname, domain, bus, devid, + function, ccp_type); + if (ret == 0) + dev_cnt++; + } + } + closedir(dir); + return dev_cnt; +} diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h new file mode 100644 index 00000000..de3e4bcc --- /dev/null +++ b/drivers/crypto/ccp/ccp_dev.h @@ -0,0 +1,495 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. 
+ */ + +#ifndef _CCP_DEV_H_ +#define _CCP_DEV_H_ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/**< CCP specific */ +#define MAX_HW_QUEUES 5 +#define CCP_MAX_TRNG_RETRIES 10 +#define CCP_ALIGN(x, y) ((((x) + (y - 1)) / y) * y) + +/**< CCP Register Mappings */ +#define Q_MASK_REG 0x000 +#define TRNG_OUT_REG 0x00c + +/* CCP Version 5 Specifics */ +#define CMD_QUEUE_MASK_OFFSET 0x00 +#define CMD_QUEUE_PRIO_OFFSET 0x04 +#define CMD_REQID_CONFIG_OFFSET 0x08 +#define CMD_CMD_TIMEOUT_OFFSET 0x10 +#define LSB_PUBLIC_MASK_LO_OFFSET 0x18 +#define LSB_PUBLIC_MASK_HI_OFFSET 0x1C +#define LSB_PRIVATE_MASK_LO_OFFSET 0x20 +#define LSB_PRIVATE_MASK_HI_OFFSET 0x24 + +#define CMD_Q_CONTROL_BASE 0x0000 +#define CMD_Q_TAIL_LO_BASE 0x0004 +#define CMD_Q_HEAD_LO_BASE 0x0008 +#define CMD_Q_INT_ENABLE_BASE 0x000C +#define CMD_Q_INTERRUPT_STATUS_BASE 0x0010 + +#define CMD_Q_STATUS_BASE 0x0100 +#define CMD_Q_INT_STATUS_BASE 0x0104 + +#define CMD_CONFIG_0_OFFSET 0x6000 +#define CMD_TRNG_CTL_OFFSET 0x6008 +#define CMD_AES_MASK_OFFSET 0x6010 +#define CMD_CLK_GATE_CTL_OFFSET 0x603C + +/* Address offset between two virtual queue registers */ +#define CMD_Q_STATUS_INCR 0x1000 + +/* Bit masks */ +#define CMD_Q_RUN 0x1 +#define CMD_Q_SIZE 0x1F +#define CMD_Q_SHIFT 3 +#define COMMANDS_PER_QUEUE 2048 + +#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \ + CMD_Q_SIZE) +#define Q_DESC_SIZE sizeof(struct ccp_desc) +#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n)) + +#define INT_COMPLETION 0x1 +#define INT_ERROR 0x2 +#define INT_QUEUE_STOPPED 0x4 +#define ALL_INTERRUPTS (INT_COMPLETION| \ + INT_ERROR| \ + INT_QUEUE_STOPPED) + +#define LSB_REGION_WIDTH 5 +#define MAX_LSB_CNT 8 + +#define LSB_SIZE 16 +#define LSB_ITEM_SIZE 32 +#define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE) +#define LSB_ENTRY_NUMBER(LSB_ADDR) (LSB_ADDR / LSB_ITEM_SIZE) + +/* General CCP Defines */ + +#define CCP_SB_BYTES 32 +/* Word 0 */ +#define CCP_CMD_DW0(p) ((p)->dw0) +#define CCP_CMD_SOC(p) (CCP_CMD_DW0(p).soc) +#define CCP_CMD_IOC(p) (CCP_CMD_DW0(p).ioc) +#define CCP_CMD_INIT(p) (CCP_CMD_DW0(p).init) +#define CCP_CMD_EOM(p) (CCP_CMD_DW0(p).eom) +#define CCP_CMD_FUNCTION(p) (CCP_CMD_DW0(p).function) +#define CCP_CMD_ENGINE(p) (CCP_CMD_DW0(p).engine) +#define CCP_CMD_PROT(p) (CCP_CMD_DW0(p).prot) + +/* Word 1 */ +#define CCP_CMD_DW1(p) ((p)->length) +#define CCP_CMD_LEN(p) (CCP_CMD_DW1(p)) + +/* Word 2 */ +#define CCP_CMD_DW2(p) ((p)->src_lo) +#define CCP_CMD_SRC_LO(p) (CCP_CMD_DW2(p)) + +/* Word 3 */ +#define CCP_CMD_DW3(p) ((p)->dw3) +#define CCP_CMD_SRC_MEM(p) ((p)->dw3.src_mem) +#define CCP_CMD_SRC_HI(p) ((p)->dw3.src_hi) +#define CCP_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id) +#define CCP_CMD_FIX_SRC(p) ((p)->dw3.fixed) + +/* Words 4/5 */ +#define CCP_CMD_DW4(p) ((p)->dw4) +#define CCP_CMD_DST_LO(p) (CCP_CMD_DW4(p).dst_lo) +#define CCP_CMD_DW5(p) ((p)->dw5.fields.dst_hi) +#define CCP_CMD_DST_HI(p) (CCP_CMD_DW5(p)) +#define CCP_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem) +#define CCP_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) +#define CCP_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo) +#define CCP_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi) + +/* Word 6/7 */ +#define CCP_CMD_DW6(p) ((p)->key_lo) +#define CCP_CMD_KEY_LO(p) (CCP_CMD_DW6(p)) +#define CCP_CMD_DW7(p) ((p)->dw7) +#define CCP_CMD_KEY_HI(p) ((p)->dw7.key_hi) +#define CCP_CMD_KEY_MEM(p) ((p)->dw7.key_mem) + +/* bitmap */ +enum { + BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT +}; + +#define WORD_OFFSET(b) ((b) / BITS_PER_WORD) +#define
BIT_OFFSET(b) ((b) % BITS_PER_WORD) + +#define CCP_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#define CCP_BITMAP_SIZE(nr) \ + CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long)) + +#define CCP_BITMAP_FIRST_WORD_MASK(start) \ + (~0UL << ((start) & (BITS_PER_WORD - 1))) +#define CCP_BITMAP_LAST_WORD_MASK(nbits) \ + (~0UL >> (-(nbits) & (BITS_PER_WORD - 1))) + +#define __ccp_round_mask(x, y) ((typeof(x))((y)-1)) +#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y)) + +/** CCP registers Write/Read */ + +static inline void ccp_pci_reg_write(void *base, int offset, + uint32_t value) +{ + volatile void *reg_addr = ((uint8_t *)base + offset); + + rte_write32((rte_cpu_to_le_32(value)), reg_addr); +} + +static inline uint32_t ccp_pci_reg_read(void *base, int offset) +{ + volatile void *reg_addr = ((uint8_t *)base + offset); + + return rte_le_to_cpu_32(rte_read32(reg_addr)); +} + +#define CCP_READ_REG(hw_addr, reg_offset) \ + ccp_pci_reg_read(hw_addr, reg_offset) + +#define CCP_WRITE_REG(hw_addr, reg_offset, value) \ + ccp_pci_reg_write(hw_addr, reg_offset, value) + +TAILQ_HEAD(ccp_list, ccp_device); + +extern struct ccp_list ccp_list; + +/** + * CCP device version + */ +enum ccp_device_version { + CCP_VERSION_5A = 0, + CCP_VERSION_5B, +}; + +/** + * A structure describing a CCP command queue. + */ +struct ccp_queue { + struct ccp_device *dev; + char memz_name[RTE_MEMZONE_NAMESIZE]; + + rte_atomic64_t free_slots; + /**< available free slots updated from enq/deq calls */ + + /* Queue identifier */ + uint64_t id; /**< queue id */ + uint64_t qidx; /**< queue index */ + uint64_t qsize; /**< queue size */ + + /* Queue address */ + struct ccp_desc *qbase_desc; + void *qbase_addr; + phys_addr_t qbase_phys_addr; + /**< queue-page registers addr */ + void *reg_base; + + uint32_t qcontrol; + /**< queue ctrl reg */ + + int lsb; + /**< lsb region assigned to queue */ + unsigned long lsbmask; + /**< lsb regions queue can access */ + unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)]; + /**< all lsb resources which queue is using */ + uint32_t sb_key; + /**< lsb assigned for queue */ + uint32_t sb_iv; + /**< lsb assigned for iv */ + uint32_t sb_sha; + /**< lsb assigned for sha ctx */ + uint32_t sb_hmac; + /**< lsb assigned for hmac ctx */ +} ____cacheline_aligned; + +/** + * A structure describing a CCP device. + */ +struct ccp_device { + TAILQ_ENTRY(ccp_device) next; + int id; + /**< ccp dev id on platform */ + struct ccp_queue cmd_q[MAX_HW_QUEUES]; + /**< ccp queue */ + int cmd_q_count; + /**< no. 
of ccp Queues */ + struct rte_pci_device pci; + /**< ccp pci identifier */ + unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)]; + /**< shared lsb mask of ccp */ + rte_spinlock_t lsb_lock; + /**< protection for shared lsb region allocation */ + int qidx; + /**< current queue index */ + int hwrng_retries; + /**< retry counter for CCP TRNG */ +} __rte_cache_aligned; + +/**< CCP H/W engine related */ +/** + * ccp_engine - CCP operation identifiers + * + * @CCP_ENGINE_AES: AES operation + * @CCP_ENGINE_XTS_AES_128: 128-bit XTS AES operation + * @CCP_ENGINE_3DES: DES/3DES operation + * @CCP_ENGINE_SHA: SHA operation + * @CCP_ENGINE_RSA: RSA operation + * @CCP_ENGINE_PASSTHRU: pass-through operation + * @CCP_ENGINE_ZLIB_DECOMPRESS: unused + * @CCP_ENGINE_ECC: ECC operation + */ +enum ccp_engine { + CCP_ENGINE_AES = 0, + CCP_ENGINE_XTS_AES_128, + CCP_ENGINE_3DES, + CCP_ENGINE_SHA, + CCP_ENGINE_RSA, + CCP_ENGINE_PASSTHRU, + CCP_ENGINE_ZLIB_DECOMPRESS, + CCP_ENGINE_ECC, + CCP_ENGINE__LAST, +}; + +/* Passthru engine */ +/** + * ccp_passthru_bitwise - type of bitwise passthru operation + * + * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed + * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask + * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask + * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask + * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask + */ +enum ccp_passthru_bitwise { + CCP_PASSTHRU_BITWISE_NOOP = 0, + CCP_PASSTHRU_BITWISE_AND, + CCP_PASSTHRU_BITWISE_OR, + CCP_PASSTHRU_BITWISE_XOR, + CCP_PASSTHRU_BITWISE_MASK, + CCP_PASSTHRU_BITWISE__LAST, +}; + +/** + * ccp_passthru_byteswap - type of byteswap passthru operation + * + * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed + * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words + * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words + */ +enum ccp_passthru_byteswap { + CCP_PASSTHRU_BYTESWAP_NOOP = 0, + CCP_PASSTHRU_BYTESWAP_32BIT, + CCP_PASSTHRU_BYTESWAP_256BIT, + CCP_PASSTHRU_BYTESWAP__LAST, +}; + +/** + * CCP passthru + */ +struct ccp_passthru { + phys_addr_t src_addr; + phys_addr_t dest_addr; + enum ccp_passthru_bitwise bit_mod; + enum ccp_passthru_byteswap byte_swap; + int len; + int dir; +}; + +/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */ +union ccp_function { + struct { + uint16_t size:7; + uint16_t encrypt:1; + uint16_t mode:5; + uint16_t type:2; + } aes; + struct { + uint16_t size:7; + uint16_t encrypt:1; + uint16_t mode:5; + uint16_t type:2; + } des; + struct { + uint16_t size:7; + uint16_t encrypt:1; + uint16_t rsvd:5; + uint16_t type:2; + } aes_xts; + struct { + uint16_t rsvd1:10; + uint16_t type:4; + uint16_t rsvd2:1; + } sha; + struct { + uint16_t mode:3; + uint16_t size:12; + } rsa; + struct { + uint16_t byteswap:2; + uint16_t bitwise:3; + uint16_t reflect:2; + uint16_t rsvd:8; + } pt; + struct { + uint16_t rsvd:13; + } zlib; + struct { + uint16_t size:10; + uint16_t type:2; + uint16_t mode:3; + } ecc; + uint16_t raw; +}; + + +/** + * descriptor for version 5 CCP commands + * 8 32-bit words: + * word 0: function; engine; control bits + * word 1: length of source data + * word 2: low 32 bits of source pointer + * word 3: upper 16 bits of source pointer; source memory type + * word 4: low 32 bits of destination pointer + * word 5: upper 16 bits of destination pointer; destination memory + * type + * word 6: low 32 bits of key pointer + * word 7: upper 16 bits of key pointer; key memory type + */ +struct dword0 { + uint32_t
+
+
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+	uint32_t soc:1;
+	uint32_t ioc:1;
+	uint32_t rsvd1:1;
+	uint32_t init:1;
+	uint32_t eom:1;
+	uint32_t function:15;
+	uint32_t engine:4;
+	uint32_t prot:1;
+	uint32_t rsvd2:7;
+};
+
+struct dword3 {
+	uint32_t src_hi:16;
+	uint32_t src_mem:2;
+	uint32_t lsb_cxt_id:8;
+	uint32_t rsvd1:5;
+	uint32_t fixed:1;
+};
+
+union dword4 {
+	uint32_t dst_lo;	/* NON-SHA */
+	uint32_t sha_len_lo;	/* SHA */
+};
+
+union dword5 {
+	struct {
+		uint32_t dst_hi:16;
+		uint32_t dst_mem:2;
+		uint32_t rsvd1:13;
+		uint32_t fixed:1;
+	} fields;
+	uint32_t sha_len_hi;
+};
+
+struct dword7 {
+	uint32_t key_hi:16;
+	uint32_t key_mem:2;
+	uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+	struct dword0 dw0;
+	uint32_t length;
+	uint32_t src_lo;
+	struct dword3 dw3;
+	union dword4 dw4;
+	union dword5 dw5;
+	uint32_t key_lo;
+	struct dword7 dw7;
+};
+
+/**
+ * ccp memory type
+ */
+enum ccp_memtype {
+	CCP_MEMTYPE_SYSTEM = 0,
+	CCP_MEMTYPE_SB,
+	CCP_MEMTYPE_LOCAL,
+	CCP_MEMTYPE_LAST,
+};
+
+/**
+ * cmd id to follow order
+ */
+enum ccp_cmd_order {
+	CCP_CMD_CIPHER = 0,
+	CCP_CMD_AUTH,
+	CCP_CMD_CIPHER_HASH,
+	CCP_CMD_HASH_CIPHER,
+	CCP_CMD_COMBINED,
+	CCP_CMD_NOT_SUPPORTED,
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+	return ((uint64_t)addr) & 0x0ffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+	return ((uint64_t)addr >> 32) & 0x00000ffff;
+}
+
+/*
+ * Start CCP device
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return no. of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+/**
+ * allocate a ccp command queue
+ *
+ * @param dev rte crypto device
+ * @param slot_req number of required slots
+ * @return allotted CCP queue on success, otherwise NULL
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
+/**
+ * read hwrng value
+ *
+ * @param trng_value data pointer to write RNG value
+ * @return 0 on success, otherwise -1
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
+#endif /* _CCP_DEV_H_ */
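Taken together, the structures above describe one hardware command slot. As a quick illustration (a sketch for the reader, not part of the patch), a no-op passthrough copy descriptor could be assembled as below; the helper name and calling context are hypothetical, and only the declarations from ccp_dev.h plus <string.h> are assumed:

#include <string.h>	/* memset */

/* hypothetical helper: encode a CCP v5 passthrough-copy descriptor */
static inline void
example_fill_passthru_desc(struct ccp_desc *desc, phys_addr_t src,
			   phys_addr_t dst, uint32_t len)
{
	union ccp_function func;

	memset(desc, 0, sizeof(*desc));
	func.raw = 0;
	func.pt.byteswap = CCP_PASSTHRU_BYTESWAP_NOOP;
	func.pt.bitwise = CCP_PASSTHRU_BITWISE_NOOP;

	desc->dw0.engine = CCP_ENGINE_PASSTHRU;	/* word 0: engine + function */
	desc->dw0.function = func.raw;
	desc->length = len;			/* word 1: source length */

	desc->src_lo = low32_value(src);	/* word 2 */
	desc->dw3.src_hi = high32_value(src);	/* word 3: upper address bits */
	desc->dw3.src_mem = CCP_MEMTYPE_SYSTEM;

	desc->dw4.dst_lo = low32_value(dst);	/* word 4 */
	desc->dw5.fields.dst_hi = high32_value(dst);	/* word 5 */
	desc->dw5.fields.dst_mem = CCP_MEMTYPE_SYSTEM;
}

Note that high32_value() deliberately keeps only 16 bits, matching the src_hi/dst_hi:16 bit-fields of words 3 and 5.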
not ":" */ + } splitaddr; + + char *buf_copy = strndup(buf, bufsize); + + if (buf_copy == NULL) + return -1; + + if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':') + != PCI_FMT_NVAL - 1) + goto error; + /* final split is on '.' between devid and function */ + splitaddr.function = strchr(splitaddr.devid, '.'); + if (splitaddr.function == NULL) + goto error; + *splitaddr.function++ = '\0'; + + /* now convert to int values */ + errno = 0; + *domain = (uint8_t)strtoul(splitaddr.domain, NULL, 16); + *bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16); + *devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16); + *function = (uint8_t)strtoul(splitaddr.function, NULL, 10); + if (errno != 0) + goto error; + + free(buf_copy); /* free the copy made with strdup */ + return 0; +error: + free(buf_copy); + return -1; +} + +int +ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val) +{ + FILE *f; + char buf[BUFSIZ]; + char *end = NULL; + + f = fopen(filename, "r"); + if (f == NULL) + return -1; + if (fgets(buf, sizeof(buf), f) == NULL) { + fclose(f); + return -1; + } + *val = strtoul(buf, &end, 0); + if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) { + fclose(f); + return -1; + } + fclose(f); + return 0; +} + +/** IO resource type: */ +#define IORESOURCE_IO 0x00000100 +#define IORESOURCE_MEM 0x00000200 + +/* parse one line of the "resource" sysfs file (note that the 'line' + * string is modified) + */ +static int +ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr, + uint64_t *end_addr, uint64_t *flags) +{ + union pci_resource_info { + struct { + char *phys_addr; + char *end_addr; + char *flags; + }; + char *ptrs[PCI_RESOURCE_FMT_NVAL]; + } res_info; + + if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3) + return -1; + errno = 0; + *phys_addr = strtoull(res_info.phys_addr, NULL, 16); + *end_addr = strtoull(res_info.end_addr, NULL, 16); + *flags = strtoull(res_info.flags, NULL, 16); + if (errno != 0) + return -1; + + return 0; +} + +/* parse the "resource" sysfs file */ +int +ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev) +{ + FILE *fp; + char buf[BUFSIZ]; + int i; + uint64_t phys_addr, end_addr, flags; + + fp = fopen(filename, "r"); + if (fp == NULL) + return -1; + + for (i = 0; i < PCI_MAX_RESOURCE; i++) { + if (fgets(buf, sizeof(buf), fp) == NULL) + goto error; + if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf), + &phys_addr, &end_addr, &flags) < 0) + goto error; + + if (flags & IORESOURCE_MEM) { + dev->mem_resource[i].phys_addr = phys_addr; + dev->mem_resource[i].len = end_addr - phys_addr + 1; + /* not mapped for now */ + dev->mem_resource[i].addr = NULL; + } + } + fclose(fp); + return 0; + +error: + fclose(fp); + return -1; +} + +int +ccp_find_uio_devname(const char *dirname) +{ + + DIR *dir; + struct dirent *e; + char dirname_uio[PATH_MAX]; + unsigned int uio_num; + int ret = -1; + + /* depending on kernel version, uio can be located in uio/uioX + * or uio:uioX + */ + snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname); + dir = opendir(dirname_uio); + if (dir == NULL) { + /* retry with the parent directory might be different kernel version*/ + dir = opendir(dirname); + if (dir == NULL) + return -1; + } + + /* take the first file starting with "uio" */ + while ((e = readdir(dir)) != NULL) { + /* format could be uio%d ...*/ + int shortprefix_len = sizeof("uio") - 1; + /* ... 
+
+int
+ccp_find_uio_devname(const char *dirname)
+{
+	DIR *dir;
+	struct dirent *e;
+	char dirname_uio[PATH_MAX];
+	unsigned int uio_num;
+	int ret = -1;
+
+	/* depending on kernel version, uio can be located in uio/uioX
+	 * or uio:uioX
+	 */
+	snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+	dir = opendir(dirname_uio);
+	if (dir == NULL) {
+		/* retry with the parent directory; the layout differs
+		 * between kernel versions
+		 */
+		dir = opendir(dirname);
+		if (dir == NULL)
+			return -1;
+	}
+
+	/* take the first file starting with "uio" */
+	while ((e = readdir(dir)) != NULL) {
+		/* format could be uio%d ... */
+		int shortprefix_len = sizeof("uio") - 1;
+		/* ... or uio:uio%d */
+		int longprefix_len = sizeof("uio:uio") - 1;
+		char *endptr;
+
+		if (strncmp(e->d_name, "uio", 3) != 0)
+			continue;
+
+		/* first try uio%d */
+		errno = 0;
+		uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+
+		/* then try uio:uio%d */
+		errno = 0;
+		uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+	}
+	closedir(dir);
+	return ret;
+}
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
new file mode 100644
index 00000000..7ed3bac4
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PCI_H_
+#define _CCP_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_bus_pci.h>
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+#define PROC_MODULES "/proc/modules"
+
+int ccp_check_pci_uio_module(void);
+
+int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			      uint8_t *bus, uint8_t *devid, uint8_t *function);
+
+int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+int ccp_pci_parse_sysfs_resource(const char *filename,
+				 struct rte_pci_device *dev);
+
+int ccp_find_uio_devname(const char *dirname);
+
+#endif /* _CCP_PCI_H_ */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
new file mode 100644
index 00000000..6984913f
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -0,0 +1,833 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "ccp_pmd_private.h"
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+
+#define CCP_BASE_SYM_CRYPTO_CAPABILITIES \
+	{	/* SHA1 */ \
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+		{.sym = { \
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+			{.auth = { \
+				.algo = RTE_CRYPTO_AUTH_SHA1, \
+				.block_size = 64, \
+				.key_size = { \
+					.min = 0, \
+					.max = 0, \
+					.increment = 0 \
+				}, \
+				.digest_size = { \
+					.min = 20, \
+					.max = 20, \
+					.increment = 0 \
+				}, \
+				.aad_size = { 0 } \
+			}, } \
+		}, } \
+	}, \
+	{	/* SHA1 HMAC */ \
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+		{.sym = { \
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+			{.auth = { \
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+				.block_size = 64, \
+				.key_size = { \
+					.min = 1, \
+					.max = 64, \
+					.increment = 1 \
+				}, \
+				.digest_size = { \
+					.min = 20, \
+					.max = 20, \
+					.increment = 0 \
+				}, \
+				.aad_size = { 0 } \
+			}, } \
+		}, } \
+	}, \
+	{	/* SHA224 */ \
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+		{.sym = { \
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+			{.auth = { \
+				.algo = RTE_CRYPTO_AUTH_SHA224, \
+				.block_size = 64, \
+				.key_size = { \
+					.min = 0, \
+					.max = 0, \
+					.increment = 0 \
+				}, \
+				.digest_size = { \
+					.min = 28, \
+					.max = 28, \
+					.increment = 0 \
+				}, \
+				.aad_size = { 0 } \
+			}, } \
+		}, } \
+	}, \
+	{	/* SHA224 HMAC */ \
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+		{.sym = { \
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+			{.auth = { \
+				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
+				.block_size = 64, \
+				.key_size = { \
+					.min = 1, \
+					.max = 64, \
+					.increment = 1 \
+				}, \
+				.digest_size = { \
+					.min = 28, \
+					.max = 28, \
+					.increment = 0 \
+				}, \
+				.aad_size = { 0 } \
+			}, } \
+		}, } \
+	}, \
+	{	/* SHA3-224 */ \
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_224, \ + .block_size = 144, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 28, \ + .max = 28, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-224 HMAC*/ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC, \ + .block_size = 144, \ + .key_size = { \ + .min = 1, \ + .max = 144, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 28, \ + .max = 28, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA256 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA256, \ + .block_size = 64, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 32, \ + .max = 32, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA256 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 32, \ + .max = 32, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-256 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_256, \ + .block_size = 136, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 32, \ + .max = 32, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-256-HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC, \ + .block_size = 136, \ + .key_size = { \ + .min = 1, \ + .max = 136, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 32, \ + .max = 32, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA384 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA384, \ + .block_size = 128, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 48, \ + .max = 48, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA384 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \ + .block_size = 128, \ + .key_size = { \ + .min = 1, \ + .max = 128, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 48, \ + .max = 48, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-384 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_384, \ + .block_size = 104, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 48, \ + .max = 48, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-384-HMAC */ \ + .op = 
RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC, \ + .block_size = 104, \ + .key_size = { \ + .min = 1, \ + .max = 104, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 48, \ + .max = 48, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA512 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA512, \ + .block_size = 128, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 64, \ + .max = 64, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA512 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \ + .block_size = 128, \ + .key_size = { \ + .min = 1, \ + .max = 128, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 64, \ + .max = 64, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-512 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_512, \ + .block_size = 72, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 64, \ + .max = 64, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-512-HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC, \ + .block_size = 72, \ + .key_size = { \ + .min = 1, \ + .max = 72, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 64, \ + .max = 64, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /*AES-CMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_AES_CMAC, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .digest_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + }, } \ + }, } \ + }, \ + { /* AES ECB */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_AES_ECB, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* AES CBC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_AES_CBC, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* AES CTR */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_AES_CTR, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* 3DES CBC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + 
{.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \ + .block_size = 8, \ + .key_size = { \ + .min = 16, \ + .max = 24, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* AES GCM */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \ + {.aead = { \ + .algo = RTE_CRYPTO_AEAD_AES_GCM, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .digest_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .aad_size = { \ + .min = 0, \ + .max = 65535, \ + .increment = 1 \ + }, \ + .iv_size = { \ + .min = 12, \ + .max = 16, \ + .increment = 4 \ + }, \ + }, } \ + }, } \ + } + +#define CCP_EXTRA_SYM_CRYPTO_CAPABILITIES \ + { /* MD5 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + } + +static const struct rte_cryptodev_capabilities ccp_crypto_cap[] = { + CCP_BASE_SYM_CRYPTO_CAPABILITIES, + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +static const struct rte_cryptodev_capabilities ccp_crypto_cap_complete[] = { + CCP_EXTRA_SYM_CRYPTO_CAPABILITIES, + CCP_BASE_SYM_CRYPTO_CAPABILITIES, + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +static int +ccp_pmd_config(struct rte_cryptodev *dev __rte_unused, + struct rte_cryptodev_config *config __rte_unused) +{ + return 0; +} + +static int +ccp_pmd_start(struct rte_cryptodev *dev) +{ + return ccp_dev_start(dev); +} + +static void +ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused) +{ + +} + +static int +ccp_pmd_close(struct rte_cryptodev *dev __rte_unused) +{ + return 0; +} + +static void +ccp_pmd_stats_get(struct rte_cryptodev *dev, + struct rte_cryptodev_stats *stats) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { + struct ccp_qp *qp = dev->data->queue_pairs[qp_id]; + + stats->enqueued_count += qp->qp_stats.enqueued_count; + stats->dequeued_count += qp->qp_stats.dequeued_count; + + stats->enqueue_err_count += qp->qp_stats.enqueue_err_count; + stats->dequeue_err_count += qp->qp_stats.dequeue_err_count; + } + +} + +static void +ccp_pmd_stats_reset(struct rte_cryptodev *dev) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { + struct ccp_qp *qp = dev->data->queue_pairs[qp_id]; + + memset(&qp->qp_stats, 0, sizeof(qp->qp_stats)); + } +} + +static void +ccp_pmd_info_get(struct rte_cryptodev *dev, + struct rte_cryptodev_info *dev_info) +{ + struct ccp_private *internals = dev->data->dev_private; + + if (dev_info != NULL) { + dev_info->driver_id = dev->driver_id; + dev_info->feature_flags = dev->feature_flags; + dev_info->capabilities = ccp_crypto_cap; + if (internals->auth_opt == 1) + dev_info->capabilities = ccp_crypto_cap_complete; + dev_info->max_nb_queue_pairs = internals->max_nb_qpairs; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; + } +} + +static int +ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id) +{ + struct ccp_qp *qp; + + if (dev->data->queue_pairs[qp_id] != NULL) { + qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id]; + rte_ring_free(qp->processed_pkts); + rte_mempool_free(qp->batch_mp); + rte_free(qp); + dev->data->queue_pairs[qp_id] = 
NULL;
+	}
+	return 0;
+}
+
+static int
+ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct ccp_qp *qp)
+{
+	unsigned int n = snprintf(qp->name, sizeof(qp->name),
+			"ccp_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+static struct rte_ring *
+ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
+		unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (r->size >= ring_size) {
+			CCP_LOG_INFO(
+				"Reusing ring %s for processed packets",
+				qp->name);
+			return r;
+		}
+		CCP_LOG_INFO(
+			"Unable to reuse ring %s for processed packets",
+			qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+static int
+ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		 const struct rte_cryptodev_qp_conf *qp_conf,
+		 int socket_id, struct rte_mempool *session_pool)
+{
+	struct ccp_private *internals = dev->data->dev_private;
+	struct ccp_qp *qp;
+	int retval = 0;
+
+	if (qp_id >= internals->max_nb_qpairs) {
+		CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
+			    qp_id, internals->max_nb_qpairs);
+		return (-EINVAL);
+	}
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		ccp_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
+				RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL) {
+		CCP_LOG_ERR("Failed to allocate queue pair memory");
+		return (-ENOMEM);
+	}
+
+	qp->dev = dev;
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	retval = ccp_pmd_qp_set_unique_name(dev, qp);
+	if (retval) {
+		CCP_LOG_ERR("Failed to create unique name for ccp qp");
+		goto qp_setup_cleanup;
+	}
+
+	qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_pkts == NULL) {
+		CCP_LOG_ERR("Failed to create batch info ring");
+		goto qp_setup_cleanup;
+	}
+
+	qp->sess_mp = session_pool;
+
+	/* mempool for batch info */
+	qp->batch_mp = rte_mempool_create(
+			qp->name,
+			qp_conf->nb_descriptors,
+			sizeof(struct ccp_batch_info),
+			RTE_CACHE_LINE_SIZE,
+			0, NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
+	if (qp->batch_mp == NULL)
+		goto qp_setup_cleanup;
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	return 0;
+
+qp_setup_cleanup:
+	dev->data->queue_pairs[qp_id] = NULL;
+	if (qp)
+		rte_free(qp);
+	return -1;
+}
+
+static uint32_t
+ccp_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+static unsigned
+ccp_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct ccp_session);
+}
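From the application side, the device and queue-pair path above is reached through the generic cryptodev API. A sketch under stated assumptions (not part of the patch): the PMD was created with a devargs string such as --vdev "crypto_ccp,max_nb_queue_pairs=1,ccp_auth_opt=1", a session mempool already exists, and in this DPDK release rte_cryptodev_queue_pair_setup() still takes the session mempool as its last argument:

#include <rte_cryptodev.h>
#include <rte_lcore.h>

static int
example_dev_init(uint8_t dev_id, struct rte_mempool *sess_mp)
{
	struct rte_cryptodev_info info;
	struct rte_cryptodev_config conf;
	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 1024 };
	const struct rte_cryptodev_capabilities *cap;

	/* dev_infos_get() reports the capability table defined above */
	rte_cryptodev_info_get(dev_id, &info);
	for (cap = info.capabilities;
	     cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++)
		if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    cap->sym.cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC)
			break;
	if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED)
		return -1;	/* AES-CBC not offered */

	conf.socket_id = rte_socket_id();
	conf.nb_queue_pairs = 1;
	if (rte_cryptodev_configure(dev_id, &conf) < 0)
		return -1;
	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
					   rte_socket_id(), sess_mp) < 0)
		return -1;
	return rte_cryptodev_start(dev_id);
}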
+
+static int
+ccp_pmd_sym_session_configure(struct rte_cryptodev *dev,
+			      struct rte_crypto_sym_xform *xform,
+			      struct rte_cryptodev_sym_session *sess,
+			      struct rte_mempool *mempool)
+{
+	int ret;
+	void *sess_private_data;
+	struct ccp_private *internals;
+
+	if (unlikely(sess == NULL || xform == NULL)) {
+		CCP_LOG_ERR("Invalid session struct or xform");
+		return -ENOMEM;
+	}
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		CCP_LOG_ERR("Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+	internals = (struct ccp_private *)dev->data->dev_private;
+	ret = ccp_set_session_parameters(sess_private_data, xform, internals);
+	if (ret != 0) {
+		CCP_LOG_ERR("failed to configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+	set_sym_session_private_data(sess, dev->driver_id,
+				     sess_private_data);
+
+	return 0;
+}
+
+static void
+ccp_pmd_sym_session_clear(struct rte_cryptodev *dev,
+			  struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_sym_session_private_data(sess, index);
+
+	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		/* scrub and detach before returning the object to the pool */
+		memset(sess_priv, 0, sizeof(struct ccp_session));
+		set_sym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+}
+
+struct rte_cryptodev_ops ccp_ops = {
+		.dev_configure		= ccp_pmd_config,
+		.dev_start		= ccp_pmd_start,
+		.dev_stop		= ccp_pmd_stop,
+		.dev_close		= ccp_pmd_close,
+
+		.stats_get		= ccp_pmd_stats_get,
+		.stats_reset		= ccp_pmd_stats_reset,
+
+		.dev_infos_get		= ccp_pmd_info_get,
+
+		.queue_pair_setup	= ccp_pmd_qp_setup,
+		.queue_pair_release	= ccp_pmd_qp_release,
+		.queue_pair_count	= ccp_pmd_qp_count,
+
+		.sym_session_get_size	= ccp_pmd_sym_session_get_size,
+		.sym_session_configure	= ccp_pmd_sym_session_configure,
+		.sym_session_clear	= ccp_pmd_sym_session_clear,
+};
+
+struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
new file mode 100644
index 00000000..79752f68
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PMD_PRIVATE_H_
+#define _CCP_PMD_PRIVATE_H_
+
+#include <rte_cryptodev_pmd.h>
+#include "ccp_crypto.h"
+
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+#define CCP_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+		__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CCP_DEBUG
+#define CCP_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+		__func__, __LINE__, ## args)
+
+#define CCP_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+		__func__, __LINE__, ## args)
+#else
+#define CCP_LOG_INFO(fmt, args...)
+#define CCP_LOG_DBG(fmt, args...)
+#endif
+
+/* Maximum queue pairs supported by CCP PMD */
+#define CCP_PMD_MAX_QUEUE_PAIRS	1
+#define CCP_NB_MAX_DESCRIPTORS	1024
+#define CCP_MAX_BURST		64
+
+#include "ccp_dev.h"
+
+/* private data structure for each CCP crypto device */
+struct ccp_private {
+	unsigned int max_nb_qpairs;	/**< Max number of queue pairs */
+	uint8_t crypto_num_dev;		/**< Number of working crypto devices */
+	bool auth_opt;			/**< Authentication offload option */
+	struct ccp_device *last_dev;	/**< Last working crypto device */
+};
+
+/* CCP batch info */
+struct ccp_batch_info {
+	struct rte_crypto_op *op[CCP_MAX_BURST];
+	/**< op table populated at enqueue time from app */
+	int op_idx;
+	struct ccp_queue *cmd_q;
+	uint16_t opcnt;
+	/**< no. of crypto ops in batch */
+	int desccnt;
+	/**< no. of ccp queue descriptors */
+	uint32_t head_offset;
+	/**< ccp queue head/tail offsets at enqueue time */
+	uint32_t tail_offset;
+	uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST];
+	phys_addr_t lsb_buf_phys;
+	/**< LSB intermediate buf for passthru */
+	int lsb_buf_idx;
+	uint16_t auth_ctr;
+	/**< auth only ops batch for CPU based auth */
+} __rte_cache_aligned;
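The session entry points above are not called directly; an application reaches them through the generic session API. A minimal sketch (not part of the patch), assuming a cipher/auth xform and a session mempool created elsewhere:

#include <rte_cryptodev.h>

static struct rte_cryptodev_sym_session *
example_create_session(uint8_t dev_id, struct rte_mempool *sess_mp,
		       struct rte_crypto_sym_xform *xform)
{
	struct rte_cryptodev_sym_session *sess;

	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;
	/* reaches ccp_pmd_sym_session_configure() through the ops table */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, sess_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}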
+
+/** CCP crypto queue pair */
+struct ccp_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	/**< Unique Queue Pair Name */
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing processed packets */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_mempool *batch_mp;
+	/**< Mempool for batch info */
+	struct rte_cryptodev_stats qp_stats;
+	/**< Queue pair statistics */
+	struct ccp_batch_info *b_info;
+	/**< Store ops pulled out of queue */
+	struct rte_cryptodev *dev;
+	/**< rte crypto device to which this qp belongs */
+	uint8_t temp_digest[DIGEST_LENGTH_MAX];
+	/**< Buffer used to store the digest generated
+	 * by the driver when verifying a digest provided
+	 * by the user (using authentication verify operation)
+	 */
+} __rte_cache_aligned;
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
+uint16_t
+ccp_cpu_pmd_enqueue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+uint16_t
+ccp_cpu_pmd_dequeue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+
+#endif /* _CCP_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/ccp/meson.build b/drivers/crypto/ccp/meson.build
new file mode 100644
index 00000000..e43b0059
--- /dev/null
+++ b/drivers/crypto/ccp/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+if host_machine.system() != 'linux'
+	build = false
+endif
+dep = dependency('libcrypto', required: false)
+if not dep.found()
+	build = false
+endif
+deps += 'bus_vdev'
+deps += 'bus_pci'
+
+sources = files('rte_ccp_pmd.c',
+		'ccp_crypto.c',
+		'ccp_dev.c',
+		'ccp_pci.c',
+		'ccp_pmd_ops.c')
+
+ext_deps += dep
+pkgconfig_extra_libs += '-lcrypto'
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
new file mode 100644
index 00000000..92d8a955
--- /dev/null
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+
+#include "ccp_crypto.h"
+#include "ccp_dev.h"
+#include "ccp_pmd_private.h"
+
+/**
+ * Global static parameter used to find if CCP device is already initialized.
+ */ +static unsigned int ccp_pmd_init_done; +uint8_t ccp_cryptodev_driver_id; + +struct ccp_pmd_init_params { + struct rte_cryptodev_pmd_init_params def_p; + bool auth_opt; +}; + +#define CCP_CRYPTODEV_PARAM_NAME ("name") +#define CCP_CRYPTODEV_PARAM_SOCKET_ID ("socket_id") +#define CCP_CRYPTODEV_PARAM_MAX_NB_QP ("max_nb_queue_pairs") +#define CCP_CRYPTODEV_PARAM_AUTH_OPT ("ccp_auth_opt") + +const char *ccp_pmd_valid_params[] = { + CCP_CRYPTODEV_PARAM_NAME, + CCP_CRYPTODEV_PARAM_SOCKET_ID, + CCP_CRYPTODEV_PARAM_MAX_NB_QP, + CCP_CRYPTODEV_PARAM_AUTH_OPT, +}; + +/** ccp pmd auth option */ +enum ccp_pmd_auth_opt { + CCP_PMD_AUTH_OPT_CCP = 0, + CCP_PMD_AUTH_OPT_CPU, +}; + +/** parse integer from integer argument */ +static int +parse_integer_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + int *i = (int *) extra_args; + + *i = atoi(value); + if (*i < 0) { + CCP_LOG_ERR("Argument has to be positive.\n"); + return -EINVAL; + } + + return 0; +} + +/** parse name argument */ +static int +parse_name_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + struct rte_cryptodev_pmd_init_params *params = extra_args; + + if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) { + CCP_LOG_ERR("Invalid name %s, should be less than " + "%u bytes.\n", value, + RTE_CRYPTODEV_NAME_MAX_LEN - 1); + return -EINVAL; + } + + strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN); + + return 0; +} + +/** parse authentication operation option */ +static int +parse_auth_opt_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + struct ccp_pmd_init_params *params = extra_args; + int i; + + i = atoi(value); + if (i < CCP_PMD_AUTH_OPT_CCP || i > CCP_PMD_AUTH_OPT_CPU) { + CCP_LOG_ERR("Invalid ccp pmd auth option. 
" + "0->auth on CCP(default), " + "1->auth on CPU\n"); + return -EINVAL; + } + params->auth_opt = i; + return 0; +} + +static int +ccp_pmd_parse_input_args(struct ccp_pmd_init_params *params, + const char *input_args) +{ + struct rte_kvargs *kvlist = NULL; + int ret = 0; + + if (params == NULL) + return -EINVAL; + + if (input_args) { + kvlist = rte_kvargs_parse(input_args, + ccp_pmd_valid_params); + if (kvlist == NULL) + return -1; + + ret = rte_kvargs_process(kvlist, + CCP_CRYPTODEV_PARAM_MAX_NB_QP, + &parse_integer_arg, + ¶ms->def_p.max_nb_queue_pairs); + if (ret < 0) + goto free_kvlist; + + ret = rte_kvargs_process(kvlist, + CCP_CRYPTODEV_PARAM_SOCKET_ID, + &parse_integer_arg, + ¶ms->def_p.socket_id); + if (ret < 0) + goto free_kvlist; + + ret = rte_kvargs_process(kvlist, + CCP_CRYPTODEV_PARAM_NAME, + &parse_name_arg, + ¶ms->def_p); + if (ret < 0) + goto free_kvlist; + + ret = rte_kvargs_process(kvlist, + CCP_CRYPTODEV_PARAM_AUTH_OPT, + &parse_auth_opt_arg, + params); + if (ret < 0) + goto free_kvlist; + + } + +free_kvlist: + rte_kvargs_free(kvlist); + return ret; +} + +static struct ccp_session * +get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op) +{ + struct ccp_session *sess = NULL; + + if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { + if (unlikely(op->sym->session == NULL)) + return NULL; + + sess = (struct ccp_session *) + get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { + void *_sess; + void *_sess_private_data = NULL; + struct ccp_private *internals; + + if (rte_mempool_get(qp->sess_mp, &_sess)) + return NULL; + if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data)) + return NULL; + + sess = (struct ccp_session *)_sess_private_data; + + internals = (struct ccp_private *)qp->dev->data->dev_private; + if (unlikely(ccp_set_session_parameters(sess, op->sym->xform, + internals) != 0)) { + rte_mempool_put(qp->sess_mp, _sess); + rte_mempool_put(qp->sess_mp, _sess_private_data); + sess = NULL; + } + op->sym->session = (struct rte_cryptodev_sym_session *)_sess; + set_sym_session_private_data(op->sym->session, + ccp_cryptodev_driver_id, + _sess_private_data); + } + + return sess; +} + +static uint16_t +ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + struct ccp_session *sess = NULL; + struct ccp_qp *qp = queue_pair; + struct ccp_queue *cmd_q; + struct rte_cryptodev *dev = qp->dev; + uint16_t i, enq_cnt = 0, slots_req = 0; + + if (nb_ops == 0) + return 0; + + if (unlikely(rte_ring_full(qp->processed_pkts) != 0)) + return 0; + + for (i = 0; i < nb_ops; i++) { + sess = get_ccp_session(qp, ops[i]); + if (unlikely(sess == NULL) && (i == 0)) { + qp->qp_stats.enqueue_err_count++; + return 0; + } else if (sess == NULL) { + nb_ops = i; + break; + } + slots_req += ccp_compute_slot_count(sess); + } + + cmd_q = ccp_allot_queue(dev, slots_req); + if (unlikely(cmd_q == NULL)) + return 0; + + enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req); + qp->qp_stats.enqueued_count += enq_cnt; + return enq_cnt; +} + +static uint16_t +ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + struct ccp_qp *qp = queue_pair; + uint16_t nb_dequeued = 0, i; + + nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops); + + /* Free session if a session-less crypto op */ + for (i = 0; i < nb_dequeued; i++) + if (unlikely(ops[i]->sess_type == + RTE_CRYPTO_OP_SESSIONLESS)) { + rte_mempool_put(qp->sess_mp, + 
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id ccp_pci_id[] = {
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+	},
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+	},
+	{.device_id = 0},
+};
+
+/** Remove ccp pmd */
+static int
+cryptodev_ccp_remove(struct rte_vdev_device *dev)
+{
+	const char *name;
+
+	ccp_pmd_init_done = 0;
+	name = rte_vdev_device_name(dev);
+	if (name == NULL)
+		return -EINVAL;
+
+	RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+		name, rte_socket_id());
+
+	return 0;
+}
+
+/** Create crypto device */
+static int
+cryptodev_ccp_create(const char *name,
+		     struct rte_vdev_device *vdev,
+		     struct ccp_pmd_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	struct ccp_private *internals;
+	uint8_t cryptodev_cnt = 0;
+
+	if (init_params->def_p.name[0] == '\0')
+		snprintf(init_params->def_p.name,
+			 sizeof(init_params->def_p.name),
+			 "%s", name);
+
+	dev = rte_cryptodev_pmd_create(init_params->def_p.name,
+				       &vdev->device,
+				       &init_params->def_p);
+	if (dev == NULL) {
+		CCP_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
+
+	if (cryptodev_cnt == 0) {
+		CCP_LOG_ERR("failed to detect CCP crypto device");
+		goto init_error;
+	}
+
+	printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
+	dev->driver_id = ccp_cryptodev_driver_id;
+
+	/* register rx/tx burst functions for data path */
+	dev->dev_ops = ccp_pmd_ops;
+	dev->enqueue_burst = ccp_pmd_enqueue_burst;
+	dev->dequeue_burst = ccp_pmd_dequeue_burst;
+
+	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_HW_ACCELERATED |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+	internals = dev->data->dev_private;
+
+	internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
+	internals->auth_opt = init_params->auth_opt;
+	internals->crypto_num_dev = cryptodev_cnt;
+
+	return 0;
+
+init_error:
+	CCP_LOG_ERR("driver %s: %s() failed",
+		    init_params->def_p.name, __func__);
+	cryptodev_ccp_remove(vdev);
+
+	return -EFAULT;
+}
+
+/** Probe ccp pmd */
+static int
+cryptodev_ccp_probe(struct rte_vdev_device *vdev)
+{
+	int rc = 0;
+	const char *name;
+	struct ccp_pmd_init_params init_params = {
+		.def_p = {
+			"",
+			sizeof(struct ccp_private),
+			rte_socket_id(),
+			CCP_PMD_MAX_QUEUE_PAIRS
+		},
+		.auth_opt = CCP_PMD_AUTH_OPT_CCP,
+	};
+	const char *input_args;
+
+	if (ccp_pmd_init_done) {
+		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
+		return -EFAULT;
+	}
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	input_args = rte_vdev_device_args(vdev);
+	ccp_pmd_parse_input_args(&init_params, input_args);
+	init_params.def_p.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
+
+	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+		init_params.def_p.socket_id);
+	RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
+		init_params.def_p.max_nb_queue_pairs);
+	RTE_LOG(INFO, PMD, "Authentication offload to %s\n",
+		((init_params.auth_opt == 0) ?
"CCP" : "CPU")); + + rc = cryptodev_ccp_create(name, vdev, &init_params); + if (rc) + return rc; + ccp_pmd_init_done = 1; + return 0; +} + +static struct rte_vdev_driver cryptodev_ccp_pmd_drv = { + .probe = cryptodev_ccp_probe, + .remove = cryptodev_ccp_remove +}; + +static struct cryptodev_driver ccp_crypto_drv; + +RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv); +RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD, + "max_nb_queue_pairs= " + "socket_id= " + "ccp_auth_opt="); +RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver, + ccp_cryptodev_driver_id); diff --git a/drivers/crypto/ccp/rte_pmd_ccp_version.map b/drivers/crypto/ccp/rte_pmd_ccp_version.map new file mode 100644 index 00000000..9b9ab1a4 --- /dev/null +++ b/drivers/crypto/ccp/rte_pmd_ccp_version.map @@ -0,0 +1,4 @@ +DPDK_18.05 { + + local: *; +}; diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile index cb6c63e6..da3d8f84 100644 --- a/drivers/crypto/dpaa2_sec/Makefile +++ b/drivers/crypto/dpaa2_sec/Makefile @@ -18,13 +18,8 @@ LIB = librte_pmd_dpaa2_sec.a # build flags CFLAGS += -DALLOW_EXPERIMENTAL_API -ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT),y) -CFLAGS += -O0 -g -CFLAGS += "-Wno-error" -else CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) -endif CFLAGS += -D _GNU_SOURCE ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c index 9a790ddd..2a3c61c6 100644 --- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c +++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c @@ -30,6 +30,9 @@ #include "dpaa2_sec_priv.h" #include "dpaa2_sec_logs.h" +/* Required types */ +typedef uint64_t dma_addr_t; + /* RTA header files */ #include #include @@ -56,6 +59,8 @@ enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8; static uint8_t cryptodev_driver_id; +int dpaa2_logtype_sec; + static inline int build_proto_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, @@ -77,11 +82,11 @@ build_proto_fd(dpaa2_sec_session *sess, DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off); DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len); - DPAA2_SET_FD_FLC(fd, ((uint64_t)flc)); + DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc); /* save physical address of mbuf */ op->sym->aead.digest.phys_addr = mbuf->buf_iova; - mbuf->buf_iova = (uint64_t)op; + mbuf->buf_iova = (size_t)op; return 0; } @@ -113,12 +118,12 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, RTE_CACHE_LINE_SIZE); if (unlikely(!fle)) { - RTE_LOG(ERR, PMD, "GCM SG: Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_SG_MEM_SIZE); - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv); op_fle = fle + 1; ip_fle = fle + 2; @@ -132,11 +137,11 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, DPAA2_SET_FD_COMPOUND_FMT(fd); DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); - PMD_TX_LOG(DEBUG, "GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n" + DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n" "iv-len=%d data_off: 0x%x\n", sym_op->aead.data.offset, sym_op->aead.data.length, - sym_op->aead.digest.length, + sess->digest_length, sess->iv.length, sym_op->m_src->data_off); @@ -264,12 +269,12 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess, */ retval = 
rte_mempool_get(priv->fle_pool, (void **)(&fle)); if (retval) { - RTE_LOG(ERR, PMD, "GCM: Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_POOL_BUF_SIZE); - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); fle = fle + 1; sge = fle + 2; if (likely(bpid < MAX_BPID)) { @@ -297,11 +302,11 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess, DPAA2_SET_FD_COMPOUND_FMT(fd); DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); - PMD_TX_LOG(DEBUG, "GCM: auth_off: 0x%x/length %d, digest-len=%d\n" + DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n" "iv-len=%d data_off: 0x%x\n", sym_op->aead.data.offset, sym_op->aead.data.length, - sym_op->aead.digest.length, + sess->digest_length, sess->iv.length, sym_op->m_src->data_off); @@ -409,12 +414,12 @@ build_authenc_sg_fd(dpaa2_sec_session *sess, fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, RTE_CACHE_LINE_SIZE); if (unlikely(!fle)) { - RTE_LOG(ERR, PMD, "AUTHENC SG: Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_SG_MEM_SIZE); - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); op_fle = fle + 1; ip_fle = fle + 2; @@ -428,16 +433,16 @@ build_authenc_sg_fd(dpaa2_sec_session *sess, DPAA2_SET_FD_COMPOUND_FMT(fd); DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); - PMD_TX_LOG(DEBUG, - "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n" - "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", - sym_op->auth.data.offset, - sym_op->auth.data.length, - sym_op->auth.digest.length, - sym_op->cipher.data.offset, - sym_op->cipher.data.length, - sym_op->cipher.iv.length, - sym_op->m_src->data_off); + DPAA2_SEC_DP_DEBUG( + "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n" + "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", + sym_op->auth.data.offset, + sym_op->auth.data.length, + sess->digest_length, + sym_op->cipher.data.offset, + sym_op->cipher.data.length, + sess->iv.length, + sym_op->m_src->data_off); /* Configure Output FLE with Scatter/Gather Entry */ DPAA2_SET_FLE_SG_EXT(op_fle); @@ -558,12 +563,12 @@ build_authenc_fd(dpaa2_sec_session *sess, */ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); if (retval) { - RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_POOL_BUF_SIZE); - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); fle = fle + 1; sge = fle + 2; if (likely(bpid < MAX_BPID)) { @@ -591,15 +596,16 @@ build_authenc_fd(dpaa2_sec_session *sess, DPAA2_SET_FD_COMPOUND_FMT(fd); DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); - PMD_TX_LOG(DEBUG, "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n" - "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", - sym_op->auth.data.offset, - sym_op->auth.data.length, - sess->digest_length, - sym_op->cipher.data.offset, - sym_op->cipher.data.length, - sess->iv.length, - sym_op->m_src->data_off); + DPAA2_SEC_DP_DEBUG( + "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n" + "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", + sym_op->auth.data.offset, + sym_op->auth.data.length, + 
sess->digest_length, + sym_op->cipher.data.offset, + sym_op->cipher.data.length, + sess->iv.length, + sym_op->m_src->data_off); /* Configure Output FLE with Scatter/Gather Entry */ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); @@ -686,13 +692,13 @@ static inline int build_auth_sg_fd( fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, RTE_CACHE_LINE_SIZE); if (unlikely(!fle)) { - RTE_LOG(ERR, PMD, "AUTH SG: Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_SG_MEM_SIZE); /* first FLE entry used to store mbuf and session ctxt */ - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); op_fle = fle + 1; ip_fle = fle + 2; sge = fle + 3; @@ -762,7 +768,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); if (retval) { - RTE_LOG(ERR, PMD, "AUTH Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_POOL_BUF_SIZE); @@ -772,8 +778,8 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, * to get the MBUF Addr from the previous FLE. * We can have a better approach to use the inline Mbuf */ - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); fle = fle + 1; if (likely(bpid < MAX_BPID)) { @@ -859,13 +865,13 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, RTE_CACHE_LINE_SIZE); if (!fle) { - RTE_LOG(ERR, PMD, "CIPHER SG: Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_SG_MEM_SIZE); /* first FLE entry used to store mbuf and session ctxt */ - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); op_fle = fle + 1; ip_fle = fle + 2; @@ -873,12 +879,13 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, flc = &priv->flc_desc[0].flc; - PMD_TX_LOG(DEBUG, - "CIPHER SG: cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x", - sym_op->cipher.data.offset, - sym_op->cipher.data.length, - sym_op->cipher.iv.length, - sym_op->m_src->data_off); + DPAA2_SEC_DP_DEBUG( + "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d" + " data_off: 0x%x\n", + sym_op->cipher.data.offset, + sym_op->cipher.data.length, + sess->iv.length, + sym_op->m_src->data_off); /* o/p fle */ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); @@ -901,10 +908,10 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, } DPAA2_SET_FLE_FIN(sge); - PMD_TX_LOG(DEBUG, - "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d", - flc, fle, fle->addr_hi, fle->addr_lo, - fle->length); + DPAA2_SEC_DP_DEBUG( + "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n", + flc, fle, fle->addr_hi, fle->addr_lo, + fle->length); /* i/p fle */ mbuf = sym_op->m_src; @@ -944,13 +951,14 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, DPAA2_SET_FD_COMPOUND_FMT(fd); DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); - PMD_TX_LOG(DEBUG, - "CIPHER SG: fdaddr =%p bpid =%d meta =%d off =%d, len =%d", - (void *)DPAA2_GET_FD_ADDR(fd), - DPAA2_GET_FD_BPID(fd), - 
rte_dpaa2_bpid_info[bpid].meta_data_size, - DPAA2_GET_FD_OFFSET(fd), - DPAA2_GET_FD_LEN(fd)); + DPAA2_SEC_DP_DEBUG( + "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d" + " off =%d, len =%d\n", + DPAA2_GET_FD_ADDR(fd), + DPAA2_GET_FD_BPID(fd), + rte_dpaa2_bpid_info[bpid].meta_data_size, + DPAA2_GET_FD_OFFSET(fd), + DPAA2_GET_FD_LEN(fd)); return 0; } @@ -976,7 +984,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); if (retval) { - RTE_LOG(ERR, PMD, "CIPHER: Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_POOL_BUF_SIZE); @@ -986,8 +994,8 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, * to get the MBUF Addr from the previous FLE. * We can have a better approach to use the inline Mbuf */ - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); fle = fle + 1; sge = fle + 2; @@ -1012,12 +1020,13 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, DPAA2_SET_FD_COMPOUND_FMT(fd); DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); - PMD_TX_LOG(DEBUG, - "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d, data_off: 0x%x", - sym_op->cipher.data.offset, - sym_op->cipher.data.length, - sess->iv.length, - sym_op->m_src->data_off); + DPAA2_SEC_DP_DEBUG( + "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d," + " data_off: 0x%x\n", + sym_op->cipher.data.offset, + sym_op->cipher.data.length, + sess->iv.length, + sym_op->m_src->data_off); DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst)); DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset + @@ -1025,10 +1034,10 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, fle->length = sym_op->cipher.data.length + sess->iv.length; - PMD_TX_LOG(DEBUG, - "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d", - flc, fle, fle->addr_hi, fle->addr_lo, - fle->length); + DPAA2_SEC_DP_DEBUG( + "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n", + flc, fle, fle->addr_hi, fle->addr_lo, + fle->length); fle++; @@ -1049,13 +1058,14 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, DPAA2_SET_FLE_FIN(sge); DPAA2_SET_FLE_FIN(fle); - PMD_TX_LOG(DEBUG, - "CIPHER: fdaddr =%p bpid =%d meta =%d off =%d, len =%d", - (void *)DPAA2_GET_FD_ADDR(fd), - DPAA2_GET_FD_BPID(fd), - rte_dpaa2_bpid_info[bpid].meta_data_size, - DPAA2_GET_FD_OFFSET(fd), - DPAA2_GET_FD_LEN(fd)); + DPAA2_SEC_DP_DEBUG( + "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d" + " off =%d, len =%d\n", + DPAA2_GET_FD_ADDR(fd), + DPAA2_GET_FD_BPID(fd), + rte_dpaa2_bpid_info[bpid].meta_data_size, + DPAA2_GET_FD_OFFSET(fd), + DPAA2_GET_FD_LEN(fd)); return 0; } @@ -1070,7 +1080,7 @@ build_sec_fd(struct rte_crypto_op *op, PMD_INIT_FUNC_TRACE(); if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) - sess = (dpaa2_sec_session *)get_session_private_data( + sess = (dpaa2_sec_session *)get_sym_session_private_data( op->sym->session, cryptodev_driver_id); else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) sess = (dpaa2_sec_session *)get_sec_session_private_data( @@ -1095,7 +1105,7 @@ build_sec_fd(struct rte_crypto_op *op, break; case DPAA2_SEC_HASH_CIPHER: default: - RTE_LOG(ERR, PMD, "error: Unsupported session\n"); + DPAA2_SEC_ERR("error: Unsupported session"); } } else { switch (sess->ctxt_type) { @@ -1116,7 +1126,7 @@ build_sec_fd(struct rte_crypto_op *op, break; case 
DPAA2_SEC_HASH_CIPHER: default: - RTE_LOG(ERR, PMD, "error: Unsupported session\n"); + DPAA2_SEC_ERR("error: Unsupported session"); } } return ret; @@ -1143,7 +1153,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, return 0; if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { - RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n"); + DPAA2_SEC_ERR("sessionless crypto op not supported"); return 0; } /*Prepare enqueue descriptor*/ @@ -1152,14 +1162,14 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, qbman_eq_desc_set_response(&eqdesc, 0, 0); qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid); - if (!DPAA2_PER_LCORE_SEC_DPIO) { - ret = dpaa2_affine_qbman_swp_sec(); + if (!DPAA2_PER_LCORE_DPIO) { + ret = dpaa2_affine_qbman_swp(); if (ret) { - RTE_LOG(ERR, PMD, "Failure in affining portal\n"); + DPAA2_SEC_ERR("Failure in affining portal"); return 0; } } - swp = DPAA2_PER_LCORE_SEC_PORTAL; + swp = DPAA2_PER_LCORE_PORTAL; while (nb_ops) { frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops; @@ -1171,8 +1181,8 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, bpid = mempool_to_bpid(mb_pool); ret = build_sec_fd(*ops, &fd_arr[loop], bpid); if (ret) { - PMD_DRV_LOG(ERR, "error: Improper packet" - " contents for crypto operation\n"); + DPAA2_SEC_ERR("error: Improper packet contents" + " for crypto operation"); goto skip_tx; } ops++; @@ -1206,7 +1216,7 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id) DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); - op = (struct rte_crypto_op *)mbuf->buf_iova; + op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova; mbuf->buf_iova = op->sym->aead.digest.phys_addr; op->sym->aead.digest.phys_addr = 0L; @@ -1236,8 +1246,8 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id) fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); - PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x", - fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); + DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n", + fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); /* we are using the first FLE entry to store Mbuf. * Currently we donot know which FLE has the mbuf stored. @@ -1248,11 +1258,10 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id) if (unlikely(DPAA2_GET_FD_IVP(fd))) { /* TODO complete it. 
*/ - RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n"); + DPAA2_SEC_ERR("error: non inline buffer"); return NULL; } - op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR( - DPAA2_GET_FLE_ADDR((fle - 1))); + op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1)); /* Prefeth op */ src = op->sym->m_src; @@ -1264,19 +1273,19 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id) } else dst = src; - PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p", - (void *)dst, dst->buf_addr); - - PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d", - (void *)DPAA2_GET_FD_ADDR(fd), - DPAA2_GET_FD_BPID(fd), - rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, - DPAA2_GET_FD_OFFSET(fd), - DPAA2_GET_FD_LEN(fd)); + DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p," + " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n", + (void *)dst, + dst->buf_addr, + DPAA2_GET_FD_ADDR(fd), + DPAA2_GET_FD_BPID(fd), + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, + DPAA2_GET_FD_OFFSET(fd), + DPAA2_GET_FD_LEN(fd)); /* free the fle memory */ if (likely(rte_pktmbuf_is_contiguous(src))) { - priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1); + priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1); rte_mempool_put(priv->fle_pool, (void *)(fle-1)); } else rte_free((void *)(fle-1)); @@ -1300,14 +1309,14 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, const struct qbman_fd *fd; struct qbman_pull_desc pulldesc; - if (!DPAA2_PER_LCORE_SEC_DPIO) { - ret = dpaa2_affine_qbman_swp_sec(); + if (!DPAA2_PER_LCORE_DPIO) { + ret = dpaa2_affine_qbman_swp(); if (ret) { - RTE_LOG(ERR, PMD, "Failure in affining portal\n"); + DPAA2_SEC_ERR("Failure in affining portal"); return 0; } } - swp = DPAA2_PER_LCORE_SEC_PORTAL; + swp = DPAA2_PER_LCORE_PORTAL; dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0]; qbman_pull_desc_clear(&pulldesc); @@ -1322,8 +1331,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, /*Issue a volatile dequeue command. */ while (1) { if (qbman_swp_pull(swp, &pulldesc)) { - RTE_LOG(WARNING, PMD, - "SEC VDQ command is not issued : QBMAN busy\n"); + DPAA2_SEC_WARN( + "SEC VDQ command is not issued : QBMAN busy"); /* Portal was busy, try again */ continue; } @@ -1355,7 +1364,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, status = (uint8_t)qbman_result_DQ_flags(dq_storage); if (unlikely( (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { - PMD_RX_LOG(DEBUG, "No frame is delivered"); + DPAA2_SEC_DP_DEBUG("No frame is delivered\n"); continue; } } @@ -1365,8 +1374,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, if (unlikely(fd->simple.frc)) { /* TODO Parse SEC errors */ - RTE_LOG(ERR, PMD, "SEC returned Error - %x\n", - fd->simple.frc); + DPAA2_SEC_ERR("SEC returned Error - %x", + fd->simple.frc); ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR; } else { ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; @@ -1378,7 +1387,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, dpaa2_qp->rx_vq.rx_pkts += num_rx; - PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx); + DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); /*Return the total number of packets received to DPAA2 app*/ return num_rx; } @@ -1420,11 +1429,11 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, /* If qp is already in use free ring memory and qp metadata. 
*/ if (dev->data->queue_pairs[qp_id] != NULL) { - PMD_DRV_LOG(INFO, "QP already setup"); + DPAA2_SEC_INFO("QP already setup"); return 0; } - PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p", + DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf); memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); @@ -1432,7 +1441,7 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp), RTE_CACHE_LINE_SIZE); if (!qp) { - RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n"); + DPAA2_SEC_ERR("malloc failed for rx/tx queues"); return -1; } @@ -1442,45 +1451,25 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, sizeof(struct queue_storage_info_t), RTE_CACHE_LINE_SIZE); if (!qp->rx_vq.q_storage) { - RTE_LOG(ERR, PMD, "malloc failed for q_storage\n"); + DPAA2_SEC_ERR("malloc failed for q_storage"); return -1; } memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t)); if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) { - RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n"); + DPAA2_SEC_ERR("Unable to allocate dequeue storage"); return -1; } dev->data->queue_pairs[qp_id] = qp; cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX; - cfg.user_ctx = (uint64_t)(&qp->rx_vq); + cfg.user_ctx = (size_t)(&qp->rx_vq); retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, qp_id, &cfg); return retcode; } -/** Start queue pair */ -static int -dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - PMD_INIT_FUNC_TRACE(); - - return 0; -} - -/** Stop queue pair */ -static int -dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - PMD_INIT_FUNC_TRACE(); - - return 0; -} - /** Return the number of allocated queue pairs */ static uint32_t dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev) @@ -1492,7 +1481,7 @@ dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev) /** Returns the size of the aesni gcm session structure */ static unsigned int -dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused) +dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { PMD_INIT_FUNC_TRACE(); @@ -1517,7 +1506,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev, sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), RTE_CACHE_LINE_SIZE); if (priv == NULL) { - RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n"); + DPAA2_SEC_ERR("No Memory for priv CTXT"); return -1; } @@ -1528,7 +1517,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev, session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, RTE_CACHE_LINE_SIZE); if (session->cipher_key.data == NULL) { - RTE_LOG(ERR, PMD, "No Memory for cipher key\n"); + DPAA2_SEC_ERR("No Memory for cipher key"); rte_free(priv); return -1; } @@ -1536,7 +1525,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev, memcpy(session->cipher_key.data, xform->cipher.key.data, xform->cipher.key.length); - cipherdata.key = (uint64_t)session->cipher_key.data; + cipherdata.key = (size_t)session->cipher_key.data; cipherdata.keylen = session->cipher_key.length; cipherdata.key_enc_flags = 0; cipherdata.key_type = RTA_DATA_IMM; @@ -1571,11 +1560,11 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev, case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: case RTE_CRYPTO_CIPHER_ZUC_EEA3: case RTE_CRYPTO_CIPHER_NULL: - RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n", + DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", xform->cipher.algo); goto error_out; default: - 
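A change that repeats throughout this file is the (uint64_t) to (size_t) cast on key and context pointers. It is a 32-bit build fix: size_t is pointer-width on both ABIs, so widening a pointer into the 64-bit alginfo and FLC fields no longer trips pointer-to-integer cast warnings. Sketch, with the field names taken from the hunks above:

/* Pointer-width-safe fill of an RTA alginfo (sketch only). */
static void
ex_alginfo_set_key(struct alginfo *a, const uint8_t *key, uint32_t len)
{
	a->key = (size_t)key;	/* a direct (uint64_t) cast warns on 32-bit */
	a->keylen = len;
	a->key_enc_flags = 0;
	a->key_type = RTA_DATA_IMM;	/* key embedded in the descriptor */
}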
RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n", + DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", xform->cipher.algo); goto error_out; } @@ -1586,7 +1575,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev, &cipherdata, NULL, session->iv.length, session->dir); if (bufsize < 0) { - RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n"); + DPAA2_SEC_ERR("Crypto: Descriptor build failed"); goto error_out; } flc->dhr = 0; @@ -1595,16 +1584,15 @@ flc->word1_sdl = (uint8_t)bufsize; flc->word2_rflc_31_0 = lower_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); flc->word3_rflc_63_32 = upper_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); session->ctxt = priv; for (i = 0; i < bufsize; i++) - PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n", - i, priv->flc_desc[0].desc[i]); + DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); return 0; @@ -1621,7 +1609,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev, { struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; struct alginfo authdata; - unsigned int bufsize, i; + int bufsize, i; struct ctxt_priv *priv; struct sec_flow_context *flc; @@ -1633,7 +1621,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev, sizeof(struct sec_flc_desc), RTE_CACHE_LINE_SIZE); if (priv == NULL) { - RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n"); + DPAA2_SEC_ERR("No Memory for priv CTXT"); return -1; } @@ -1643,7 +1631,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev, session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, RTE_CACHE_LINE_SIZE); if (session->auth_key.data == NULL) { - RTE_LOG(ERR, PMD, "No Memory for auth key\n"); + DPAA2_SEC_ERR("Unable to allocate memory for auth key"); rte_free(priv); return -1; } @@ -1651,7 +1639,7 @@ memcpy(session->auth_key.data, xform->auth.key.data, xform->auth.key.length); - authdata.key = (uint64_t)session->auth_key.data; + authdata.key = (size_t)session->auth_key.data; authdata.keylen = session->auth_key.length; authdata.key_enc_flags = 0; authdata.key_type = RTA_DATA_IMM; @@ -1703,12 +1691,12 @@ case RTE_CRYPTO_AUTH_AES_CMAC: case RTE_CRYPTO_AUTH_AES_CBC_MAC: case RTE_CRYPTO_AUTH_ZUC_EIA3: - RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n", - xform->auth.algo); + DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", + xform->auth.algo); goto error_out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n", - xform->auth.algo); + DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", + xform->auth.algo); goto error_out; } session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
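bufsize also flips from unsigned int to int in each session-init routine: the cnstr_shdsc_*() descriptor constructors return a negative value on failure, which an unsigned bufsize would silently truncate into word1_sdl. The guarded pattern these hunks converge on, sketched with the HMAC constructor used below:

int bufsize;

bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
			   1, 0, &authdata, !session->dir,
			   session->digest_length);
if (bufsize < 0)
	goto error_out;		/* constructor failed; do not program SDL */
flc->word1_sdl = (uint8_t)bufsize;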
@@ -1717,18 +1705,22 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev, bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 1, 0, &authdata, !session->dir, session->digest_length); + if (bufsize < 0) { + DPAA2_SEC_ERR("Crypto: Invalid buffer length"); + goto error_out; + } flc->word1_sdl = (uint8_t)bufsize; flc->word2_rflc_31_0 = lower_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); flc->word3_rflc_63_32 = upper_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); session->ctxt = priv; for (i = 0; i < bufsize; i++) - PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n", - i, priv->flc_desc[DESC_INITFINAL].desc[i]); + DPAA2_SEC_DEBUG("DESC[%d]:0x%x", + i, priv->flc_desc[DESC_INITFINAL].desc[i]); return 0; @@ -1747,7 +1739,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev, struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; struct alginfo aeaddata; - unsigned int bufsize, i; + int bufsize, i; struct ctxt_priv *priv; struct sec_flow_context *flc; struct rte_crypto_aead_xform *aead_xform = &xform->aead; @@ -1765,7 +1757,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev, sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), RTE_CACHE_LINE_SIZE); if (priv == NULL) { - RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n"); + DPAA2_SEC_ERR("No Memory for priv CTXT"); return -1; } @@ -1775,7 +1767,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev, session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, RTE_CACHE_LINE_SIZE); if (session->aead_key.data == NULL && aead_xform->key.length > 0) { - RTE_LOG(ERR, PMD, "No Memory for aead key\n"); + DPAA2_SEC_ERR("No Memory for aead key"); rte_free(priv); return -1; } @@ -1786,7 +1778,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev, session->aead_key.length = aead_xform->key.length; ctxt->auth_only_len = aead_xform->aad_length; - aeaddata.key = (uint64_t)session->aead_key.data; + aeaddata.key = (size_t)session->aead_key.data; aeaddata.keylen = session->aead_key.length; aeaddata.key_enc_flags = 0; aeaddata.key_type = RTA_DATA_IMM; @@ -1798,12 +1790,12 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev, session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; break; case RTE_CRYPTO_AEAD_AES_CCM: - RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n", - aead_xform->algo); + DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u", + aead_xform->algo); goto error_out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n", - aead_xform->algo); + DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", + aead_xform->algo); goto error_out; } session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 
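Per the switch above, RTE_CRYPTO_AEAD_AES_GCM is the only AEAD this PMD accepts, with AES-CCM explicitly rejected. For reference, a hypothetical application-side xform that would reach the GCM arm of that switch; the key buffer and IV offset are placeholders, field names per rte_crypto_sym.h:

uint8_t aes_key[16] = { 0 };	/* placeholder key material */

struct rte_crypto_sym_xform aead_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.next = NULL,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = aes_key, .length = sizeof(aes_key) },
		.iv = { .offset = 16, .length = 12 },	/* 96-bit GCM IV */
		.digest_length = 16,
		.aad_length = 0,
	},
};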
@@ -1816,7 +1808,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev, &priv->flc_desc[0].desc[1], 1); if (err < 0) { - PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n"); + DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); goto error_out; } if (priv->flc_desc[0].desc[1] & 1) { @@ -1838,16 +1830,21 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev, priv->flc_desc[0].desc, 1, 0, &aeaddata, session->iv.length, session->digest_length); + if (bufsize < 0) { + DPAA2_SEC_ERR("Crypto: Invalid buffer length"); + goto error_out; + } + flc->word1_sdl = (uint8_t)bufsize; flc->word2_rflc_31_0 = lower_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); flc->word3_rflc_63_32 = upper_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); session->ctxt = priv; for (i = 0; i < bufsize; i++) - PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n", + DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]); return 0; @@ -1867,7 +1864,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; struct alginfo authdata, cipherdata; - unsigned int bufsize, i; + int bufsize, i; struct ctxt_priv *priv; struct sec_flow_context *flc; struct rte_crypto_cipher_xform *cipher_xform; @@ -1899,7 +1896,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), RTE_CACHE_LINE_SIZE); if (priv == NULL) { - RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n"); + DPAA2_SEC_ERR("No Memory for priv CTXT"); return -1; } @@ -1909,7 +1906,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, RTE_CACHE_LINE_SIZE); if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { - RTE_LOG(ERR, PMD, "No Memory for cipher key\n"); + DPAA2_SEC_ERR("No Memory for cipher key"); rte_free(priv); return -1; } @@ -1917,7 +1914,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, RTE_CACHE_LINE_SIZE); if (session->auth_key.data == NULL && auth_xform->key.length > 0) { - RTE_LOG(ERR, PMD, "No Memory for auth key\n"); + DPAA2_SEC_ERR("No Memory for auth key"); rte_free(session->cipher_key.data); rte_free(priv); return -1; @@ -1928,7 +1925,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, memcpy(session->auth_key.data, auth_xform->key.data, auth_xform->key.length); - authdata.key = (uint64_t)session->auth_key.data; + authdata.key = (size_t)session->auth_key.data; authdata.keylen = session->auth_key.length; authdata.key_enc_flags = 0; authdata.key_type = RTA_DATA_IMM; @@ -1980,15 +1977,15 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, case RTE_CRYPTO_AUTH_AES_CMAC: case RTE_CRYPTO_AUTH_AES_CBC_MAC: case RTE_CRYPTO_AUTH_ZUC_EIA3: - RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n", - auth_xform->algo); + DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", + auth_xform->algo); goto error_out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n", - auth_xform->algo); + DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", + auth_xform->algo); goto error_out; } - cipherdata.key = (uint64_t)session->cipher_key.data; + cipherdata.key = (size_t)session->cipher_key.data; cipherdata.keylen = session->cipher_key.length; cipherdata.key_enc_flags = 0; cipherdata.key_type = 
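The rta_inline_query() result consumed above decides, one bit per key, whether a key is small enough to embed in the shared descriptor or must be referenced by address. A sketch of that decision; vaddr_to_iova() is a hypothetical stand-in for the bus-specific translation (the dpaa driver later in this patch uses dpaa_mem_vtop() for the same purpose):

if (priv->flc_desc[0].desc[1] & 1) {
	aeaddata.key_type = RTA_DATA_IMM;	/* key copied into desc */
} else {
	/* descriptor references the key, so it must be an IOVA;
	 * vaddr_to_iova() is a hypothetical helper */
	aeaddata.key = vaddr_to_iova((void *)(size_t)aeaddata.key);
	aeaddata.key_type = RTA_DATA_PTR;
}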
RTA_DATA_IMM; @@ -2014,12 +2011,12 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, case RTE_CRYPTO_CIPHER_3DES_ECB: case RTE_CRYPTO_CIPHER_AES_ECB: case RTE_CRYPTO_CIPHER_KASUMI_F8: - RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n", - cipher_xform->algo); + DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", + cipher_xform->algo); goto error_out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n", - cipher_xform->algo); + DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", + cipher_xform->algo); goto error_out; } session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? @@ -2033,7 +2030,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, &priv->flc_desc[0].desc[2], 2); if (err < 0) { - PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n"); + DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); goto error_out; } if (priv->flc_desc[0].desc[2] & 1) { @@ -2059,21 +2056,25 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, ctxt->auth_only_len, session->digest_length, session->dir); + if (bufsize < 0) { + DPAA2_SEC_ERR("Crypto: Invalid buffer length"); + goto error_out; + } } else { - RTE_LOG(ERR, PMD, "Hash before cipher not supported\n"); + DPAA2_SEC_ERR("Hash before cipher not supported"); goto error_out; } flc->word1_sdl = (uint8_t)bufsize; flc->word2_rflc_31_0 = lower_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); flc->word3_rflc_63_32 = upper_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); session->ctxt = priv; for (i = 0; i < bufsize; i++) - PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n", + DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); return 0; @@ -2094,7 +2095,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, PMD_INIT_FUNC_TRACE(); if (unlikely(sess == NULL)) { - RTE_LOG(ERR, PMD, "invalid session struct\n"); + DPAA2_SEC_ERR("Invalid session struct"); return -1; } @@ -2130,7 +2131,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, dpaa2_sec_aead_init(dev, xform, session); } else { - RTE_LOG(ERR, PMD, "Invalid crypto type\n"); + DPAA2_SEC_ERR("Invalid crypto type"); return -EINVAL; } @@ -2150,7 +2151,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, struct ipsec_encap_pdb encap_pdb; struct ipsec_decap_pdb decap_pdb; struct alginfo authdata, cipherdata; - unsigned int bufsize; + int bufsize; struct sec_flow_context *flc; PMD_INIT_FUNC_TRACE(); @@ -2168,7 +2169,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, RTE_CACHE_LINE_SIZE); if (priv == NULL) { - RTE_LOG(ERR, PMD, "\nNo memory for priv CTXT"); + DPAA2_SEC_ERR("No memory for priv CTXT"); return -ENOMEM; } @@ -2180,7 +2181,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, RTE_CACHE_LINE_SIZE); if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { - RTE_LOG(ERR, PMD, "No Memory for cipher key\n"); + DPAA2_SEC_ERR("No Memory for cipher key"); rte_free(priv); return -ENOMEM; } @@ -2191,7 +2192,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, RTE_CACHE_LINE_SIZE); if (session->auth_key.data == NULL && auth_xform->key.length > 0) { - RTE_LOG(ERR, PMD, "No Memory for auth key\n"); + DPAA2_SEC_ERR("No Memory for auth key"); rte_free(session->cipher_key.data); rte_free(priv); return -ENOMEM; @@ -2202,7 +2203,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, memcpy(session->auth_key.data, auth_xform->key.data, auth_xform->key.length); - 
authdata.key = (uint64_t)session->auth_key.data; + authdata.key = (size_t)session->auth_key.data; authdata.keylen = session->auth_key.length; authdata.key_enc_flags = 0; authdata.key_type = RTA_DATA_IMM; @@ -2253,15 +2254,15 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, case RTE_CRYPTO_AUTH_KASUMI_F9: case RTE_CRYPTO_AUTH_AES_CBC_MAC: case RTE_CRYPTO_AUTH_ZUC_EIA3: - RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n", - auth_xform->algo); + DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", + auth_xform->algo); goto out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n", - auth_xform->algo); + DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", + auth_xform->algo); goto out; } - cipherdata.key = (uint64_t)session->cipher_key.data; + cipherdata.key = (size_t)session->cipher_key.data; cipherdata.keylen = session->cipher_key.length; cipherdata.key_enc_flags = 0; cipherdata.key_type = RTA_DATA_IMM; @@ -2289,12 +2290,12 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, case RTE_CRYPTO_CIPHER_3DES_ECB: case RTE_CRYPTO_CIPHER_AES_ECB: case RTE_CRYPTO_CIPHER_KASUMI_F8: - RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n", - cipher_xform->algo); + DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", + cipher_xform->algo); goto out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n", - cipher_xform->algo); + DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", + cipher_xform->algo); goto out; } @@ -2340,15 +2341,21 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 1, 0, &decap_pdb, &cipherdata, &authdata); } else goto out; + + if (bufsize < 0) { + DPAA2_SEC_ERR("Crypto: Invalid buffer length"); + goto out; + } + flc->word1_sdl = (uint8_t)bufsize; /* Enable the stashing control bit */ DPAA2_SET_FLC_RSC(flc); flc->word2_rflc_31_0 = lower_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq) | 0x14); flc->word3_rflc_63_32 = upper_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); /* Set EWS bit i.e. 
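Every session-init path ends the same way: the address of queue pair 0's rx_vq is split across the two 32-bit flow-context words so the hardware can hand it back with each processed frame, and the IPsec path additionally ORs 0x14 into the low word for the stashing controls enabled via DPAA2_SET_FLC_RSC(). Condensed sketch (lower_32_bits()/upper_32_bits() as used in these hunks):

uint64_t rx_vq_va = (size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq);

flc->word2_rflc_31_0  = lower_32_bits(rx_vq_va | 0x14); /* IPsec only */
flc->word3_rflc_63_32 = upper_32_bits(rx_vq_va);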
enable write-safe */ @@ -2379,8 +2386,7 @@ dpaa2_sec_security_session_create(void *dev, int ret; if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + DPAA2_SEC_ERR("Couldn't get object from session mempool"); return -ENOMEM; } @@ -2395,9 +2401,7 @@ dpaa2_sec_security_session_create(void *dev, return -EINVAL; } if (ret != 0) { - PMD_DRV_LOG(ERR, - "DPAA2 PMD: failed to configure session parameters"); - + DPAA2_SEC_ERR("Failed to configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; @@ -2432,7 +2436,7 @@ dpaa2_sec_security_session_destroy(void *dev __rte_unused, } static int -dpaa2_sec_session_configure(struct rte_cryptodev *dev, +dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -2441,22 +2445,19 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev, int ret; if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + DPAA2_SEC_ERR("Couldn't get object from session mempool"); return -ENOMEM; } ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data); if (ret != 0) { - PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure " - "session parameters"); - + DPAA2_SEC_ERR("Failed to configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -2464,12 +2465,12 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev, /** Clear the memory of session so it doesn't leave key material behind */ static void -dpaa2_sec_session_clear(struct rte_cryptodev *dev, +dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { PMD_INIT_FUNC_TRACE(); uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; if (sess_priv) { @@ -2478,7 +2479,7 @@ dpaa2_sec_session_clear(struct rte_cryptodev *dev, rte_free(s->auth_key.data); memset(sess, 0, sizeof(dpaa2_sec_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -2511,14 +2512,13 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev) ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n", - priv->hw_id); + DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", + priv->hw_id); goto get_attr_failure; } ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); if (ret) { - PMD_INIT_LOG(ERR, - "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n"); + DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); goto get_attr_failure; } for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { @@ -2526,14 +2526,14 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev) dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, &rx_attr); dpaa2_q->fqid = rx_attr.fqid; - PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid); + DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); } for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { dpaa2_q = &qp[i]->tx_vq; 
dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, &tx_attr); dpaa2_q->fqid = tx_attr.fqid; - PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid); + DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); } return 0; @@ -2553,15 +2553,14 @@ dpaa2_sec_dev_stop(struct rte_cryptodev *dev) ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device", + DPAA2_SEC_ERR("Failure in disabling dpseci %d device", priv->hw_id); return; } ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token); if (ret < 0) { - PMD_INIT_LOG(ERR, "SEC Device cannot be reset:Error = %0x\n", - ret); + DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret); return; } } @@ -2585,8 +2584,7 @@ dpaa2_sec_dev_close(struct rte_cryptodev *dev) /*Close the device at underlying layer*/ ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, "Failure closing dpseci device with" - " error code %d\n", ret); + DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret); return -1; } @@ -2608,7 +2606,8 @@ dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, info->max_nb_queue_pairs = internals->max_nb_queue_pairs; info->feature_flags = dev->feature_flags; info->capabilities = dpaa2_sec_capabilities; - info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + info->sym.max_nb_sessions = 0; info->driver_id = cryptodev_driver_id; } } @@ -2626,12 +2625,12 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev, PMD_INIT_FUNC_TRACE(); if (stats == NULL) { - PMD_DRV_LOG(ERR, "invalid stats ptr NULL"); + DPAA2_SEC_ERR("Invalid stats ptr NULL"); return; } for (i = 0; i < dev->data->nb_queue_pairs; i++) { if (qp[i] == NULL) { - PMD_DRV_LOG(DEBUG, "Uninitialised queue pair"); + DPAA2_SEC_DEBUG("Uninitialised queue pair"); continue; } @@ -2644,16 +2643,16 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev, ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token, &counters); if (ret) { - PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n"); + DPAA2_SEC_ERR("SEC counters failed"); } else { - PMD_DRV_LOG(INFO, "dpseci hw stats:" - "\n\tNumber of Requests Dequeued = %lu" - "\n\tNumber of Outbound Encrypt Requests = %lu" - "\n\tNumber of Inbound Decrypt Requests = %lu" - "\n\tNumber of Outbound Bytes Encrypted = %lu" - "\n\tNumber of Outbound Bytes Protected = %lu" - "\n\tNumber of Inbound Bytes Decrypted = %lu" - "\n\tNumber of Inbound Bytes Validated = %lu", + DPAA2_SEC_INFO("dpseci hardware stats:" + "\n\tNum of Requests Dequeued = %" PRIu64 + "\n\tNum of Outbound Encrypt Requests = %" PRIu64 + "\n\tNum of Inbound Decrypt Requests = %" PRIu64 + "\n\tNum of Outbound Bytes Encrypted = %" PRIu64 + "\n\tNum of Outbound Bytes Protected = %" PRIu64 + "\n\tNum of Inbound Bytes Decrypted = %" PRIu64 + "\n\tNum of Inbound Bytes Validated = %" PRIu64, counters.dequeued_requests, counters.ob_enc_requests, counters.ib_dec_requests, @@ -2675,7 +2674,7 @@ void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) for (i = 0; i < dev->data->nb_queue_pairs; i++) { if (qp[i] == NULL) { - PMD_DRV_LOG(DEBUG, "Uninitialised queue pair"); + DPAA2_SEC_DEBUG("Uninitialised queue pair"); continue; } qp[i]->tx_vq.rx_pkts = 0; @@ -2697,12 +2696,10 @@ static struct rte_cryptodev_ops crypto_ops = { .stats_reset = dpaa2_sec_stats_reset, .queue_pair_setup = dpaa2_sec_queue_pair_setup, .queue_pair_release = dpaa2_sec_queue_pair_release, - .queue_pair_start = dpaa2_sec_queue_pair_start, - .queue_pair_stop = dpaa2_sec_queue_pair_stop, 
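The ops-table hunk here tracks the 18.08 cryptodev API: queue_pair_start/stop disappear from rte_cryptodev_ops, and the session hooks gain a sym_ prefix now that symmetric sessions are one session kind among several. The matching accessor rename on the datapath looks like this (sketch, names as used elsewhere in this patch):

/* get_sym_session_private_data() replaces get_session_private_data() */
dpaa2_sec_session *sess = (dpaa2_sec_session *)
	get_sym_session_private_data(op->sym->session,
				     cryptodev_driver_id);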
.queue_pair_count = dpaa2_sec_queue_pair_count, - .session_get_size = dpaa2_sec_session_get_size, - .session_configure = dpaa2_sec_session_configure, - .session_clear = dpaa2_sec_session_clear, + .sym_session_get_size = dpaa2_sec_sym_session_get_size, + .sym_session_configure = dpaa2_sec_sym_session_configure, + .sym_session_clear = dpaa2_sec_sym_session_clear, }; static const struct rte_security_capability * @@ -2729,8 +2726,8 @@ dpaa2_sec_uninit(const struct rte_cryptodev *dev) rte_mempool_free(internals->fle_pool); - PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n", - dev->data->name, rte_socket_id()); + DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u", + dev->data->name, rte_socket_id()); return 0; } @@ -2751,7 +2748,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) PMD_INIT_FUNC_TRACE(); dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); if (dpaa2_dev == NULL) { - PMD_INIT_LOG(ERR, "dpaa2_device not found\n"); + DPAA2_SEC_ERR("DPAA2 SEC device not found"); return -1; } hw_id = dpaa2_dev->object_id; @@ -2765,10 +2762,13 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) RTE_CRYPTODEV_FF_HW_ACCELERATED | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | RTE_CRYPTODEV_FF_SECURITY | - RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER; + RTE_CRYPTODEV_FF_IN_PLACE_SGL | + RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | + RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | + RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | + RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; internals = cryptodev->data->dev_private; - internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS; /* * For secondary processes, we don't initialise any further as primary @@ -2776,7 +2776,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) * RX function */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - PMD_INIT_LOG(DEBUG, "Device already init by primary process"); + DPAA2_SEC_DEBUG("Device already init by primary process"); return 0; } @@ -2794,21 +2794,21 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1, sizeof(struct fsl_mc_io), 0); if (!dpseci) { - PMD_INIT_LOG(ERR, - "Error in allocating the memory for dpsec object"); + DPAA2_SEC_ERR( + "Error in allocating the memory for dpsec object"); return -1; } dpseci->regs = rte_mcp_ptr_list[0]; retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token); if (retcode != 0) { - PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x", - retcode); + DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x", + retcode); goto init_error; } retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr); if (retcode != 0) { - PMD_INIT_LOG(ERR, + DPAA2_SEC_ERR( "Cannot get dpsec device attributed: Error = %x", retcode); goto init_error; @@ -2828,15 +2828,15 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0); if (!internals->fle_pool) { - RTE_LOG(ERR, PMD, "%s create failed\n", str); + DPAA2_SEC_ERR("Mempool (%s) creation failed", str); goto init_error; } - PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name); + DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name); return 0; init_error: - PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name); + DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name); /* dpaa2_sec_uninit(crypto_dev_name); */ return -EFAULT; @@ -2866,7 +2866,7 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv, if (cryptodev->data->dev_private == NULL) rte_panic("Cannot allocate memzone for private " - 
"device data"); + "device data"); } dpaa2_dev->cryptodev = cryptodev; @@ -2919,5 +2919,13 @@ static struct rte_dpaa2_driver rte_dpaa2_sec_driver = { static struct cryptodev_driver dpaa2_sec_crypto_drv; RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver); -RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver, - cryptodev_driver_id); +RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, + rte_dpaa2_sec_driver.driver, cryptodev_driver_id); + +RTE_INIT(dpaa2_sec_init_log) +{ + /* Bus level logs */ + dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2"); + if (dpaa2_logtype_sec >= 0) + rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE); +} diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h index 23251141..8a990442 100644 --- a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h +++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h @@ -8,37 +8,35 @@ #ifndef _DPAA2_SEC_LOGS_H_ #define _DPAA2_SEC_LOGS_H_ -#define PMD_INIT_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args) - -#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT -#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") -#else -#define PMD_INIT_FUNC_TRACE() do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_RX -#define PMD_RX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_RX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_TX -#define PMD_TX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_TX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER -#define PMD_DRV_LOG_RAW(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) -#else -#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) -#endif - -#define PMD_DRV_LOG(level, fmt, args...) \ - PMD_DRV_LOG_RAW(level, fmt "\n", ## args) +extern int dpaa2_logtype_sec; + +#define DPAA2_SEC_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, dpaa2_logtype_sec, "dpaa2_sec: " \ + fmt "\n", ##args) + +#define DPAA2_SEC_DEBUG(fmt, args...) \ + rte_log(RTE_LOG_DEBUG, dpaa2_logtype_sec, "dpaa2_sec: %s(): " \ + fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() DPAA2_SEC_DEBUG(">>") + +#define DPAA2_SEC_INFO(fmt, args...) \ + DPAA2_SEC_LOG(INFO, fmt, ## args) +#define DPAA2_SEC_ERR(fmt, args...) \ + DPAA2_SEC_LOG(ERR, fmt, ## args) +#define DPAA2_SEC_WARN(fmt, args...) \ + DPAA2_SEC_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define DPAA2_SEC_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#define DPAA2_SEC_DP_DEBUG(fmt, args...) \ + DPAA2_SEC_DP_LOG(DEBUG, fmt, ## args) +#define DPAA2_SEC_DP_INFO(fmt, args...) \ + DPAA2_SEC_DP_LOG(INFO, fmt, ## args) +#define DPAA2_SEC_DP_WARN(fmt, args...) 
\ + DPAA2_SEC_DP_LOG(WARNING, fmt, ## args) + #endif /* _DPAA2_SEC_LOGS_H_ */ diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h index e8ac95ba..d015be1e 100644 --- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h +++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h @@ -23,8 +23,6 @@ struct dpaa2_sec_dev_private { uint16_t token; /**< Token required by DPxxx objects */ unsigned int max_nb_queue_pairs; /**< Max number of queue pairs supported by device */ - unsigned int max_nb_sessions; - /**< Max number of sessions supported by device */ }; struct dpaa2_sec_qp { @@ -185,9 +183,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 16, + .min = 1, .max = 16, - .increment = 0 + .increment = 1 }, .iv_size = { 0 } }, } @@ -206,9 +204,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 20, + .min = 1, .max = 20, - .increment = 0 + .increment = 1 }, .iv_size = { 0 } }, } @@ -227,9 +225,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 28, + .min = 1, .max = 28, - .increment = 0 + .increment = 1 }, .iv_size = { 0 } }, } @@ -248,9 +246,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 32, - .max = 32, - .increment = 0 + .min = 1, + .max = 32, + .increment = 1 }, .iv_size = { 0 } }, } @@ -269,9 +267,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 48, + .min = 1, .max = 48, - .increment = 0 + .increment = 1 }, .iv_size = { 0 } }, } @@ -290,9 +288,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 64, + .min = 1, .max = 64, - .increment = 0 + .increment = 1 }, .iv_size = { 0 } }, } diff --git a/drivers/crypto/dpaa2_sec/meson.build b/drivers/crypto/dpaa2_sec/meson.build new file mode 100644 index 00000000..01afc587 --- /dev/null +++ b/drivers/crypto/dpaa2_sec/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if host_machine.system() != 'linux' + build = false +endif + +deps += ['security', 'mempool_dpaa2'] +sources = files('dpaa2_sec_dpseci.c', + 'mc/dpseci.c') + +allow_experimental_apis = true + +includes += include_directories('mc', 'hw') diff --git a/drivers/crypto/dpaa_sec/Makefile b/drivers/crypto/dpaa_sec/Makefile index fe2c5932..9be44704 100644 --- a/drivers/crypto/dpaa_sec/Makefile +++ b/drivers/crypto/dpaa_sec/Makefile @@ -12,13 +12,8 @@ LIB = librte_pmd_dpaa_sec.a # build flags CFLAGS += -DALLOW_EXPERIMENTAL_API CFLAGS += -D _GNU_SOURCE -ifeq ($(CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT),y) -CFLAGS += -O0 -g -CFLAGS += "-Wno-error" -else CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) -endif CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c index 18681cf3..f571050b 100644 --- a/drivers/crypto/dpaa_sec/dpaa_sec.c +++ b/drivers/crypto/dpaa_sec/dpaa_sec.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
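The dpaa2_sec_priv.h hunks above relax each HMAC digest_size capability from one fixed value to a 1..N range with increment 1, i.e. truncated MACs are now advertised. An application can probe for that before building a session; a sketch with the standard capability helpers (device id and sizes are placeholders):

const struct rte_cryptodev_symmetric_capability *cap;
struct rte_cryptodev_sym_capability_idx idx = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.algo.auth = RTE_CRYPTO_AUTH_SHA256_HMAC,
};
int ok;

cap = rte_cryptodev_sym_capability_get(0 /* dev_id */, &idx);
/* 64-byte key, 16-byte truncated digest, no IV: accepted only
 * with the widened digest_size range. */
ok = (cap != NULL &&
      rte_cryptodev_sym_capability_check_auth(cap, 64, 16, 0) == 0);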
- * Copyright 2017 NXP + * Copyright 2017-2018 NXP * */ @@ -39,6 +39,8 @@ enum rta_sec_era rta_sec_era; +int dpaa_logtype_sec; + static uint8_t cryptodev_driver_id; static __thread struct rte_crypto_op **dpaa_sec_ops; @@ -53,7 +55,7 @@ dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx) if (!ctx->fd_status) { ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; } else { - PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status); + DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; } @@ -69,7 +71,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses) retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx)); if (!ctx || retval) { - PMD_TX_LOG(ERR, "Alloc sec descriptor failed!"); + DPAA_SEC_DP_WARN("Alloc sec descriptor failed!"); return NULL; } /* @@ -84,7 +86,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses) dcbz_64(&ctx->job.sg[SG_CACHELINE_3]); ctx->ctx_pool = ses->ctx_pool; - ctx->vtop_offset = (uint64_t) ctx + ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx); return ctx; @@ -93,43 +95,18 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses) static inline rte_iova_t dpaa_mem_vtop(void *vaddr) { - const struct rte_memseg *memseg = rte_eal_get_physmem_layout(); - uint64_t vaddr_64, paddr; - int i; - - vaddr_64 = (uint64_t)vaddr; - for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) { - if (vaddr_64 >= memseg[i].addr_64 && - vaddr_64 < memseg[i].addr_64 + memseg[i].len) { - paddr = memseg[i].iova + - (vaddr_64 - memseg[i].addr_64); - - return (rte_iova_t)paddr; - } - } - return (rte_iova_t)(NULL); -} + const struct rte_memseg *ms; -/* virtual address conversin when mempool support is available for ctx */ -static inline phys_addr_t -dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr) -{ - return (uint64_t)vaddr - ctx->vtop_offset; + ms = rte_mem_virt2memseg(vaddr, NULL); + if (ms) + return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr); + return (size_t)NULL; } static inline void * dpaa_mem_ptov(rte_iova_t paddr) { - const struct rte_memseg *memseg = rte_eal_get_physmem_layout(); - int i; - - for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) { - if (paddr >= memseg[i].iova && - (char *)paddr < (char *)memseg[i].iova + memseg[i].len) - return (void *)(memseg[i].addr_64 + - (paddr - memseg[i].iova)); - } - return NULL; + return rte_mem_iova2virt(paddr); } static void @@ -137,8 +114,8 @@ ern_sec_fq_handler(struct qman_portal *qm __rte_unused, struct qman_fq *fq, const struct qm_mr_entry *msg) { - RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n", - fq->fqid, msg->ern.rc, msg->ern.seqnum); + DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n", + fq->fqid, msg->ern.rc, msg->ern.seqnum); } /* initialize the queue with dest chan as caam chan so that @@ -166,11 +143,11 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc, fq_in->cb.ern = ern_sec_fq_handler; - PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out); + DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out); ret = qman_init_fq(fq_in, flags, &fq_opts); if (unlikely(ret != 0)) - PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret); + DPAA_SEC_ERR("qman_init_fq failed %d", ret); return ret; } @@ -229,7 +206,7 @@ dpaa_sec_init_tx(struct qman_fq *fq) ret = qman_create_fq(0, flags, fq); if (unlikely(ret)) { - PMD_INIT_LOG(ERR, "qman_create_fq failed"); + DPAA_SEC_ERR("qman_create_fq failed"); return ret; } @@ -244,7 +221,7 @@ dpaa_sec_init_tx(struct qman_fq *fq) ret = qman_init_fq(fq, 0, &opts); if (unlikely(ret)) { - PMD_INIT_LOG(ERR, "unable to init caam 
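dpaa_mem_vtop() above sheds its linear scan of rte_eal_get_physmem_layout(), which the 18.05 memory rework removed, and dpaa_mem_vtop_ctx() goes away entirely. The whole translation now reduces to a memseg lookup; sketch:

#include <rte_memory.h>

/* VA -> IOVA via the owning memseg (as in the new dpaa_mem_vtop). */
static inline rte_iova_t
ex_vtop(void *vaddr)
{
	const struct rte_memseg *ms = rte_mem_virt2memseg(vaddr, NULL);

	if (ms != NULL)
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	return (rte_iova_t)0;
}

/* The reverse direction is now the one-liner rte_mem_iova2virt(). */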
source fq!"); + DPAA_SEC_ERR("unable to init caam source fq!"); return ret; } @@ -336,7 +313,7 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a) alginfo_a->algmode = OP_ALG_AAI_HMAC; break; default: - PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg); + DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg); } } @@ -365,7 +342,7 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c) alginfo_c->algmode = OP_ALG_AAI_CTR; break; default: - PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg); + DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg); } } @@ -378,7 +355,7 @@ caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo) alginfo->algmode = OP_ALG_AAI_GCM; break; default: - PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg); + DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg); } } @@ -388,7 +365,7 @@ static int dpaa_sec_prep_cdb(dpaa_sec_session *ses) { struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0}; - uint32_t shared_desc_len = 0; + int32_t shared_desc_len = 0; struct sec_cdb *cdb = &ses->cdb; int err; #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN @@ -402,11 +379,11 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses) if (is_cipher_only(ses)) { caam_cipher_alg(ses, &alginfo_c); if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { - PMD_TX_LOG(ERR, "not supported cipher alg\n"); + DPAA_SEC_ERR("not supported cipher alg"); return -ENOTSUP; } - alginfo_c.key = (uint64_t)ses->cipher_key.data; + alginfo_c.key = (size_t)ses->cipher_key.data; alginfo_c.keylen = ses->cipher_key.length; alginfo_c.key_enc_flags = 0; alginfo_c.key_type = RTA_DATA_IMM; @@ -420,11 +397,11 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses) } else if (is_auth_only(ses)) { caam_auth_alg(ses, &alginfo_a); if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { - PMD_TX_LOG(ERR, "not supported auth alg\n"); + DPAA_SEC_ERR("not supported auth alg"); return -ENOTSUP; } - alginfo_a.key = (uint64_t)ses->auth_key.data; + alginfo_a.key = (size_t)ses->auth_key.data; alginfo_a.keylen = ses->auth_key.length; alginfo_a.key_enc_flags = 0; alginfo_a.key_type = RTA_DATA_IMM; @@ -436,10 +413,10 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses) } else if (is_aead(ses)) { caam_aead_alg(ses, &alginfo); if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { - PMD_TX_LOG(ERR, "not supported aead alg\n"); + DPAA_SEC_ERR("not supported aead alg"); return -ENOTSUP; } - alginfo.key = (uint64_t)ses->aead_key.data; + alginfo.key = (size_t)ses->aead_key.data; alginfo.keylen = ses->aead_key.length; alginfo.key_enc_flags = 0; alginfo.key_type = RTA_DATA_IMM; @@ -459,22 +436,22 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses) } else { caam_cipher_alg(ses, &alginfo_c); if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { - PMD_TX_LOG(ERR, "not supported cipher alg\n"); + DPAA_SEC_ERR("not supported cipher alg"); return -ENOTSUP; } - alginfo_c.key = (uint64_t)ses->cipher_key.data; + alginfo_c.key = (size_t)ses->cipher_key.data; alginfo_c.keylen = ses->cipher_key.length; alginfo_c.key_enc_flags = 0; alginfo_c.key_type = RTA_DATA_IMM; caam_auth_alg(ses, &alginfo_a); if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { - PMD_TX_LOG(ERR, "not supported auth alg\n"); + DPAA_SEC_ERR("not supported auth alg"); return -ENOTSUP; } - alginfo_a.key = (uint64_t)ses->auth_key.data; + alginfo_a.key = (size_t)ses->auth_key.data; alginfo_a.keylen = ses->auth_key.length; alginfo_a.key_enc_flags = 0; alginfo_a.key_type = RTA_DATA_IMM; @@ -487,21 +464,21 @@ 
dpaa_sec_prep_cdb(dpaa_sec_session *ses) &cdb->sh_desc[2], 2); if (err < 0) { - PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths"); + DPAA_SEC_ERR("Crypto: Incorrect key lengths"); return err; } if (cdb->sh_desc[2] & 1) alginfo_c.key_type = RTA_DATA_IMM; else { - alginfo_c.key = (uint64_t)dpaa_mem_vtop( - (void *)alginfo_c.key); + alginfo_c.key = (size_t)dpaa_mem_vtop( + (void *)(size_t)alginfo_c.key); alginfo_c.key_type = RTA_DATA_PTR; } if (cdb->sh_desc[2] & (1<<1)) alginfo_a.key_type = RTA_DATA_IMM; else { - alginfo_a.key = (uint64_t)dpaa_mem_vtop( - (void *)alginfo_a.key); + alginfo_a.key = (size_t)dpaa_mem_vtop( + (void *)(size_t)alginfo_a.key); alginfo_a.key_type = RTA_DATA_PTR; } cdb->sh_desc[0] = 0; @@ -530,6 +507,12 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses) ses->digest_length, ses->dir); } } + + if (shared_desc_len < 0) { + DPAA_SEC_ERR("error in preparing command block"); + return shared_desc_len; + } + cdb->sh_hdr.hi.field.idlen = shared_desc_len; cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word); cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word); @@ -543,12 +526,25 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops) { struct qman_fq *fq; unsigned int pkts = 0; - int ret; + int num_rx_bufs, ret; struct qm_dqrr_entry *dq; + uint32_t vdqcr_flags = 0; fq = &qp->outq; - ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ? - DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops); + /* + * For requests of fewer than four buffers, we set QM_VDQCR_EXACT and + * provide the exact number of buffers. Otherwise we do not set the + * QM_VDQCR_EXACT flag; an inexact dequeue can return up to two more + * buffers than requested, so we request two fewer in that case. + */ + if (nb_ops < 4) { + vdqcr_flags = QM_VDQCR_EXACT; + num_rx_bufs = nb_ops; + } else { + num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+ (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2); + } + ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags); if (ret) return 0; @@ -585,7 +581,7 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops) if (!ctx->fd_status) { op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; } else { - printf("\nSEC return err: 0x%x", ctx->fd_status); + DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status); op->status = RTE_CRYPTO_OP_STATUS_ERROR; } ops[pkts++] = op; @@ -616,8 +612,8 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) extra_segs = 2; if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) { - PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n", - MAX_SG_ENTRIES); + DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d", + MAX_SG_ENTRIES); return NULL; } ctx = dpaa_sec_alloc_ctx(ses); @@ -640,7 +636,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) in_sg->extension = 1; in_sg->final = 1; in_sg->length = sym->auth.data.length; - qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2])); + qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2])); /* 1st seg */ sg = in_sg + 1; @@ -664,7 +660,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) sg++; rte_memcpy(old_digest, sym->auth.digest.data, ses->digest_length); - start_addr = dpaa_mem_vtop_ctx(ctx, old_digest); + start_addr = dpaa_mem_vtop(old_digest); qm_sg_entry_set64(sg, start_addr); sg->length = ses->digest_length; in_sg->length += ses->digest_length; @@ -718,7 +714,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses) if (is_decode(ses)) { /* need to extend the input to a compound frame */ sg->extension = 1; - qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2])); + qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2])); sg->length = sym->auth.data.length + ses->digest_length; sg->final = 1; cpu_to_hw_sg(sg); @@ -732,7 +728,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses) cpu_to_hw_sg(sg); /* let's check digest by hw */ - start_addr = dpaa_mem_vtop_ctx(ctx, old_digest); + start_addr = dpaa_mem_vtop(old_digest); sg++; qm_sg_entry_set64(sg, start_addr); sg->length = ses->digest_length; @@ -769,8 +765,8 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) } if (req_segs > MAX_SG_ENTRIES) { - PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n", - MAX_SG_ENTRIES); + DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d", + MAX_SG_ENTRIES); return NULL; } @@ -785,7 +781,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) out_sg = &cf->sg[0]; out_sg->extension = 1; out_sg->length = sym->cipher.data.length; - qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2])); + qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2])); cpu_to_hw_sg(out_sg); /* 1st seg */ @@ -814,7 +810,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) in_sg->length = sym->cipher.data.length + ses->iv.length; sg++; - qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg)); cpu_to_hw_sg(in_sg); /* IV */ @@ -881,7 +877,7 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses) sg->extension = 1; sg->final = 1; sg->length = sym->cipher.data.length + ses->iv.length; - qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2])); + qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2])); cpu_to_hw_sg(sg); sg = &cf->sg[2]; @@ -922,7 +918,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) req_segs++; if (req_segs > 
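Restating the dequeue sizing just introduced: below four frames, QM_VDQCR_EXACT makes the hardware return exactly the requested count; for larger bursts the flag stays clear, and because an inexact VDQ command may deliver up to two frames more than asked, the request is trimmed by two so the caller's array cannot be overrun. Equivalent sketch:

uint32_t flags = 0;
int req;

if (nb_ops < 4) {
	flags = QM_VDQCR_EXACT;	/* returns exactly 'req' frames */
	req = nb_ops;
} else {
	/* inexact mode may overshoot by up to 2: under-ask by 2 */
	req = RTE_MIN(nb_ops, DPAA_MAX_DEQUEUE_NUM_FRAMES) - 2;
}
ret = qman_set_vdq(fq, req, flags);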
MAX_SG_ENTRIES) { - PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n", + DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d", MAX_SG_ENTRIES); return NULL; } @@ -947,7 +943,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) /* output sg entries */ sg = &cf->sg[2]; - qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg)); cpu_to_hw_sg(out_sg); /* 1st seg */ @@ -991,7 +987,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) /* input sg entries */ sg++; - qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg)); cpu_to_hw_sg(in_sg); /* 1st seg IV */ @@ -1028,7 +1024,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) sg++; memcpy(ctx->digest, sym->aead.digest.data, ses->digest_length); - qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest)); + qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest)); sg->length = ses->digest_length; } sg->final = 1; @@ -1066,7 +1062,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses) /* input */ rte_prefetch0(cf->sg); sg = &cf->sg[2]; - qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg)); if (is_encode(ses)) { qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr)); sg->length = ses->iv.length; @@ -1111,7 +1107,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses) ses->digest_length); sg++; - qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest)); + qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest)); sg->length = ses->digest_length; length += sg->length; sg->final = 1; @@ -1125,7 +1121,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses) /* output */ sg++; - qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg)); qm_sg_entry_set64(sg, dst_start_addr + sym->aead.data.offset - ses->auth_only_len); sg->length = sym->aead.data.length + ses->auth_only_len; @@ -1170,7 +1166,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) } if (req_segs > MAX_SG_ENTRIES) { - PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n", + DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d", MAX_SG_ENTRIES); return NULL; } @@ -1194,7 +1190,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) /* output sg entries */ sg = &cf->sg[2]; - qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg)); cpu_to_hw_sg(out_sg); /* 1st seg */ @@ -1236,7 +1232,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) /* input sg entries */ sg++; - qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg)); cpu_to_hw_sg(in_sg); /* 1st seg IV */ @@ -1266,7 +1262,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) sg++; memcpy(ctx->digest, sym->auth.digest.data, ses->digest_length); - qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest)); + qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest)); sg->length = ses->digest_length; } sg->final = 1; @@ -1303,7 +1299,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) /* input */ rte_prefetch0(cf->sg); sg = &cf->sg[2]; - qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg)); if (is_encode(ses)) { qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr)); sg->length = 
ses->iv.length; @@ -1333,7 +1329,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) ses->digest_length); sg++; - qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest)); + qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest)); sg->length = ses->digest_length; length += sg->length; sg->final = 1; @@ -1347,7 +1343,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) /* output */ sg++; - qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg)); qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset); sg->length = sym->cipher.data.length; length = sg->length; @@ -1422,7 +1418,6 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, struct rte_crypto_op *op; struct dpaa_sec_job *cf; dpaa_sec_session *ses; - struct dpaa_sec_op_ctx *ctx; uint32_t auth_only_len; struct qman_fq *inq[DPAA_SEC_BURST]; @@ -1434,7 +1429,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, switch (op->sess_type) { case RTE_CRYPTO_OP_WITH_SESSION: ses = (dpaa_sec_session *) - get_session_private_data( + get_sym_session_private_data( op->sym->session, cryptodev_driver_id); break; @@ -1444,15 +1439,15 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, op->sym->sec_session); break; default: - PMD_TX_LOG(ERR, + DPAA_SEC_DP_ERR( "sessionless crypto op not supported"); frames_to_send = loop; nb_ops = loop; goto send_pkts; } if (unlikely(!ses->qp || ses->qp != qp)) { - PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", - ses->qp, qp); + DPAA_SEC_DP_ERR("sess->qp - %p qp %p", + ses->qp, qp); if (dpaa_sec_attach_sess_q(qp, ses)) { frames_to_send = loop; nb_ops = loop; @@ -1475,7 +1470,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, } else if (is_proto_ipsec(ses)) { cf = build_proto(op, ses); } else { - PMD_TX_LOG(ERR, "not supported sec op"); + DPAA_SEC_DP_ERR("not supported ops"); frames_to_send = loop; nb_ops = loop; goto send_pkts; @@ -1491,7 +1486,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, } else if (is_auth_cipher(ses)) { cf = build_cipher_auth_sg(op, ses); } else { - PMD_TX_LOG(ERR, "not supported sec op"); + DPAA_SEC_DP_ERR("not supported ops"); frames_to_send = loop; nb_ops = loop; goto send_pkts; @@ -1507,8 +1502,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, inq[loop] = ses->inq; fd->opaque_addr = 0; fd->cmd = 0; - ctx = container_of(cf, struct dpaa_sec_op_ctx, job); - qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg)); + qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg)); fd->_format1 = qm_fd_compound; fd->length29 = 2 * sizeof(struct qm_sg_entry); /* Auth_only_len is set as 0 in descriptor and it is @@ -1547,7 +1541,7 @@ dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, dpaa_qp->rx_pkts += num_rx; dpaa_qp->rx_errs += nb_ops - num_rx; - PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx); + DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); return num_rx; } @@ -1562,11 +1556,11 @@ dpaa_sec_queue_pair_release(struct rte_cryptodev *dev, PMD_INIT_FUNC_TRACE(); - PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id); + DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id); internals = dev->data->dev_private; if (qp_id >= internals->max_nb_queue_pairs) { - PMD_INIT_LOG(ERR, "Max supported qpid %d", + DPAA_SEC_ERR("Max supported qpid %d", internals->max_nb_queue_pairs); return -EINVAL; } @@ -1588,12 +1582,11 @@ dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, struct dpaa_sec_dev_private *internals; struct 
dpaa_sec_qp *qp = NULL; - PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p", - dev, qp_id, qp_conf); + DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf); internals = dev->data->dev_private; if (qp_id >= internals->max_nb_queue_pairs) { - PMD_INIT_LOG(ERR, "Max supported qpid %d", + DPAA_SEC_ERR("Max supported qpid %d", internals->max_nb_queue_pairs); return -EINVAL; } @@ -1605,26 +1598,6 @@ dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, return 0; } -/** Start queue pair */ -static int -dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - PMD_INIT_FUNC_TRACE(); - - return 0; -} - -/** Stop queue pair */ -static int -dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - PMD_INIT_FUNC_TRACE(); - - return 0; -} - /** Return the number of allocated queue pairs */ static uint32_t dpaa_sec_queue_pair_count(struct rte_cryptodev *dev) @@ -1636,7 +1609,7 @@ dpaa_sec_queue_pair_count(struct rte_cryptodev *dev) /** Returns the size of session structure */ static unsigned int -dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused) +dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { PMD_INIT_FUNC_TRACE(); @@ -1654,7 +1627,7 @@ dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused, session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, RTE_CACHE_LINE_SIZE); if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) { - PMD_INIT_LOG(ERR, "No Memory for cipher key\n"); + DPAA_SEC_ERR("No Memory for cipher key"); return -ENOMEM; } session->cipher_key.length = xform->cipher.key.length; @@ -1676,7 +1649,7 @@ dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused, session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, RTE_CACHE_LINE_SIZE); if (session->auth_key.data == NULL && xform->auth.key.length > 0) { - PMD_INIT_LOG(ERR, "No Memory for auth key\n"); + DPAA_SEC_ERR("No Memory for auth key"); return -ENOMEM; } session->auth_key.length = xform->auth.key.length; @@ -1702,7 +1675,7 @@ dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused, session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length, RTE_CACHE_LINE_SIZE); if (session->aead_key.data == NULL && xform->aead.key.length > 0) { - PMD_INIT_LOG(ERR, "No Memory for aead key\n"); + DPAA_SEC_ERR("No Memory for aead key\n"); return -ENOMEM; } session->aead_key.length = xform->aead.key.length; @@ -1727,7 +1700,7 @@ dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi) return &qi->inq[i]; } } - PMD_DRV_LOG(ERR, "All ses session in use %x", qi->max_nb_sessions); + DPAA_SEC_WARN("All ses session in use %x", qi->max_nb_sessions); return NULL; } @@ -1756,46 +1729,24 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess) sess->qp = qp; ret = dpaa_sec_prep_cdb(sess); if (ret) { - PMD_DRV_LOG(ERR, "Unable to prepare sec cdb"); + DPAA_SEC_ERR("Unable to prepare sec cdb"); return -1; } - + if (unlikely(!RTE_PER_LCORE(dpaa_io))) { + ret = rte_dpaa_portal_init((void *)0); + if (ret) { + DPAA_SEC_ERR("Failure in affining portal"); + return ret; + } + } ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb), qman_fq_fqid(&qp->outq)); if (ret) - PMD_DRV_LOG(ERR, "Unable to init sec queue"); + DPAA_SEC_ERR("Unable to init sec queue"); return ret; } -static int -dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused, - uint16_t qp_id __rte_unused, - void *ses __rte_unused) -{ - PMD_INIT_FUNC_TRACE(); - 
return 0; -} - -static int -dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, - uint16_t qp_id __rte_unused, - void *ses) -{ - dpaa_sec_session *sess = ses; - struct dpaa_sec_dev_private *qi = dev->data->dev_private; - - PMD_INIT_FUNC_TRACE(); - - if (sess->inq) - dpaa_sec_detach_rxq(qi, sess->inq); - sess->inq = NULL; - - sess->qp = NULL; - - return 0; -} - static int dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, void *sess) @@ -1806,7 +1757,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, PMD_INIT_FUNC_TRACE(); if (unlikely(sess == NULL)) { - RTE_LOG(ERR, PMD, "invalid session struct\n"); + DPAA_SEC_ERR("invalid session struct"); return -EINVAL; } @@ -1831,7 +1782,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, dpaa_sec_cipher_init(dev, xform, session); dpaa_sec_auth_init(dev, xform->next, session); } else { - PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher"); + DPAA_SEC_ERR("Not supported: Auth then Cipher"); return -EINVAL; } @@ -1842,7 +1793,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, dpaa_sec_auth_init(dev, xform, session); dpaa_sec_cipher_init(dev, xform->next, session); } else { - PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher"); + DPAA_SEC_ERR("Not supported: Auth then Cipher"); return -EINVAL; } @@ -1852,13 +1803,13 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, dpaa_sec_aead_init(dev, xform, session); } else { - PMD_DRV_LOG(ERR, "Invalid crypto type"); + DPAA_SEC_ERR("Invalid crypto type"); return -EINVAL; } session->ctx_pool = internals->ctx_pool; session->inq = dpaa_sec_attach_rxq(internals); if (session->inq == NULL) { - PMD_DRV_LOG(ERR, "unable to attach sec queue"); + DPAA_SEC_ERR("unable to attach sec queue"); goto err1; } @@ -1873,7 +1824,7 @@ err1: } static int -dpaa_sec_session_configure(struct rte_cryptodev *dev, +dpaa_sec_sym_session_configure(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -1884,22 +1835,20 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev, PMD_INIT_FUNC_TRACE(); if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + DPAA_SEC_ERR("Couldn't get object from session mempool"); return -ENOMEM; } ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data); if (ret != 0) { - PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure " - "session parameters"); + DPAA_SEC_ERR("failed to configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); @@ -1908,12 +1857,12 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev, /** Clear the memory of session so it doesn't leave key material behind */ static void -dpaa_sec_session_clear(struct rte_cryptodev *dev, +dpaa_sec_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { struct dpaa_sec_dev_private *qi = dev->data->dev_private; uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); PMD_INIT_FUNC_TRACE(); @@ -1927,7 +1876,7 @@ dpaa_sec_session_clear(struct rte_cryptodev *dev, rte_free(s->cipher_key.data); rte_free(s->auth_key.data); memset(s, 0, sizeof(dpaa_sec_session)); - set_session_private_data(sess, index, NULL); + 
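/*
 * [Editor's sketch] dpaa_sec_set_session_parameters() above accepts at
 * most two chained xforms and only the cipher-then-auth order. The
 * classification it performs, written out stand-alone (enum names are
 * hypothetical):
 */
#include <stddef.h>
#include <rte_crypto_sym.h>

enum example_chain {
	EX_CIPHER_ONLY, EX_AUTH_ONLY, EX_CIPHER_AUTH,
	EX_AUTH_CIPHER, EX_AEAD, EX_UNSUPPORTED
};

static enum example_chain
example_classify(const struct rte_crypto_sym_xform *xf)
{
	if (xf == NULL || (xf->next != NULL && xf->next->next != NULL))
		return EX_UNSUPPORTED;	/* empty, or longer than two */
	if (xf->next == NULL) {
		if (xf->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return EX_CIPHER_ONLY;
		if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return EX_AUTH_ONLY;
		if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD)
			return EX_AEAD;
		return EX_UNSUPPORTED;
	}
	if (xf->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xf->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return EX_CIPHER_AUTH;
	if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xf->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return EX_AUTH_CIPHER;	/* rejected by the driver above */
	return EX_UNSUPPORTED;
}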
set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -1958,7 +1907,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, RTE_CACHE_LINE_SIZE); if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { - RTE_LOG(ERR, PMD, "No Memory for cipher key\n"); + DPAA_SEC_ERR("No Memory for cipher key"); return -ENOMEM; } @@ -1968,7 +1917,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, RTE_CACHE_LINE_SIZE); if (session->auth_key.data == NULL && auth_xform->key.length > 0) { - RTE_LOG(ERR, PMD, "No Memory for auth key\n"); + DPAA_SEC_ERR("No Memory for auth key"); rte_free(session->cipher_key.data); return -ENOMEM; } @@ -2013,11 +1962,11 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, case RTE_CRYPTO_AUTH_KASUMI_F9: case RTE_CRYPTO_AUTH_AES_CBC_MAC: case RTE_CRYPTO_AUTH_ZUC_EIA3: - RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n", + DPAA_SEC_ERR("Crypto: Unsupported auth alg %u", auth_xform->algo); goto out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n", + DPAA_SEC_ERR("Crypto: Undefined Auth specified %u", auth_xform->algo); goto out; } @@ -2037,11 +1986,11 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, case RTE_CRYPTO_CIPHER_3DES_ECB: case RTE_CRYPTO_CIPHER_AES_ECB: case RTE_CRYPTO_CIPHER_KASUMI_F8: - RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n", + DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u", cipher_xform->algo); goto out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n", + DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", cipher_xform->algo); goto out; } @@ -2086,7 +2035,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, session->ctx_pool = internals->ctx_pool; session->inq = dpaa_sec_attach_rxq(internals); if (session->inq == NULL) { - PMD_DRV_LOG(ERR, "unable to attach sec queue"); + DPAA_SEC_ERR("unable to attach sec queue"); goto out; } @@ -2110,8 +2059,7 @@ dpaa_sec_security_session_create(void *dev, int ret; if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + DPAA_SEC_ERR("Couldn't get object from session mempool"); return -ENOMEM; } @@ -2126,9 +2074,7 @@ dpaa_sec_security_session_create(void *dev, return -EINVAL; } if (ret != 0) { - PMD_DRV_LOG(ERR, - "DPAA2 PMD: failed to configure session parameters"); - + DPAA_SEC_ERR("failed to configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; @@ -2163,11 +2109,32 @@ dpaa_sec_security_session_destroy(void *dev __rte_unused, static int -dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, +dpaa_sec_dev_configure(struct rte_cryptodev *dev, struct rte_cryptodev_config *config __rte_unused) { + + char str[20]; + struct dpaa_sec_dev_private *internals; + PMD_INIT_FUNC_TRACE(); + internals = dev->data->dev_private; + sprintf(str, "ctx_pool_%d", dev->data->dev_id); + if (!internals->ctx_pool) { + internals->ctx_pool = rte_mempool_create((const char *)str, + CTX_POOL_NUM_BUFS, + CTX_POOL_BUF_SIZE, + CTX_POOL_CACHE_SIZE, 0, + NULL, NULL, NULL, NULL, + SOCKET_ID_ANY, 0); + if (!internals->ctx_pool) { + DPAA_SEC_ERR("%s create failed\n", str); + return -ENOMEM; + } + } else + DPAA_SEC_INFO("mempool already created for dev_id : %d", + dev->data->dev_id); + return 0; } @@ -2185,9 +2152,19 @@ dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused) } static int -dpaa_sec_dev_close(struct 
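/*
 * [Editor's sketch] The renamed dpaa_sec_sym_session_configure() above
 * follows the standard contract for this callback: take the private
 * area from the caller-supplied mempool, hand it back on any failure,
 * attach it to the session on success. Reduced form, with the xform
 * handling folded into a hypothetical example_set_params():
 */
#include <errno.h>
#include <rte_cryptodev_pmd.h>
#include <rte_mempool.h>

static int example_set_params(void *priv) { (void)priv; return 0; }

static int
example_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mp)
{
	void *priv;

	if (rte_mempool_get(mp, &priv))
		return -ENOMEM;
	if (example_set_params(priv) != 0) {
		rte_mempool_put(mp, priv);	/* never leak on error */
		return -EINVAL;
	}
	set_sym_session_private_data(sess, dev->driver_id, priv);
	return 0;
}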
rte_cryptodev *dev __rte_unused) +dpaa_sec_dev_close(struct rte_cryptodev *dev) { + struct dpaa_sec_dev_private *internals; + PMD_INIT_FUNC_TRACE(); + + if (dev == NULL) + return -ENOMEM; + + internals = dev->data->dev_private; + rte_mempool_free(internals->ctx_pool); + internals->ctx_pool = NULL; + return 0; } @@ -2203,9 +2180,6 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev, info->feature_flags = dev->feature_flags; info->capabilities = dpaa_sec_capabilities; info->sym.max_nb_sessions = internals->max_nb_sessions; - info->sym.max_nb_sessions_per_qp = - RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / - RTE_DPAA_MAX_NB_SEC_QPS; info->driver_id = cryptodev_driver_id; } } @@ -2218,14 +2192,10 @@ static struct rte_cryptodev_ops crypto_ops = { .dev_infos_get = dpaa_sec_dev_infos_get, .queue_pair_setup = dpaa_sec_queue_pair_setup, .queue_pair_release = dpaa_sec_queue_pair_release, - .queue_pair_start = dpaa_sec_queue_pair_start, - .queue_pair_stop = dpaa_sec_queue_pair_stop, .queue_pair_count = dpaa_sec_queue_pair_count, - .session_get_size = dpaa_sec_session_get_size, - .session_configure = dpaa_sec_session_configure, - .session_clear = dpaa_sec_session_clear, - .qp_attach_session = dpaa_sec_qp_attach_sess, - .qp_detach_session = dpaa_sec_qp_detach_sess, + .sym_session_get_size = dpaa_sec_sym_session_get_size, + .sym_session_configure = dpaa_sec_sym_session_configure, + .sym_session_clear = dpaa_sec_sym_session_clear }; static const struct rte_security_capability * @@ -2246,18 +2216,20 @@ struct rte_security_ops dpaa_sec_security_ops = { static int dpaa_sec_uninit(struct rte_cryptodev *dev) { - struct dpaa_sec_dev_private *internals = dev->data->dev_private; + struct dpaa_sec_dev_private *internals; if (dev == NULL) return -ENODEV; + internals = dev->data->dev_private; rte_free(dev->security_ctx); + /* In case close has been called, internals->ctx_pool would be NULL */ rte_mempool_free(internals->ctx_pool); rte_free(internals); - PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n", - dev->data->name, rte_socket_id()); + DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u", + dev->data->name, rte_socket_id()); return 0; } @@ -2270,7 +2242,6 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) struct dpaa_sec_qp *qp; uint32_t i, flags; int ret; - char str[20]; PMD_INIT_FUNC_TRACE(); @@ -2283,7 +2254,11 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) RTE_CRYPTODEV_FF_HW_ACCELERATED | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | RTE_CRYPTODEV_FF_SECURITY | - RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER; + RTE_CRYPTODEV_FF_IN_PLACE_SGL | + RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | + RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | + RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | + RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; internals = cryptodev->data->dev_private; internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS; @@ -2295,7 +2270,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) * RX function */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - PMD_INIT_LOG(DEBUG, "Device already init by primary process"); + DPAA_SEC_WARN("Device already init by primary process"); return 0; } @@ -2314,7 +2289,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) qp = &internals->qps[i]; ret = dpaa_sec_init_tx(&qp->outq); if (ret) { - PMD_INIT_LOG(ERR, "config tx of queue pair %d", i); + DPAA_SEC_ERR("config tx of queue pair %d", i); goto init_error; } } @@ -2325,28 +2300,16 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) /* create rx qman fq for sessions*/ ret = qman_create_fq(0, flags, &internals->inq[i]); if 
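/*
 * [Editor's sketch] dpaa_sec_dev_configure() and dpaa_sec_dev_close()
 * above move the per-device ctx_pool to a create-on-configure,
 * free-on-close lifecycle. The same pattern stand-alone; the sizes are
 * placeholders for the driver's CTX_POOL_* constants, and snprintf() is
 * used here as the bounds-checked alternative to the patch's sprintf()
 * into a 20-byte buffer:
 */
#include <stdio.h>
#include <errno.h>
#include <rte_mempool.h>

static struct rte_mempool *example_ctx_pool; /* per-device in the driver */

static int
example_configure(int dev_id)
{
	char name[RTE_MEMPOOL_NAMESIZE];

	if (example_ctx_pool != NULL)
		return 0;		/* already created for this device */
	snprintf(name, sizeof(name), "ctx_pool_%d", dev_id);
	example_ctx_pool = rte_mempool_create(name, 32000, 512, 512, 0,
			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	return example_ctx_pool != NULL ? 0 : -ENOMEM;
}

static void
example_close(void)
{
	rte_mempool_free(example_ctx_pool);	/* NULL-safe */
	example_ctx_pool = NULL;
}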
(unlikely(ret != 0)) { - PMD_INIT_LOG(ERR, "sec qman_create_fq failed"); + DPAA_SEC_ERR("sec qman_create_fq failed"); goto init_error; } } - sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id); - internals->ctx_pool = rte_mempool_create((const char *)str, - CTX_POOL_NUM_BUFS, - CTX_POOL_BUF_SIZE, - CTX_POOL_CACHE_SIZE, 0, - NULL, NULL, NULL, NULL, - SOCKET_ID_ANY, 0); - if (!internals->ctx_pool) { - RTE_LOG(ERR, PMD, "%s create failed\n", str); - goto init_error; - } - - PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name); + RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name); return 0; init_error: - PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name); + DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name); dpaa_sec_uninit(cryptodev); return -EFAULT; @@ -2445,5 +2408,12 @@ static struct rte_dpaa_driver rte_dpaa_sec_driver = { static struct cryptodev_driver dpaa_sec_crypto_drv; RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver); -RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver, +RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver, cryptodev_driver_id); + +RTE_INIT(dpaa_sec_init_log) +{ + dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa"); + if (dpaa_logtype_sec >= 0) + rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE); +} diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h index f45b36cb..ac6c00a6 100644 --- a/drivers/crypto/dpaa_sec/dpaa_sec.h +++ b/drivers/crypto/dpaa_sec/dpaa_sec.h @@ -7,6 +7,9 @@ #ifndef _DPAA_SEC_H_ #define _DPAA_SEC_H_ +#define CRYPTODEV_NAME_DPAA_SEC_PMD crypto_dpaa_sec +/**< NXP DPAA - SEC PMD device name */ + #define NUM_POOL_CHANNELS 4 #define DPAA_SEC_BURST 7 #define DPAA_SEC_ALG_UNSUPPORT (-1) @@ -23,6 +26,7 @@ #define CTX_POOL_NUM_BUFS 32000 #define CTX_POOL_BUF_SIZE sizeof(struct dpaa_sec_op_ctx) #define CTX_POOL_CACHE_SIZE 512 +#define RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS 2048 #define DIR_ENC 1 #define DIR_DEC 0 @@ -133,7 +137,7 @@ struct dpaa_sec_qp { int tx_errs; }; -#define RTE_DPAA_MAX_NB_SEC_QPS 1 +#define RTE_DPAA_MAX_NB_SEC_QPS 8 #define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS #define DPAA_MAX_DEQUEUE_NUM_FRAMES 63 @@ -182,10 +186,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 16, + .min = 1, .max = 16, - .increment = 0 + .increment = 1 }, + .iv_size = { 0 } }, } }, } }, @@ -202,10 +207,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 20, + .min = 1, .max = 20, - .increment = 0 + .increment = 1 }, + .iv_size = { 0 } }, } }, } }, @@ -222,10 +228,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 28, + .min = 1, .max = 28, - .increment = 0 + .increment = 1 }, + .iv_size = { 0 } }, } }, } }, @@ -242,10 +249,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 32, + .min = 1, .max = 32, - .increment = 0 + .increment = 1 }, + .iv_size = { 0 } }, } }, } }, @@ -262,10 +270,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 48, + .min = 1, .max = 48, - .increment = 0 + .increment = 1 }, + .iv_size = { 0 } }, } }, } }, @@ -282,10 +291,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = { 
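/*
 * [Editor's sketch] RTE_INIT(dpaa_sec_init_log) above registers the
 * dynamic "pmd.crypto.dpaa" logtype from a constructor that runs before
 * main(). The generic shape of that pattern:
 */
#include <rte_common.h>
#include <rte_log.h>

static int example_logtype;

RTE_INIT(example_init_log)
{
	example_logtype = rte_log_register("pmd.crypto.example");
	if (example_logtype >= 0)
		rte_log_set_level(example_logtype, RTE_LOG_NOTICE);
}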
.increment = 1 }, .digest_size = { - .min = 64, + .min = 1, .max = 64, - .increment = 0 + .increment = 1 }, + .iv_size = { 0 } }, } }, } }, diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_log.h b/drivers/crypto/dpaa_sec/dpaa_sec_log.h index 992a79f5..fb895a8b 100644 --- a/drivers/crypto/dpaa_sec/dpaa_sec_log.h +++ b/drivers/crypto/dpaa_sec/dpaa_sec_log.h @@ -1,44 +1,43 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. - * Copyright NXP 2017. + * Copyright 2017-2018 NXP * */ #ifndef _DPAA_SEC_LOG_H_ #define _DPAA_SEC_LOG_H_ -#define PMD_INIT_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args) - -#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_INIT -#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") -#else -#define PMD_INIT_FUNC_TRACE() do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_RX -#define PMD_RX_LOG(level, fmt, args...) \ - RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_RX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_TX -#define PMD_TX_LOG(level, fmt, args...) \ - RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_TX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER -#define PMD_DRV_LOG_RAW(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) -#else -#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) -#endif - -#define PMD_DRV_LOG(level, fmt, args...) \ - PMD_DRV_LOG_RAW(level, fmt "\n", ## args) +extern int dpaa_logtype_sec; + +#define DPAA_SEC_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, dpaa_logtype_sec, "dpaa_sec: " \ + fmt "\n", ##args) + +#define DPAA_SEC_DEBUG(fmt, args...) \ + rte_log(RTE_LOG_DEBUG, dpaa_logtype_sec, "dpaa_sec: %s(): " \ + fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() DPAA_SEC_DEBUG(" >>") + +#define DPAA_SEC_INFO(fmt, args...) \ + DPAA_SEC_LOG(INFO, fmt, ## args) +#define DPAA_SEC_ERR(fmt, args...) \ + DPAA_SEC_LOG(ERR, fmt, ## args) +#define DPAA_SEC_WARN(fmt, args...) \ + DPAA_SEC_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define DPAA_SEC_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#define DPAA_SEC_DP_DEBUG(fmt, args...) \ + DPAA_SEC_DP_LOG(DEBUG, fmt, ## args) +#define DPAA_SEC_DP_INFO(fmt, args...) \ + DPAA_SEC_DP_LOG(INFO, fmt, ## args) +#define DPAA_SEC_DP_WARN(fmt, args...) \ + DPAA_SEC_DP_LOG(WARNING, fmt, ## args) +#define DPAA_SEC_DP_ERR(fmt, args...) 
\ + DPAA_SEC_DP_LOG(ERR, fmt, ## args) #endif /* _DPAA_SEC_LOG_H_ */ diff --git a/drivers/crypto/dpaa_sec/meson.build b/drivers/crypto/dpaa_sec/meson.build new file mode 100644 index 00000000..8a570984 --- /dev/null +++ b/drivers/crypto/dpaa_sec/meson.build @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if host_machine.system() != 'linux' + build = false +endif + +deps += ['bus_dpaa', 'security'] +sources = files('dpaa_sec.c') + +allow_experimental_apis = true + +includes += include_directories('../dpaa2_sec/') diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c index 356621d4..239a1cf4 100644 --- a/drivers/crypto/kasumi/rte_kasumi_pmd.c +++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #include @@ -79,18 +79,20 @@ kasumi_set_session_parameters(struct kasumi_session *sess, break; case KASUMI_OP_NOT_SUPPORTED: default: - KASUMI_LOG_ERR("Unsupported operation chain order parameter"); + KASUMI_LOG(ERR, "Unsupported operation chain order parameter"); return -ENOTSUP; } if (cipher_xform) { /* Only KASUMI F8 supported */ - if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) + if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) { + KASUMI_LOG(ERR, "Unsupported cipher algorithm "); return -ENOTSUP; + } sess->cipher_iv_offset = cipher_xform->cipher.iv.offset; if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) { - KASUMI_LOG_ERR("Wrong IV length"); + KASUMI_LOG(ERR, "Wrong IV length"); return -EINVAL; } @@ -101,11 +103,13 @@ kasumi_set_session_parameters(struct kasumi_session *sess, if (auth_xform) { /* Only KASUMI F9 supported */ - if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) + if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) { + KASUMI_LOG(ERR, "Unsupported authentication"); return -ENOTSUP; + } if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) { - KASUMI_LOG_ERR("Wrong digest length"); + KASUMI_LOG(ERR, "Wrong digest length"); return -EINVAL; } @@ -131,7 +135,7 @@ kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op) if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { if (likely(op->sym->session != NULL)) sess = (struct kasumi_session *) - get_session_private_data( + get_sym_session_private_data( op->sym->session, cryptodev_driver_id); } else { @@ -153,8 +157,8 @@ kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op) sess = NULL; } op->sym->session = (struct rte_cryptodev_sym_session *)_sess; - set_session_private_data(op->sym->session, cryptodev_driver_id, - _sess_private_data); + set_sym_session_private_data(op->sym->session, + cryptodev_driver_id, _sess_private_data); } if (unlikely(sess == NULL)) @@ -213,7 +217,7 @@ process_kasumi_cipher_op_bit(struct rte_crypto_op *op, src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *); if (op->sym->m_dst == NULL) { op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - KASUMI_LOG_ERR("bit-level in-place not supported\n"); + KASUMI_LOG(ERR, "bit-level in-place not supported"); return 0; } dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *); @@ -244,7 +248,7 @@ process_kasumi_hash_op(struct kasumi_qp *qp, struct rte_crypto_op **ops, /* Data must be byte aligned */ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) { ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - KASUMI_LOG_ERR("offset"); + KASUMI_LOG(ERR, "Invalid Offset"); break; } @@ -320,7 +324,7 @@ 
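/*
 * [Editor's sketch] Usage of the two macro families dpaa_sec_log.h now
 * provides (assuming that header is included): the DPAA_SEC_* macros are
 * always compiled in and filtered at run time through the registered
 * logtype, while the DPAA_SEC_DP_* macros sit on RTE_LOG_DP() and are
 * compiled out entirely when their level is above RTE_LOG_DP_LEVEL:
 */
static void
example_log_usage(int err)
{
	DPAA_SEC_ERR("control path failure: %d", err);	 /* runtime-filtered */
	DPAA_SEC_DP_DEBUG("fast path event: %d\n", err); /* build-time gated */
}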
process_ops(struct rte_crypto_op **ops, struct kasumi_session *session, if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { memset(session, 0, sizeof(struct kasumi_session)); memset(ops[i]->sym->session, 0, - rte_cryptodev_get_header_session_size()); + rte_cryptodev_sym_get_header_session_size()); rte_mempool_put(qp->sess_mp, session); rte_mempool_put(qp->sess_mp, ops[i]->sym->session); ops[i]->sym->session = NULL; @@ -409,9 +413,9 @@ kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, (curr_c_op->sym->m_dst != NULL && !rte_pktmbuf_is_contiguous( curr_c_op->sym->m_dst))) { - KASUMI_LOG_ERR("PMD supports only contiguous mbufs, " + KASUMI_LOG(ERR, "PMD supports only contiguous mbufs, " "op (%p) provides noncontiguous mbuf as " - "source/destination buffer.\n", curr_c_op); + "source/destination buffer.", curr_c_op); curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; break; } @@ -531,7 +535,7 @@ cryptodev_kasumi_create(const char *name, dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); if (dev == NULL) { - KASUMI_LOG_ERR("failed to create cryptodev vdev"); + KASUMI_LOG(ERR, "failed to create cryptodev vdev"); goto init_error; } @@ -555,11 +559,10 @@ cryptodev_kasumi_create(const char *name, internals = dev->data->dev_private; internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; return 0; init_error: - KASUMI_LOG_ERR("driver %s: cryptodev_kasumi_create failed", + KASUMI_LOG(ERR, "driver %s: failed", init_params->name); cryptodev_kasumi_remove(vdev); @@ -573,8 +576,7 @@ cryptodev_kasumi_probe(struct rte_vdev_device *vdev) "", sizeof(struct kasumi_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name; const char *input_args; @@ -617,7 +619,11 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv); RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id="); -RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv, cryptodev_kasumi_pmd_drv, - cryptodev_driver_id); +RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv, + cryptodev_kasumi_pmd_drv.driver, cryptodev_driver_id); + +RTE_INIT(kasumi_init_log) +{ + kasumi_logtype_driver = rte_log_register("pmd.crypto.kasumi"); +} diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c index a388dbb6..9e4bf1b5 100644 --- a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c +++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #include @@ -126,7 +126,8 @@ kasumi_pmd_info_get(struct rte_cryptodev *dev, if (dev_info != NULL) { dev_info->driver_id = dev->driver_id; dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; dev_info->feature_flags = dev->feature_flags; dev_info->capabilities = kasumi_pmd_capabilities; } @@ -171,13 +172,13 @@ kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp, r = rte_ring_lookup(qp->name); if (r) { if (rte_ring_get_size(r) == ring_size) { - KASUMI_LOG_INFO("Reusing existing ring %s" + KASUMI_LOG(INFO, "Reusing existing 
ring %s" " for processed packets", qp->name); return r; } - KASUMI_LOG_ERR("Unable to reuse existing ring %s" + KASUMI_LOG(ERR, "Unable to reuse existing ring %s" " for processed packets", qp->name); return NULL; @@ -228,22 +229,6 @@ qp_setup_cleanup: return -1; } -/** Start queue pair */ -static int -kasumi_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -kasumi_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - /** Return the number of allocated queue pairs */ static uint32_t kasumi_pmd_qp_count(struct rte_cryptodev *dev) @@ -253,14 +238,14 @@ kasumi_pmd_qp_count(struct rte_cryptodev *dev) /** Returns the size of the KASUMI session structure */ static unsigned -kasumi_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +kasumi_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct kasumi_session); } /** Configure a KASUMI session from a crypto xform chain */ static int -kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, +kasumi_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -269,26 +254,26 @@ kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, int ret; if (unlikely(sess == NULL)) { - KASUMI_LOG_ERR("invalid session struct"); + KASUMI_LOG(ERR, "invalid session struct"); return -EINVAL; } if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + KASUMI_LOG(ERR, + "Couldn't get object from session mempool"); return -ENOMEM; } ret = kasumi_set_session_parameters(sess_private_data, xform); if (ret != 0) { - KASUMI_LOG_ERR("failed configure session parameters"); + KASUMI_LOG(ERR, "failed configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -296,17 +281,17 @@ kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, /** Clear the memory of session so it doesn't leave key material behind */ static void -kasumi_pmd_session_clear(struct rte_cryptodev *dev, +kasumi_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { memset(sess_priv, 0, sizeof(struct kasumi_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -324,13 +309,11 @@ struct rte_cryptodev_ops kasumi_pmd_ops = { .queue_pair_setup = kasumi_pmd_qp_setup, .queue_pair_release = kasumi_pmd_qp_release, - .queue_pair_start = kasumi_pmd_qp_start, - .queue_pair_stop = kasumi_pmd_qp_stop, .queue_pair_count = kasumi_pmd_qp_count, - .session_get_size = kasumi_pmd_session_get_size, - .session_configure = kasumi_pmd_session_configure, - .session_clear = kasumi_pmd_session_clear + .sym_session_get_size = kasumi_pmd_sym_session_get_size, + .sym_session_configure = kasumi_pmd_sym_session_configure, + 
.sym_session_clear = kasumi_pmd_sym_session_clear }; struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops; diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h index a397bee6..488777ca 100644 --- a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h +++ b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #ifndef _RTE_KASUMI_PMD_PRIVATE_H_ @@ -10,25 +10,13 @@ #define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi /**< KASUMI PMD device name */ -#define KASUMI_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \ - __func__, __LINE__, ## args) +/** KASUMI PMD LOGTYPE DRIVER */ +int kasumi_logtype_driver; -#ifdef RTE_LIBRTE_KASUMI_DEBUG -#define KASUMI_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \ - __func__, __LINE__, ## args) - -#define KASUMI_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \ - __func__, __LINE__, ## args) -#else -#define KASUMI_LOG_INFO(fmt, args...) -#define KASUMI_LOG_DBG(fmt, args...) -#endif +#define KASUMI_LOG(level, fmt, ...) \ + rte_log(RTE_LOG_ ## level, kasumi_logtype_driver, \ + "%s() line %u: " fmt "\n", __func__, __LINE__, \ + ## __VA_ARGS__) #define KASUMI_DIGEST_LENGTH 4 @@ -36,8 +24,6 @@ struct kasumi_private { unsigned max_nb_queue_pairs; /**< Max number of queue pairs supported by device */ - unsigned max_nb_sessions; - /**< Max number of sessions supported by device */ }; /** KASUMI buffer queue pair */ diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build index 17041ad8..d64ca418 100644 --- a/drivers/crypto/meson.build +++ b/drivers/crypto/meson.build @@ -1,7 +1,9 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation -drivers = ['qat', 'null', 'openssl'] +drivers = ['ccp', 'dpaa_sec', 'dpaa2_sec', 'mvsam', + 'null', 'openssl', 'qat', 'virtio'] + std_deps = ['cryptodev'] # cryptodev pulls in all other needed deps config_flag_fmt = 'RTE_LIBRTE_@0@_PMD' driver_name_fmt = 'rte_pmd_@0@' diff --git a/drivers/crypto/mrvl/Makefile b/drivers/crypto/mrvl/Makefile deleted file mode 100644 index bc5c2270..00000000 --- a/drivers/crypto/mrvl/Makefile +++ /dev/null @@ -1,67 +0,0 @@ -# BSD LICENSE -# -# Copyright(c) 2017 Marvell International Ltd. -# Copyright(c) 2017 Semihalf. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. 
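/*
 * [Editor's sketch] Both PMDs' sym_session_clear callbacks above apply
 * the same hygiene rule: zero the private area before returning it to
 * its mempool so no key material lingers. Stand-alone form
 * (struct example_session is hypothetical):
 */
#include <string.h>
#include <rte_cryptodev_pmd.h>
#include <rte_mempool.h>

struct example_session { uint8_t key[32]; };

static void
example_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *priv = get_sym_session_private_data(sess, index);

	if (priv != NULL) {
		memset(priv, 0, sizeof(struct example_session));
		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(rte_mempool_from_obj(priv), priv);
	}
}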
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -include $(RTE_SDK)/mk/rte.vars.mk - -ifneq ($(MAKECMDGOALS),clean) -ifneq ($(MAKECMDGOALS),config) -ifeq ($(LIBMUSDK_PATH),) -$(error "Please define LIBMUSDK_PATH environment variable") -endif -endif -endif - -# library name -LIB = librte_pmd_mrvl_crypto.a - -# build flags -CFLAGS += -O3 -CFLAGS += $(WERROR_FLAGS) -CFLAGS += -I$(LIBMUSDK_PATH)/include -CFLAGS += -DMVCONF_TYPES_PUBLIC -CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC - -# library version -LIBABIVER := 1 - -# versioning export map -EXPORT_MAP := rte_pmd_mrvl_version.map - -# external library dependencies -LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk -LDLIBS += -lrte_bus_vdev - -# library source files -SRCS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += rte_mrvl_pmd.c -SRCS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += rte_mrvl_pmd_ops.c - -include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/crypto/mrvl/rte_mrvl_compat.h b/drivers/crypto/mrvl/rte_mrvl_compat.h deleted file mode 100644 index 22cd1840..00000000 --- a/drivers/crypto/mrvl/rte_mrvl_compat.h +++ /dev/null @@ -1,51 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
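/*
 * [Editor's note] The rte_mrvl_compat.h deletion just below carries one
 * reusable trick: when two stacks (here DPDK and MUSDK) both define
 * container_of, keep exactly one definition by dropping the current one
 * before pulling in the other headers:
 */
#ifdef container_of
#undef container_of	/* prefer the definition the next include provides */
#endif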
- */ - -#ifndef _RTE_MRVL_COMPAT_H_ -#define _RTE_MRVL_COMPAT_H_ - -/* Unluckily, container_of is defined by both DPDK and MUSDK, - * we'll declare only one version. - * - * Note that it is not used in this PMD anyway. - */ -#ifdef container_of -#undef container_of -#endif -#include "env/mv_autogen_comp_flags.h" -#include "drivers/mv_sam.h" -#include "drivers/mv_sam_cio.h" -#include "drivers/mv_sam_session.h" - -#endif /* _RTE_MRVL_COMPAT_H_ */ diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd.c b/drivers/crypto/mrvl/rte_mrvl_pmd.c deleted file mode 100644 index 31f3fe58..00000000 --- a/drivers/crypto/mrvl/rte_mrvl_pmd.c +++ /dev/null @@ -1,857 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "rte_mrvl_pmd_private.h" - -#define MRVL_MUSDK_DMA_MEMSIZE 41943040 - -static uint8_t cryptodev_driver_id; - -/** - * Flag if particular crypto algorithm is supported by PMD/MUSDK. - * - * The idea is to have Not Supported value as default (0). - * This way we need only to define proper map sizes, - * non-initialized entries will be by default not supported. - */ -enum algo_supported { - ALGO_NOT_SUPPORTED = 0, - ALGO_SUPPORTED = 1, -}; - -/** Map elements for cipher mapping.*/ -struct cipher_params_mapping { - enum algo_supported supported; /**< On/Off switch */ - enum sam_cipher_alg cipher_alg; /**< Cipher algorithm */ - enum sam_cipher_mode cipher_mode; /**< Cipher mode */ - unsigned int max_key_len; /**< Maximum key length (in bytes)*/ -} -/* We want to squeeze in multiple maps into the cache line. */ -__rte_aligned(32); - -/** Map elements for auth mapping.*/ -struct auth_params_mapping { - enum algo_supported supported; /**< On/off switch */ - enum sam_auth_alg auth_alg; /**< Auth algorithm */ -} -/* We want to squeeze in multiple maps into the cache line. */ -__rte_aligned(32); - -/** - * Map of supported cipher algorithms. 
- */ -static const -struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = { - [RTE_CRYPTO_CIPHER_3DES_CBC] = { - .supported = ALGO_SUPPORTED, - .cipher_alg = SAM_CIPHER_3DES, - .cipher_mode = SAM_CIPHER_CBC, - .max_key_len = BITS2BYTES(192) }, - [RTE_CRYPTO_CIPHER_3DES_CTR] = { - .supported = ALGO_SUPPORTED, - .cipher_alg = SAM_CIPHER_3DES, - .cipher_mode = SAM_CIPHER_CTR, - .max_key_len = BITS2BYTES(192) }, - [RTE_CRYPTO_CIPHER_3DES_ECB] = { - .supported = ALGO_SUPPORTED, - .cipher_alg = SAM_CIPHER_3DES, - .cipher_mode = SAM_CIPHER_ECB, - .max_key_len = BITS2BYTES(192) }, - [RTE_CRYPTO_CIPHER_AES_CBC] = { - .supported = ALGO_SUPPORTED, - .cipher_alg = SAM_CIPHER_AES, - .cipher_mode = SAM_CIPHER_CBC, - .max_key_len = BITS2BYTES(256) }, - [RTE_CRYPTO_CIPHER_AES_CTR] = { - .supported = ALGO_SUPPORTED, - .cipher_alg = SAM_CIPHER_AES, - .cipher_mode = SAM_CIPHER_CTR, - .max_key_len = BITS2BYTES(256) }, -}; - -/** - * Map of supported auth algorithms. - */ -static const -struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = { - [RTE_CRYPTO_AUTH_MD5_HMAC] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HMAC_MD5 }, - [RTE_CRYPTO_AUTH_MD5] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HASH_MD5 }, - [RTE_CRYPTO_AUTH_SHA1_HMAC] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HMAC_SHA1 }, - [RTE_CRYPTO_AUTH_SHA1] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HASH_SHA1 }, - [RTE_CRYPTO_AUTH_SHA224] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HASH_SHA2_224 }, - [RTE_CRYPTO_AUTH_SHA256_HMAC] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HMAC_SHA2_256 }, - [RTE_CRYPTO_AUTH_SHA256] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HASH_SHA2_256 }, - [RTE_CRYPTO_AUTH_SHA384_HMAC] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HMAC_SHA2_384 }, - [RTE_CRYPTO_AUTH_SHA384] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HASH_SHA2_384 }, - [RTE_CRYPTO_AUTH_SHA512_HMAC] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HMAC_SHA2_512 }, - [RTE_CRYPTO_AUTH_SHA512] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HASH_SHA2_512 }, - [RTE_CRYPTO_AUTH_AES_GMAC] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_AES_GMAC }, -}; - -/** - * Map of supported aead algorithms. - */ -static const -struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = { - [RTE_CRYPTO_AEAD_AES_GCM] = { - .supported = ALGO_SUPPORTED, - .cipher_alg = SAM_CIPHER_AES, - .cipher_mode = SAM_CIPHER_GCM, - .max_key_len = BITS2BYTES(256) }, -}; - -/* - *----------------------------------------------------------------------------- - * Forward declarations. - *----------------------------------------------------------------------------- - */ -static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev); - -/* - *----------------------------------------------------------------------------- - * Session Preparation. - *----------------------------------------------------------------------------- - */ - -/** - * Get xform chain order. - * - * @param xform Pointer to configuration structure chain for crypto operations. - * @returns Order of crypto operations. 
- */ -static enum mrvl_crypto_chain_order -mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform) -{ - /* Currently, Marvell supports max 2 operations in chain */ - if (xform->next != NULL && xform->next->next != NULL) - return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED; - - if (xform->next != NULL) { - if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) && - (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)) - return MRVL_CRYPTO_CHAIN_AUTH_CIPHER; - - if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) && - (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)) - return MRVL_CRYPTO_CHAIN_CIPHER_AUTH; - } else { - if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) - return MRVL_CRYPTO_CHAIN_AUTH_ONLY; - - if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) - return MRVL_CRYPTO_CHAIN_CIPHER_ONLY; - - if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) - return MRVL_CRYPTO_CHAIN_COMBINED; - } - return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED; -} - -/** - * Set session parameters for cipher part. - * - * @param sess Crypto session pointer. - * @param cipher_xform Pointer to configuration structure for cipher operations. - * @returns 0 in case of success, negative value otherwise. - */ -static int -mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess, - const struct rte_crypto_sym_xform *cipher_xform) -{ - /* Make sure we've got proper struct */ - if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) { - MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!"); - return -EINVAL; - } - - /* See if map data is present and valid */ - if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) || - (cipher_map[cipher_xform->cipher.algo].supported - != ALGO_SUPPORTED)) { - MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!"); - return -EINVAL; - } - - sess->cipher_iv_offset = cipher_xform->cipher.iv.offset; - - sess->sam_sess_params.dir = - (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? - SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT; - sess->sam_sess_params.cipher_alg = - cipher_map[cipher_xform->cipher.algo].cipher_alg; - sess->sam_sess_params.cipher_mode = - cipher_map[cipher_xform->cipher.algo].cipher_mode; - - /* Assume IV will be passed together with data. */ - sess->sam_sess_params.cipher_iv = NULL; - - /* Get max key length. */ - if (cipher_xform->cipher.key.length > - cipher_map[cipher_xform->cipher.algo].max_key_len) { - MRVL_CRYPTO_LOG_ERR("Wrong key length!"); - return -EINVAL; - } - - sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length; - sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data; - - return 0; -} - -/** - * Set session parameters for authentication part. - * - * @param sess Crypto session pointer. - * @param auth_xform Pointer to configuration structure for auth operations. - * @returns 0 in case of success, negative value otherwise. - */ -static int -mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess, - const struct rte_crypto_sym_xform *auth_xform) -{ - /* Make sure we've got proper struct */ - if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) { - MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!"); - return -EINVAL; - } - - /* See if map data is present and valid */ - if ((auth_xform->auth.algo > RTE_DIM(auth_map)) || - (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) { - MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!"); - return -EINVAL; - } - - sess->sam_sess_params.dir = - (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 
- SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT; - sess->sam_sess_params.auth_alg = - auth_map[auth_xform->auth.algo].auth_alg; - sess->sam_sess_params.u.basic.auth_icv_len = - auth_xform->auth.digest_length; - /* auth_key must be NULL if auth algorithm does not use HMAC */ - sess->sam_sess_params.auth_key = auth_xform->auth.key.length ? - auth_xform->auth.key.data : NULL; - sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length; - - return 0; -} - -/** - * Set session parameters for aead part. - * - * @param sess Crypto session pointer. - * @param aead_xform Pointer to configuration structure for aead operations. - * @returns 0 in case of success, negative value otherwise. - */ -static int -mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess, - const struct rte_crypto_sym_xform *aead_xform) -{ - /* Make sure we've got proper struct */ - if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) { - MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!"); - return -EINVAL; - } - - /* See if map data is present and valid */ - if ((aead_xform->aead.algo > RTE_DIM(aead_map)) || - (aead_map[aead_xform->aead.algo].supported - != ALGO_SUPPORTED)) { - MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!"); - return -EINVAL; - } - - sess->sam_sess_params.dir = - (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? - SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT; - sess->sam_sess_params.cipher_alg = - aead_map[aead_xform->aead.algo].cipher_alg; - sess->sam_sess_params.cipher_mode = - aead_map[aead_xform->aead.algo].cipher_mode; - - /* Assume IV will be passed together with data. */ - sess->sam_sess_params.cipher_iv = NULL; - - /* Get max key length. */ - if (aead_xform->aead.key.length > - aead_map[aead_xform->aead.algo].max_key_len) { - MRVL_CRYPTO_LOG_ERR("Wrong key length!"); - return -EINVAL; - } - - sess->sam_sess_params.cipher_key = aead_xform->aead.key.data; - sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length; - - if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM) - sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM; - - sess->sam_sess_params.u.basic.auth_icv_len = - aead_xform->aead.digest_length; - - sess->sam_sess_params.u.basic.auth_aad_len = - aead_xform->aead.aad_length; - - return 0; -} - -/** - * Parse crypto transform chain and setup session parameters. - * - * @param dev Pointer to crypto device - * @param sess Poiner to crypto session - * @param xform Pointer to configuration structure chain for crypto operations. - * @returns 0 in case of success, negative value otherwise. 
- */ -int -mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess, - const struct rte_crypto_sym_xform *xform) -{ - const struct rte_crypto_sym_xform *cipher_xform = NULL; - const struct rte_crypto_sym_xform *auth_xform = NULL; - const struct rte_crypto_sym_xform *aead_xform = NULL; - - /* Filter out spurious/broken requests */ - if (xform == NULL) - return -EINVAL; - - sess->chain_order = mrvl_crypto_get_chain_order(xform); - switch (sess->chain_order) { - case MRVL_CRYPTO_CHAIN_CIPHER_AUTH: - cipher_xform = xform; - auth_xform = xform->next; - break; - case MRVL_CRYPTO_CHAIN_AUTH_CIPHER: - auth_xform = xform; - cipher_xform = xform->next; - break; - case MRVL_CRYPTO_CHAIN_CIPHER_ONLY: - cipher_xform = xform; - break; - case MRVL_CRYPTO_CHAIN_AUTH_ONLY: - auth_xform = xform; - break; - case MRVL_CRYPTO_CHAIN_COMBINED: - aead_xform = xform; - break; - default: - return -EINVAL; - } - - if ((cipher_xform != NULL) && - (mrvl_crypto_set_cipher_session_parameters( - sess, cipher_xform) < 0)) { - MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters"); - return -EINVAL; - } - - if ((auth_xform != NULL) && - (mrvl_crypto_set_auth_session_parameters( - sess, auth_xform) < 0)) { - MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters"); - return -EINVAL; - } - - if ((aead_xform != NULL) && - (mrvl_crypto_set_aead_session_parameters( - sess, aead_xform) < 0)) { - MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters"); - return -EINVAL; - } - - return 0; -} - -/* - *----------------------------------------------------------------------------- - * Process Operations - *----------------------------------------------------------------------------- - */ - -/** - * Prepare a single request. - * - * This function basically translates DPDK crypto request into one - * understandable by MUDSK's SAM. If this is a first request in a session, - * it starts the session. - * - * @param request Pointer to pre-allocated && reset request buffer [Out]. - * @param src_bd Pointer to pre-allocated source descriptor [Out]. - * @param dst_bd Pointer to pre-allocated destination descriptor [Out]. - * @param op Pointer to DPDK crypto operation struct [In]. - */ -static inline int -mrvl_request_prepare(struct sam_cio_op_params *request, - struct sam_buf_info *src_bd, - struct sam_buf_info *dst_bd, - struct rte_crypto_op *op) -{ - struct mrvl_crypto_session *sess; - struct rte_mbuf *dst_mbuf; - uint8_t *digest; - - if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) { - MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session " - "oriented requests, op (%p) is sessionless.", - op); - return -EINVAL; - } - - sess = (struct mrvl_crypto_session *)get_session_private_data( - op->sym->session, cryptodev_driver_id); - if (unlikely(sess == NULL)) { - MRVL_CRYPTO_LOG_ERR("Session was not created for this device"); - return -EINVAL; - } - - /* - * If application delivered us null dst buffer, it means it expects - * us to deliver the result in src buffer. - */ - dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src; - - request->sa = sess->sam_sess; - request->cookie = op; - - /* Single buffers only, sorry. */ - request->num_bufs = 1; - request->src = src_bd; - src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *); - src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src); - src_bd->len = rte_pktmbuf_data_len(op->sym->m_src); - - /* Empty source. */ - if (rte_pktmbuf_data_len(op->sym->m_src) == 0) { - /* EIP does not support 0 length buffers. 
*/ - MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!"); - return -1; - } - - /* Empty destination. */ - if (rte_pktmbuf_data_len(dst_mbuf) == 0) { - /* Make dst buffer fit at least source data. */ - if (rte_pktmbuf_append(dst_mbuf, - rte_pktmbuf_data_len(op->sym->m_src)) == NULL) { - MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!"); - return -1; - } - } - - request->dst = dst_bd; - dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *); - dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf); - - /* - * We can use all available space in dst_mbuf, - * not only what's used currently. - */ - dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf); - - if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) { - request->cipher_len = op->sym->aead.data.length; - request->cipher_offset = op->sym->aead.data.offset; - request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *, - sess->cipher_iv_offset); - - request->auth_aad = op->sym->aead.aad.data; - request->auth_offset = request->cipher_offset; - request->auth_len = request->cipher_len; - } else { - request->cipher_len = op->sym->cipher.data.length; - request->cipher_offset = op->sym->cipher.data.offset; - request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *, - sess->cipher_iv_offset); - - request->auth_offset = op->sym->auth.data.offset; - request->auth_len = op->sym->auth.data.length; - } - - digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ? - op->sym->aead.digest.data : op->sym->auth.digest.data; - if (digest == NULL) { - /* No auth - no worry. */ - return 0; - } - - request->auth_icv_offset = request->auth_offset + request->auth_len; - - /* - * EIP supports only scenarios where ICV(digest buffer) is placed at - * auth_icv_offset. Any other placement means risking errors. - */ - if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) { - /* - * This should be the most common case anyway, - * EIP will overwrite DST buffer at auth_icv_offset. - */ - if (rte_pktmbuf_mtod_offset( - dst_mbuf, uint8_t *, - request->auth_icv_offset) == digest) { - return 0; - } - } else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */ - /* - * EIP will look for digest at auth_icv_offset - * offset in SRC buffer. - */ - if (rte_pktmbuf_mtod_offset( - op->sym->m_src, uint8_t *, - request->auth_icv_offset) == digest) { - return 0; - } - } - - /* - * If we landed here it means that digest pointer is - * at different than expected place. - */ - return -1; -} - -/* - *----------------------------------------------------------------------------- - * PMD Framework handlers - *----------------------------------------------------------------------------- - */ - -/** - * Enqueue burst. - * - * @param queue_pair Pointer to queue pair. - * @param ops Pointer to ops requests array. - * @param nb_ops Number of elements in ops requests array. - * @returns Number of elements consumed from ops. - */ -static uint16_t -mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - uint16_t iter_ops = 0; - uint16_t to_enq = 0; - uint16_t consumed = 0; - int ret; - struct sam_cio_op_params requests[nb_ops]; - /* - * DPDK uses single fragment buffers, so we can KISS descriptors. - * SAM does not store bd pointers, so on-stack scope will be enough. - */ - struct sam_buf_info src_bd[nb_ops]; - struct sam_buf_info dst_bd[nb_ops]; - struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair; - - if (nb_ops == 0) - return 0; - - /* Prepare the burst. 
*/ - memset(&requests, 0, sizeof(requests)); - - /* Iterate through */ - for (; iter_ops < nb_ops; ++iter_ops) { - if (mrvl_request_prepare(&requests[iter_ops], - &src_bd[iter_ops], - &dst_bd[iter_ops], - ops[iter_ops]) < 0) { - MRVL_CRYPTO_LOG_ERR( - "Error while parameters preparation!"); - qp->stats.enqueue_err_count++; - ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR; - - /* - * Number of handled ops is increased - * (even if the result of handling is error). - */ - ++consumed; - break; - } - - ops[iter_ops]->status = - RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - - /* Increase the number of ops to enqueue. */ - ++to_enq; - } /* for (; iter_ops < nb_ops;... */ - - if (to_enq > 0) { - /* Send the burst */ - ret = sam_cio_enq(qp->cio, requests, &to_enq); - consumed += to_enq; - if (ret < 0) { - /* - * Trust SAM that in this case returned value will be at - * some point correct (now it is returned unmodified). - */ - qp->stats.enqueue_err_count += to_enq; - for (iter_ops = 0; iter_ops < to_enq; ++iter_ops) - ops[iter_ops]->status = - RTE_CRYPTO_OP_STATUS_ERROR; - } - } - - qp->stats.enqueued_count += to_enq; - return consumed; -} - -/** - * Dequeue burst. - * - * @param queue_pair Pointer to queue pair. - * @param ops Pointer to ops requests array. - * @param nb_ops Number of elements in ops requests array. - * @returns Number of elements dequeued. - */ -static uint16_t -mrvl_crypto_pmd_dequeue_burst(void *queue_pair, - struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - int ret; - struct mrvl_crypto_qp *qp = queue_pair; - struct sam_cio *cio = qp->cio; - struct sam_cio_op_result results[nb_ops]; - uint16_t i; - - ret = sam_cio_deq(cio, results, &nb_ops); - if (ret < 0) { - /* Count all dequeued as error. */ - qp->stats.dequeue_err_count += nb_ops; - - /* But act as they were dequeued anyway*/ - qp->stats.dequeued_count += nb_ops; - - return 0; - } - - /* Unpack and check results. */ - for (i = 0; i < nb_ops; ++i) { - ops[i] = results[i].cookie; - - switch (results[i].status) { - case SAM_CIO_OK: - ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; - break; - case SAM_CIO_ERR_ICV: - MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV."); - ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; - break; - default: - MRVL_CRYPTO_LOG_DBG( - "CIO returned Error: %d", results[i].status); - ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR; - break; - } - } - - qp->stats.dequeued_count += nb_ops; - return nb_ops; -} - -/** - * Create a new crypto device. - * - * @param name Driver name. - * @param vdev Pointer to device structure. - * @param init_params Pointer to initialization parameters. - * @returns 0 in case of success, negative value otherwise. - */ -static int -cryptodev_mrvl_crypto_create(const char *name, - struct rte_vdev_device *vdev, - struct rte_cryptodev_pmd_init_params *init_params) -{ - struct rte_cryptodev *dev; - struct mrvl_crypto_private *internals; - struct sam_init_params sam_params; - int ret; - - dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); - if (dev == NULL) { - MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev"); - goto init_error; - } - - dev->driver_id = cryptodev_driver_id; - dev->dev_ops = rte_mrvl_crypto_pmd_ops; - - /* Register rx/tx burst functions for data path. 
*/ - dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst; - dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst; - - dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | - RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | - RTE_CRYPTODEV_FF_HW_ACCELERATED; - - /* Set vector instructions mode supported */ - internals = dev->data->dev_private; - - internals->max_nb_qpairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; - - /* - * ret == -EEXIST is correct, it means DMA - * has been already initialized. - */ - ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE); - if (ret < 0) { - if (ret != -EEXIST) - return ret; - - MRVL_CRYPTO_LOG_INFO( - "DMA memory has been already initialized by a different driver."); - } - - sam_params.max_num_sessions = internals->max_nb_sessions; - - return sam_init(&sam_params); - -init_error: - MRVL_CRYPTO_LOG_ERR( - "driver %s: %s failed", init_params->name, __func__); - - cryptodev_mrvl_crypto_uninit(vdev); - return -EFAULT; -} - -/** - * Initialize the crypto device. - * - * @param vdev Pointer to device structure. - * @returns 0 in case of success, negative value otherwise. - */ -static int -cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev) -{ - struct rte_cryptodev_pmd_init_params init_params = { }; - const char *name, *args; - int ret; - - name = rte_vdev_device_name(vdev); - if (name == NULL) - return -EINVAL; - args = rte_vdev_device_args(vdev); - - init_params.private_data_size = sizeof(struct mrvl_crypto_private); - init_params.max_nb_queue_pairs = sam_get_num_inst() * SAM_HW_RING_NUM; - init_params.max_nb_sessions = - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS; - init_params.socket_id = rte_socket_id(); - - ret = rte_cryptodev_pmd_parse_input_args(&init_params, args); - if (ret) { - RTE_LOG(ERR, PMD, - "Failed to parse initialisation arguments[%s]\n", - args); - return -EINVAL; - } - - return cryptodev_mrvl_crypto_create(name, vdev, &init_params); -} - -/** - * Uninitialize the crypto device - * - * @param vdev Pointer to device structure. - * @returns 0 in case of success, negative value otherwise. - */ -static int -cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev) -{ - struct rte_cryptodev *cryptodev; - const char *name = rte_vdev_device_name(vdev); - - if (name == NULL) - return -EINVAL; - - RTE_LOG(INFO, PMD, - "Closing Marvell crypto device %s on numa socket %u\n", - name, rte_socket_id()); - - sam_deinit(); - - cryptodev = rte_cryptodev_pmd_get_named_dev(name); - if (cryptodev == NULL) - return -ENODEV; - - return rte_cryptodev_pmd_destroy(cryptodev); -} - -/** - * Basic driver handlers for use in the constructor. - */ -static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = { - .probe = cryptodev_mrvl_crypto_init, - .remove = cryptodev_mrvl_crypto_uninit -}; - -static struct cryptodev_driver mrvl_crypto_drv; - -/* Register the driver in constructor. */ -RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv); -RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD, - "max_nb_queue_pairs= " - "max_nb_sessions= " - "socket_id="); -RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv, - cryptodev_driver_id); diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c b/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c deleted file mode 100644 index a1de33ae..00000000 --- a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c +++ /dev/null @@ -1,778 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include - -#include -#include -#include - -#include "rte_mrvl_pmd_private.h" - -/** - * Capabilities list to be used in reporting to DPDK. - */ -static const struct rte_cryptodev_capabilities - mrvl_crypto_pmd_capabilities[] = { - { /* MD5 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_MD5_HMAC, - .block_size = 64, - .key_size = { - .min = 1, - .max = 64, - .increment = 1 - }, - .digest_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - }, } - }, } - }, - { /* MD5 */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_MD5, - .block_size = 64, - .key_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .digest_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA1 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .block_size = 64, - .key_size = { - .min = 1, - .max = 64, - .increment = 1 - }, - .digest_size = { - .min = 20, - .max = 20, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA1 */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA1, - .block_size = 64, - .key_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .digest_size = { - .min = 20, - .max = 20, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA224 */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA224, - .block_size = 64, - .key_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .digest_size = { - .min = 28, - .max = 28, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA256 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = 
RTE_CRYPTO_AUTH_SHA256_HMAC, - .block_size = 64, - .key_size = { - .min = 1, - .max = 64, - .increment = 1 - }, - .digest_size = { - .min = 32, - .max = 32, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA256 */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA256, - .block_size = 64, - .key_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .digest_size = { - .min = 32, - .max = 32, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA384 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, - .block_size = 128, - .key_size = { - .min = 1, - .max = 128, - .increment = 1 - }, - .digest_size = { - .min = 48, - .max = 48, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA384 */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA384, - .block_size = 128, - .key_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .digest_size = { - .min = 48, - .max = 48, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA512 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, - .block_size = 128, - .key_size = { - .min = 1, - .max = 128, - .increment = 1 - }, - .digest_size = { - .min = 64, - .max = 64, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA512 */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA512, - .block_size = 128, - .key_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .digest_size = { - .min = 64, - .max = 64, - .increment = 0 - }, - }, } - }, } - }, - { /* AES CBC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { - .algo = RTE_CRYPTO_CIPHER_AES_CBC, - .block_size = 16, - .key_size = { - .min = 16, - .max = 32, - .increment = 8 - }, - .iv_size = { - .min = 16, - .max = 16, - .increment = 0 - } - }, } - }, } - }, - { /* AES CTR */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { - .algo = RTE_CRYPTO_CIPHER_AES_CTR, - .block_size = 16, - .key_size = { - .min = 16, - .max = 32, - .increment = 8 - }, - .iv_size = { - .min = 16, - .max = 16, - .increment = 0 - } - }, } - }, } - }, - { /* AES GCM */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, - {.aead = { - .algo = RTE_CRYPTO_AEAD_AES_GCM, - .block_size = 16, - .key_size = { - .min = 16, - .max = 32, - .increment = 8 - }, - .digest_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - .aad_size = { - .min = 8, - .max = 12, - .increment = 4 - }, - .iv_size = { - .min = 12, - .max = 16, - .increment = 4 - } - }, } - }, } - }, - { /* AES GMAC (AUTH) */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_AES_GMAC, - .block_size = 16, - .key_size = { - .min = 16, - .max = 32, - .increment = 8 - }, - .digest_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - .iv_size = { - .min = 8, - .max = 65532, - .increment = 4 - } - }, } - }, } - }, - { /* 3DES CBC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { - .algo = RTE_CRYPTO_CIPHER_3DES_CBC, - .block_size = 8, - 
.key_size = { - .min = 24, - .max = 24, - .increment = 0 - }, - .iv_size = { - .min = 8, - .max = 8, - .increment = 0 - } - }, } - }, } - }, - { /* 3DES CTR */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { - .algo = RTE_CRYPTO_CIPHER_3DES_CTR, - .block_size = 8, - .key_size = { - .min = 24, - .max = 24, - .increment = 0 - }, - .iv_size = { - .min = 8, - .max = 8, - .increment = 0 - } - }, } - }, } - }, - - RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() -}; - - -/** - * Configure device (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @param config Pointer to configuration structure. - * @returns 0. Always. - */ -static int -mrvl_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev, - __rte_unused struct rte_cryptodev_config *config) -{ - return 0; -} - -/** - * Start device (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @returns 0. Always. - */ -static int -mrvl_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev) -{ - return 0; -} - -/** - * Stop device (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @returns 0. Always. - */ -static void -mrvl_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev) -{ -} - -/** - * Get device statistics (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @param stats Pointer to statistics structure [out]. - */ -static void -mrvl_crypto_pmd_stats_get(struct rte_cryptodev *dev, - struct rte_cryptodev_stats *stats) -{ - int qp_id; - - for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { - struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id]; - - stats->enqueued_count += qp->stats.enqueued_count; - stats->dequeued_count += qp->stats.dequeued_count; - - stats->enqueue_err_count += qp->stats.enqueue_err_count; - stats->dequeue_err_count += qp->stats.dequeue_err_count; - } -} - -/** - * Reset device statistics (PMD ops callback). - * - * @param dev Pointer to the device structure. - */ -static void -mrvl_crypto_pmd_stats_reset(struct rte_cryptodev *dev) -{ - int qp_id; - - for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { - struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id]; - - memset(&qp->stats, 0, sizeof(qp->stats)); - } -} - -/** - * Get device info (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @param dev_info Pointer to the device info structure [out]. - */ -static void -mrvl_crypto_pmd_info_get(struct rte_cryptodev *dev, - struct rte_cryptodev_info *dev_info) -{ - struct mrvl_crypto_private *internals = dev->data->dev_private; - - if (dev_info != NULL) { - dev_info->driver_id = dev->driver_id; - dev_info->feature_flags = dev->feature_flags; - dev_info->capabilities = mrvl_crypto_pmd_capabilities; - dev_info->max_nb_queue_pairs = internals->max_nb_qpairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; - } -} - -/** - * Release queue pair (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @param qp_id ID of Queue Pair to release. - * @returns 0. Always. - */ -static int -mrvl_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id) -{ - struct mrvl_crypto_qp *qp = - (struct mrvl_crypto_qp *)dev->data->queue_pairs[qp_id]; - - if (dev->data->queue_pairs[qp_id] != NULL) { - sam_cio_flush(qp->cio); - sam_cio_deinit(qp->cio); - rte_free(dev->data->queue_pairs[qp_id]); - dev->data->queue_pairs[qp_id] = NULL; - } - - return 0; -} - -/** - * Close device (PMD ops callback). 
- * - * @param dev Pointer to the device structure. - * @returns 0. Always. - */ -static int -mrvl_crypto_pmd_close(struct rte_cryptodev *dev) -{ - int qp_id; - - for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) - mrvl_crypto_pmd_qp_release(dev, qp_id); - - return 0; -} - -/** - * Setup a queue pair (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @param qp_id ID of the Queue Pair. - * @param qp_conf Queue pair configuration (nb of descriptors). - * @param socket_id NUMA socket to allocate memory on. - * @returns 0 upon success, negative value otherwise. - */ -static int -mrvl_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, - const struct rte_cryptodev_qp_conf *qp_conf, - int socket_id, struct rte_mempool *session_pool) -{ - struct mrvl_crypto_qp *qp = NULL; - char match[RTE_CRYPTODEV_NAME_MAX_LEN]; - unsigned int n; - - /* Allocate the queue pair data structure. */ - qp = rte_zmalloc_socket("MRVL Crypto PMD Queue Pair", sizeof(*qp), - RTE_CACHE_LINE_SIZE, socket_id); - if (qp == NULL) - return -ENOMEM; - - /* Free old qp prior setup if needed. */ - if (dev->data->queue_pairs[qp_id] != NULL) - mrvl_crypto_pmd_qp_release(dev, qp_id); - - do { /* Error handling block */ - - /* - * This extra check is necessary due to a bug in - * crypto library. - */ - int num = sam_get_num_inst(); - if (num == 0) { - MRVL_CRYPTO_LOG_ERR("No crypto engines detected.\n"); - return -1; - } - - /* - * In case two crypto engines are enabled qps will - * be evenly spread among them. Even and odd qps will - * be handled by cio-0 and cio-1 respectively. qp-cio mapping - * will look as follows: - * - * qp: 0 1 2 3 - * cio-x:y: cio-0:0, cio-1:0, cio-0:1, cio-1:1 - * - * qp: 4 5 6 7 - * cio-x:y: cio-0:2, cio-1:2, cio-0:3, cio-1:3 - * - * In case just one engine is enabled mapping will look as - * follows: - * qp: 0 1 2 3 - * cio-x:y: cio-0:0, cio-0:1, cio-0:2, cio-0:3 - */ - n = snprintf(match, sizeof(match), "cio-%u:%u", - qp_id % num, qp_id / num); - - if (n >= sizeof(match)) - break; - - qp->cio_params.match = match; - qp->cio_params.size = qp_conf->nb_descriptors; - - if (sam_cio_init(&qp->cio_params, &qp->cio) < 0) - break; - - qp->sess_mp = session_pool; - - memset(&qp->stats, 0, sizeof(qp->stats)); - dev->data->queue_pairs[qp_id] = qp; - return 0; - } while (0); - - rte_free(qp); - return -1; -} - -/** Start queue pair (PMD ops callback) - not supported. - * - * @param dev Pointer to the device structure. - * @param qp_id ID of the Queue Pair. - * @returns -ENOTSUP. Always. - */ -static int -mrvl_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair (PMD ops callback) - not supported. - * - * @param dev Pointer to the device structure. - * @param qp_id ID of the Queue Pair. - * @returns -ENOTSUP. Always. - */ -static int -mrvl_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Return the number of allocated queue pairs (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @returns Number of allocated queue pairs. - */ -static uint32_t -mrvl_crypto_pmd_qp_count(struct rte_cryptodev *dev) -{ - return dev->data->nb_queue_pairs; -} - -/** Returns the size of the session structure (PMD ops callback). - * - * @param dev Pointer to the device structure [Unused]. - * @returns Size of Marvell crypto session. 
- */ -static unsigned -mrvl_crypto_pmd_session_get_size(__rte_unused struct rte_cryptodev *dev) -{ - return sizeof(struct mrvl_crypto_session); -} - -/** Configure the session from a crypto xform chain (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @param xform Pointer to the crytpo configuration structure. - * @param sess Pointer to the empty session structure. - * @returns 0 upon success, negative value otherwise. - */ -static int -mrvl_crypto_pmd_session_configure(__rte_unused struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct rte_cryptodev_sym_session *sess, - struct rte_mempool *mp) -{ - struct mrvl_crypto_session *mrvl_sess; - void *sess_private_data; - int ret; - - if (sess == NULL) { - MRVL_CRYPTO_LOG_ERR("Invalid session struct."); - return -EINVAL; - } - - if (rte_mempool_get(mp, &sess_private_data)) { - CDEV_LOG_ERR("Couldn't get object from session mempool."); - return -ENOMEM; - } - - ret = mrvl_crypto_set_session_parameters(sess_private_data, xform); - if (ret != 0) { - MRVL_CRYPTO_LOG_ERR("Failed to configure session parameters."); - - /* Return session to mempool */ - rte_mempool_put(mp, sess_private_data); - return ret; - } - - set_session_private_data(sess, dev->driver_id, sess_private_data); - - mrvl_sess = (struct mrvl_crypto_session *)sess_private_data; - if (sam_session_create(&mrvl_sess->sam_sess_params, - &mrvl_sess->sam_sess) < 0) { - MRVL_CRYPTO_LOG_DBG("Failed to create session!"); - return -EIO; - } - - return 0; -} - -/** - * Clear the memory of session so it doesn't leave key material behind. - * - * @param dev Pointer to the device structure. - * @returns 0. Always. - */ -static void -mrvl_crypto_pmd_session_clear(struct rte_cryptodev *dev, - struct rte_cryptodev_sym_session *sess) -{ - - uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); - - /* Zero out the whole structure */ - if (sess_priv) { - struct mrvl_crypto_session *mrvl_sess = - (struct mrvl_crypto_session *)sess_priv; - - if (mrvl_sess->sam_sess && - sam_session_destroy(mrvl_sess->sam_sess) < 0) { - MRVL_CRYPTO_LOG_INFO("Error while destroying session!"); - } - - memset(sess, 0, sizeof(struct mrvl_crypto_session)); - struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); - rte_mempool_put(sess_mp, sess_priv); - } -} - -/** - * PMD handlers for crypto ops. 
- */ -static struct rte_cryptodev_ops mrvl_crypto_pmd_ops = { - .dev_configure = mrvl_crypto_pmd_config, - .dev_start = mrvl_crypto_pmd_start, - .dev_stop = mrvl_crypto_pmd_stop, - .dev_close = mrvl_crypto_pmd_close, - - .dev_infos_get = mrvl_crypto_pmd_info_get, - - .stats_get = mrvl_crypto_pmd_stats_get, - .stats_reset = mrvl_crypto_pmd_stats_reset, - - .queue_pair_setup = mrvl_crypto_pmd_qp_setup, - .queue_pair_release = mrvl_crypto_pmd_qp_release, - .queue_pair_start = mrvl_crypto_pmd_qp_start, - .queue_pair_stop = mrvl_crypto_pmd_qp_stop, - .queue_pair_count = mrvl_crypto_pmd_qp_count, - - .session_get_size = mrvl_crypto_pmd_session_get_size, - .session_configure = mrvl_crypto_pmd_session_configure, - .session_clear = mrvl_crypto_pmd_session_clear -}; - -struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops = &mrvl_crypto_pmd_ops; diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_private.h b/drivers/crypto/mrvl/rte_mrvl_pmd_private.h deleted file mode 100644 index 923faaf9..00000000 --- a/drivers/crypto/mrvl/rte_mrvl_pmd_private.h +++ /dev/null @@ -1,123 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _RTE_MRVL_PMD_PRIVATE_H_ -#define _RTE_MRVL_PMD_PRIVATE_H_ - -#include "rte_mrvl_compat.h" - -#define CRYPTODEV_NAME_MRVL_PMD crypto_mrvl -/**< Marvell PMD device name */ - -#define MRVL_CRYPTO_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \ - __func__, __LINE__, ## args) - -#ifdef RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG -#define MRVL_CRYPTO_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \ - __func__, __LINE__, ## args) - -#define MRVL_CRYPTO_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \ - __func__, __LINE__, ## args) - -#else -#define MRVL_CRYPTO_LOG_INFO(fmt, args...) 
-#define MRVL_CRYPTO_LOG_DBG(fmt, args...) -#endif - -/** - * Handy bits->bytes conversion macro. - */ -#define BITS2BYTES(x) ((x) >> 3) - -/** The operation order mode enumerator. */ -enum mrvl_crypto_chain_order { - MRVL_CRYPTO_CHAIN_CIPHER_ONLY, - MRVL_CRYPTO_CHAIN_AUTH_ONLY, - MRVL_CRYPTO_CHAIN_CIPHER_AUTH, - MRVL_CRYPTO_CHAIN_AUTH_CIPHER, - MRVL_CRYPTO_CHAIN_COMBINED, - MRVL_CRYPTO_CHAIN_NOT_SUPPORTED, -}; - -/** Private data structure for each crypto device. */ -struct mrvl_crypto_private { - unsigned int max_nb_qpairs; /**< Max number of queue pairs */ - unsigned int max_nb_sessions; /**< Max number of sessions */ -}; - -/** MRVL crypto queue pair structure. */ -struct mrvl_crypto_qp { - /** SAM CIO (MUSDK Queue Pair equivalent).*/ - struct sam_cio *cio; - - /** Session Mempool. */ - struct rte_mempool *sess_mp; - - /** Queue pair statistics. */ - struct rte_cryptodev_stats stats; - - /** CIO initialization parameters.*/ - struct sam_cio_params cio_params; -} __rte_cache_aligned; - -/** MRVL crypto private session structure. */ -struct mrvl_crypto_session { - /** Crypto operations chain order. */ - enum mrvl_crypto_chain_order chain_order; - - /** Session initialization parameters. */ - struct sam_session_params sam_sess_params; - - /** SAM session pointer. */ - struct sam_sa *sam_sess; - - /** Cipher IV offset. */ - uint16_t cipher_iv_offset; -} __rte_cache_aligned; - -/** Set and validate MRVL crypto session parameters */ -extern int -mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess, - const struct rte_crypto_sym_xform *xform); - -/** device specific operations function pointer structure */ -extern struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops; - -#endif /* _RTE_MRVL_PMD_PRIVATE_H_ */ diff --git a/drivers/crypto/mrvl/rte_pmd_mrvl_version.map b/drivers/crypto/mrvl/rte_pmd_mrvl_version.map deleted file mode 100644 index a7530317..00000000 --- a/drivers/crypto/mrvl/rte_pmd_mrvl_version.map +++ /dev/null @@ -1,3 +0,0 @@ -DPDK_17.11 { - local: *; -}; diff --git a/drivers/crypto/mvsam/Makefile b/drivers/crypto/mvsam/Makefile new file mode 100644 index 00000000..c3dc72c1 --- /dev/null +++ b/drivers/crypto/mvsam/Makefile @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Marvell International Ltd. +# Copyright(c) 2017 Semihalf. +# All rights reserved. + +include $(RTE_SDK)/mk/rte.vars.mk + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(MAKECMDGOALS),config) +ifeq ($(LIBMUSDK_PATH),) +$(error "Please define LIBMUSDK_PATH environment variable") +endif +endif +endif + +# library name +LIB = librte_pmd_mvsam_crypto.a + +# build flags +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(LIBMUSDK_PATH)/include +CFLAGS += -DMVCONF_TYPES_PUBLIC +CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC + +# library version +LIBABIVER := 1 + +# versioning export map +EXPORT_MAP := rte_pmd_mvsam_version.map + +# external library dependencies +LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool +LDLIBS += -lrte_cryptodev +LDLIBS += -lrte_bus_vdev + +# library source files +SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd_ops.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/crypto/mvsam/meson.build b/drivers/crypto/mvsam/meson.build new file mode 100644 index 00000000..3c8ea3cf --- /dev/null +++ b/drivers/crypto/mvsam/meson.build @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Marvell International Ltd. 
+# Copyright(c) 2018 Semihalf. +# All rights reserved. + +path = get_option('lib_musdk_dir') +lib_dir = path + '/lib' +inc_dir = path + '/include' + +lib = cc.find_library('libmusdk', dirs: [lib_dir], required: false) +if not lib.found() + build = false +else + ext_deps += lib + includes += include_directories(inc_dir) + cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC'] +endif + +sources = files('rte_mrvl_pmd.c', 'rte_mrvl_pmd_ops.c') + +deps += ['bus_vdev'] diff --git a/drivers/crypto/mvsam/rte_mrvl_compat.h b/drivers/crypto/mvsam/rte_mrvl_compat.h new file mode 100644 index 00000000..4ab28d39 --- /dev/null +++ b/drivers/crypto/mvsam/rte_mrvl_compat.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Marvell International Ltd. + * Copyright(c) 2017 Semihalf. + * All rights reserved. + */ + +#ifndef _RTE_MRVL_COMPAT_H_ +#define _RTE_MRVL_COMPAT_H_ + +/* Unluckily, container_of is defined by both DPDK and MUSDK, + * we'll declare only one version. + * + * Note that it is not used in this PMD anyway. + */ +#ifdef container_of +#undef container_of +#endif +#include "env/mv_autogen_comp_flags.h" +#include "drivers/mv_sam.h" +#include "drivers/mv_sam_cio.h" +#include "drivers/mv_sam_session.h" + +#endif /* _RTE_MRVL_COMPAT_H_ */ diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd.c b/drivers/crypto/mvsam/rte_mrvl_pmd.c new file mode 100644 index 00000000..73eff757 --- /dev/null +++ b/drivers/crypto/mvsam/rte_mrvl_pmd.c @@ -0,0 +1,937 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Marvell International Ltd. + * Copyright(c) 2017 Semihalf. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "rte_mrvl_pmd_private.h" + +#define MRVL_MUSDK_DMA_MEMSIZE 41943040 + +#define MRVL_PMD_MAX_NB_SESS_ARG ("max_nb_sessions") +#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS 2048 + +static uint8_t cryptodev_driver_id; + +struct mrvl_pmd_init_params { + struct rte_cryptodev_pmd_init_params common; + uint32_t max_nb_sessions; +}; + +const char *mrvl_pmd_valid_params[] = { + RTE_CRYPTODEV_PMD_NAME_ARG, + RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG, + RTE_CRYPTODEV_PMD_SOCKET_ID_ARG, + MRVL_PMD_MAX_NB_SESS_ARG +}; + +/** + * Flag if particular crypto algorithm is supported by PMD/MUSDK. + * + * The idea is to have Not Supported value as default (0). + * This way we need only to define proper map sizes, + * non-initialized entries will be by default not supported. + */ +enum algo_supported { + ALGO_NOT_SUPPORTED = 0, + ALGO_SUPPORTED = 1, +}; + +/** Map elements for cipher mapping.*/ +struct cipher_params_mapping { + enum algo_supported supported; /**< On/Off switch */ + enum sam_cipher_alg cipher_alg; /**< Cipher algorithm */ + enum sam_cipher_mode cipher_mode; /**< Cipher mode */ + unsigned int max_key_len; /**< Maximum key length (in bytes)*/ +} +/* We want to squeeze in multiple maps into the cache line. */ +__rte_aligned(32); + +/** Map elements for auth mapping.*/ +struct auth_params_mapping { + enum algo_supported supported; /**< On/off switch */ + enum sam_auth_alg auth_alg; /**< Auth algorithm */ +} +/* We want to squeeze in multiple maps into the cache line. */ +__rte_aligned(32); + +/** + * Map of supported cipher algorithms. 
+ */ +static const +struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = { + [RTE_CRYPTO_CIPHER_3DES_CBC] = { + .supported = ALGO_SUPPORTED, + .cipher_alg = SAM_CIPHER_3DES, + .cipher_mode = SAM_CIPHER_CBC, + .max_key_len = BITS2BYTES(192) }, + [RTE_CRYPTO_CIPHER_3DES_CTR] = { + .supported = ALGO_SUPPORTED, + .cipher_alg = SAM_CIPHER_3DES, + .cipher_mode = SAM_CIPHER_CTR, + .max_key_len = BITS2BYTES(192) }, + [RTE_CRYPTO_CIPHER_3DES_ECB] = { + .supported = ALGO_SUPPORTED, + .cipher_alg = SAM_CIPHER_3DES, + .cipher_mode = SAM_CIPHER_ECB, + .max_key_len = BITS2BYTES(192) }, + [RTE_CRYPTO_CIPHER_AES_CBC] = { + .supported = ALGO_SUPPORTED, + .cipher_alg = SAM_CIPHER_AES, + .cipher_mode = SAM_CIPHER_CBC, + .max_key_len = BITS2BYTES(256) }, + [RTE_CRYPTO_CIPHER_AES_CTR] = { + .supported = ALGO_SUPPORTED, + .cipher_alg = SAM_CIPHER_AES, + .cipher_mode = SAM_CIPHER_CTR, + .max_key_len = BITS2BYTES(256) }, +}; + +/** + * Map of supported auth algorithms. + */ +static const +struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = { + [RTE_CRYPTO_AUTH_MD5_HMAC] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HMAC_MD5 }, + [RTE_CRYPTO_AUTH_MD5] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HASH_MD5 }, + [RTE_CRYPTO_AUTH_SHA1_HMAC] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HMAC_SHA1 }, + [RTE_CRYPTO_AUTH_SHA1] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HASH_SHA1 }, + [RTE_CRYPTO_AUTH_SHA224] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HASH_SHA2_224 }, + [RTE_CRYPTO_AUTH_SHA256_HMAC] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HMAC_SHA2_256 }, + [RTE_CRYPTO_AUTH_SHA256] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HASH_SHA2_256 }, + [RTE_CRYPTO_AUTH_SHA384_HMAC] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HMAC_SHA2_384 }, + [RTE_CRYPTO_AUTH_SHA384] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HASH_SHA2_384 }, + [RTE_CRYPTO_AUTH_SHA512_HMAC] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HMAC_SHA2_512 }, + [RTE_CRYPTO_AUTH_SHA512] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HASH_SHA2_512 }, + [RTE_CRYPTO_AUTH_AES_GMAC] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_AES_GMAC }, +}; + +/** + * Map of supported aead algorithms. + */ +static const +struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = { + [RTE_CRYPTO_AEAD_AES_GCM] = { + .supported = ALGO_SUPPORTED, + .cipher_alg = SAM_CIPHER_AES, + .cipher_mode = SAM_CIPHER_GCM, + .max_key_len = BITS2BYTES(256) }, +}; + +/* + *----------------------------------------------------------------------------- + * Forward declarations. + *----------------------------------------------------------------------------- + */ +static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev); + +/* + *----------------------------------------------------------------------------- + * Session Preparation. + *----------------------------------------------------------------------------- + */ + +/** + * Get xform chain order. + * + * @param xform Pointer to configuration structure chain for crypto operations. + * @returns Order of crypto operations. 
+ */ +static enum mrvl_crypto_chain_order +mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform) +{ + /* Currently, Marvell supports max 2 operations in chain */ + if (xform->next != NULL && xform->next->next != NULL) + return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED; + + if (xform->next != NULL) { + if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) && + (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)) + return MRVL_CRYPTO_CHAIN_AUTH_CIPHER; + + if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) && + (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)) + return MRVL_CRYPTO_CHAIN_CIPHER_AUTH; + } else { + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) + return MRVL_CRYPTO_CHAIN_AUTH_ONLY; + + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) + return MRVL_CRYPTO_CHAIN_CIPHER_ONLY; + + if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) + return MRVL_CRYPTO_CHAIN_COMBINED; + } + return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED; +} + +/** + * Set session parameters for cipher part. + * + * @param sess Crypto session pointer. + * @param cipher_xform Pointer to configuration structure for cipher operations. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess, + const struct rte_crypto_sym_xform *cipher_xform) +{ + /* Make sure we've got proper struct */ + if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) { + MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!"); + return -EINVAL; + } + + /* See if map data is present and valid */ + if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) || + (cipher_map[cipher_xform->cipher.algo].supported + != ALGO_SUPPORTED)) { + MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!"); + return -EINVAL; + } + + sess->cipher_iv_offset = cipher_xform->cipher.iv.offset; + + sess->sam_sess_params.dir = + (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? + SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT; + sess->sam_sess_params.cipher_alg = + cipher_map[cipher_xform->cipher.algo].cipher_alg; + sess->sam_sess_params.cipher_mode = + cipher_map[cipher_xform->cipher.algo].cipher_mode; + + /* Assume IV will be passed together with data. */ + sess->sam_sess_params.cipher_iv = NULL; + + /* Get max key length. */ + if (cipher_xform->cipher.key.length > + cipher_map[cipher_xform->cipher.algo].max_key_len) { + MRVL_CRYPTO_LOG_ERR("Wrong key length!"); + return -EINVAL; + } + + sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length; + sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data; + + return 0; +} + +/** + * Set session parameters for authentication part. + * + * @param sess Crypto session pointer. + * @param auth_xform Pointer to configuration structure for auth operations. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess, + const struct rte_crypto_sym_xform *auth_xform) +{ + /* Make sure we've got proper struct */ + if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) { + MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!"); + return -EINVAL; + } + + /* See if map data is present and valid */ + if ((auth_xform->auth.algo > RTE_DIM(auth_map)) || + (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) { + MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!"); + return -EINVAL; + } + + sess->sam_sess_params.dir = + (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 
+		SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+	sess->sam_sess_params.auth_alg =
+		auth_map[auth_xform->auth.algo].auth_alg;
+	sess->sam_sess_params.u.basic.auth_icv_len =
+		auth_xform->auth.digest_length;
+	/* auth_key must be NULL if auth algorithm does not use HMAC */
+	sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
+					 auth_xform->auth.key.data : NULL;
+	sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
+
+	return 0;
+}
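For reference, the AEAD branch above consumes a single-xform chain built by the application. A minimal sketch of such an xform is shown below; the helper name and all numeric values (16-byte key, 16-byte digest, 8-byte AAD, 12-byte IV) are illustrative choices within the limits advertised by aead_map and the capability table, not part of the driver:

#include <stdint.h>
#include <string.h>
#include <rte_crypto.h>

/* A minimal AES-128-GCM xform of the shape consumed by
 * mrvl_crypto_set_aead_session_parameters(). Values are illustrative
 * and stay within the advertised limits (16..32 byte keys, 16 byte
 * digest, 8..12 byte AAD).
 */
static void
build_gcm_xform_sketch(struct rte_crypto_sym_xform *xform, uint8_t *key16)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->next = NULL;	/* single xform -> MRVL_CRYPTO_CHAIN_COMBINED */
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;	/* -> SAM_DIR_ENCRYPT */
	xform->aead.key.data = key16;
	xform->aead.key.length = 16;	/* <= max_key_len (32) in aead_map */
	xform->aead.digest_length = 16;	/* -> u.basic.auth_icv_len */
	xform->aead.aad_length = 8;	/* -> u.basic.auth_aad_len */
	/* IV travels in the op's private area, as the driver assumes. */
	xform->aead.iv.offset = sizeof(struct rte_crypto_op) +
				sizeof(struct rte_crypto_sym_op);
	xform->aead.iv.length = 12;
}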
+
+/**
+ * Parse crypto transform chain and setup session parameters.
+ *
+ * @param sess Pointer to crypto session.
+ * @param xform Pointer to configuration structure chain for crypto operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *aead_xform = NULL;
+
+	/* Filter out spurious/broken requests */
+	if (xform == NULL)
+		return -EINVAL;
+
+	sess->chain_order = mrvl_crypto_get_chain_order(xform);
+	switch (sess->chain_order) {
+	case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
+		cipher_xform = xform;
+		auth_xform = xform->next;
+		break;
+	case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
+		cipher_xform = xform;
+		break;
+	case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
+		auth_xform = xform;
+		break;
+	case MRVL_CRYPTO_CHAIN_COMBINED:
+		aead_xform = xform;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if ((cipher_xform != NULL) &&
+		(mrvl_crypto_set_cipher_session_parameters(
+			sess, cipher_xform) < 0)) {
+		MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
+		return -EINVAL;
+	}
+
+	if ((auth_xform != NULL) &&
+		(mrvl_crypto_set_auth_session_parameters(
+			sess, auth_xform) < 0)) {
+		MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
+		return -EINVAL;
+	}
+
+	if ((aead_xform != NULL) &&
+		(mrvl_crypto_set_aead_session_parameters(
+			sess, aead_xform) < 0)) {
+		MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
+		return -EINVAL;
+	}
+
+	return 0;
+}
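Since the chain walk accepts at most two linked xforms, an encrypt-then-authenticate session is described by a two-element chain. A minimal sketch of building a chain that classifies as MRVL_CRYPTO_CHAIN_CIPHER_AUTH; the helper name and algorithm choices are illustrative:

#include <string.h>
#include <rte_crypto_sym.h>

/* Sketch: a CIPHER->AUTH chain, which mrvl_crypto_get_chain_order()
 * classifies as MRVL_CRYPTO_CHAIN_CIPHER_AUTH.
 */
static void
build_cipher_auth_chain_sketch(struct rte_crypto_sym_xform *cipher,
			       struct rte_crypto_sym_xform *auth)
{
	memset(cipher, 0, sizeof(*cipher));
	memset(auth, 0, sizeof(*auth));

	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->next = auth;		/* second and last element */

	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->next = NULL;		/* a third element would be rejected */
}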
+
+/*
+ *-----------------------------------------------------------------------------
+ * Process Operations
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Prepare a single request.
+ *
+ * This function basically translates DPDK crypto request into one
+ * understandable by MUSDK's SAM. If this is the first request in a session,
+ * it starts the session.
+ *
+ * @param request Pointer to pre-allocated and reset request buffer [Out].
+ * @param src_bd Pointer to pre-allocated source descriptor [Out].
+ * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
+ * @param op Pointer to DPDK crypto operation struct [In].
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_request_prepare(struct sam_cio_op_params *request,
+		struct sam_buf_info *src_bd,
+		struct sam_buf_info *dst_bd,
+		struct rte_crypto_op *op)
+{
+	struct mrvl_crypto_session *sess;
+	struct rte_mbuf *dst_mbuf;
+	uint8_t *digest;
+
+	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+		MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
+				"oriented requests, op (%p) is sessionless.",
+				op);
+		return -EINVAL;
+	}
+
+	sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
+			op->sym->session, cryptodev_driver_id);
+	if (unlikely(sess == NULL)) {
+		MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
+		return -EINVAL;
+	}
+
+	/*
+	 * If application delivered us null dst buffer, it means it expects
+	 * us to deliver the result in src buffer.
+	 */
+	dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+	request->sa = sess->sam_sess;
+	request->cookie = op;
+
+	/* Single buffers only, sorry. */
+	request->num_bufs = 1;
+	request->src = src_bd;
+	src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
+	src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
+	src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
+
+	/* Empty source. */
+	if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
+		/* EIP does not support 0 length buffers. */
+		MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
+		return -1;
+	}
+
+	/* Empty destination. */
+	if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
+		/* Make dst buffer fit at least source data. */
+		if (rte_pktmbuf_append(dst_mbuf,
+			rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
+			MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
+			return -1;
+		}
+	}
+
+	request->dst = dst_bd;
+	dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
+	dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
+
+	/*
+	 * We can use all available space in dst_mbuf,
+	 * not only what's used currently.
+	 */
+	dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);
+
+	if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
+		request->cipher_len = op->sym->aead.data.length;
+		request->cipher_offset = op->sym->aead.data.offset;
+		request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+			sess->cipher_iv_offset);
+
+		request->auth_aad = op->sym->aead.aad.data;
+		request->auth_offset = request->cipher_offset;
+		request->auth_len = request->cipher_len;
+	} else {
+		request->cipher_len = op->sym->cipher.data.length;
+		request->cipher_offset = op->sym->cipher.data.offset;
+		request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+			sess->cipher_iv_offset);
+
+		request->auth_offset = op->sym->auth.data.offset;
+		request->auth_len = op->sym->auth.data.length;
+	}
+
+	digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
+		op->sym->aead.digest.data : op->sym->auth.digest.data;
+	if (digest == NULL) {
+		/* No auth - no worry. */
+		return 0;
+	}
+
+	request->auth_icv_offset = request->auth_offset + request->auth_len;
+
+	/*
+	 * EIP supports only scenarios where ICV (digest buffer) is placed at
+	 * auth_icv_offset. Any other placement means risking errors.
+	 */
+	if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
+		/*
+		 * This should be the most common case anyway,
+		 * EIP will overwrite DST buffer at auth_icv_offset.
+		 */
+		if (rte_pktmbuf_mtod_offset(
+				dst_mbuf, uint8_t *,
+				request->auth_icv_offset) == digest) {
+			return 0;
+		}
+	} else { /* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
+		/*
+		 * EIP will look for digest at auth_icv_offset
+		 * offset in SRC buffer.
+		 */
+		if (rte_pktmbuf_mtod_offset(
+				op->sym->m_src, uint8_t *,
+				request->auth_icv_offset) == digest) {
+			return 0;
+		}
+	}
+
+	/*
+	 * If we landed here, it means that the digest pointer is
+	 * in a different place than expected.
+	 */
+	return -1;
+}
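The final check above means callers must place the digest exactly auth_len bytes past auth_offset in the destination (encrypt) or source (decrypt) mbuf. A sketch of computing a conforming digest pointer follows; the helper name and the tailroom check are illustrative, not driver code:

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_crypto.h>

/* Sketch: point op->sym->auth.digest at auth.data.offset +
 * auth.data.length inside the mbuf, the only placement
 * mrvl_request_prepare() accepts. The bound mirrors the dst_bd->len
 * computation above (whole buffer minus headroom).
 */
static int
place_digest_sketch(struct rte_crypto_op *op, struct rte_mbuf *m,
		    uint16_t digest_len)
{
	uint32_t icv_off = op->sym->auth.data.offset +
			   op->sym->auth.data.length;

	if (icv_off + digest_len > (uint32_t)m->buf_len -
				   rte_pktmbuf_headroom(m))
		return -1;	/* would fall outside the usable buffer */

	op->sym->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
							    icv_off);
	op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m, icv_off);
	return 0;
}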
+
+/*
+ *-----------------------------------------------------------------------------
+ * PMD Framework handlers
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Enqueue burst.
+ *
+ * @param queue_pair Pointer to queue pair.
+ * @param ops Pointer to ops requests array.
+ * @param nb_ops Number of elements in ops requests array.
+ * @returns Number of elements consumed from ops.
+ */
+static uint16_t
+mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	uint16_t iter_ops = 0;
+	uint16_t to_enq = 0;
+	uint16_t consumed = 0;
+	int ret;
+	struct sam_cio_op_params requests[nb_ops];
+	/*
+	 * DPDK uses single fragment buffers, so we can KISS descriptors.
+	 * SAM does not store bd pointers, so on-stack scope will be enough.
+	 */
+	struct sam_buf_info src_bd[nb_ops];
+	struct sam_buf_info dst_bd[nb_ops];
+	struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
+
+	if (nb_ops == 0)
+		return 0;
+
+	/* Prepare the burst. */
+	memset(&requests, 0, sizeof(requests));
+
+	/* Iterate through the ops. */
+	for (; iter_ops < nb_ops; ++iter_ops) {
+		if (mrvl_request_prepare(&requests[iter_ops],
+					&src_bd[iter_ops],
+					&dst_bd[iter_ops],
+					ops[iter_ops]) < 0) {
+			MRVL_CRYPTO_LOG_ERR(
+				"Error while parameters preparation!");
+			qp->stats.enqueue_err_count++;
+			ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+			/*
+			 * Number of handled ops is increased
+			 * (even if the result of handling is error).
+			 */
+			++consumed;
+			break;
+		}
+
+		ops[iter_ops]->status =
+			RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+		/* Increase the number of ops to enqueue. */
+		++to_enq;
+	} /* for (; iter_ops < nb_ops;... */
+
+	if (to_enq > 0) {
+		/* Send the burst */
+		ret = sam_cio_enq(qp->cio, requests, &to_enq);
+		consumed += to_enq;
+		if (ret < 0) {
+			/*
+			 * Trust SAM that in this case the returned value will
+			 * at some point be correct (for now it is returned
+			 * unmodified).
+			 */
+			qp->stats.enqueue_err_count += to_enq;
+			for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
+				ops[iter_ops]->status =
+					RTE_CRYPTO_OP_STATUS_ERROR;
+		}
+	}
+
+	qp->stats.enqueued_count += to_enq;
+	return consumed;
+}
+
+/**
+ * Dequeue burst.
+ *
+ * @param queue_pair Pointer to queue pair.
+ * @param ops Pointer to ops requests array.
+ * @param nb_ops Number of elements in ops requests array.
+ * @returns Number of elements dequeued.
+ */
+static uint16_t
+mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
+		struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	int ret;
+	struct mrvl_crypto_qp *qp = queue_pair;
+	struct sam_cio *cio = qp->cio;
+	struct sam_cio_op_result results[nb_ops];
+	uint16_t i;
+
+	ret = sam_cio_deq(cio, results, &nb_ops);
+	if (ret < 0) {
+		/* Count all dequeued as error. */
+		qp->stats.dequeue_err_count += nb_ops;
+
+		/* But act as if they were dequeued anyway. */
+		qp->stats.dequeued_count += nb_ops;
+
+		return 0;
+	}
+
+	/* Unpack and check results. */
+	for (i = 0; i < nb_ops; ++i) {
+		ops[i] = results[i].cookie;
+
+		switch (results[i].status) {
+		case SAM_CIO_OK:
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			break;
+		case SAM_CIO_ERR_ICV:
+			MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+			break;
+		default:
+			MRVL_CRYPTO_LOG_DBG(
+				"CIO returned Error: %d", results[i].status);
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			break;
+		}
+	}
+
+	qp->stats.dequeued_count += nb_ops;
+	return nb_ops;
+}
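Together the two handlers follow the usual DPDK burst contract: the enqueue return value counts consumed ops (including ones that failed preparation and come back immediately with STATUS_ERROR), while everything actually submitted is later collected by dequeue with a per-op status. A caller-side sketch, assuming every op prepares successfully and that dev_id, qp_id and the ops array are initialized elsewhere:

#include <rte_cryptodev.h>

/*
 * Sketch of the caller-side polling contract for the burst handlers
 * above. Assumes every op prepares successfully; an op that fails
 * mrvl_request_prepare() is counted as consumed but never reaches the
 * hardware, so it must not be waited for on the dequeue side.
 */
static uint16_t
poll_burst_sketch(uint8_t dev_id, uint16_t qp_id,
		  struct rte_crypto_op **ops, uint16_t n)
{
	uint16_t enq = 0, deq = 0, errors = 0;

	while (enq < n)
		enq += rte_cryptodev_enqueue_burst(dev_id, qp_id,
						   ops + enq, n - enq);

	while (deq < n) {
		uint16_t i, got = rte_cryptodev_dequeue_burst(dev_id, qp_id,
				ops + deq, n - deq);

		for (i = 0; i < got; i++)
			if (ops[deq + i]->status !=
			    RTE_CRYPTO_OP_STATUS_SUCCESS)
				errors++; /* e.g. AUTH_FAILED for a bad ICV */
		deq += got;
	}

	return errors;
}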
+
+/**
+ * Create a new crypto device.
+ *
+ * @param name Driver name.
+ * @param vdev Pointer to device structure.
+ * @param init_params Pointer to initialization parameters.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_create(const char *name,
+		struct rte_vdev_device *vdev,
+		struct mrvl_pmd_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	struct mrvl_crypto_private *internals;
+	struct sam_init_params sam_params;
+	int ret;
+
+	dev = rte_cryptodev_pmd_create(name, &vdev->device,
+			&init_params->common);
+	if (dev == NULL) {
+		MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	dev->driver_id = cryptodev_driver_id;
+	dev->dev_ops = rte_mrvl_crypto_pmd_ops;
+
+	/* Register rx/tx burst functions for data path. */
+	dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
+	dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
+
+	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_HW_ACCELERATED;
+
+	/* Store the device limits in the private data. */
+	internals = dev->data->dev_private;
+
+	internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
+	internals->max_nb_sessions = init_params->max_nb_sessions;
+
+	/*
+	 * ret == -EEXIST is correct, it means DMA
+	 * has already been initialized.
+	 */
+	ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
+	if (ret < 0) {
+		if (ret != -EEXIST)
+			return ret;
+
+		MRVL_CRYPTO_LOG_INFO(
+			"DMA memory has been already initialized by a different driver.");
+	}
+
+	sam_params.max_num_sessions = internals->max_nb_sessions;
+
+	return sam_init(&sam_params);
+
+init_error:
+	MRVL_CRYPTO_LOG_ERR(
+		"driver %s: %s failed", init_params->common.name, __func__);
+
+	cryptodev_mrvl_crypto_uninit(vdev);
+	return -EFAULT;
+}
+
+/** Parse an integer argument. */
+static int
+parse_integer_arg(const char *key __rte_unused,
+		const char *value, void *extra_args)
+{
+	int *i = (int *) extra_args;
+
+	*i = atoi(value);
+	if (*i < 0) {
+		MRVL_CRYPTO_LOG_ERR("Argument has to be positive.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/** Parse a name argument. */
+static int
+parse_name_arg(const char *key __rte_unused,
+		const char *value, void *extra_args)
+{
+	struct rte_cryptodev_pmd_init_params *params = extra_args;
+
+	if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+		MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
+				"%u bytes.\n", value,
+				RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+		return -EINVAL;
+	}
+
+	strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+	return 0;
+}
+
+static int
+mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
+		const char *input_args)
+{
+	struct rte_kvargs *kvlist = NULL;
+	int ret = 0;
+
+	if (params == NULL)
+		return -EINVAL;
+
+	if (input_args) {
+		kvlist = rte_kvargs_parse(input_args,
+				mrvl_pmd_valid_params);
+		if (kvlist == NULL)
+			return -1;
+
+		/* Common VDEV parameters */
+		ret = rte_kvargs_process(kvlist,
+				RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
+				&parse_integer_arg,
+				&params->common.max_nb_queue_pairs);
+		if (ret < 0)
+			goto free_kvlist;
+
+		ret = rte_kvargs_process(kvlist,
+				RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
+				&parse_integer_arg,
+				&params->common.socket_id);
+		if (ret < 0)
+			goto free_kvlist;
+
+		ret = rte_kvargs_process(kvlist,
+				RTE_CRYPTODEV_PMD_NAME_ARG,
+				&parse_name_arg,
+				&params->common);
+		if (ret < 0)
+			goto free_kvlist;
+
+		ret = rte_kvargs_process(kvlist,
+				MRVL_PMD_MAX_NB_SESS_ARG,
+				&parse_integer_arg,
+				params);
+		if (ret < 0)
+			goto free_kvlist;
+
+	}
+
+free_kvlist:
+	rte_kvargs_free(kvlist);
+	return ret;
+}
+
+/**
+ * Initialize the crypto device.
+ *
+ * @param vdev Pointer to device structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */ +static int +cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev) +{ + struct mrvl_pmd_init_params init_params = { + .common = { + .name = "", + .private_data_size = + sizeof(struct mrvl_crypto_private), + .max_nb_queue_pairs = + sam_get_num_inst() * SAM_HW_RING_NUM, + .socket_id = rte_socket_id() + }, + .max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS + }; + + const char *name, *args; + int ret; + + name = rte_vdev_device_name(vdev); + if (name == NULL) + return -EINVAL; + args = rte_vdev_device_args(vdev); + + ret = mrvl_pmd_parse_input_args(&init_params, args); + if (ret) { + RTE_LOG(ERR, PMD, + "Failed to parse initialisation arguments[%s]\n", + args); + return -EINVAL; + } + + return cryptodev_mrvl_crypto_create(name, vdev, &init_params); +} + +/** + * Uninitialize the crypto device + * + * @param vdev Pointer to device structure. + * @returns 0 in case of success, negative value otherwise. + */ +static int +cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev) +{ + struct rte_cryptodev *cryptodev; + const char *name = rte_vdev_device_name(vdev); + + if (name == NULL) + return -EINVAL; + + RTE_LOG(INFO, PMD, + "Closing Marvell crypto device %s on numa socket %u\n", + name, rte_socket_id()); + + sam_deinit(); + + cryptodev = rte_cryptodev_pmd_get_named_dev(name); + if (cryptodev == NULL) + return -ENODEV; + + return rte_cryptodev_pmd_destroy(cryptodev); +} + +/** + * Basic driver handlers for use in the constructor. + */ +static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = { + .probe = cryptodev_mrvl_crypto_init, + .remove = cryptodev_mrvl_crypto_uninit +}; + +static struct cryptodev_driver mrvl_crypto_drv; + +/* Register the driver in constructor. */ +RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv); +RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD, + "max_nb_queue_pairs= " + "max_nb_sessions= " + "socket_id="); +RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver, + cryptodev_driver_id); diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c new file mode 100644 index 00000000..c045562c --- /dev/null +++ b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c @@ -0,0 +1,722 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Marvell International Ltd. + * Copyright(c) 2017 Semihalf. + * All rights reserved. + */ + +#include + +#include +#include +#include + +#include "rte_mrvl_pmd_private.h" + +/** + * Capabilities list to be used in reporting to DPDK. 
+ */ +static const struct rte_cryptodev_capabilities + mrvl_crypto_pmd_capabilities[] = { + { /* MD5 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_MD5_HMAC, + .block_size = 64, + .key_size = { + .min = 1, + .max = 64, + .increment = 1 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + }, } + }, } + }, + { /* MD5 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_MD5, + .block_size = 64, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA1 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, + .block_size = 64, + .key_size = { + .min = 1, + .max = 64, + .increment = 1 + }, + .digest_size = { + .min = 20, + .max = 20, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA1 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA1, + .block_size = 64, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 20, + .max = 20, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA224 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA224, + .block_size = 64, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 28, + .max = 28, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA256 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, + .block_size = 64, + .key_size = { + .min = 1, + .max = 64, + .increment = 1 + }, + .digest_size = { + .min = 32, + .max = 32, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA256 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA256, + .block_size = 64, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 32, + .max = 32, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA384 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, + .block_size = 128, + .key_size = { + .min = 1, + .max = 128, + .increment = 1 + }, + .digest_size = { + .min = 48, + .max = 48, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA384 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA384, + .block_size = 128, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 48, + .max = 48, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA512 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, + .block_size = 128, + .key_size = { + .min = 1, + .max = 128, + .increment = 1 + }, + .digest_size = { + .min = 64, + .max = 64, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA512 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA512, + .block_size = 
128, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 64, + .max = 64, + .increment = 0 + }, + }, } + }, } + }, + { /* AES CBC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_AES_CBC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .iv_size = { + .min = 16, + .max = 16, + .increment = 0 + } + }, } + }, } + }, + { /* AES CTR */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_AES_CTR, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .iv_size = { + .min = 16, + .max = 16, + .increment = 0 + } + }, } + }, } + }, + { /* AES GCM */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, + {.aead = { + .algo = RTE_CRYPTO_AEAD_AES_GCM, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .aad_size = { + .min = 8, + .max = 12, + .increment = 4 + }, + .iv_size = { + .min = 12, + .max = 16, + .increment = 4 + } + }, } + }, } + }, + { /* AES GMAC (AUTH) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_AES_GMAC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .iv_size = { + .min = 8, + .max = 65532, + .increment = 4 + } + }, } + }, } + }, + { /* 3DES CBC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_3DES_CBC, + .block_size = 8, + .key_size = { + .min = 24, + .max = 24, + .increment = 0 + }, + .iv_size = { + .min = 8, + .max = 8, + .increment = 0 + } + }, } + }, } + }, + { /* 3DES CTR */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_3DES_CTR, + .block_size = 8, + .key_size = { + .min = 24, + .max = 24, + .increment = 0 + }, + .iv_size = { + .min = 8, + .max = 8, + .increment = 0 + } + }, } + }, } + }, + + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + + +/** + * Configure device (PMD ops callback). + * + * @param dev Pointer to the device structure. + * @param config Pointer to configuration structure. + * @returns 0. Always. + */ +static int +mrvl_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev, + __rte_unused struct rte_cryptodev_config *config) +{ + return 0; +} + +/** + * Start device (PMD ops callback). + * + * @param dev Pointer to the device structure. + * @returns 0. Always. + */ +static int +mrvl_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev) +{ + return 0; +} + +/** + * Stop device (PMD ops callback). + * + * @param dev Pointer to the device structure. + * @returns 0. Always. + */ +static void +mrvl_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev) +{ +} + +/** + * Get device statistics (PMD ops callback). + * + * @param dev Pointer to the device structure. + * @param stats Pointer to statistics structure [out]. 
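+ *
+ * Counters are accumulated over all configured queue pairs; the cryptodev
+ * layer zeroes *stats before invoking this callback, which is why the
+ * additions below need no explicit reset. A typical application-side use,
+ * assuming a configured device dev_id, is simply:
+ *
+ *   struct rte_cryptodev_stats stats;
+ *
+ *   if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
+ *       printf("enqueued %" PRIu64 ", dequeued %" PRIu64 "\n",
+ *              stats.enqueued_count, stats.dequeued_count);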
+ */ +static void +mrvl_crypto_pmd_stats_get(struct rte_cryptodev *dev, + struct rte_cryptodev_stats *stats) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { + struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id]; + + stats->enqueued_count += qp->stats.enqueued_count; + stats->dequeued_count += qp->stats.dequeued_count; + + stats->enqueue_err_count += qp->stats.enqueue_err_count; + stats->dequeue_err_count += qp->stats.dequeue_err_count; + } +} + +/** + * Reset device statistics (PMD ops callback). + * + * @param dev Pointer to the device structure. + */ +static void +mrvl_crypto_pmd_stats_reset(struct rte_cryptodev *dev) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { + struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id]; + + memset(&qp->stats, 0, sizeof(qp->stats)); + } +} + +/** + * Get device info (PMD ops callback). + * + * @param dev Pointer to the device structure. + * @param dev_info Pointer to the device info structure [out]. + */ +static void +mrvl_crypto_pmd_info_get(struct rte_cryptodev *dev, + struct rte_cryptodev_info *dev_info) +{ + struct mrvl_crypto_private *internals = dev->data->dev_private; + + if (dev_info != NULL) { + dev_info->driver_id = dev->driver_id; + dev_info->feature_flags = dev->feature_flags; + dev_info->capabilities = mrvl_crypto_pmd_capabilities; + dev_info->max_nb_queue_pairs = internals->max_nb_qpairs; + dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + } +} + +/** + * Release queue pair (PMD ops callback). + * + * @param dev Pointer to the device structure. + * @param qp_id ID of Queue Pair to release. + * @returns 0. Always. + */ +static int +mrvl_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id) +{ + struct mrvl_crypto_qp *qp = + (struct mrvl_crypto_qp *)dev->data->queue_pairs[qp_id]; + + if (dev->data->queue_pairs[qp_id] != NULL) { + sam_cio_flush(qp->cio); + sam_cio_deinit(qp->cio); + rte_free(dev->data->queue_pairs[qp_id]); + dev->data->queue_pairs[qp_id] = NULL; + } + + return 0; +} + +/** + * Close device (PMD ops callback). + * + * @param dev Pointer to the device structure. + * @returns 0. Always. + */ +static int +mrvl_crypto_pmd_close(struct rte_cryptodev *dev) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) + mrvl_crypto_pmd_qp_release(dev, qp_id); + + return 0; +} + +/** + * Setup a queue pair (PMD ops callback). + * + * @param dev Pointer to the device structure. + * @param qp_id ID of the Queue Pair. + * @param qp_conf Queue pair configuration (nb of descriptors). + * @param socket_id NUMA socket to allocate memory on. + * @returns 0 upon success, negative value otherwise. + */ +static int +mrvl_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, + const struct rte_cryptodev_qp_conf *qp_conf, + int socket_id, struct rte_mempool *session_pool) +{ + struct mrvl_crypto_qp *qp = NULL; + char match[RTE_CRYPTODEV_NAME_MAX_LEN]; + unsigned int n; + + /* Allocate the queue pair data structure. */ + qp = rte_zmalloc_socket("MRVL Crypto PMD Queue Pair", sizeof(*qp), + RTE_CACHE_LINE_SIZE, socket_id); + if (qp == NULL) + return -ENOMEM; + + /* Free old qp prior setup if needed. */ + if (dev->data->queue_pairs[qp_id] != NULL) + mrvl_crypto_pmd_qp_release(dev, qp_id); + + do { /* Error handling block */ + + /* + * This extra check is necessary due to a bug in + * crypto library. 
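+	 *
+	 * The do { ... } while (0) wrapper around this code is purely an
+	 * error-handling idiom: every failing step breaks out of the block
+	 * and falls through to the single rte_free(qp) cleanup after it.
+	 * It is equivalent to a goto-based unwind, sketched here only for
+	 * reference:
+	 *
+	 *   if (sam_cio_init(&qp->cio_params, &qp->cio) < 0)
+	 *       goto cleanup;
+	 *   ...
+	 *   return 0;
+	 *  cleanup:
+	 *   rte_free(qp);
+	 *   return -1;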
+ */
+		int num = sam_get_num_inst();
+		if (num == 0) {
+			MRVL_CRYPTO_LOG_ERR("No crypto engines detected.\n");
+			return -1;
+		}
+
+		/*
+		 * In case two crypto engines are enabled qps will
+		 * be evenly spread among them. Even and odd qps will
+		 * be handled by cio-0 and cio-1 respectively. qp-cio mapping
+		 * will look as follows:
+		 *
+		 * qp:      0        1        2        3
+		 * cio-x:y: cio-0:0, cio-1:0, cio-0:1, cio-1:1
+		 *
+		 * qp:      4        5        6        7
+		 * cio-x:y: cio-0:2, cio-1:2, cio-0:3, cio-1:3
+		 *
+		 * In case just one engine is enabled mapping will look as
+		 * follows:
+		 * qp:      0        1        2        3
+		 * cio-x:y: cio-0:0, cio-0:1, cio-0:2, cio-0:3
+		 */
+		n = snprintf(match, sizeof(match), "cio-%u:%u",
+				qp_id % num, qp_id / num);
+
+		if (n >= sizeof(match))
+			break;
+
+		qp->cio_params.match = match;
+		qp->cio_params.size = qp_conf->nb_descriptors;
+
+		if (sam_cio_init(&qp->cio_params, &qp->cio) < 0)
+			break;
+
+		qp->sess_mp = session_pool;
+
+		memset(&qp->stats, 0, sizeof(qp->stats));
+		dev->data->queue_pairs[qp_id] = qp;
+		return 0;
+	} while (0);
+
+	rte_free(qp);
+	return -1;
+}
+
+/** Return the number of allocated queue pairs (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns Number of allocated queue pairs.
+ */
+static uint32_t
+mrvl_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure [Unused].
+ * @returns Size of Marvell crypto session.
+ */
+static unsigned
+mrvl_crypto_pmd_sym_session_get_size(__rte_unused struct rte_cryptodev *dev)
+{
+	return sizeof(struct mrvl_crypto_session);
+}
+
+/** Configure the session from a crypto xform chain (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param xform Pointer to the crypto configuration structure.
+ * @param sess Pointer to the empty session structure.
+ * @returns 0 upon success, negative value otherwise.
+ */
+static int
+mrvl_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,
+		struct rte_cryptodev_sym_session *sess,
+		struct rte_mempool *mp)
+{
+	struct mrvl_crypto_session *mrvl_sess;
+	void *sess_private_data;
+	int ret;
+
+	if (sess == NULL) {
+		MRVL_CRYPTO_LOG_ERR("Invalid session struct.");
+		return -EINVAL;
+	}
+
+	if (rte_mempool_get(mp, &sess_private_data)) {
+		CDEV_LOG_ERR("Couldn't get object from session mempool.");
+		return -ENOMEM;
+	}
+
+	ret = mrvl_crypto_set_session_parameters(sess_private_data, xform);
+	if (ret != 0) {
+		MRVL_CRYPTO_LOG_ERR("Failed to configure session parameters.");
+
+		/* Return session to mempool */
+		rte_mempool_put(mp, sess_private_data);
+		return ret;
+	}
+
+	set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
+
+	mrvl_sess = (struct mrvl_crypto_session *)sess_private_data;
+	if (sam_session_create(&mrvl_sess->sam_sess_params,
+				&mrvl_sess->sam_sess) < 0) {
+		MRVL_CRYPTO_LOG_ERR("Failed to create session!");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * Clear the memory of session so it doesn't leave key material behind.
+ *
+ * @param dev Pointer to the device structure.
+ * @param sess Pointer to the session to clear.
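+ *
+ * Reached through the public teardown path; the application-side sequence,
+ * assuming sess was created and initialised on device dev_id, is a sketch
+ * like:
+ *
+ *   rte_cryptodev_sym_session_clear(dev_id, sess);
+ *   rte_cryptodev_sym_session_free(sess);
+ *
+ * where the clear step lands in this callback, which also releases the
+ * underlying MUSDK SAM session via sam_session_destroy().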
+ */
+static void
+mrvl_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_sym_session_private_data(sess, index);
+
+	/* Zero out the whole structure */
+	if (sess_priv) {
+		struct mrvl_crypto_session *mrvl_sess =
+			(struct mrvl_crypto_session *)sess_priv;
+
+		if (mrvl_sess->sam_sess &&
+		    sam_session_destroy(mrvl_sess->sam_sess) < 0) {
+			MRVL_CRYPTO_LOG_INFO("Error while destroying session!");
+		}
+
+		memset(sess_priv, 0, sizeof(struct mrvl_crypto_session));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+		set_sym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+}
+
+/**
+ * PMD handlers for crypto ops.
+ */
+static struct rte_cryptodev_ops mrvl_crypto_pmd_ops = {
+	.dev_configure = mrvl_crypto_pmd_config,
+	.dev_start = mrvl_crypto_pmd_start,
+	.dev_stop = mrvl_crypto_pmd_stop,
+	.dev_close = mrvl_crypto_pmd_close,
+
+	.dev_infos_get = mrvl_crypto_pmd_info_get,
+
+	.stats_get = mrvl_crypto_pmd_stats_get,
+	.stats_reset = mrvl_crypto_pmd_stats_reset,
+
+	.queue_pair_setup = mrvl_crypto_pmd_qp_setup,
+	.queue_pair_release = mrvl_crypto_pmd_qp_release,
+	.queue_pair_count = mrvl_crypto_pmd_qp_count,
+
+	.sym_session_get_size = mrvl_crypto_pmd_sym_session_get_size,
+	.sym_session_configure = mrvl_crypto_pmd_sym_session_configure,
+	.sym_session_clear = mrvl_crypto_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops = &mrvl_crypto_pmd_ops;
diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd_private.h b/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
new file mode 100644
index 00000000..c16d95b4
--- /dev/null
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _RTE_MRVL_PMD_PRIVATE_H_
+#define _RTE_MRVL_PMD_PRIVATE_H_
+
+#include "rte_mrvl_compat.h"
+
+#define CRYPTODEV_NAME_MRVL_PMD crypto_mvsam
+/**< Marvell PMD device name */
+
+#define MRVL_CRYPTO_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+		RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+		__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG
+#define MRVL_CRYPTO_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+		RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+		__func__, __LINE__, ## args)
+
+#define MRVL_CRYPTO_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+		RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+		__func__, __LINE__, ## args)
+
+#else
+#define MRVL_CRYPTO_LOG_INFO(fmt, args...)
+#define MRVL_CRYPTO_LOG_DBG(fmt, args...)
+#endif
+
+/**
+ * Handy bits->bytes conversion macro.
+ */
+#define BITS2BYTES(x) ((x) >> 3)
+
+/** The operation order mode enumerator. */
+enum mrvl_crypto_chain_order {
+	MRVL_CRYPTO_CHAIN_CIPHER_ONLY,
+	MRVL_CRYPTO_CHAIN_AUTH_ONLY,
+	MRVL_CRYPTO_CHAIN_CIPHER_AUTH,
+	MRVL_CRYPTO_CHAIN_AUTH_CIPHER,
+	MRVL_CRYPTO_CHAIN_COMBINED,
+	MRVL_CRYPTO_CHAIN_NOT_SUPPORTED,
+};
+
+/** Private data structure for each crypto device. */
+struct mrvl_crypto_private {
+	unsigned int max_nb_qpairs;	/**< Max number of queue pairs */
+	unsigned int max_nb_sessions;	/**< Max number of sessions */
+};
+
+/** MRVL crypto queue pair structure. */
+struct mrvl_crypto_qp {
+	/** SAM CIO (MUSDK Queue Pair equivalent).*/
+	struct sam_cio *cio;
+
+	/** Session Mempool.
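+	 *
+	 * Filled from the session_pool argument of queue-pair setup. It must
+	 * be the same pool the application hands to
+	 * rte_cryptodev_sym_session_create() and _init(). One possible
+	 * sizing sketch (object count and cache size are illustrative only):
+	 *
+	 *   elt = rte_cryptodev_sym_get_header_session_size() +
+	 *         rte_cryptodev_sym_get_private_session_size(dev_id);
+	 *   mp = rte_mempool_create("sess_mp", 2048, elt, 128, 0,
+	 *                           NULL, NULL, NULL, NULL,
+	 *                           rte_socket_id(), 0);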
*/ + struct rte_mempool *sess_mp; + + /** Queue pair statistics. */ + struct rte_cryptodev_stats stats; + + /** CIO initialization parameters.*/ + struct sam_cio_params cio_params; +} __rte_cache_aligned; + +/** MRVL crypto private session structure. */ +struct mrvl_crypto_session { + /** Crypto operations chain order. */ + enum mrvl_crypto_chain_order chain_order; + + /** Session initialization parameters. */ + struct sam_session_params sam_sess_params; + + /** SAM session pointer. */ + struct sam_sa *sam_sess; + + /** Cipher IV offset. */ + uint16_t cipher_iv_offset; +} __rte_cache_aligned; + +/** Set and validate MRVL crypto session parameters */ +extern int +mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess, + const struct rte_crypto_sym_xform *xform); + +/** device specific operations function pointer structure */ +extern struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops; + +#endif /* _RTE_MRVL_PMD_PRIVATE_H_ */ diff --git a/drivers/crypto/mvsam/rte_pmd_mvsam_version.map b/drivers/crypto/mvsam/rte_pmd_mvsam_version.map new file mode 100644 index 00000000..a7530317 --- /dev/null +++ b/drivers/crypto/mvsam/rte_pmd_mvsam_version.map @@ -0,0 +1,3 @@ +DPDK_17.11 { + local: *; +}; diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c index dc8e5776..6e29a21a 100644 --- a/drivers/crypto/null/null_crypto_pmd.c +++ b/drivers/crypto/null/null_crypto_pmd.c @@ -78,7 +78,7 @@ get_session(struct null_crypto_qp *qp, struct rte_crypto_op *op) if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { if (likely(sym_op->session != NULL)) sess = (struct null_crypto_session *) - get_session_private_data( + get_sym_session_private_data( sym_op->session, cryptodev_driver_id); } else { void *_sess = NULL; @@ -99,8 +99,8 @@ get_session(struct null_crypto_qp *qp, struct rte_crypto_op *op) sess = NULL; } sym_op->session = (struct rte_cryptodev_sym_session *)_sess; - set_session_private_data(sym_op->session, cryptodev_driver_id, - _sess_private_data); + set_sym_session_private_data(op->sym->session, + cryptodev_driver_id, _sess_private_data); } return sess; @@ -161,10 +161,9 @@ cryptodev_null_create(const char *name, { struct rte_cryptodev *dev; struct null_crypto_private *internals; - dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); if (dev == NULL) { - NULL_CRYPTO_LOG_ERR("failed to create cryptodev vdev"); + NULL_LOG(ERR, "failed to create cryptodev vdev"); return -EFAULT; } @@ -177,12 +176,11 @@ cryptodev_null_create(const char *name, dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | - RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER; + RTE_CRYPTODEV_FF_IN_PLACE_SGL; internals = dev->data->dev_private; internals->max_nb_qpairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; return 0; } @@ -195,8 +193,7 @@ cryptodev_null_probe(struct rte_vdev_device *dev) "", sizeof(struct null_crypto_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name, *args; int retval; @@ -209,8 +206,9 @@ cryptodev_null_probe(struct rte_vdev_device *dev) retval = rte_cryptodev_pmd_parse_input_args(&init_params, args); if (retval) { - RTE_LOG(ERR, PMD, - "Failed to parse initialisation arguments[%s]\n", args); + NULL_LOG(ERR, + "Failed to parse initialisation arguments[%s]", + args); return -EINVAL; } @@ -245,7 +243,11 @@ 
 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd_drv);
 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
 	"max_nb_queue_pairs=<int> "
-	"max_nb_sessions=<int> "
 	"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv.driver,
 		cryptodev_driver_id);
+
+RTE_INIT(null_init_log)
+{
+	null_logtype_driver = rte_log_register("pmd.crypto.null");
+}
diff --git a/drivers/crypto/null/null_crypto_pmd_ops.c b/drivers/crypto/null/null_crypto_pmd_ops.c
index f8e5f61f..bb2b6e14 100644
--- a/drivers/crypto/null/null_crypto_pmd_ops.c
+++ b/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -121,7 +121,8 @@ null_crypto_pmd_info_get(struct rte_cryptodev *dev,
 	if (dev_info != NULL) {
 		dev_info->driver_id = dev->driver_id;
 		dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
-		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+		/* No limit of number of sessions */
+		dev_info->sym.max_nb_sessions = 0;
 		dev_info->feature_flags = dev->feature_flags;
 		dev_info->capabilities = null_crypto_pmd_capabilities;
 	}
@@ -163,15 +164,15 @@ null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp,
 	r = rte_ring_lookup(qp->name);
 	if (r) {
 		if (rte_ring_get_size(r) >= ring_size) {
-			NULL_CRYPTO_LOG_INFO(
-				"Reusing existing ring %s for processed packets",
-				qp->name);
+			NULL_LOG(INFO,
+				"Reusing existing ring %s for processed packets",
+				qp->name);
 			return r;
 		}
 
-		NULL_CRYPTO_LOG_INFO(
-			"Unable to reuse existing ring %s for processed packets",
-			qp->name);
+		NULL_LOG(INFO,
+			"Unable to reuse existing ring %s for processed packets",
+			qp->name);
 		return NULL;
 	}
 
@@ -190,7 +191,7 @@ null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	int retval;
 
 	if (qp_id >= internals->max_nb_qpairs) {
-		NULL_CRYPTO_LOG_ERR("Invalid qp_id %u, greater than maximum "
+		NULL_LOG(ERR, "Invalid qp_id %u, greater than maximum "
 				"number of queue pairs supported (%u).",
 				qp_id, internals->max_nb_qpairs);
 		return (-EINVAL);
@@ -204,7 +205,7 @@ null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	qp = rte_zmalloc_socket("Null Crypto PMD Queue Pair", sizeof(*qp),
 			RTE_CACHE_LINE_SIZE, socket_id);
 	if (qp == NULL) {
-		NULL_CRYPTO_LOG_ERR("Failed to allocate queue pair memory");
+		NULL_LOG(ERR, "Failed to allocate queue pair memory");
 		return (-ENOMEM);
 	}
 
@@ -213,15 +214,16 @@ null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
 	retval = null_crypto_pmd_qp_set_unique_name(dev, qp);
 	if (retval) {
-		NULL_CRYPTO_LOG_ERR("Failed to create unique name for null "
+		NULL_LOG(ERR, "Failed to create unique name for null "
 				"crypto device");
+		goto qp_setup_cleanup;
 	}
 
 	qp->processed_pkts = null_crypto_pmd_qp_create_processed_pkts_ring(qp,
 			qp_conf->nb_descriptors, socket_id);
 	if (qp->processed_pkts == NULL) {
-		NULL_CRYPTO_LOG_ERR("Failed to create unique name for null "
-				"crypto device");
+		NULL_LOG(ERR, "Failed to create ring for processed packets");
 		goto qp_setup_cleanup;
 	}
 
@@ -239,22 +241,6 @@ qp_setup_cleanup:
 	return -1;
 }
 
-/** Start queue pair */
-static int
-null_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint16_t queue_pair_id)
-{
-	return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-null_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint16_t queue_pair_id)
-{
-	return -ENOTSUP;
-}
-
 /** Return the number of allocated queue pairs */
 static uint32_t
null_crypto_pmd_qp_count(struct rte_cryptodev *dev) @@ -264,14 +250,14 @@ null_crypto_pmd_qp_count(struct rte_cryptodev *dev) /** Returns the size of the NULL crypto session structure */ static unsigned -null_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +null_crypto_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct null_crypto_session); } /** Configure a null crypto session from a crypto xform chain */ static int -null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, +null_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mp) @@ -280,26 +266,26 @@ null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, int ret; if (unlikely(sess == NULL)) { - NULL_CRYPTO_LOG_ERR("invalid session struct"); + NULL_LOG(ERR, "invalid session struct"); return -EINVAL; } if (rte_mempool_get(mp, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + NULL_LOG(ERR, + "Couldn't get object from session mempool"); return -ENOMEM; } ret = null_crypto_set_session_parameters(sess_private_data, xform); if (ret != 0) { - NULL_CRYPTO_LOG_ERR("failed configure session parameters"); + NULL_LOG(ERR, "failed configure session parameters"); /* Return session to mempool */ rte_mempool_put(mp, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -307,17 +293,17 @@ null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, /** Clear the memory of session so it doesn't leave key material behind */ static void -null_crypto_pmd_session_clear(struct rte_cryptodev *dev, +null_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { memset(sess_priv, 0, sizeof(struct null_crypto_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -335,13 +321,11 @@ struct rte_cryptodev_ops pmd_ops = { .queue_pair_setup = null_crypto_pmd_qp_setup, .queue_pair_release = null_crypto_pmd_qp_release, - .queue_pair_start = null_crypto_pmd_qp_start, - .queue_pair_stop = null_crypto_pmd_qp_stop, .queue_pair_count = null_crypto_pmd_qp_count, - .session_get_size = null_crypto_pmd_session_get_size, - .session_configure = null_crypto_pmd_session_configure, - .session_clear = null_crypto_pmd_session_clear + .sym_session_get_size = null_crypto_pmd_sym_session_get_size, + .sym_session_configure = null_crypto_pmd_sym_session_configure, + .sym_session_clear = null_crypto_pmd_sym_session_clear }; struct rte_cryptodev_ops *null_crypto_pmd_ops = &pmd_ops; diff --git a/drivers/crypto/null/null_crypto_pmd_private.h b/drivers/crypto/null/null_crypto_pmd_private.h index 0fd13362..d5905afd 100644 --- a/drivers/crypto/null/null_crypto_pmd_private.h +++ b/drivers/crypto/null/null_crypto_pmd_private.h @@ -8,31 +8,17 @@ #define CRYPTODEV_NAME_NULL_PMD crypto_null /**< Null crypto PMD device name */ -#define NULL_CRYPTO_LOG_ERR(fmt, args...) 
\ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_NULL_PMD), \ - __func__, __LINE__, ## args) +int null_logtype_driver; -#ifdef RTE_LIBRTE_NULL_CRYPTO_DEBUG -#define NULL_CRYPTO_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_NULL_PMD), \ - __func__, __LINE__, ## args) - -#define NULL_CRYPTO_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_NULL_PMD), \ - __func__, __LINE__, ## args) -#else -#define NULL_CRYPTO_LOG_INFO(fmt, args...) -#define NULL_CRYPTO_LOG_DBG(fmt, args...) -#endif +#define NULL_LOG(level, fmt, ...) \ + rte_log(RTE_LOG_ ## level, null_logtype_driver, \ + "%s() line %u: "fmt "\n", __func__, __LINE__, \ + ## __VA_ARGS__) /** private data structure for each NULL crypto device */ struct null_crypto_private { unsigned max_nb_qpairs; /**< Max number of queue pairs */ - unsigned max_nb_sessions; /**< Max number of sessions */ }; /** NULL crypto queue pair */ diff --git a/drivers/crypto/openssl/compat.h b/drivers/crypto/openssl/compat.h new file mode 100644 index 00000000..45f9a33d --- /dev/null +++ b/drivers/crypto/openssl/compat.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium Networks + */ + +#ifndef __RTA_COMPAT_H__ +#define __RTA_COMPAT_H__ + +#if (OPENSSL_VERSION_NUMBER < 0x10100000L) + +#define set_rsa_params(rsa, p, q, ret) \ + do {rsa->p = p; rsa->q = q; ret = 0; } while (0) + +#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \ + do { \ + rsa->dmp1 = dmp1; \ + rsa->dmq1 = dmq1; \ + rsa->iqmp = iqmp; \ + ret = 0; \ + } while (0) + +#define set_rsa_keys(rsa, n, e, d, ret) \ + do { \ + rsa->n = n; rsa->e = e; rsa->d = d; ret = 0; \ + } while (0) + +#define set_dh_params(dh, p, g, ret) \ + do { \ + dh->p = p; \ + dh->q = NULL; \ + dh->g = g; \ + ret = 0; \ + } while (0) + +#define set_dh_priv_key(dh, priv_key, ret) \ + do { dh->priv_key = priv_key; ret = 0; } while (0) + +#define set_dsa_params(dsa, p, q, g, ret) \ + do { dsa->p = p; dsa->q = q; dsa->g = g; ret = 0; } while (0) + +#define get_dh_pub_key(dh, pub_key) \ + (pub_key = dh->pub_key) + +#define get_dh_priv_key(dh, priv_key) \ + (priv_key = dh->priv_key) + +#define set_dsa_sign(sign, r, s) \ + do { sign->r = r; sign->s = s; } while (0) + +#define get_dsa_sign(sign, r, s) \ + do { r = sign->r; s = sign->s; } while (0) + +#define set_dsa_keys(dsa, pub, priv, ret) \ + do { dsa->pub_key = pub; dsa->priv_key = priv; ret = 0; } while (0) + +#define set_dsa_pub_key(dsa, pub_key) \ + (dsa->pub_key = pub_key) + +#define get_dsa_priv_key(dsa, priv_key) \ + (priv_key = dsa->priv_key) + +#else + +#define set_rsa_params(rsa, p, q, ret) \ + (ret = !RSA_set0_factors(rsa, p, q)) + +#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \ + (ret = !RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp)) + +/* n, e must be non-null, d can be NULL */ +#define set_rsa_keys(rsa, n, e, d, ret) \ + (ret = !RSA_set0_key(rsa, n, e, d)) + +#define set_dh_params(dh, p, g, ret) \ + (ret = !DH_set0_pqg(dh, p, NULL, g)) + +#define set_dh_priv_key(dh, priv_key, ret) \ + (ret = !DH_set0_key(dh, NULL, priv_key)) + +#define get_dh_pub_key(dh, pub_key) \ + (DH_get0_key(dh_key, &pub_key, NULL)) + +#define get_dh_priv_key(dh, priv_key) \ + (DH_get0_key(dh_key, NULL, &priv_key)) + +#define set_dsa_params(dsa, p, q, g, ret) \ + (ret = !DSA_set0_pqg(dsa, p, q, g)) + +#define set_dsa_priv_key(dsa, priv_key) \ + (DSA_set0_key(dsa, NULL, priv_key)) + +#define 
set_dsa_sign(sign, r, s) \ + (DSA_SIG_set0(sign, r, s)) + +#define get_dsa_sign(sign, r, s) \ + (DSA_SIG_get0(sign, &r, &s)) + +#define set_dsa_keys(dsa, pub, priv, ret) \ + (ret = !DSA_set0_key(dsa, pub, priv)) + +#define set_dsa_pub_key(dsa, pub_key) \ + (DSA_set0_key(dsa, pub_key, NULL)) + +#define get_dsa_priv_key(dsa, priv_key) \ + (DSA_get0_key(dsa, NULL, &priv_key)) + +#endif /* version < 10100000 */ + +#endif /* __RTA_COMPAT_H__ */ diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c index 0d9bedc0..7d263aba 100644 --- a/drivers/crypto/openssl/rte_openssl_pmd.c +++ b/drivers/crypto/openssl/rte_openssl_pmd.c @@ -14,6 +14,7 @@ #include #include "rte_openssl_pmd_private.h" +#include "compat.h" #define DES_BLOCK_SIZE 8 @@ -119,7 +120,7 @@ get_cipher_key_ede(uint8_t *key, int keylen, uint8_t *key_ede) memcpy(key_ede + 16, key, 8); break; default: - OPENSSL_LOG_ERR("Unsupported key size"); + OPENSSL_LOG(ERR, "Unsupported key size"); res = -EINVAL; } @@ -137,6 +138,9 @@ get_cipher_algo(enum rte_crypto_cipher_algorithm sess_algo, size_t keylen, switch (sess_algo) { case RTE_CRYPTO_CIPHER_3DES_CBC: switch (keylen) { + case 8: + *algo = EVP_des_cbc(); + break; case 16: *algo = EVP_des_ede_cbc(); break; @@ -677,7 +681,7 @@ openssl_set_session_parameters(struct openssl_session *sess, ret = openssl_set_session_cipher_parameters( sess, cipher_xform); if (ret != 0) { - OPENSSL_LOG_ERR( + OPENSSL_LOG(ERR, "Invalid/unsupported cipher parameters"); return ret; } @@ -686,7 +690,7 @@ openssl_set_session_parameters(struct openssl_session *sess, if (auth_xform) { ret = openssl_set_session_auth_parameters(sess, auth_xform); if (ret != 0) { - OPENSSL_LOG_ERR( + OPENSSL_LOG(ERR, "Invalid/unsupported auth parameters"); return ret; } @@ -695,7 +699,7 @@ openssl_set_session_parameters(struct openssl_session *sess, if (aead_xform) { ret = openssl_set_session_aead_parameters(sess, aead_xform); if (ret != 0) { - OPENSSL_LOG_ERR( + OPENSSL_LOG(ERR, "Invalid/unsupported AEAD parameters"); return ret; } @@ -727,19 +731,36 @@ openssl_reset_session(struct openssl_session *sess) } /** Provide session for operation */ -static struct openssl_session * +static void * get_session(struct openssl_qp *qp, struct rte_crypto_op *op) { struct openssl_session *sess = NULL; + struct openssl_asym_session *asym_sess = NULL; if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { - /* get existing session */ - if (likely(op->sym->session != NULL)) - sess = (struct openssl_session *) - get_session_private_data( - op->sym->session, - cryptodev_driver_id); + if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) { + /* get existing session */ + if (likely(op->sym->session != NULL)) + sess = (struct openssl_session *) + get_sym_session_private_data( + op->sym->session, + cryptodev_driver_id); + } else { + if (likely(op->asym->session != NULL)) + asym_sess = (struct openssl_asym_session *) + get_asym_session_private_data( + op->asym->session, + cryptodev_driver_id); + if (asym_sess == NULL) + op->status = + RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + return asym_sess; + } } else { + /* sessionless asymmetric not supported */ + if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) + return NULL; + /* provide internal session */ void *_sess = NULL; void *_sess_private_data = NULL; @@ -759,8 +780,8 @@ get_session(struct openssl_qp *qp, struct rte_crypto_op *op) sess = NULL; } op->sym->session = (struct rte_cryptodev_sym_session *)_sess; - set_session_private_data(op->sym->session, cryptodev_driver_id, - 
_sess_private_data); + set_sym_session_private_data(op->sym->session, + cryptodev_driver_id, _sess_private_data); } if (sess == NULL) @@ -884,7 +905,7 @@ process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, return 0; process_cipher_encrypt_err: - OPENSSL_LOG_ERR("Process openssl cipher encrypt failed"); + OPENSSL_LOG(ERR, "Process openssl cipher encrypt failed"); return -EINVAL; } @@ -908,7 +929,7 @@ process_openssl_cipher_bpi_encrypt(uint8_t *src, uint8_t *dst, return 0; process_cipher_encrypt_err: - OPENSSL_LOG_ERR("Process openssl cipher bpi encrypt failed"); + OPENSSL_LOG(ERR, "Process openssl cipher bpi encrypt failed"); return -EINVAL; } /** Process standard openssl cipher decryption */ @@ -932,7 +953,7 @@ process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, return 0; process_cipher_decrypt_err: - OPENSSL_LOG_ERR("Process openssl cipher decrypt failed"); + OPENSSL_LOG(ERR, "Process openssl cipher decrypt failed"); return -EINVAL; } @@ -989,7 +1010,7 @@ process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, return 0; process_cipher_des3ctr_err: - OPENSSL_LOG_ERR("Process openssl cipher des 3 ede ctr failed"); + OPENSSL_LOG(ERR, "Process openssl cipher des 3 ede ctr failed"); return -EINVAL; } @@ -1027,7 +1048,7 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, return 0; process_auth_encryption_gcm_err: - OPENSSL_LOG_ERR("Process openssl auth encryption gcm failed"); + OPENSSL_LOG(ERR, "Process openssl auth encryption gcm failed"); return -EINVAL; } @@ -1068,7 +1089,7 @@ process_openssl_auth_encryption_ccm(struct rte_mbuf *mbuf_src, int offset, return 0; process_auth_encryption_ccm_err: - OPENSSL_LOG_ERR("Process openssl auth encryption ccm failed"); + OPENSSL_LOG(ERR, "Process openssl auth encryption ccm failed"); return -EINVAL; } @@ -1106,7 +1127,7 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, return 0; process_auth_decryption_gcm_err: - OPENSSL_LOG_ERR("Process openssl auth decryption gcm failed"); + OPENSSL_LOG(ERR, "Process openssl auth decryption gcm failed"); return -EINVAL; } @@ -1145,7 +1166,7 @@ process_openssl_auth_decryption_ccm(struct rte_mbuf *mbuf_src, int offset, return 0; process_auth_decryption_ccm_err: - OPENSSL_LOG_ERR("Process openssl auth decryption ccm failed"); + OPENSSL_LOG(ERR, "Process openssl auth decryption ccm failed"); return -EINVAL; } @@ -1198,7 +1219,7 @@ process_auth_final: return 0; process_auth_err: - OPENSSL_LOG_ERR("Process openssl auth failed"); + OPENSSL_LOG(ERR, "Process openssl auth failed"); return -EINVAL; } @@ -1251,7 +1272,7 @@ process_auth_final: return 0; process_auth_err: - OPENSSL_LOG_ERR("Process openssl auth failed"); + OPENSSL_LOG(ERR, "Process openssl auth failed"); return -EINVAL; } @@ -1525,6 +1546,433 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, op->status = RTE_CRYPTO_OP_STATUS_ERROR; } +/* process dsa sign operation */ +static int +process_openssl_dsa_sign_op(struct rte_crypto_op *cop, + struct openssl_asym_session *sess) +{ + struct rte_crypto_dsa_op_param *op = &cop->asym->dsa; + DSA *dsa = sess->u.s.dsa; + DSA_SIG *sign = NULL; + + sign = DSA_do_sign(op->message.data, + op->message.length, + dsa); + + if (sign == NULL) { + OPENSSL_LOG(ERR, "%s:%d\n", __func__, __LINE__); + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + } else { + const BIGNUM *r = NULL, *s = NULL; + get_dsa_sign(sign, r, s); + + op->r.length = BN_bn2bin(r, op->r.data); + op->s.length = BN_bn2bin(s, op->s.data); + 
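		/*
+		 * BN_bn2bin() writes the big-endian octets of r and s and
+		 * returns the number of bytes actually written (e.g. 20 for
+		 * a 160-bit q), so op->r.data and op->s.data must be sized
+		 * by the caller for the DSA group in use.
+		 */
+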
cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + } + + DSA_SIG_free(sign); + + return 0; +} + +/* process dsa verify operation */ +static int +process_openssl_dsa_verify_op(struct rte_crypto_op *cop, + struct openssl_asym_session *sess) +{ + struct rte_crypto_dsa_op_param *op = &cop->asym->dsa; + DSA *dsa = sess->u.s.dsa; + int ret; + DSA_SIG *sign = DSA_SIG_new(); + BIGNUM *r = NULL, *s = NULL; + BIGNUM *pub_key = NULL; + + if (sign == NULL) { + OPENSSL_LOG(ERR, " %s:%d\n", __func__, __LINE__); + cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return -1; + } + + r = BN_bin2bn(op->r.data, + op->r.length, + r); + s = BN_bin2bn(op->s.data, + op->s.length, + s); + pub_key = BN_bin2bn(op->y.data, + op->y.length, + pub_key); + if (!r || !s || !pub_key) { + if (r) + BN_free(r); + if (s) + BN_free(s); + if (pub_key) + BN_free(pub_key); + + cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return -1; + } + set_dsa_sign(sign, r, s); + set_dsa_pub_key(dsa, pub_key); + + ret = DSA_do_verify(op->message.data, + op->message.length, + sign, + dsa); + + if (ret != 1) + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + else + cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + + DSA_SIG_free(sign); + + return 0; +} + +/* process dh operation */ +static int +process_openssl_dh_op(struct rte_crypto_op *cop, + struct openssl_asym_session *sess) +{ + struct rte_crypto_dh_op_param *op = &cop->asym->dh; + DH *dh_key = sess->u.dh.dh_key; + BIGNUM *priv_key = NULL; + int ret = 0; + + if (sess->u.dh.key_op & + (1 << RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE)) { + /* compute shared secret using peer public key + * and current private key + * shared secret = peer_key ^ priv_key mod p + */ + BIGNUM *peer_key = NULL; + + /* copy private key and peer key and compute shared secret */ + peer_key = BN_bin2bn(op->pub_key.data, + op->pub_key.length, + peer_key); + if (peer_key == NULL) { + cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return -1; + } + priv_key = BN_bin2bn(op->priv_key.data, + op->priv_key.length, + priv_key); + if (priv_key == NULL) { + BN_free(peer_key); + cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return -1; + } + set_dh_priv_key(dh_key, priv_key, ret); + if (ret) { + OPENSSL_LOG(ERR, "Failed to set private key\n"); + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + BN_free(peer_key); + BN_free(priv_key); + return 0; + } + + ret = DH_compute_key( + op->shared_secret.data, + peer_key, dh_key); + if (ret < 0) { + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + BN_free(peer_key); + /* priv key is already loaded into dh, + * let's not free that directly here. + * DH_free() will auto free it later. + */ + return 0; + } + cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + op->shared_secret.length = ret; + BN_free(peer_key); + return 0; + } + + /* + * other options are public and private key generations. 
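+	 *
+	 * For reference, the shared-secret request handled above is filled
+	 * in on the application side roughly as follows (buffers and
+	 * lengths are the caller's; shown only as a sketch):
+	 *
+	 *   struct rte_crypto_dh_op_param *dh = &op->asym->dh;
+	 *
+	 *   dh->priv_key.data = local_priv;  dh->priv_key.length = priv_len;
+	 *   dh->pub_key.data = peer_pub;     dh->pub_key.length = pub_len;
+	 *   dh->shared_secret.data = out_buf;
+	 *
+	 * and on success the driver rewrites shared_secret.length with the
+	 * byte count produced by DH_compute_key().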
+ * + * if user provides private key, + * then first set DH with user provided private key + */ + if ((sess->u.dh.key_op & + (1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)) && + !(sess->u.dh.key_op & + (1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE))) { + /* generate public key using user-provided private key + * pub_key = g ^ priv_key mod p + */ + + /* load private key into DH */ + priv_key = BN_bin2bn(op->priv_key.data, + op->priv_key.length, + priv_key); + if (priv_key == NULL) { + cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return -1; + } + set_dh_priv_key(dh_key, priv_key, ret); + if (ret) { + OPENSSL_LOG(ERR, "Failed to set private key\n"); + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + BN_free(priv_key); + return 0; + } + } + + /* generate public and private key pair. + * + * if private key already set, generates only public key. + * + * if private key is not already set, then set it to random value + * and update internal private key. + */ + if (!DH_generate_key(dh_key)) { + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + return 0; + } + + if (sess->u.dh.key_op & (1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)) { + const BIGNUM *pub_key = NULL; + + OPENSSL_LOG(DEBUG, "%s:%d update public key\n", + __func__, __LINE__); + + /* get the generated keys */ + get_dh_pub_key(dh_key, pub_key); + + /* output public key */ + op->pub_key.length = BN_bn2bin(pub_key, + op->pub_key.data); + } + + if (sess->u.dh.key_op & + (1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE)) { + const BIGNUM *priv_key = NULL; + + OPENSSL_LOG(DEBUG, "%s:%d updated priv key\n", + __func__, __LINE__); + + /* get the generated keys */ + get_dh_priv_key(dh_key, priv_key); + + /* provide generated private key back to user */ + op->priv_key.length = BN_bn2bin(priv_key, + op->priv_key.data); + } + + cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + + return 0; +} + +/* process modinv operation */ +static int +process_openssl_modinv_op(struct rte_crypto_op *cop, + struct openssl_asym_session *sess) +{ + struct rte_crypto_asym_op *op = cop->asym; + BIGNUM *base = BN_CTX_get(sess->u.m.ctx); + BIGNUM *res = BN_CTX_get(sess->u.m.ctx); + + if (unlikely(base == NULL || res == NULL)) { + if (base) + BN_free(base); + if (res) + BN_free(res); + cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return -1; + } + + base = BN_bin2bn((const unsigned char *)op->modinv.base.data, + op->modinv.base.length, base); + + if (BN_mod_inverse(res, base, sess->u.m.modulus, sess->u.m.ctx)) { + cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + op->modinv.base.length = BN_bn2bin(res, op->modinv.base.data); + } else { + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + } + + return 0; +} + +/* process modexp operation */ +static int +process_openssl_modexp_op(struct rte_crypto_op *cop, + struct openssl_asym_session *sess) +{ + struct rte_crypto_asym_op *op = cop->asym; + BIGNUM *base = BN_CTX_get(sess->u.e.ctx); + BIGNUM *res = BN_CTX_get(sess->u.e.ctx); + + if (unlikely(base == NULL || res == NULL)) { + if (base) + BN_free(base); + if (res) + BN_free(res); + cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return -1; + } + + base = BN_bin2bn((const unsigned char *)op->modinv.base.data, + op->modinv.base.length, base); + + if (BN_mod_exp(res, base, sess->u.e.exp, + sess->u.e.mod, sess->u.e.ctx)) { + op->modinv.base.length = BN_bn2bin(res, op->modinv.base.data); + cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + } else { + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + } + + return 0; +} + +/* process rsa operations */ +static int +process_openssl_rsa_op(struct 
rte_crypto_op *cop, + struct openssl_asym_session *sess) +{ + int ret = 0; + struct rte_crypto_asym_op *op = cop->asym; + RSA *rsa = sess->u.r.rsa; + uint32_t pad = (op->rsa.pad); + + switch (pad) { + case RTE_CRYPTO_RSA_PKCS1_V1_5_BT0: + case RTE_CRYPTO_RSA_PKCS1_V1_5_BT1: + case RTE_CRYPTO_RSA_PKCS1_V1_5_BT2: + pad = RSA_PKCS1_PADDING; + break; + case RTE_CRYPTO_RSA_PADDING_NONE: + pad = RSA_NO_PADDING; + break; + default: + cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + OPENSSL_LOG(ERR, + "rsa pad type not supported %d\n", pad); + return 0; + } + + switch (op->rsa.op_type) { + case RTE_CRYPTO_ASYM_OP_ENCRYPT: + ret = RSA_public_encrypt(op->rsa.message.length, + op->rsa.message.data, + op->rsa.message.data, + rsa, + pad); + + if (ret > 0) + op->rsa.message.length = ret; + OPENSSL_LOG(DEBUG, + "length of encrypted text %d\n", ret); + break; + + case RTE_CRYPTO_ASYM_OP_DECRYPT: + ret = RSA_private_decrypt(op->rsa.message.length, + op->rsa.message.data, + op->rsa.message.data, + rsa, + pad); + if (ret > 0) + op->rsa.message.length = ret; + break; + + case RTE_CRYPTO_ASYM_OP_SIGN: + ret = RSA_private_encrypt(op->rsa.message.length, + op->rsa.message.data, + op->rsa.sign.data, + rsa, + pad); + if (ret > 0) + op->rsa.sign.length = ret; + break; + + case RTE_CRYPTO_ASYM_OP_VERIFY: + ret = RSA_public_decrypt(op->rsa.sign.length, + op->rsa.sign.data, + op->rsa.sign.data, + rsa, + pad); + + OPENSSL_LOG(DEBUG, + "Length of public_decrypt %d " + "length of message %zd\n", + ret, op->rsa.message.length); + + if (memcmp(op->rsa.sign.data, op->rsa.message.data, + op->rsa.message.length)) { + OPENSSL_LOG(ERR, + "RSA sign Verification failed"); + return -1; + } + break; + + default: + /* allow ops with invalid args to be pushed to + * completion queue + */ + cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + break; + } + + if (ret < 0) + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + + return 0; +} + +static int +process_asym_op(struct openssl_qp *qp, struct rte_crypto_op *op, + struct openssl_asym_session *sess) +{ + int retval = 0; + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + switch (sess->xfrm_type) { + case RTE_CRYPTO_ASYM_XFORM_RSA: + retval = process_openssl_rsa_op(op, sess); + break; + case RTE_CRYPTO_ASYM_XFORM_MODEX: + retval = process_openssl_modexp_op(op, sess); + break; + case RTE_CRYPTO_ASYM_XFORM_MODINV: + retval = process_openssl_modinv_op(op, sess); + break; + case RTE_CRYPTO_ASYM_XFORM_DH: + retval = process_openssl_dh_op(op, sess); + break; + case RTE_CRYPTO_ASYM_XFORM_DSA: + if (op->asym->dsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) + retval = process_openssl_dsa_sign_op(op, sess); + else if (op->asym->dsa.op_type == + RTE_CRYPTO_ASYM_OP_VERIFY) + retval = + process_openssl_dsa_verify_op(op, sess); + else + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + break; + default: + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + break; + } + if (!retval) { + /* op processed so push to completion queue as processed */ + retval = rte_ring_enqueue(qp->processed_ops, (void *)op); + if (retval) + /* return error if failed to put in completion queue */ + retval = -1; + } + + return retval; +} + /** Process crypto operation for mbuf */ static int process_op(struct openssl_qp *qp, struct rte_crypto_op *op, @@ -1569,7 +2017,7 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op, openssl_reset_session(sess); memset(sess, 0, sizeof(struct openssl_session)); memset(op->sym->session, 0, - rte_cryptodev_get_header_session_size()); + rte_cryptodev_sym_get_header_session_size()); 
 	rte_mempool_put(qp->sess_mp, sess);
 	rte_mempool_put(qp->sess_mp, op->sym->session);
 	op->sym->session = NULL;
@@ -1597,7 +2045,7 @@ static uint16_t
 openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	struct openssl_session *sess;
+	void *sess;
 	struct openssl_qp *qp = queue_pair;
 	int i, retval;
 
@@ -1606,7 +2054,12 @@ openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		if (unlikely(sess == NULL))
 			goto enqueue_err;
 
-		retval = process_op(qp, ops[i], sess);
+		if (ops[i]->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+			retval = process_op(qp, ops[i],
+					(struct openssl_session *) sess);
+		else
+			retval = process_asym_op(qp, ops[i],
+					(struct openssl_asym_session *) sess);
 
 		if (unlikely(retval < 0))
 			goto enqueue_err;
 	}
 
@@ -1646,7 +2099,7 @@ cryptodev_openssl_create(const char *name,
 
 	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
 	if (dev == NULL) {
-		OPENSSL_LOG_ERR("failed to create cryptodev vdev");
+		OPENSSL_LOG(ERR, "failed to create cryptodev vdev");
 		goto init_error;
 	}
 
@@ -1660,18 +2113,19 @@ cryptodev_openssl_create(const char *name,
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_CPU_AESNI |
-			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+			RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
 
 	/* Set vector instructions mode supported */
 	internals = dev->data->dev_private;
 
 	internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
-	internals->max_nb_sessions = init_params->max_nb_sessions;
 
 	return 0;
 
 init_error:
-	OPENSSL_LOG_ERR("driver %s: cryptodev_openssl_create failed",
+	OPENSSL_LOG(ERR, "driver %s: create failed",
 		init_params->name);
 
 	cryptodev_openssl_remove(vdev);
@@ -1686,8 +2140,7 @@ cryptodev_openssl_probe(struct rte_vdev_device *vdev)
 		"",
 		sizeof(struct openssl_private),
 		rte_socket_id(),
-		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
-		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
 	};
 	const char *name;
 	const char *input_args;
@@ -1731,7 +2184,11 @@
 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_OPENSSL_PMD, cryptodev_openssl_pmd_drv);
 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
 	"max_nb_queue_pairs=<int> "
-	"max_nb_sessions=<int> "
 	"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv, cryptodev_openssl_pmd_drv,
-	cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv,
+	cryptodev_openssl_pmd_drv.driver, cryptodev_driver_id);
+
+RTE_INIT(openssl_init_log)
+{
+	openssl_logtype_driver = rte_log_register("pmd.crypto.openssl");
+}
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index 1cb87d59..de228439 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -9,6 +9,7 @@
 #include <rte_cryptodev_pmd.h>
 
 #include "rte_openssl_pmd_private.h"
+#include "compat.h"
 
 static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
@@ -397,7 +398,7 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
 				.block_size = 8,
 				.key_size = {
-					.min = 16,
+					.min = 8,
 					.max = 24,
 					.increment = 8
 				},
@@ -469,6 +470,105 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* RSA */
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+		{.asym = {
+			.xform_capa = {
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
+				.op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
+					(1 << RTE_CRYPTO_ASYM_OP_VERIFY) |
+					(1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |
+					(1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),
+				{
+				.modlen = {
+				/* min length is based on openssl rsa keygen */
+				.min = 30,
+				/* value 0 symbolizes no limit on max length */
+				.max = 0,
+				.increment = 1
+				}, }
+			}
+		},
+		}
+	},
+	{	/* modexp */
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+		{.asym = {
+			.xform_capa = {
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
+				.op_types = 0,
+				{
+				.modlen = {
+				/* value 0 symbolizes no limit on min length */
+				.min = 0,
+				/* value 0 symbolizes no limit on max length */
+				.max = 0,
+				.increment = 1
+				}, }
+			}
+		},
+		}
+	},
+	{	/* modinv */
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+		{.asym = {
+			.xform_capa = {
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_MODINV,
+				.op_types = 0,
+				{
+				.modlen = {
+				/* value 0 symbolizes no limit on min length */
+				.min = 0,
+				/* value 0 symbolizes no limit on max length */
+				.max = 0,
+				.increment = 1
+				}, }
+			}
+		},
+		}
+	},
+	{	/* dh */
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+		{.asym = {
+			.xform_capa = {
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_DH,
+				.op_types =
+				((1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE) |
+				(1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE) |
+				(1 << RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE)),
+				{
+				.modlen = {
+				/* value 0 symbolizes no limit on min length */
+				.min = 0,
+				/* value 0 symbolizes no limit on max length */
+				.max = 0,
+				.increment = 1
+				}, }
+			}
+		},
+		}
+	},
+	{	/* dsa */
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+		{.asym = {
+			.xform_capa = {
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_DSA,
+				.op_types =
+				((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
+				(1 << RTE_CRYPTO_ASYM_OP_VERIFY)),
+				{
+				.modlen = {
+				/* value 0 symbolizes no limit on min length */
+				.min = 0,
+				/* value 0 symbolizes no limit on max length */
+				.max = 0,
+				.increment = 1
+				}, }
+			}
+		},
+		}
+	},
+
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
@@ ... @@ openssl_pmd_info_get(struct rte_cryptodev *dev,
 		dev_info->feature_flags = dev->feature_flags;
 		dev_info->capabilities = openssl_pmd_capabilities;
 		dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
-		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+		/* No limit of number of sessions */
+		dev_info->sym.max_nb_sessions = 0;
 	}
 }
 
@@ -588,14 +689,14 @@ openssl_pmd_qp_create_processed_ops_ring(struct openssl_qp *qp,
 	r = rte_ring_lookup(qp->name);
 	if (r) {
 		if (rte_ring_get_size(r) >= ring_size) {
-			OPENSSL_LOG_INFO(
-				"Reusing existing ring %s for processed ops",
+			OPENSSL_LOG(INFO,
+				"Reusing existing ring %s for processed ops",
 				qp->name);
 			return r;
 		}
 
-		OPENSSL_LOG_ERR(
-			"Unable to reuse existing ring %s for processed ops",
+		OPENSSL_LOG(ERR,
+			"Unable to reuse existing ring %s for processed ops",
 			qp->name);
 		return NULL;
 	}
 
@@ -647,22 +748,6 @@ qp_setup_cleanup:
 	return -1;
 }
 
-/** Start queue pair */
-static int
-openssl_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint16_t queue_pair_id)
-{
-	return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-openssl_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint16_t queue_pair_id)
-{
-	return -ENOTSUP;
-}
-
 /** Return the number of allocated queue pairs */
 static uint32_t
 openssl_pmd_qp_count(struct rte_cryptodev *dev)
@@ -670,16 +755,23 @@ openssl_pmd_qp_count(struct rte_cryptodev *dev)
 	return dev->data->nb_queue_pairs;
 }
 
-/** Returns the size of the session structure */
+/** Returns the size of the symmetric session structure */
 static unsigned
-openssl_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+openssl_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
 {
 	return sizeof(struct openssl_session);
 }
 
+/** Returns the size of the asymmetric session structure */
+static unsigned
+openssl_pmd_asym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct openssl_asym_session);
+}
+
 /** Configure the session from a crypto xform chain */
 static int
-openssl_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+openssl_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
 		struct rte_crypto_sym_xform *xform,
 		struct rte_cryptodev_sym_session *sess,
 		struct rte_mempool *mempool)
@@ -688,46 +780,460 @@ openssl_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
 	int ret;
 
 	if (unlikely(sess == NULL)) {
-		OPENSSL_LOG_ERR("invalid session struct");
+		OPENSSL_LOG(ERR, "invalid session struct");
return -EINVAL; } if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( + OPENSSL_LOG(ERR, "Couldn't get object from session mempool"); return -ENOMEM; } ret = openssl_set_session_parameters(sess_private_data, xform); if (ret != 0) { - OPENSSL_LOG_ERR("failed configure session parameters"); + OPENSSL_LOG(ERR, "failed configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; } +static int openssl_set_asym_session_parameters( + struct openssl_asym_session *asym_session, + struct rte_crypto_asym_xform *xform) +{ + int ret = 0; + + if ((xform->xform_type != RTE_CRYPTO_ASYM_XFORM_DH) && + (xform->next != NULL)) { + OPENSSL_LOG(ERR, "chained xfrms are not supported on %s", + rte_crypto_asym_xform_strings[xform->xform_type]); + return -1; + } + + switch (xform->xform_type) { + case RTE_CRYPTO_ASYM_XFORM_RSA: + { + BIGNUM *n = NULL; + BIGNUM *e = NULL; + BIGNUM *d = NULL; + BIGNUM *p = NULL, *q = NULL, *dmp1 = NULL; + BIGNUM *iqmp = NULL, *dmq1 = NULL; + + /* copy xfrm data into rsa struct */ + n = BN_bin2bn((const unsigned char *)xform->rsa.n.data, + xform->rsa.n.length, n); + e = BN_bin2bn((const unsigned char *)xform->rsa.e.data, + xform->rsa.e.length, e); + + if (!n || !e) + goto err_rsa; + + RSA *rsa = RSA_new(); + if (rsa == NULL) + goto err_rsa; + + if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_EXP) { + d = BN_bin2bn( + (const unsigned char *)xform->rsa.d.data, + xform->rsa.d.length, + d); + if (!d) { + RSA_free(rsa); + goto err_rsa; + } + } else { + p = BN_bin2bn((const unsigned char *) + xform->rsa.qt.p.data, + xform->rsa.qt.p.length, + p); + q = BN_bin2bn((const unsigned char *) + xform->rsa.qt.q.data, + xform->rsa.qt.q.length, + q); + dmp1 = BN_bin2bn((const unsigned char *) + xform->rsa.qt.dP.data, + xform->rsa.qt.dP.length, + dmp1); + dmq1 = BN_bin2bn((const unsigned char *) + xform->rsa.qt.dQ.data, + xform->rsa.qt.dQ.length, + dmq1); + iqmp = BN_bin2bn((const unsigned char *) + xform->rsa.qt.qInv.data, + xform->rsa.qt.qInv.length, + iqmp); + + if (!p || !q || !dmp1 || !dmq1 || !iqmp) { + RSA_free(rsa); + goto err_rsa; + } + set_rsa_params(rsa, p, q, ret); + if (ret) { + OPENSSL_LOG(ERR, + "failed to set rsa params\n"); + RSA_free(rsa); + goto err_rsa; + } + set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret); + if (ret) { + OPENSSL_LOG(ERR, + "failed to set crt params\n"); + RSA_free(rsa); + /* + * set already populated params to NULL + * as its freed by call to RSA_free + */ + p = q = NULL; + goto err_rsa; + } + } + + set_rsa_keys(rsa, n, e, d, ret); + if (ret) { + OPENSSL_LOG(ERR, "Failed to load rsa keys\n"); + RSA_free(rsa); + return -1; + } + asym_session->u.r.rsa = rsa; + asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_RSA; + break; +err_rsa: + if (n) + BN_free(n); + if (e) + BN_free(e); + if (d) + BN_free(d); + if (p) + BN_free(p); + if (q) + BN_free(q); + if (dmp1) + BN_free(dmp1); + if (dmq1) + BN_free(dmq1); + if (iqmp) + BN_free(iqmp); + + return -1; + } + case RTE_CRYPTO_ASYM_XFORM_MODEX: + { + struct rte_crypto_modex_xform *xfrm = &(xform->modex); + + BN_CTX *ctx = BN_CTX_new(); + if (ctx == NULL) { + OPENSSL_LOG(ERR, + " failed to allocate resources\n"); + return -1; + } + BN_CTX_start(ctx); + BIGNUM *mod = BN_CTX_get(ctx); + BIGNUM *exp = BN_CTX_get(ctx); + if (mod == NULL || exp == NULL) { + BN_CTX_end(ctx); + BN_CTX_free(ctx); + return -1; + } + + mod = 
BN_bin2bn((const unsigned char *)
+				xfrm->modulus.data,
+				xfrm->modulus.length, mod);
+		exp = BN_bin2bn((const unsigned char *)
+				xfrm->exponent.data,
+				xfrm->exponent.length, exp);
+		asym_session->u.e.ctx = ctx;
+		asym_session->u.e.mod = mod;
+		asym_session->u.e.exp = exp;
+		asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
+		break;
+	}
+	case RTE_CRYPTO_ASYM_XFORM_MODINV:
+	{
+		struct rte_crypto_modinv_xform *xfrm = &(xform->modinv);
+
+		BN_CTX *ctx = BN_CTX_new();
+		if (ctx == NULL) {
+			OPENSSL_LOG(ERR,
+				" failed to allocate resources\n");
+			return -1;
+		}
+		BN_CTX_start(ctx);
+		BIGNUM *mod = BN_CTX_get(ctx);
+		if (mod == NULL) {
+			BN_CTX_end(ctx);
+			BN_CTX_free(ctx);
+			return -1;
+		}
+
+		mod = BN_bin2bn((const unsigned char *)
+				xfrm->modulus.data,
+				xfrm->modulus.length,
+				mod);
+		asym_session->u.m.ctx = ctx;
+		asym_session->u.m.modulus = mod;
+		asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_MODINV;
+		break;
+	}
+	case RTE_CRYPTO_ASYM_XFORM_DH:
+	{
+		BIGNUM *p = NULL;
+		BIGNUM *g = NULL;
+
+		p = BN_bin2bn((const unsigned char *)
+				xform->dh.p.data,
+				xform->dh.p.length,
+				p);
+		g = BN_bin2bn((const unsigned char *)
+				xform->dh.g.data,
+				xform->dh.g.length,
+				g);
+		if (!p || !g)
+			goto err_dh;
+
+		DH *dh = DH_new();
+		if (dh == NULL) {
+			OPENSSL_LOG(ERR,
+				"failed to allocate resources\n");
+			goto err_dh;
+		}
+		set_dh_params(dh, p, g, ret);
+		if (ret) {
+			DH_free(dh);
+			goto err_dh;
+		}
+
+		/*
+		 * setup xform for
+		 * public key generate, or
+		 * DH Priv key generate, or both
+		 * public and private key generate
+		 */
+		asym_session->u.dh.key_op = (1 << xform->dh.type);
+
+		if (xform->dh.type ==
+			RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE) {
+			/* check if next is pubkey */
+			if ((xform->next != NULL) &&
+				(xform->next->xform_type ==
+				RTE_CRYPTO_ASYM_XFORM_DH) &&
+				(xform->next->dh.type ==
+				RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)
+				) {
+				/*
+				 * setup op as pub/priv key
+				 * pair generation
+				 */
+				asym_session->u.dh.key_op |=
+				(1 <<
+				RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE);
+			}
+		}
+		asym_session->u.dh.dh_key = dh;
+		asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_DH;
+		break;
+
+err_dh:
+		OPENSSL_LOG(ERR, " failed to set dh params\n");
+		if (p)
+			BN_free(p);
+		if (g)
+			BN_free(g);
+		return -1;
+	}
+	case RTE_CRYPTO_ASYM_XFORM_DSA:
+	{
+		BIGNUM *p = NULL, *g = NULL;
+		BIGNUM *q = NULL, *priv_key = NULL;
+		BIGNUM *pub_key = BN_new();
+		BN_zero(pub_key);
+
+		p = BN_bin2bn((const unsigned char *)
+				xform->dsa.p.data,
+				xform->dsa.p.length,
+				p);
+
+		g = BN_bin2bn((const unsigned char *)
+				xform->dsa.g.data,
+				xform->dsa.g.length,
+				g);
+
+		q = BN_bin2bn((const unsigned char *)
+				xform->dsa.q.data,
+				xform->dsa.q.length,
+				q);
+		if (!p || !q || !g)
+			goto err_dsa;
+
+		priv_key = BN_bin2bn((const unsigned char *)
+				xform->dsa.x.data,
+				xform->dsa.x.length,
+				priv_key);
+		if (priv_key == NULL)
+			goto err_dsa;
+
+		DSA *dsa = DSA_new();
+		if (dsa == NULL) {
+			OPENSSL_LOG(ERR,
+				" failed to allocate resources\n");
+			goto err_dsa;
+		}
+
+		set_dsa_params(dsa, p, q, g, ret);
+		if (ret) {
+			DSA_free(dsa);
+			OPENSSL_LOG(ERR, "Failed to set dsa params\n");
+			goto err_dsa;
+		}
+
+		/*
+		 * OpenSSL 1.1.0 mandates that the public key can't be
+		 * NULL in the very first call, so set a dummy pub key;
+		set_dsa_keys(dsa, pub_key, priv_key, ret);
+		if (ret) {
+			DSA_free(dsa);
+			OPENSSL_LOG(ERR, "Failed to set dsa keys");
+			return -1;
+		}
+		asym_session->u.s.dsa = dsa;
+		asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_DSA;
+		break;
+
+err_dsa:
+		if (p)
+			BN_free(p);
+		if (q)
+			BN_free(q);
+		if (g)
+			BN_free(g);
+		if (priv_key)
+			BN_free(priv_key);
+		if (pub_key)
+			BN_free(pub_key);
+		return -1;
+	}
+	default:
+		return -1;
+	}
+
+	return 0;
+}
+
+/** Configure the session from a crypto xform chain */
+static int
+openssl_pmd_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool)
+{
+	void *asym_sess_private_data;
+	int ret;
+
+	if (unlikely(sess == NULL)) {
+		OPENSSL_LOG(ERR, "invalid asymmetric session struct");
+		return -EINVAL;
+	}
+
+	if (rte_mempool_get(mempool, &asym_sess_private_data)) {
+		OPENSSL_LOG(ERR,
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	ret = openssl_set_asym_session_parameters(asym_sess_private_data,
+			xform);
+	if (ret != 0) {
+		OPENSSL_LOG(ERR, "failed to configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, asym_sess_private_data);
+		return ret;
+	}
+
+	set_asym_session_private_data(sess, dev->driver_id,
+			asym_sess_private_data);
+
+	return 0;
+}
 
 /** Clear the memory of session so it doesn't leave key material behind */
 static void
-openssl_pmd_session_clear(struct rte_cryptodev *dev,
+openssl_pmd_sym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_sym_session *sess)
 {
 	uint8_t index = dev->driver_id;
-	void *sess_priv = get_session_private_data(sess, index);
+	void *sess_priv = get_sym_session_private_data(sess, index);
 
 	/* Zero out the whole structure */
 	if (sess_priv) {
 		openssl_reset_session(sess_priv);
 		memset(sess_priv, 0, sizeof(struct openssl_session));
 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-		set_session_private_data(sess, index, NULL);
+		set_sym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+}
+
+static void openssl_reset_asym_session(struct openssl_asym_session *sess)
+{
+	switch (sess->xfrm_type) {
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		if (sess->u.r.rsa)
+			RSA_free(sess->u.r.rsa);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		if (sess->u.e.ctx) {
+			BN_CTX_end(sess->u.e.ctx);
+			BN_CTX_free(sess->u.e.ctx);
+		}
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_MODINV:
+		if (sess->u.m.ctx) {
+			BN_CTX_end(sess->u.m.ctx);
+			BN_CTX_free(sess->u.m.ctx);
+		}
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_DH:
+		if (sess->u.dh.dh_key)
+			DH_free(sess->u.dh.dh_key);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_DSA:
+		if (sess->u.s.dsa)
+			DSA_free(sess->u.s.dsa);
+		break;
+	default:
+		break;
+	}
+}
+
+/** Clear the memory of asymmetric session
+ * so it doesn't leave key material behind
+ */
+static void
+openssl_pmd_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_asym_session_private_data(sess, index);
+
+	/* Zero out the whole structure */
+	if (sess_priv) {
+		openssl_reset_asym_session(sess_priv);
+		memset(sess_priv, 0, sizeof(struct openssl_asym_session));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+		set_asym_session_private_data(sess, index, NULL);
 		rte_mempool_put(sess_mp, sess_priv);
 	}
 }
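+/*
+ * [Editor's illustration - not part of the upstream patch] A minimal
+ * caller-side sketch of how the configure/clear callbacks above are
+ * reached through the public API of this release, assuming a device
+ * dev_id, a session mempool sess_mp and a populated
+ * struct rte_crypto_asym_xform xf:
+ *
+ *	struct rte_cryptodev_asym_session *s =
+ *			rte_cryptodev_asym_session_create(sess_mp);
+ *	if (s != NULL &&
+ *	    rte_cryptodev_asym_session_init(dev_id, s, &xf, sess_mp) == 0) {
+ *		... enqueue/dequeue asymmetric crypto ops referencing s ...
+ *		rte_cryptodev_asym_session_clear(dev_id, s);
+ *	}
+ *	rte_cryptodev_asym_session_free(s);
+ */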
@@ -745,13 +1251,14 @@ struct rte_cryptodev_ops openssl_pmd_ops = {
 		.queue_pair_setup	= openssl_pmd_qp_setup,
 		.queue_pair_release	= openssl_pmd_qp_release,
-		.queue_pair_start	= openssl_pmd_qp_start,
-		.queue_pair_stop	= openssl_pmd_qp_stop,
 		.queue_pair_count	= openssl_pmd_qp_count,
 
-		.session_get_size	= openssl_pmd_session_get_size,
-		.session_configure	= openssl_pmd_session_configure,
-		.session_clear		= openssl_pmd_session_clear
+		.sym_session_get_size	= openssl_pmd_sym_session_get_size,
+		.asym_session_get_size	= openssl_pmd_asym_session_get_size,
+		.sym_session_configure	= openssl_pmd_sym_session_configure,
+		.asym_session_configure	= openssl_pmd_asym_session_configure,
+		.sym_session_clear	= openssl_pmd_sym_session_clear,
+		.asym_session_clear	= openssl_pmd_asym_session_clear
 };
 
 struct rte_cryptodev_ops *rte_openssl_pmd_ops = &openssl_pmd_ops;
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_private.h b/drivers/crypto/openssl/rte_openssl_pmd_private.h
index bc8dc7cd..a8f2c848 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_private.h
+++ b/drivers/crypto/openssl/rte_openssl_pmd_private.h
@@ -8,29 +8,19 @@
 #include <openssl/evp.h>
 #include <openssl/hmac.h>
 #include <openssl/des.h>
+#include <openssl/rsa.h>
+#include <openssl/dh.h>
+#include <openssl/dsa.h>
 
 #define CRYPTODEV_NAME_OPENSSL_PMD	crypto_openssl
 /**< Open SSL Crypto PMD device name */
 
-#define OPENSSL_LOG_ERR(fmt, args...) \
-	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
-			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
-			__func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_OPENSSL_DEBUG
-#define OPENSSL_LOG_INFO(fmt, args...) \
-	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
-			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
-			__func__, __LINE__, ## args)
-
-#define OPENSSL_LOG_DBG(fmt, args...) \
-	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
-			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
-			__func__, __LINE__, ## args)
-#else
-#define OPENSSL_LOG_INFO(fmt, args...)
-#define OPENSSL_LOG_DBG(fmt, args...)
-#endif
+/** OPENSSL PMD LOGTYPE DRIVER */
+int openssl_logtype_driver;
+#define OPENSSL_LOG(level, fmt, ...)  \
+	rte_log(RTE_LOG_ ## level, openssl_logtype_driver,  \
+			"%s() line %u: " fmt "\n", __func__, __LINE__,  \
+					## __VA_ARGS__)
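+/*
+ * [Editor's note] An illustrative expansion, assuming
+ * openssl_logtype_driver is registered elsewhere in the PMD (e.g. via
+ * rte_log_register()):
+ *
+ *	OPENSSL_LOG(ERR, "bad op %u", n);
+ * expands to roughly
+ *	rte_log(RTE_LOG_ERR, openssl_logtype_driver,
+ *		"%s() line %u: bad op %u\n", __func__, __LINE__, n);
+ *
+ * The macro supplies the trailing "\n", so call sites should not append
+ * their own.
+ */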
 
 /* Maximum length for digest (SHA-512 needs 64 bytes) */
 #define DIGEST_LENGTH_MAX 64
@@ -62,8 +52,6 @@ enum openssl_auth_mode {
 
 struct openssl_private {
 	unsigned int max_nb_qpairs;
 	/**< Max number of queue pairs */
-	unsigned int max_nb_sessions;
-	/**< Max number of sessions */
 };
 
 /** OPENSSL crypto queue pair */
@@ -157,6 +145,31 @@ struct openssl_session {
 
 } __rte_cache_aligned;
 
+/** OPENSSL crypto private asymmetric session structure */
+struct openssl_asym_session {
+	enum rte_crypto_asym_xform_type xfrm_type;
+	union {
+		struct rsa {
+			RSA *rsa;
+		} r;
+		struct exp {
+			BIGNUM *exp;
+			BIGNUM *mod;
+			BN_CTX *ctx;
+		} e;
+		struct mod {
+			BIGNUM *modulus;
+			BN_CTX *ctx;
+		} m;
+		struct dh {
+			DH *dh_key;
+			uint32_t key_op;
+		} dh;
+		struct {
+			DSA *dsa;
+		} s;
+	} u;
+} __rte_cache_aligned;
+
 /** Set and validate OPENSSL crypto session parameters */
 extern int
 openssl_set_session_parameters(struct openssl_session *sess,
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
deleted file mode 100644
index 260912dc..00000000
--- a/drivers/crypto/qat/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2015 Intel Corporation
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_pmd_qat.a
-
-# library version
-LIBABIVER := 1
-
-# build flags
-CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -O3
-
-# external library include paths
-CFLAGS += -I$(SRCDIR)/qat_adf
-LDLIBS += -lcrypto
-LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
-LDLIBS += -lrte_cryptodev
-LDLIBS += -lrte_pci -lrte_bus_pci
-
-# library source files
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_crypto.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_qp.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_adf/qat_algs_build_desc.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += rte_qat_cryptodev.c
-
-# export include files
-SYMLINK-y-include +=
-
-# versioning export map
-EXPORT_MAP := rte_pmd_qat_version.map
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/qat/README b/drivers/crypto/qat/README
new file mode 100644
index 00000000..444ae605
--- /dev/null
+++ b/drivers/crypto/qat/README
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2018 Intel Corporation
+
+The Makefile for the crypto QAT PMD is in the common/qat directory.
+The QAT driver is built from there, as only one library is built for the
+whole QAT PCI device, and that library includes all the services (crypto,
+compression) which are enabled on the device.
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index 7b904630..9cc98d2c 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -1,14 +1,18 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
+# Copyright(c) 2017-2018 Intel Corporation
+# This does not build the QAT driver; instead, that is done in the
+# compression driver, which comes later. Here we just add our source
+# files to the list.
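+# [Editor's note - an assumption for illustration] The qat_sources,
+# qat_deps, qat_cflags and qat_ext_deps variables accumulated below are
+# expected to be consumed by the meson logic that actually declares the
+# single QAT library, roughly along the lines of:
+#	sources += qat_sources
+#	cflags += qat_cflags
+#	deps += qat_deps
+#	ext_deps += qat_ext_deps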
+build = false
 dep = dependency('libcrypto', required: false)
-if not dep.found()
-	build = false
+qat_includes += include_directories('.')
+qat_deps += 'cryptodev'
+if dep.found()
+	# Add our source files to the list
+	qat_sources += files('qat_sym_pmd.c',
+			     'qat_sym.c',
+			     'qat_sym_session.c')
+	qat_ext_deps += dep
+	pkgconfig_extra_libs += '-lcrypto'
+	qat_cflags += '-DBUILD_QAT_SYM'
 endif
-sources = files('qat_crypto.c', 'qat_qp.c',
-		'qat_adf/qat_algs_build_desc.c',
-		'rte_qat_cryptodev.c')
-includes += include_directories('qat_adf')
-deps += ['bus_pci']
-ext_deps += dep
-pkgconfig_extra_libs += '-lcrypto'
diff --git a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
deleted file mode 100644
index 4f8f3d13..00000000
--- a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *	* Redistributions of source code must retain the above copyright
- *	  notice, this list of conditions and the following disclaimer.
- *	* Redistributions in binary form must reproduce the above copyright
- *	  notice, this list of conditions and the following disclaimer in
- *	  the documentation and/or other materials provided with the
- *	  distribution.
- *	* Neither the name of Intel Corporation nor the names of its
- *	  contributors may be used to endorse or promote products derived
- *	  from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */ -#ifndef ADF_TRANSPORT_ACCESS_MACROS_H -#define ADF_TRANSPORT_ACCESS_MACROS_H - -#include - -/* CSR write macro */ -#define ADF_CSR_WR(csrAddr, csrOffset, val) \ - rte_write32(val, (((uint8_t *)csrAddr) + csrOffset)) - -/* CSR read macro */ -#define ADF_CSR_RD(csrAddr, csrOffset) \ - rte_read32((((uint8_t *)csrAddr) + csrOffset)) - -#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL -#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL -#define ADF_RING_CSR_RING_CONFIG 0x000 -#define ADF_RING_CSR_RING_LBASE 0x040 -#define ADF_RING_CSR_RING_UBASE 0x080 -#define ADF_RING_CSR_RING_HEAD 0x0C0 -#define ADF_RING_CSR_RING_TAIL 0x100 -#define ADF_RING_CSR_E_STAT 0x14C -#define ADF_RING_CSR_INT_SRCSEL 0x174 -#define ADF_RING_CSR_INT_SRCSEL_2 0x178 -#define ADF_RING_CSR_INT_COL_EN 0x17C -#define ADF_RING_CSR_INT_COL_CTL 0x180 -#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 -#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 -#define ADF_RING_BUNDLE_SIZE 0x1000 -#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A -#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05 -#define ADF_COALESCING_MIN_TIME 0x1FF -#define ADF_COALESCING_MAX_TIME 0xFFFFF -#define ADF_COALESCING_DEF_TIME 0x27FF -#define ADF_RING_NEAR_WATERMARK_512 0x08 -#define ADF_RING_NEAR_WATERMARK_0 0x00 -#define ADF_RING_EMPTY_SIG 0x7F7F7F7F -#define ADF_RING_EMPTY_SIG_BYTE 0x7F - -/* Valid internal ring size values */ -#define ADF_RING_SIZE_128 0x01 -#define ADF_RING_SIZE_256 0x02 -#define ADF_RING_SIZE_512 0x03 -#define ADF_RING_SIZE_4K 0x06 -#define ADF_RING_SIZE_16K 0x08 -#define ADF_RING_SIZE_4M 0x10 -#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128 -#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M -#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K - -#define ADF_NUM_BUNDLES_PER_DEV 1 -#define ADF_NUM_SYM_QPS_PER_BUNDLE 2 - -/* Valid internal msg size values */ -#define ADF_MSG_SIZE_32 0x01 -#define ADF_MSG_SIZE_64 0x02 -#define ADF_MSG_SIZE_128 0x04 -#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32 -#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128 - -/* Size to bytes conversion macros for ring and msg size values */ -#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5) -#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5) -#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7) -#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7) - -/* Minimum ring bufer size for memory allocation */ -#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? 
\ - ADF_RING_SIZE_4K : SIZE) -#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6) -#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \ - SIZE) & ~0x4) -/* Max outstanding requests */ -#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \ - ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1) -#define BUILD_RING_CONFIG(size) \ - ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \ - | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ - | size) -#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \ - ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \ - | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ - | size) -#define BUILD_RING_BASE_ADDR(addr, size) \ - ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size)) -#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_RING_HEAD + (ring << 2)) -#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_RING_TAIL + (ring << 2)) -#define READ_CSR_E_STAT(csr_base_addr, bank) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_E_STAT) -#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_RING_CONFIG + (ring << 2), value) -#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ -do { \ - uint32_t l_base = 0, u_base = 0; \ - l_base = (uint32_t)(value & 0xFFFFFFFF); \ - u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \ -} while (0) -#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_RING_HEAD + (ring << 2), value) -#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_RING_TAIL + (ring << 2), value) -#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ -do { \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ -} while (0) -#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_INT_COL_EN, value) -#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_INT_COL_CTL, \ - ADF_RING_CSR_INT_COL_CTL_ENABLE | value) -#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_INT_FLAG_AND_COL, value) -#endif diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw.h b/drivers/crypto/qat/qat_adf/icp_qat_fw.h deleted file mode 100644 index 5de34d55..00000000 --- a/drivers/crypto/qat/qat_adf/icp_qat_fw.h +++ /dev/null @@ -1,316 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * Copyright(c) 2015 Intel Corporation. 
- * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * qat-linux@intel.com - * - * BSD LICENSE - * Copyright(c) 2015 Intel Corporation. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#ifndef _ICP_QAT_FW_H_ -#define _ICP_QAT_FW_H_ -#include -#include "icp_qat_hw.h" - -#define QAT_FIELD_SET(flags, val, bitpos, mask) \ -{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \ - (((val) & (mask)) << (bitpos))) ; } - -#define QAT_FIELD_GET(flags, bitpos, mask) \ - (((flags) >> (bitpos)) & (mask)) - -#define ICP_QAT_FW_REQ_DEFAULT_SZ 128 -#define ICP_QAT_FW_RESP_DEFAULT_SZ 32 -#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8 -#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF -#define ICP_QAT_FW_NUM_LONGWORDS_1 1 -#define ICP_QAT_FW_NUM_LONGWORDS_2 2 -#define ICP_QAT_FW_NUM_LONGWORDS_3 3 -#define ICP_QAT_FW_NUM_LONGWORDS_4 4 -#define ICP_QAT_FW_NUM_LONGWORDS_5 5 -#define ICP_QAT_FW_NUM_LONGWORDS_6 6 -#define ICP_QAT_FW_NUM_LONGWORDS_7 7 -#define ICP_QAT_FW_NUM_LONGWORDS_10 10 -#define ICP_QAT_FW_NUM_LONGWORDS_13 13 -#define ICP_QAT_FW_NULL_REQ_SERV_ID 1 - -enum icp_qat_fw_comn_resp_serv_id { - ICP_QAT_FW_COMN_RESP_SERV_NULL, - ICP_QAT_FW_COMN_RESP_SERV_CPM_FW, - ICP_QAT_FW_COMN_RESP_SERV_DELIMITER -}; - -enum icp_qat_fw_comn_request_id { - ICP_QAT_FW_COMN_REQ_NULL = 0, - ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3, - ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4, - ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7, - ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9, - ICP_QAT_FW_COMN_REQ_DELIMITER -}; - -struct icp_qat_fw_comn_req_hdr_cd_pars { - union { - struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; - } s; - struct { - uint32_t serv_specif_fields[4]; - } s1; - } u; -}; - -struct icp_qat_fw_comn_req_mid { - uint64_t opaque_data; - uint64_t src_data_addr; - uint64_t dest_data_addr; - uint32_t src_length; - uint32_t dst_length; -}; - -struct icp_qat_fw_comn_req_cd_ctrl { - uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; -}; - -struct icp_qat_fw_comn_req_hdr { - uint8_t resrvd1; - uint8_t service_cmd_id; - uint8_t service_type; - uint8_t hdr_flags; - uint16_t serv_specif_flags; - uint16_t comn_req_flags; -}; - -struct icp_qat_fw_comn_req_rqpars { - uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; -}; - -struct icp_qat_fw_comn_req { - struct icp_qat_fw_comn_req_hdr comn_hdr; - struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; - struct icp_qat_fw_comn_req_mid comn_mid; - struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; - struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; -}; - -struct icp_qat_fw_comn_error { - uint8_t xlat_err_code; - uint8_t cmp_err_code; -}; - -struct icp_qat_fw_comn_resp_hdr { - uint8_t resrvd1; - uint8_t service_id; - uint8_t response_type; - uint8_t hdr_flags; - struct icp_qat_fw_comn_error comn_error; - uint8_t comn_status; - uint8_t cmd_id; -}; - -struct icp_qat_fw_comn_resp { - struct icp_qat_fw_comn_resp_hdr comn_hdr; - uint64_t opaque_data; - uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; -}; - -#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1 -#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0 -#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7 -#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1 -#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F - -#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \ - icp_qat_fw_comn_req_hdr_t.service_type - -#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \ - icp_qat_fw_comn_req_hdr_t.service_type = val - -#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \ - icp_qat_fw_comn_req_hdr_t.service_cmd_id - -#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \ - 
icp_qat_fw_comn_req_hdr_t.service_cmd_id = val - -#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \ - ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags) - -#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \ - ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) - -#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \ - QAT_FIELD_GET(hdr_flags, \ - ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ - ICP_QAT_FW_COMN_VALID_FLAG_MASK) - -#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \ - (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK) - -#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \ - QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ - ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ - ICP_QAT_FW_COMN_VALID_FLAG_MASK) - -#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \ - (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \ - ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) - -#define QAT_COMN_PTR_TYPE_BITPOS 0 -#define QAT_COMN_PTR_TYPE_MASK 0x1 -#define QAT_COMN_CD_FLD_TYPE_BITPOS 1 -#define QAT_COMN_CD_FLD_TYPE_MASK 0x1 -#define QAT_COMN_PTR_TYPE_FLAT 0x0 -#define QAT_COMN_PTR_TYPE_SGL 0x1 -#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0 -#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1 - -#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \ - ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \ - | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS)) - -#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \ - QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK) - -#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \ - QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \ - QAT_COMN_CD_FLD_TYPE_MASK) - -#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \ - QAT_COMN_PTR_TYPE_MASK) - -#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \ - QAT_COMN_CD_FLD_TYPE_MASK) - -#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4 -#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0 -#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0 -#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F - -#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \ - ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ - >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) - -#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ - { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ - & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ - ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ - & ICP_QAT_FW_COMN_NEXT_ID_MASK)); } - -#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \ - (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) - -#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \ - { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ - & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ - ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); } - -#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 -#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 -#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 -#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 -#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 -#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1 -#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3 -#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1 - -#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \ - ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \ - QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \ - (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \ - QAT_COMN_RESP_CMP_STATUS_BITPOS) | \ - (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \ - QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \ - (((eolb) & 
QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \ - QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS)) - -#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \ - QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \ - QAT_COMN_RESP_CRYPTO_STATUS_MASK) - -#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \ - QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \ - QAT_COMN_RESP_CMP_STATUS_MASK) - -#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \ - QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \ - QAT_COMN_RESP_XLAT_STATUS_MASK) - -#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \ - QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \ - QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) - -#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0 -#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1 -#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0 -#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1 -#define ERR_CODE_NO_ERROR 0 -#define ERR_CODE_INVALID_BLOCK_TYPE -1 -#define ERR_CODE_NO_MATCH_ONES_COMP -2 -#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3 -#define ERR_CODE_INCOMPLETE_LEN -4 -#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5 -#define ERR_CODE_RPT_GT_SPEC_LEN -6 -#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7 -#define ERR_CODE_INV_DIS_CODE_LEN -8 -#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9 -#define ERR_CODE_DIS_TOO_FAR_BACK -10 -#define ERR_CODE_OVERFLOW_ERROR -11 -#define ERR_CODE_SOFT_ERROR -12 -#define ERR_CODE_FATAL_ERROR -13 -#define ERR_CODE_SSM_ERROR -14 -#define ERR_CODE_ENDPOINT_ERROR -15 - -enum icp_qat_fw_slice { - ICP_QAT_FW_SLICE_NULL = 0, - ICP_QAT_FW_SLICE_CIPHER = 1, - ICP_QAT_FW_SLICE_AUTH = 2, - ICP_QAT_FW_SLICE_DRAM_RD = 3, - ICP_QAT_FW_SLICE_DRAM_WR = 4, - ICP_QAT_FW_SLICE_COMP = 5, - ICP_QAT_FW_SLICE_XLAT = 6, - ICP_QAT_FW_SLICE_DELIMITER -}; -#endif diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h b/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h deleted file mode 100644 index fbf2b839..00000000 --- a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h +++ /dev/null @@ -1,404 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * Copyright(c) 2015 Intel Corporation. - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * qat-linux@intel.com - * - * BSD LICENSE - * Copyright(c) 2015 Intel Corporation. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef _ICP_QAT_FW_LA_H_ -#define _ICP_QAT_FW_LA_H_ -#include "icp_qat_fw.h" - -enum icp_qat_fw_la_cmd_id { - ICP_QAT_FW_LA_CMD_CIPHER = 0, - ICP_QAT_FW_LA_CMD_AUTH = 1, - ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2, - ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3, - ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4, - ICP_QAT_FW_LA_CMD_TRNG_TEST = 5, - ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6, - ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7, - ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8, - ICP_QAT_FW_LA_CMD_MGF1 = 9, - ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10, - ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11, - ICP_QAT_FW_LA_CMD_DELIMITER = 12 -}; - -#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK -#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR -#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK -#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR - -struct icp_qat_fw_la_bulk_req { - struct icp_qat_fw_comn_req_hdr comn_hdr; - struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; - struct icp_qat_fw_comn_req_mid comn_mid; - struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; - struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; -}; - -#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1 -#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0 -#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12 -#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1 -#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1 -#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11 -#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1 -#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1 -#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0 -#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10 -#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1 -#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4 -#define ICP_QAT_FW_LA_GCM_PROTO 2 -#define ICP_QAT_FW_LA_CCM_PROTO 1 -#define ICP_QAT_FW_LA_NO_PROTO 0 -#define QAT_LA_PROTO_BITPOS 7 -#define QAT_LA_PROTO_MASK 0x7 -#define ICP_QAT_FW_LA_CMP_AUTH_RES 1 -#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0 -#define QAT_LA_CMP_AUTH_RES_BITPOS 6 -#define QAT_LA_CMP_AUTH_RES_MASK 0x1 -#define ICP_QAT_FW_LA_RET_AUTH_RES 1 -#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0 -#define QAT_LA_RET_AUTH_RES_BITPOS 5 -#define QAT_LA_RET_AUTH_RES_MASK 0x1 -#define ICP_QAT_FW_LA_UPDATE_STATE 1 -#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0 -#define QAT_LA_UPDATE_STATE_BITPOS 4 -#define QAT_LA_UPDATE_STATE_MASK 0x1 -#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0 -#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1 -#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3 -#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1 -#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0 -#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1 -#define QAT_LA_CIPH_IV_FLD_BITPOS 2 -#define QAT_LA_CIPH_IV_FLD_MASK 0x1 -#define ICP_QAT_FW_LA_PARTIAL_NONE 0 -#define 
ICP_QAT_FW_LA_PARTIAL_START 1 -#define ICP_QAT_FW_LA_PARTIAL_MID 3 -#define ICP_QAT_FW_LA_PARTIAL_END 2 -#define QAT_LA_PARTIAL_BITPOS 0 -#define QAT_LA_PARTIAL_MASK 0x3 -#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \ - cmp_auth, ret_auth, update_state, \ - ciph_iv, ciphcfg, partial) \ - (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \ - QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \ - ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \ - QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \ - ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \ - QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \ - ((proto & QAT_LA_PROTO_MASK) << \ - QAT_LA_PROTO_BITPOS) | \ - ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \ - QAT_LA_CMP_AUTH_RES_BITPOS) | \ - ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \ - QAT_LA_RET_AUTH_RES_BITPOS) | \ - ((update_state & QAT_LA_UPDATE_STATE_MASK) << \ - QAT_LA_UPDATE_STATE_BITPOS) | \ - ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \ - QAT_LA_CIPH_IV_FLD_BITPOS) | \ - ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \ - QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \ - ((partial & QAT_LA_PARTIAL_MASK) << \ - QAT_LA_PARTIAL_BITPOS)) - -#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \ - QAT_LA_CIPH_IV_FLD_MASK) - -#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ - QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) - -#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \ - QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ - QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) - -#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ - QAT_LA_GCM_IV_LEN_FLAG_MASK) - -#define ICP_QAT_FW_LA_PROTO_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK) - -#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \ - QAT_LA_CMP_AUTH_RES_MASK) - -#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \ - QAT_LA_RET_AUTH_RES_MASK) - -#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ - QAT_LA_DIGEST_IN_BUFFER_MASK) - -#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \ - QAT_LA_UPDATE_STATE_MASK) - -#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \ - QAT_LA_PARTIAL_MASK) - -#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \ - QAT_LA_CIPH_IV_FLD_MASK) - -#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ - QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) - -#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ - QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) - -#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ - QAT_LA_GCM_IV_LEN_FLAG_MASK) - -#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \ - QAT_LA_PROTO_MASK) - -#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \ - QAT_LA_CMP_AUTH_RES_MASK) - -#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \ - QAT_LA_RET_AUTH_RES_MASK) - -#define 
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ - QAT_LA_DIGEST_IN_BUFFER_MASK) - -#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \ - QAT_LA_UPDATE_STATE_MASK) - -#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \ - QAT_LA_PARTIAL_MASK) - -struct icp_qat_fw_cipher_req_hdr_cd_pars { - union { - struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; - } s; - struct { - uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; - } s1; - } u; -}; - -struct icp_qat_fw_cipher_auth_req_hdr_cd_pars { - union { - struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; - } s; - struct { - uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; - } sl; - } u; -}; - -struct icp_qat_fw_cipher_cd_ctrl_hdr { - uint8_t cipher_state_sz; - uint8_t cipher_key_sz; - uint8_t cipher_cfg_offset; - uint8_t next_curr_id; - uint8_t cipher_padding_sz; - uint8_t resrvd1; - uint16_t resrvd2; - uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; -}; - -struct icp_qat_fw_auth_cd_ctrl_hdr { - uint32_t resrvd1; - uint8_t resrvd2; - uint8_t hash_flags; - uint8_t hash_cfg_offset; - uint8_t next_curr_id; - uint8_t resrvd3; - uint8_t outer_prefix_sz; - uint8_t final_sz; - uint8_t inner_res_sz; - uint8_t resrvd4; - uint8_t inner_state1_sz; - uint8_t inner_state2_offset; - uint8_t inner_state2_sz; - uint8_t outer_config_offset; - uint8_t outer_state1_sz; - uint8_t outer_res_sz; - uint8_t outer_prefix_offset; -}; - -struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { - uint8_t cipher_state_sz; - uint8_t cipher_key_sz; - uint8_t cipher_cfg_offset; - uint8_t next_curr_id_cipher; - uint8_t cipher_padding_sz; - uint8_t hash_flags; - uint8_t hash_cfg_offset; - uint8_t next_curr_id_auth; - uint8_t resrvd1; - uint8_t outer_prefix_sz; - uint8_t final_sz; - uint8_t inner_res_sz; - uint8_t resrvd2; - uint8_t inner_state1_sz; - uint8_t inner_state2_offset; - uint8_t inner_state2_sz; - uint8_t outer_config_offset; - uint8_t outer_state1_sz; - uint8_t outer_res_sz; - uint8_t outer_prefix_offset; -}; - -#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1 -#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0 -#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240 -#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \ - (sizeof(struct icp_qat_fw_la_cipher_req_params_t)) -#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0) - -struct icp_qat_fw_la_cipher_req_params { - uint32_t cipher_offset; - uint32_t cipher_length; - union { - uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; - struct { - uint64_t cipher_IV_ptr; - uint64_t resrvd1; - } s; - } u; -}; - -struct icp_qat_fw_la_auth_req_params { - uint32_t auth_off; - uint32_t auth_len; - union { - uint64_t auth_partial_st_prefix; - uint64_t aad_adr; - } u1; - uint64_t auth_res_addr; - union { - uint8_t inner_prefix_sz; - uint8_t aad_sz; - } u2; - uint8_t resrvd1; - uint8_t hash_state_sz; - uint8_t auth_res_sz; -} __rte_packed; - -struct icp_qat_fw_la_auth_req_params_resrvd_flds { - uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; - union { - uint8_t inner_prefix_sz; - uint8_t aad_sz; - } u2; - uint8_t resrvd1; - uint16_t resrvd2; -}; - -struct icp_qat_fw_la_resp { - struct icp_qat_fw_comn_resp_hdr comn_resp; - uint64_t opaque_data; - 
uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; -}; - -#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \ - ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \ - ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) - -#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ -{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ - ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ - & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ - ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ - & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } - -#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \ - (((cd_ctrl_hdr_t)->next_curr_id_cipher) \ - & ICP_QAT_FW_COMN_CURR_ID_MASK) - -#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \ -{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ - ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ - & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ - ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } - -#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \ - ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ - >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) - -#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ -{ (cd_ctrl_hdr_t)->next_curr_id_auth = \ - ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ - & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ - ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ - & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } - -#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \ - (((cd_ctrl_hdr_t)->next_curr_id_auth) \ - & ICP_QAT_FW_COMN_CURR_ID_MASK) - -#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \ -{ (cd_ctrl_hdr_t)->next_curr_id_auth = \ - ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ - & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ - ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } - -#endif diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/crypto/qat/qat_adf/icp_qat_hw.h deleted file mode 100644 index d03688c7..00000000 --- a/drivers/crypto/qat/qat_adf/icp_qat_hw.h +++ /dev/null @@ -1,329 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * Copyright(c) 2015 Intel Corporation. - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * qat-linux@intel.com - * - * BSD LICENSE - * Copyright(c) 2015 Intel Corporation. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef _ICP_QAT_HW_H_ -#define _ICP_QAT_HW_H_ - -enum icp_qat_hw_ae_id { - ICP_QAT_HW_AE_0 = 0, - ICP_QAT_HW_AE_1 = 1, - ICP_QAT_HW_AE_2 = 2, - ICP_QAT_HW_AE_3 = 3, - ICP_QAT_HW_AE_4 = 4, - ICP_QAT_HW_AE_5 = 5, - ICP_QAT_HW_AE_6 = 6, - ICP_QAT_HW_AE_7 = 7, - ICP_QAT_HW_AE_8 = 8, - ICP_QAT_HW_AE_9 = 9, - ICP_QAT_HW_AE_10 = 10, - ICP_QAT_HW_AE_11 = 11, - ICP_QAT_HW_AE_DELIMITER = 12 -}; - -enum icp_qat_hw_qat_id { - ICP_QAT_HW_QAT_0 = 0, - ICP_QAT_HW_QAT_1 = 1, - ICP_QAT_HW_QAT_2 = 2, - ICP_QAT_HW_QAT_3 = 3, - ICP_QAT_HW_QAT_4 = 4, - ICP_QAT_HW_QAT_5 = 5, - ICP_QAT_HW_QAT_DELIMITER = 6 -}; - -enum icp_qat_hw_auth_algo { - ICP_QAT_HW_AUTH_ALGO_NULL = 0, - ICP_QAT_HW_AUTH_ALGO_SHA1 = 1, - ICP_QAT_HW_AUTH_ALGO_MD5 = 2, - ICP_QAT_HW_AUTH_ALGO_SHA224 = 3, - ICP_QAT_HW_AUTH_ALGO_SHA256 = 4, - ICP_QAT_HW_AUTH_ALGO_SHA384 = 5, - ICP_QAT_HW_AUTH_ALGO_SHA512 = 6, - ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7, - ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8, - ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9, - ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10, - ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11, - ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12, - ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13, - ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14, - ICP_QAT_HW_AUTH_RESERVED_1 = 15, - ICP_QAT_HW_AUTH_RESERVED_2 = 16, - ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17, - ICP_QAT_HW_AUTH_RESERVED_3 = 18, - ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19, - ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20 -}; - -enum icp_qat_hw_auth_mode { - ICP_QAT_HW_AUTH_MODE0 = 0, - ICP_QAT_HW_AUTH_MODE1 = 1, - ICP_QAT_HW_AUTH_MODE2 = 2, - ICP_QAT_HW_AUTH_MODE_DELIMITER = 3 -}; - -struct icp_qat_hw_auth_config { - uint32_t config; - uint32_t reserved; -}; - -#define QAT_AUTH_MODE_BITPOS 4 -#define QAT_AUTH_MODE_MASK 0xF -#define QAT_AUTH_ALGO_BITPOS 0 -#define QAT_AUTH_ALGO_MASK 0xF -#define QAT_AUTH_CMP_BITPOS 8 -#define QAT_AUTH_CMP_MASK 0x7F -#define QAT_AUTH_SHA3_PADDING_BITPOS 16 -#define QAT_AUTH_SHA3_PADDING_MASK 0x1 -#define QAT_AUTH_ALGO_SHA3_BITPOS 22 -#define QAT_AUTH_ALGO_SHA3_MASK 0x3 -#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \ - (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \ - ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \ - (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \ - QAT_AUTH_ALGO_SHA3_BITPOS) | \ - (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \ - (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 
1 : 0) \ - & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \ - ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS)) - -struct icp_qat_hw_auth_counter { - uint32_t counter; - uint32_t reserved; -}; - -#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF -#define QAT_AUTH_COUNT_BITPOS 0 -#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \ - (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS) - -struct icp_qat_hw_auth_setup { - struct icp_qat_hw_auth_config auth_config; - struct icp_qat_hw_auth_counter auth_counter; -}; - -#define QAT_HW_DEFAULT_ALIGNMENT 8 -#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1))) -#define ICP_QAT_HW_NULL_STATE1_SZ 32 -#define ICP_QAT_HW_MD5_STATE1_SZ 16 -#define ICP_QAT_HW_SHA1_STATE1_SZ 20 -#define ICP_QAT_HW_SHA224_STATE1_SZ 32 -#define ICP_QAT_HW_SHA256_STATE1_SZ 32 -#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32 -#define ICP_QAT_HW_SHA384_STATE1_SZ 64 -#define ICP_QAT_HW_SHA512_STATE1_SZ 64 -#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64 -#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28 -#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48 -#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16 -#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16 -#define ICP_QAT_HW_AES_F9_STATE1_SZ 32 -#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16 -#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16 -#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8 -#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8 -#define ICP_QAT_HW_NULL_STATE2_SZ 32 -#define ICP_QAT_HW_MD5_STATE2_SZ 16 -#define ICP_QAT_HW_SHA1_STATE2_SZ 20 -#define ICP_QAT_HW_SHA224_STATE2_SZ 32 -#define ICP_QAT_HW_SHA256_STATE2_SZ 32 -#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0 -#define ICP_QAT_HW_SHA384_STATE2_SZ 64 -#define ICP_QAT_HW_SHA512_STATE2_SZ 64 -#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0 -#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0 -#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0 -#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48 -#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16 -#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16 -#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16 -#define ICP_QAT_HW_F9_IK_SZ 16 -#define ICP_QAT_HW_F9_FK_SZ 16 -#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \ - ICP_QAT_HW_F9_FK_SZ) -#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ -#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24 -#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32 -#define ICP_QAT_HW_GALOIS_H_SZ 16 -#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8 -#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16 - -struct icp_qat_hw_auth_sha512 { - struct icp_qat_hw_auth_setup inner_setup; - uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ]; - struct icp_qat_hw_auth_setup outer_setup; - uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ]; -}; - -struct icp_qat_hw_auth_algo_blk { - struct icp_qat_hw_auth_sha512 sha; -}; - -#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0 -#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF - -enum icp_qat_hw_cipher_algo { - ICP_QAT_HW_CIPHER_ALGO_NULL = 0, - ICP_QAT_HW_CIPHER_ALGO_DES = 1, - ICP_QAT_HW_CIPHER_ALGO_3DES = 2, - ICP_QAT_HW_CIPHER_ALGO_AES128 = 3, - ICP_QAT_HW_CIPHER_ALGO_AES192 = 4, - ICP_QAT_HW_CIPHER_ALGO_AES256 = 5, - ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6, - ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7, - ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8, - ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9, - ICP_QAT_HW_CIPHER_DELIMITER = 10 -}; - -enum icp_qat_hw_cipher_mode { - ICP_QAT_HW_CIPHER_ECB_MODE = 0, - ICP_QAT_HW_CIPHER_CBC_MODE = 1, - ICP_QAT_HW_CIPHER_CTR_MODE = 2, - ICP_QAT_HW_CIPHER_F8_MODE = 3, - ICP_QAT_HW_CIPHER_XTS_MODE = 6, - ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7 -}; - -struct icp_qat_hw_cipher_config { - uint32_t val; 
- uint32_t reserved; -}; - -enum icp_qat_hw_cipher_dir { - ICP_QAT_HW_CIPHER_ENCRYPT = 0, - ICP_QAT_HW_CIPHER_DECRYPT = 1, -}; - -enum icp_qat_hw_auth_op { - ICP_QAT_HW_AUTH_VERIFY = 0, - ICP_QAT_HW_AUTH_GENERATE = 1, -}; - -enum icp_qat_hw_cipher_convert { - ICP_QAT_HW_CIPHER_NO_CONVERT = 0, - ICP_QAT_HW_CIPHER_KEY_CONVERT = 1, -}; - -#define QAT_CIPHER_MODE_BITPOS 4 -#define QAT_CIPHER_MODE_MASK 0xF -#define QAT_CIPHER_ALGO_BITPOS 0 -#define QAT_CIPHER_ALGO_MASK 0xF -#define QAT_CIPHER_CONVERT_BITPOS 9 -#define QAT_CIPHER_CONVERT_MASK 0x1 -#define QAT_CIPHER_DIR_BITPOS 8 -#define QAT_CIPHER_DIR_MASK 0x1 -#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2 -#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2 -#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \ - (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \ - ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \ - ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \ - ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS)) -#define ICP_QAT_HW_DES_BLK_SZ 8 -#define ICP_QAT_HW_3DES_BLK_SZ 8 -#define ICP_QAT_HW_NULL_BLK_SZ 8 -#define ICP_QAT_HW_AES_BLK_SZ 16 -#define ICP_QAT_HW_KASUMI_BLK_SZ 8 -#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8 -#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8 -#define ICP_QAT_HW_NULL_KEY_SZ 256 -#define ICP_QAT_HW_DES_KEY_SZ 8 -#define ICP_QAT_HW_3DES_KEY_SZ 24 -#define ICP_QAT_HW_AES_128_KEY_SZ 16 -#define ICP_QAT_HW_AES_192_KEY_SZ 24 -#define ICP_QAT_HW_AES_256_KEY_SZ 32 -#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ - QAT_CIPHER_MODE_F8_KEY_SZ_MULT) -#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \ - QAT_CIPHER_MODE_F8_KEY_SZ_MULT) -#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ - QAT_CIPHER_MODE_F8_KEY_SZ_MULT) -#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ - QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) -#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ - QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) -#define ICP_QAT_HW_KASUMI_KEY_SZ 16 -#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \ - QAT_CIPHER_MODE_F8_KEY_SZ_MULT) -#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ - QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) -#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ - QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) -#define ICP_QAT_HW_ARC4_KEY_SZ 256 -#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16 -#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16 -#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16 -#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16 -#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2 - -#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ - -/* These defines describe position of the bit-fields - * in the flags byte in B0 - */ -#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT 6 -#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT 3 - -#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q) \ - ((((Adata) > 0 ? 
1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \ - | ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \ - | ((q) - 1)) - -#define ICP_QAT_HW_CCM_NQ_CONST 15 -#define ICP_QAT_HW_CCM_AAD_B0_LEN 16 -#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2 -#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \ - ICP_QAT_HW_CCM_AAD_LEN_INFO) -#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16 -#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4 -#define ICP_QAT_HW_CCM_NONCE_OFFSET 1 - -struct icp_qat_hw_cipher_algo_blk { - struct icp_qat_hw_cipher_config cipher_config; - uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ]; -} __rte_cache_aligned; - -#endif diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h deleted file mode 100644 index 802ba95d..00000000 --- a/drivers/crypto/qat/qat_adf/qat_algs.h +++ /dev/null @@ -1,169 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * Copyright(c) 2015-2016 Intel Corporation. - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * qat-linux@intel.com - * - * BSD LICENSE - * Copyright(c) 2015-2017 Intel Corporation. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#ifndef _ICP_QAT_ALGS_H_ -#define _ICP_QAT_ALGS_H_ -#include -#include -#include "icp_qat_hw.h" -#include "icp_qat_fw.h" -#include "icp_qat_fw_la.h" -#include "../qat_crypto.h" - -/* - * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR - * Integrity Key (IK) - */ -#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA - -#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555 - -/* 3DES key sizes */ -#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */ -#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */ - -#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \ - ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ - ICP_QAT_HW_CIPHER_NO_CONVERT, \ - ICP_QAT_HW_CIPHER_ENCRYPT) - -#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \ - ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ - ICP_QAT_HW_CIPHER_KEY_CONVERT, \ - ICP_QAT_HW_CIPHER_DECRYPT) - -struct qat_alg_buf { - uint32_t len; - uint32_t resrvd; - uint64_t addr; -} __rte_packed; - -enum qat_crypto_proto_flag { - QAT_CRYPTO_PROTO_FLAG_NONE = 0, - QAT_CRYPTO_PROTO_FLAG_CCM = 1, - QAT_CRYPTO_PROTO_FLAG_GCM = 2, - QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3, - QAT_CRYPTO_PROTO_FLAG_ZUC = 4 -}; - -/* - * Maximum number of SGL entries - */ -#define QAT_SGL_MAX_NUMBER 16 - -struct qat_alg_buf_list { - uint64_t resrvd; - uint32_t num_bufs; - uint32_t num_mapped_bufs; - struct qat_alg_buf bufers[QAT_SGL_MAX_NUMBER]; -} __rte_packed __rte_cache_aligned; - -struct qat_crypto_op_cookie { - struct qat_alg_buf_list qat_sgl_list_src; - struct qat_alg_buf_list qat_sgl_list_dst; - rte_iova_t qat_sgl_src_phys_addr; - rte_iova_t qat_sgl_dst_phys_addr; -}; - -/* Common content descriptor */ -struct qat_alg_cd { - struct icp_qat_hw_cipher_algo_blk cipher; - struct icp_qat_hw_auth_algo_blk hash; -} __rte_packed __rte_cache_aligned; - -struct qat_session { - enum icp_qat_fw_la_cmd_id qat_cmd; - enum icp_qat_hw_cipher_algo qat_cipher_alg; - enum icp_qat_hw_cipher_dir qat_dir; - enum icp_qat_hw_cipher_mode qat_mode; - enum icp_qat_hw_auth_algo qat_hash_alg; - enum icp_qat_hw_auth_op auth_op; - void *bpi_ctx; - struct qat_alg_cd cd; - uint8_t *cd_cur_ptr; - rte_iova_t cd_paddr; - struct icp_qat_fw_la_bulk_req fw_req; - uint8_t aad_len; - struct qat_crypto_instance *inst; - struct { - uint16_t offset; - uint16_t length; - } cipher_iv; - struct { - uint16_t offset; - uint16_t length; - } auth_iv; - uint16_t digest_length; - rte_spinlock_t lock; /* protects this struct */ - enum qat_device_gen min_qat_dev_gen; -}; - -int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg); - -int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cd, - uint8_t *enckey, - uint32_t enckeylen); - -int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc, - uint8_t *authkey, - uint32_t authkeylen, - uint32_t aad_length, - uint32_t digestsize, - unsigned int operation); - -void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header, - enum qat_crypto_proto_flag proto_flags); - -int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg); -int qat_alg_validate_aes_docsisbpi_key(int key_len, - enum icp_qat_hw_cipher_algo *alg); -int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg); -int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg); -int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg); -int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg); -int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo 
qat_cipher_alg); -int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg); -#endif diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c deleted file mode 100644 index 26f854c2..00000000 --- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c +++ /dev/null @@ -1,1059 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * Copyright(c) 2015-2016 Intel Corporation. - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * qat-linux@intel.com - * - * BSD LICENSE - * Copyright(c) 2015-2017 Intel Corporation. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
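Throughout the file below, state1 sizes are the raw digest sizes rounded up to the hardware's quadword alignment (see qat_hash_get_state1_size()). A standalone sketch of that rounding, assuming the usual 8-byte QAT_HW_DEFAULT_ALIGNMENT:

    #include <assert.h>

    /* Round x up to the next multiple of align (align must be a power of two) */
    static unsigned int round_up(unsigned int x, unsigned int align)
    {
    	return (x + align - 1) & ~(align - 1);
    }

    int main(void)
    {
    	assert(round_up(20, 8) == 24); /* SHA-1: 20-byte digest -> 24-byte state1 */
    	assert(round_up(16, 8) == 16); /* MD5: already quadword aligned */
    	return 0;
    }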
- */ - -#include -#include -#include -#include -#include -#include -#include - -#include "../qat_logs.h" - -#include /* Needed to calculate pre-compute values */ -#include /* Needed to calculate pre-compute values */ -#include /* Needed to calculate pre-compute values */ - -#include "qat_algs.h" - -/* returns block size in bytes per cipher algo */ -int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg) -{ - switch (qat_cipher_alg) { - case ICP_QAT_HW_CIPHER_ALGO_DES: - return ICP_QAT_HW_DES_BLK_SZ; - case ICP_QAT_HW_CIPHER_ALGO_3DES: - return ICP_QAT_HW_3DES_BLK_SZ; - case ICP_QAT_HW_CIPHER_ALGO_AES128: - case ICP_QAT_HW_CIPHER_ALGO_AES192: - case ICP_QAT_HW_CIPHER_ALGO_AES256: - return ICP_QAT_HW_AES_BLK_SZ; - default: - PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg); - return -EFAULT; - }; - return -EFAULT; -} - -/* - * Returns size in bytes per hash algo for state1 size field in cd_ctrl - * This is digest size rounded up to nearest quadword - */ -static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg) -{ - switch (qat_hash_alg) { - case ICP_QAT_HW_AUTH_ALGO_SHA1: - return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_SHA224: - return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_SHA256: - return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_SHA384: - return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_SHA512: - return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC: - return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: - case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: - return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: - return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: - return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_MD5: - return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: - return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_NULL: - return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC: - return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_DELIMITER: - /* return maximum state1 size in this case */ - return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - default: - PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg); - return -EFAULT; - }; - return -EFAULT; -} - -/* returns digest size in bytes per hash algo */ -static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg) -{ - switch (qat_hash_alg) { - case ICP_QAT_HW_AUTH_ALGO_SHA1: - return ICP_QAT_HW_SHA1_STATE1_SZ; - case ICP_QAT_HW_AUTH_ALGO_SHA224: - return ICP_QAT_HW_SHA224_STATE1_SZ; - case ICP_QAT_HW_AUTH_ALGO_SHA256: - return ICP_QAT_HW_SHA256_STATE1_SZ; - case ICP_QAT_HW_AUTH_ALGO_SHA384: - return ICP_QAT_HW_SHA384_STATE1_SZ; - case ICP_QAT_HW_AUTH_ALGO_SHA512: - return ICP_QAT_HW_SHA512_STATE1_SZ; - case 
ICP_QAT_HW_AUTH_ALGO_MD5: - return ICP_QAT_HW_MD5_STATE1_SZ; - case ICP_QAT_HW_AUTH_ALGO_DELIMITER: - /* return maximum digest size in this case */ - return ICP_QAT_HW_SHA512_STATE1_SZ; - default: - PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg); - return -EFAULT; - }; - return -EFAULT; -} - -/* returns block size in bytes per hash algo */ -static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg) -{ - switch (qat_hash_alg) { - case ICP_QAT_HW_AUTH_ALGO_SHA1: - return SHA_CBLOCK; - case ICP_QAT_HW_AUTH_ALGO_SHA224: - return SHA256_CBLOCK; - case ICP_QAT_HW_AUTH_ALGO_SHA256: - return SHA256_CBLOCK; - case ICP_QAT_HW_AUTH_ALGO_SHA384: - return SHA512_CBLOCK; - case ICP_QAT_HW_AUTH_ALGO_SHA512: - return SHA512_CBLOCK; - case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: - return 16; - case ICP_QAT_HW_AUTH_ALGO_MD5: - return MD5_CBLOCK; - case ICP_QAT_HW_AUTH_ALGO_DELIMITER: - /* return maximum block size in this case */ - return SHA512_CBLOCK; - default: - PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg); - return -EFAULT; - }; - return -EFAULT; -} - -static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out) -{ - SHA_CTX ctx; - - if (!SHA1_Init(&ctx)) - return -EFAULT; - SHA1_Transform(&ctx, data_in); - rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH); - return 0; -} - -static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out) -{ - SHA256_CTX ctx; - - if (!SHA224_Init(&ctx)) - return -EFAULT; - SHA256_Transform(&ctx, data_in); - rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH); - return 0; -} - -static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out) -{ - SHA256_CTX ctx; - - if (!SHA256_Init(&ctx)) - return -EFAULT; - SHA256_Transform(&ctx, data_in); - rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH); - return 0; -} - -static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out) -{ - SHA512_CTX ctx; - - if (!SHA384_Init(&ctx)) - return -EFAULT; - SHA512_Transform(&ctx, data_in); - rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH); - return 0; -} - -static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out) -{ - SHA512_CTX ctx; - - if (!SHA512_Init(&ctx)) - return -EFAULT; - SHA512_Transform(&ctx, data_in); - rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH); - return 0; -} - -static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out) -{ - MD5_CTX ctx; - - if (!MD5_Init(&ctx)) - return -EFAULT; - MD5_Transform(&ctx, data_in); - rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH); - - return 0; -} - -static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg, - uint8_t *data_in, - uint8_t *data_out) -{ - int digest_size; - uint8_t digest[qat_hash_get_digest_size( - ICP_QAT_HW_AUTH_ALGO_DELIMITER)]; - uint32_t *hash_state_out_be32; - uint64_t *hash_state_out_be64; - int i; - - PMD_INIT_FUNC_TRACE(); - digest_size = qat_hash_get_digest_size(hash_alg); - if (digest_size <= 0) - return -EFAULT; - - hash_state_out_be32 = (uint32_t *)data_out; - hash_state_out_be64 = (uint64_t *)data_out; - - switch (hash_alg) { - case ICP_QAT_HW_AUTH_ALGO_SHA1: - if (partial_hash_sha1(data_in, digest)) - return -EFAULT; - for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++) - *hash_state_out_be32 = - rte_bswap32(*(((uint32_t *)digest)+i)); - break; - case ICP_QAT_HW_AUTH_ALGO_SHA224: - if (partial_hash_sha224(data_in, digest)) - return -EFAULT; - for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++) - *hash_state_out_be32 = - rte_bswap32(*(((uint32_t *)digest)+i)); - break; - case ICP_QAT_HW_AUTH_ALGO_SHA256: - if
(partial_hash_sha256(data_in, digest)) - return -EFAULT; - for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++) - *hash_state_out_be32 = - rte_bswap32(*(((uint32_t *)digest)+i)); - break; - case ICP_QAT_HW_AUTH_ALGO_SHA384: - if (partial_hash_sha384(data_in, digest)) - return -EFAULT; - for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++) - *hash_state_out_be64 = - rte_bswap64(*(((uint64_t *)digest)+i)); - break; - case ICP_QAT_HW_AUTH_ALGO_SHA512: - if (partial_hash_sha512(data_in, digest)) - return -EFAULT; - for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++) - *hash_state_out_be64 = - rte_bswap64(*(((uint64_t *)digest)+i)); - break; - case ICP_QAT_HW_AUTH_ALGO_MD5: - if (partial_hash_md5(data_in, data_out)) - return -EFAULT; - break; - default: - PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg); - return -EFAULT; - } - - return 0; -} -#define HMAC_IPAD_VALUE 0x36 -#define HMAC_OPAD_VALUE 0x5c -#define HASH_XCBC_PRECOMP_KEY_NUM 3 - -static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg, - const uint8_t *auth_key, - uint16_t auth_keylen, - uint8_t *p_state_buf, - uint16_t *p_state_len) -{ - int block_size; - uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)]; - uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)]; - int i; - - PMD_INIT_FUNC_TRACE(); - if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) { - static uint8_t qat_aes_xcbc_key_seed[ - ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = { - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, - 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, - }; - - uint8_t *in = NULL; - uint8_t *out = p_state_buf; - int x; - AES_KEY enc_key; - - in = rte_zmalloc("working mem for key", - ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16); - if (in == NULL) { - PMD_DRV_LOG(ERR, "Failed to alloc memory"); - return -ENOMEM; - } - - rte_memcpy(in, qat_aes_xcbc_key_seed, - ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ); - for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) { - if (AES_set_encrypt_key(auth_key, auth_keylen << 3, - &enc_key) != 0) { - rte_free(in - - (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ)); - memset(out - - (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ), - 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ); - return -EFAULT; - } - AES_encrypt(in, out, &enc_key); - in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ; - out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ; - } - *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ; - rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ); - return 0; - } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) || - (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) { - uint8_t *in = NULL; - uint8_t *out = p_state_buf; - AES_KEY enc_key; - - memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ + - ICP_QAT_HW_GALOIS_LEN_A_SZ + - ICP_QAT_HW_GALOIS_E_CTR0_SZ); - in = rte_zmalloc("working mem for key", - ICP_QAT_HW_GALOIS_H_SZ, 16); - if (in == NULL) { - PMD_DRV_LOG(ERR, "Failed to alloc memory"); - return -ENOMEM; - } - - memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ); - if (AES_set_encrypt_key(auth_key, auth_keylen << 3, - &enc_key) != 0) { - return -EFAULT; - } - AES_encrypt(in, out, &enc_key); - *p_state_len = ICP_QAT_HW_GALOIS_H_SZ + - ICP_QAT_HW_GALOIS_LEN_A_SZ + - ICP_QAT_HW_GALOIS_E_CTR0_SZ; - rte_free(in); - return 0; - } - - block_size = qat_hash_get_block_size(hash_alg); - if (block_size <= 0) - return -EFAULT; - /* init ipad and opad from 
key and xor with fixed values */ - memset(ipad, 0, block_size); - memset(opad, 0, block_size); - - if (auth_keylen > (unsigned int)block_size) { - PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen); - return -EFAULT; - } - rte_memcpy(ipad, auth_key, auth_keylen); - rte_memcpy(opad, auth_key, auth_keylen); - - for (i = 0; i < block_size; i++) { - uint8_t *ipad_ptr = ipad + i; - uint8_t *opad_ptr = opad + i; - *ipad_ptr ^= HMAC_IPAD_VALUE; - *opad_ptr ^= HMAC_OPAD_VALUE; - } - - /* do partial hash of ipad and copy to state1 */ - if (partial_hash_compute(hash_alg, ipad, p_state_buf)) { - memset(ipad, 0, block_size); - memset(opad, 0, block_size); - PMD_DRV_LOG(ERR, "ipad precompute failed"); - return -EFAULT; - } - - /* - * State len is a multiple of 8, so may be larger than the digest. - * Put the partial hash of opad state_len bytes after state1 - */ - *p_state_len = qat_hash_get_state1_size(hash_alg); - if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) { - memset(ipad, 0, block_size); - memset(opad, 0, block_size); - PMD_DRV_LOG(ERR, "opad precompute failed"); - return -EFAULT; - } - - /* don't leave data lying around */ - memset(ipad, 0, block_size); - memset(opad, 0, block_size); - return 0; -} - -void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header, - enum qat_crypto_proto_flag proto_flags) -{ - PMD_INIT_FUNC_TRACE(); - header->hdr_flags = - ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); - header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA; - header->comn_req_flags = - ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR, - QAT_COMN_PTR_TYPE_FLAT); - ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_PARTIAL_NONE); - ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, - ICP_QAT_FW_CIPH_IV_16BYTE_DATA); - - switch (proto_flags) { - case QAT_CRYPTO_PROTO_FLAG_NONE: - ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_PROTO); - break; - case QAT_CRYPTO_PROTO_FLAG_CCM: - ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_CCM_PROTO); - break; - case QAT_CRYPTO_PROTO_FLAG_GCM: - ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_GCM_PROTO); - break; - case QAT_CRYPTO_PROTO_FLAG_SNOW3G: - ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_SNOW_3G_PROTO); - break; - case QAT_CRYPTO_PROTO_FLAG_ZUC: - ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_ZUC_3G_PROTO); - break; - } - - ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_UPDATE_STATE); - ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER); -} - -/* - * Snow3G and ZUC should never use this function; they set their - * protocol flag directly in both the cipher and auth parts of the - * content descriptor building functions - */ -static enum qat_crypto_proto_flag -qat_get_crypto_proto_flag(uint16_t flags) -{ - int proto = ICP_QAT_FW_LA_PROTO_GET(flags); - enum qat_crypto_proto_flag qat_proto_flag = - QAT_CRYPTO_PROTO_FLAG_NONE; - - switch (proto) { - case ICP_QAT_FW_LA_GCM_PROTO: - qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM; - break; - case ICP_QAT_FW_LA_CCM_PROTO: - qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM; - break; - } - - return qat_proto_flag; -} - -int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc, - uint8_t *cipherkey, - uint32_t cipherkeylen) -{ - struct icp_qat_hw_cipher_algo_blk *cipher; - struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req; - struct
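For the HMAC algorithms, the precompute above follows the textbook construction: zero-pad the key to the hash block size, XOR with the 0x36/0x5c constants, then store one compression-function pass over each padded block as state1 and state2. A minimal sketch of the pad preparation (a hypothetical helper, not the driver function):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define HMAC_IPAD 0x36
    #define HMAC_OPAD 0x5c

    /* Build the ipad/opad blocks that are then partially hashed */
    static void hmac_pads(const uint8_t *key, size_t keylen,
    		uint8_t *ipad, uint8_t *opad, size_t block_size)
    {
    	size_t i;

    	memset(ipad, 0, block_size);
    	memset(opad, 0, block_size);
    	memcpy(ipad, key, keylen); /* caller ensures keylen <= block_size */
    	memcpy(opad, key, keylen);
    	for (i = 0; i < block_size; i++) {
    		ipad[i] ^= HMAC_IPAD;
    		opad[i] ^= HMAC_OPAD;
    	}
    }

    int main(void)
    {
    	uint8_t key[6] = "qatkk";
    	uint8_t ipad[64], opad[64];

    	hmac_pads(key, sizeof(key), ipad, opad, 64); /* 64 = SHA-1/SHA-256 block */
    	printf("%02x %02x\n", ipad[0], opad[0]);
    	return 0;
    }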
icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; - struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; - void *ptr = &req_tmpl->cd_ctrl; - struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; - struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; - enum icp_qat_hw_cipher_convert key_convert; - enum qat_crypto_proto_flag qat_proto_flag = - QAT_CRYPTO_PROTO_FLAG_NONE; - uint32_t total_key_size; - uint16_t cipher_offset, cd_size; - uint32_t wordIndex = 0; - uint32_t *temp_key = NULL; - PMD_INIT_FUNC_TRACE(); - - if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { - cd_pars->u.s.content_desc_addr = cdesc->cd_paddr; - ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, - ICP_QAT_FW_SLICE_CIPHER); - ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, - ICP_QAT_FW_SLICE_DRAM_WR); - ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_RET_AUTH_RES); - ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_CMP_AUTH_RES); - cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd; - } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) { - cd_pars->u.s.content_desc_addr = cdesc->cd_paddr; - ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, - ICP_QAT_FW_SLICE_CIPHER); - ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, - ICP_QAT_FW_SLICE_AUTH); - ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, - ICP_QAT_FW_SLICE_AUTH); - ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, - ICP_QAT_FW_SLICE_DRAM_WR); - cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd; - } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) { - PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command."); - return -EFAULT; - } - - if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) { - /* - * CTR Streaming ciphers are a special case. Decrypt = encrypt - * Overriding default values previously set - */ - cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; - key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT; - } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 - || cdesc->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) - key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT; - else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) - key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT; - else - key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT; - - if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) { - total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ + - ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ; - cipher_cd_ctrl->cipher_state_sz = - ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3; - qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G; - - } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) { - total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ; - cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3; - cipher_cd_ctrl->cipher_padding_sz = - (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3; - } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) { - total_key_size = ICP_QAT_HW_3DES_KEY_SZ; - cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3; - qat_proto_flag = - qat_get_crypto_proto_flag(header->serv_specif_flags); - } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) { - total_key_size = ICP_QAT_HW_DES_KEY_SZ; - cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3; - qat_proto_flag = - qat_get_crypto_proto_flag(header->serv_specif_flags); - } else if (cdesc->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { - total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ + - ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; - cipher_cd_ctrl->cipher_state_sz = - ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3; - qat_proto_flag = 
QAT_CRYPTO_PROTO_FLAG_ZUC; - cdesc->min_qat_dev_gen = QAT_GEN2; - } else { - total_key_size = cipherkeylen; - cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3; - qat_proto_flag = - qat_get_crypto_proto_flag(header->serv_specif_flags); - } - cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3; - cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd); - cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3; - - header->service_cmd_id = cdesc->qat_cmd; - qat_alg_init_common_hdr(header, qat_proto_flag); - - cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr; - cipher->cipher_config.val = - ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode, - cdesc->qat_cipher_alg, key_convert, - cdesc->qat_dir); - - if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) { - temp_key = (uint32_t *)(cdesc->cd_cur_ptr + - sizeof(struct icp_qat_hw_cipher_config) - + cipherkeylen); - memcpy(cipher->key, cipherkey, cipherkeylen); - memcpy(temp_key, cipherkey, cipherkeylen); - - /* XOR Key with KASUMI F8 key modifier at 4 bytes level */ - for (wordIndex = 0; wordIndex < (cipherkeylen >> 2); - wordIndex++) - temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES; - - cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) + - cipherkeylen + cipherkeylen; - } else { - memcpy(cipher->key, cipherkey, cipherkeylen); - cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) + - cipherkeylen; - } - - if (total_key_size > cipherkeylen) { - uint32_t padding_size = total_key_size-cipherkeylen; - if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) - && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) - /* K3 not provided so use K1 = K3*/ - memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size); - else - memset(cdesc->cd_cur_ptr, 0, padding_size); - cdesc->cd_cur_ptr += padding_size; - } - cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd; - cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3; - - return 0; -} - -int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc, - uint8_t *authkey, - uint32_t authkeylen, - uint32_t aad_length, - uint32_t digestsize, - unsigned int operation) -{ - struct icp_qat_hw_auth_setup *hash; - struct icp_qat_hw_cipher_algo_blk *cipherconfig; - struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req; - struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; - struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; - void *ptr = &req_tmpl->cd_ctrl; - struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; - struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; - struct icp_qat_fw_la_auth_req_params *auth_param = - (struct icp_qat_fw_la_auth_req_params *) - ((char *)&req_tmpl->serv_specif_rqpars + - sizeof(struct icp_qat_fw_la_cipher_req_params)); - uint16_t state1_size = 0, state2_size = 0; - uint16_t hash_offset, cd_size; - uint32_t *aad_len = NULL; - uint32_t wordIndex = 0; - uint32_t *pTempKey; - enum qat_crypto_proto_flag qat_proto_flag = - QAT_CRYPTO_PROTO_FLAG_NONE; - - PMD_INIT_FUNC_TRACE(); - - if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) { - ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, - ICP_QAT_FW_SLICE_AUTH); - ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, - ICP_QAT_FW_SLICE_DRAM_WR); - cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd; - } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) { - ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, - ICP_QAT_FW_SLICE_AUTH); - ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, - ICP_QAT_FW_SLICE_CIPHER); - ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, - 
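The 3DES padding branch above covers the two key options declared in qat_algs.h: a full 24-byte key (OPT1) or a 16-byte key (OPT2) that is expanded by reusing K1 as K3. A sketch of that expansion (a hypothetical helper):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Expand a 16-byte 3DES key K1||K2 to 24 bytes as K1||K2||K1 (K3 = K1) */
    static void qat_3des_expand_opt2(const uint8_t k16[16], uint8_t k24[24])
    {
    	memcpy(k24, k16, 16);     /* K1 || K2 */
    	memcpy(k24 + 16, k16, 8); /* K3 = K1 */
    }

    int main(void)
    {
    	uint8_t k16[16] = { 1, 2, 3, 4, 5, 6, 7, 8,
    			9, 10, 11, 12, 13, 14, 15, 16 };
    	uint8_t k24[24];

    	qat_3des_expand_opt2(k16, k24);
    	assert(memcmp(k24 + 16, k16, 8) == 0);
    	return 0;
    }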
ICP_QAT_FW_SLICE_CIPHER); - ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, - ICP_QAT_FW_SLICE_DRAM_WR); - cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd; - } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) { - PMD_DRV_LOG(ERR, "Invalid param, must be a hash command."); - return -EFAULT; - } - - if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) { - ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_RET_AUTH_RES); - ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_CMP_AUTH_RES); - cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY; - } else { - ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_RET_AUTH_RES); - ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_CMP_AUTH_RES); - cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE; - } - - /* - * Setup the inner hash config - */ - hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd); - hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr; - hash->auth_config.reserved = 0; - hash->auth_config.config = - ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, - cdesc->qat_hash_alg, digestsize); - - if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 - || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 - || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) - hash->auth_counter.counter = 0; - else - hash->auth_counter.counter = rte_bswap32( - qat_hash_get_block_size(cdesc->qat_hash_alg)); - - cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup); - - /* - * cd_cur_ptr now points at the state1 information. - */ - switch (cdesc->qat_hash_alg) { - case ICP_QAT_HW_AUTH_ALGO_SHA1: - if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, - authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { - PMD_DRV_LOG(ERR, "(SHA)precompute failed"); - return -EFAULT; - } - state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8); - break; - case ICP_QAT_HW_AUTH_ALGO_SHA224: - if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, - authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { - PMD_DRV_LOG(ERR, "(SHA)precompute failed"); - return -EFAULT; - } - state2_size = ICP_QAT_HW_SHA224_STATE2_SZ; - break; - case ICP_QAT_HW_AUTH_ALGO_SHA256: - if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, - authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { - PMD_DRV_LOG(ERR, "(SHA)precompute failed"); - return -EFAULT; - } - state2_size = ICP_QAT_HW_SHA256_STATE2_SZ; - break; - case ICP_QAT_HW_AUTH_ALGO_SHA384: - if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, - authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { - PMD_DRV_LOG(ERR, "(SHA)precompute failed"); - return -EFAULT; - } - state2_size = ICP_QAT_HW_SHA384_STATE2_SZ; - break; - case ICP_QAT_HW_AUTH_ALGO_SHA512: - if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, - authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { - PMD_DRV_LOG(ERR, "(SHA)precompute failed"); - return -EFAULT; - } - state2_size = ICP_QAT_HW_SHA512_STATE2_SZ; - break; - case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC: - state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ; - if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC, - authkey, authkeylen, cdesc->cd_cur_ptr + state1_size, - &state2_size)) { - PMD_DRV_LOG(ERR, "(XCBC)precompute failed"); - return -EFAULT; - } - break; - case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: - case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: - qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM; - state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ; - if (qat_alg_do_precomputes(cdesc->qat_hash_alg, - authkey, authkeylen, 
cdesc->cd_cur_ptr + state1_size, - &state2_size)) { - PMD_DRV_LOG(ERR, "(GCM)precompute failed"); - return -EFAULT; - } - /* - * Write (the length of AAD) into bytes 16-19 of state2 - * in big-endian format. This field is 8 bytes - */ - auth_param->u2.aad_sz = - RTE_ALIGN_CEIL(aad_length, 16); - auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3; - - aad_len = (uint32_t *)(cdesc->cd_cur_ptr + - ICP_QAT_HW_GALOIS_128_STATE1_SZ + - ICP_QAT_HW_GALOIS_H_SZ); - *aad_len = rte_bswap32(aad_length); - cdesc->aad_len = aad_length; - break; - case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: - qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G; - state1_size = qat_hash_get_state1_size( - ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2); - state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ; - memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size); - - cipherconfig = (struct icp_qat_hw_cipher_algo_blk *) - (cdesc->cd_cur_ptr + state1_size + state2_size); - cipherconfig->cipher_config.val = - ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE, - ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2, - ICP_QAT_HW_CIPHER_KEY_CONVERT, - ICP_QAT_HW_CIPHER_ENCRYPT); - memcpy(cipherconfig->key, authkey, authkeylen); - memset(cipherconfig->key + authkeylen, - 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ); - cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) + - authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ; - auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3; - break; - case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: - hash->auth_config.config = - ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0, - cdesc->qat_hash_alg, digestsize); - qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC; - state1_size = qat_hash_get_state1_size( - ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3); - state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ; - memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size - + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ); - - memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen); - cdesc->cd_cur_ptr += state1_size + state2_size - + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; - auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3; - cdesc->min_qat_dev_gen = QAT_GEN2; - - break; - case ICP_QAT_HW_AUTH_ALGO_MD5: - if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, - authkey, authkeylen, cdesc->cd_cur_ptr, - &state1_size)) { - PMD_DRV_LOG(ERR, "(MD5)precompute failed"); - return -EFAULT; - } - state2_size = ICP_QAT_HW_MD5_STATE2_SZ; - break; - case ICP_QAT_HW_AUTH_ALGO_NULL: - state1_size = qat_hash_get_state1_size( - ICP_QAT_HW_AUTH_ALGO_NULL); - state2_size = ICP_QAT_HW_NULL_STATE2_SZ; - break; - case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC: - qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM; - state1_size = qat_hash_get_state1_size( - ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC); - state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ + - ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ; - - if (aad_length > 0) { - aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN + - ICP_QAT_HW_CCM_AAD_LEN_INFO; - auth_param->u2.aad_sz = - RTE_ALIGN_CEIL(aad_length, - ICP_QAT_HW_CCM_AAD_ALIGNMENT); - } else { - auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN; - } - - cdesc->aad_len = aad_length; - hash->auth_counter.counter = 0; - - hash_cd_ctrl->outer_prefix_sz = digestsize; - auth_param->hash_state_sz = digestsize; - - memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen); - break; - case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: - state1_size = qat_hash_get_state1_size( - ICP_QAT_HW_AUTH_ALGO_KASUMI_F9); - state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ; - memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size); - pTempKey = (uint32_t 
*)(cdesc->cd_cur_ptr + state1_size - + authkeylen); - /* - * The Inner Hash Initial State2 block must contain IK - * (Initialisation Key), followed by IK XOR-ed with KM - * (Key Modifier): IK||(IK^KM). - */ - /* write the auth key */ - memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen); - /* initialise temp key with auth key */ - memcpy(pTempKey, authkey, authkeylen); - /* XOR Key with KASUMI F9 key modifier at 4 bytes level */ - for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++) - pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES; - break; - default: - PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg); - return -EFAULT; - } - - /* Request template setup */ - qat_alg_init_common_hdr(header, qat_proto_flag); - header->service_cmd_id = cdesc->qat_cmd; - - /* Auth CD config setup */ - hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3; - hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; - hash_cd_ctrl->inner_res_sz = digestsize; - hash_cd_ctrl->final_sz = digestsize; - hash_cd_ctrl->inner_state1_sz = state1_size; - auth_param->auth_res_sz = digestsize; - - hash_cd_ctrl->inner_state2_sz = state2_size; - hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + - ((sizeof(struct icp_qat_hw_auth_setup) + - RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8)) - >> 3); - - cdesc->cd_cur_ptr += state1_size + state2_size; - cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd; - - cd_pars->u.s.content_desc_addr = cdesc->cd_paddr; - cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3; - - return 0; -} - -int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg) -{ - switch (key_len) { - case ICP_QAT_HW_AES_128_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_AES128; - break; - case ICP_QAT_HW_AES_192_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_AES192; - break; - case ICP_QAT_HW_AES_256_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_AES256; - break; - default: - return -EINVAL; - } - return 0; -} - -int qat_alg_validate_aes_docsisbpi_key(int key_len, - enum icp_qat_hw_cipher_algo *alg) -{ - switch (key_len) { - case ICP_QAT_HW_AES_128_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_AES128; - break; - default: - return -EINVAL; - } - return 0; -} - -int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg) -{ - switch (key_len) { - case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2; - break; - default: - return -EINVAL; - } - return 0; -} - -int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg) -{ - switch (key_len) { - case ICP_QAT_HW_KASUMI_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI; - break; - default: - return -EINVAL; - } - return 0; -} - -int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg) -{ - switch (key_len) { - case ICP_QAT_HW_DES_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_DES; - break; - default: - return -EINVAL; - } - return 0; -} - -int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg) -{ - switch (key_len) { - case QAT_3DES_KEY_SZ_OPT1: - case QAT_3DES_KEY_SZ_OPT2: - *alg = ICP_QAT_HW_CIPHER_ALGO_3DES; - break; - default: - return -EINVAL; - } - return 0; -} - -int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg) -{ - switch (key_len) { - case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3; - break; - default: - return -EINVAL; - } - return 0; -} diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c deleted file mode 100644 
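Both KASUMI paths above derive the second half of the hardware key block by XORing each 32-bit word of the key with a fixed modifier: 0x55555555 for F8 (cipher) and 0xAAAAAAAA for F9 (integrity), giving IK||(IK^KM). A standalone sketch of the F9 derivation:

    #include <assert.h>
    #include <stdint.h>

    #define KASUMI_F9_KM 0xAAAAAAAA /* key modifier XORed with the IK */

    /* Derive IK ^ KM word by word, as the F9 state2 setup does */
    static void kasumi_f9_mod_key(const uint32_t *ik, uint32_t *out, int words)
    {
    	int i;

    	for (i = 0; i < words; i++)
    		out[i] = ik[i] ^ KASUMI_F9_KM;
    }

    int main(void)
    {
    	uint32_t ik[4] = { 0x00112233, 0x44556677, 0x8899aabb, 0xccddeeff };
    	uint32_t km[4];

    	kasumi_f9_mod_key(ik, km, 4);
    	assert(km[0] == 0xaabb8899);
    	return 0;
    }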
index 4afe159d..00000000 --- a/drivers/crypto/qat/qat_crypto.c +++ /dev/null @@ -1,1696 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015-2017 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "qat_logs.h" -#include "qat_algs.h" -#include "qat_crypto.h" -#include "adf_transport_access_macros.h" - -#define BYTE_LENGTH 8 -/* bpi is only used for partial blocks of DES and AES - * so AES block len can be assumed as max len for iv, src and dst - */ -#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ - -static int -qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo, - struct qat_pmd_private *internals) { - int i = 0; - const struct rte_cryptodev_capabilities *capability; - - while ((capability = &(internals->qat_dev_capabilities[i++]))->op != - RTE_CRYPTO_OP_TYPE_UNDEFINED) { - if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) - continue; - - if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) - continue; - - if (capability->sym.cipher.algo == algo) - return 1; - } - return 0; -} - -static int -qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo, - struct qat_pmd_private *internals) { - int i = 0; - const struct rte_cryptodev_capabilities *capability; - - while ((capability = &(internals->qat_dev_capabilities[i++]))->op != - RTE_CRYPTO_OP_TYPE_UNDEFINED) { - if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) - continue; - - if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH) - continue; - - if (capability->sym.auth.algo == algo) - return 1; - } - return 0; -} - -/** Encrypt a single partial block - * Depends on openssl libcrypto - * Uses ECB+XOR to do CFB encryption, same result, more performant - */ -static inline int -bpi_cipher_encrypt(uint8_t *src, uint8_t *dst, - uint8_t *iv, int ivlen, int srclen, - void *bpi_ctx) -{ - EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx; - int encrypted_ivlen; - uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN]; - uint8_t *encr = encrypted_iv; - - /* ECB method: encrypt the IV, then XOR this with plaintext */ - if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen) - <= 0) - goto cipher_encrypt_err; - - for (; srclen != 0; --srclen, ++dst, ++src, ++encr) - *dst = *src ^ *encr; - - return 0; - -cipher_encrypt_err: - PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed"); - return -EINVAL; -} - -/** Decrypt a single partial block - * Depends on openssl libcrypto - * Uses ECB+XOR to do CFB encryption, same result, more performant - */ -static inline int -bpi_cipher_decrypt(uint8_t *src, uint8_t *dst, - uint8_t *iv, int ivlen, int srclen, - void *bpi_ctx) -{ - EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx; - int encrypted_ivlen; - uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN]; - uint8_t *encr = encrypted_iv; - - /* ECB method: encrypt (not decrypt!) 
the IV, then XOR with ciphertext */ - if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen) - <= 0) - goto cipher_decrypt_err; - - for (; srclen != 0; --srclen, ++dst, ++src, ++encr) - *dst = *src ^ *encr; - - return 0; - -cipher_decrypt_err: - PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed"); - return -EINVAL; -} - -/** Creates a context for either AES or DES in ECB mode - * Depends on openssl libcrypto - */ -static int -bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo, - enum rte_crypto_cipher_operation direction __rte_unused, - uint8_t *key, void **ctx) -{ - const EVP_CIPHER *algo = NULL; - int ret; - *ctx = EVP_CIPHER_CTX_new(); - - if (*ctx == NULL) { - ret = -ENOMEM; - goto ctx_init_err; - } - - if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI) - algo = EVP_des_ecb(); - else - algo = EVP_aes_128_ecb(); - - /* IV will be ECB encrypted whether direction is encrypt or decrypt */ - if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) { - ret = -EINVAL; - goto ctx_init_err; - } - - return 0; - -ctx_init_err: - if (*ctx != NULL) - EVP_CIPHER_CTX_free(*ctx); - return ret; -} - -/** Frees a context previously created - * Depends on openssl libcrypto - */ -static void -bpi_cipher_ctx_free(void *bpi_ctx) -{ - if (bpi_ctx != NULL) - EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx); -} - -static inline uint32_t -adf_modulo(uint32_t data, uint32_t shift); - -static inline int -qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg, - struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp); - -void -qat_crypto_sym_clear_session(struct rte_cryptodev *dev, - struct rte_cryptodev_sym_session *sess) -{ - PMD_INIT_FUNC_TRACE(); - uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); - struct qat_session *s = (struct qat_session *)sess_priv; - - if (sess_priv) { - if (s->bpi_ctx) - bpi_cipher_ctx_free(s->bpi_ctx); - memset(s, 0, qat_crypto_sym_get_session_private_size(dev)); - struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); - rte_mempool_put(sess_mp, sess_priv); - } -} - -static int -qat_get_cmd_id(const struct rte_crypto_sym_xform *xform) -{ - /* Cipher Only */ - if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) - return ICP_QAT_FW_LA_CMD_CIPHER; - - /* Authentication Only */ - if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL) - return ICP_QAT_FW_LA_CMD_AUTH; - - /* AEAD */ - if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { - /* AES-GCM and AES-CCM work with different directions: - * GCM first encrypts and then generates the hash, whereas - * AES-CCM first generates the hash and then encrypts. A similar - * relation - * applies to decryption.
- */ - if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) - if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) - return ICP_QAT_FW_LA_CMD_CIPHER_HASH; - else - return ICP_QAT_FW_LA_CMD_HASH_CIPHER; - else - if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) - return ICP_QAT_FW_LA_CMD_HASH_CIPHER; - else - return ICP_QAT_FW_LA_CMD_CIPHER_HASH; - } - - if (xform->next == NULL) - return -1; - - /* Cipher then Authenticate */ - if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && - xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) - return ICP_QAT_FW_LA_CMD_CIPHER_HASH; - - /* Authenticate then Cipher */ - if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && - xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) - return ICP_QAT_FW_LA_CMD_HASH_CIPHER; - - return -1; -} - -static struct rte_crypto_auth_xform * -qat_get_auth_xform(struct rte_crypto_sym_xform *xform) -{ - do { - if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) - return &xform->auth; - - xform = xform->next; - } while (xform); - - return NULL; -} - -static struct rte_crypto_cipher_xform * -qat_get_cipher_xform(struct rte_crypto_sym_xform *xform) -{ - do { - if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) - return &xform->cipher; - - xform = xform->next; - } while (xform); - - return NULL; -} - -int -qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct qat_session *session) -{ - struct qat_pmd_private *internals = dev->data->dev_private; - struct rte_crypto_cipher_xform *cipher_xform = NULL; - int ret; - - /* Get cipher xform from crypto xform chain */ - cipher_xform = qat_get_cipher_xform(xform); - - session->cipher_iv.offset = cipher_xform->iv.offset; - session->cipher_iv.length = cipher_xform->iv.length; - - switch (cipher_xform->algo) { - case RTE_CRYPTO_CIPHER_AES_CBC: - if (qat_alg_validate_aes_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid AES cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; - break; - case RTE_CRYPTO_CIPHER_AES_CTR: - if (qat_alg_validate_aes_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid AES cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; - break; - case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: - if (qat_alg_validate_snow3g_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; - break; - case RTE_CRYPTO_CIPHER_NULL: - session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; - break; - case RTE_CRYPTO_CIPHER_KASUMI_F8: - if (qat_alg_validate_kasumi_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE; - break; - case RTE_CRYPTO_CIPHER_3DES_CBC: - if (qat_alg_validate_3des_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; - break; - case RTE_CRYPTO_CIPHER_DES_CBC: - if (qat_alg_validate_des_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid DES cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; - break; - case 
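The GCM/CCM ordering rule in qat_get_cmd_id() above collapses to a single comparison; a sketch, under the assumption that CIPHER_HASH means the cipher slice runs before the auth slice:

    #include <assert.h>
    #include <stdbool.h>

    /* true when the cipher slice must run before the auth slice */
    static bool cipher_first(bool is_gcm, bool encrypt)
    {
    	/* GCM: encrypt-then-MAC; CCM: MAC-then-encrypt; mirrored on decrypt */
    	return is_gcm == encrypt;
    }

    int main(void)
    {
    	assert(cipher_first(true, true));   /* GCM encrypt -> CIPHER_HASH */
    	assert(!cipher_first(true, false)); /* GCM decrypt -> HASH_CIPHER */
    	assert(!cipher_first(false, true)); /* CCM encrypt -> HASH_CIPHER */
    	assert(cipher_first(false, false)); /* CCM decrypt -> CIPHER_HASH */
    	return 0;
    }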
RTE_CRYPTO_CIPHER_3DES_CTR: - if (qat_alg_validate_3des_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; - break; - case RTE_CRYPTO_CIPHER_DES_DOCSISBPI: - ret = bpi_cipher_ctx_init( - cipher_xform->algo, - cipher_xform->op, - cipher_xform->key.data, - &session->bpi_ctx); - if (ret != 0) { - PMD_DRV_LOG(ERR, "failed to create DES BPI ctx"); - goto error_out; - } - if (qat_alg_validate_des_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid DES cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; - break; - case RTE_CRYPTO_CIPHER_AES_DOCSISBPI: - ret = bpi_cipher_ctx_init( - cipher_xform->algo, - cipher_xform->op, - cipher_xform->key.data, - &session->bpi_ctx); - if (ret != 0) { - PMD_DRV_LOG(ERR, "failed to create AES BPI ctx"); - goto error_out; - } - if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; - break; - case RTE_CRYPTO_CIPHER_ZUC_EEA3: - if (!qat_is_cipher_alg_supported( - cipher_xform->algo, internals)) { - PMD_DRV_LOG(ERR, "%s not supported on this device", - rte_crypto_cipher_algorithm_strings - [cipher_xform->algo]); - ret = -ENOTSUP; - goto error_out; - } - if (qat_alg_validate_zuc_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; - break; - case RTE_CRYPTO_CIPHER_3DES_ECB: - case RTE_CRYPTO_CIPHER_AES_ECB: - case RTE_CRYPTO_CIPHER_AES_F8: - case RTE_CRYPTO_CIPHER_AES_XTS: - case RTE_CRYPTO_CIPHER_ARC4: - PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u", - cipher_xform->algo); - ret = -ENOTSUP; - goto error_out; - default: - PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n", - cipher_xform->algo); - ret = -EINVAL; - goto error_out; - } - - if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) - session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; - else - session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT; - - if (qat_alg_aead_session_create_content_desc_cipher(session, - cipher_xform->key.data, - cipher_xform->key.length)) { - ret = -EINVAL; - goto error_out; - } - - return 0; - -error_out: - if (session->bpi_ctx) { - bpi_cipher_ctx_free(session->bpi_ctx); - session->bpi_ctx = NULL; - } - return ret; -} - -int -qat_crypto_sym_configure_session(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct rte_cryptodev_sym_session *sess, - struct rte_mempool *mempool) -{ - void *sess_private_data; - int ret; - - if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); - return -ENOMEM; - } - - ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data); - if (ret != 0) { - PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure " - "session parameters"); - - /* Return session to mempool */ - rte_mempool_put(mempool, sess_private_data); - return ret; - } - - set_session_private_data(sess, dev->driver_id, - sess_private_data); - - return 0; -} - -int -qat_crypto_set_session_parameters(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, void *session_private) -{ - struct qat_session 
*session = session_private; - int ret; - - int qat_cmd_id; - PMD_INIT_FUNC_TRACE(); - - /* Set context descriptor physical address */ - session->cd_paddr = rte_mempool_virt2iova(session) + - offsetof(struct qat_session, cd); - - session->min_qat_dev_gen = QAT_GEN1; - - /* Get requested QAT command id */ - qat_cmd_id = qat_get_cmd_id(xform); - if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) { - PMD_DRV_LOG(ERR, "Unsupported xform chain requested"); - return -ENOTSUP; - } - session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id; - switch (session->qat_cmd) { - case ICP_QAT_FW_LA_CMD_CIPHER: - ret = qat_crypto_sym_configure_session_cipher(dev, xform, session); - if (ret < 0) - return ret; - break; - case ICP_QAT_FW_LA_CMD_AUTH: - ret = qat_crypto_sym_configure_session_auth(dev, xform, session); - if (ret < 0) - return ret; - break; - case ICP_QAT_FW_LA_CMD_CIPHER_HASH: - if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { - ret = qat_crypto_sym_configure_session_aead(xform, - session); - if (ret < 0) - return ret; - } else { - ret = qat_crypto_sym_configure_session_cipher(dev, - xform, session); - if (ret < 0) - return ret; - ret = qat_crypto_sym_configure_session_auth(dev, - xform, session); - if (ret < 0) - return ret; - } - break; - case ICP_QAT_FW_LA_CMD_HASH_CIPHER: - if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { - ret = qat_crypto_sym_configure_session_aead(xform, - session); - if (ret < 0) - return ret; - } else { - ret = qat_crypto_sym_configure_session_auth(dev, - xform, session); - if (ret < 0) - return ret; - ret = qat_crypto_sym_configure_session_cipher(dev, - xform, session); - if (ret < 0) - return ret; - } - break; - case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM: - case ICP_QAT_FW_LA_CMD_TRNG_TEST: - case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE: - case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE: - case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE: - case ICP_QAT_FW_LA_CMD_MGF1: - case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP: - case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP: - case ICP_QAT_FW_LA_CMD_DELIMITER: - PMD_DRV_LOG(ERR, "Unsupported Service %u", - session->qat_cmd); - return -ENOTSUP; - default: - PMD_DRV_LOG(ERR, "Unsupported Service %u", - session->qat_cmd); - return -ENOTSUP; - } - - return 0; -} - -int -qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct qat_session *session) -{ - struct rte_crypto_auth_xform *auth_xform = NULL; - struct qat_pmd_private *internals = dev->data->dev_private; - auth_xform = qat_get_auth_xform(xform); - uint8_t *key_data = auth_xform->key.data; - uint8_t key_length = auth_xform->key.length; - - switch (auth_xform->algo) { - case RTE_CRYPTO_AUTH_SHA1_HMAC: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1; - break; - case RTE_CRYPTO_AUTH_SHA224_HMAC: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224; - break; - case RTE_CRYPTO_AUTH_SHA256_HMAC: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256; - break; - case RTE_CRYPTO_AUTH_SHA384_HMAC: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384; - break; - case RTE_CRYPTO_AUTH_SHA512_HMAC: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512; - break; - case RTE_CRYPTO_AUTH_AES_XCBC_MAC: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC; - break; - case RTE_CRYPTO_AUTH_AES_GMAC: - if (qat_alg_validate_aes_key(auth_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid AES key size"); - return -EINVAL; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; - session->qat_hash_alg = 
ICP_QAT_HW_AUTH_ALGO_GALOIS_128; - - break; - case RTE_CRYPTO_AUTH_SNOW3G_UIA2: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2; - break; - case RTE_CRYPTO_AUTH_MD5_HMAC: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5; - break; - case RTE_CRYPTO_AUTH_NULL: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL; - break; - case RTE_CRYPTO_AUTH_KASUMI_F9: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9; - break; - case RTE_CRYPTO_AUTH_ZUC_EIA3: - if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) { - PMD_DRV_LOG(ERR, "%s not supported on this device", - rte_crypto_auth_algorithm_strings - [auth_xform->algo]); - return -ENOTSUP; - } - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3; - break; - case RTE_CRYPTO_AUTH_SHA1: - case RTE_CRYPTO_AUTH_SHA256: - case RTE_CRYPTO_AUTH_SHA512: - case RTE_CRYPTO_AUTH_SHA224: - case RTE_CRYPTO_AUTH_SHA384: - case RTE_CRYPTO_AUTH_MD5: - case RTE_CRYPTO_AUTH_AES_CMAC: - case RTE_CRYPTO_AUTH_AES_CBC_MAC: - PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u", - auth_xform->algo); - return -ENOTSUP; - default: - PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified", - auth_xform->algo); - return -EINVAL; - } - - session->auth_iv.offset = auth_xform->iv.offset; - session->auth_iv.length = auth_xform->iv.length; - - if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) { - if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) { - session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH; - session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; - /* - * It needs to create cipher desc content first, - * then authentication - */ - if (qat_alg_aead_session_create_content_desc_cipher(session, - auth_xform->key.data, - auth_xform->key.length)) - return -EINVAL; - - if (qat_alg_aead_session_create_content_desc_auth(session, - key_data, - key_length, - 0, - auth_xform->digest_length, - auth_xform->op)) - return -EINVAL; - } else { - session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER; - session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT; - /* - * It needs to create authentication desc content first, - * then cipher - */ - if (qat_alg_aead_session_create_content_desc_auth(session, - key_data, - key_length, - 0, - auth_xform->digest_length, - auth_xform->op)) - return -EINVAL; - - if (qat_alg_aead_session_create_content_desc_cipher(session, - auth_xform->key.data, - auth_xform->key.length)) - return -EINVAL; - } - /* Restore to authentication only */ - session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH; - } else { - if (qat_alg_aead_session_create_content_desc_auth(session, - key_data, - key_length, - 0, - auth_xform->digest_length, - auth_xform->op)) - return -EINVAL; - } - - session->digest_length = auth_xform->digest_length; - return 0; -} - -int -qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform, - struct qat_session *session) -{ - struct rte_crypto_aead_xform *aead_xform = &xform->aead; - enum rte_crypto_auth_operation crypto_operation; - - /* - * Store AEAD IV parameters as cipher IV, - * to avoid unnecessary memory usage - */ - session->cipher_iv.offset = xform->aead.iv.offset; - session->cipher_iv.length = xform->aead.iv.length; - - switch (aead_xform->algo) { - case RTE_CRYPTO_AEAD_AES_GCM: - if (qat_alg_validate_aes_key(aead_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid AES key size"); - return -EINVAL; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128; - break; - case RTE_CRYPTO_AEAD_AES_CCM: - if
(qat_alg_validate_aes_key(aead_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid AES key size"); - return -EINVAL; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC; - break; - default: - PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n", - aead_xform->algo); - return -EINVAL; - } - - if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT && - aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) || - (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT && - aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) { - session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; - /* - * It needs to create cipher desc content first, - * then authentication - */ - - crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ? - RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY; - - if (qat_alg_aead_session_create_content_desc_cipher(session, - aead_xform->key.data, - aead_xform->key.length)) - return -EINVAL; - - if (qat_alg_aead_session_create_content_desc_auth(session, - aead_xform->key.data, - aead_xform->key.length, - aead_xform->aad_length, - aead_xform->digest_length, - crypto_operation)) - return -EINVAL; - } else { - session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT; - /* - * It needs to create authentication desc content first, - * then cipher - */ - - crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ? - RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE; - - if (qat_alg_aead_session_create_content_desc_auth(session, - aead_xform->key.data, - aead_xform->key.length, - aead_xform->aad_length, - aead_xform->digest_length, - crypto_operation)) - return -EINVAL; - - if (qat_alg_aead_session_create_content_desc_cipher(session, - aead_xform->key.data, - aead_xform->key.length)) - return -EINVAL; - } - - session->digest_length = aead_xform->digest_length; - return 0; -} - -unsigned qat_crypto_sym_get_session_private_size( - struct rte_cryptodev *dev __rte_unused) -{ - return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8); -} - -static inline uint32_t -qat_bpicipher_preprocess(struct qat_session *ctx, - struct rte_crypto_op *op) -{ - int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg); - struct rte_crypto_sym_op *sym_op = op->sym; - uint8_t last_block_len = block_len > 0 ? - sym_op->cipher.data.length % block_len : 0; - - if (last_block_len && - ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) { - - /* Decrypt last block */ - uint8_t *last_block, *dst, *iv; - uint32_t last_block_offset = sym_op->cipher.data.offset + - sym_op->cipher.data.length - last_block_len; - last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src, - uint8_t *, last_block_offset); - - if (unlikely(sym_op->m_dst != NULL)) - /* out-of-place operation (OOP) */ - dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst, - uint8_t *, last_block_offset); - else - dst = last_block; - - if (last_block_len < sym_op->cipher.data.length) - /* use previous block ciphertext as IV */ - iv = last_block - block_len; - else - /* runt block, i.e. 
less than one full block */ - iv = rte_crypto_op_ctod_offset(op, uint8_t *, - ctx->cipher_iv.offset); - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX - rte_hexdump(stdout, "BPI: src before pre-process:", last_block, - last_block_len); - if (sym_op->m_dst != NULL) - rte_hexdump(stdout, "BPI: dst before pre-process:", dst, - last_block_len); -#endif - bpi_cipher_decrypt(last_block, dst, iv, block_len, - last_block_len, ctx->bpi_ctx); -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX - rte_hexdump(stdout, "BPI: src after pre-process:", last_block, - last_block_len); - if (sym_op->m_dst != NULL) - rte_hexdump(stdout, "BPI: dst after pre-process:", dst, - last_block_len); -#endif - } - - return sym_op->cipher.data.length - last_block_len; -} - -static inline uint32_t -qat_bpicipher_postprocess(struct qat_session *ctx, - struct rte_crypto_op *op) -{ - int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg); - struct rte_crypto_sym_op *sym_op = op->sym; - uint8_t last_block_len = block_len > 0 ? - sym_op->cipher.data.length % block_len : 0; - - if (last_block_len > 0 && - ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) { - - /* Encrypt last block */ - uint8_t *last_block, *dst, *iv; - uint32_t last_block_offset; - - last_block_offset = sym_op->cipher.data.offset + - sym_op->cipher.data.length - last_block_len; - last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src, - uint8_t *, last_block_offset); - - if (unlikely(sym_op->m_dst != NULL)) - /* out-of-place operation (OOP) */ - dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst, - uint8_t *, last_block_offset); - else - dst = last_block; - - if (last_block_len < sym_op->cipher.data.length) - /* use previous block ciphertext as IV */ - iv = dst - block_len; - else - /* runt block, i.e. less than one full block */ - iv = rte_crypto_op_ctod_offset(op, uint8_t *, - ctx->cipher_iv.offset); - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX - rte_hexdump(stdout, "BPI: src before post-process:", last_block, - last_block_len); - if (sym_op->m_dst != NULL) - rte_hexdump(stdout, "BPI: dst before post-process:", - dst, last_block_len); -#endif - bpi_cipher_encrypt(last_block, dst, iv, block_len, - last_block_len, ctx->bpi_ctx); -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX - rte_hexdump(stdout, "BPI: src after post-process:", last_block, - last_block_len); - if (sym_op->m_dst != NULL) - rte_hexdump(stdout, "BPI: dst after post-process:", dst, - last_block_len); -#endif - } - return sym_op->cipher.data.length - last_block_len; -} - -static inline void -txq_write_tail(struct qat_qp *qp, struct qat_queue *q) { - WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number, - q->hw_queue_number, q->tail); - q->nb_pending_requests = 0; - q->csr_tail = q->tail; -} - -uint16_t -qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - register struct qat_queue *queue; - struct qat_qp *tmp_qp = (struct qat_qp *)qp; - register uint32_t nb_ops_sent = 0; - register struct rte_crypto_op **cur_op = ops; - register int ret; - uint16_t nb_ops_possible = nb_ops; - register uint8_t *base_addr; - register uint32_t tail; - int overflow; - - if (unlikely(nb_ops == 0)) - return 0; - - /* read params used a lot in main loop into registers */ - queue = &(tmp_qp->tx_q); - base_addr = (uint8_t *)queue->base_addr; - tail = queue->tail; - - /* Find how many can actually fit on the ring */ - tmp_qp->inflights16 += nb_ops; - overflow = tmp_qp->inflights16 - queue->max_inflights; - if (overflow > 0) { - tmp_qp->inflights16 -= overflow; - nb_ops_possible = nb_ops - overflow; - if 
(nb_ops_possible == 0) - return 0; - } - - while (nb_ops_sent != nb_ops_possible) { - ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail, - tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp); - if (ret != 0) { - tmp_qp->stats.enqueue_err_count++; - /* - * This message cannot be enqueued, - * decrease the number of ops that weren't sent - */ - tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent; - if (nb_ops_sent == 0) - return 0; - goto kick_tail; - } - - tail = adf_modulo(tail + queue->msg_size, queue->modulo); - nb_ops_sent++; - cur_op++; - } -kick_tail: - queue->tail = tail; - tmp_qp->stats.enqueued_count += nb_ops_sent; - queue->nb_pending_requests += nb_ops_sent; - if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH || - queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) { - txq_write_tail(tmp_qp, queue); - } - return nb_ops_sent; -} - -static inline -void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q) -{ - uint32_t old_head, new_head; - uint32_t max_head; - - old_head = q->csr_head; - new_head = q->head; - max_head = qp->nb_descriptors * q->msg_size; - - /* write out free descriptors */ - void *cur_desc = (uint8_t *)q->base_addr + old_head; - - if (new_head < old_head) { - memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head); - memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head); - } else { - memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head); - } - q->nb_processed_responses = 0; - q->csr_head = new_head; - - /* write current head to CSR */ - WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number, - q->hw_queue_number, new_head); -} - -uint16_t -qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - struct qat_queue *rx_queue, *tx_queue; - struct qat_qp *tmp_qp = (struct qat_qp *)qp; - uint32_t msg_counter = 0; - struct rte_crypto_op *rx_op; - struct icp_qat_fw_comn_resp *resp_msg; - uint32_t head; - - rx_queue = &(tmp_qp->rx_q); - tx_queue = &(tmp_qp->tx_q); - head = rx_queue->head; - resp_msg = (struct icp_qat_fw_comn_resp *) - ((uint8_t *)rx_queue->base_addr + head); - - while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG && - msg_counter != nb_ops) { - rx_op = (struct rte_crypto_op *)(uintptr_t) - (resp_msg->opaque_data); - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX - rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg, - sizeof(struct icp_qat_fw_comn_resp)); -#endif - if (ICP_QAT_FW_COMN_STATUS_FLAG_OK != - ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET( - resp_msg->comn_hdr.comn_status)) { - rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; - } else { - struct qat_session *sess = (struct qat_session *) - get_session_private_data( - rx_op->sym->session, - cryptodev_qat_driver_id); - - if (sess->bpi_ctx) - qat_bpicipher_postprocess(sess, rx_op); - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; - } - - head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo); - resp_msg = (struct icp_qat_fw_comn_resp *) - ((uint8_t *)rx_queue->base_addr + head); - *ops = rx_op; - ops++; - msg_counter++; - } - if (msg_counter > 0) { - rx_queue->head = head; - tmp_qp->stats.dequeued_count += msg_counter; - rx_queue->nb_processed_responses += msg_counter; - tmp_qp->inflights16 -= msg_counter; - - if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) - rxq_free_desc(tmp_qp, rx_queue); - } - /* also check if tail needs to be advanced */ - if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH && - tx_queue->tail != tx_queue->csr_tail) { - txq_write_tail(tmp_qp, tx_queue); - } - return msg_counter; -} - -static
inline int -qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start, - struct qat_alg_buf_list *list, uint32_t data_len) -{ - int nr = 1; - - uint32_t buf_len = rte_pktmbuf_iova(buf) - - buff_start + rte_pktmbuf_data_len(buf); - - list->bufers[0].addr = buff_start; - list->bufers[0].resrvd = 0; - list->bufers[0].len = buf_len; - - if (data_len <= buf_len) { - list->num_bufs = nr; - list->bufers[0].len = data_len; - return 0; - } - - buf = buf->next; - while (buf) { - if (unlikely(nr == QAT_SGL_MAX_NUMBER)) { - PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL" - " entry(%u)", - QAT_SGL_MAX_NUMBER); - return -EINVAL; - } - - list->bufers[nr].len = rte_pktmbuf_data_len(buf); - list->bufers[nr].resrvd = 0; - list->bufers[nr].addr = rte_pktmbuf_iova(buf); - - buf_len += list->bufers[nr].len; - buf = buf->next; - - if (buf_len > data_len) { - list->bufers[nr].len -= - buf_len - data_len; - buf = NULL; - } - ++nr; - } - list->num_bufs = nr; - - return 0; -} - -static inline void -set_cipher_iv(uint16_t iv_length, uint16_t iv_offset, - struct icp_qat_fw_la_cipher_req_params *cipher_param, - struct rte_crypto_op *op, - struct icp_qat_fw_la_bulk_req *qat_req) -{ - /* copy IV into request if it fits */ - if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) { - rte_memcpy(cipher_param->u.cipher_IV_array, - rte_crypto_op_ctod_offset(op, uint8_t *, - iv_offset), - iv_length); - } else { - ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_CIPH_IV_64BIT_PTR); - cipher_param->u.s.cipher_IV_ptr = - rte_crypto_op_ctophys_offset(op, - iv_offset); - } -} - -/** Set IV for CCM is special case, 0th byte is set to q-1 - * where q is padding of nonce in 16 byte block - */ -static inline void -set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset, - struct icp_qat_fw_la_cipher_req_params *cipher_param, - struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz) -{ - rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) + - ICP_QAT_HW_CCM_NONCE_OFFSET, - rte_crypto_op_ctod_offset(op, uint8_t *, - iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET, - iv_length); - *(uint8_t *)&cipher_param->u.cipher_IV_array[0] = - q - ICP_QAT_HW_CCM_NONCE_OFFSET; - - if (aad_len_field_sz) - rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET], - rte_crypto_op_ctod_offset(op, uint8_t *, - iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET, - iv_length); -} - -static inline int -qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg, - struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp) -{ - int ret = 0; - struct qat_session *ctx; - struct icp_qat_fw_la_cipher_req_params *cipher_param; - struct icp_qat_fw_la_auth_req_params *auth_param; - register struct icp_qat_fw_la_bulk_req *qat_req; - uint8_t do_auth = 0, do_cipher = 0, do_aead = 0; - uint32_t cipher_len = 0, cipher_ofs = 0; - uint32_t auth_len = 0, auth_ofs = 0; - uint32_t min_ofs = 0; - uint64_t src_buf_start = 0, dst_buf_start = 0; - uint8_t do_sgl = 0; - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX - if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) { - PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto " - "operation requests, op (%p) is not a " - "symmetric operation.", op); - return -EINVAL; - } -#endif - if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) { - PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented" - " requests, op (%p) is sessionless.", op); - return -EINVAL; - } - - ctx = (struct qat_session *)get_session_private_data( - op->sym->session, cryptodev_qat_driver_id); - - if 
(unlikely(ctx == NULL)) { - PMD_DRV_LOG(ERR, "Session was not created for this device"); - return -EINVAL; - } - - if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) { - PMD_DRV_LOG(ERR, "Session alg not supported on this device gen"); - op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; - return -EINVAL; - } - - - - qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg; - rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req)); - qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op; - cipher_param = (void *)&qat_req->serv_specif_rqpars; - auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); - - if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || - ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) { - /* AES-GCM or AES-CCM */ - if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || - ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 || - (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128 - && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE - && ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) { - do_aead = 1; - } else { - do_auth = 1; - do_cipher = 1; - } - } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) { - do_auth = 1; - do_cipher = 0; - } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { - do_auth = 0; - do_cipher = 1; - } - - if (do_cipher) { - - if (ctx->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || - ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI || - ctx->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { - - if (unlikely( - (cipher_param->cipher_length % BYTE_LENGTH != 0) - || (cipher_param->cipher_offset - % BYTE_LENGTH != 0))) { - PMD_DRV_LOG(ERR, - "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values"); - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - return -EINVAL; - } - cipher_len = op->sym->cipher.data.length >> 3; - cipher_ofs = op->sym->cipher.data.offset >> 3; - - } else if (ctx->bpi_ctx) { - /* DOCSIS - only send complete blocks to device - * Process any partial block using CFB mode. 
- * Even if 0 complete blocks, still send this to device - * to get into rx queue for post-process and dequeuing - */ - cipher_len = qat_bpicipher_preprocess(ctx, op); - cipher_ofs = op->sym->cipher.data.offset; - } else { - cipher_len = op->sym->cipher.data.length; - cipher_ofs = op->sym->cipher.data.offset; - } - - set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset, - cipher_param, op, qat_req); - min_ofs = cipher_ofs; - } - - if (do_auth) { - - if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 || - ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 || - ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) { - if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0) - || (auth_param->auth_len % BYTE_LENGTH != 0))) { - PMD_DRV_LOG(ERR, - "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values"); - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - return -EINVAL; - } - auth_ofs = op->sym->auth.data.offset >> 3; - auth_len = op->sym->auth.data.length >> 3; - - auth_param->u1.aad_adr = - rte_crypto_op_ctophys_offset(op, - ctx->auth_iv.offset); - - } else if (ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || - ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { - /* AES-GMAC */ - set_cipher_iv(ctx->auth_iv.length, - ctx->auth_iv.offset, - cipher_param, op, qat_req); - auth_ofs = op->sym->auth.data.offset; - auth_len = op->sym->auth.data.length; - - auth_param->u1.aad_adr = 0; - auth_param->u2.aad_sz = 0; - - /* - * If len(iv)==12B fw computes J0 - */ - if (ctx->auth_iv.length == 12) { - ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); - - } - } else { - auth_ofs = op->sym->auth.data.offset; - auth_len = op->sym->auth.data.length; - - } - min_ofs = auth_ofs; - - if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL)) - auth_param->auth_res_addr = - op->sym->auth.digest.phys_addr; - - } - - if (do_aead) { - /* - * This address may be used for setting the AAD physical pointer - * into IV offset from op - */ - rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr; - if (ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || - ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { - /* - * If len(iv)==12B fw computes J0 - */ - if (ctx->cipher_iv.length == 12) { - ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); - } - - set_cipher_iv(ctx->cipher_iv.length, - ctx->cipher_iv.offset, - cipher_param, op, qat_req); - - } else if (ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) { - - /* In case of AES-CCM this may point to user selected memory - * or iv offset in crypto_op - */ - uint8_t *aad_data = op->sym->aead.aad.data; - /* This is the true AAD length; it does not include 18 bytes of - * preceding data - */ - uint8_t aad_ccm_real_len = 0; - - uint8_t aad_len_field_sz = 0; - uint32_t msg_len_be = - rte_bswap32(op->sym->aead.data.length); - - if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) { - aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO; - aad_ccm_real_len = ctx->aad_len - - ICP_QAT_HW_CCM_AAD_B0_LEN - - ICP_QAT_HW_CCM_AAD_LEN_INFO; - } else { - /* - * aad_len is not greater than 18, so there is no actual aad data; - * use IV after op for B0 block - */ - aad_data = rte_crypto_op_ctod_offset(op, uint8_t *, - ctx->cipher_iv.offset); - aad_phys_addr_aead = - rte_crypto_op_ctophys_offset(op, - ctx->cipher_iv.offset); - } - - uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length; - - aad_data[0] = 
ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz, - ctx->digest_length, q); - - if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) { - memcpy(aad_data + ctx->cipher_iv.length + - ICP_QAT_HW_CCM_NONCE_OFFSET - + (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE), - (uint8_t *)&msg_len_be, - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE); - } else { - memcpy(aad_data + ctx->cipher_iv.length + - ICP_QAT_HW_CCM_NONCE_OFFSET, - (uint8_t *)&msg_len_be - + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE - - q), q); - } - - if (aad_len_field_sz > 0) { - *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] - = rte_bswap16(aad_ccm_real_len); - - if ((aad_ccm_real_len + aad_len_field_sz) - % ICP_QAT_HW_CCM_AAD_B0_LEN) { - uint8_t pad_len = 0; - uint8_t pad_idx = 0; - - pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN - - ((aad_ccm_real_len + aad_len_field_sz) % - ICP_QAT_HW_CCM_AAD_B0_LEN); - pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN + - aad_ccm_real_len + aad_len_field_sz; - memset(&aad_data[pad_idx], - 0, pad_len); - } - - } - - set_cipher_iv_ccm(ctx->cipher_iv.length, - ctx->cipher_iv.offset, - cipher_param, op, q, - aad_len_field_sz); - - } - - cipher_len = op->sym->aead.data.length; - cipher_ofs = op->sym->aead.data.offset; - auth_len = op->sym->aead.data.length; - auth_ofs = op->sym->aead.data.offset; - - auth_param->u1.aad_adr = aad_phys_addr_aead; - auth_param->auth_res_addr = op->sym->aead.digest.phys_addr; - min_ofs = op->sym->aead.data.offset; - } - - if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next)) - do_sgl = 1; - - /* adjust for chain case */ - if (do_cipher && do_auth) - min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs; - - if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl)) - min_ofs = 0; - - if (unlikely(op->sym->m_dst != NULL)) { - /* Out-of-place operation (OOP) - * Don't align DMA start. DMA the minimum data-set - * so as not to overwrite data in dest buffer - */ - src_buf_start = - rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs); - dst_buf_start = - rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs); - - } else { - /* In-place operation - * Start DMA at nearest aligned address below min_ofs - */ - src_buf_start = - rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs) - & QAT_64_BTYE_ALIGN_MASK; - - if (unlikely((rte_pktmbuf_iova(op->sym->m_src) - - rte_pktmbuf_headroom(op->sym->m_src)) - > src_buf_start)) { - /* alignment has pushed addr ahead of start of mbuf - * so revert and take the performance hit - */ - src_buf_start = - rte_pktmbuf_iova_offset(op->sym->m_src, - min_ofs); - } - dst_buf_start = src_buf_start; - } - - if (do_cipher || do_aead) { - cipher_param->cipher_offset = - (uint32_t)rte_pktmbuf_iova_offset( - op->sym->m_src, cipher_ofs) - src_buf_start; - cipher_param->cipher_length = cipher_len; - } else { - cipher_param->cipher_offset = 0; - cipher_param->cipher_length = 0; - } - - if (do_auth || do_aead) { - auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset( - op->sym->m_src, auth_ofs) - src_buf_start; - auth_param->auth_len = auth_len; - } else { - auth_param->auth_off = 0; - auth_param->auth_len = 0; - } - - qat_req->comn_mid.dst_length = - qat_req->comn_mid.src_length = - (cipher_param->cipher_offset + cipher_param->cipher_length) - > (auth_param->auth_off + auth_param->auth_len) ? 
- (cipher_param->cipher_offset + cipher_param->cipher_length) - : (auth_param->auth_off + auth_param->auth_len); - - if (do_sgl) { - - ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags, - QAT_COMN_PTR_TYPE_SGL); - ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start, - &qat_op_cookie->qat_sgl_list_src, - qat_req->comn_mid.src_length); - if (ret) { - PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array"); - return ret; - } - - if (likely(op->sym->m_dst == NULL)) - qat_req->comn_mid.dest_data_addr = - qat_req->comn_mid.src_data_addr = - qat_op_cookie->qat_sgl_src_phys_addr; - else { - ret = qat_sgl_fill_array(op->sym->m_dst, - dst_buf_start, - &qat_op_cookie->qat_sgl_list_dst, - qat_req->comn_mid.dst_length); - - if (ret) { - PMD_DRV_LOG(ERR, "QAT PMD Cannot " - "fill sgl array"); - return ret; - } - - qat_req->comn_mid.src_data_addr = - qat_op_cookie->qat_sgl_src_phys_addr; - qat_req->comn_mid.dest_data_addr = - qat_op_cookie->qat_sgl_dst_phys_addr; - } - } else { - qat_req->comn_mid.src_data_addr = src_buf_start; - qat_req->comn_mid.dest_data_addr = dst_buf_start; - } - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX - rte_hexdump(stdout, "qat_req:", qat_req, - sizeof(struct icp_qat_fw_la_bulk_req)); - rte_hexdump(stdout, "src_data:", - rte_pktmbuf_mtod(op->sym->m_src, uint8_t*), - rte_pktmbuf_data_len(op->sym->m_src)); - if (do_cipher) { - uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op, - uint8_t *, - ctx->cipher_iv.offset); - rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr, - ctx->cipher_iv.length); - } - - if (do_auth) { - if (ctx->auth_iv.length) { - uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op, - uint8_t *, - ctx->auth_iv.offset); - rte_hexdump(stdout, "auth iv:", auth_iv_ptr, - ctx->auth_iv.length); - } - rte_hexdump(stdout, "digest:", op->sym->auth.digest.data, - ctx->digest_length); - } - - if (do_aead) { - rte_hexdump(stdout, "digest:", op->sym->aead.digest.data, - ctx->digest_length); - rte_hexdump(stdout, "aad:", op->sym->aead.aad.data, - ctx->aad_len); - } -#endif - return 0; -} - -static inline uint32_t adf_modulo(uint32_t data, uint32_t shift) -{ - uint32_t div = data >> shift; - uint32_t mult = div << shift; - - return data - mult; -} - -int qat_dev_config(__rte_unused struct rte_cryptodev *dev, - __rte_unused struct rte_cryptodev_config *config) -{ - PMD_INIT_FUNC_TRACE(); - return 0; -} - -int qat_dev_start(__rte_unused struct rte_cryptodev *dev) -{ - PMD_INIT_FUNC_TRACE(); - return 0; -} - -void qat_dev_stop(__rte_unused struct rte_cryptodev *dev) -{ - PMD_INIT_FUNC_TRACE(); -} - -int qat_dev_close(struct rte_cryptodev *dev) -{ - int i, ret; - - PMD_INIT_FUNC_TRACE(); - - for (i = 0; i < dev->data->nb_queue_pairs; i++) { - ret = qat_crypto_sym_qp_release(dev, i); - if (ret < 0) - return ret; - } - - return 0; -} - -void qat_dev_info_get(struct rte_cryptodev *dev, - struct rte_cryptodev_info *info) -{ - struct qat_pmd_private *internals = dev->data->dev_private; - - PMD_INIT_FUNC_TRACE(); - if (info != NULL) { - info->max_nb_queue_pairs = - ADF_NUM_SYM_QPS_PER_BUNDLE * - ADF_NUM_BUNDLES_PER_DEV; - info->feature_flags = dev->feature_flags; - info->capabilities = internals->qat_dev_capabilities; - info->sym.max_nb_sessions = internals->max_nb_sessions; - info->driver_id = cryptodev_qat_driver_id; - info->pci_dev = RTE_DEV_TO_PCI(dev->device); - } -} - -void qat_crypto_sym_stats_get(struct rte_cryptodev *dev, - struct rte_cryptodev_stats *stats) -{ - int i; - struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs); - - PMD_INIT_FUNC_TRACE(); - if (stats == 
NULL) { - PMD_DRV_LOG(ERR, "invalid stats ptr NULL"); - return; - } - for (i = 0; i < dev->data->nb_queue_pairs; i++) { - if (qp[i] == NULL) { - PMD_DRV_LOG(DEBUG, "Uninitialised queue pair"); - continue; - } - - stats->enqueued_count += qp[i]->stats.enqueued_count; - stats->dequeued_count += qp[i]->stats.dequeued_count; - stats->enqueue_err_count += qp[i]->stats.enqueue_err_count; - stats->dequeue_err_count += qp[i]->stats.dequeue_err_count; - } -} - -void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev) -{ - int i; - struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs); - - PMD_INIT_FUNC_TRACE(); - for (i = 0; i < dev->data->nb_queue_pairs; i++) - memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats)); - PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared"); -} diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h deleted file mode 100644 index c182af2e..00000000 --- a/drivers/crypto/qat/qat_crypto.h +++ /dev/null @@ -1,150 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015-2016 Intel Corporation - */ - -#ifndef _QAT_CRYPTO_H_ -#define _QAT_CRYPTO_H_ - -#include -#include - -#include "qat_crypto_capabilities.h" - -#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat -/**< Intel QAT Symmetric Crypto PMD device name */ - -/* - * This macro rounds up a number to be a multiple of - * the alignment when the alignment is a power of 2 - */ -#define ALIGN_POW2_ROUNDUP(num, align) \ - (((num) + (align) - 1) & ~((align) - 1)) -#define QAT_64_BTYE_ALIGN_MASK (~0x3f) - -#define QAT_CSR_HEAD_WRITE_THRESH 32U -/* number of requests to accumulate before writing head CSR */ -#define QAT_CSR_TAIL_WRITE_THRESH 32U -/* number of requests to accumulate before writing tail CSR */ -#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U -/* number of inflights below which no tail write coalescing should occur */ - -struct qat_session; - -enum qat_device_gen { - QAT_GEN1 = 1, - QAT_GEN2, -}; - -/** - * Structure associated with each queue. 
- */ -struct qat_queue { - char memz_name[RTE_MEMZONE_NAMESIZE]; - void *base_addr; /* Base address */ - rte_iova_t base_phys_addr; /* Queue physical address */ - uint32_t head; /* Shadow copy of the head */ - uint32_t tail; /* Shadow copy of the tail */ - uint32_t modulo; - uint32_t msg_size; - uint16_t max_inflights; - uint32_t queue_size; - uint8_t hw_bundle_number; - uint8_t hw_queue_number; - /* HW queue aka ring offset on bundle */ - uint32_t csr_head; /* last written head value */ - uint32_t csr_tail; /* last written tail value */ - uint16_t nb_processed_responses; - /* number of responses processed since last CSR head write */ - uint16_t nb_pending_requests; - /* number of requests pending since last CSR tail write */ -}; - -struct qat_qp { - void *mmap_bar_addr; - uint16_t inflights16; - struct qat_queue tx_q; - struct qat_queue rx_q; - struct rte_cryptodev_stats stats; - struct rte_mempool *op_cookie_pool; - void **op_cookies; - uint32_t nb_descriptors; - enum qat_device_gen qat_dev_gen; -} __rte_cache_aligned; - -/** private data structure for each QAT device */ -struct qat_pmd_private { - unsigned max_nb_queue_pairs; - /**< Max number of queue pairs supported by device */ - unsigned max_nb_sessions; - /**< Max number of sessions supported by device */ - enum qat_device_gen qat_dev_gen; - /**< QAT device generation */ - const struct rte_cryptodev_capabilities *qat_dev_capabilities; -}; - -extern uint8_t cryptodev_qat_driver_id; - -int qat_dev_config(struct rte_cryptodev *dev, - struct rte_cryptodev_config *config); -int qat_dev_start(struct rte_cryptodev *dev); -void qat_dev_stop(struct rte_cryptodev *dev); -int qat_dev_close(struct rte_cryptodev *dev); -void qat_dev_info_get(struct rte_cryptodev *dev, - struct rte_cryptodev_info *info); - -void qat_crypto_sym_stats_get(struct rte_cryptodev *dev, - struct rte_cryptodev_stats *stats); -void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev); - -int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id, - const struct rte_cryptodev_qp_conf *rx_conf, int socket_id, - struct rte_mempool *session_pool); -int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, - uint16_t queue_pair_id); - -int -qat_pmd_session_mempool_create(struct rte_cryptodev *dev, - unsigned nb_objs, unsigned obj_cache_size, int socket_id); - -extern unsigned -qat_crypto_sym_get_session_private_size(struct rte_cryptodev *dev); - -extern int -qat_crypto_sym_configure_session(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct rte_cryptodev_sym_session *sess, - struct rte_mempool *mempool); - - -int -qat_crypto_set_session_parameters(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, void *session_private); - -int -qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform, - struct qat_session *session); - -int -qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct qat_session *session); - -int -qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct qat_session *session); - - -extern void -qat_crypto_sym_clear_session(struct rte_cryptodev *dev, - struct rte_cryptodev_sym_session *session); - -extern uint16_t -qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops); - -extern uint16_t -qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops); - -#endif /* _QAT_CRYPTO_H_ */ diff --git 
a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h deleted file mode 100644 index 37a6b7cb..00000000 --- a/drivers/crypto/qat/qat_crypto_capabilities.h +++ /dev/null @@ -1,557 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017 Intel Corporation - */ - -#ifndef _QAT_CRYPTO_CAPABILITIES_H_ -#define _QAT_CRYPTO_CAPABILITIES_H_ - -#define QAT_BASE_GEN1_SYM_CAPABILITIES \ - { /* SHA1 HMAC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \ - .block_size = 64, \ - .key_size = { \ - .min = 1, \ - .max = 64, \ - .increment = 1 \ - }, \ - .digest_size = { \ - .min = 1, \ - .max = 20, \ - .increment = 1 \ - }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* SHA224 HMAC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \ - .block_size = 64, \ - .key_size = { \ - .min = 1, \ - .max = 64, \ - .increment = 1 \ - }, \ - .digest_size = { \ - .min = 1, \ - .max = 28, \ - .increment = 1 \ - }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* SHA256 HMAC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \ - .block_size = 64, \ - .key_size = { \ - .min = 1, \ - .max = 64, \ - .increment = 1 \ - }, \ - .digest_size = { \ - .min = 1, \ - .max = 32, \ - .increment = 1 \ - }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* SHA384 HMAC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \ - .block_size = 128, \ - .key_size = { \ - .min = 1, \ - .max = 128, \ - .increment = 1 \ - }, \ - .digest_size = { \ - .min = 1, \ - .max = 48, \ - .increment = 1 \ - }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* SHA512 HMAC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \ - .block_size = 128, \ - .key_size = { \ - .min = 1, \ - .max = 128, \ - .increment = 1 \ - }, \ - .digest_size = { \ - .min = 1, \ - .max = 64, \ - .increment = 1 \ - }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* MD5 HMAC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \ - .block_size = 64, \ - .key_size = { \ - .min = 1, \ - .max = 64, \ - .increment = 1 \ - }, \ - .digest_size = { \ - .min = 1, \ - .max = 16, \ - .increment = 1 \ - }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* AES XCBC MAC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .digest_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .aad_size = { 0 }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* AES CCM */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \ - {.aead = { \ - .algo = RTE_CRYPTO_AEAD_AES_CCM, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .digest_size = { \ - .min = 4, \ - .max = 16, \ - 
.increment = 2 \ - }, \ - .aad_size = { \ - .min = 0, \ - .max = 224, \ - .increment = 1 \ - }, \ - .iv_size = { \ - .min = 7, \ - .max = 13, \ - .increment = 1 \ - }, \ - }, } \ - }, } \ - }, \ - { /* AES GCM */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \ - {.aead = { \ - .algo = RTE_CRYPTO_AEAD_AES_GCM, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 32, \ - .increment = 8 \ - }, \ - .digest_size = { \ - .min = 8, \ - .max = 16, \ - .increment = 4 \ - }, \ - .aad_size = { \ - .min = 0, \ - .max = 240, \ - .increment = 1 \ - }, \ - .iv_size = { \ - .min = 12, \ - .max = 12, \ - .increment = 0 \ - }, \ - }, } \ - }, } \ - }, \ - { /* AES GMAC (AUTH) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_AES_GMAC, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 32, \ - .increment = 8 \ - }, \ - .digest_size = { \ - .min = 8, \ - .max = 16, \ - .increment = 4 \ - }, \ - .iv_size = { \ - .min = 12, \ - .max = 12, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* SNOW 3G (UIA2) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .digest_size = { \ - .min = 4, \ - .max = 4, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* AES CBC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_AES_CBC, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 32, \ - .increment = 8 \ - }, \ - .iv_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* AES DOCSIS BPI */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* SNOW 3G (UEA2) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* AES CTR */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_AES_CTR, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 32, \ - .increment = 8 \ - }, \ - .iv_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* NULL (AUTH) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_NULL, \ - .block_size = 1, \ - .key_size = { \ - .min = 0, \ - .max = 0, \ - .increment = 0 \ - }, \ - .digest_size = { \ - .min = 0, \ - .max = 0, \ - .increment = 0 \ - }, \ - .iv_size = { 0 } \ - }, }, \ - }, }, \ - }, \ - { /* NULL (CIPHER) */ \ - .op = 
RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_NULL, \ - .block_size = 1, \ - .key_size = { \ - .min = 0, \ - .max = 0, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 0, \ - .max = 0, \ - .increment = 0 \ - } \ - }, }, \ - }, } \ - }, \ - { /* KASUMI (F8) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_KASUMI_F8, \ - .block_size = 8, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 8, \ - .max = 8, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* KASUMI (F9) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_KASUMI_F9, \ - .block_size = 8, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .digest_size = { \ - .min = 4, \ - .max = 4, \ - .increment = 0 \ - }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* 3DES CBC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \ - .block_size = 8, \ - .key_size = { \ - .min = 16, \ - .max = 24, \ - .increment = 8 \ - }, \ - .iv_size = { \ - .min = 8, \ - .max = 8, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* 3DES CTR */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_3DES_CTR, \ - .block_size = 8, \ - .key_size = { \ - .min = 16, \ - .max = 24, \ - .increment = 8 \ - }, \ - .iv_size = { \ - .min = 8, \ - .max = 8, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* DES CBC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_DES_CBC, \ - .block_size = 8, \ - .key_size = { \ - .min = 8, \ - .max = 8, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 8, \ - .max = 8, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* DES DOCSISBPI */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,\ - .block_size = 8, \ - .key_size = { \ - .min = 8, \ - .max = 8, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 8, \ - .max = 8, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - } - -#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \ - { /* ZUC (EEA3) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* ZUC (EIA3) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_ZUC_EIA3, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .digest_size = { \ - .min = 4, \ - .max = 4, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - } - -#endif /* _QAT_CRYPTO_CAPABILITIES_H_ */ diff --git a/drivers/crypto/qat/qat_logs.h b/drivers/crypto/qat/qat_logs.h 
deleted file mode 100644 index 565089a4..00000000 --- a/drivers/crypto/qat/qat_logs.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2015 Intel Corporation - */ - -#ifndef _QAT_LOGS_H_ -#define _QAT_LOGS_H_ - -#define PMD_INIT_LOG(level, fmt, args...) \ - rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \ - "PMD: %s(): " fmt "\n", __func__, ##args) - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_INIT -#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") -#else -#define PMD_INIT_FUNC_TRACE() do { } while (0) -#endif - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX -#define PMD_RX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_RX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX -#define PMD_TX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_TX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX_FREE -#define PMD_TX_FREE_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER -#define PMD_DRV_LOG_RAW(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) -#else -#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) -#endif - -#define PMD_DRV_LOG(level, fmt, args...) \ - PMD_DRV_LOG_RAW(level, fmt "\n", ## args) - -#endif /* _QAT_LOGS_H_ */ diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c deleted file mode 100644 index 87b9ce0b..00000000 --- a/drivers/crypto/qat/qat_qp.c +++ /dev/null @@ -1,470 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2015 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "qat_logs.h" -#include "qat_crypto.h" -#include "qat_algs.h" -#include "adf_transport_access_macros.h" - -#define ADF_MAX_SYM_DESC 4096 -#define ADF_MIN_SYM_DESC 128 -#define ADF_SYM_TX_RING_DESC_SIZE 128 -#define ADF_SYM_RX_RING_DESC_SIZE 32 -#define ADF_SYM_TX_QUEUE_STARTOFF 2 -/* Offset from bundle start to 1st Sym Tx queue */ -#define ADF_SYM_RX_QUEUE_STARTOFF 10 -#define ADF_ARB_REG_SLOT 0x1000 -#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C - -#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ - ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ - (ADF_ARB_REG_SLOT * index), value) - -static int qat_qp_check_queue_alignment(uint64_t phys_addr, - uint32_t queue_size_bytes); -static int qat_tx_queue_create(struct rte_cryptodev *dev, - struct qat_queue *queue, uint8_t id, uint32_t nb_desc, - int socket_id); -static int qat_rx_queue_create(struct rte_cryptodev *dev, - struct qat_queue *queue, uint8_t id, uint32_t nb_desc, - int socket_id); -static void qat_queue_delete(struct qat_queue *queue); -static int qat_queue_create(struct rte_cryptodev *dev, - struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size, - int socket_id); -static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num, - uint32_t *queue_size_for_csr); -static void adf_configure_queues(struct qat_qp *queue); -static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr); -static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr); - -static const struct rte_memzone * -queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size, - int socket_id) -{ - const struct 
rte_memzone *mz; - unsigned memzone_flags = 0; - const struct rte_memseg *ms; - - PMD_INIT_FUNC_TRACE(); - mz = rte_memzone_lookup(queue_name); - if (mz != 0) { - if (((size_t)queue_size <= mz->len) && - ((socket_id == SOCKET_ID_ANY) || - (socket_id == mz->socket_id))) { - PMD_DRV_LOG(DEBUG, "re-use memzone already " - "allocated for %s", queue_name); - return mz; - } - - PMD_DRV_LOG(ERR, "Incompatible memzone already " - "allocated %s, size %u, socket %d. " - "Requested size %u, socket %u", - queue_name, (uint32_t)mz->len, - mz->socket_id, queue_size, socket_id); - return NULL; - } - - PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u", - queue_name, queue_size, socket_id); - ms = rte_eal_get_physmem_layout(); - switch (ms[0].hugepage_sz) { - case(RTE_PGSIZE_2M): - memzone_flags = RTE_MEMZONE_2MB; - break; - case(RTE_PGSIZE_1G): - memzone_flags = RTE_MEMZONE_1GB; - break; - case(RTE_PGSIZE_16M): - memzone_flags = RTE_MEMZONE_16MB; - break; - case(RTE_PGSIZE_16G): - memzone_flags = RTE_MEMZONE_16GB; - break; - default: - memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY; - } - return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id, - memzone_flags, queue_size); -} - -int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id, - const struct rte_cryptodev_qp_conf *qp_conf, - int socket_id, struct rte_mempool *session_pool __rte_unused) -{ - struct qat_qp *qp; - struct rte_pci_device *pci_dev; - int ret; - char op_cookie_pool_name[RTE_RING_NAMESIZE]; - uint32_t i; - - PMD_INIT_FUNC_TRACE(); - - /* If qp is already in use free ring memory and qp metadata. */ - if (dev->data->queue_pairs[queue_pair_id] != NULL) { - ret = qat_crypto_sym_qp_release(dev, queue_pair_id); - if (ret < 0) - return ret; - } - - if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) || - (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) { - PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors", - qp_conf->nb_descriptors); - return -EINVAL; - } - - pci_dev = RTE_DEV_TO_PCI(dev->device); - - if (pci_dev->mem_resource[0].addr == NULL) { - PMD_DRV_LOG(ERR, "Could not find VF config space " - "(UIO driver attached?)."); - return -EINVAL; - } - - if (queue_pair_id >= - (ADF_NUM_SYM_QPS_PER_BUNDLE * - ADF_NUM_BUNDLES_PER_DEV)) { - PMD_DRV_LOG(ERR, "qp_id %u invalid for this device", - queue_pair_id); - return -EINVAL; - } - /* Allocate the queue pair data structure. 
*/ - qp = rte_zmalloc("qat PMD qp metadata", - sizeof(*qp), RTE_CACHE_LINE_SIZE); - if (qp == NULL) { - PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct"); - return -ENOMEM; - } - qp->nb_descriptors = qp_conf->nb_descriptors; - qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer", - qp_conf->nb_descriptors * sizeof(*qp->op_cookies), - RTE_CACHE_LINE_SIZE); - if (qp->op_cookies == NULL) { - PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie"); - rte_free(qp); - return -ENOMEM; - } - - qp->mmap_bar_addr = pci_dev->mem_resource[0].addr; - qp->inflights16 = 0; - - if (qat_tx_queue_create(dev, &(qp->tx_q), - queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) { - PMD_INIT_LOG(ERR, "Tx queue create failed " - "queue_pair_id=%u", queue_pair_id); - goto create_err; - } - - if (qat_rx_queue_create(dev, &(qp->rx_q), - queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) { - PMD_DRV_LOG(ERR, "Rx queue create failed " - "queue_pair_id=%hu", queue_pair_id); - qat_queue_delete(&(qp->tx_q)); - goto create_err; - } - - adf_configure_queues(qp); - adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr); - snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu", - pci_dev->driver->driver.name, dev->data->dev_id, - queue_pair_id); - - qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name); - if (qp->op_cookie_pool == NULL) - qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name, - qp->nb_descriptors, - sizeof(struct qat_crypto_op_cookie), 64, 0, - NULL, NULL, NULL, NULL, socket_id, - 0); - if (!qp->op_cookie_pool) { - PMD_DRV_LOG(ERR, "QAT PMD Cannot create" - " op mempool"); - goto create_err; - } - - for (i = 0; i < qp->nb_descriptors; i++) { - if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) { - PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie"); - goto create_err; - } - - struct qat_crypto_op_cookie *sql_cookie = - qp->op_cookies[i]; - - sql_cookie->qat_sgl_src_phys_addr = - rte_mempool_virt2iova(sql_cookie) + - offsetof(struct qat_crypto_op_cookie, - qat_sgl_list_src); - - sql_cookie->qat_sgl_dst_phys_addr = - rte_mempool_virt2iova(sql_cookie) + - offsetof(struct qat_crypto_op_cookie, - qat_sgl_list_dst); - } - - struct qat_pmd_private *internals - = dev->data->dev_private; - qp->qat_dev_gen = internals->qat_dev_gen; - - dev->data->queue_pairs[queue_pair_id] = qp; - return 0; - -create_err: - if (qp->op_cookie_pool) - rte_mempool_free(qp->op_cookie_pool); - rte_free(qp->op_cookies); - rte_free(qp); - return -EFAULT; -} - -int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) -{ - struct qat_qp *qp = - (struct qat_qp *)dev->data->queue_pairs[queue_pair_id]; - uint32_t i; - - PMD_INIT_FUNC_TRACE(); - if (qp == NULL) { - PMD_DRV_LOG(DEBUG, "qp already freed"); - return 0; - } - - /* Don't free memory if there are still responses to be processed */ - if (qp->inflights16 == 0) { - qat_queue_delete(&(qp->tx_q)); - qat_queue_delete(&(qp->rx_q)); - } else { - return -EAGAIN; - } - - adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr); - - for (i = 0; i < qp->nb_descriptors; i++) - rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]); - - if (qp->op_cookie_pool) - rte_mempool_free(qp->op_cookie_pool); - - rte_free(qp->op_cookies); - rte_free(qp); - dev->data->queue_pairs[queue_pair_id] = NULL; - return 0; -} - -static int qat_tx_queue_create(struct rte_cryptodev *dev, - struct qat_queue *queue, uint8_t qp_id, - uint32_t nb_desc, int socket_id) -{ - PMD_INIT_FUNC_TRACE(); - queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE; - 
queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) + - ADF_SYM_TX_QUEUE_STARTOFF; - PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u", - nb_desc, qp_id, queue->hw_bundle_number, - queue->hw_queue_number); - - return qat_queue_create(dev, queue, nb_desc, - ADF_SYM_TX_RING_DESC_SIZE, socket_id); -} - -static int qat_rx_queue_create(struct rte_cryptodev *dev, - struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc, - int socket_id) -{ - PMD_INIT_FUNC_TRACE(); - queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE; - queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) + - ADF_SYM_RX_QUEUE_STARTOFF; - - PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u", - nb_desc, qp_id, queue->hw_bundle_number, - queue->hw_queue_number); - return qat_queue_create(dev, queue, nb_desc, - ADF_SYM_RX_RING_DESC_SIZE, socket_id); -} - -static void qat_queue_delete(struct qat_queue *queue) -{ - const struct rte_memzone *mz; - int status = 0; - - if (queue == NULL) { - PMD_DRV_LOG(DEBUG, "Invalid queue"); - return; - } - mz = rte_memzone_lookup(queue->memz_name); - if (mz != NULL) { - /* Write an unused pattern to the queue memory. */ - memset(queue->base_addr, 0x7F, queue->queue_size); - status = rte_memzone_free(mz); - if (status != 0) - PMD_DRV_LOG(ERR, "Error %d on freeing queue %s", - status, queue->memz_name); - } else { - PMD_DRV_LOG(DEBUG, "queue %s doesn't exist", - queue->memz_name); - } -} - -static int -qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue, - uint32_t nb_desc, uint8_t desc_size, int socket_id) -{ - uint64_t queue_base; - void *io_addr; - const struct rte_memzone *qp_mz; - uint32_t queue_size_bytes = nb_desc*desc_size; - struct rte_pci_device *pci_dev; - - PMD_INIT_FUNC_TRACE(); - if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) { - PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size); - return -EINVAL; - } - - pci_dev = RTE_DEV_TO_PCI(dev->device); - - /* - * Allocate a memzone for the queue - create a unique name. - */ - snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d", - pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id, - queue->hw_bundle_number, queue->hw_queue_number); - qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes, - socket_id); - if (qp_mz == NULL) { - PMD_DRV_LOG(ERR, "Failed to allocate ring memzone"); - return -ENOMEM; - } - - queue->base_addr = (char *)qp_mz->addr; - queue->base_phys_addr = qp_mz->iova; - if (qat_qp_check_queue_alignment(queue->base_phys_addr, - queue_size_bytes)) { - PMD_DRV_LOG(ERR, "Invalid alignment on queue create " - " 0x%"PRIx64"\n", - queue->base_phys_addr); - return -EFAULT; - } - - if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size)) - != 0) { - PMD_DRV_LOG(ERR, "Invalid num inflights"); - return -EINVAL; - } - - queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size, - ADF_BYTES_TO_MSG_SIZE(desc_size)); - queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size); - PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u," - " msg_size %u, max_inflights %u modulo %u", - queue->queue_size, queue_size_bytes, - nb_desc, desc_size, queue->max_inflights, - queue->modulo); - - if (queue->max_inflights < 2) { - PMD_DRV_LOG(ERR, "Invalid num inflights"); - return -EINVAL; - } - queue->head = 0; - queue->tail = 0; - queue->msg_size = desc_size; - - /* - * Write an unused pattern to the queue memory. 
- */ - memset(queue->base_addr, 0x7F, queue_size_bytes); - - queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr, - queue->queue_size); - - io_addr = pci_dev->mem_resource[0].addr; - - WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number, - queue->hw_queue_number, queue_base); - return 0; -} - -static int qat_qp_check_queue_alignment(uint64_t phys_addr, - uint32_t queue_size_bytes) -{ - PMD_INIT_FUNC_TRACE(); - if (((queue_size_bytes - 1) & phys_addr) != 0) - return -EINVAL; - return 0; -} - -static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num, - uint32_t *p_queue_size_for_csr) -{ - uint8_t i = ADF_MIN_RING_SIZE; - - PMD_INIT_FUNC_TRACE(); - for (; i <= ADF_MAX_RING_SIZE; i++) - if ((msg_size * msg_num) == - (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) { - *p_queue_size_for_csr = i; - return 0; - } - PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num); - return -EINVAL; -} - -static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr) -{ - uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + - (ADF_ARB_REG_SLOT * - txq->hw_bundle_number); - uint32_t value; - - PMD_INIT_FUNC_TRACE(); - value = ADF_CSR_RD(base_addr, arb_csr_offset); - value |= (0x01 << txq->hw_queue_number); - ADF_CSR_WR(base_addr, arb_csr_offset, value); -} - -static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr) -{ - uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + - (ADF_ARB_REG_SLOT * - txq->hw_bundle_number); - uint32_t value; - - PMD_INIT_FUNC_TRACE(); - value = ADF_CSR_RD(base_addr, arb_csr_offset); - value ^= (0x01 << txq->hw_queue_number); - ADF_CSR_WR(base_addr, arb_csr_offset, value); -} - -static void adf_configure_queues(struct qat_qp *qp) -{ - uint32_t queue_config; - struct qat_queue *queue = &qp->tx_q; - - PMD_INIT_FUNC_TRACE(); - queue_config = BUILD_RING_CONFIG(queue->queue_size); - - WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number, - queue->hw_queue_number, queue_config); - - queue = &qp->rx_q; - queue_config = - BUILD_RESP_RING_CONFIG(queue->queue_size, - ADF_RING_NEAR_WATERMARK_512, - ADF_RING_NEAR_WATERMARK_0); - - WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number, - queue->hw_queue_number, queue_config); -} diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c new file mode 100644 index 00000000..10cdf2e1 --- /dev/null +++ b/drivers/crypto/qat/qat_sym.c @@ -0,0 +1,569 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2018 Intel Corporation + */ + +#include + +#include +#include +#include +#include +#include + +#include "qat_sym.h" + +/** Decrypt a single partial block + * Depends on openssl libcrypto + * Uses ECB+XOR to do CFB encryption, same result, more performant + */ +static inline int +bpi_cipher_decrypt(uint8_t *src, uint8_t *dst, + uint8_t *iv, int ivlen, int srclen, + void *bpi_ctx) +{ + EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx; + int encrypted_ivlen; + uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN]; + uint8_t *encr = encrypted_iv; + + /* ECB method: encrypt (not decrypt!) 
the IV, then XOR with plaintext */ + if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen) + <= 0) + goto cipher_decrypt_err; + + for (; srclen != 0; --srclen, ++dst, ++src, ++encr) + *dst = *src ^ *encr; + + return 0; + +cipher_decrypt_err: + QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed"); + return -EINVAL; +} + + +static inline uint32_t +qat_bpicipher_preprocess(struct qat_sym_session *ctx, + struct rte_crypto_op *op) +{ + int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg); + struct rte_crypto_sym_op *sym_op = op->sym; + uint8_t last_block_len = block_len > 0 ? + sym_op->cipher.data.length % block_len : 0; + + if (last_block_len && + ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) { + + /* Decrypt last block */ + uint8_t *last_block, *dst, *iv; + uint32_t last_block_offset = sym_op->cipher.data.offset + + sym_op->cipher.data.length - last_block_len; + last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src, + uint8_t *, last_block_offset); + + if (unlikely(sym_op->m_dst != NULL)) + /* out-of-place operation (OOP) */ + dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst, + uint8_t *, last_block_offset); + else + dst = last_block; + + if (last_block_len < sym_op->cipher.data.length) + /* use previous block ciphertext as IV */ + iv = last_block - block_len; + else + /* runt block, i.e. less than one full block */ + iv = rte_crypto_op_ctod_offset(op, uint8_t *, + ctx->cipher_iv.offset); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:", + last_block, last_block_len); + if (sym_op->m_dst != NULL) + QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:", + dst, last_block_len); +#endif + bpi_cipher_decrypt(last_block, dst, iv, block_len, + last_block_len, ctx->bpi_ctx); +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:", + last_block, last_block_len); + if (sym_op->m_dst != NULL) + QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:", + dst, last_block_len); +#endif + } + + return sym_op->cipher.data.length - last_block_len; +} + +static inline void +set_cipher_iv(uint16_t iv_length, uint16_t iv_offset, + struct icp_qat_fw_la_cipher_req_params *cipher_param, + struct rte_crypto_op *op, + struct icp_qat_fw_la_bulk_req *qat_req) +{ + /* copy IV into request if it fits */ + if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) { + rte_memcpy(cipher_param->u.cipher_IV_array, + rte_crypto_op_ctod_offset(op, uint8_t *, + iv_offset), + iv_length); + } else { + ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( + qat_req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_CIPH_IV_64BIT_PTR); + cipher_param->u.s.cipher_IV_ptr = + rte_crypto_op_ctophys_offset(op, + iv_offset); + } +} + +/** Set IV for CCM is special case, 0th byte is set to q-1 + * where q is padding of nonce in 16 byte block + */ +static inline void +set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset, + struct icp_qat_fw_la_cipher_req_params *cipher_param, + struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz) +{ + rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) + + ICP_QAT_HW_CCM_NONCE_OFFSET, + rte_crypto_op_ctod_offset(op, uint8_t *, + iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET, + iv_length); + *(uint8_t *)&cipher_param->u.cipher_IV_array[0] = + q - ICP_QAT_HW_CCM_NONCE_OFFSET; + + if (aad_len_field_sz) + rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET], + rte_crypto_op_ctod_offset(op, uint8_t *, + iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET, + iv_length); +} + +int 
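
/*
 * A minimal standalone sketch (names are illustrative) of the ECB+XOR
 * construction used by the helpers above, assuming only OpenSSL's EVP
 * API: for at most one cipher block, CFB decryption reduces to
 * C XOR E_k(IV), so a single ECB encryption of the IV produces the
 * keystream and no per-block CFB context is needed.
 */
static inline int
cfb_partial_block_decrypt_sketch(EVP_CIPHER_CTX *ecb_ctx,
		const uint8_t *src, uint8_t *dst,
		const uint8_t *iv, int ivlen, int srclen)
{
	uint8_t keystream[BPI_MAX_ENCR_IV_LEN];
	int len = 0;
	int i;

	if (srclen > ivlen)
		return -EINVAL;	/* only holds for a single partial block */

	/* E_k(IV): an encryption, even on the decrypt path */
	if (EVP_EncryptUpdate(ecb_ctx, keystream, &len, iv, ivlen) <= 0)
		return -EINVAL;

	for (i = 0; i < srclen; i++)
		dst[i] = src[i] ^ keystream[i];

	return 0;
}
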
+qat_sym_build_request(void *in_op, uint8_t *out_msg, + void *op_cookie, enum qat_device_gen qat_dev_gen) +{ + int ret = 0; + struct qat_sym_session *ctx; + struct icp_qat_fw_la_cipher_req_params *cipher_param; + struct icp_qat_fw_la_auth_req_params *auth_param; + register struct icp_qat_fw_la_bulk_req *qat_req; + uint8_t do_auth = 0, do_cipher = 0, do_aead = 0; + uint32_t cipher_len = 0, cipher_ofs = 0; + uint32_t auth_len = 0, auth_ofs = 0; + uint32_t min_ofs = 0; + uint64_t src_buf_start = 0, dst_buf_start = 0; + uint8_t do_sgl = 0; + struct rte_crypto_op *op = (struct rte_crypto_op *)in_op; + struct qat_sym_op_cookie *cookie = + (struct qat_sym_op_cookie *)op_cookie; + + if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) { + QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto " + "operation requests, op (%p) is not a " + "symmetric operation.", op); + return -EINVAL; + } + + if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) { + QAT_DP_LOG(ERR, "QAT PMD only supports session oriented" + " requests, op (%p) is sessionless.", op); + return -EINVAL; + } + + ctx = (struct qat_sym_session *)get_sym_session_private_data( + op->sym->session, cryptodev_qat_driver_id); + + if (unlikely(ctx == NULL)) { + QAT_DP_LOG(ERR, "Session was not created for this device"); + return -EINVAL; + } + + if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) { + QAT_DP_LOG(ERR, "Session alg not supported on this device gen"); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + return -EINVAL; + } + + qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg; + rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req)); + qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op; + cipher_param = (void *)&qat_req->serv_specif_rqpars; + auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); + + if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || + ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) { + /* AES-GCM or AES-CCM */ + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 || + (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128 + && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE + && ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) { + do_aead = 1; + } else { + do_auth = 1; + do_cipher = 1; + } + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) { + do_auth = 1; + do_cipher = 0; + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { + do_auth = 0; + do_cipher = 1; + } + + if (do_cipher) { + + if (ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || + ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI || + ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + + if (unlikely( + (op->sym->cipher.data.length % BYTE_LENGTH != 0) || + (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) { + QAT_DP_LOG(ERR, + "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values"); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + cipher_len = op->sym->cipher.data.length >> 3; + cipher_ofs = op->sym->cipher.data.offset >> 3; + + } else if (ctx->bpi_ctx) { + /* DOCSIS - only send complete blocks to device + * Process any partial block using CFB mode. 
+			 * Even if there are 0 complete blocks, still send
+			 * this to the device so it gets into the rx queue
+			 * for post-processing and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+
+		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
+				cipher_param, op, qat_req);
+		min_ofs = cipher_ofs;
+	}
+
+	if (do_auth) {
+
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+			ctx->qat_hash_alg ==
+				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+			if (unlikely(
+			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
+				QAT_DP_LOG(ERR,
+		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+				return -EINVAL;
+			}
+			auth_ofs = op->sym->auth.data.offset >> 3;
+			auth_len = op->sym->auth.data.length >> 3;
+
+			auth_param->u1.aad_adr =
+					rte_crypto_op_ctophys_offset(op,
+							ctx->auth_iv.offset);
+
+		} else if (ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+				ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			set_cipher_iv(ctx->auth_iv.length,
+				ctx->auth_iv.offset,
+				cipher_param, op, qat_req);
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+
+			auth_param->u1.aad_adr = 0;
+			auth_param->u2.aad_sz = 0;
+
+			/*
+			 * If len(iv)==12B fw computes J0
+			 */
+			if (ctx->auth_iv.length == 12) {
+				ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+					qat_req->comn_hdr.serv_specif_flags,
+					ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+
+			}
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+
+		}
+		min_ofs = auth_ofs;
+
+		if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
+			auth_param->auth_res_addr =
+					op->sym->auth.digest.phys_addr;
+
+	}
+
+	if (do_aead) {
+		/*
+		 * This address may be used for setting the AAD physical
+		 * pointer into the IV offset from the op
+		 */
+		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
+		if (ctx->qat_hash_alg ==
+				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+				ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/*
+			 * If len(iv)==12B fw computes J0
+			 */
+			if (ctx->cipher_iv.length == 12) {
+				ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+					qat_req->comn_hdr.serv_specif_flags,
+					ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+			}
+			set_cipher_iv(ctx->cipher_iv.length,
+					ctx->cipher_iv.offset,
+					cipher_param, op, qat_req);
+
+		} else if (ctx->qat_hash_alg ==
+				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
+
+			/* In case of AES-CCM this may point to user-selected
+			 * memory or to the iv offset in the crypto_op
+			 */
+			uint8_t *aad_data = op->sym->aead.aad.data;
+			/* This is the true AAD length, it does not include
+			 * the 18 bytes of preceding data
+			 */
+			uint8_t aad_ccm_real_len = 0;
+			uint8_t aad_len_field_sz = 0;
+			uint32_t msg_len_be =
+					rte_bswap32(op->sym->aead.data.length);
+
+			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+				aad_ccm_real_len = ctx->aad_len -
+					ICP_QAT_HW_CCM_AAD_B0_LEN -
+					ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			} else {
+				/*
+				 * aad_len is not greater than 18, so there is
+				 * no actual AAD data; use the IV after the op
+				 * for the B0 block
+				 */
+				aad_data = rte_crypto_op_ctod_offset(op,
+						uint8_t *,
+						ctx->cipher_iv.offset);
+				aad_phys_addr_aead =
+						rte_crypto_op_ctophys_offset(op,
+							ctx->cipher_iv.offset);
+			}
+
+			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
+					ctx->cipher_iv.length;
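+
+			/*
+			 * The macro below packs the CCM B0 flags octet as
+			 * defined by NIST SP 800-38C / RFC 3610:
+			 *   bit 6:    Adata (1 if any real AAD is present)
+			 *   bits 5-3: (t - 2) / 2, t = digest (tag) length
+			 *   bits 2-0: q - 1, q = message-length field size
+			 * e.g. t = 16, q = 3 with AAD present gives
+			 * 0x40 | (7 << 3) | 0x02 = 0x7a.
+			 */
+			aad_data[0] =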
ICP_QAT_HW_CCM_BUILD_B0_FLAGS( + aad_len_field_sz, + ctx->digest_length, q); + + if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) { + memcpy(aad_data + ctx->cipher_iv.length + + ICP_QAT_HW_CCM_NONCE_OFFSET + + (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE), + (uint8_t *)&msg_len_be, + ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE); + } else { + memcpy(aad_data + ctx->cipher_iv.length + + ICP_QAT_HW_CCM_NONCE_OFFSET, + (uint8_t *)&msg_len_be + + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE + - q), q); + } + + if (aad_len_field_sz > 0) { + *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] + = rte_bswap16(aad_ccm_real_len); + + if ((aad_ccm_real_len + aad_len_field_sz) + % ICP_QAT_HW_CCM_AAD_B0_LEN) { + uint8_t pad_len = 0; + uint8_t pad_idx = 0; + + pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN - + ((aad_ccm_real_len + aad_len_field_sz) % + ICP_QAT_HW_CCM_AAD_B0_LEN); + pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN + + aad_ccm_real_len + aad_len_field_sz; + memset(&aad_data[pad_idx], + 0, pad_len); + } + + } + + set_cipher_iv_ccm(ctx->cipher_iv.length, + ctx->cipher_iv.offset, + cipher_param, op, q, + aad_len_field_sz); + + } + + cipher_len = op->sym->aead.data.length; + cipher_ofs = op->sym->aead.data.offset; + auth_len = op->sym->aead.data.length; + auth_ofs = op->sym->aead.data.offset; + + auth_param->u1.aad_adr = aad_phys_addr_aead; + auth_param->auth_res_addr = op->sym->aead.digest.phys_addr; + min_ofs = op->sym->aead.data.offset; + } + + if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next)) + do_sgl = 1; + + /* adjust for chain case */ + if (do_cipher && do_auth) + min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs; + + if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl)) + min_ofs = 0; + + if (unlikely(op->sym->m_dst != NULL)) { + /* Out-of-place operation (OOP) + * Don't align DMA start. DMA the minimum data-set + * so as not to overwrite data in dest buffer + */ + src_buf_start = + rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs); + dst_buf_start = + rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs); + + } else { + /* In-place operation + * Start DMA at nearest aligned address below min_ofs + */ + src_buf_start = + rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs) + & QAT_64_BTYE_ALIGN_MASK; + + if (unlikely((rte_pktmbuf_iova(op->sym->m_src) - + rte_pktmbuf_headroom(op->sym->m_src)) + > src_buf_start)) { + /* alignment has pushed addr ahead of start of mbuf + * so revert and take the performance hit + */ + src_buf_start = + rte_pktmbuf_iova_offset(op->sym->m_src, + min_ofs); + } + dst_buf_start = src_buf_start; + } + + if (do_cipher || do_aead) { + cipher_param->cipher_offset = + (uint32_t)rte_pktmbuf_iova_offset( + op->sym->m_src, cipher_ofs) - src_buf_start; + cipher_param->cipher_length = cipher_len; + } else { + cipher_param->cipher_offset = 0; + cipher_param->cipher_length = 0; + } + + if (do_auth || do_aead) { + auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset( + op->sym->m_src, auth_ofs) - src_buf_start; + auth_param->auth_len = auth_len; + } else { + auth_param->auth_off = 0; + auth_param->auth_len = 0; + } + + qat_req->comn_mid.dst_length = + qat_req->comn_mid.src_length = + (cipher_param->cipher_offset + cipher_param->cipher_length) + > (auth_param->auth_off + auth_param->auth_len) ? 
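
/*
 * Worked example for the in-place alignment above, assuming
 * QAT_64_BTYE_ALIGN_MASK clears the low six bits (~0x3f): with
 * iova(m_src) + min_ofs = 0x1f0c5, src_buf_start becomes 0x1f0c0, and
 * the cipher/auth offsets above are computed relative to it (here 0x5),
 * so the device DMAs from a 64-byte aligned address while the request
 * still addresses the exact data. If rounding down lands before the
 * start of the mbuf's buffer (the headroom check above), the unaligned
 * address is kept and the performance benefit is forfeited.
 */
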
+ (cipher_param->cipher_offset + cipher_param->cipher_length) + : (auth_param->auth_off + auth_param->auth_len); + + if (do_sgl) { + + ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags, + QAT_COMN_PTR_TYPE_SGL); + ret = qat_sgl_fill_array(op->sym->m_src, + (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)), + &cookie->qat_sgl_src, + qat_req->comn_mid.src_length, + QAT_SYM_SGL_MAX_NUMBER); + + if (unlikely(ret)) { + QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array"); + return ret; + } + + if (likely(op->sym->m_dst == NULL)) + qat_req->comn_mid.dest_data_addr = + qat_req->comn_mid.src_data_addr = + cookie->qat_sgl_src_phys_addr; + else { + ret = qat_sgl_fill_array(op->sym->m_dst, + (int64_t)(dst_buf_start - + rte_pktmbuf_iova(op->sym->m_dst)), + &cookie->qat_sgl_dst, + qat_req->comn_mid.dst_length, + QAT_SYM_SGL_MAX_NUMBER); + + if (unlikely(ret)) { + QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array"); + return ret; + } + + qat_req->comn_mid.src_data_addr = + cookie->qat_sgl_src_phys_addr; + qat_req->comn_mid.dest_data_addr = + cookie->qat_sgl_dst_phys_addr; + } + } else { + qat_req->comn_mid.src_data_addr = src_buf_start; + qat_req->comn_mid.dest_data_addr = dst_buf_start; + } + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, + sizeof(struct icp_qat_fw_la_bulk_req)); + QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", + rte_pktmbuf_mtod(op->sym->m_src, uint8_t*), + rte_pktmbuf_data_len(op->sym->m_src)); + if (do_cipher) { + uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op, + uint8_t *, + ctx->cipher_iv.offset); + QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr, + ctx->cipher_iv.length); + } + + if (do_auth) { + if (ctx->auth_iv.length) { + uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op, + uint8_t *, + ctx->auth_iv.offset); + QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr, + ctx->auth_iv.length); + } + QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data, + ctx->digest_length); + } + + if (do_aead) { + QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data, + ctx->digest_length); + QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data, + ctx->aad_len); + } +#endif + return 0; +} diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h new file mode 100644 index 00000000..bc6426c3 --- /dev/null +++ b/drivers/crypto/qat/qat_sym.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2018 Intel Corporation + */ + +#ifndef _QAT_SYM_H_ +#define _QAT_SYM_H_ + +#include + +#ifdef BUILD_QAT_SYM +#include + +#include "qat_common.h" +#include "qat_sym_session.h" +#include "qat_sym_pmd.h" +#include "qat_logs.h" + +#define BYTE_LENGTH 8 +/* bpi is only used for partial blocks of DES and AES + * so AES block len can be assumed as max len for iv, src and dst + */ +#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ + +/* + * Maximum number of SGL entries + */ +#define QAT_SYM_SGL_MAX_NUMBER 16 + +struct qat_sym_session; + +struct qat_sym_sgl { + qat_sgl_hdr; + struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER]; +} __rte_packed __rte_cache_aligned; + +struct qat_sym_op_cookie { + struct qat_sym_sgl qat_sgl_src; + struct qat_sym_sgl qat_sgl_dst; + phys_addr_t qat_sgl_src_phys_addr; + phys_addr_t qat_sgl_dst_phys_addr; +}; + +int +qat_sym_build_request(void *in_op, uint8_t *out_msg, + void *op_cookie, enum qat_device_gen qat_dev_gen); + + +/** Encrypt a single partial block + * Depends on openssl libcrypto + * Uses ECB+XOR to do CFB encryption, same result, more performant + */ 
+static inline int +bpi_cipher_encrypt(uint8_t *src, uint8_t *dst, + uint8_t *iv, int ivlen, int srclen, + void *bpi_ctx) +{ + EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx; + int encrypted_ivlen; + uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN]; + uint8_t *encr = encrypted_iv; + + /* ECB method: encrypt the IV, then XOR this with plaintext */ + if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen) + <= 0) + goto cipher_encrypt_err; + + for (; srclen != 0; --srclen, ++dst, ++src, ++encr) + *dst = *src ^ *encr; + + return 0; + +cipher_encrypt_err: + QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed"); + return -EINVAL; +} + +static inline uint32_t +qat_bpicipher_postprocess(struct qat_sym_session *ctx, + struct rte_crypto_op *op) +{ + int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg); + struct rte_crypto_sym_op *sym_op = op->sym; + uint8_t last_block_len = block_len > 0 ? + sym_op->cipher.data.length % block_len : 0; + + if (last_block_len > 0 && + ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) { + + /* Encrypt last block */ + uint8_t *last_block, *dst, *iv; + uint32_t last_block_offset; + + last_block_offset = sym_op->cipher.data.offset + + sym_op->cipher.data.length - last_block_len; + last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src, + uint8_t *, last_block_offset); + + if (unlikely(sym_op->m_dst != NULL)) + /* out-of-place operation (OOP) */ + dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst, + uint8_t *, last_block_offset); + else + dst = last_block; + + if (last_block_len < sym_op->cipher.data.length) + /* use previous block ciphertext as IV */ + iv = dst - block_len; + else + /* runt block, i.e. less than one full block */ + iv = rte_crypto_op_ctod_offset(op, uint8_t *, + ctx->cipher_iv.offset); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:", + last_block, last_block_len); + if (sym_op->m_dst != NULL) + QAT_DP_HEXDUMP_LOG(DEBUG, + "BPI: dst before post-process:", + dst, last_block_len); +#endif + bpi_cipher_encrypt(last_block, dst, iv, block_len, + last_block_len, ctx->bpi_ctx); +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:", + last_block, last_block_len); + if (sym_op->m_dst != NULL) + QAT_DP_HEXDUMP_LOG(DEBUG, + "BPI: dst after post-process:", + dst, last_block_len); +#endif + } + return sym_op->cipher.data.length - last_block_len; +} + +static inline void +qat_sym_process_response(void **op, uint8_t *resp) +{ + + struct icp_qat_fw_comn_resp *resp_msg = + (struct icp_qat_fw_comn_resp *)resp; + struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t) + (resp_msg->opaque_data); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg, + sizeof(struct icp_qat_fw_comn_resp)); +#endif + + if (ICP_QAT_FW_COMN_STATUS_FLAG_OK != + ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET( + resp_msg->comn_hdr.comn_status)) { + + rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; + } else { + struct qat_sym_session *sess = (struct qat_sym_session *) + get_sym_session_private_data( + rx_op->sym->session, + cryptodev_qat_driver_id); + + + if (sess->bpi_ctx) + qat_bpicipher_postprocess(sess, rx_op); + rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + } + *op = (void *)rx_op; +} +#else + +static inline void +qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused) +{ +} +#endif +#endif /* _QAT_SYM_H_ */ diff --git a/drivers/crypto/qat/qat_sym_capabilities.h 
b/drivers/crypto/qat/qat_sym_capabilities.h new file mode 100644 index 00000000..eea08bc7 --- /dev/null +++ b/drivers/crypto/qat/qat_sym_capabilities.h @@ -0,0 +1,557 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017-2018 Intel Corporation + */ + +#ifndef _QAT_SYM_CAPABILITIES_H_ +#define _QAT_SYM_CAPABILITIES_H_ + +#define QAT_BASE_GEN1_SYM_CAPABILITIES \ + { /* SHA1 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 1, \ + .max = 20, \ + .increment = 1 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA224 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 1, \ + .max = 28, \ + .increment = 1 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA256 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 1, \ + .max = 32, \ + .increment = 1 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA384 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \ + .block_size = 128, \ + .key_size = { \ + .min = 1, \ + .max = 128, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 1, \ + .max = 48, \ + .increment = 1 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA512 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \ + .block_size = 128, \ + .key_size = { \ + .min = 1, \ + .max = 128, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* MD5 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 1, \ + .max = 16, \ + .increment = 1 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* AES XCBC MAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .aad_size = { 0 }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* AES CCM */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \ + {.aead = { \ + .algo = RTE_CRYPTO_AEAD_AES_CCM, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 4, \ + .max = 16, \ + .increment = 2 \ + }, \ + .aad_size = { \ + .min = 0, \ + .max = 
224, \ + .increment = 1 \ + }, \ + .iv_size = { \ + .min = 7, \ + .max = 13, \ + .increment = 1 \ + }, \ + }, } \ + }, } \ + }, \ + { /* AES GCM */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \ + {.aead = { \ + .algo = RTE_CRYPTO_AEAD_AES_GCM, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .digest_size = { \ + .min = 8, \ + .max = 16, \ + .increment = 4 \ + }, \ + .aad_size = { \ + .min = 0, \ + .max = 240, \ + .increment = 1 \ + }, \ + .iv_size = { \ + .min = 12, \ + .max = 12, \ + .increment = 0 \ + }, \ + }, } \ + }, } \ + }, \ + { /* AES GMAC (AUTH) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_AES_GMAC, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .digest_size = { \ + .min = 8, \ + .max = 16, \ + .increment = 4 \ + }, \ + .iv_size = { \ + .min = 12, \ + .max = 12, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* SNOW 3G (UIA2) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 4, \ + .max = 4, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* AES CBC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_AES_CBC, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* AES DOCSIS BPI */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* SNOW 3G (UEA2) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* AES CTR */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_AES_CTR, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* NULL (AUTH) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_NULL, \ + .block_size = 1, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .iv_size = { 0 } \ + }, }, \ + }, }, \ + }, \ + { /* NULL (CIPHER) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = 
RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_NULL, \ + .block_size = 1, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + } \ + }, }, \ + }, } \ + }, \ + { /* KASUMI (F8) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_KASUMI_F8, \ + .block_size = 8, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* KASUMI (F9) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_KASUMI_F9, \ + .block_size = 8, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 4, \ + .max = 4, \ + .increment = 0 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* 3DES CBC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \ + .block_size = 8, \ + .key_size = { \ + .min = 8, \ + .max = 24, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* 3DES CTR */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_3DES_CTR, \ + .block_size = 8, \ + .key_size = { \ + .min = 16, \ + .max = 24, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* DES CBC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_DES_CBC, \ + .block_size = 8, \ + .key_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* DES DOCSISBPI */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,\ + .block_size = 8, \ + .key_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + } + +#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \ + { /* ZUC (EEA3) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* ZUC (EIA3) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_ZUC_EIA3, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 4, \ + .max = 4, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + } + +#endif /* _QAT_SYM_CAPABILITIES_H_ */ diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c new file mode 100644 index 00000000..96f442e8 --- /dev/null 
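
The capability tables above feed straight into the cryptodev capability query API. A minimal sketch of how an application might check one entry before creating a session; the helper name is illustrative, the rte_cryptodev calls are the standard ones:

	#include <rte_cryptodev.h>

	/* Sketch: does dev_id advertise AES-CBC with a 16-byte key and IV? */
	static int
	qat_caps_check_aes_cbc(uint8_t dev_id)
	{
		struct rte_cryptodev_sym_capability_idx idx = {
			.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
		};
		const struct rte_cryptodev_symmetric_capability *cap;

		cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
		if (cap == NULL)
			return -1;	/* algorithm not advertised at all */

		/* 0 means the sizes fall inside the advertised ranges */
		return rte_cryptodev_sym_capability_check_cipher(cap, 16, 16);
	}
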
+++ b/drivers/crypto/qat/qat_sym_pmd.c @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include + +#include "qat_logs.h" +#include "qat_sym.h" +#include "qat_sym_session.h" +#include "qat_sym_pmd.h" + +uint8_t cryptodev_qat_driver_id; + +static const struct rte_cryptodev_capabilities qat_gen1_sym_capabilities[] = { + QAT_BASE_GEN1_SYM_CAPABILITIES, + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +static const struct rte_cryptodev_capabilities qat_gen2_sym_capabilities[] = { + QAT_BASE_GEN1_SYM_CAPABILITIES, + QAT_EXTRA_GEN2_SYM_CAPABILITIES, + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +static int qat_sym_qp_release(struct rte_cryptodev *dev, + uint16_t queue_pair_id); + +static int qat_sym_dev_config(__rte_unused struct rte_cryptodev *dev, + __rte_unused struct rte_cryptodev_config *config) +{ + return 0; +} + +static int qat_sym_dev_start(__rte_unused struct rte_cryptodev *dev) +{ + return 0; +} + +static void qat_sym_dev_stop(__rte_unused struct rte_cryptodev *dev) +{ + return; +} + +static int qat_sym_dev_close(struct rte_cryptodev *dev) +{ + int i, ret; + + for (i = 0; i < dev->data->nb_queue_pairs; i++) { + ret = qat_sym_qp_release(dev, i); + if (ret < 0) + return ret; + } + + return 0; +} + +static void qat_sym_dev_info_get(struct rte_cryptodev *dev, + struct rte_cryptodev_info *info) +{ + struct qat_sym_dev_private *internals = dev->data->dev_private; + const struct qat_qp_hw_data *sym_hw_qps = + qat_gen_config[internals->qat_dev->qat_dev_gen] + .qp_hw_data[QAT_SERVICE_SYMMETRIC]; + + if (info != NULL) { + info->max_nb_queue_pairs = + qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC); + info->feature_flags = dev->feature_flags; + info->capabilities = internals->qat_dev_capabilities; + info->driver_id = cryptodev_qat_driver_id; + /* No limit of number of sessions */ + info->sym.max_nb_sessions = 0; + } +} + +static void qat_sym_stats_get(struct rte_cryptodev *dev, + struct rte_cryptodev_stats *stats) +{ + struct qat_common_stats qat_stats = {0}; + struct qat_sym_dev_private *qat_priv; + + if (stats == NULL || dev == NULL) { + QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev); + return; + } + qat_priv = dev->data->dev_private; + + qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_SYMMETRIC); + stats->enqueued_count = qat_stats.enqueued_count; + stats->dequeued_count = qat_stats.dequeued_count; + stats->enqueue_err_count = qat_stats.enqueue_err_count; + stats->dequeue_err_count = qat_stats.dequeue_err_count; +} + +static void qat_sym_stats_reset(struct rte_cryptodev *dev) +{ + struct qat_sym_dev_private *qat_priv; + + if (dev == NULL) { + QAT_LOG(ERR, "invalid cryptodev ptr %p", dev); + return; + } + qat_priv = dev->data->dev_private; + + qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_SYMMETRIC); + +} + +static int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) +{ + struct qat_sym_dev_private *qat_private = dev->data->dev_private; + + QAT_LOG(DEBUG, "Release sym qp %u on device %d", + queue_pair_id, dev->data->dev_id); + + qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][queue_pair_id] + = NULL; + + return qat_qp_release((struct qat_qp **) + &(dev->data->queue_pairs[queue_pair_id])); +} + +static int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, + const struct rte_cryptodev_qp_conf *qp_conf, + int socket_id, struct rte_mempool *session_pool __rte_unused) +{ + struct qat_qp *qp; + int ret = 0; + uint32_t 
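
/*
 * Note on the cookie initialisation in the loop below: each op cookie
 * is a mempool element, so the IOVA of its embedded SGL tables can be
 * computed once at queue-pair setup time as
 *
 *   rte_mempool_virt2iova(cookie)
 *           + offsetof(struct qat_sym_op_cookie, qat_sgl_src)
 *
 * sparing the data path any virtual-to-IOVA translation per request.
 */
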
i; + struct qat_qp_config qat_qp_conf; + + struct qat_qp **qp_addr = + (struct qat_qp **)&(dev->data->queue_pairs[qp_id]); + struct qat_sym_dev_private *qat_private = dev->data->dev_private; + const struct qat_qp_hw_data *sym_hw_qps = + qat_gen_config[qat_private->qat_dev->qat_dev_gen] + .qp_hw_data[QAT_SERVICE_SYMMETRIC]; + const struct qat_qp_hw_data *qp_hw_data = sym_hw_qps + qp_id; + + /* If qp is already in use free ring memory and qp metadata. */ + if (*qp_addr != NULL) { + ret = qat_sym_qp_release(dev, qp_id); + if (ret < 0) + return ret; + } + if (qp_id >= qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC)) { + QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id); + return -EINVAL; + } + + qat_qp_conf.hw = qp_hw_data; + qat_qp_conf.build_request = qat_sym_build_request; + qat_qp_conf.cookie_size = sizeof(struct qat_sym_op_cookie); + qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors; + qat_qp_conf.socket_id = socket_id; + qat_qp_conf.service_str = "sym"; + + ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf); + if (ret != 0) + return ret; + + /* store a link to the qp in the qat_pci_device */ + qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][qp_id] + = *qp_addr; + + qp = (struct qat_qp *)*qp_addr; + + for (i = 0; i < qp->nb_descriptors; i++) { + + struct qat_sym_op_cookie *cookie = + qp->op_cookies[i]; + + cookie->qat_sgl_src_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_sym_op_cookie, + qat_sgl_src); + + cookie->qat_sgl_dst_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_sym_op_cookie, + qat_sgl_dst); + } + + return ret; +} + +static struct rte_cryptodev_ops crypto_qat_ops = { + + /* Device related operations */ + .dev_configure = qat_sym_dev_config, + .dev_start = qat_sym_dev_start, + .dev_stop = qat_sym_dev_stop, + .dev_close = qat_sym_dev_close, + .dev_infos_get = qat_sym_dev_info_get, + + .stats_get = qat_sym_stats_get, + .stats_reset = qat_sym_stats_reset, + .queue_pair_setup = qat_sym_qp_setup, + .queue_pair_release = qat_sym_qp_release, + .queue_pair_count = NULL, + + /* Crypto related operations */ + .sym_session_get_size = qat_sym_session_get_private_size, + .sym_session_configure = qat_sym_session_configure, + .sym_session_clear = qat_sym_session_clear +}; + +static uint16_t +qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + return qat_enqueue_op_burst(qp, (void **)ops, nb_ops); +} + +static uint16_t +qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + return qat_dequeue_op_burst(qp, (void **)ops, nb_ops); +} + +/* An rte_driver is needed in the registration of both the device and the driver + * with cryptodev. + * The actual qat pci's rte_driver can't be used as its name represents + * the whole pci device with all services. Think of this as a holder for a name + * for the crypto part of the pci device. 
+ */ +static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD); +static const struct rte_driver cryptodev_qat_sym_driver = { + .name = qat_sym_drv_name, + .alias = qat_sym_drv_name +}; + +int +qat_sym_dev_create(struct qat_pci_device *qat_pci_dev) +{ + struct rte_cryptodev_pmd_init_params init_params = { + .name = "", + .socket_id = qat_pci_dev->pci_dev->device.numa_node, + .private_data_size = sizeof(struct qat_sym_dev_private) + }; + char name[RTE_CRYPTODEV_NAME_MAX_LEN]; + struct rte_cryptodev *cryptodev; + struct qat_sym_dev_private *internals; + + snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", + qat_pci_dev->name, "sym"); + QAT_LOG(DEBUG, "Creating QAT SYM device %s", name); + + /* Populate subset device to use in cryptodev device creation */ + qat_pci_dev->sym_rte_dev.driver = &cryptodev_qat_sym_driver; + qat_pci_dev->sym_rte_dev.numa_node = + qat_pci_dev->pci_dev->device.numa_node; + qat_pci_dev->sym_rte_dev.devargs = NULL; + + cryptodev = rte_cryptodev_pmd_create(name, + &(qat_pci_dev->sym_rte_dev), &init_params); + + if (cryptodev == NULL) + return -ENODEV; + + qat_pci_dev->sym_rte_dev.name = cryptodev->data->name; + cryptodev->driver_id = cryptodev_qat_driver_id; + cryptodev->dev_ops = &crypto_qat_ops; + + cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst; + cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst; + + cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | + RTE_CRYPTODEV_FF_HW_ACCELERATED | + RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | + RTE_CRYPTODEV_FF_IN_PLACE_SGL | + RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | + RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | + RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | + RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; + + internals = cryptodev->data->dev_private; + internals->qat_dev = qat_pci_dev; + qat_pci_dev->sym_dev = internals; + + internals->sym_dev_id = cryptodev->data->dev_id; + switch (qat_pci_dev->qat_dev_gen) { + case QAT_GEN1: + internals->qat_dev_capabilities = qat_gen1_sym_capabilities; + break; + case QAT_GEN2: + internals->qat_dev_capabilities = qat_gen2_sym_capabilities; + break; + default: + internals->qat_dev_capabilities = qat_gen2_sym_capabilities; + QAT_LOG(DEBUG, + "QAT gen %d capabilities unknown, default to GEN2", + qat_pci_dev->qat_dev_gen); + break; + } + + QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d", + cryptodev->data->name, internals->sym_dev_id); + return 0; +} + +int +qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev) +{ + struct rte_cryptodev *cryptodev; + + if (qat_pci_dev == NULL) + return -ENODEV; + if (qat_pci_dev->sym_dev == NULL) + return 0; + + /* free crypto device */ + cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->sym_dev_id); + rte_cryptodev_pmd_destroy(cryptodev); + qat_pci_dev->sym_rte_dev.name = NULL; + qat_pci_dev->sym_dev = NULL; + + return 0; +} + + +static struct cryptodev_driver qat_crypto_drv; +RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, + cryptodev_qat_sym_driver, + cryptodev_qat_driver_id); diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h new file mode 100644 index 00000000..d3432854 --- /dev/null +++ b/drivers/crypto/qat/qat_sym_pmd.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2018 Intel Corporation + */ + +#ifndef _QAT_SYM_PMD_H_ +#define _QAT_SYM_PMD_H_ + +#ifdef BUILD_QAT_SYM + +#include + +#include "qat_sym_capabilities.h" +#include "qat_device.h" + +/**< Intel(R) QAT Symmetric Crypto PMD device name */ +#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat + 
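
/*
 * Cryptodev sessions are driver-agnostic containers: each PMD keeps its
 * private state in the slot indexed by its driver id, so one session
 * object can be initialised for several devices at once. A minimal
 * sketch of the lookup this PMD performs per request (mirroring
 * qat_sym_build_request()):
 *
 *	struct qat_sym_session *ctx =
 *		get_sym_session_private_data(op->sym->session,
 *				cryptodev_qat_driver_id);
 *	if (ctx == NULL)
 *		return -EINVAL;	(session was not created for this driver)
 */
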
+extern uint8_t cryptodev_qat_driver_id; + +/** private data structure for a QAT device. + * This QAT device is a device offering only symmetric crypto service, + * there can be one of these on each qat_pci_device (VF), + * in future there may also be private data structures for other services. + */ +struct qat_sym_dev_private { + struct qat_pci_device *qat_dev; + /**< The qat pci device hosting the service */ + uint8_t sym_dev_id; + /**< Device instance for this rte_cryptodev */ + const struct rte_cryptodev_capabilities *qat_dev_capabilities; + /* QAT device symmetric crypto capabilities */ +}; + +int +qat_sym_dev_create(struct qat_pci_device *qat_pci_dev); + +int +qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev); + +#endif +#endif /* _QAT_SYM_PMD_H_ */ diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c new file mode 100644 index 00000000..1d58220a --- /dev/null +++ b/drivers/crypto/qat/qat_sym_session.c @@ -0,0 +1,1725 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2015-2018 Intel Corporation + */ + +#include /* Needed to calculate pre-compute values */ +#include /* Needed to calculate pre-compute values */ +#include /* Needed to calculate pre-compute values */ +#include /* Needed for bpi runt block processing */ + +#include +#include +#include +#include +#include +#include +#include + +#include "qat_logs.h" +#include "qat_sym_session.h" +#include "qat_sym_pmd.h" + +/** Frees a context previously created + * Depends on openssl libcrypto + */ +static void +bpi_cipher_ctx_free(void *bpi_ctx) +{ + if (bpi_ctx != NULL) + EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx); +} + +/** Creates a context in either AES or DES in ECB mode + * Depends on openssl libcrypto + */ +static int +bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo, + enum rte_crypto_cipher_operation direction __rte_unused, + uint8_t *key, void **ctx) +{ + const EVP_CIPHER *algo = NULL; + int ret; + *ctx = EVP_CIPHER_CTX_new(); + + if (*ctx == NULL) { + ret = -ENOMEM; + goto ctx_init_err; + } + + if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI) + algo = EVP_des_ecb(); + else + algo = EVP_aes_128_ecb(); + + /* IV will be ECB encrypted whether direction is encrypt or decrypt*/ + if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) { + ret = -EINVAL; + goto ctx_init_err; + } + + return 0; + +ctx_init_err: + if (*ctx != NULL) + EVP_CIPHER_CTX_free(*ctx); + return ret; +} + +static int +qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo, + struct qat_sym_dev_private *internals) +{ + int i = 0; + const struct rte_cryptodev_capabilities *capability; + + while ((capability = &(internals->qat_dev_capabilities[i++]))->op != + RTE_CRYPTO_OP_TYPE_UNDEFINED) { + if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + continue; + + if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) + continue; + + if (capability->sym.cipher.algo == algo) + return 1; + } + return 0; +} + +static int +qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo, + struct qat_sym_dev_private *internals) +{ + int i = 0; + const struct rte_cryptodev_capabilities *capability; + + while ((capability = &(internals->qat_dev_capabilities[i++]))->op != + RTE_CRYPTO_OP_TYPE_UNDEFINED) { + if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + continue; + + if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH) + continue; + + if (capability->sym.auth.algo == algo) + return 1; + } + return 0; +} + +void +qat_sym_session_clear(struct 
rte_cryptodev *dev,
+		struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_sym_session_private_data(sess, index);
+	struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
+
+	if (sess_priv) {
+		if (s->bpi_ctx)
+			bpi_cipher_ctx_free(s->bpi_ctx);
+		memset(s, 0, qat_sym_session_get_private_size(dev));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		set_sym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+}
+
+static int
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+	/* Cipher Only */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
+		return ICP_QAT_FW_LA_CMD_CIPHER;
+
+	/* Authentication Only */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
+		return ICP_QAT_FW_LA_CMD_AUTH;
+
+	/* AEAD */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		/* AES-GCM and AES-CCM work in different directions:
+		 * GCM first encrypts and then generates the hash, whereas
+		 * AES-CCM first generates the hash and then encrypts.
+		 * A similar relation applies to decryption.
+		 */
+		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+			else
+				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+		else
+			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+			else
+				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+	}
+
+	if (xform->next == NULL)
+		return -1;
+
+	/* Cipher then Authenticate */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+
+	/* Authenticate then Cipher */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+
+	return -1;
+}
+
+static struct rte_crypto_auth_xform *
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+	do {
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return &xform->auth;
+
+		xform = xform->next;
+	} while (xform);
+
+	return NULL;
+}
+
+static struct rte_crypto_cipher_xform *
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+	do {
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return &xform->cipher;
+
+		xform = xform->next;
+	} while (xform);
+
+	return NULL;
+}
+
+int
+qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,
+		struct qat_sym_session *session)
+{
+	struct qat_sym_dev_private *internals = dev->data->dev_private;
+	struct rte_crypto_cipher_xform *cipher_xform = NULL;
+	int ret;
+
+	/* Get cipher xform from crypto xform chain */
+	cipher_xform = qat_get_cipher_xform(xform);
+
+	session->cipher_iv.offset = cipher_xform->iv.offset;
+	session->cipher_iv.length = cipher_xform->iv.length;
+
+	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		if (qat_sym_validate_aes_key(cipher_xform->key.length,
+				&session->qat_cipher_alg) != 0) {
+			QAT_LOG(ERR, "Invalid AES cipher key size");
+			ret = -EINVAL;
+			goto error_out;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		if (qat_sym_validate_aes_key(cipher_xform->key.length,
+				&session->qat_cipher_alg) != 0) {
+			QAT_LOG(ERR, "Invalid AES cipher key size");
+			ret = -EINVAL;
+			goto error_out;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+		break;
+	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+		if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid SNOW 3G cipher key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; + break; + case RTE_CRYPTO_CIPHER_NULL: + session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; + break; + case RTE_CRYPTO_CIPHER_KASUMI_F8: + if (qat_sym_validate_kasumi_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid KASUMI cipher key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE; + break; + case RTE_CRYPTO_CIPHER_3DES_CBC: + if (qat_sym_validate_3des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid 3DES cipher key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_DES_CBC: + if (qat_sym_validate_des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid DES cipher key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_3DES_CTR: + if (qat_sym_validate_3des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid 3DES cipher key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; + break; + case RTE_CRYPTO_CIPHER_DES_DOCSISBPI: + ret = bpi_cipher_ctx_init( + cipher_xform->algo, + cipher_xform->op, + cipher_xform->key.data, + &session->bpi_ctx); + if (ret != 0) { + QAT_LOG(ERR, "failed to create DES BPI ctx"); + goto error_out; + } + if (qat_sym_validate_des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid DES cipher key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_AES_DOCSISBPI: + ret = bpi_cipher_ctx_init( + cipher_xform->algo, + cipher_xform->op, + cipher_xform->key.data, + &session->bpi_ctx); + if (ret != 0) { + QAT_LOG(ERR, "failed to create AES BPI ctx"); + goto error_out; + } + if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid AES DOCSISBPI key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_ZUC_EEA3: + if (!qat_is_cipher_alg_supported( + cipher_xform->algo, internals)) { + QAT_LOG(ERR, "%s not supported on this device", + rte_crypto_cipher_algorithm_strings + [cipher_xform->algo]); + ret = -ENOTSUP; + goto error_out; + } + if (qat_sym_validate_zuc_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid ZUC cipher key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; + break; + case RTE_CRYPTO_CIPHER_3DES_ECB: + case RTE_CRYPTO_CIPHER_AES_ECB: + case RTE_CRYPTO_CIPHER_AES_F8: + case RTE_CRYPTO_CIPHER_AES_XTS: + case RTE_CRYPTO_CIPHER_ARC4: + QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u", + cipher_xform->algo); + ret = -ENOTSUP; + goto error_out; + default: + QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n", + cipher_xform->algo); + ret = -EINVAL; + goto error_out; + } + + if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) + session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; + else + session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT; + + if (qat_sym_session_aead_create_cd_cipher(session, + cipher_xform->key.data, + cipher_xform->key.length)) { + ret = 
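
/*
 * qat_sym_validate_aes_key() and the other validators used above are
 * defined elsewhere in this driver; presumably they map the key length
 * to the firmware algorithm enum (for AES: 16/24/32 bytes ->
 * ICP_QAT_HW_CIPHER_ALGO_AES128/192/256) and fail for any other
 * length, which is why each case above only needs the length check.
 */
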
-EINVAL; + goto error_out; + } + + return 0; + +error_out: + if (session->bpi_ctx) { + bpi_cipher_ctx_free(session->bpi_ctx); + session->bpi_ctx = NULL; + } + return ret; +} + +int +qat_sym_session_configure(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *sess, + struct rte_mempool *mempool) +{ + void *sess_private_data; + int ret; + + if (rte_mempool_get(mempool, &sess_private_data)) { + CDEV_LOG_ERR( + "Couldn't get object from session mempool"); + return -ENOMEM; + } + + ret = qat_sym_session_set_parameters(dev, xform, sess_private_data); + if (ret != 0) { + QAT_LOG(ERR, + "Crypto QAT PMD: failed to configure session parameters"); + + /* Return session to mempool */ + rte_mempool_put(mempool, sess_private_data); + return ret; + } + + set_sym_session_private_data(sess, dev->driver_id, + sess_private_data); + + return 0; +} + +int +qat_sym_session_set_parameters(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, void *session_private) +{ + struct qat_sym_session *session = session_private; + int ret; + int qat_cmd_id; + + /* Set context descriptor physical address */ + session->cd_paddr = rte_mempool_virt2iova(session) + + offsetof(struct qat_sym_session, cd); + + session->min_qat_dev_gen = QAT_GEN1; + + /* Get requested QAT command id */ + qat_cmd_id = qat_get_cmd_id(xform); + if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) { + QAT_LOG(ERR, "Unsupported xform chain requested"); + return -ENOTSUP; + } + session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id; + switch (session->qat_cmd) { + case ICP_QAT_FW_LA_CMD_CIPHER: + ret = qat_sym_session_configure_cipher(dev, xform, session); + if (ret < 0) + return ret; + break; + case ICP_QAT_FW_LA_CMD_AUTH: + ret = qat_sym_session_configure_auth(dev, xform, session); + if (ret < 0) + return ret; + break; + case ICP_QAT_FW_LA_CMD_CIPHER_HASH: + if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + ret = qat_sym_session_configure_aead(xform, + session); + if (ret < 0) + return ret; + } else { + ret = qat_sym_session_configure_cipher(dev, + xform, session); + if (ret < 0) + return ret; + ret = qat_sym_session_configure_auth(dev, + xform, session); + if (ret < 0) + return ret; + } + break; + case ICP_QAT_FW_LA_CMD_HASH_CIPHER: + if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + ret = qat_sym_session_configure_aead(xform, + session); + if (ret < 0) + return ret; + } else { + ret = qat_sym_session_configure_auth(dev, + xform, session); + if (ret < 0) + return ret; + ret = qat_sym_session_configure_cipher(dev, + xform, session); + if (ret < 0) + return ret; + } + break; + case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM: + case ICP_QAT_FW_LA_CMD_TRNG_TEST: + case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE: + case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE: + case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE: + case ICP_QAT_FW_LA_CMD_MGF1: + case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP: + case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP: + case ICP_QAT_FW_LA_CMD_DELIMITER: + QAT_LOG(ERR, "Unsupported Service %u", + session->qat_cmd); + return -ENOTSUP; + default: + QAT_LOG(ERR, "Unsupported Service %u", + session->qat_cmd); + return -ENOTSUP; + } + + return 0; +} + +int +qat_sym_session_configure_auth(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct qat_sym_session *session) +{ + struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform); + struct qat_sym_dev_private *internals = dev->data->dev_private; + uint8_t *key_data = auth_xform->key.data; + uint8_t key_length = 
auth_xform->key.length;
+
+	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
+		break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
+		break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		if (qat_sym_validate_aes_key(auth_xform->key.length,
+				&session->qat_cipher_alg) != 0) {
+			QAT_LOG(ERR, "Invalid AES key size");
+			return -EINVAL;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+
+		break;
+	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
+		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
+		break;
+	case RTE_CRYPTO_AUTH_NULL:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
+		break;
+	case RTE_CRYPTO_AUTH_KASUMI_F9:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
+		break;
+	case RTE_CRYPTO_AUTH_ZUC_EIA3:
+		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
+			QAT_LOG(ERR, "%s not supported on this device",
+				rte_crypto_auth_algorithm_strings
+				[auth_xform->algo]);
+			return -ENOTSUP;
+		}
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1:
+	case RTE_CRYPTO_AUTH_SHA256:
+	case RTE_CRYPTO_AUTH_SHA512:
+	case RTE_CRYPTO_AUTH_SHA224:
+	case RTE_CRYPTO_AUTH_SHA384:
+	case RTE_CRYPTO_AUTH_MD5:
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
+				auth_xform->algo);
+		return -ENOTSUP;
+	default:
+		QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
+				auth_xform->algo);
+		return -EINVAL;
+	}
+
+	session->auth_iv.offset = auth_xform->iv.offset;
+	session->auth_iv.length = auth_xform->iv.length;
+
+	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+			/*
+			 * The cipher descriptor content needs to be created
+			 * first, then the authentication descriptor
+			 */
+
+			if (qat_sym_session_aead_create_cd_cipher(session,
+						auth_xform->key.data,
+						auth_xform->key.length))
+				return -EINVAL;
+
+			if (qat_sym_session_aead_create_cd_auth(session,
+						key_data,
+						key_length,
+						0,
+						auth_xform->digest_length,
+						auth_xform->op))
+				return -EINVAL;
+		} else {
+			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+			/*
+			 * The authentication descriptor content needs to be
+			 * created first, then the cipher descriptor
+			 */
+
+			if (qat_sym_session_aead_create_cd_auth(session,
+						key_data,
+						key_length,
+						0,
+						auth_xform->digest_length,
+						auth_xform->op))
+				return -EINVAL;
+
+			if (qat_sym_session_aead_create_cd_cipher(session,
+						auth_xform->key.data,
+						auth_xform->key.length))
+				return -EINVAL;
+		}
+		/* Restore to authentication only */
+		session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
+	} else {
+		if (qat_sym_session_aead_create_cd_auth(session,
+				key_data,
+				key_length,
+				0,
+				auth_xform->digest_length,
+				auth_xform->op))
+			return -EINVAL;
+	}
+
+	session->digest_length = auth_xform->digest_length;
auth_xform->digest_length; + return 0; +} + +int +qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform, + struct qat_sym_session *session) +{ + struct rte_crypto_aead_xform *aead_xform = &xform->aead; + enum rte_crypto_auth_operation crypto_operation; + + /* + * Store AEAD IV parameters as cipher IV, + * to avoid unnecessary memory usage + */ + session->cipher_iv.offset = xform->aead.iv.offset; + session->cipher_iv.length = xform->aead.iv.length; + + switch (aead_xform->algo) { + case RTE_CRYPTO_AEAD_AES_GCM: + if (qat_sym_validate_aes_key(aead_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid AES key size"); + return -EINVAL; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; + session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128; + break; + case RTE_CRYPTO_AEAD_AES_CCM: + if (qat_sym_validate_aes_key(aead_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid AES key size"); + return -EINVAL; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; + session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC; + break; + default: + QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n", + aead_xform->algo); + return -EINVAL; + } + + if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT && + aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) || + (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT && + aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) { + session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; + /* + * It needs to create cipher desc content first, + * then authentication + */ + crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ? + RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY; + + if (qat_sym_session_aead_create_cd_cipher(session, + aead_xform->key.data, + aead_xform->key.length)) + return -EINVAL; + + if (qat_sym_session_aead_create_cd_auth(session, + aead_xform->key.data, + aead_xform->key.length, + aead_xform->aad_length, + aead_xform->digest_length, + crypto_operation)) + return -EINVAL; + } else { + session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT; + /* + * It needs to create authentication desc content first, + * then cipher + */ + + crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ? 
+ RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE; + + if (qat_sym_session_aead_create_cd_auth(session, + aead_xform->key.data, + aead_xform->key.length, + aead_xform->aad_length, + aead_xform->digest_length, + crypto_operation)) + return -EINVAL; + + if (qat_sym_session_aead_create_cd_cipher(session, + aead_xform->key.data, + aead_xform->key.length)) + return -EINVAL; + } + + session->digest_length = aead_xform->digest_length; + return 0; +} + +unsigned int qat_sym_session_get_private_size( + struct rte_cryptodev *dev __rte_unused) +{ + return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8); +} + +/* returns block size in bytes per cipher algo */ +int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg) +{ + switch (qat_cipher_alg) { + case ICP_QAT_HW_CIPHER_ALGO_DES: + return ICP_QAT_HW_DES_BLK_SZ; + case ICP_QAT_HW_CIPHER_ALGO_3DES: + return ICP_QAT_HW_3DES_BLK_SZ; + case ICP_QAT_HW_CIPHER_ALGO_AES128: + case ICP_QAT_HW_CIPHER_ALGO_AES192: + case ICP_QAT_HW_CIPHER_ALGO_AES256: + return ICP_QAT_HW_AES_BLK_SZ; + default: + QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg); + return -EFAULT; + }; + return -EFAULT; +} + +/* + * Returns size in bytes per hash algo for state1 size field in cd_ctrl + * This is digest size rounded up to nearest quadword + */ +static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg) +{ + switch (qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_SHA224: + return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_SHA256: + return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_SHA384: + return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_SHA512: + return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC: + return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: + case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: + return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: + return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: + return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_MD5: + return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: + return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC: + return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_NULL: + return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_DELIMITER: + /* return maximum state1 size in this case */ + return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + default: + QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg); + return -EFAULT; + }; + return -EFAULT; +} + +/* returns digest size in bytes per hash algo */ +static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg) +{ + switch (qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + return 
ICP_QAT_HW_SHA1_STATE1_SZ; + case ICP_QAT_HW_AUTH_ALGO_SHA224: + return ICP_QAT_HW_SHA224_STATE1_SZ; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + return ICP_QAT_HW_SHA256_STATE1_SZ; + case ICP_QAT_HW_AUTH_ALGO_SHA384: + return ICP_QAT_HW_SHA384_STATE1_SZ; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + return ICP_QAT_HW_SHA512_STATE1_SZ; + case ICP_QAT_HW_AUTH_ALGO_MD5: + return ICP_QAT_HW_MD5_STATE1_SZ; + case ICP_QAT_HW_AUTH_ALGO_DELIMITER: + /* return maximum digest size in this case */ + return ICP_QAT_HW_SHA512_STATE1_SZ; + default: + QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg); + return -EFAULT; + }; + return -EFAULT; +} + +/* returns block size in bytes per hash algo */ +static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg) +{ + switch (qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + return SHA_CBLOCK; + case ICP_QAT_HW_AUTH_ALGO_SHA224: + return SHA256_CBLOCK; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + return SHA256_CBLOCK; + case ICP_QAT_HW_AUTH_ALGO_SHA384: + return SHA512_CBLOCK; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + return SHA512_CBLOCK; + case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: + return 16; + case ICP_QAT_HW_AUTH_ALGO_MD5: + return MD5_CBLOCK; + case ICP_QAT_HW_AUTH_ALGO_DELIMITER: + /* return maximum block size in this case */ + return SHA512_CBLOCK; + default: + QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg); + return -EFAULT; + }; + return -EFAULT; +} + +static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out) +{ + SHA_CTX ctx; + + if (!SHA1_Init(&ctx)) + return -EFAULT; + SHA1_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH); + return 0; +} + +static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out) +{ + SHA256_CTX ctx; + + if (!SHA224_Init(&ctx)) + return -EFAULT; + SHA256_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH); + return 0; +} + +static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out) +{ + SHA256_CTX ctx; + + if (!SHA256_Init(&ctx)) + return -EFAULT; + SHA256_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH); + return 0; +} + +static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out) +{ + SHA512_CTX ctx; + + if (!SHA384_Init(&ctx)) + return -EFAULT; + SHA512_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH); + return 0; +} + +static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out) +{ + SHA512_CTX ctx; + + if (!SHA512_Init(&ctx)) + return -EFAULT; + SHA512_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH); + return 0; +} + +static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out) +{ + MD5_CTX ctx; + + if (!MD5_Init(&ctx)) + return -EFAULT; + MD5_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH); + + return 0; +} + +static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg, + uint8_t *data_in, + uint8_t *data_out) +{ + int digest_size; + uint8_t digest[qat_hash_get_digest_size( + ICP_QAT_HW_AUTH_ALGO_DELIMITER)]; + uint32_t *hash_state_out_be32; + uint64_t *hash_state_out_be64; + int i; + + digest_size = qat_hash_get_digest_size(hash_alg); + if (digest_size <= 0) + return -EFAULT; + + hash_state_out_be32 = (uint32_t *)data_out; + hash_state_out_be64 = (uint64_t *)data_out; + + switch (hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + if (partial_hash_sha1(data_in, digest)) + return -EFAULT; + for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++) + *hash_state_out_be32 = + rte_bswap32(*(((uint32_t *)digest)+i)); + break; +
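+ /* + * Clarifying note on the per-word rte_bswap32()/rte_bswap64() loops + * in this switch: the OpenSSL *_Transform() helpers leave the running + * digest state as native-order words in the context struct, while QAT + * expects the state1 block in big-endian, so each word is swapped: + * 32-bit words for SHA-1/224/256, 64-bit words for SHA-384/512. + */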
case ICP_QAT_HW_AUTH_ALGO_SHA224: + if (partial_hash_sha224(data_in, digest)) + return -EFAULT; + for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++) + *hash_state_out_be32 = + rte_bswap32(*(((uint32_t *)digest)+i)); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + if (partial_hash_sha256(data_in, digest)) + return -EFAULT; + for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++) + *hash_state_out_be32 = + rte_bswap32(*(((uint32_t *)digest)+i)); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA384: + if (partial_hash_sha384(data_in, digest)) + return -EFAULT; + for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++) + *hash_state_out_be64 = + rte_bswap64(*(((uint64_t *)digest)+i)); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + if (partial_hash_sha512(data_in, digest)) + return -EFAULT; + for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++) + *hash_state_out_be64 = + rte_bswap64(*(((uint64_t *)digest)+i)); + break; + case ICP_QAT_HW_AUTH_ALGO_MD5: + if (partial_hash_md5(data_in, data_out)) + return -EFAULT; + break; + default: + QAT_LOG(ERR, "invalid hash alg %u", hash_alg); + return -EFAULT; + } + + return 0; +} +#define HMAC_IPAD_VALUE 0x36 +#define HMAC_OPAD_VALUE 0x5c +#define HASH_XCBC_PRECOMP_KEY_NUM 3 + +static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg, + const uint8_t *auth_key, + uint16_t auth_keylen, + uint8_t *p_state_buf, + uint16_t *p_state_len) +{ + int block_size; + uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)]; + uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)]; + int i; + + if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) { + static uint8_t qat_aes_xcbc_key_seed[ + ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = { + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + }; + + uint8_t *in = NULL; + uint8_t *out = p_state_buf; + int x; + AES_KEY enc_key; + + in = rte_zmalloc("working mem for key", + ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16); + if (in == NULL) { + QAT_LOG(ERR, "Failed to alloc memory"); + return -ENOMEM; + } + + rte_memcpy(in, qat_aes_xcbc_key_seed, + ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ); + for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) { + if (AES_set_encrypt_key(auth_key, auth_keylen << 3, + &enc_key) != 0) { + rte_free(in - + (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ)); + memset(out - + (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ), + 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ); + return -EFAULT; + } + AES_encrypt(in, out, &enc_key); + in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ; + out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ; + } + *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ; + rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ); + return 0; + } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) || + (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) { + uint8_t *in = NULL; + uint8_t *out = p_state_buf; + AES_KEY enc_key; + + memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ + + ICP_QAT_HW_GALOIS_LEN_A_SZ + + ICP_QAT_HW_GALOIS_E_CTR0_SZ); + in = rte_zmalloc("working mem for key", + ICP_QAT_HW_GALOIS_H_SZ, 16); + if (in == NULL) { + QAT_LOG(ERR, "Failed to alloc memory"); + return -ENOMEM; + } + + memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ); + if (AES_set_encrypt_key(auth_key, auth_keylen << 3, + &enc_key) != 0) { + return -EFAULT; + } + AES_encrypt(in, out, &enc_key); + *p_state_len = 
ICP_QAT_HW_GALOIS_H_SZ + + ICP_QAT_HW_GALOIS_LEN_A_SZ + + ICP_QAT_HW_GALOIS_E_CTR0_SZ; + rte_free(in); + return 0; + } + + block_size = qat_hash_get_block_size(hash_alg); + if (block_size <= 0) + return -EFAULT; + /* init ipad and opad from key and xor with fixed values */ + memset(ipad, 0, block_size); + memset(opad, 0, block_size); + + if (auth_keylen > (unsigned int)block_size) { + QAT_LOG(ERR, "invalid keylen %u", auth_keylen); + return -EFAULT; + } + rte_memcpy(ipad, auth_key, auth_keylen); + rte_memcpy(opad, auth_key, auth_keylen); + + for (i = 0; i < block_size; i++) { + uint8_t *ipad_ptr = ipad + i; + uint8_t *opad_ptr = opad + i; + *ipad_ptr ^= HMAC_IPAD_VALUE; + *opad_ptr ^= HMAC_OPAD_VALUE; + } + + /* do partial hash of ipad and copy to state1 */ + if (partial_hash_compute(hash_alg, ipad, p_state_buf)) { + memset(ipad, 0, block_size); + memset(opad, 0, block_size); + QAT_LOG(ERR, "ipad precompute failed"); + return -EFAULT; + } + + /* + * State len is a multiple of 8, so may be larger than the digest. + * Put the partial hash of opad state_len bytes after state1 + */ + *p_state_len = qat_hash_get_state1_size(hash_alg); + if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) { + memset(ipad, 0, block_size); + memset(opad, 0, block_size); + QAT_LOG(ERR, "opad precompute failed"); + return -EFAULT; + } + + /* don't leave data lying around */ + memset(ipad, 0, block_size); + memset(opad, 0, block_size); + return 0; +} + +static void +qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header, + enum qat_sym_proto_flag proto_flags) +{ + header->hdr_flags = + ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); + header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA; + header->comn_req_flags = + ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR, + QAT_COMN_PTR_TYPE_FLAT); + ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_PARTIAL_NONE); + ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, + ICP_QAT_FW_CIPH_IV_16BYTE_DATA); + + switch (proto_flags) { + case QAT_CRYPTO_PROTO_FLAG_NONE: + ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_PROTO); + break; + case QAT_CRYPTO_PROTO_FLAG_CCM: + ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_CCM_PROTO); + break; + case QAT_CRYPTO_PROTO_FLAG_GCM: + ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_GCM_PROTO); + break; + case QAT_CRYPTO_PROTO_FLAG_SNOW3G: + ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_SNOW_3G_PROTO); + break; + case QAT_CRYPTO_PROTO_FLAG_ZUC: + ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_ZUC_3G_PROTO); + break; + } + + ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_UPDATE_STATE); + ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER); +} + +/* + * Snow3G and ZUC should never use this function + * and set its protocol flag in both cipher and auth part of content + * descriptor building function + */ +static enum qat_sym_proto_flag +qat_get_crypto_proto_flag(uint16_t flags) +{ + int proto = ICP_QAT_FW_LA_PROTO_GET(flags); + enum qat_sym_proto_flag qat_proto_flag = + QAT_CRYPTO_PROTO_FLAG_NONE; + + switch (proto) { + case ICP_QAT_FW_LA_GCM_PROTO: + qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM; + break; + case ICP_QAT_FW_LA_CCM_PROTO: + qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM; + break; + } + + return qat_proto_flag; +} + +int qat_sym_session_aead_create_cd_cipher(struct 
qat_sym_session *cdesc, + uint8_t *cipherkey, + uint32_t cipherkeylen) +{ + struct icp_qat_hw_cipher_algo_blk *cipher; + struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req; + struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + void *ptr = &req_tmpl->cd_ctrl; + struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; + struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; + enum icp_qat_hw_cipher_convert key_convert; + enum qat_sym_proto_flag qat_proto_flag = + QAT_CRYPTO_PROTO_FLAG_NONE; + uint32_t total_key_size; + uint16_t cipher_offset, cd_size; + uint32_t wordIndex = 0; + uint32_t *temp_key = NULL; + + if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { + cd_pars->u.s.content_desc_addr = cdesc->cd_paddr; + ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, + ICP_QAT_FW_SLICE_CIPHER); + ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, + ICP_QAT_FW_SLICE_DRAM_WR); + ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_RET_AUTH_RES); + ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_CMP_AUTH_RES); + cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd; + } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) { + cd_pars->u.s.content_desc_addr = cdesc->cd_paddr; + ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, + ICP_QAT_FW_SLICE_CIPHER); + ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, + ICP_QAT_FW_SLICE_AUTH); + ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, + ICP_QAT_FW_SLICE_AUTH); + ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, + ICP_QAT_FW_SLICE_DRAM_WR); + cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd; + } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) { + QAT_LOG(ERR, "Invalid param, must be a cipher command."); + return -EFAULT; + } + + if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) { + /* + * CTR Streaming ciphers are a special case. 
Decrypt = encrypt + * Overriding default values previously set + */ + cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; + key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT; + } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 + || cdesc->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) + key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT; + else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) + key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT; + else + key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT; + + if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) { + total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ + + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ; + cipher_cd_ctrl->cipher_state_sz = + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3; + qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G; + + } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) { + total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ; + cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3; + cipher_cd_ctrl->cipher_padding_sz = + (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3; + } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) { + total_key_size = ICP_QAT_HW_3DES_KEY_SZ; + cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3; + qat_proto_flag = + qat_get_crypto_proto_flag(header->serv_specif_flags); + } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) { + total_key_size = ICP_QAT_HW_DES_KEY_SZ; + cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3; + qat_proto_flag = + qat_get_crypto_proto_flag(header->serv_specif_flags); + } else if (cdesc->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ + + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; + cipher_cd_ctrl->cipher_state_sz = + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3; + qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC; + cdesc->min_qat_dev_gen = QAT_GEN2; + } else { + total_key_size = cipherkeylen; + cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3; + qat_proto_flag = + qat_get_crypto_proto_flag(header->serv_specif_flags); + } + cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3; + cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd); + cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3; + + header->service_cmd_id = cdesc->qat_cmd; + qat_sym_session_init_common_hdr(header, qat_proto_flag); + + cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr; + cipher->cipher_config.val = + ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode, + cdesc->qat_cipher_alg, key_convert, + cdesc->qat_dir); + + if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) { + temp_key = (uint32_t *)(cdesc->cd_cur_ptr + + sizeof(struct icp_qat_hw_cipher_config) + + cipherkeylen); + memcpy(cipher->key, cipherkey, cipherkeylen); + memcpy(temp_key, cipherkey, cipherkeylen); + + /* XOR Key with KASUMI F8 key modifier at 4 bytes level */ + for (wordIndex = 0; wordIndex < (cipherkeylen >> 2); + wordIndex++) + temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES; + + cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) + + cipherkeylen + cipherkeylen; + } else { + memcpy(cipher->key, cipherkey, cipherkeylen); + cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) + + cipherkeylen; + } + + if (total_key_size > cipherkeylen) { + uint32_t padding_size = total_key_size-cipherkeylen; + if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) + && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) { + /* K3 not provided so use K1 = K3*/ + memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size); + } else if 
((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) + && (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) { + /* K2 and K3 not provided so use K1 = K2 = K3*/ + memcpy(cdesc->cd_cur_ptr, cipherkey, + cipherkeylen); + memcpy(cdesc->cd_cur_ptr+cipherkeylen, + cipherkey, cipherkeylen); + } else + memset(cdesc->cd_cur_ptr, 0, padding_size); + + cdesc->cd_cur_ptr += padding_size; + } + cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd; + cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3; + + return 0; +} + +int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc, + uint8_t *authkey, + uint32_t authkeylen, + uint32_t aad_length, + uint32_t digestsize, + unsigned int operation) +{ + struct icp_qat_hw_auth_setup *hash; + struct icp_qat_hw_cipher_algo_blk *cipherconfig; + struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req; + struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + void *ptr = &req_tmpl->cd_ctrl; + struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; + struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; + struct icp_qat_fw_la_auth_req_params *auth_param = + (struct icp_qat_fw_la_auth_req_params *) + ((char *)&req_tmpl->serv_specif_rqpars + + sizeof(struct icp_qat_fw_la_cipher_req_params)); + uint16_t state1_size = 0, state2_size = 0; + uint16_t hash_offset, cd_size; + uint32_t *aad_len = NULL; + uint32_t wordIndex = 0; + uint32_t *pTempKey; + enum qat_sym_proto_flag qat_proto_flag = + QAT_CRYPTO_PROTO_FLAG_NONE; + + if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) { + ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, + ICP_QAT_FW_SLICE_AUTH); + ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, + ICP_QAT_FW_SLICE_DRAM_WR); + cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd; + } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) { + ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, + ICP_QAT_FW_SLICE_AUTH); + ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, + ICP_QAT_FW_SLICE_CIPHER); + ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, + ICP_QAT_FW_SLICE_CIPHER); + ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, + ICP_QAT_FW_SLICE_DRAM_WR); + cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd; + } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) { + QAT_LOG(ERR, "Invalid param, must be a hash command."); + return -EFAULT; + } + + if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) { + ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_RET_AUTH_RES); + ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_CMP_AUTH_RES); + cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY; + } else { + ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_RET_AUTH_RES); + ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_CMP_AUTH_RES); + cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE; + } + + /* + * Setup the inner hash config + */ + hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd); + hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr; + hash->auth_config.reserved = 0; + hash->auth_config.config = + ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, + cdesc->qat_hash_alg, digestsize); + + if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 + || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 + || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) + hash->auth_counter.counter = 0; + else + hash->auth_counter.counter = rte_bswap32( + qat_hash_get_block_size(cdesc->qat_hash_alg)); + + cdesc->cd_cur_ptr += sizeof(struct 
icp_qat_hw_auth_setup); + + /* + * cd_cur_ptr now points at the state1 information. + */ + switch (cdesc->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, + authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { + QAT_LOG(ERR, "(SHA)precompute failed"); + return -EFAULT; + } + state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA224: + if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, + authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { + QAT_LOG(ERR, "(SHA)precompute failed"); + return -EFAULT; + } + state2_size = ICP_QAT_HW_SHA224_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, + authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { + QAT_LOG(ERR, "(SHA)precompute failed"); + return -EFAULT; + } + state2_size = ICP_QAT_HW_SHA256_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_SHA384: + if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, + authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { + QAT_LOG(ERR, "(SHA)precompute failed"); + return -EFAULT; + } + state2_size = ICP_QAT_HW_SHA384_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, + authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { + QAT_LOG(ERR, "(SHA)precompute failed"); + return -EFAULT; + } + state2_size = ICP_QAT_HW_SHA512_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC: + state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ; + if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC, + authkey, authkeylen, cdesc->cd_cur_ptr + state1_size, + &state2_size)) { + QAT_LOG(ERR, "(XCBC)precompute failed"); + return -EFAULT; + } + break; + case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: + case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: + qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM; + state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ; + if (qat_sym_do_precomputes(cdesc->qat_hash_alg, + authkey, authkeylen, cdesc->cd_cur_ptr + state1_size, + &state2_size)) { + QAT_LOG(ERR, "(GCM)precompute failed"); + return -EFAULT; + } + /* + * Write (the length of AAD) into bytes 16-19 of state2 + * in big-endian format. 
This field is 8 bytes + */ + auth_param->u2.aad_sz = + RTE_ALIGN_CEIL(aad_length, 16); + auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3; + + aad_len = (uint32_t *)(cdesc->cd_cur_ptr + + ICP_QAT_HW_GALOIS_128_STATE1_SZ + + ICP_QAT_HW_GALOIS_H_SZ); + *aad_len = rte_bswap32(aad_length); + cdesc->aad_len = aad_length; + break; + case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: + qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G; + state1_size = qat_hash_get_state1_size( + ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2); + state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ; + memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size); + + cipherconfig = (struct icp_qat_hw_cipher_algo_blk *) + (cdesc->cd_cur_ptr + state1_size + state2_size); + cipherconfig->cipher_config.val = + ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE, + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2, + ICP_QAT_HW_CIPHER_KEY_CONVERT, + ICP_QAT_HW_CIPHER_ENCRYPT); + memcpy(cipherconfig->key, authkey, authkeylen); + memset(cipherconfig->key + authkeylen, + 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ); + cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) + + authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ; + auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3; + break; + case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: + hash->auth_config.config = + ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0, + cdesc->qat_hash_alg, digestsize); + qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC; + state1_size = qat_hash_get_state1_size( + ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3); + state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ; + memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size + + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ); + + memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen); + cdesc->cd_cur_ptr += state1_size + state2_size + + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; + auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3; + cdesc->min_qat_dev_gen = QAT_GEN2; + + break; + case ICP_QAT_HW_AUTH_ALGO_MD5: + if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, + authkey, authkeylen, cdesc->cd_cur_ptr, + &state1_size)) { + QAT_LOG(ERR, "(MD5)precompute failed"); + return -EFAULT; + } + state2_size = ICP_QAT_HW_MD5_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_NULL: + state1_size = qat_hash_get_state1_size( + ICP_QAT_HW_AUTH_ALGO_NULL); + state2_size = ICP_QAT_HW_NULL_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC: + qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM; + state1_size = qat_hash_get_state1_size( + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC); + state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ + + ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ; + + if (aad_length > 0) { + aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN + + ICP_QAT_HW_CCM_AAD_LEN_INFO; + auth_param->u2.aad_sz = + RTE_ALIGN_CEIL(aad_length, + ICP_QAT_HW_CCM_AAD_ALIGNMENT); + } else { + auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN; + } + cdesc->aad_len = aad_length; + hash->auth_counter.counter = 0; + + hash_cd_ctrl->outer_prefix_sz = digestsize; + auth_param->hash_state_sz = digestsize; + + memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen); + break; + case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: + state1_size = qat_hash_get_state1_size( + ICP_QAT_HW_AUTH_ALGO_KASUMI_F9); + state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ; + memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size); + pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size + + authkeylen); + /* + * The Inner Hash Initial State2 block must contain IK + * (Initialisation Key), followed by IK XOR-ed with KM + * (Key Modifier): IK||(IK^KM). 
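+ * For example, with the 16-byte KASUMI IK the code below stores key + * words w0..w3 followed by w0^KM..w3^KM, where KM is + * KASUMI_F9_KEY_MODIFIER_4_BYTES (0xAAAAAAAA).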
+ */ + /* write the auth key */ + memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen); + /* initialise temp key with auth key */ + memcpy(pTempKey, authkey, authkeylen); + /* XOR Key with KASUMI F9 key modifier at 4 bytes level */ + for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++) + pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES; + break; + default: + QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg); + return -EFAULT; + } + + /* Request template setup */ + qat_sym_session_init_common_hdr(header, qat_proto_flag); + header->service_cmd_id = cdesc->qat_cmd; + + /* Auth CD config setup */ + hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3; + hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; + hash_cd_ctrl->inner_res_sz = digestsize; + hash_cd_ctrl->final_sz = digestsize; + hash_cd_ctrl->inner_state1_sz = state1_size; + auth_param->auth_res_sz = digestsize; + + hash_cd_ctrl->inner_state2_sz = state2_size; + hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + + ((sizeof(struct icp_qat_hw_auth_setup) + + RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8)) + >> 3); + + cdesc->cd_cur_ptr += state1_size + state2_size; + cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd; + + cd_pars->u.s.content_desc_addr = cdesc->cd_paddr; + cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3; + + return 0; +} + +int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg) +{ + switch (key_len) { + case ICP_QAT_HW_AES_128_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_AES128; + break; + case ICP_QAT_HW_AES_192_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_AES192; + break; + case ICP_QAT_HW_AES_256_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_AES256; + break; + default: + return -EINVAL; + } + return 0; +} + +int qat_sym_validate_aes_docsisbpi_key(int key_len, + enum icp_qat_hw_cipher_algo *alg) +{ + switch (key_len) { + case ICP_QAT_HW_AES_128_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_AES128; + break; + default: + return -EINVAL; + } + return 0; +} + +int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg) +{ + switch (key_len) { + case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2; + break; + default: + return -EINVAL; + } + return 0; +} + +int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg) +{ + switch (key_len) { + case ICP_QAT_HW_KASUMI_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI; + break; + default: + return -EINVAL; + } + return 0; +} + +int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg) +{ + switch (key_len) { + case ICP_QAT_HW_DES_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_DES; + break; + default: + return -EINVAL; + } + return 0; +} + +int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg) +{ + switch (key_len) { + case QAT_3DES_KEY_SZ_OPT1: + case QAT_3DES_KEY_SZ_OPT2: + case QAT_3DES_KEY_SZ_OPT3: + *alg = ICP_QAT_HW_CIPHER_ALGO_3DES; + break; + default: + return -EINVAL; + } + return 0; +} + +int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg) +{ + switch (key_len) { + case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3; + break; + default: + return -EINVAL; + } + return 0; +} diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h new file mode 100644 index 00000000..e8f51e5b --- /dev/null +++ b/drivers/crypto/qat/qat_sym_session.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 
2015-2018 Intel Corporation + */ +#ifndef _QAT_SYM_SESSION_H_ +#define _QAT_SYM_SESSION_H_ + +#include +#include + +#include "qat_common.h" +#include "icp_qat_hw.h" +#include "icp_qat_fw.h" +#include "icp_qat_fw_la.h" + +/* + * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR + * Integrity Key (IK) + */ +#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA + +#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555 + +/* 3DES key sizes */ +#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */ +#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */ +#define QAT_3DES_KEY_SZ_OPT3 8 /* K1=K2=K3 */ + + +#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \ + ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ + ICP_QAT_HW_CIPHER_NO_CONVERT, \ + ICP_QAT_HW_CIPHER_ENCRYPT) + +#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \ + ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ + ICP_QAT_HW_CIPHER_KEY_CONVERT, \ + ICP_QAT_HW_CIPHER_DECRYPT) + +enum qat_sym_proto_flag { + QAT_CRYPTO_PROTO_FLAG_NONE = 0, + QAT_CRYPTO_PROTO_FLAG_CCM = 1, + QAT_CRYPTO_PROTO_FLAG_GCM = 2, + QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3, + QAT_CRYPTO_PROTO_FLAG_ZUC = 4 +}; + +/* Common content descriptor */ +struct qat_sym_cd { + struct icp_qat_hw_cipher_algo_blk cipher; + struct icp_qat_hw_auth_algo_blk hash; +} __rte_packed __rte_cache_aligned; + +struct qat_sym_session { + enum icp_qat_fw_la_cmd_id qat_cmd; + enum icp_qat_hw_cipher_algo qat_cipher_alg; + enum icp_qat_hw_cipher_dir qat_dir; + enum icp_qat_hw_cipher_mode qat_mode; + enum icp_qat_hw_auth_algo qat_hash_alg; + enum icp_qat_hw_auth_op auth_op; + void *bpi_ctx; + struct qat_sym_cd cd; + uint8_t *cd_cur_ptr; + phys_addr_t cd_paddr; + struct icp_qat_fw_la_bulk_req fw_req; + uint8_t aad_len; + struct qat_crypto_instance *inst; + struct { + uint16_t offset; + uint16_t length; + } cipher_iv; + struct { + uint16_t offset; + uint16_t length; + } auth_iv; + uint16_t digest_length; + rte_spinlock_t lock; /* protects this struct */ + enum qat_device_gen min_qat_dev_gen; +}; + +int +qat_sym_session_configure(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *sess, + struct rte_mempool *mempool); + +int +qat_sym_session_set_parameters(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, void *session_private); + +int +qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform, + struct qat_sym_session *session); + +int +qat_sym_session_configure_cipher(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct qat_sym_session *session); + +int +qat_sym_session_configure_auth(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct qat_sym_session *session); + +int +qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cd, + uint8_t *enckey, + uint32_t enckeylen); + +int +qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc, + uint8_t *authkey, + uint32_t authkeylen, + uint32_t aad_length, + uint32_t digestsize, + unsigned int operation); + +void +qat_sym_session_clear(struct rte_cryptodev *dev, + struct rte_cryptodev_sym_session *session); + +unsigned int +qat_sym_session_get_private_size(struct rte_cryptodev *dev); + +void +qat_sym_sesssion_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header, + enum qat_sym_proto_flag proto_flags); +int +qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg); +int +qat_sym_validate_aes_docsisbpi_key(int key_len, + enum icp_qat_hw_cipher_algo *alg); +int +qat_sym_validate_snow3g_key(int key_len, enum 
icp_qat_hw_cipher_algo *alg); +int +qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg); +int +qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg); +int +qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg); +int +qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg); +int +qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg); + +#endif /* _QAT_SYM_SESSION_H_ */ diff --git a/drivers/crypto/qat/rte_pmd_qat_version.map b/drivers/crypto/qat/rte_pmd_qat_version.map deleted file mode 100644 index bbaf1c85..00000000 --- a/drivers/crypto/qat/rte_pmd_qat_version.map +++ /dev/null @@ -1,3 +0,0 @@ -DPDK_2.2 { - local: *; -}; \ No newline at end of file diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c deleted file mode 100644 index bf837401..00000000 --- a/drivers/crypto/qat/rte_qat_cryptodev.c +++ /dev/null @@ -1,180 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015-2017 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include - -#include "qat_crypto.h" -#include "qat_logs.h" - -uint8_t cryptodev_qat_driver_id; - -static const struct rte_cryptodev_capabilities qat_gen1_capabilities[] = { - QAT_BASE_GEN1_SYM_CAPABILITIES, - RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() -}; - -static const struct rte_cryptodev_capabilities qat_gen2_capabilities[] = { - QAT_BASE_GEN1_SYM_CAPABILITIES, - QAT_EXTRA_GEN2_SYM_CAPABILITIES, - RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() -}; - -static struct rte_cryptodev_ops crypto_qat_ops = { - - /* Device related operations */ - .dev_configure = qat_dev_config, - .dev_start = qat_dev_start, - .dev_stop = qat_dev_stop, - .dev_close = qat_dev_close, - .dev_infos_get = qat_dev_info_get, - - .stats_get = qat_crypto_sym_stats_get, - .stats_reset = qat_crypto_sym_stats_reset, - .queue_pair_setup = qat_crypto_sym_qp_setup, - .queue_pair_release = qat_crypto_sym_qp_release, - .queue_pair_start = NULL, - .queue_pair_stop = NULL, - .queue_pair_count = NULL, - - /* Crypto related operations */ - .session_get_size = qat_crypto_sym_get_session_private_size, - .session_configure = qat_crypto_sym_configure_session, - .session_clear = qat_crypto_sym_clear_session -}; - -/* - * The set of PCI devices this driver supports - */ - -static const struct rte_pci_id pci_id_qat_map[] = { - { - RTE_PCI_DEVICE(0x8086, 0x0443), - }, - { - RTE_PCI_DEVICE(0x8086, 0x37c9), - }, - { - RTE_PCI_DEVICE(0x8086, 0x19e3), - }, - { - RTE_PCI_DEVICE(0x8086, 0x6f55), - }, - {.device_id = 0}, -}; - -static int -crypto_qat_create(const char *name, struct rte_pci_device *pci_dev, - struct rte_cryptodev_pmd_init_params *init_params) -{ - struct rte_cryptodev *cryptodev; - struct qat_pmd_private *internals; - - PMD_INIT_FUNC_TRACE(); - - cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device, - init_params); - if (cryptodev == NULL) - return -ENODEV; - - cryptodev->driver_id = cryptodev_qat_driver_id; - cryptodev->dev_ops = &crypto_qat_ops; - - cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst; - cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst; - - cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | - RTE_CRYPTODEV_FF_HW_ACCELERATED | - RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | - RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER; - - internals = cryptodev->data->dev_private; - internals->max_nb_sessions = init_params->max_nb_sessions; - switch (pci_dev->id.device_id) { - case 0x0443: - internals->qat_dev_gen = QAT_GEN1; - 
internals->qat_dev_capabilities = qat_gen1_capabilities; - break; - case 0x37c9: - case 0x19e3: - case 0x6f55: - internals->qat_dev_gen = QAT_GEN2; - internals->qat_dev_capabilities = qat_gen2_capabilities; - break; - default: - PMD_DRV_LOG(ERR, - "Invalid dev_id, can't determine capabilities"); - break; - } - - /* - * For secondary processes, we don't initialise any further as primary - * has already done this work. Only check we don't need a different - * RX function - */ - if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - PMD_DRV_LOG(DEBUG, "Device already initialised by primary process"); - return 0; - } - - return 0; -} - -static int crypto_qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, - struct rte_pci_device *pci_dev) -{ - struct rte_cryptodev_pmd_init_params init_params = { - .name = "", - .socket_id = rte_socket_id(), - .private_data_size = sizeof(struct qat_pmd_private), - .max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS - }; - char name[RTE_CRYPTODEV_NAME_MAX_LEN]; - - PMD_DRV_LOG(DEBUG, "Found QAT device at %02x:%02x.%x", - pci_dev->addr.bus, - pci_dev->addr.devid, - pci_dev->addr.function); - - rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); - - return crypto_qat_create(name, pci_dev, &init_params); -} - -static int crypto_qat_pci_remove(struct rte_pci_device *pci_dev) -{ - struct rte_cryptodev *cryptodev; - char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; - - if (pci_dev == NULL) - return -EINVAL; - - rte_pci_device_name(&pci_dev->addr, cryptodev_name, - sizeof(cryptodev_name)); - - cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name); - if (cryptodev == NULL) - return -ENODEV; - - /* free crypto device */ - return rte_cryptodev_pmd_destroy(cryptodev); -} - -static struct rte_pci_driver rte_qat_pmd = { - .id_table = pci_id_qat_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING, - .probe = crypto_qat_pci_probe, - .remove = crypto_qat_pci_remove -}; - -static struct cryptodev_driver qat_crypto_drv; - -RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd); -RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map); -RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, rte_qat_pmd, - cryptodev_qat_driver_id); diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c index 140c8b41..6e4919c4 100644 --- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c +++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c @@ -9,6 +9,8 @@ #include "rte_cryptodev_scheduler.h" #include "scheduler_pmd_private.h" +int scheduler_logtype_driver; + /** update the scheduler pmd's capability with attaching device's * capability. 
* For each device to be attached, the scheduler's capability should be @@ -91,8 +93,10 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx) struct rte_cryptodev_capabilities tmp_caps[256] = { {0} }; uint32_t nb_caps = 0, i; - if (sched_ctx->capabilities) + if (sched_ctx->capabilities) { rte_free(sched_ctx->capabilities); + sched_ctx->capabilities = NULL; + } for (i = 0; i < sched_ctx->nb_slaves; i++) { struct rte_cryptodev_info dev_info; @@ -166,30 +170,30 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id) uint32_t i; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->data->dev_started) { - CS_LOG_ERR("Illegal operation"); + CR_SCHED_LOG(ERR, "Illegal operation"); return -EBUSY; } sched_ctx = dev->data->dev_private; if (sched_ctx->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) { - CS_LOG_ERR("Too many slaves attached"); + CR_SCHED_LOG(ERR, "Too many slaves attached"); return -ENOMEM; } for (i = 0; i < sched_ctx->nb_slaves; i++) if (sched_ctx->slaves[i].dev_id == slave_id) { - CS_LOG_ERR("Slave already added"); + CR_SCHED_LOG(ERR, "Slave already added"); return -ENOTSUP; } @@ -206,7 +210,7 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id) slave->driver_id = 0; sched_ctx->nb_slaves--; - CS_LOG_ERR("capabilities update failed"); + CR_SCHED_LOG(ERR, "capabilities update failed"); return -ENOTSUP; } @@ -225,17 +229,17 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id) uint32_t i, slave_pos; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->data->dev_started) { - CS_LOG_ERR("Illegal operation"); + CR_SCHED_LOG(ERR, "Illegal operation"); return -EBUSY; } @@ -245,12 +249,12 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id) if (sched_ctx->slaves[slave_pos].dev_id == slave_id) break; if (slave_pos == sched_ctx->nb_slaves) { - CS_LOG_ERR("Cannot find slave"); + CR_SCHED_LOG(ERR, "Cannot find slave"); return -ENOTSUP; } if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) { - CS_LOG_ERR("Failed to detach slave"); + CR_SCHED_LOG(ERR, "Failed to detach slave"); return -ENOTSUP; } @@ -263,7 +267,7 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id) sched_ctx->nb_slaves--; if (update_scheduler_capability(sched_ctx) < 0) { - CS_LOG_ERR("capabilities update failed"); + CR_SCHED_LOG(ERR, "capabilities update failed"); return -ENOTSUP; } @@ -282,17 +286,17 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id, struct scheduler_ctx *sched_ctx; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->data->dev_started) { - CS_LOG_ERR("Illegal operation"); + CR_SCHED_LOG(ERR, "Illegal operation"); return -EBUSY; } @@ -305,33 +309,33 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id, case CDEV_SCHED_MODE_ROUNDROBIN: if 
(rte_cryptodev_scheduler_load_user_scheduler(scheduler_id, roundrobin_scheduler) < 0) { - CS_LOG_ERR("Failed to load scheduler"); + CR_SCHED_LOG(ERR, "Failed to load scheduler"); return -1; } break; case CDEV_SCHED_MODE_PKT_SIZE_DISTR: if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id, pkt_size_based_distr_scheduler) < 0) { - CS_LOG_ERR("Failed to load scheduler"); + CR_SCHED_LOG(ERR, "Failed to load scheduler"); return -1; } break; case CDEV_SCHED_MODE_FAILOVER: if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id, failover_scheduler) < 0) { - CS_LOG_ERR("Failed to load scheduler"); + CR_SCHED_LOG(ERR, "Failed to load scheduler"); return -1; } break; case CDEV_SCHED_MODE_MULTICORE: if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id, multicore_scheduler) < 0) { - CS_LOG_ERR("Failed to load scheduler"); + CR_SCHED_LOG(ERR, "Failed to load scheduler"); return -1; } break; default: - CS_LOG_ERR("Not yet supported"); + CR_SCHED_LOG(ERR, "Not yet supported"); return -ENOTSUP; } @@ -345,12 +349,12 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id) struct scheduler_ctx *sched_ctx; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } @@ -367,17 +371,17 @@ rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id, struct scheduler_ctx *sched_ctx; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->data->dev_started) { - CS_LOG_ERR("Illegal operation"); + CR_SCHED_LOG(ERR, "Illegal operation"); return -EBUSY; } @@ -395,12 +399,12 @@ rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id) struct scheduler_ctx *sched_ctx; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } @@ -417,25 +421,25 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id, struct scheduler_ctx *sched_ctx; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->data->dev_started) { - CS_LOG_ERR("Illegal operation"); + CR_SCHED_LOG(ERR, "Illegal operation"); return -EBUSY; } sched_ctx = dev->data->dev_private; if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) { - CS_LOG_ERR("Invalid name %s, should be less than " - "%u bytes.\n", scheduler->name, + CR_SCHED_LOG(ERR, "Invalid name %s, should be less than " + "%u bytes.", scheduler->name, RTE_CRYPTODEV_NAME_MAX_LEN); return -EINVAL; } @@ -444,8 +448,8 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id, if (strlen(scheduler->description) > RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) { - CS_LOG_ERR("Invalid description %s, should be less than " - "%u bytes.\n", scheduler->description, + CR_SCHED_LOG(ERR, "Invalid description %s, should be less than " + "%u bytes.", scheduler->description, RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 
1); return -EINVAL; } @@ -462,14 +466,16 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id, sched_ctx->ops.option_set = scheduler->ops->option_set; sched_ctx->ops.option_get = scheduler->ops->option_get; - if (sched_ctx->private_ctx) + if (sched_ctx->private_ctx) { rte_free(sched_ctx->private_ctx); + sched_ctx->private_ctx = NULL; + } if (sched_ctx->ops.create_private_ctx) { int ret = (*sched_ctx->ops.create_private_ctx)(dev); if (ret < 0) { - CS_LOG_ERR("Unable to create scheduler private " + CR_SCHED_LOG(ERR, "Unable to create scheduler private " "context"); return ret; } @@ -488,12 +494,12 @@ rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves) uint32_t nb_slaves = 0; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } @@ -521,17 +527,17 @@ rte_cryptodev_scheduler_option_set(uint8_t scheduler_id, if (option_type == CDEV_SCHED_OPTION_NOT_SET || option_type >= CDEV_SCHED_OPTION_COUNT) { - CS_LOG_ERR("Invalid option parameter"); + CR_SCHED_LOG(ERR, "Invalid option parameter"); return -EINVAL; } if (!option) { - CS_LOG_ERR("Invalid option parameter"); + CR_SCHED_LOG(ERR, "Invalid option parameter"); return -EINVAL; } if (dev->data->dev_started) { - CS_LOG_ERR("Illegal operation"); + CR_SCHED_LOG(ERR, "Illegal operation"); return -EBUSY; } @@ -551,17 +557,17 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id, struct scheduler_ctx *sched_ctx; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (!option) { - CS_LOG_ERR("Invalid option parameter"); + CR_SCHED_LOG(ERR, "Invalid option parameter"); return -EINVAL; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } @@ -571,3 +577,8 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id, return (*sched_ctx->ops.option_get)(dev, option_type, option); } + +RTE_INIT(scheduler_init_log) +{ + scheduler_logtype_driver = rte_log_register("pmd.crypto.scheduler"); +} diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h index 01e7646c..3faea409 100644 --- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h +++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h @@ -30,7 +30,7 @@ extern "C" { #endif /** Maximum number of multi-core worker cores */ -#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (64) +#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (RTE_MAX_LCORE - 1) /** Round-robin scheduling mode string */ #define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin @@ -76,6 +76,7 @@ enum rte_cryptodev_schedule_option_type { /** * Threshold option structure */ +#define RTE_CRYPTODEV_SCHEDULER_PARAM_THRES "threshold" struct rte_cryptodev_scheduler_threshold_option { uint32_t threshold; /**< Threshold for packet-size mode */ }; diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c index 005b1638..ddfb5b81 100644 --- a/drivers/crypto/scheduler/scheduler_failover.c +++ b/drivers/crypto/scheduler/scheduler_failover.c @@ -139,7 +139,7 @@ scheduler_start(struct rte_cryptodev *dev) uint16_t i; if (sched_ctx->nb_slaves < 2) { - CS_LOG_ERR("Number of slaves shall no less than 2"); + CR_SCHED_LOG(ERR, "Number of 
slaves shall no less than 2"); return -ENOMEM; } @@ -182,7 +182,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id) fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0, rte_socket_id()); if (!fo_qp_ctx) { - CS_LOG_ERR("failed allocate memory for private queue pair"); + CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair"); return -ENOMEM; } diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c index b2ce44ce..d410e69d 100644 --- a/drivers/crypto/scheduler/scheduler_multicore.c +++ b/drivers/crypto/scheduler/scheduler_multicore.c @@ -21,8 +21,8 @@ struct mc_scheduler_ctx { uint32_t num_workers; /**< Number of workers polling */ uint32_t stop_signal; - struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES]; - struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES]; + struct rte_ring *sched_enq_ring[RTE_MAX_LCORE]; + struct rte_ring *sched_deq_ring[RTE_MAX_LCORE]; }; struct mc_scheduler_qp_ctx { @@ -178,7 +178,8 @@ mc_scheduler_worker(struct rte_cryptodev *dev) } } if (worker_idx == -1) { - CS_LOG_ERR("worker on core %u:cannot find worker index!\n", core_id); + CR_SCHED_LOG(ERR, "worker on core %u:cannot find worker index!", + core_id); return -1; } @@ -313,7 +314,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id) mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0, rte_socket_id()); if (!mc_qp_ctx) { - CS_LOG_ERR("failed allocate memory for private queue pair"); + CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair"); return -ENOMEM; } @@ -328,16 +329,18 @@ static int scheduler_create_private_ctx(struct rte_cryptodev *dev) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; - struct mc_scheduler_ctx *mc_ctx; + struct mc_scheduler_ctx *mc_ctx = NULL; uint16_t i; - if (sched_ctx->private_ctx) + if (sched_ctx->private_ctx) { rte_free(sched_ctx->private_ctx); + sched_ctx->private_ctx = NULL; + } mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0, rte_socket_id()); if (!mc_ctx) { - CS_LOG_ERR("failed allocate memory"); + CR_SCHED_LOG(ERR, "failed allocate memory"); return -ENOMEM; } @@ -345,25 +348,48 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev) for (i = 0; i < sched_ctx->nb_wc; i++) { char r_name[16]; - snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i); - mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE, - rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ); + snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX + "%u_%u", dev->data->dev_id, i); + mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name); if (!mc_ctx->sched_enq_ring[i]) { - CS_LOG_ERR("Cannot create ring for worker %u", i); - return -1; + mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, + PER_SLAVE_BUFF_SIZE, + rte_socket_id(), + RING_F_SC_DEQ | RING_F_SP_ENQ); + if (!mc_ctx->sched_enq_ring[i]) { + CR_SCHED_LOG(ERR, "Cannot create ring for worker %u", + i); + goto exit; + } } - snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i); - mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE, - rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ); + snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX + "%u_%u", dev->data->dev_id, i); + mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name); if (!mc_ctx->sched_deq_ring[i]) { - CS_LOG_ERR("Cannot create ring for worker %u", i); - return -1; + mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, 
+ PER_SLAVE_BUFF_SIZE, + rte_socket_id(), + RING_F_SC_DEQ | RING_F_SP_ENQ); + if (!mc_ctx->sched_deq_ring[i]) { + CR_SCHED_LOG(ERR, "Cannot create ring for worker %u", + i); + goto exit; + } } } sched_ctx->private_ctx = (void *)mc_ctx; return 0; + +exit: + for (i = 0; i < sched_ctx->nb_wc; i++) { + rte_ring_free(mc_ctx->sched_enq_ring[i]); + rte_ring_free(mc_ctx->sched_deq_ring[i]); + } + rte_free(mc_ctx); + + return -1; } struct rte_cryptodev_scheduler_ops scheduler_mc_ops = { diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c index 96bf0161..74129b66 100644 --- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c +++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c @@ -258,7 +258,7 @@ scheduler_start(struct rte_cryptodev *dev) /* for packet size based scheduler, nb_slaves have to >= 2 */ if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) { - CS_LOG_ERR("not enough slaves to start"); + CR_SCHED_LOG(ERR, "not enough slaves to start"); return -1; } @@ -302,7 +302,7 @@ scheduler_stop(struct rte_cryptodev *dev) if (ps_qp_ctx->primary_slave.nb_inflight_cops + ps_qp_ctx->secondary_slave.nb_inflight_cops) { - CS_LOG_ERR("Some crypto ops left in slave queue"); + CR_SCHED_LOG(ERR, "Some crypto ops left in slave queue"); return -1; } } @@ -319,7 +319,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id) ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0, rte_socket_id()); if (!ps_qp_ctx) { - CS_LOG_ERR("failed allocate memory for private queue pair"); + CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair"); return -ENOMEM; } @@ -334,13 +334,15 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev) struct scheduler_ctx *sched_ctx = dev->data->dev_private; struct psd_scheduler_ctx *psd_ctx; - if (sched_ctx->private_ctx) + if (sched_ctx->private_ctx) { rte_free(sched_ctx->private_ctx); + sched_ctx->private_ctx = NULL; + } psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0, rte_socket_id()); if (!psd_ctx) { - CS_LOG_ERR("failed allocate memory"); + CR_SCHED_LOG(ERR, "failed allocate memory"); return -ENOMEM; } @@ -360,14 +362,14 @@ scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type, if ((enum rte_cryptodev_schedule_option_type)option_type != CDEV_SCHED_OPTION_THRESHOLD) { - CS_LOG_ERR("Option not supported"); + CR_SCHED_LOG(ERR, "Option not supported"); return -EINVAL; } threshold = ((struct rte_cryptodev_scheduler_threshold_option *) option)->threshold; if (!rte_is_power_of_2(threshold)) { - CS_LOG_ERR("Threshold is not power of 2"); + CR_SCHED_LOG(ERR, "Threshold is not power of 2"); return -EINVAL; } @@ -386,7 +388,7 @@ scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type, if ((enum rte_cryptodev_schedule_option_type)option_type != CDEV_SCHED_OPTION_THRESHOLD) { - CS_LOG_ERR("Option not supported"); + CR_SCHED_LOG(ERR, "Option not supported"); return -EINVAL; } diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c index 51a85fa6..a9221a94 100644 --- a/drivers/crypto/scheduler/scheduler_pmd.c +++ b/drivers/crypto/scheduler/scheduler_pmd.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "rte_cryptodev_scheduler.h" #include "scheduler_pmd_private.h" @@ -19,8 +20,10 @@ struct scheduler_init_params { struct rte_cryptodev_pmd_init_params def_p; uint32_t nb_slaves; enum rte_cryptodev_scheduler_mode mode; + char mode_param_str[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN]; uint32_t enable_ordering; 
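A pattern repeated throughout this patch: pointers are set to NULL immediately after rte_free(), so a later re-configure or error path can neither double-free nor dereference stale memory. A self-contained sketch of the reset-before-reallocate idiom (struct and names illustrative):

#include <stddef.h>
#include <errno.h>
#include <rte_malloc.h>

struct priv_holder { void *private_ctx; };

static int
replace_private_ctx(struct priv_holder *h, size_t size, int socket_id)
{
        /* free any previous context and clear the pointer first */
        if (h->private_ctx != NULL) {
                rte_free(h->private_ctx);
                h->private_ctx = NULL;
        }
        h->private_ctx = rte_zmalloc_socket(NULL, size, 0, socket_id);
        return h->private_ctx == NULL ? -ENOMEM : 0;
}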
- uint64_t wcmask; + uint16_t wc_pool[RTE_MAX_LCORE]; + uint16_t nb_wc; char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES] [RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN]; }; @@ -28,9 +31,9 @@ struct scheduler_init_params { #define RTE_CRYPTODEV_VDEV_NAME ("name") #define RTE_CRYPTODEV_VDEV_SLAVE ("slave") #define RTE_CRYPTODEV_VDEV_MODE ("mode") +#define RTE_CRYPTODEV_VDEV_MODE_PARAM ("mode_param") #define RTE_CRYPTODEV_VDEV_ORDERING ("ordering") #define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs") -#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions") #define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id") #define RTE_CRYPTODEV_VDEV_COREMASK ("coremask") #define RTE_CRYPTODEV_VDEV_CORELIST ("corelist") @@ -39,9 +42,9 @@ const char *scheduler_valid_params[] = { RTE_CRYPTODEV_VDEV_NAME, RTE_CRYPTODEV_VDEV_SLAVE, RTE_CRYPTODEV_VDEV_MODE, + RTE_CRYPTODEV_VDEV_MODE_PARAM, RTE_CRYPTODEV_VDEV_ORDERING, RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG, - RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG, RTE_CRYPTODEV_VDEV_SOCKET_ID, RTE_CRYPTODEV_VDEV_COREMASK, RTE_CRYPTODEV_VDEV_CORELIST @@ -68,6 +71,8 @@ const struct scheduler_parse_map scheduler_ordering_map[] = { {"disable", 0} }; +#define CDEV_SCHED_MODE_PARAM_SEP_CHAR ':' + static int cryptodev_scheduler_create(const char *name, struct rte_vdev_device *vdev, @@ -81,15 +86,11 @@ cryptodev_scheduler_create(const char *name, dev = rte_cryptodev_pmd_create(name, &vdev->device, &init_params->def_p); if (dev == NULL) { - CS_LOG_ERR("driver %s: failed to create cryptodev vdev", + CR_SCHED_LOG(ERR, "driver %s: failed to create cryptodev vdev", name); return -EFAULT; } - if (init_params->wcmask != 0) - RTE_LOG(INFO, PMD, " workers core mask = %"PRIx64"\n", - init_params->wcmask); - dev->driver_id = cryptodev_driver_id; dev->dev_ops = rte_crypto_scheduler_pmd_ops; @@ -100,20 +101,26 @@ cryptodev_scheduler_create(const char *name, if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) { uint16_t i; - sched_ctx->nb_wc = 0; + sched_ctx->nb_wc = init_params->nb_wc; - for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) { - if (init_params->wcmask & (1ULL << i)) { - sched_ctx->wc_pool[sched_ctx->nb_wc++] = i; - RTE_LOG(INFO, PMD, - " Worker core[%u]=%u added\n", - sched_ctx->nb_wc-1, i); - } + for (i = 0; i < sched_ctx->nb_wc; i++) { + sched_ctx->wc_pool[i] = init_params->wc_pool[i]; + CR_SCHED_LOG(INFO, " Worker core[%u]=%u added", + i, sched_ctx->wc_pool[i]); } } if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED && init_params->mode < CDEV_SCHED_MODE_COUNT) { + union { + struct rte_cryptodev_scheduler_threshold_option + threshold_option; + } option; + enum rte_cryptodev_schedule_option_type option_type; + char param_name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN] = {0}; + char param_val[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN] = {0}; + char *s, *end; + ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id, init_params->mode); if (ret < 0) { @@ -125,10 +132,52 @@ cryptodev_scheduler_create(const char *name, if (scheduler_mode_map[i].val != sched_ctx->mode) continue; - RTE_LOG(INFO, PMD, " Scheduling mode = %s\n", + CR_SCHED_LOG(INFO, " Scheduling mode = %s", scheduler_mode_map[i].name); break; } + + if (strlen(init_params->mode_param_str) > 0) { + s = strchr(init_params->mode_param_str, + CDEV_SCHED_MODE_PARAM_SEP_CHAR); + if (s == NULL) { + CR_SCHED_LOG(ERR, "Invalid mode param"); + return -EINVAL; + } + + strlcpy(param_name, init_params->mode_param_str, + s - init_params->mode_param_str + 1); + s++; + strlcpy(param_val, s, + RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN); 
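The new mode_param vdev argument arrives as one "name:value" token; the code above splits it on ':' with strlcpy, which keeps the source string intact. A standalone sketch of the same split, assuming strlcpy from <rte_string_fns.h> (DPDK maps it to rte_strlcpy when libbsd is absent); buffer size and input are illustrative:

#include <stdlib.h>
#include <string.h>
#include <rte_string_fns.h>

#define PARAM_MAX_LEN 64

/* Split e.g. "threshold:128" into a name and a numeric value. */
static int
split_mode_param(const char *str, char name[PARAM_MAX_LEN],
                unsigned long *value)
{
        char val[PARAM_MAX_LEN];
        const char *sep = strchr(str, ':');
        char *end;

        if (sep == NULL)
                return -1;
        /* the size argument includes room for the terminating NUL
         * that strlcpy always writes */
        strlcpy(name, str, sep - str + 1);
        strlcpy(val, sep + 1, sizeof(val));
        *value = strtoul(val, &end, 0);
        return *end == '\0' ? 0 : -1;
}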
+ + switch (init_params->mode) { + case CDEV_SCHED_MODE_PKT_SIZE_DISTR: + if (strcmp(param_name, + RTE_CRYPTODEV_SCHEDULER_PARAM_THRES) + != 0) { + CR_SCHED_LOG(ERR, "Invalid mode param"); + return -EINVAL; + } + option_type = CDEV_SCHED_OPTION_THRESHOLD; + + option.threshold_option.threshold = + strtoul(param_val, &end, 0); + break; + default: + CR_SCHED_LOG(ERR, "Invalid mode param"); + return -EINVAL; + } + + if (sched_ctx->ops.option_set(dev, option_type, + (void *)&option) < 0) { + CR_SCHED_LOG(ERR, "Invalid mode param"); + return -EINVAL; + } + + RTE_LOG(INFO, PMD, " Sched mode param (%s = %s)\n", + param_name, param_val); + } } sched_ctx->reordering_enabled = init_params->enable_ordering; @@ -138,7 +187,7 @@ cryptodev_scheduler_create(const char *name, sched_ctx->reordering_enabled) continue; - RTE_LOG(INFO, PMD, " Packet ordering = %s\n", + CR_SCHED_LOG(INFO, " Packet ordering = %s", scheduler_ordering_map[i].name); break; @@ -153,7 +202,7 @@ cryptodev_scheduler_create(const char *name, if (!sched_ctx->init_slave_names[ sched_ctx->nb_init_slaves]) { - CS_LOG_ERR("driver %s: Insufficient memory", + CR_SCHED_LOG(ERR, "driver %s: Insufficient memory", name); return -ENOMEM; } @@ -175,8 +224,8 @@ cryptodev_scheduler_create(const char *name, 0, SOCKET_ID_ANY); if (!sched_ctx->capabilities) { - RTE_LOG(ERR, PMD, "Not enough memory for capability " - "information\n"); + CR_SCHED_LOG(ERR, "Not enough memory for capability " + "information"); return -ENOMEM; } @@ -220,7 +269,7 @@ parse_integer_arg(const char *key __rte_unused, *i = atoi(value); if (*i < 0) { - CS_LOG_ERR("Argument has to be positive.\n"); + CR_SCHED_LOG(ERR, "Argument has to be positive."); return -EINVAL; } @@ -232,9 +281,47 @@ static int parse_coremask_arg(const char *key __rte_unused, const char *value, void *extra_args) { + int i, j, val; + uint16_t idx = 0; + char c; struct scheduler_init_params *params = extra_args; - params->wcmask = strtoull(value, NULL, 16); + params->nb_wc = 0; + + if (value == NULL) + return -1; + /* Remove all blank characters ahead and after . + * Remove 0x/0X if exists. 
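For comparison with the vdev-string path above, the same threshold can also be set programmatically once the scheduler exists. A hedged usage sketch assuming the public rte_cryptodev_scheduler_option_set() API and an illustrative device id and value:

#include <rte_cryptodev_scheduler.h>

/* Set the packet-size-distr threshold at runtime; the value must be
 * a power of two, as the scheduler's option_set() enforces.
 */
static int
set_psd_threshold(uint8_t scheduler_dev_id)
{
        struct rte_cryptodev_scheduler_threshold_option opt = {
                .threshold = 128,
        };

        return rte_cryptodev_scheduler_option_set(scheduler_dev_id,
                        CDEV_SCHED_OPTION_THRESHOLD, (void *)&opt);
}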
+ */ + while (isblank(*value)) + value++; + if (value[0] == '0' && ((value[1] == 'x') || (value[1] == 'X'))) + value += 2; + i = strlen(value); + while ((i > 0) && isblank(value[i - 1])) + i--; + + if (i == 0) + return -1; + + for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) { + c = value[i]; + if (isxdigit(c) == 0) { + /* invalid characters */ + return -1; + } + if (isdigit(c)) + val = c - '0'; + else if (isupper(c)) + val = c - 'A' + 10; + else + val = c - 'a' + 10; + + for (j = 0; j < 4 && idx < RTE_MAX_LCORE; j++, idx++) { + if ((1 << j) & val) + params->wc_pool[params->nb_wc++] = idx; + } + } return 0; } @@ -246,7 +333,7 @@ parse_corelist_arg(const char *key __rte_unused, { struct scheduler_init_params *params = extra_args; - params->wcmask = 0ULL; + params->nb_wc = 0; const char *token = value; @@ -254,7 +341,11 @@ parse_corelist_arg(const char *key __rte_unused, char *rval; unsigned int core = strtoul(token, &rval, 10); - params->wcmask |= 1ULL << core; + if (core >= RTE_MAX_LCORE) { + CR_SCHED_LOG(ERR, "Invalid worker core %u, should be smaller " + "than %u.", core, RTE_MAX_LCORE); + } + params->wc_pool[params->nb_wc++] = (uint16_t)core; token = (const char *)rval; if (token[0] == '\0') break; @@ -272,8 +363,8 @@ parse_name_arg(const char *key __rte_unused, struct rte_cryptodev_pmd_init_params *params = extra_args; if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) { - CS_LOG_ERR("Invalid name %s, should be less than " - "%u bytes.\n", value, + CR_SCHED_LOG(ERR, "Invalid name %s, should be less than " + "%u bytes.", value, RTE_CRYPTODEV_NAME_MAX_LEN - 1); return -EINVAL; } @@ -291,7 +382,7 @@ parse_slave_arg(const char *key __rte_unused, struct scheduler_init_params *param = extra_args; if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) { - CS_LOG_ERR("Too many slaves.\n"); + CR_SCHED_LOG(ERR, "Too many slaves."); return -ENOMEM; } @@ -312,18 +403,31 @@ parse_mode_arg(const char *key __rte_unused, if (strcmp(value, scheduler_mode_map[i].name) == 0) { param->mode = (enum rte_cryptodev_scheduler_mode) scheduler_mode_map[i].val; + break; } } if (i == RTE_DIM(scheduler_mode_map)) { - CS_LOG_ERR("Unrecognized input.\n"); + CR_SCHED_LOG(ERR, "Unrecognized input."); return -EINVAL; } return 0; } +static int +parse_mode_param_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + struct scheduler_init_params *param = extra_args; + + strlcpy(param->mode_param_str, value, + RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN); + + return 0; +} + static int parse_ordering_arg(const char *key __rte_unused, const char *value, void *extra_args) @@ -340,7 +444,7 @@ parse_ordering_arg(const char *key __rte_unused, } if (i == RTE_DIM(scheduler_ordering_map)) { - CS_LOG_ERR("Unrecognized input.\n"); + CR_SCHED_LOG(ERR, "Unrecognized input."); return -EINVAL; } @@ -370,13 +474,6 @@ scheduler_parse_init_params(struct scheduler_init_params *params, if (ret < 0) goto free_kvlist; - ret = rte_kvargs_process(kvlist, - RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG, - &parse_integer_arg, - ¶ms->def_p.max_nb_sessions); - if (ret < 0) - goto free_kvlist; - ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID, &parse_integer_arg, ¶ms->def_p.socket_id); @@ -411,6 +508,11 @@ scheduler_parse_init_params(struct scheduler_init_params *params, if (ret < 0) goto free_kvlist; + ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_MODE_PARAM, + &parse_mode_param_arg, params); + if (ret < 0) + goto free_kvlist; + ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_ORDERING, &parse_ordering_arg, 
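The coremask parser above walks the hex string from its least-significant digit and expands every nibble into up to four core ids; the corelist parser next to it accepts comma-separated decimal ids instead. A self-contained sketch of the nibble expansion (bound and names illustrative; the driver uses RTE_MAX_LCORE):

#include <ctype.h>
#include <stdint.h>
#include <string.h>

#define MAX_CORES 128

/* Expand a hex mask such as "f5" into core ids {0, 2, 4, 5, 6, 7}. */
static int
coremask_to_cores(const char *mask, uint16_t cores[MAX_CORES],
                uint16_t *nb_cores)
{
        uint16_t idx = 0;
        int i, j, val;

        if (mask[0] == '0' && (mask[1] == 'x' || mask[1] == 'X'))
                mask += 2;
        *nb_cores = 0;
        for (i = strlen(mask) - 1; i >= 0 && idx < MAX_CORES; i--) {
                unsigned char c = mask[i];

                if (!isxdigit(c))
                        return -1;
                val = isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
                /* rightmost digit covers cores 0..3, the next 4..7, ... */
                for (j = 0; j < 4 && idx < MAX_CORES; j++, idx++)
                        if (val & (1 << j))
                                cores[(*nb_cores)++] = idx;
        }
        return 0;
}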
params); if (ret < 0) @@ -430,8 +532,7 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev) "", sizeof(struct scheduler_ctx), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }, .nb_slaves = 0, .mode = CDEV_SCHED_MODE_NOT_SET, @@ -464,9 +565,8 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD, cryptodev_scheduler_pmd_drv); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id= " "slave="); RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv, - cryptodev_scheduler_pmd_drv, + cryptodev_scheduler_pmd_drv.driver, cryptodev_driver_id); diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c index 680c2afb..778071ca 100644 --- a/drivers/crypto/scheduler/scheduler_pmd_ops.c +++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c @@ -27,7 +27,7 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev) int status; if (!slave_dev) { - CS_LOG_ERR("Failed to locate slave dev %s", + CR_SCHED_LOG(ERR, "Failed to locate slave dev %s", dev_name); return -EINVAL; } @@ -36,16 +36,17 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev) scheduler_id, slave_dev->data->dev_id); if (status < 0) { - CS_LOG_ERR("Failed to attach slave cryptodev %u", + CR_SCHED_LOG(ERR, "Failed to attach slave cryptodev %u", slave_dev->data->dev_id); return status; } - CS_LOG_INFO("Scheduler %s attached slave %s\n", + CR_SCHED_LOG(INFO, "Scheduler %s attached slave %s", dev->data->name, sched_ctx->init_slave_names[i]); rte_free(sched_ctx->init_slave_names[i]); + sched_ctx->init_slave_names[i] = NULL; sched_ctx->nb_init_slaves -= 1; } @@ -101,7 +102,7 @@ update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id) if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), dev->data->dev_id, qp_id) < 0) { - CS_LOG_ERR("failed to create unique reorder buffer " + CR_SCHED_LOG(ERR, "failed to create unique reorder buffer" "name"); return -ENOMEM; } @@ -110,7 +111,7 @@ update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id) buff_size, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ); if (!qp_ctx->order_ring) { - CS_LOG_ERR("failed to create order ring"); + CR_SCHED_LOG(ERR, "failed to create order ring"); return -ENOMEM; } } else { @@ -144,18 +145,18 @@ scheduler_pmd_start(struct rte_cryptodev *dev) for (i = 0; i < dev->data->nb_queue_pairs; i++) { ret = update_order_ring(dev, i); if (ret < 0) { - CS_LOG_ERR("Failed to update reorder buffer"); + CR_SCHED_LOG(ERR, "Failed to update reorder buffer"); return ret; } } if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) { - CS_LOG_ERR("Scheduler mode is not set"); + CR_SCHED_LOG(ERR, "Scheduler mode is not set"); return -1; } if (!sched_ctx->nb_slaves) { - CS_LOG_ERR("No slave in the scheduler"); + CR_SCHED_LOG(ERR, "No slave in the scheduler"); return -1; } @@ -165,7 +166,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev) uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) { - CS_LOG_ERR("Failed to attach slave"); + CR_SCHED_LOG(ERR, "Failed to attach slave"); return -ENOTSUP; } } @@ -173,7 +174,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev) RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP); if ((*sched_ctx->ops.scheduler_start)(dev) < 0) { - CS_LOG_ERR("Scheduler start failed"); + CR_SCHED_LOG(ERR, "Scheduler start failed"); return 
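Putting the new arguments together, a scheduler vdev can be created with a mode and a mode parameter in one argument string. A hedged usage sketch via rte_vdev_init(); the slave name is illustrative and must match an existing cryptodev, and "packet-size-distr"/"threshold" follow the driver's mode and parameter names as understood here:

#include <rte_bus_vdev.h>

/* Create a packet-size-distr scheduler with a 128-byte threshold.
 * Multi-core mode would take coremask=/corelist= arguments instead.
 */
static int
create_sched_vdev(void)
{
        return rte_vdev_init("crypto_scheduler",
                        "slave=crypto_aesni_mb0,"
                        "mode=packet-size-distr,"
                        "mode_param=threshold:128");
}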
-1; } @@ -185,7 +186,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev) ret = (*slave_dev->dev_ops->dev_start)(slave_dev); if (ret < 0) { - CS_LOG_ERR("Failed to start slave dev %u", + CR_SCHED_LOG(ERR, "Failed to start slave dev %u", slave_dev_id); return ret; } @@ -261,11 +262,15 @@ scheduler_pmd_close(struct rte_cryptodev *dev) } } - if (sched_ctx->private_ctx) + if (sched_ctx->private_ctx) { rte_free(sched_ctx->private_ctx); + sched_ctx->private_ctx = NULL; + } - if (sched_ctx->capabilities) + if (sched_ctx->capabilities) { rte_free(sched_ctx->capabilities); + sched_ctx->capabilities = NULL; + } return 0; } @@ -316,8 +321,9 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *dev_info) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; - uint32_t max_nb_sessions = sched_ctx->nb_slaves ? - UINT32_MAX : RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS; + uint32_t max_nb_sess = 0; + uint16_t headroom_sz = 0; + uint16_t tailroom_sz = 0; uint32_t i; if (!dev_info) @@ -333,17 +339,32 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info slave_info; rte_cryptodev_info_get(slave_dev_id, &slave_info); - max_nb_sessions = slave_info.sym.max_nb_sessions < - max_nb_sessions ? - slave_info.sym.max_nb_sessions : - max_nb_sessions; + uint32_t dev_max_sess = slave_info.sym.max_nb_sessions; + if (dev_max_sess != 0) { + if (max_nb_sess == 0 || dev_max_sess < max_nb_sess) + max_nb_sess = slave_info.sym.max_nb_sessions; + } + + /* Get the max headroom requirement among slave PMDs */ + headroom_sz = slave_info.min_mbuf_headroom_req > + headroom_sz ? + slave_info.min_mbuf_headroom_req : + headroom_sz; + + /* Get the max tailroom requirement among slave PMDs */ + tailroom_sz = slave_info.min_mbuf_tailroom_req > + tailroom_sz ? 
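The reworked info_get aggregates slave limits with two different folds: the session limit takes the minimum of the slaves' non-zero values (zero now meaning "no limit"), while the mbuf headroom and tailroom requirements (the hunk completing just below) take the maximum, so the strictest slave wins. A sketch of the reductions with the loop body factored out (names illustrative):

#include <stdint.h>

/* Fold one slave's limits into the scheduler-wide aggregates. */
static void
fold_slave_limits(uint32_t slave_max_sess, uint16_t slave_headroom,
                uint16_t slave_tailroom, uint32_t *max_sess,
                uint16_t *headroom, uint16_t *tailroom)
{
        /* 0 means "unlimited": only non-zero limits constrain us */
        if (slave_max_sess != 0 &&
                        (*max_sess == 0 || slave_max_sess < *max_sess))
                *max_sess = slave_max_sess;

        /* room requirements are per-mbuf, so keep the largest */
        if (slave_headroom > *headroom)
                *headroom = slave_headroom;
        if (slave_tailroom > *tailroom)
                *tailroom = slave_tailroom;
}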
+ slave_info.min_mbuf_tailroom_req : + tailroom_sz; } dev_info->driver_id = dev->driver_id; dev_info->feature_flags = dev->feature_flags; dev_info->capabilities = sched_ctx->capabilities; dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs; - dev_info->sym.max_nb_sessions = max_nb_sessions; + dev_info->min_mbuf_headroom_req = headroom_sz; + dev_info->min_mbuf_tailroom_req = tailroom_sz; + dev_info->sym.max_nb_sessions = max_nb_sess; } /** Release queue pair */ @@ -381,7 +402,7 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "CRYTO_SCHE PMD %u QP %u", dev->data->dev_id, qp_id) < 0) { - CS_LOG_ERR("Failed to create unique queue pair name"); + CR_SCHED_LOG(ERR, "Failed to create unique queue pair name"); return -EFAULT; } @@ -419,14 +440,14 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, */ ret = scheduler_attach_init_slave(dev); if (ret < 0) { - CS_LOG_ERR("Failed to attach slave"); + CR_SCHED_LOG(ERR, "Failed to attach slave"); scheduler_pmd_qp_release(dev, qp_id); return ret; } if (*sched_ctx->ops.config_queue_pair) { if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) { - CS_LOG_ERR("Unable to configure queue pair"); + CR_SCHED_LOG(ERR, "Unable to configure queue pair"); return -1; } } @@ -434,22 +455,6 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, return 0; } -/** Start queue pair */ -static int -scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - /** Return the number of allocated queue pairs */ static uint32_t scheduler_pmd_qp_count(struct rte_cryptodev *dev) @@ -458,7 +463,7 @@ scheduler_pmd_qp_count(struct rte_cryptodev *dev) } static uint32_t -scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; uint8_t i = 0; @@ -468,7 +473,7 @@ scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) for (i = 0; i < sched_ctx->nb_slaves; i++) { uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id]; - uint32_t priv_sess_size = (*dev->dev_ops->session_get_size)(dev); + uint32_t priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev); if (max_priv_sess_size < priv_sess_size) max_priv_sess_size = priv_sess_size; @@ -478,7 +483,7 @@ scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) } static int -scheduler_pmd_session_configure(struct rte_cryptodev *dev, +scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -493,7 +498,7 @@ scheduler_pmd_session_configure(struct rte_cryptodev *dev, ret = rte_cryptodev_sym_session_init(slave->dev_id, sess, xform, mempool); if (ret < 0) { - CS_LOG_ERR("unabled to config sym session"); + CR_SCHED_LOG(ERR, "unable to config sym session"); return ret; } } @@ -503,7 +508,7 @@ scheduler_pmd_session_configure(struct rte_cryptodev *dev, /** Clear the memory of session so it doesn't leave key material behind */ static void -scheduler_pmd_session_clear(struct rte_cryptodev *dev, +scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev, struct 
rte_cryptodev_sym_session *sess) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; @@ -530,13 +535,11 @@ struct rte_cryptodev_ops scheduler_pmd_ops = { .queue_pair_setup = scheduler_pmd_qp_setup, .queue_pair_release = scheduler_pmd_qp_release, - .queue_pair_start = scheduler_pmd_qp_start, - .queue_pair_stop = scheduler_pmd_qp_stop, .queue_pair_count = scheduler_pmd_qp_count, - .session_get_size = scheduler_pmd_session_get_size, - .session_configure = scheduler_pmd_session_configure, - .session_clear = scheduler_pmd_session_clear, + .sym_session_get_size = scheduler_pmd_sym_session_get_size, + .sym_session_configure = scheduler_pmd_sym_session_configure, + .sym_session_clear = scheduler_pmd_sym_session_clear, }; struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops; diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h index dd7ca5a4..d5e602a2 100644 --- a/drivers/crypto/scheduler/scheduler_pmd_private.h +++ b/drivers/crypto/scheduler/scheduler_pmd_private.h @@ -12,25 +12,11 @@ #define PER_SLAVE_BUFF_SIZE (256) -#define CS_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \ - __func__, __LINE__, ## args) - -#ifdef RTE_LIBRTE_CRYPTO_SCHEDULER_DEBUG -#define CS_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \ - __func__, __LINE__, ## args) - -#define CS_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \ - __func__, __LINE__, ## args) -#else -#define CS_LOG_INFO(fmt, args...) -#define CS_LOG_DBG(fmt, args...) -#endif +extern int scheduler_logtype_driver; + +#define CR_SCHED_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, scheduler_logtype_driver, \ + "%s() line %u: "fmt "\n", __func__, __LINE__, ##args) struct scheduler_slave { uint8_t dev_id; @@ -60,7 +46,7 @@ struct scheduler_ctx { char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN]; char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN]; - uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES]; + uint16_t wc_pool[RTE_MAX_LCORE]; uint16_t nb_wc; char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]; diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c index c6e03e21..c7082a64 100644 --- a/drivers/crypto/scheduler/scheduler_roundrobin.c +++ b/drivers/crypto/scheduler/scheduler_roundrobin.c @@ -175,7 +175,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id) rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0, rte_socket_id()); if (!rr_qp_ctx) { - CS_LOG_ERR("failed allocate memory for private queue pair"); + CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair"); return -ENOMEM; } diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c index 27af69f2..a17536b7 100644 --- a/drivers/crypto/snow3g/rte_snow3g_pmd.c +++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #include @@ -79,7 +79,7 @@ snow3g_set_session_parameters(struct snow3g_session *sess, break; case SNOW3G_OP_NOT_SUPPORTED: default: - SNOW3G_LOG_ERR("Unsupported operation chain order parameter"); + SNOW3G_LOG(ERR, "Unsupported operation chain order parameter"); return -ENOTSUP; } @@ -89,7 +89,7 @@ snow3g_set_session_parameters(struct snow3g_session *sess, return -ENOTSUP; if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) { - SNOW3G_LOG_ERR("Wrong IV length"); + SNOW3G_LOG(ERR, "Wrong IV length"); return -EINVAL; } sess->cipher_iv_offset = cipher_xform->cipher.iv.offset; @@ -105,14 +105,14 @@ snow3g_set_session_parameters(struct snow3g_session *sess, return -ENOTSUP; if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) { - SNOW3G_LOG_ERR("Wrong digest length"); + SNOW3G_LOG(ERR, "Wrong digest length"); return -EINVAL; } sess->auth_op = auth_xform->auth.op; if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) { - SNOW3G_LOG_ERR("Wrong IV length"); + SNOW3G_LOG(ERR, "Wrong IV length"); return -EINVAL; } sess->auth_iv_offset = auth_xform->auth.iv.offset; @@ -137,7 +137,7 @@ snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op) if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { if (likely(op->sym->session != NULL)) sess = (struct snow3g_session *) - get_session_private_data( + get_sym_session_private_data( op->sym->session, cryptodev_driver_id); } else { @@ -159,8 +159,8 @@ snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op) sess = NULL; } op->sym->session = (struct rte_cryptodev_sym_session *)_sess; - set_session_private_data(op->sym->session, cryptodev_driver_id, - _sess_private_data); + set_sym_session_private_data(op->sym->session, + cryptodev_driver_id, _sess_private_data); } if (unlikely(sess == NULL)) @@ -216,7 +216,7 @@ process_snow3g_cipher_op_bit(struct rte_crypto_op *op, src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *); if (op->sym->m_dst == NULL) { op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - SNOW3G_LOG_ERR("bit-level in-place not supported\n"); + SNOW3G_LOG(ERR, "bit-level in-place not supported\n"); return 0; } dst = 
rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *); @@ -246,7 +246,7 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops, /* Data must be byte aligned */ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) { ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - SNOW3G_LOG_ERR("Offset"); + SNOW3G_LOG(ERR, "Offset"); break; } @@ -295,7 +295,7 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session, (ops[i]->sym->m_dst != NULL && !rte_pktmbuf_is_contiguous( ops[i]->sym->m_dst))) { - SNOW3G_LOG_ERR("PMD supports only contiguous mbufs, " + SNOW3G_LOG(ERR, "PMD supports only contiguous mbufs, " "op (%p) provides noncontiguous mbuf as " "source/destination buffer.\n", ops[i]); ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; @@ -339,7 +339,7 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session, if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { memset(session, 0, sizeof(struct snow3g_session)); memset(ops[i]->sym->session, 0, - rte_cryptodev_get_header_session_size()); + rte_cryptodev_sym_get_header_session_size()); rte_mempool_put(qp->sess_mp, session); rte_mempool_put(qp->sess_mp, ops[i]->sym->session); ops[i]->sym->session = NULL; @@ -537,7 +537,7 @@ cryptodev_snow3g_create(const char *name, dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); if (dev == NULL) { - SNOW3G_LOG_ERR("failed to create cryptodev vdev"); + SNOW3G_LOG(ERR, "failed to create cryptodev vdev"); goto init_error; } @@ -555,11 +555,10 @@ cryptodev_snow3g_create(const char *name, internals = dev->data->dev_private; internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; return 0; init_error: - SNOW3G_LOG_ERR("driver %s: cryptodev_snow3g_create failed", + SNOW3G_LOG(ERR, "driver %s: cryptodev_snow3g_create failed", init_params->name); cryptodev_snow3g_remove(vdev); @@ -573,8 +572,7 @@ cryptodev_snow3g_probe(struct rte_vdev_device *vdev) "", sizeof(struct snow3g_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name; const char *input_args; @@ -617,7 +615,11 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv); RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id="); -RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv, cryptodev_snow3g_pmd_drv, - cryptodev_driver_id); +RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv, + cryptodev_snow3g_pmd_drv.driver, cryptodev_driver_id); + +RTE_INIT(snow3g_init_log) +{ + snow3g_logtype_driver = rte_log_register("pmd.crypto.snow3g"); +} diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c index f60b4759..cfbc9522 100644 --- a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c +++ b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #include @@ -130,7 +130,8 @@ snow3g_pmd_info_get(struct rte_cryptodev *dev, if (dev_info != NULL) { dev_info->driver_id = dev->driver_id; dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; dev_info->feature_flags = 
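Both drivers in this patch move from compile-time RTE_LOG wrappers to a dynamic logtype registered at constructor time, as RTE_INIT(snow3g_init_log) above shows. A minimal sketch of the pattern for a hypothetical driver (logtype name and default level illustrative):

#include <rte_log.h>

int my_pmd_logtype;     /* one dynamic logtype per driver */

#define MY_PMD_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, my_pmd_logtype, \
                "%s() line %u: " fmt "\n", __func__, __LINE__, ## args)

/* Runs as a constructor before main(); rte_log_register() returns
 * a usable logtype id or a negative error.
 */
RTE_INIT(my_pmd_init_log)
{
        my_pmd_logtype = rte_log_register("pmd.crypto.mydrv");
        if (my_pmd_logtype >= 0)
                rte_log_set_level(my_pmd_logtype, RTE_LOG_NOTICE);
}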
dev->feature_flags; dev_info->capabilities = snow3g_pmd_capabilities; } @@ -172,13 +173,13 @@ snow3g_pmd_qp_create_processed_ops_ring(struct snow3g_qp *qp, r = rte_ring_lookup(qp->name); if (r) { if (rte_ring_get_size(r) >= ring_size) { - SNOW3G_LOG_INFO("Reusing existing ring %s" + SNOW3G_LOG(INFO, "Reusing existing ring %s" " for processed packets", qp->name); return r; } - SNOW3G_LOG_ERR("Unable to reuse existing ring %s" + SNOW3G_LOG(ERR, "Unable to reuse existing ring %s" " for processed packets", qp->name); return NULL; @@ -230,22 +231,6 @@ qp_setup_cleanup: return -1; } -/** Start queue pair */ -static int -snow3g_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -snow3g_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - /** Return the number of allocated queue pairs */ static uint32_t snow3g_pmd_qp_count(struct rte_cryptodev *dev) @@ -255,14 +240,14 @@ snow3g_pmd_qp_count(struct rte_cryptodev *dev) /** Returns the size of the SNOW 3G session structure */ static unsigned -snow3g_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +snow3g_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct snow3g_session); } /** Configure a SNOW 3G session from a crypto xform chain */ static int -snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, +snow3g_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -271,26 +256,26 @@ snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, int ret; if (unlikely(sess == NULL)) { - SNOW3G_LOG_ERR("invalid session struct"); + SNOW3G_LOG(ERR, "invalid session struct"); return -EINVAL; } if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( + SNOW3G_LOG(ERR, "Couldn't get object from session mempool"); return -ENOMEM; } ret = snow3g_set_session_parameters(sess_private_data, xform); if (ret != 0) { - SNOW3G_LOG_ERR("failed configure session parameters"); + SNOW3G_LOG(ERR, "failed configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -298,17 +283,17 @@ snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, /** Clear the memory of session so it doesn't leave key material behind */ static void -snow3g_pmd_session_clear(struct rte_cryptodev *dev, +snow3g_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { memset(sess_priv, 0, sizeof(struct snow3g_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -326,13 +311,11 @@ struct rte_cryptodev_ops snow3g_pmd_ops = { .queue_pair_setup = snow3g_pmd_qp_setup, .queue_pair_release = snow3g_pmd_qp_release, - .queue_pair_start = snow3g_pmd_qp_start, - .queue_pair_stop = snow3g_pmd_qp_stop, .queue_pair_count = snow3g_pmd_qp_count, - 
.session_get_size = snow3g_pmd_session_get_size, - .session_configure = snow3g_pmd_session_configure, - .session_clear = snow3g_pmd_session_clear + .sym_session_get_size = snow3g_pmd_sym_session_get_size, + .sym_session_configure = snow3g_pmd_sym_session_configure, + .sym_session_clear = snow3g_pmd_sym_session_clear }; struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops; diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h index eea900e0..b7807b62 100644 --- a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h +++ b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #ifndef _RTE_SNOW3G_PMD_PRIVATE_H_ @@ -10,25 +10,13 @@ #define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g /**< SNOW 3G PMD device name */ -#define SNOW3G_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \ - __func__, __LINE__, ## args) - -#ifdef RTE_LIBRTE_SNOW3G_DEBUG -#define SNOW3G_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \ - __func__, __LINE__, ## args) - -#define SNOW3G_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \ - __func__, __LINE__, ## args) -#else -#define SNOW3G_LOG_INFO(fmt, args...) -#define SNOW3G_LOG_DBG(fmt, args...) -#endif +/** SNOW 3G PMD LOGTYPE DRIVER */ +int snow3g_logtype_driver; + +#define SNOW3G_LOG(level, fmt, ...) \ + rte_log(RTE_LOG_ ## level, snow3g_logtype_driver, \ + "%s() line %u: " fmt "\n", __func__, __LINE__, \ + ## __VA_ARGS__) #define SNOW3G_DIGEST_LENGTH 4 @@ -36,8 +24,6 @@ struct snow3g_private { unsigned max_nb_queue_pairs; /**< Max number of queue pairs supported by device */ - unsigned max_nb_sessions; - /**< Max number of sessions supported by device */ }; /** SNOW 3G buffer queue pair */ diff --git a/drivers/crypto/virtio/Makefile b/drivers/crypto/virtio/Makefile new file mode 100644 index 00000000..be7b828f --- /dev/null +++ b/drivers/crypto/virtio/Makefile @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_virtio_crypto.a + +# +# include virtio_crypto.h +# +CFLAGS += -I$(RTE_SDK)/lib/librte_vhost +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_virtio_crypto_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtqueue.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_pci.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_cryptodev.c + +# this lib depends upon: +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool +LDLIBS += -lrte_cryptodev +LDLIBS += -lrte_pci -lrte_bus_pci + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/crypto/virtio/meson.build b/drivers/crypto/virtio/meson.build new file mode 100644 index 00000000..b15b3f9f --- /dev/null +++ b/drivers/crypto/virtio/meson.build @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. 
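The session_* to sym_session_* renames above track the cryptodev API split between symmetric and (future) asymmetric sessions. A sketch of the per-driver private-data lifecycle these ops implement, using the get/set helpers visible in the snow3g hunks (driver structure and names illustrative):

#include <errno.h>
#include <string.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>

struct my_session { uint8_t key[64]; };        /* illustrative */

/* configure: take private storage from the mempool and attach it */
static int
my_sym_session_configure(struct rte_cryptodev *dev,
                struct rte_crypto_sym_xform *xform __rte_unused,
                struct rte_cryptodev_sym_session *sess,
                struct rte_mempool *mp)
{
        void *priv;

        if (rte_mempool_get(mp, &priv))
                return -ENOMEM;
        /* ... parse xform into priv ... */
        set_sym_session_private_data(sess, dev->driver_id, priv);
        return 0;
}

/* clear: zero the key material, then hand the storage back */
static void
my_sym_session_clear(struct rte_cryptodev *dev,
                struct rte_cryptodev_sym_session *sess)
{
        void *priv = get_sym_session_private_data(sess, dev->driver_id);

        if (priv) {
                memset(priv, 0, sizeof(struct my_session));
                rte_mempool_put(rte_mempool_from_obj(priv), priv);
                set_sym_session_private_data(sess, dev->driver_id, NULL);
        }
}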
+ +includes += include_directories('../../../lib/librte_vhost') +deps += 'bus_pci' +name = 'virtio_crypto' +sources = files('virtio_cryptodev.c', 'virtio_pci.c', + 'virtio_rxtx.c', 'virtqueue.c') diff --git a/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map b/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map new file mode 100644 index 00000000..de8e412f --- /dev/null +++ b/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map @@ -0,0 +1,3 @@ +DPDK_18.05 { + local: *; +}; diff --git a/drivers/crypto/virtio/virtio_crypto_algs.h b/drivers/crypto/virtio/virtio_crypto_algs.h new file mode 100644 index 00000000..4c44af37 --- /dev/null +++ b/drivers/crypto/virtio/virtio_crypto_algs.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#ifndef _VIRTIO_CRYPTO_ALGS_H_ +#define _VIRTIO_CRYPTO_ALGS_H_ + +#include + +#include "virtio_crypto.h" + +struct virtio_crypto_session { + uint64_t session_id; + + struct { + uint16_t offset; + uint16_t length; + } iv; + + struct { + uint32_t length; + phys_addr_t phys_addr; + } aad; + + struct virtio_crypto_op_ctrl_req ctrl; +}; + +#endif /* _VIRTIO_CRYPTO_ALGS_H_ */ diff --git a/drivers/crypto/virtio/virtio_crypto_capabilities.h b/drivers/crypto/virtio/virtio_crypto_capabilities.h new file mode 100644 index 00000000..03c30dee --- /dev/null +++ b/drivers/crypto/virtio/virtio_crypto_capabilities.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#ifndef _VIRTIO_CRYPTO_CAPABILITIES_H_ +#define _VIRTIO_CRYPTO_CAPABILITIES_H_ + +#define VIRTIO_SYM_CAPABILITIES \ + { /* SHA1 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 1, \ + .max = 20, \ + .increment = 1 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* AES CBC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_AES_CBC, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + } + +#endif /* _VIRTIO_CRYPTO_CAPABILITIES_H_ */ diff --git a/drivers/crypto/virtio/virtio_cryptodev.c b/drivers/crypto/virtio/virtio_cryptodev.c new file mode 100644 index 00000000..568b5a40 --- /dev/null +++ b/drivers/crypto/virtio/virtio_cryptodev.c @@ -0,0 +1,1505 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. 
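virtio_crypto_capabilities.h advertises exactly two algorithms: SHA1-HMAC authentication and AES-CBC ciphering. An application can probe for them through the generic capability API; a hedged usage sketch, assuming the standard rte_cryptodev capability helpers and an illustrative key/IV size:

#include <rte_cryptodev.h>

/* Check whether dev_id supports AES-CBC with a 32-byte key. */
static int
supports_aes_cbc_256(uint8_t dev_id)
{
        const struct rte_cryptodev_symmetric_capability *cap;
        struct rte_cryptodev_sym_capability_idx idx = {
                .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
                .algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
        };

        cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
        if (cap == NULL)
                return 0;
        /* returns 0 when key size 32 and IV size 16 are in range */
        return rte_cryptodev_sym_capability_check_cipher(cap, 32, 16) == 0;
}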
+ */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "virtio_cryptodev.h" +#include "virtqueue.h" +#include "virtio_crypto_algs.h" +#include "virtio_crypto_capabilities.h" + +int virtio_crypto_logtype_init; +int virtio_crypto_logtype_session; +int virtio_crypto_logtype_rx; +int virtio_crypto_logtype_tx; +int virtio_crypto_logtype_driver; + +static int virtio_crypto_dev_configure(struct rte_cryptodev *dev, + struct rte_cryptodev_config *config); +static int virtio_crypto_dev_start(struct rte_cryptodev *dev); +static void virtio_crypto_dev_stop(struct rte_cryptodev *dev); +static int virtio_crypto_dev_close(struct rte_cryptodev *dev); +static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev, + struct rte_cryptodev_info *dev_info); +static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev, + struct rte_cryptodev_stats *stats); +static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev); +static int virtio_crypto_qp_setup(struct rte_cryptodev *dev, + uint16_t queue_pair_id, + const struct rte_cryptodev_qp_conf *qp_conf, + int socket_id, + struct rte_mempool *session_pool); +static int virtio_crypto_qp_release(struct rte_cryptodev *dev, + uint16_t queue_pair_id); +static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev); +static unsigned int virtio_crypto_sym_get_session_private_size( + struct rte_cryptodev *dev); +static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev, + struct rte_cryptodev_sym_session *sess); +static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *session, + struct rte_mempool *mp); + +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id pci_id_virtio_crypto_map[] = { + { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID, + VIRTIO_CRYPTO_PCI_DEVICEID) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static const struct rte_cryptodev_capabilities virtio_capabilities[] = { + VIRTIO_SYM_CAPABILITIES, + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +uint8_t cryptodev_virtio_driver_id; + +#define NUM_ENTRY_SYM_CREATE_SESSION 4 + +static int +virtio_crypto_send_command(struct virtqueue *vq, + struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key, + uint8_t *auth_key, struct virtio_crypto_session *session) +{ + uint8_t idx = 0; + uint8_t needed = 1; + uint32_t head = 0; + uint32_t len_cipher_key = 0; + uint32_t len_auth_key = 0; + uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req); + uint32_t len_session_input = sizeof(struct virtio_crypto_session_input); + uint32_t len_total = 0; + uint32_t input_offset = 0; + void *virt_addr_started = NULL; + phys_addr_t phys_addr_started; + struct vring_desc *desc; + uint32_t desc_offset; + struct virtio_crypto_session_input *input; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (session == NULL) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL."); + return -EINVAL; + } + /* cipher only is supported, it is available if auth_key is NULL */ + if (!cipher_key) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL."); + return -EINVAL; + } + + head = vq->vq_desc_head_idx; + VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p", + head, vq); + + if (vq->vq_free_cnt < needed) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough entry"); + return -ENOSPC; + } + + /* calculate the length of cipher key */ + if (cipher_key) { + switch (ctrl->u.sym_create_session.op_type) { + case 
VIRTIO_CRYPTO_SYM_OP_CIPHER: + len_cipher_key + = ctrl->u.sym_create_session.u.cipher + .para.keylen; + break; + case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING: + len_cipher_key + = ctrl->u.sym_create_session.u.chain + .para.cipher_param.keylen; + break; + default: + VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type"); + return -EINVAL; + } + } + + /* calculate the length of auth key */ + if (auth_key) { + len_auth_key = + ctrl->u.sym_create_session.u.chain.para.u.mac_param + .auth_key_len; + } + + /* + * malloc memory to store indirect vring_desc entries, including + * ctrl request, cipher key, auth key, session input and desc vring + */ + desc_offset = len_ctrl_req + len_cipher_key + len_auth_key + + len_session_input; + virt_addr_started = rte_malloc(NULL, + desc_offset + NUM_ENTRY_SYM_CREATE_SESSION + * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE); + if (virt_addr_started == NULL) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory"); + return -ENOSPC; + } + phys_addr_started = rte_malloc_virt2iova(virt_addr_started); + + /* address to store indirect vring desc entries */ + desc = (struct vring_desc *) + ((uint8_t *)virt_addr_started + desc_offset); + + /* ctrl req part */ + memcpy(virt_addr_started, ctrl, len_ctrl_req); + desc[idx].addr = phys_addr_started; + desc[idx].len = len_ctrl_req; + desc[idx].flags = VRING_DESC_F_NEXT; + desc[idx].next = idx + 1; + idx++; + len_total += len_ctrl_req; + input_offset += len_ctrl_req; + + /* cipher key part */ + if (len_cipher_key > 0) { + memcpy((uint8_t *)virt_addr_started + len_total, + cipher_key, len_cipher_key); + + desc[idx].addr = phys_addr_started + len_total; + desc[idx].len = len_cipher_key; + desc[idx].flags = VRING_DESC_F_NEXT; + desc[idx].next = idx + 1; + idx++; + len_total += len_cipher_key; + input_offset += len_cipher_key; + } + + /* auth key part */ + if (len_auth_key > 0) { + memcpy((uint8_t *)virt_addr_started + len_total, + auth_key, len_auth_key); + + desc[idx].addr = phys_addr_started + len_total; + desc[idx].len = len_auth_key; + desc[idx].flags = VRING_DESC_F_NEXT; + desc[idx].next = idx + 1; + idx++; + len_total += len_auth_key; + input_offset += len_auth_key; + } + + /* input part */ + input = (struct virtio_crypto_session_input *) + ((uint8_t *)virt_addr_started + input_offset); + input->status = VIRTIO_CRYPTO_ERR; + input->session_id = ~0ULL; + desc[idx].addr = phys_addr_started + len_total; + desc[idx].len = len_session_input; + desc[idx].flags = VRING_DESC_F_WRITE; + idx++; + + /* use a single desc entry */ + vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset; + vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc); + vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT; + vq->vq_free_cnt--; + + vq->vq_desc_head_idx = vq->vq_ring.desc[head].next; + + vq_update_avail_ring(vq, head); + vq_update_avail_idx(vq); + + VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d", + vq->vq_queue_index); + + virtqueue_notify(vq); + + rte_rmb(); + while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) { + rte_rmb(); + usleep(100); + } + + while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) { + uint32_t idx, desc_idx, used_idx; + struct vring_used_elem *uep; + + used_idx = (uint32_t)(vq->vq_used_cons_idx + & (vq->vq_nentries - 1)); + uep = &vq->vq_ring.used->ring[used_idx]; + idx = (uint32_t) uep->id; + desc_idx = idx; + + while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) { + desc_idx = vq->vq_ring.desc[desc_idx].next; + vq->vq_free_cnt++; + } + + vq->vq_ring.desc[desc_idx].next = 
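The session-creation request above packs everything into one rte_malloc'd region referenced by a single VRING_DESC_F_INDIRECT descriptor, so creating a session consumes only one slot in the control virtqueue. The layout, as computed by the offsets in the code, sketched as a comment:

/*
 * One contiguous allocation, referenced by one indirect vring entry:
 *
 *   offset 0                  : virtio_crypto_op_ctrl_req    (read-only)
 *   + len_ctrl_req            : cipher key                   (read-only)
 *   + len_cipher_key          : auth key, if any             (read-only)
 *   + len_auth_key            : virtio_crypto_session_input  (writable)
 *   + len_session_input       : vring_desc[4] indirect table
 *
 * Each read-only entry chains forward with VRING_DESC_F_NEXT; the
 * session input terminates the chain with VRING_DESC_F_WRITE so the
 * device can return status and the new session id in place.
 */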
vq->vq_desc_head_idx; + vq->vq_desc_head_idx = idx; + + vq->vq_used_cons_idx++; + vq->vq_free_cnt++; + } + + VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n" + "vq->vq_desc_head_idx=%d", + vq->vq_free_cnt, vq->vq_desc_head_idx); + + /* get the result */ + if (input->status != VIRTIO_CRYPTO_OK) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! " + "status=%u, session_id=%" PRIu64 "", + input->status, input->session_id); + rte_free(virt_addr_started); + ret = -1; + } else { + session->session_id = input->session_id; + + VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, " + "session_id=%" PRIu64 "", input->session_id); + rte_free(virt_addr_started); + ret = 0; + } + + return ret; +} + +void +virtio_crypto_queue_release(struct virtqueue *vq) +{ + struct virtio_crypto_hw *hw; + + PMD_INIT_FUNC_TRACE(); + + if (vq) { + hw = vq->hw; + /* Select and deactivate the queue */ + VTPCI_OPS(hw)->del_queue(hw, vq); + + rte_memzone_free(vq->mz); + rte_mempool_free(vq->mpool); + rte_free(vq); + } +} + +#define MPOOL_MAX_NAME_SZ 32 + +int +virtio_crypto_queue_setup(struct rte_cryptodev *dev, + int queue_type, + uint16_t vtpci_queue_idx, + uint16_t nb_desc, + int socket_id, + struct virtqueue **pvq) +{ + char vq_name[VIRTQUEUE_MAX_NAME_SZ]; + char mpool_name[MPOOL_MAX_NAME_SZ]; + const struct rte_memzone *mz; + unsigned int vq_size, size; + struct virtio_crypto_hw *hw = dev->data->dev_private; + struct virtqueue *vq = NULL; + uint32_t i = 0; + uint32_t j; + + PMD_INIT_FUNC_TRACE(); + + VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx); + + /* + * Read the virtqueue size from the Queue Size field + * Always power of 2 and if 0 virtqueue does not exist + */ + vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx); + if (vq_size == 0) { + VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist"); + return -EINVAL; + } + VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size); + + if (!rte_is_power_of_2(vq_size)) { + VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not powerof 2"); + return -EINVAL; + } + + if (queue_type == VTCRYPTO_DATAQ) { + snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d", + dev->data->dev_id, vtpci_queue_idx); + snprintf(mpool_name, sizeof(mpool_name), + "dev%d_dataqueue%d_mpool", + dev->data->dev_id, vtpci_queue_idx); + } else if (queue_type == VTCRYPTO_CTRLQ) { + snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue", + dev->data->dev_id); + snprintf(mpool_name, sizeof(mpool_name), + "dev%d_controlqueue_mpool", + dev->data->dev_id); + } + size = RTE_ALIGN_CEIL(sizeof(*vq) + + vq_size * sizeof(struct vq_desc_extra), + RTE_CACHE_LINE_SIZE); + vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE, + socket_id); + if (vq == NULL) { + VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue"); + return -ENOMEM; + } + + if (queue_type == VTCRYPTO_DATAQ) { + /* pre-allocate a mempool and use it in the data plane to + * improve performance + */ + vq->mpool = rte_mempool_lookup(mpool_name); + if (vq->mpool == NULL) + vq->mpool = rte_mempool_create(mpool_name, + vq_size, + sizeof(struct virtio_crypto_op_cookie), + RTE_CACHE_LINE_SIZE, 0, + NULL, NULL, NULL, NULL, socket_id, + 0); + if (!vq->mpool) { + VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD " + "Cannot create mempool"); + goto mpool_create_err; + } + for (i = 0; i < vq_size; i++) { + vq->vq_descx[i].cookie = + rte_zmalloc("crypto PMD op cookie pointer", + sizeof(struct virtio_crypto_op_cookie), + RTE_CACHE_LINE_SIZE); + if (vq->vq_descx[i].cookie == NULL) { + VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to " 
+ "alloc mem for cookie"); + goto cookie_alloc_err; + } + } + } + + vq->hw = hw; + vq->dev_id = dev->data->dev_id; + vq->vq_queue_index = vtpci_queue_idx; + vq->vq_nentries = vq_size; + + /* + * Using part of the vring entries is permitted, but the maximum + * is vq_size + */ + if (nb_desc == 0 || nb_desc > vq_size) + nb_desc = vq_size; + vq->vq_free_cnt = nb_desc; + + /* + * Reserve a memzone for vring elements + */ + size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN); + vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN); + VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d", + (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq", + size, vq->vq_ring_size); + + mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, + socket_id, 0, VIRTIO_PCI_VRING_ALIGN); + if (mz == NULL) { + if (rte_errno == EEXIST) + mz = rte_memzone_lookup(vq_name); + if (mz == NULL) { + VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory"); + goto mz_reserve_err; + } + } + + /* + * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit, + * and only accepts 32 bit page frame number. + * Check if the allocated physical memory exceeds 16TB. + */ + if ((mz->phys_addr + vq->vq_ring_size - 1) + >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) { + VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be " + "above 16TB!"); + goto vring_addr_err; + } + + memset(mz->addr, 0, sizeof(mz->len)); + vq->mz = mz; + vq->vq_ring_mem = mz->phys_addr; + vq->vq_ring_virt_mem = mz->addr; + VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64, + (uint64_t)mz->phys_addr); + VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64, + (uint64_t)(uintptr_t)mz->addr); + + *pvq = vq; + + return 0; + +vring_addr_err: + rte_memzone_free(mz); +mz_reserve_err: +cookie_alloc_err: + rte_mempool_free(vq->mpool); + if (i != 0) { + for (j = 0; j < i; j++) + rte_free(vq->vq_descx[j].cookie); + } +mpool_create_err: + rte_free(vq); + return -ENOMEM; +} + +static int +virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx) +{ + int ret; + struct virtqueue *vq; + struct virtio_crypto_hw *hw = dev->data->dev_private; + + /* if virtio device has started, do not touch the virtqueues */ + if (dev->data->dev_started) + return 0; + + PMD_INIT_FUNC_TRACE(); + + ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx, + 0, SOCKET_ID_ANY, &vq); + if (ret < 0) { + VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed"); + return ret; + } + + hw->cvq = vq; + + return 0; +} + +static void +virtio_crypto_free_queues(struct rte_cryptodev *dev) +{ + unsigned int i; + struct virtio_crypto_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + /* control queue release */ + virtio_crypto_queue_release(hw->cvq); + + /* data queue release */ + for (i = 0; i < hw->max_dataqueues; i++) + virtio_crypto_queue_release(dev->data->queue_pairs[i]); +} + +static int +virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused) +{ + return 0; +} + +/* + * dev_ops for virtio, bare necessities for basic operation + */ +static struct rte_cryptodev_ops virtio_crypto_dev_ops = { + /* Device related operations */ + .dev_configure = virtio_crypto_dev_configure, + .dev_start = virtio_crypto_dev_start, + .dev_stop = virtio_crypto_dev_stop, + .dev_close = virtio_crypto_dev_close, + .dev_infos_get = virtio_crypto_dev_info_get, + + .stats_get = virtio_crypto_dev_stats_get, + .stats_reset = virtio_crypto_dev_stats_reset, + + .queue_pair_setup = virtio_crypto_qp_setup, + .queue_pair_release = virtio_crypto_qp_release, + 
.queue_pair_count = NULL, + + /* Crypto related operations */ + .sym_session_get_size = virtio_crypto_sym_get_session_private_size, + .sym_session_configure = virtio_crypto_sym_configure_session, + .sym_session_clear = virtio_crypto_sym_clear_session +}; + +static void +virtio_crypto_update_stats(struct rte_cryptodev *dev, + struct rte_cryptodev_stats *stats) +{ + unsigned int i; + struct virtio_crypto_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (stats == NULL) { + VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer"); + return; + } + + for (i = 0; i < hw->max_dataqueues; i++) { + const struct virtqueue *data_queue + = dev->data->queue_pairs[i]; + if (data_queue == NULL) + continue; + + stats->enqueued_count += data_queue->packets_sent_total; + stats->enqueue_err_count += data_queue->packets_sent_failed; + + stats->dequeued_count += data_queue->packets_received_total; + stats->dequeue_err_count + += data_queue->packets_received_failed; + } +} + +static void +virtio_crypto_dev_stats_get(struct rte_cryptodev *dev, + struct rte_cryptodev_stats *stats) +{ + PMD_INIT_FUNC_TRACE(); + + virtio_crypto_update_stats(dev, stats); +} + +static void +virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev) +{ + unsigned int i; + struct virtio_crypto_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < hw->max_dataqueues; i++) { + struct virtqueue *data_queue = dev->data->queue_pairs[i]; + if (data_queue == NULL) + continue; + + data_queue->packets_sent_total = 0; + data_queue->packets_sent_failed = 0; + + data_queue->packets_received_total = 0; + data_queue->packets_received_failed = 0; + } +} + +static int +virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id, + const struct rte_cryptodev_qp_conf *qp_conf, + int socket_id, + struct rte_mempool *session_pool __rte_unused) +{ + int ret; + struct virtqueue *vq; + + PMD_INIT_FUNC_TRACE(); + + /* if virtio dev is started, do not touch the virtqueues */ + if (dev->data->dev_started) + return 0; + + ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id, + qp_conf->nb_descriptors, socket_id, &vq); + if (ret < 0) { + VIRTIO_CRYPTO_INIT_LOG_ERR( + "virtio crypto data queue initialization failed\n"); + return ret; + } + + dev->data->queue_pairs[queue_pair_id] = vq; + + return 0; +} + +static int +virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) +{ + struct virtqueue *vq + = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id]; + + PMD_INIT_FUNC_TRACE(); + + if (vq == NULL) { + VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed"); + return 0; + } + + virtio_crypto_queue_release(vq); + return 0; +} + +static int +virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features) +{ + uint64_t host_features; + + PMD_INIT_FUNC_TRACE(); + + /* Prepare guest_features: feature that driver wants to support */ + VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64, + req_features); + + /* Read device(host) feature bits */ + host_features = VTPCI_OPS(hw)->get_features(hw); + VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64, + host_features); + + /* + * Negotiate features: Subset of device feature bits are written back + * guest feature bits. 
+ */ + hw->guest_features = req_features; + hw->guest_features = vtpci_cryptodev_negotiate_features(hw, + host_features); + VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64, + hw->guest_features); + + if (hw->modern) { + if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) { + VIRTIO_CRYPTO_INIT_LOG_ERR( + "VIRTIO_F_VERSION_1 features is not enabled."); + return -1; + } + vtpci_cryptodev_set_status(hw, + VIRTIO_CONFIG_STATUS_FEATURES_OK); + if (!(vtpci_cryptodev_get_status(hw) & + VIRTIO_CONFIG_STATUS_FEATURES_OK)) { + VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK " + "status!"); + return -1; + } + } + + hw->req_guest_features = req_features; + + return 0; +} + +/* reset device and renegotiate features if needed */ +static int +virtio_crypto_init_device(struct rte_cryptodev *cryptodev, + uint64_t req_features) +{ + struct virtio_crypto_hw *hw = cryptodev->data->dev_private; + struct virtio_crypto_config local_config; + struct virtio_crypto_config *config = &local_config; + + PMD_INIT_FUNC_TRACE(); + + /* Reset the device although not necessary at startup */ + vtpci_cryptodev_reset(hw); + + /* Tell the host we've noticed this device. */ + vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK); + + /* Tell the host we've known how to drive the device. */ + vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER); + if (virtio_negotiate_features(hw, req_features) < 0) + return -1; + + /* Get status of the device */ + vtpci_read_cryptodev_config(hw, + offsetof(struct virtio_crypto_config, status), + &config->status, sizeof(config->status)); + if (config->status != VIRTIO_CRYPTO_S_HW_READY) { + VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is " + "not ready"); + return -1; + } + + /* Get number of data queues */ + vtpci_read_cryptodev_config(hw, + offsetof(struct virtio_crypto_config, max_dataqueues), + &config->max_dataqueues, + sizeof(config->max_dataqueues)); + hw->max_dataqueues = config->max_dataqueues; + + VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d", + hw->max_dataqueues); + + return 0; +} + +/* + * This function is based on probe() function + * It returns 0 on success. 
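+ * rte_cryptodev_pmd_create() allocates both the rte_cryptodev and its
+ * virtio_crypto_hw private data (sized via init_params).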
+ */ +static int +crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev, + struct rte_cryptodev_pmd_init_params *init_params) +{ + struct rte_cryptodev *cryptodev; + struct virtio_crypto_hw *hw; + + PMD_INIT_FUNC_TRACE(); + + cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device, + init_params); + if (cryptodev == NULL) + return -ENODEV; + + cryptodev->driver_id = cryptodev_virtio_driver_id; + cryptodev->dev_ops = &virtio_crypto_dev_ops; + + cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst; + cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst; + + cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | + RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING; + + hw = cryptodev->data->dev_private; + hw->dev_id = cryptodev->data->dev_id; + hw->virtio_dev_capabilities = virtio_capabilities; + + VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x", + cryptodev->data->dev_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + + /* pci device init */ + if (vtpci_cryptodev_init(pci_dev, hw)) + return -1; + + if (virtio_crypto_init_device(cryptodev, + VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0) + return -1; + + return 0; +} + +static int +virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev) +{ + struct virtio_crypto_hw *hw = cryptodev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + return -EPERM; + + if (cryptodev->data->dev_started) { + virtio_crypto_dev_stop(cryptodev); + virtio_crypto_dev_close(cryptodev); + } + + cryptodev->dev_ops = NULL; + cryptodev->enqueue_burst = NULL; + cryptodev->dequeue_burst = NULL; + + /* release control queue */ + virtio_crypto_queue_release(hw->cvq); + + rte_free(cryptodev->data); + cryptodev->data = NULL; + + VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed"); + + return 0; +} + +static int +virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev, + struct rte_cryptodev_config *config __rte_unused) +{ + struct virtio_crypto_hw *hw = cryptodev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (virtio_crypto_init_device(cryptodev, + VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0) + return -1; + + /* setup control queue + * [0, 1, ... 
,(config->max_dataqueues - 1)] are data queues + * config->max_dataqueues is the control queue + */ + if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) { + VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error"); + return -1; + } + virtio_crypto_ctrlq_start(cryptodev); + + return 0; +} + +static void +virtio_crypto_dev_stop(struct rte_cryptodev *dev) +{ + struct virtio_crypto_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop"); + + vtpci_cryptodev_reset(hw); + + virtio_crypto_dev_free_mbufs(dev); + virtio_crypto_free_queues(dev); + + dev->data->dev_started = 0; +} + +static int +virtio_crypto_dev_start(struct rte_cryptodev *dev) +{ + struct virtio_crypto_hw *hw = dev->data->dev_private; + + if (dev->data->dev_started) + return 0; + + /* Do final configuration before queue engine starts */ + virtio_crypto_dataq_start(dev); + vtpci_cryptodev_reinit_complete(hw); + + dev->data->dev_started = 1; + + return 0; +} + +static void +virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev) +{ + uint32_t i; + struct virtio_crypto_hw *hw = dev->data->dev_private; + + for (i = 0; i < hw->max_dataqueues; i++) { + VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used " + "and unused buf", i); + VIRTQUEUE_DUMP((struct virtqueue *) + dev->data->queue_pairs[i]); + + VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p", + i, dev->data->queue_pairs[i]); + + virtqueue_detatch_unused(dev->data->queue_pairs[i]); + + VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and " + "unused buf", i); + VIRTQUEUE_DUMP( + (struct virtqueue *)dev->data->queue_pairs[i]); + } +} + +static unsigned int +virtio_crypto_sym_get_session_private_size( + struct rte_cryptodev *dev __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); + + return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16); +} + +static int +virtio_crypto_check_sym_session_paras( + struct rte_cryptodev *dev) +{ + struct virtio_crypto_hw *hw; + + PMD_INIT_FUNC_TRACE(); + + if (unlikely(dev == NULL)) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL"); + return -1; + } + if (unlikely(dev->data == NULL)) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL"); + return -1; + } + hw = dev->data->dev_private; + if (unlikely(hw == NULL)) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL"); + return -1; + } + if (unlikely(hw->cvq == NULL)) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL"); + return -1; + } + + return 0; +} + +static int +virtio_crypto_check_sym_clear_session_paras( + struct rte_cryptodev *dev, + struct rte_cryptodev_sym_session *sess) +{ + PMD_INIT_FUNC_TRACE(); + + if (sess == NULL) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL"); + return -1; + } + + return virtio_crypto_check_sym_session_paras(dev); +} + +#define NUM_ENTRY_SYM_CLEAR_SESSION 2 + +static void +virtio_crypto_sym_clear_session( + struct rte_cryptodev *dev, + struct rte_cryptodev_sym_session *sess) +{ + struct virtio_crypto_hw *hw; + struct virtqueue *vq; + struct virtio_crypto_session *session; + struct virtio_crypto_op_ctrl_req *ctrl; + struct vring_desc *desc; + uint8_t *status; + uint8_t needed = 1; + uint32_t head; + uint8_t *malloc_virt_addr; + uint64_t malloc_phys_addr; + uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr); + uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req); + uint32_t desc_offset = len_op_ctrl_req + len_inhdr; + + PMD_INIT_FUNC_TRACE(); + + if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0) + return; + + hw = dev->data->dev_private; + vq = hw->cvq; + session = 
(struct virtio_crypto_session *)get_sym_session_private_data( + sess, cryptodev_virtio_driver_id); + if (session == NULL) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter"); + return; + } + + VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, " + "vq = %p", vq->vq_desc_head_idx, vq); + + if (vq->vq_free_cnt < needed) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "vq->vq_free_cnt = %d is less than %d, " + "not enough", vq->vq_free_cnt, needed); + return; + } + + /* + * malloc memory to store information of ctrl request op, + * returned status and desc vring + */ + malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr + + NUM_ENTRY_SYM_CLEAR_SESSION + * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE); + if (malloc_virt_addr == NULL) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room"); + return; + } + malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr); + + /* assign ctrl request op part */ + ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr; + ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION; + /* default data virtqueue is 0 */ + ctrl->header.queue_id = 0; + ctrl->u.destroy_session.session_id = session->session_id; + + /* status part */ + status = &(((struct virtio_crypto_inhdr *) + ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status); + *status = VIRTIO_CRYPTO_ERR; + + /* indirect desc vring part */ + desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr + + desc_offset); + + /* ctrl request part */ + desc[0].addr = malloc_phys_addr; + desc[0].len = len_op_ctrl_req; + desc[0].flags = VRING_DESC_F_NEXT; + desc[0].next = 1; + + /* status part */ + desc[1].addr = malloc_phys_addr + len_op_ctrl_req; + desc[1].len = len_inhdr; + desc[1].flags = VRING_DESC_F_WRITE; + + /* use only a single desc entry */ + head = vq->vq_desc_head_idx; + vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT; + vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset; + vq->vq_ring.desc[head].len + = NUM_ENTRY_SYM_CLEAR_SESSION + * sizeof(struct vring_desc); + vq->vq_free_cnt -= needed; + + vq->vq_desc_head_idx = vq->vq_ring.desc[head].next; + + vq_update_avail_ring(vq, head); + vq_update_avail_idx(vq); + + VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d", + vq->vq_queue_index); + + virtqueue_notify(vq); + + rte_rmb(); + while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) { + rte_rmb(); + usleep(100); + } + + while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) { + uint32_t idx, desc_idx, used_idx; + struct vring_used_elem *uep; + + used_idx = (uint32_t)(vq->vq_used_cons_idx + & (vq->vq_nentries - 1)); + uep = &vq->vq_ring.used->ring[used_idx]; + idx = (uint32_t) uep->id; + desc_idx = idx; + while (vq->vq_ring.desc[desc_idx].flags + & VRING_DESC_F_NEXT) { + desc_idx = vq->vq_ring.desc[desc_idx].next; + vq->vq_free_cnt++; + } + + vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx; + vq->vq_desc_head_idx = idx; + vq->vq_used_cons_idx++; + vq->vq_free_cnt++; + } + + if (*status != VIRTIO_CRYPTO_OK) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed " + "status=%"PRIu32", session_id=%"PRIu64"", + *status, session->session_id); + rte_free(malloc_virt_addr); + return; + } + + VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n" + "vq->vq_desc_head_idx=%d", + vq->vq_free_cnt, vq->vq_desc_head_idx); + + VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ", + session->session_id); + + memset(session, 0, sizeof(struct virtio_crypto_session)); + struct rte_mempool *sess_mp = rte_mempool_from_obj(session); + 
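	/* Detach the private data from the session wrapper and hand the
+	 * session object back to the mempool it came from. */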
set_sym_session_private_data(sess, cryptodev_virtio_driver_id, NULL); + rte_mempool_put(sess_mp, session); + rte_free(malloc_virt_addr); +} + +static struct rte_crypto_cipher_xform * +virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform) +{ + do { + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) + return &xform->cipher; + + xform = xform->next; + } while (xform); + + return NULL; +} + +static struct rte_crypto_auth_xform * +virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform) +{ + do { + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) + return &xform->auth; + + xform = xform->next; + } while (xform); + + return NULL; +} + +/** Get xform chain order */ +static int +virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform) +{ + if (xform == NULL) + return -1; + + /* Cipher Only */ + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && + xform->next == NULL) + return VIRTIO_CRYPTO_CMD_CIPHER; + + /* Authentication Only */ + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && + xform->next == NULL) + return VIRTIO_CRYPTO_CMD_AUTH; + + /* Authenticate then Cipher */ + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && + xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) + return VIRTIO_CRYPTO_CMD_HASH_CIPHER; + + /* Cipher then Authenticate */ + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && + xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) + return VIRTIO_CRYPTO_CMD_CIPHER_HASH; + + return -1; +} + +static int +virtio_crypto_sym_pad_cipher_param( + struct virtio_crypto_cipher_session_para *para, + struct rte_crypto_cipher_xform *cipher_xform) +{ + switch (cipher_xform->algo) { + case RTE_CRYPTO_CIPHER_AES_CBC: + para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC; + break; + default: + VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported " + "Cipher alg %u", cipher_xform->algo); + return -1; + } + + para->keylen = cipher_xform->key.length; + switch (cipher_xform->op) { + case RTE_CRYPTO_CIPHER_OP_ENCRYPT: + para->op = VIRTIO_CRYPTO_OP_ENCRYPT; + break; + case RTE_CRYPTO_CIPHER_OP_DECRYPT: + para->op = VIRTIO_CRYPTO_OP_DECRYPT; + break; + default: + VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation " + "parameter"); + return -1; + } + + return 0; +} + +static int +virtio_crypto_sym_pad_auth_param( + struct virtio_crypto_op_ctrl_req *ctrl, + struct rte_crypto_auth_xform *auth_xform) +{ + uint32_t *algo; + struct virtio_crypto_alg_chain_session_para *para = + &(ctrl->u.sym_create_session.u.chain.para); + + switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) { + case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN: + algo = &(para->u.hash_param.algo); + break; + case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH: + algo = &(para->u.mac_param.algo); + break; + default: + VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u " + "specified", + ctrl->u.sym_create_session.u.chain.para.hash_mode); + return -1; + } + + switch (auth_xform->algo) { + case RTE_CRYPTO_AUTH_SHA1_HMAC: + *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1; + break; + default: + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "Crypto: Undefined Hash algo %u specified", + auth_xform->algo); + return -1; + } + + return 0; +} + +static int +virtio_crypto_sym_pad_op_ctrl_req( + struct virtio_crypto_op_ctrl_req *ctrl, + struct rte_crypto_sym_xform *xform, bool is_chainned, + uint8_t **cipher_key_data, uint8_t **auth_key_data, + struct virtio_crypto_session *session) +{ + int ret; + struct rte_crypto_auth_xform *auth_xform = NULL; + struct rte_crypto_cipher_xform *cipher_xform = NULL; + + /* Get cipher xform from crypto xform chain */ + cipher_xform = 
virtio_crypto_get_cipher_xform(xform); + if (cipher_xform) { + if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "cipher IV size cannot be longer than %u", + VIRTIO_CRYPTO_MAX_IV_SIZE); + return -1; + } + if (is_chainned) + ret = virtio_crypto_sym_pad_cipher_param( + &ctrl->u.sym_create_session.u.chain.para + .cipher_param, cipher_xform); + else + ret = virtio_crypto_sym_pad_cipher_param( + &ctrl->u.sym_create_session.u.cipher.para, + cipher_xform); + + if (ret < 0) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "pad cipher parameter failed"); + return -1; + } + + *cipher_key_data = cipher_xform->key.data; + + session->iv.offset = cipher_xform->iv.offset; + session->iv.length = cipher_xform->iv.length; + } + + /* Get auth xform from crypto xform chain */ + auth_xform = virtio_crypto_get_auth_xform(xform); + if (auth_xform) { + /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */ + struct virtio_crypto_alg_chain_session_para *para = + &(ctrl->u.sym_create_session.u.chain.para); + if (auth_xform->key.length) { + para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH; + para->u.mac_param.auth_key_len = + (uint32_t)auth_xform->key.length; + para->u.mac_param.hash_result_len = + auth_xform->digest_length; + + *auth_key_data = auth_xform->key.data; + } else { + para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN; + para->u.hash_param.hash_result_len = + auth_xform->digest_length; + } + + ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform); + if (ret < 0) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter " + "failed"); + return -1; + } + } + + return 0; +} + +static int +virtio_crypto_check_sym_configure_session_paras( + struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *sym_sess, + struct rte_mempool *mempool) +{ + if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) || + unlikely(mempool == NULL)) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer"); + return -1; + } + + if (virtio_crypto_check_sym_session_paras(dev) < 0) + return -1; + + return 0; +} + +static int +virtio_crypto_sym_configure_session( + struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *sess, + struct rte_mempool *mempool) +{ + int ret; + struct virtio_crypto_session crypto_sess; + void *session_private = &crypto_sess; + struct virtio_crypto_session *session; + struct virtio_crypto_op_ctrl_req *ctrl_req; + enum virtio_crypto_cmd_id cmd_id; + uint8_t *cipher_key_data = NULL; + uint8_t *auth_key_data = NULL; + struct virtio_crypto_hw *hw; + struct virtqueue *control_vq; + + PMD_INIT_FUNC_TRACE(); + + ret = virtio_crypto_check_sym_configure_session_paras(dev, xform, + sess, mempool); + if (ret < 0) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters"); + return ret; + } + + if (rte_mempool_get(mempool, &session_private)) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "Couldn't get object from session mempool"); + return -ENOMEM; + } + + session = (struct virtio_crypto_session *)session_private; + memset(session, 0, sizeof(struct virtio_crypto_session)); + ctrl_req = &session->ctrl; + ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION; + /* FIXME: support multiqueue */ + ctrl_req->header.queue_id = 0; + + hw = dev->data->dev_private; + control_vq = hw->cvq; + + cmd_id = virtio_crypto_get_chain_order(xform); + if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH) + ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order + = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH; + if (cmd_id == 
VIRTIO_CRYPTO_CMD_HASH_CIPHER) + ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order + = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER; + + switch (cmd_id) { + case VIRTIO_CRYPTO_CMD_CIPHER_HASH: + case VIRTIO_CRYPTO_CMD_HASH_CIPHER: + ctrl_req->u.sym_create_session.op_type + = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING; + + ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, + xform, true, &cipher_key_data, &auth_key_data, session); + if (ret < 0) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "padding sym op ctrl req failed"); + goto error_out; + } + ret = virtio_crypto_send_command(control_vq, ctrl_req, + cipher_key_data, auth_key_data, session); + if (ret < 0) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "create session failed: %d", ret); + goto error_out; + } + break; + case VIRTIO_CRYPTO_CMD_CIPHER: + ctrl_req->u.sym_create_session.op_type + = VIRTIO_CRYPTO_SYM_OP_CIPHER; + ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform, + false, &cipher_key_data, &auth_key_data, session); + if (ret < 0) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "padding sym op ctrl req failed"); + goto error_out; + } + ret = virtio_crypto_send_command(control_vq, ctrl_req, + cipher_key_data, NULL, session); + if (ret < 0) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "create session failed: %d", ret); + goto error_out; + } + break; + default: + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "Unsupported operation chain order parameter"); + goto error_out; + } + + set_sym_session_private_data(sess, dev->driver_id, + session_private); + + return 0; + +error_out: + return -1; +} + +static void +virtio_crypto_dev_info_get(struct rte_cryptodev *dev, + struct rte_cryptodev_info *info) +{ + struct virtio_crypto_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (info != NULL) { + info->driver_id = cryptodev_virtio_driver_id; + info->feature_flags = dev->feature_flags; + info->max_nb_queue_pairs = hw->max_dataqueues; + /* No limit of number of sessions */ + info->sym.max_nb_sessions = 0; + info->capabilities = hw->virtio_dev_capabilities; + } +} + +static int +crypto_virtio_pci_probe( + struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + struct rte_cryptodev_pmd_init_params init_params = { + .name = "", + .socket_id = rte_socket_id(), + .private_data_size = sizeof(struct virtio_crypto_hw) + }; + char name[RTE_CRYPTODEV_NAME_MAX_LEN]; + + VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x", + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); + + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + + return crypto_virtio_create(name, pci_dev, &init_params); +} + +static int +crypto_virtio_pci_remove( + struct rte_pci_device *pci_dev __rte_unused) +{ + struct rte_cryptodev *cryptodev; + char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; + + if (pci_dev == NULL) + return -EINVAL; + + rte_pci_device_name(&pci_dev->addr, cryptodev_name, + sizeof(cryptodev_name)); + + cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name); + if (cryptodev == NULL) + return -ENODEV; + + return virtio_crypto_dev_uninit(cryptodev); +} + +static struct rte_pci_driver rte_virtio_crypto_driver = { + .id_table = pci_id_virtio_crypto_map, + .drv_flags = 0, + .probe = crypto_virtio_pci_probe, + .remove = crypto_virtio_pci_remove +}; + +static struct cryptodev_driver virtio_crypto_drv; + +RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver); +RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv, + rte_virtio_crypto_driver.driver, + cryptodev_virtio_driver_id); + 
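A minimal caller-side sketch may help orient readers: once the EAL has probed the device through the driver registered above, an application reaches it only via the public cryptodev API. Everything below is illustrative, not part of this patch: device id, the single shared session pool, the descriptor count and the setup_virtio_crypto() helper are assumptions, and AES-CBC is chosen because it is the only cipher virtio_crypto_sym_pad_cipher_param() accepts.

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* IV is conventionally carried in the op's private area, right after the
 * crypto op and sym op structures. */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static uint8_t aes_key[16]; /* filled by the application */

static int
setup_virtio_crypto(uint8_t dev_id, struct rte_mempool *sess_pool)
{
	struct rte_cryptodev_config conf = {
		.socket_id = rte_socket_id(),
		.nb_queue_pairs = 1,	/* must not exceed hw->max_dataqueues */
	};
	/* virtio_crypto_queue_setup() clamps this to the vring size */
	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 512 };
	struct rte_crypto_sym_xform cipher = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = aes_key, .length = sizeof(aes_key) },
			.iv = { .offset = IV_OFFSET, .length = 16 },
		},
	};
	struct rte_cryptodev_sym_session *sess;

	/* dev_configure re-initializes the device and starts the ctrl vq */
	if (rte_cryptodev_configure(dev_id, &conf) < 0)
		return -1;

	/* lands in virtio_crypto_qp_setup() above */
	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
			conf.socket_id, sess_pool) < 0)
		return -1;

	/* session_init reaches virtio_crypto_sym_configure_session(), which
	 * sends VIRTIO_CRYPTO_CIPHER_CREATE_SESSION on the control queue.
	 * One pool is reused for the session header and private data here,
	 * so its element size must cover both. */
	sess = rte_cryptodev_sym_session_create(sess_pool);
	if (sess == NULL || rte_cryptodev_sym_session_init(dev_id, sess,
			&cipher, sess_pool) < 0)
		return -1;

	return rte_cryptodev_start(dev_id);
}

Operations then flow through rte_cryptodev_enqueue_burst() and rte_cryptodev_dequeue_burst(), which crypto_virtio_create() wired to virtio_crypto_pkt_tx_burst() and virtio_crypto_pkt_rx_burst().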
+RTE_INIT(virtio_crypto_init_log) +{ + virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init"); + if (virtio_crypto_logtype_init >= 0) + rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE); + + virtio_crypto_logtype_session = + rte_log_register("pmd.crypto.virtio.session"); + if (virtio_crypto_logtype_session >= 0) + rte_log_set_level(virtio_crypto_logtype_session, + RTE_LOG_NOTICE); + + virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx"); + if (virtio_crypto_logtype_rx >= 0) + rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE); + + virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx"); + if (virtio_crypto_logtype_tx >= 0) + rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE); + + virtio_crypto_logtype_driver = + rte_log_register("pmd.crypto.virtio.driver"); + if (virtio_crypto_logtype_driver >= 0) + rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE); +} diff --git a/drivers/crypto/virtio/virtio_cryptodev.h b/drivers/crypto/virtio/virtio_cryptodev.h new file mode 100644 index 00000000..0fd7b722 --- /dev/null +++ b/drivers/crypto/virtio/virtio_cryptodev.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#ifndef _VIRTIO_CRYPTODEV_H_ +#define _VIRTIO_CRYPTODEV_H_ + +#include "virtio_crypto.h" +#include "virtio_pci.h" +#include "virtio_ring.h" + +/* Features desired/implemented by this driver. */ +#define VIRTIO_CRYPTO_PMD_GUEST_FEATURES (1ULL << VIRTIO_F_VERSION_1) + +#define CRYPTODEV_NAME_VIRTIO_PMD crypto_virtio + +#define NUM_ENTRY_VIRTIO_CRYPTO_OP 7 + +#define VIRTIO_CRYPTO_MAX_IV_SIZE 16 + +extern uint8_t cryptodev_virtio_driver_id; + +enum virtio_crypto_cmd_id { + VIRTIO_CRYPTO_CMD_CIPHER = 0, + VIRTIO_CRYPTO_CMD_AUTH = 1, + VIRTIO_CRYPTO_CMD_CIPHER_HASH = 2, + VIRTIO_CRYPTO_CMD_HASH_CIPHER = 3 +}; + +struct virtio_crypto_op_cookie { + struct virtio_crypto_op_data_req data_req; + struct virtio_crypto_inhdr inhdr; + struct vring_desc desc[NUM_ENTRY_VIRTIO_CRYPTO_OP]; + uint8_t iv[VIRTIO_CRYPTO_MAX_IV_SIZE]; +}; + +/* + * Control queue function prototype + */ +void virtio_crypto_ctrlq_start(struct rte_cryptodev *dev); + +/* + * Data queue function prototype + */ +void virtio_crypto_dataq_start(struct rte_cryptodev *dev); + +int virtio_crypto_queue_setup(struct rte_cryptodev *dev, + int queue_type, + uint16_t vtpci_queue_idx, + uint16_t nb_desc, + int socket_id, + struct virtqueue **pvq); + +void virtio_crypto_queue_release(struct virtqueue *vq); + +uint16_t virtio_crypto_pkt_tx_burst(void *tx_queue, + struct rte_crypto_op **tx_pkts, + uint16_t nb_pkts); + +uint16_t virtio_crypto_pkt_rx_burst(void *rx_queue, + struct rte_crypto_op **rx_pkts, + uint16_t nb_pkts); + +#endif /* _VIRTIO_CRYPTODEV_H_ */ diff --git a/drivers/crypto/virtio/virtio_logs.h b/drivers/crypto/virtio/virtio_logs.h new file mode 100644 index 00000000..26a286cf --- /dev/null +++ b/drivers/crypto/virtio/virtio_logs.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#ifndef _VIRTIO_LOGS_H_ +#define _VIRTIO_LOGS_H_ + +#include <rte_log.h> + +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \ + "PMD: %s(): " fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + +extern int virtio_crypto_logtype_init; + +#define VIRTIO_CRYPTO_INIT_LOG_IMPL(level, fmt, args...)
\ + rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_init, \ + "INIT: %s(): " fmt "\n", __func__, ##args) + +#define VIRTIO_CRYPTO_INIT_LOG_INFO(fmt, args...) \ + VIRTIO_CRYPTO_INIT_LOG_IMPL(INFO, fmt, ## args) + +#define VIRTIO_CRYPTO_INIT_LOG_DBG(fmt, args...) \ + VIRTIO_CRYPTO_INIT_LOG_IMPL(DEBUG, fmt, ## args) + +#define VIRTIO_CRYPTO_INIT_LOG_ERR(fmt, args...) \ + VIRTIO_CRYPTO_INIT_LOG_IMPL(ERR, fmt, ## args) + +extern int virtio_crypto_logtype_session; + +#define VIRTIO_CRYPTO_SESSION_LOG_IMPL(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_session, \ + "SESSION: %s(): " fmt "\n", __func__, ##args) + +#define VIRTIO_CRYPTO_SESSION_LOG_INFO(fmt, args...) \ + VIRTIO_CRYPTO_SESSION_LOG_IMPL(INFO, fmt, ## args) + +#define VIRTIO_CRYPTO_SESSION_LOG_DBG(fmt, args...) \ + VIRTIO_CRYPTO_SESSION_LOG_IMPL(DEBUG, fmt, ## args) + +#define VIRTIO_CRYPTO_SESSION_LOG_ERR(fmt, args...) \ + VIRTIO_CRYPTO_SESSION_LOG_IMPL(ERR, fmt, ## args) + +extern int virtio_crypto_logtype_rx; + +#define VIRTIO_CRYPTO_RX_LOG_IMPL(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_rx, \ + "RX: %s(): " fmt "\n", __func__, ##args) + +#define VIRTIO_CRYPTO_RX_LOG_INFO(fmt, args...) \ + VIRTIO_CRYPTO_RX_LOG_IMPL(INFO, fmt, ## args) + +#define VIRTIO_CRYPTO_RX_LOG_DBG(fmt, args...) \ + VIRTIO_CRYPTO_RX_LOG_IMPL(DEBUG, fmt, ## args) + +#define VIRTIO_CRYPTO_RX_LOG_ERR(fmt, args...) \ + VIRTIO_CRYPTO_RX_LOG_IMPL(ERR, fmt, ## args) + +extern int virtio_crypto_logtype_tx; + +#define VIRTIO_CRYPTO_TX_LOG_IMPL(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_tx, \ + "TX: %s(): " fmt "\n", __func__, ##args) + +#define VIRTIO_CRYPTO_TX_LOG_INFO(fmt, args...) \ + VIRTIO_CRYPTO_TX_LOG_IMPL(INFO, fmt, ## args) + +#define VIRTIO_CRYPTO_TX_LOG_DBG(fmt, args...) \ + VIRTIO_CRYPTO_TX_LOG_IMPL(DEBUG, fmt, ## args) + +#define VIRTIO_CRYPTO_TX_LOG_ERR(fmt, args...) \ + VIRTIO_CRYPTO_TX_LOG_IMPL(ERR, fmt, ## args) + +extern int virtio_crypto_logtype_driver; + +#define VIRTIO_CRYPTO_DRV_LOG_IMPL(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_driver, \ + "DRIVER: %s(): " fmt "\n", __func__, ##args) + +#define VIRTIO_CRYPTO_DRV_LOG_INFO(fmt, args...) \ + VIRTIO_CRYPTO_DRV_LOG_IMPL(INFO, fmt, ## args) + +#define VIRTIO_CRYPTO_DRV_LOG_DBG(fmt, args...) \ + VIRTIO_CRYPTO_DRV_LOG_IMPL(DEBUG, fmt, ## args) + +#define VIRTIO_CRYPTO_DRV_LOG_ERR(fmt, args...) \ + VIRTIO_CRYPTO_DRV_LOG_IMPL(ERR, fmt, ## args) + +#endif /* _VIRTIO_LOGS_H_ */ diff --git a/drivers/crypto/virtio/virtio_pci.c b/drivers/crypto/virtio/virtio_pci.c new file mode 100644 index 00000000..832c465b --- /dev/null +++ b/drivers/crypto/virtio/virtio_pci.c @@ -0,0 +1,462 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#include + +#ifdef RTE_EXEC_ENV_LINUXAPP + #include + #include +#endif + +#include +#include + +#include "virtio_pci.h" +#include "virtqueue.h" + +/* + * Following macros are derived from linux/pci_regs.h, however, + * we can't simply include that header here, as there is no such + * file for non-Linux platform. + */ +#define PCI_CAPABILITY_LIST 0x34 +#define PCI_CAP_ID_VNDR 0x09 +#define PCI_CAP_ID_MSIX 0x11 + +/* + * The remaining space is defined by each driver as the per-driver + * configuration space. + */ +#define VIRTIO_PCI_CONFIG(hw) \ + (((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 
24 : 20) + +struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO]; + +static inline int +check_vq_phys_addr_ok(struct virtqueue *vq) +{ + /* Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit, + * and only accepts 32 bit page frame number. + * Check if the allocated physical memory exceeds 16TB. + */ + if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >> + (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) { + VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be above 16TB!"); + return 0; + } + + return 1; +} + +static inline void +io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi) +{ + rte_write32(val & ((1ULL << 32) - 1), lo); + rte_write32(val >> 32, hi); +} + +static void +modern_read_dev_config(struct virtio_crypto_hw *hw, size_t offset, + void *dst, int length) +{ + int i; + uint8_t *p; + uint8_t old_gen, new_gen; + + do { + old_gen = rte_read8(&hw->common_cfg->config_generation); + + p = dst; + for (i = 0; i < length; i++) + *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i); + + new_gen = rte_read8(&hw->common_cfg->config_generation); + } while (old_gen != new_gen); +} + +static void +modern_write_dev_config(struct virtio_crypto_hw *hw, size_t offset, + const void *src, int length) +{ + int i; + const uint8_t *p = src; + + for (i = 0; i < length; i++) + rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i)); +} + +static uint64_t +modern_get_features(struct virtio_crypto_hw *hw) +{ + uint32_t features_lo, features_hi; + + rte_write32(0, &hw->common_cfg->device_feature_select); + features_lo = rte_read32(&hw->common_cfg->device_feature); + + rte_write32(1, &hw->common_cfg->device_feature_select); + features_hi = rte_read32(&hw->common_cfg->device_feature); + + return ((uint64_t)features_hi << 32) | features_lo; +} + +static void +modern_set_features(struct virtio_crypto_hw *hw, uint64_t features) +{ + rte_write32(0, &hw->common_cfg->guest_feature_select); + rte_write32(features & ((1ULL << 32) - 1), + &hw->common_cfg->guest_feature); + + rte_write32(1, &hw->common_cfg->guest_feature_select); + rte_write32(features >> 32, + &hw->common_cfg->guest_feature); +} + +static uint8_t +modern_get_status(struct virtio_crypto_hw *hw) +{ + return rte_read8(&hw->common_cfg->device_status); +} + +static void +modern_set_status(struct virtio_crypto_hw *hw, uint8_t status) +{ + rte_write8(status, &hw->common_cfg->device_status); +} + +static void +modern_reset(struct virtio_crypto_hw *hw) +{ + modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET); + modern_get_status(hw); +} + +static uint8_t +modern_get_isr(struct virtio_crypto_hw *hw) +{ + return rte_read8(hw->isr); +} + +static uint16_t +modern_set_config_irq(struct virtio_crypto_hw *hw, uint16_t vec) +{ + rte_write16(vec, &hw->common_cfg->msix_config); + return rte_read16(&hw->common_cfg->msix_config); +} + +static uint16_t +modern_set_queue_irq(struct virtio_crypto_hw *hw, struct virtqueue *vq, + uint16_t vec) +{ + rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select); + rte_write16(vec, &hw->common_cfg->queue_msix_vector); + return rte_read16(&hw->common_cfg->queue_msix_vector); +} + +static uint16_t +modern_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id) +{ + rte_write16(queue_id, &hw->common_cfg->queue_select); + return rte_read16(&hw->common_cfg->queue_size); +} + +static int +modern_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq) +{ + uint64_t desc_addr, avail_addr, used_addr; + uint16_t notify_off; + + if (!check_vq_phys_addr_ok(vq)) + return -1; + + desc_addr = vq->vq_ring_mem; + 
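	/* Split-ring layout: the avail ring directly follows the descriptor
+	 * table and the used ring starts at the next 4K-aligned address,
+	 * mirroring the arithmetic in vring_size()/vring_init(). */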
avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc); + used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail, + ring[vq->vq_nentries]), + VIRTIO_PCI_VRING_ALIGN); + + rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select); + + io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo, + &hw->common_cfg->queue_desc_hi); + io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo, + &hw->common_cfg->queue_avail_hi); + io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo, + &hw->common_cfg->queue_used_hi); + + notify_off = rte_read16(&hw->common_cfg->queue_notify_off); + vq->notify_addr = (void *)((uint8_t *)hw->notify_base + + notify_off * hw->notify_off_multiplier); + + rte_write16(1, &hw->common_cfg->queue_enable); + + VIRTIO_CRYPTO_INIT_LOG_DBG("queue %u addresses:", vq->vq_queue_index); + VIRTIO_CRYPTO_INIT_LOG_DBG("\t desc_addr: %" PRIx64, desc_addr); + VIRTIO_CRYPTO_INIT_LOG_DBG("\t aval_addr: %" PRIx64, avail_addr); + VIRTIO_CRYPTO_INIT_LOG_DBG("\t used_addr: %" PRIx64, used_addr); + VIRTIO_CRYPTO_INIT_LOG_DBG("\t notify addr: %p (notify offset: %u)", + vq->notify_addr, notify_off); + + return 0; +} + +static void +modern_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq) +{ + rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select); + + io_write64_twopart(0, &hw->common_cfg->queue_desc_lo, + &hw->common_cfg->queue_desc_hi); + io_write64_twopart(0, &hw->common_cfg->queue_avail_lo, + &hw->common_cfg->queue_avail_hi); + io_write64_twopart(0, &hw->common_cfg->queue_used_lo, + &hw->common_cfg->queue_used_hi); + + rte_write16(0, &hw->common_cfg->queue_enable); +} + +static void +modern_notify_queue(struct virtio_crypto_hw *hw __rte_unused, + struct virtqueue *vq) +{ + rte_write16(vq->vq_queue_index, vq->notify_addr); +} + +const struct virtio_pci_ops virtio_crypto_modern_ops = { + .read_dev_cfg = modern_read_dev_config, + .write_dev_cfg = modern_write_dev_config, + .reset = modern_reset, + .get_status = modern_get_status, + .set_status = modern_set_status, + .get_features = modern_get_features, + .set_features = modern_set_features, + .get_isr = modern_get_isr, + .set_config_irq = modern_set_config_irq, + .set_queue_irq = modern_set_queue_irq, + .get_queue_num = modern_get_queue_num, + .setup_queue = modern_setup_queue, + .del_queue = modern_del_queue, + .notify_queue = modern_notify_queue, +}; + +void +vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset, + void *dst, int length) +{ + VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length); +} + +void +vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset, + const void *src, int length) +{ + VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length); +} + +uint64_t +vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw, + uint64_t host_features) +{ + uint64_t features; + + /* + * Limit negotiated features to what the driver, virtqueue, and + * host all support. 
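+	 * This PMD requests only VIRTIO_F_VERSION_1
+	 * (see VIRTIO_CRYPTO_PMD_GUEST_FEATURES).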
+ */ + features = host_features & hw->guest_features; + VTPCI_OPS(hw)->set_features(hw, features); + + return features; +} + +void +vtpci_cryptodev_reset(struct virtio_crypto_hw *hw) +{ + VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET); + /* flush status write */ + VTPCI_OPS(hw)->get_status(hw); +} + +void +vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw) +{ + vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK); +} + +void +vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status) +{ + if (status != VIRTIO_CONFIG_STATUS_RESET) + status |= VTPCI_OPS(hw)->get_status(hw); + + VTPCI_OPS(hw)->set_status(hw, status); +} + +uint8_t +vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw) +{ + return VTPCI_OPS(hw)->get_status(hw); +} + +uint8_t +vtpci_cryptodev_isr(struct virtio_crypto_hw *hw) +{ + return VTPCI_OPS(hw)->get_isr(hw); +} + +static void * +get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap) +{ + uint8_t bar = cap->bar; + uint32_t length = cap->length; + uint32_t offset = cap->offset; + uint8_t *base; + + if (bar >= PCI_MAX_RESOURCE) { + VIRTIO_CRYPTO_INIT_LOG_ERR("invalid bar: %u", bar); + return NULL; + } + + if (offset + length < offset) { + VIRTIO_CRYPTO_INIT_LOG_ERR("offset(%u) + length(%u) overflows", + offset, length); + return NULL; + } + + if (offset + length > dev->mem_resource[bar].len) { + VIRTIO_CRYPTO_INIT_LOG_ERR( + "invalid cap: overflows bar space: %u > %" PRIu64, + offset + length, dev->mem_resource[bar].len); + return NULL; + } + + base = dev->mem_resource[bar].addr; + if (base == NULL) { + VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar); + return NULL; + } + + return base + offset; +} + +#define PCI_MSIX_ENABLE 0x8000 + +static int +virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw) +{ + uint8_t pos; + struct virtio_pci_cap cap; + int ret; + + if (rte_pci_map_device(dev)) { + VIRTIO_CRYPTO_INIT_LOG_DBG("failed to map pci device!"); + return -1; + } + + ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST); + if (ret < 0) { + VIRTIO_CRYPTO_INIT_LOG_DBG("failed to read pci capability list"); + return -1; + } + + while (pos) { + ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos); + if (ret < 0) { + VIRTIO_CRYPTO_INIT_LOG_ERR( + "failed to read pci cap at pos: %x", pos); + break; + } + + if (cap.cap_vndr == PCI_CAP_ID_MSIX) { + /* Transitional devices would also have this capability, + * that's why we also check if msix is enabled. + * 1st byte is cap ID; 2nd byte is the position of next + * cap; next two bytes are the flags. 
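+			 * Those flag bytes form the MSI-X Message Control
+			 * register, hence the ((uint16_t *)&cap)[1] access
+			 * and the PCI_MSIX_ENABLE (bit 15) test below.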
+ */ + uint16_t flags = ((uint16_t *)&cap)[1]; + + if (flags & PCI_MSIX_ENABLE) + hw->use_msix = VIRTIO_MSIX_ENABLED; + else + hw->use_msix = VIRTIO_MSIX_DISABLED; + } + + if (cap.cap_vndr != PCI_CAP_ID_VNDR) { + VIRTIO_CRYPTO_INIT_LOG_DBG( + "[%2x] skipping non VNDR cap id: %02x", + pos, cap.cap_vndr); + goto next; + } + + VIRTIO_CRYPTO_INIT_LOG_DBG( + "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u", + pos, cap.cfg_type, cap.bar, cap.offset, cap.length); + + switch (cap.cfg_type) { + case VIRTIO_PCI_CAP_COMMON_CFG: + hw->common_cfg = get_cfg_addr(dev, &cap); + break; + case VIRTIO_PCI_CAP_NOTIFY_CFG: + rte_pci_read_config(dev, &hw->notify_off_multiplier, + 4, pos + sizeof(cap)); + hw->notify_base = get_cfg_addr(dev, &cap); + break; + case VIRTIO_PCI_CAP_DEVICE_CFG: + hw->dev_cfg = get_cfg_addr(dev, &cap); + break; + case VIRTIO_PCI_CAP_ISR_CFG: + hw->isr = get_cfg_addr(dev, &cap); + break; + } + +next: + pos = cap.cap_next; + } + + if (hw->common_cfg == NULL || hw->notify_base == NULL || + hw->dev_cfg == NULL || hw->isr == NULL) { + VIRTIO_CRYPTO_INIT_LOG_INFO("no modern virtio pci device found."); + return -1; + } + + VIRTIO_CRYPTO_INIT_LOG_INFO("found modern virtio pci device."); + + VIRTIO_CRYPTO_INIT_LOG_DBG("common cfg mapped at: %p", hw->common_cfg); + VIRTIO_CRYPTO_INIT_LOG_DBG("device cfg mapped at: %p", hw->dev_cfg); + VIRTIO_CRYPTO_INIT_LOG_DBG("isr cfg mapped at: %p", hw->isr); + VIRTIO_CRYPTO_INIT_LOG_DBG("notify base: %p, notify off multiplier: %u", + hw->notify_base, hw->notify_off_multiplier); + + return 0; +} + +/* + * Return -1: + * if there is error mapping with VFIO/UIO. + * if port map error when driver type is KDRV_NONE. + * if whitelisted but driver type is KDRV_UNKNOWN. + * Return 1 if kernel driver is managing the device. + * Return 0 on success. + */ +int +vtpci_cryptodev_init(struct rte_pci_device *dev, struct virtio_crypto_hw *hw) +{ + /* + * Try if we can succeed reading virtio pci caps, which exists + * only on modern pci device. If failed, we fallback to legacy + * virtio handling. + */ + if (virtio_read_caps(dev, hw) == 0) { + VIRTIO_CRYPTO_INIT_LOG_INFO("modern virtio pci detected."); + virtio_hw_internal[hw->dev_id].vtpci_ops = + &virtio_crypto_modern_ops; + hw->modern = 1; + return 0; + } + + /* + * virtio crypto conforms to virtio 1.0 and doesn't support + * legacy mode + */ + return -1; +} diff --git a/drivers/crypto/virtio/virtio_pci.h b/drivers/crypto/virtio/virtio_pci.h new file mode 100644 index 00000000..604ec366 --- /dev/null +++ b/drivers/crypto/virtio/virtio_pci.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#ifndef _VIRTIO_PCI_H_ +#define _VIRTIO_PCI_H_ + +#include + +#include +#include +#include + +#include "virtio_crypto.h" + +struct virtqueue; + +/* VirtIO PCI vendor/device ID. */ +#define VIRTIO_CRYPTO_PCI_VENDORID 0x1AF4 +#define VIRTIO_CRYPTO_PCI_DEVICEID 0x1054 + +/* VirtIO ABI version, this must match exactly. */ +#define VIRTIO_PCI_ABI_VERSION 0 + +/* + * VirtIO Header, located in BAR 0. 
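+ * These legacy (pre-1.0) register offsets are kept for reference only:
+ * vtpci_cryptodev_init() accepts modern virtio 1.0 devices exclusively.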
+ */ +#define VIRTIO_PCI_HOST_FEATURES 0 /* host's supported features (32bit, RO)*/ +#define VIRTIO_PCI_GUEST_FEATURES 4 /* guest's supported features (32, RW) */ +#define VIRTIO_PCI_QUEUE_PFN 8 /* physical address of VQ (32, RW) */ +#define VIRTIO_PCI_QUEUE_NUM 12 /* number of ring entries (16, RO) */ +#define VIRTIO_PCI_QUEUE_SEL 14 /* current VQ selection (16, RW) */ +#define VIRTIO_PCI_QUEUE_NOTIFY 16 /* notify host regarding VQ (16, RW) */ +#define VIRTIO_PCI_STATUS 18 /* device status register (8, RW) */ +#define VIRTIO_PCI_ISR 19 /* interrupt status register, reading + * also clears the register (8, RO) + */ +/* Only if MSIX is enabled: */ + +/* configuration change vector (16, RW) */ +#define VIRTIO_MSI_CONFIG_VECTOR 20 +/* vector for selected VQ notifications */ +#define VIRTIO_MSI_QUEUE_VECTOR 22 + +/* The bit of the ISR which indicates a device has an interrupt. */ +#define VIRTIO_PCI_ISR_INTR 0x1 +/* The bit of the ISR which indicates a device configuration change. */ +#define VIRTIO_PCI_ISR_CONFIG 0x2 +/* Vector value used to disable MSI for queue. */ +#define VIRTIO_MSI_NO_VECTOR 0xFFFF + +/* Status byte for guest to report progress. */ +#define VIRTIO_CONFIG_STATUS_RESET 0x00 +#define VIRTIO_CONFIG_STATUS_ACK 0x01 +#define VIRTIO_CONFIG_STATUS_DRIVER 0x02 +#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04 +#define VIRTIO_CONFIG_STATUS_FEATURES_OK 0x08 +#define VIRTIO_CONFIG_STATUS_FAILED 0x80 + +/* + * Each virtqueue indirect descriptor list must be physically contiguous. + * To allow us to malloc(9) each list individually, limit the number + * supported to what will fit in one page. With 4KB pages, this is a limit + * of 256 descriptors. If there is ever a need for more, we can switch to + * contigmalloc(9) for the larger allocations, similar to what + * bus_dmamem_alloc(9) does. + * + * Note the sizeof(struct vring_desc) is 16 bytes. + */ +#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16)) + +/* Do we get callbacks when the ring is completely used, even if we've + * suppressed them? + */ +#define VIRTIO_F_NOTIFY_ON_EMPTY 24 + +/* Can the device handle any descriptor layout? */ +#define VIRTIO_F_ANY_LAYOUT 27 + +/* We support indirect buffer descriptors */ +#define VIRTIO_RING_F_INDIRECT_DESC 28 + +#define VIRTIO_F_VERSION_1 32 +#define VIRTIO_F_IOMMU_PLATFORM 33 + +/* The Guest publishes the used index for which it expects an interrupt + * at the end of the avail ring. Host should ignore the avail->flags field. + */ +/* The Host publishes the avail index for which it expects a kick + * at the end of the used ring. Guest should ignore the used->flags field. + */ +#define VIRTIO_RING_F_EVENT_IDX 29 + +/* Common configuration */ +#define VIRTIO_PCI_CAP_COMMON_CFG 1 +/* Notifications */ +#define VIRTIO_PCI_CAP_NOTIFY_CFG 2 +/* ISR Status */ +#define VIRTIO_PCI_CAP_ISR_CFG 3 +/* Device specific configuration */ +#define VIRTIO_PCI_CAP_DEVICE_CFG 4 +/* PCI configuration access */ +#define VIRTIO_PCI_CAP_PCI_CFG 5 + +/* This is the PCI capability header: */ +struct virtio_pci_cap { + uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */ + uint8_t cap_next; /* Generic PCI field: next ptr. */ + uint8_t cap_len; /* Generic PCI field: capability length */ + uint8_t cfg_type; /* Identifies the structure. */ + uint8_t bar; /* Where to find it. */ + uint8_t padding[3]; /* Pad to full dword. */ + uint32_t offset; /* Offset within bar. */ + uint32_t length; /* Length of the structure, in bytes. 
*/ +}; + +struct virtio_pci_notify_cap { + struct virtio_pci_cap cap; + uint32_t notify_off_multiplier; /* Multiplier for queue_notify_off. */ +}; + +/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */ +struct virtio_pci_common_cfg { + /* About the whole device. */ + uint32_t device_feature_select; /* read-write */ + uint32_t device_feature; /* read-only */ + uint32_t guest_feature_select; /* read-write */ + uint32_t guest_feature; /* read-write */ + uint16_t msix_config; /* read-write */ + uint16_t num_queues; /* read-only */ + uint8_t device_status; /* read-write */ + uint8_t config_generation; /* read-only */ + + /* About a specific virtqueue. */ + uint16_t queue_select; /* read-write */ + uint16_t queue_size; /* read-write, power of 2. */ + uint16_t queue_msix_vector; /* read-write */ + uint16_t queue_enable; /* read-write */ + uint16_t queue_notify_off; /* read-only */ + uint32_t queue_desc_lo; /* read-write */ + uint32_t queue_desc_hi; /* read-write */ + uint32_t queue_avail_lo; /* read-write */ + uint32_t queue_avail_hi; /* read-write */ + uint32_t queue_used_lo; /* read-write */ + uint32_t queue_used_hi; /* read-write */ +}; + +struct virtio_crypto_hw; + +struct virtio_pci_ops { + void (*read_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset, + void *dst, int len); + void (*write_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset, + const void *src, int len); + void (*reset)(struct virtio_crypto_hw *hw); + + uint8_t (*get_status)(struct virtio_crypto_hw *hw); + void (*set_status)(struct virtio_crypto_hw *hw, uint8_t status); + + uint64_t (*get_features)(struct virtio_crypto_hw *hw); + void (*set_features)(struct virtio_crypto_hw *hw, uint64_t features); + + uint8_t (*get_isr)(struct virtio_crypto_hw *hw); + + uint16_t (*set_config_irq)(struct virtio_crypto_hw *hw, uint16_t vec); + + uint16_t (*set_queue_irq)(struct virtio_crypto_hw *hw, + struct virtqueue *vq, uint16_t vec); + + uint16_t (*get_queue_num)(struct virtio_crypto_hw *hw, + uint16_t queue_id); + int (*setup_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq); + void (*del_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq); + void (*notify_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq); +}; + +struct virtio_crypto_hw { + /* control queue */ + struct virtqueue *cvq; + uint16_t dev_id; + uint16_t max_dataqueues; + uint64_t req_guest_features; + uint64_t guest_features; + uint8_t use_msix; + uint8_t modern; + uint32_t notify_off_multiplier; + uint8_t *isr; + uint16_t *notify_base; + struct virtio_pci_common_cfg *common_cfg; + struct virtio_crypto_config *dev_cfg; + const struct rte_cryptodev_capabilities *virtio_dev_capabilities; +}; + +/* + * While virtio_crypto_hw is stored in shared memory, this structure stores + * some infos that may vary in the multiple process model locally. + * For example, the vtpci_ops pointer. + */ +struct virtio_hw_internal { + const struct virtio_pci_ops *vtpci_ops; + struct rte_pci_ioport io; +}; + +#define VTPCI_OPS(hw) (virtio_hw_internal[(hw)->dev_id].vtpci_ops) +#define VTPCI_IO(hw) (&virtio_hw_internal[(hw)->dev_id].io) + +extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO]; + +/* + * How many bits to shift physical queue address written to QUEUE_PFN. + * 12 is historical, and due to x86 page size. + */ +#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12 + +/* The alignment to use between consumer and producer parts of vring. 
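+ * 4096 matches the x86 page size; vring_init() places the used ring at
+ * the next such boundary.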
*/ +#define VIRTIO_PCI_VRING_ALIGN 4096 + +enum virtio_msix_status { + VIRTIO_MSIX_NONE = 0, + VIRTIO_MSIX_DISABLED = 1, + VIRTIO_MSIX_ENABLED = 2 +}; + +static inline int +vtpci_with_feature(struct virtio_crypto_hw *hw, uint64_t bit) +{ + return (hw->guest_features & (1ULL << bit)) != 0; +} + +/* + * Function declaration from virtio_pci.c + */ +int vtpci_cryptodev_init(struct rte_pci_device *dev, + struct virtio_crypto_hw *hw); +void vtpci_cryptodev_reset(struct virtio_crypto_hw *hw); + +void vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw); + +uint8_t vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw); +void vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status); + +uint64_t vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw, + uint64_t host_features); + +void vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset, + const void *src, int length); + +void vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset, + void *dst, int length); + +uint8_t vtpci_cryptodev_isr(struct virtio_crypto_hw *hw); + +#endif /* _VIRTIO_PCI_H_ */ diff --git a/drivers/crypto/virtio/virtio_ring.h b/drivers/crypto/virtio/virtio_ring.h new file mode 100644 index 00000000..ee306745 --- /dev/null +++ b/drivers/crypto/virtio/virtio_ring.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#ifndef _VIRTIO_RING_H_ +#define _VIRTIO_RING_H_ + +#include + +#include + +/* This marks a buffer as continuing via the next field. */ +#define VRING_DESC_F_NEXT 1 +/* This marks a buffer as write-only (otherwise read-only). */ +#define VRING_DESC_F_WRITE 2 +/* This means the buffer contains a list of buffer descriptors. */ +#define VRING_DESC_F_INDIRECT 4 + +/* The Host uses this in used->flags to advise the Guest: don't kick me + * when you add a buffer. It's unreliable, so it's simply an + * optimization. Guest will still kick if it's out of buffers. + */ +#define VRING_USED_F_NO_NOTIFY 1 +/* The Guest uses this in avail->flags to advise the Host: don't + * interrupt me when you consume a buffer. It's unreliable, so it's + * simply an optimization. + */ +#define VRING_AVAIL_F_NO_INTERRUPT 1 + +/* VirtIO ring descriptors: 16 bytes. + * These can chain together via "next". + */ +struct vring_desc { + uint64_t addr; /* Address (guest-physical). */ + uint32_t len; /* Length. */ + uint16_t flags; /* The flags as indicated above. */ + uint16_t next; /* We chain unused descriptors via this. */ +}; + +struct vring_avail { + uint16_t flags; + uint16_t idx; + uint16_t ring[0]; +}; + +/* id is a 16bit index. uint32_t is used here for ids for padding reasons. */ +struct vring_used_elem { + /* Index of start of used descriptor chain. */ + uint32_t id; + /* Total length of the descriptor chain which was written to. */ + uint32_t len; +}; + +struct vring_used { + uint16_t flags; + volatile uint16_t idx; + struct vring_used_elem ring[0]; +}; + +struct vring { + unsigned int num; + struct vring_desc *desc; + struct vring_avail *avail; + struct vring_used *used; +}; + +/* The standard layout for the ring is a continuous chunk of memory which + * looks like this. We assume num is a power of 2. + * + * struct vring { + * // The actual descriptors (16 bytes each) + * struct vring_desc desc[num]; + * + * // A ring of available descriptor heads with free-running index. 
+ * __u16 avail_flags; + * __u16 avail_idx; + * __u16 available[num]; + * __u16 used_event_idx; + * + * // Padding to the next align boundary. + * char pad[]; + * + * // A ring of used descriptor heads with free-running index. + * __u16 used_flags; + * __u16 used_idx; + * struct vring_used_elem used[num]; + * __u16 avail_event_idx; + * }; + * + * NOTE: for VirtIO PCI, align is 4096. + */ + +/* + * We publish the used event index at the end of the available ring, and vice + * versa. They are at the end for backwards compatibility. + */ +#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num]) +#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num]) + +static inline size_t +vring_size(unsigned int num, unsigned long align) +{ + size_t size; + + size = num * sizeof(struct vring_desc); + size += sizeof(struct vring_avail) + (num * sizeof(uint16_t)); + size = RTE_ALIGN_CEIL(size, align); + size += sizeof(struct vring_used) + + (num * sizeof(struct vring_used_elem)); + return size; +} + +static inline void +vring_init(struct vring *vr, unsigned int num, uint8_t *p, + unsigned long align) +{ + vr->num = num; + vr->desc = (struct vring_desc *) p; + vr->avail = (struct vring_avail *) (p + + num * sizeof(struct vring_desc)); + vr->used = (void *) + RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align); +} + +/* + * The following is used with VIRTIO_RING_F_EVENT_IDX. + * Assuming a given event_idx value from the other size, if we have + * just incremented index from old to new_idx, should we trigger an + * event? + */ +static inline int +vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) +{ + return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old); +} + +#endif /* _VIRTIO_RING_H_ */ diff --git a/drivers/crypto/virtio/virtio_rxtx.c b/drivers/crypto/virtio/virtio_rxtx.c new file mode 100644 index 00000000..e32a1ecd --- /dev/null +++ b/drivers/crypto/virtio/virtio_rxtx.c @@ -0,0 +1,527 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ +#include + +#include "virtqueue.h" +#include "virtio_cryptodev.h" +#include "virtio_crypto_algs.h" + +static void +vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx) +{ + struct vring_desc *dp, *dp_tail; + struct vq_desc_extra *dxp; + uint16_t desc_idx_last = desc_idx; + + dp = &vq->vq_ring.desc[desc_idx]; + dxp = &vq->vq_descx[desc_idx]; + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs); + if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) { + while (dp->flags & VRING_DESC_F_NEXT) { + desc_idx_last = dp->next; + dp = &vq->vq_ring.desc[dp->next]; + } + } + dxp->ndescs = 0; + + /* + * We must append the existing free chain, if any, to the end of + * newly freed chain. If the virtqueue was completely used, then + * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above). 
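+	 * In that case the freed chain simply becomes the new head.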
+ */ + if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) { + vq->vq_desc_head_idx = desc_idx; + } else { + dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx]; + dp_tail->next = desc_idx; + } + + vq->vq_desc_tail_idx = desc_idx_last; + dp->next = VQ_RING_DESC_CHAIN_END; +} + +static uint16_t +virtqueue_dequeue_burst_rx(struct virtqueue *vq, + struct rte_crypto_op **rx_pkts, uint16_t num) +{ + struct vring_used_elem *uep; + struct rte_crypto_op *cop; + uint16_t used_idx, desc_idx; + uint16_t i; + struct virtio_crypto_inhdr *inhdr; + struct virtio_crypto_op_cookie *op_cookie; + + /* Caller does the check */ + for (i = 0; i < num ; i++) { + used_idx = (uint16_t)(vq->vq_used_cons_idx + & (vq->vq_nentries - 1)); + uep = &vq->vq_ring.used->ring[used_idx]; + desc_idx = (uint16_t)uep->id; + cop = (struct rte_crypto_op *) + vq->vq_descx[desc_idx].crypto_op; + if (unlikely(cop == NULL)) { + VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no " + "mbuf cookie at %u", + vq->vq_used_cons_idx); + break; + } + + op_cookie = (struct virtio_crypto_op_cookie *) + vq->vq_descx[desc_idx].cookie; + inhdr = &(op_cookie->inhdr); + switch (inhdr->status) { + case VIRTIO_CRYPTO_OK: + cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + break; + case VIRTIO_CRYPTO_ERR: + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + vq->packets_received_failed++; + break; + case VIRTIO_CRYPTO_BADMSG: + cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + vq->packets_received_failed++; + break; + case VIRTIO_CRYPTO_NOTSUPP: + cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + vq->packets_received_failed++; + break; + case VIRTIO_CRYPTO_INVSESS: + cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + vq->packets_received_failed++; + break; + default: + break; + } + + vq->packets_received_total++; + + rx_pkts[i] = cop; + rte_mempool_put(vq->mpool, op_cookie); + + vq->vq_used_cons_idx++; + vq_ring_free_chain(vq, desc_idx); + vq->vq_descx[desc_idx].crypto_op = NULL; + } + + return i; +} + +static int +virtqueue_crypto_sym_pkt_header_arrange( + struct rte_crypto_op *cop, + struct virtio_crypto_op_data_req *data, + struct virtio_crypto_session *session) +{ + struct rte_crypto_sym_op *sym_op = cop->sym; + struct virtio_crypto_op_data_req *req_data = data; + struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl; + struct virtio_crypto_sym_create_session_req *sym_sess_req = + &ctrl->u.sym_create_session; + struct virtio_crypto_alg_chain_session_para *chain_para = + &sym_sess_req->u.chain.para; + struct virtio_crypto_cipher_session_para *cipher_para; + + req_data->header.session_id = session->session_id; + + switch (sym_sess_req->op_type) { + case VIRTIO_CRYPTO_SYM_OP_CIPHER: + req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER; + + cipher_para = &sym_sess_req->u.cipher.para; + if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT) + req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT; + else + req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT; + + req_data->u.sym_req.u.cipher.para.iv_len + = session->iv.length; + + req_data->u.sym_req.u.cipher.para.src_data_len = + (sym_op->cipher.data.length + + sym_op->cipher.data.offset); + req_data->u.sym_req.u.cipher.para.dst_data_len = + req_data->u.sym_req.u.cipher.para.src_data_len; + break; + case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING: + req_data->u.sym_req.op_type = + VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING; + + cipher_para = &chain_para->cipher_param; + if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT) + req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT; + else + req_data->header.opcode = 
VIRTIO_CRYPTO_CIPHER_DECRYPT; + + req_data->u.sym_req.u.chain.para.iv_len = session->iv.length; + req_data->u.sym_req.u.chain.para.aad_len = session->aad.length; + + req_data->u.sym_req.u.chain.para.src_data_len = + (sym_op->cipher.data.length + + sym_op->cipher.data.offset); + req_data->u.sym_req.u.chain.para.dst_data_len = + req_data->u.sym_req.u.chain.para.src_data_len; + req_data->u.sym_req.u.chain.para.cipher_start_src_offset = + sym_op->cipher.data.offset; + req_data->u.sym_req.u.chain.para.len_to_cipher = + sym_op->cipher.data.length; + req_data->u.sym_req.u.chain.para.hash_start_src_offset = + sym_op->auth.data.offset; + req_data->u.sym_req.u.chain.para.len_to_hash = + sym_op->auth.data.length; + req_data->u.sym_req.u.chain.para.aad_len = + chain_para->aad_len; + + if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) + req_data->u.sym_req.u.chain.para.hash_result_len = + chain_para->u.hash_param.hash_result_len; + if (chain_para->hash_mode == + VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) + req_data->u.sym_req.u.chain.para.hash_result_len = + chain_para->u.mac_param.hash_result_len; + break; + default: + return -1; + } + + return 0; +} + +static int +virtqueue_crypto_sym_enqueue_xmit( + struct virtqueue *txvq, + struct rte_crypto_op *cop) +{ + uint16_t idx = 0; + uint16_t num_entry; + uint16_t needed = 1; + uint16_t head_idx; + struct vq_desc_extra *dxp; + struct vring_desc *start_dp; + struct vring_desc *desc; + uint64_t indirect_op_data_req_phys_addr; + uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req); + uint32_t indirect_vring_addr_offset = req_data_len + + sizeof(struct virtio_crypto_inhdr); + uint32_t indirect_iv_addr_offset = indirect_vring_addr_offset + + sizeof(struct vring_desc) * NUM_ENTRY_VIRTIO_CRYPTO_OP; + struct rte_crypto_sym_op *sym_op = cop->sym; + struct virtio_crypto_session *session = + (struct virtio_crypto_session *)get_sym_session_private_data( + cop->sym->session, cryptodev_virtio_driver_id); + struct virtio_crypto_op_data_req *op_data_req; + uint32_t hash_result_len = 0; + struct virtio_crypto_op_cookie *crypto_op_cookie; + struct virtio_crypto_alg_chain_session_para *para; + + if (unlikely(sym_op->m_src->nb_segs != 1)) + return -EMSGSIZE; + if (unlikely(txvq->vq_free_cnt == 0)) + return -ENOSPC; + if (unlikely(txvq->vq_free_cnt < needed)) + return -EMSGSIZE; + head_idx = txvq->vq_desc_head_idx; + if (unlikely(head_idx >= txvq->vq_nentries)) + return -EFAULT; + if (unlikely(session == NULL)) + return -EFAULT; + + dxp = &txvq->vq_descx[head_idx]; + + if (rte_mempool_get(txvq->mpool, &dxp->cookie)) { + VIRTIO_CRYPTO_TX_LOG_ERR("can not get cookie"); + return -EFAULT; + } + crypto_op_cookie = dxp->cookie; + indirect_op_data_req_phys_addr = + rte_mempool_virt2iova(crypto_op_cookie); + op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie; + + if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req, session)) + return -EFAULT; + + /* status is initialized to VIRTIO_CRYPTO_ERR */ + ((struct virtio_crypto_inhdr *) + ((uint8_t *)op_data_req + req_data_len))->status = + VIRTIO_CRYPTO_ERR; + + /* point to indirect vring entry */ + desc = (struct vring_desc *) + ((uint8_t *)op_data_req + indirect_vring_addr_offset); + for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++) + desc[idx].next = idx + 1; + desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END; + + idx = 0; + + /* indirect vring: first part, virtio_crypto_op_data_req */ + desc[idx].addr = indirect_op_data_req_phys_addr; + desc[idx].len = req_data_len; + 
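/* The indirect table is filled in a fixed order: request header, optional cipher IV, optional AAD, source data, destination data, optional digest, then the status byte. */ +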
desc[idx++].flags = VRING_DESC_F_NEXT; + + /* indirect vring: iv of cipher */ + if (session->iv.length) { + if (cop->phys_addr) + desc[idx].addr = cop->phys_addr + session->iv.offset; + else { + rte_memcpy(crypto_op_cookie->iv, + rte_crypto_op_ctod_offset(cop, + uint8_t *, session->iv.offset), + session->iv.length); + desc[idx].addr = indirect_op_data_req_phys_addr + + indirect_iv_addr_offset; + } + + desc[idx].len = session->iv.length; + desc[idx++].flags = VRING_DESC_F_NEXT; + } + + /* indirect vring: additional auth data */ + if (session->aad.length) { + desc[idx].addr = session->aad.phys_addr; + desc[idx].len = session->aad.length; + desc[idx++].flags = VRING_DESC_F_NEXT; + } + + /* indirect vring: src data */ + desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0); + desc[idx].len = (sym_op->cipher.data.offset + + sym_op->cipher.data.length); + desc[idx++].flags = VRING_DESC_F_NEXT; + + /* indirect vring: dst data */ + if (sym_op->m_dst) { + desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_dst, 0); + desc[idx].len = (sym_op->cipher.data.offset + + sym_op->cipher.data.length); + } else { + desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0); + desc[idx].len = (sym_op->cipher.data.offset + + sym_op->cipher.data.length); + } + desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT; + + /* indirect vring: digest result */ + para = &(session->ctrl.u.sym_create_session.u.chain.para); + if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) + hash_result_len = para->u.hash_param.hash_result_len; + if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) + hash_result_len = para->u.mac_param.hash_result_len; + if (hash_result_len > 0) { + desc[idx].addr = sym_op->auth.digest.phys_addr; + desc[idx].len = hash_result_len; + desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT; + } + + /* indirect vring: last part, status returned */ + desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len; + desc[idx].len = sizeof(struct virtio_crypto_inhdr); + desc[idx++].flags = VRING_DESC_F_WRITE; + + num_entry = idx; + + /* save the infos to use when receiving packets */ + dxp->crypto_op = (void *)cop; + dxp->ndescs = needed; + + /* use a single buffer */ + start_dp = txvq->vq_ring.desc; + start_dp[head_idx].addr = indirect_op_data_req_phys_addr + + indirect_vring_addr_offset; + start_dp[head_idx].len = num_entry * sizeof(struct vring_desc); + start_dp[head_idx].flags = VRING_DESC_F_INDIRECT; + + idx = start_dp[head_idx].next; + txvq->vq_desc_head_idx = idx; + if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) + txvq->vq_desc_tail_idx = idx; + txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed); + vq_update_avail_ring(txvq, head_idx); + + return 0; +} + +static int +virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq, + struct rte_crypto_op *cop) +{ + int ret; + + switch (cop->type) { + case RTE_CRYPTO_OP_TYPE_SYMMETRIC: + ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop); + break; + default: + VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u", + cop->type); + ret = -EFAULT; + break; + } + + return ret; +} + +static int +virtio_crypto_vring_start(struct virtqueue *vq) +{ + struct virtio_crypto_hw *hw = vq->hw; + int i, size = vq->vq_nentries; + struct vring *vr = &vq->vq_ring; + uint8_t *ring_mem = vq->vq_ring_virt_mem; + + PMD_INIT_FUNC_TRACE(); + + vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN); + vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1); + vq->vq_free_cnt = vq->vq_nentries; + + /* Chain all the descriptors in the ring with 
an END */ + for (i = 0; i < size - 1; i++) + vr->desc[i].next = (uint16_t)(i + 1); + vr->desc[i].next = VQ_RING_DESC_CHAIN_END; + + /* + * Disable the device (host) from interrupting the guest + */ + virtqueue_disable_intr(vq); + + /* + * Set guest physical address of the virtqueue + * in VIRTIO_PCI_QUEUE_PFN config register of device + * to share with the backend + */ + if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) { + VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed"); + return -EINVAL; + } + + return 0; +} + +void +virtio_crypto_ctrlq_start(struct rte_cryptodev *dev) +{ + struct virtio_crypto_hw *hw = dev->data->dev_private; + + if (hw->cvq) { + virtio_crypto_vring_start(hw->cvq); + VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq); + } +} + +void +virtio_crypto_dataq_start(struct rte_cryptodev *dev) +{ + /* + * Start data vrings + * - Setup vring structure for data queues + */ + uint16_t i; + struct virtio_crypto_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + /* Start data vring. */ + for (i = 0; i < hw->max_dataqueues; i++) { + virtio_crypto_vring_start(dev->data->queue_pairs[i]); + VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]); + } +} + +/* vring size of data queue is 1024 */ +#define VIRTIO_MBUF_BURST_SZ 1024 + +uint16_t +virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts, + uint16_t nb_pkts) +{ + struct virtqueue *txvq = tx_queue; + uint16_t nb_used, num, nb_rx; + + nb_used = VIRTQUEUE_NUSED(txvq); + + virtio_rmb(); + + num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts); + num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) + ? num : VIRTIO_MBUF_BURST_SZ); + + if (num == 0) + return 0; + + nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num); + VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used, num); + + return nb_rx; +} + +uint16_t +virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts, + uint16_t nb_pkts) +{ + struct virtqueue *txvq; + uint16_t nb_tx; + int error; + + if (unlikely(nb_pkts < 1)) + return nb_pkts; + if (unlikely(tx_queue == NULL)) { + VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL"); + return 0; + } + txvq = tx_queue; + + VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts); + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src; + /* nb_segs is always 1 for virtio crypto */ + int need = txm->nb_segs - txvq->vq_free_cnt; + + /* + * A positive value indicates there is not enough space in + * the vring descriptors + */ + if (unlikely(need > 0)) { + /* + * try again, because the receive process may have + * freed some space + */ + need = txm->nb_segs - txvq->vq_free_cnt; + if (unlikely(need > 0)) { + VIRTIO_CRYPTO_TX_LOG_DBG("No free tx " + "descriptors to transmit"); + break; + } + } + + txvq->packets_sent_total++; + + /* Enqueue Packet buffers */ + error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]); + if (unlikely(error)) { + if (error == -ENOSPC) + VIRTIO_CRYPTO_TX_LOG_ERR( + "virtqueue_enqueue Free count = 0"); + else if (error == -EMSGSIZE) + VIRTIO_CRYPTO_TX_LOG_ERR( + "virtqueue_enqueue Free count < 1"); + else + VIRTIO_CRYPTO_TX_LOG_ERR( + "virtqueue_enqueue error: %d", error); + txvq->packets_sent_failed++; + break; + } + } + + if (likely(nb_tx)) { + vq_update_avail_idx(txvq); + + if (unlikely(virtqueue_kick_prepare(txvq))) { + virtqueue_notify(txvq); + VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit"); + } + } + + return nb_tx; +} diff --git a/drivers/crypto/virtio/virtqueue.c b/drivers/crypto/virtio/virtqueue.c new file mode 
100644 index 00000000..fd8be581 --- /dev/null +++ b/drivers/crypto/virtio/virtqueue.c @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#include + +#include +#include +#include + +#include "virtqueue.h" + +void +virtqueue_disable_intr(struct virtqueue *vq) +{ + /* + * Set VRING_AVAIL_F_NO_INTERRUPT to hint host + * not to interrupt when it consumes packets + * Note: this is only considered a hint to the host + */ + vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; +} + +void +virtqueue_detatch_unused(struct virtqueue *vq) +{ + struct rte_crypto_op *cop = NULL; + + int idx; + + if (vq != NULL) + for (idx = 0; idx < vq->vq_nentries; idx++) { + cop = vq->vq_descx[idx].crypto_op; + if (cop) { + if (cop->sym->m_src) + rte_pktmbuf_free(cop->sym->m_src); + if (cop->sym->m_dst) + rte_pktmbuf_free(cop->sym->m_dst); + rte_crypto_op_free(cop); + vq->vq_descx[idx].crypto_op = NULL; + } + } +} diff --git a/drivers/crypto/virtio/virtqueue.h b/drivers/crypto/virtio/virtqueue.h new file mode 100644 index 00000000..bf10c657 --- /dev/null +++ b/drivers/crypto/virtio/virtqueue.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#ifndef _VIRTQUEUE_H_ +#define _VIRTQUEUE_H_ + +#include + +#include +#include +#include +#include + +#include "virtio_pci.h" +#include "virtio_ring.h" +#include "virtio_logs.h" +#include "virtio_crypto.h" + +struct rte_mbuf; + +/* + * Per virtio_config.h in Linux. + * For virtio_pci on SMP, we don't need to order with respect to MMIO + * accesses through relaxed memory I/O windows, so smp_mb() et al are + * sufficient. + * + */ +#define virtio_mb() rte_smp_mb() +#define virtio_rmb() rte_smp_rmb() +#define virtio_wmb() rte_smp_wmb() + +#define VIRTQUEUE_MAX_NAME_SZ 32 + +enum { VTCRYPTO_DATAQ = 0, VTCRYPTO_CTRLQ = 1 }; + +/** + * The maximum virtqueue size is 2^15. Use that value as the end of + * descriptor chain terminator since it will never be a valid index + * in the descriptor table. This is used to verify we are correctly + * handling vq_free_cnt. + */ +#define VQ_RING_DESC_CHAIN_END 32768 + +struct vq_desc_extra { + void *crypto_op; + void *cookie; + uint16_t ndescs; +}; + +struct virtqueue { + /**< virtio_crypto_hw structure pointer. */ + struct virtio_crypto_hw *hw; + /**< mem zone to populate RX ring. */ + const struct rte_memzone *mz; + /**< memzone to populate hdr and request. */ + struct rte_mempool *mpool; + uint8_t dev_id; /**< Device identifier. */ + uint16_t vq_queue_index; /**< PCI queue index */ + + void *vq_ring_virt_mem; /**< linear address of vring*/ + unsigned int vq_ring_size; + phys_addr_t vq_ring_mem; /**< physical address of vring */ + + struct vring vq_ring; /**< vring keeping desc, used and avail */ + uint16_t vq_free_cnt; /**< num of desc available */ + uint16_t vq_nentries; /**< vring desc numbers */ + + /** + * Head of the free chain in the descriptor table. If + * there are no free descriptors, this will be set to + * VQ_RING_DESC_CHAIN_END. + */ + uint16_t vq_desc_head_idx; + uint16_t vq_desc_tail_idx; + /** + * Last consumed descriptor in the used table, + * trails vq_ring.used->idx. 
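+ * VIRTQUEUE_NUSED() below computes used->idx - vq_used_cons_idx in + * uint16_t arithmetic, which stays correct across index wrap-around: e.g. + * used->idx == 3 with vq_used_cons_idx == 65533 gives 6 pending entries.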
+ */ + uint16_t vq_used_cons_idx; + uint16_t vq_avail_idx; + + /* Statistics */ + uint64_t packets_sent_total; + uint64_t packets_sent_failed; + uint64_t packets_received_total; + uint64_t packets_received_failed; + + uint16_t *notify_addr; + + struct vq_desc_extra vq_descx[0]; +}; + +/** + * Tell the backend not to interrupt us. + */ +void virtqueue_disable_intr(struct virtqueue *vq); + +/** + * Free any crypto ops (and their mbufs) left attached to the ring. + */ +void virtqueue_detatch_unused(struct virtqueue *vq); + +static inline int +virtqueue_full(const struct virtqueue *vq) +{ + return vq->vq_free_cnt == 0; +} + +#define VIRTQUEUE_NUSED(vq) \ + ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx)) + +static inline void +vq_update_avail_idx(struct virtqueue *vq) +{ + virtio_wmb(); + vq->vq_ring.avail->idx = vq->vq_avail_idx; +} + +static inline void +vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx) +{ + uint16_t avail_idx; + /* + * Place the head of the descriptor chain into the next slot and make + * it usable to the host. The chain is made available now rather than + * deferring to virtqueue_notify() in the hopes that if the host is + * currently running on another CPU, we can keep it processing the new + * descriptor. + */ + avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1)); + if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx)) + vq->vq_ring.avail->ring[avail_idx] = desc_idx; + vq->vq_avail_idx++; +} + +static inline int +virtqueue_kick_prepare(struct virtqueue *vq) +{ + return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY); +} + +static inline void +virtqueue_notify(struct virtqueue *vq) +{ + /* + * Ensure the updated avail->idx is visible to the host. + * For virtio on IA, the notification is through an I/O port operation, + * which is itself a serializing instruction. + */ + VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq); +} + +/** + * Dump virtqueue internal structures, for debug purposes only. 
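+ * Example, as used on the control-queue start path in virtio_rxtx.c: + * + * if (hw->cvq) + * VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);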
+ */ +#define VIRTQUEUE_DUMP(vq) do { \ + uint16_t used_idx, nused; \ + used_idx = (vq)->vq_ring.used->idx; \ + nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \ + VIRTIO_CRYPTO_INIT_LOG_DBG(\ + "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \ + " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \ + " avail.flags=0x%x; used.flags=0x%x", \ + (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \ + (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \ + (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \ + (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \ +} while (0) + +#endif /* _VIRTQUEUE_H_ */ diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c index ad65b80c..313f4590 100644 --- a/drivers/crypto/zuc/rte_zuc_pmd.c +++ b/drivers/crypto/zuc/rte_zuc_pmd.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #include @@ -11,8 +11,7 @@ #include #include "rte_zuc_pmd_private.h" - -#define ZUC_MAX_BURST 8 +#define ZUC_MAX_BURST 4 #define BYTE_LEN 8 static uint8_t cryptodev_driver_id; @@ -78,7 +77,7 @@ zuc_set_session_parameters(struct zuc_session *sess, break; case ZUC_OP_NOT_SUPPORTED: default: - ZUC_LOG_ERR("Unsupported operation chain order parameter"); + ZUC_LOG(ERR, "Unsupported operation chain order parameter"); return -ENOTSUP; } @@ -88,7 +87,7 @@ zuc_set_session_parameters(struct zuc_session *sess, return -ENOTSUP; if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) { - ZUC_LOG_ERR("Wrong IV length"); + ZUC_LOG(ERR, "Wrong IV length"); return -EINVAL; } sess->cipher_iv_offset = cipher_xform->cipher.iv.offset; @@ -104,14 +103,14 @@ zuc_set_session_parameters(struct zuc_session *sess, return -ENOTSUP; if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) { - ZUC_LOG_ERR("Wrong digest length"); + ZUC_LOG(ERR, "Wrong digest length"); return -EINVAL; } sess->auth_op = auth_xform->auth.op; if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) { - ZUC_LOG_ERR("Wrong IV length"); + ZUC_LOG(ERR, "Wrong IV length"); return -EINVAL; } sess->auth_iv_offset = auth_xform->auth.iv.offset; @@ -135,7 +134,7 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op) if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { if (likely(op->sym->session != NULL)) - sess = (struct zuc_session *)get_session_private_data( + sess = (struct zuc_session *)get_sym_session_private_data( op->sym->session, cryptodev_driver_id); } else { @@ -157,8 +156,8 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op) sess = NULL; } op->sym->session = (struct rte_cryptodev_sym_session *)_sess; - set_session_private_data(op->sym->session, cryptodev_driver_id, - _sess_private_data); + set_sym_session_private_data(op->sym->session, + cryptodev_driver_id, _sess_private_data); } if (unlikely(sess == NULL)) @@ -168,10 +167,10 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op) return sess; } -/** Encrypt/decrypt mbufs with same cipher key. */ +/** Encrypt/decrypt mbufs. 
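+ * Each op now carries its own session: sessions[i] supplies the cipher + * key and IV offset for ops[i], rather than one session for the whole + * burst.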
*/ static uint8_t process_zuc_cipher_op(struct rte_crypto_op **ops, - struct zuc_session *session, + struct zuc_session **sessions, uint8_t num_ops) { unsigned i; @@ -180,22 +179,25 @@ process_zuc_cipher_op(struct rte_crypto_op **ops, uint8_t *iv[ZUC_MAX_BURST]; uint32_t num_bytes[ZUC_MAX_BURST]; uint8_t *cipher_keys[ZUC_MAX_BURST]; + struct zuc_session *sess; for (i = 0; i < num_ops; i++) { if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0) || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) { ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - ZUC_LOG_ERR("Data Length or offset"); + ZUC_LOG(ERR, "Data Length or offset"); break; } + sess = sessions[i]; + #ifdef RTE_LIBRTE_PMD_ZUC_DEBUG if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) || (ops[i]->sym->m_dst != NULL && !rte_pktmbuf_is_contiguous( ops[i]->sym->m_dst))) { - ZUC_LOG_ERR("PMD supports only contiguous mbufs, " + ZUC_LOG(ERR, "PMD supports only contiguous mbufs, " "op (%p) provides noncontiguous mbuf as " "source/destination buffer.\n", ops[i]); ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; @@ -211,10 +213,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops, rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) + (ops[i]->sym->cipher.data.offset >> 3); iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *, - session->cipher_iv_offset); + sess->cipher_iv_offset); num_bytes[i] = ops[i]->sym->cipher.data.length >> 3; - cipher_keys[i] = session->pKey_cipher; + cipher_keys[i] = sess->pKey_cipher; processed_ops++; } @@ -225,10 +227,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops, return processed_ops; } -/** Generate/verify hash from mbufs with same hash key. */ +/** Generate/verify hash from mbufs. */ static int process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops, - struct zuc_session *session, + struct zuc_session **sessions, uint8_t num_ops) { unsigned i; @@ -237,26 +239,29 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops, uint32_t *dst; uint32_t length_in_bits; uint8_t *iv; + struct zuc_session *sess; for (i = 0; i < num_ops; i++) { /* Data must be byte aligned */ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) { ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - ZUC_LOG_ERR("Offset"); + ZUC_LOG(ERR, "Offset"); break; } + sess = sessions[i]; + length_in_bits = ops[i]->sym->auth.data.length; src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) + (ops[i]->sym->auth.data.offset >> 3); iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *, - session->auth_iv_offset); + sess->auth_iv_offset); - if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) { + if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) { dst = (uint32_t *)qp->temp_digest; - sso_zuc_eia3_1_buffer(session->pKey_hash, + sso_zuc_eia3_1_buffer(sess->pKey_hash, iv, src, length_in_bits, dst); /* Verify digest. */ @@ -266,7 +271,7 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops, } else { dst = (uint32_t *)ops[i]->sym->auth.digest.data; - sso_zuc_eia3_1_buffer(session->pKey_hash, + sso_zuc_eia3_1_buffer(sess->pKey_hash, iv, src, length_in_bits, dst); } @@ -276,33 +281,34 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops, return processed_ops; } -/** Process a batch of crypto ops which shares the same session. */ +/** Process a batch of crypto ops which shares the same operation type. 
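+ * The batch is collected by zuc_pmd_enqueue_burst() below with a single + * zuc_operation, so one switch on op_type dispatches every op in it.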
*/ static int -process_ops(struct rte_crypto_op **ops, struct zuc_session *session, +process_ops(struct rte_crypto_op **ops, enum zuc_operation op_type, + struct zuc_session **sessions, struct zuc_qp *qp, uint8_t num_ops, uint16_t *accumulated_enqueued_ops) { unsigned i; unsigned enqueued_ops, processed_ops; - switch (session->op) { + switch (op_type) { case ZUC_OP_ONLY_CIPHER: processed_ops = process_zuc_cipher_op(ops, - session, num_ops); + sessions, num_ops); break; case ZUC_OP_ONLY_AUTH: - processed_ops = process_zuc_hash_op(qp, ops, session, + processed_ops = process_zuc_hash_op(qp, ops, sessions, num_ops); break; case ZUC_OP_CIPHER_AUTH: - processed_ops = process_zuc_cipher_op(ops, session, + processed_ops = process_zuc_cipher_op(ops, sessions, num_ops); - process_zuc_hash_op(qp, ops, session, processed_ops); + process_zuc_hash_op(qp, ops, sessions, processed_ops); break; case ZUC_OP_AUTH_CIPHER: - processed_ops = process_zuc_hash_op(qp, ops, session, + processed_ops = process_zuc_hash_op(qp, ops, sessions, num_ops); - process_zuc_cipher_op(ops, session, processed_ops); + process_zuc_cipher_op(ops, sessions, processed_ops); break; default: /* Operation not supported. */ @@ -318,10 +324,10 @@ process_ops(struct rte_crypto_op **ops, struct zuc_session *session, ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; /* Free session if a session-less crypto op. */ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { - memset(session, 0, sizeof(struct zuc_session)); + memset(sessions[i], 0, sizeof(struct zuc_session)); memset(ops[i]->sym->session, 0, - rte_cryptodev_get_header_session_size()); - rte_mempool_put(qp->sess_mp, session); + rte_cryptodev_sym_get_header_session_size()); + rte_mempool_put(qp->sess_mp, sessions[i]); rte_mempool_put(qp->sess_mp, ops[i]->sym->session); ops[i]->sym->session = NULL; } @@ -342,7 +348,10 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, struct rte_crypto_op *c_ops[ZUC_MAX_BURST]; struct rte_crypto_op *curr_c_op; - struct zuc_session *prev_sess = NULL, *curr_sess = NULL; + struct zuc_session *curr_sess; + struct zuc_session *sessions[ZUC_MAX_BURST]; + enum zuc_operation prev_zuc_op = ZUC_OP_NOT_SUPPORTED; + enum zuc_operation curr_zuc_op; struct zuc_qp *qp = queue_pair; unsigned i; uint8_t burst_size = 0; @@ -352,61 +361,70 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, for (i = 0; i < nb_ops; i++) { curr_c_op = ops[i]; - /* Set status as enqueued (not processed yet) by default. */ - curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - curr_sess = zuc_get_session(qp, curr_c_op); - if (unlikely(curr_sess == NULL || - curr_sess->op == ZUC_OP_NOT_SUPPORTED)) { + if (unlikely(curr_sess == NULL)) { curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; break; } - /* Batch ops that share the same session. */ - if (prev_sess == NULL) { - prev_sess = curr_sess; - c_ops[burst_size++] = curr_c_op; - } else if (curr_sess == prev_sess) { - c_ops[burst_size++] = curr_c_op; + curr_zuc_op = curr_sess->op; + + /* + * Batch ops that share the same operation type + * (cipher only, auth only...). + */ + if (burst_size == 0) { + prev_zuc_op = curr_zuc_op; + c_ops[0] = curr_c_op; + sessions[0] = curr_sess; + burst_size++; + } else if (curr_zuc_op == prev_zuc_op) { + c_ops[burst_size] = curr_c_op; + sessions[burst_size] = curr_sess; + burst_size++; /* * When there are enough ops to process in a batch, * process them, and start a new batch. 
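* A full batch is ZUC_MAX_BURST ops; this patch reduces it from 8 to 4.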
*/ if (burst_size == ZUC_MAX_BURST) { - processed_ops = process_ops(c_ops, prev_sess, - qp, burst_size, &enqueued_ops); + processed_ops = process_ops(c_ops, curr_zuc_op, + sessions, qp, burst_size, + &enqueued_ops); if (processed_ops < burst_size) { burst_size = 0; break; } burst_size = 0; - prev_sess = NULL; } } else { /* - * Different session, process the ops - * of the previous session. + * Different operation type, process the ops + * of the previous type. */ - processed_ops = process_ops(c_ops, prev_sess, - qp, burst_size, &enqueued_ops); + processed_ops = process_ops(c_ops, prev_zuc_op, + sessions, qp, burst_size, + &enqueued_ops); if (processed_ops < burst_size) { burst_size = 0; break; } burst_size = 0; - prev_sess = curr_sess; + prev_zuc_op = curr_zuc_op; - c_ops[burst_size++] = curr_c_op; + c_ops[0] = curr_c_op; + sessions[0] = curr_sess; + burst_size++; } } if (burst_size != 0) { - /* Process the crypto ops of the last session. */ - processed_ops = process_ops(c_ops, prev_sess, - qp, burst_size, &enqueued_ops); + /* Process the crypto ops of the last operation type. */ + processed_ops = process_ops(c_ops, prev_zuc_op, + sessions, qp, burst_size, + &enqueued_ops); } qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops; @@ -442,7 +460,7 @@ cryptodev_zuc_create(const char *name, dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); if (dev == NULL) { - ZUC_LOG_ERR("failed to create cryptodev vdev"); + ZUC_LOG(ERR, "failed to create cryptodev vdev"); goto init_error; } @@ -460,11 +478,10 @@ cryptodev_zuc_create(const char *name, internals = dev->data->dev_private; internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; return 0; init_error: - ZUC_LOG_ERR("driver %s: cryptodev_zuc_create failed", + ZUC_LOG(ERR, "driver %s: failed", init_params->name); cryptodev_zuc_remove(vdev); @@ -478,8 +495,7 @@ cryptodev_zuc_probe(struct rte_vdev_device *vdev) "", sizeof(struct zuc_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name; const char *input_args; @@ -522,7 +538,11 @@ static struct cryptodev_driver zuc_crypto_drv; RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd_drv); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id="); -RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv, +RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv.driver, cryptodev_driver_id); + +RTE_INIT(zuc_init_log) +{ + zuc_logtype_driver = rte_log_register("pmd.crypto.zuc"); +} diff --git a/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/drivers/crypto/zuc/rte_zuc_pmd_ops.c index 8abac898..6da39654 100644 --- a/drivers/crypto/zuc/rte_zuc_pmd_ops.c +++ b/drivers/crypto/zuc/rte_zuc_pmd_ops.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #include @@ -130,7 +130,8 @@ zuc_pmd_info_get(struct rte_cryptodev *dev, if (dev_info != NULL) { dev_info->driver_id = dev->driver_id; dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; dev_info->feature_flags = dev->feature_flags; dev_info->capabilities = zuc_pmd_capabilities; } @@ -172,13 +173,13 @@ 
zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp, r = rte_ring_lookup(qp->name); if (r) { if (rte_ring_get_size(r) >= ring_size) { - ZUC_LOG_INFO("Reusing existing ring %s" + ZUC_LOG(INFO, "Reusing existing ring %s" " for processed packets", qp->name); return r; } - ZUC_LOG_ERR("Unable to reuse existing ring %s" + ZUC_LOG(ERR, "Unable to reuse existing ring %s" " for processed packets", qp->name); return NULL; @@ -230,22 +231,6 @@ qp_setup_cleanup: return -1; } -/** Start queue pair */ -static int -zuc_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -zuc_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - /** Return the number of allocated queue pairs */ static uint32_t zuc_pmd_qp_count(struct rte_cryptodev *dev) @@ -255,14 +240,14 @@ zuc_pmd_qp_count(struct rte_cryptodev *dev) /** Returns the size of the ZUC session structure */ static unsigned -zuc_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +zuc_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct zuc_session); } /** Configure a ZUC session from a crypto xform chain */ static int -zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, +zuc_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -271,26 +256,27 @@ zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, int ret; if (unlikely(sess == NULL)) { - ZUC_LOG_ERR("invalid session struct"); + ZUC_LOG(ERR, "invalid session struct"); return -EINVAL; } if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( + ZUC_LOG(ERR, "Couldn't get object from session mempool"); + return -ENOMEM; } ret = zuc_set_session_parameters(sess_private_data, xform); if (ret != 0) { - ZUC_LOG_ERR("failed configure session parameters"); + ZUC_LOG(ERR, "failed configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -298,17 +284,17 @@ zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, /** Clear the memory of session so it doesn't leave key material behind */ static void -zuc_pmd_session_clear(struct rte_cryptodev *dev, +zuc_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { memset(sess_priv, 0, sizeof(struct zuc_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -326,13 +312,11 @@ struct rte_cryptodev_ops zuc_pmd_ops = { .queue_pair_setup = zuc_pmd_qp_setup, .queue_pair_release = zuc_pmd_qp_release, - .queue_pair_start = zuc_pmd_qp_start, - .queue_pair_stop = zuc_pmd_qp_stop, .queue_pair_count = zuc_pmd_qp_count, - .session_get_size = zuc_pmd_session_get_size, - .session_configure = zuc_pmd_session_configure, - .session_clear = zuc_pmd_session_clear + .sym_session_get_size = zuc_pmd_sym_session_get_size, + 
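/* ops renamed from session_* to sym_session_* in the 18.08 cryptodev API */ +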
.sym_session_configure = zuc_pmd_sym_session_configure, + .sym_session_clear = zuc_pmd_sym_session_clear }; struct rte_cryptodev_ops *rte_zuc_pmd_ops = &zuc_pmd_ops; diff --git a/drivers/crypto/zuc/rte_zuc_pmd_private.h b/drivers/crypto/zuc/rte_zuc_pmd_private.h index b83c4a04..5e5906dd 100644 --- a/drivers/crypto/zuc/rte_zuc_pmd_private.h +++ b/drivers/crypto/zuc/rte_zuc_pmd_private.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #ifndef _RTE_ZUC_PMD_PRIVATE_H_ @@ -10,25 +10,12 @@ #define CRYPTODEV_NAME_ZUC_PMD crypto_zuc /**< KASUMI PMD device name */ -#define ZUC_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \ - __func__, __LINE__, ## args) - -#ifdef RTE_LIBRTE_ZUC_DEBUG -#define ZUC_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \ - __func__, __LINE__, ## args) - -#define ZUC_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \ - __func__, __LINE__, ## args) -#else -#define ZUC_LOG_INFO(fmt, args...) -#define ZUC_LOG_DBG(fmt, args...) -#endif +/** ZUC PMD LOGTYPE DRIVER */ +int zuc_logtype_driver; +#define ZUC_LOG(level, fmt, ...) \ + rte_log(RTE_LOG_ ## level, zuc_logtype_driver, \ + "%s()... line %u: " fmt "\n", __func__, __LINE__, \ + ## __VA_ARGS__) #define ZUC_IV_KEY_LENGTH 16 #define ZUC_DIGEST_LENGTH 4 @@ -37,8 +24,6 @@ struct zuc_private { unsigned max_nb_queue_pairs; /**< Max number of queue pairs supported by device */ - unsigned max_nb_sessions; - /**< Max number of sessions supported by device */ }; /** ZUC buffer queue pair */ diff --git a/drivers/event/Makefile b/drivers/event/Makefile index c3d89a15..f301d8dc 100644 --- a/drivers/event/Makefile +++ b/drivers/event/Makefile @@ -7,8 +7,12 @@ include $(RTE_SDK)/mk/rte.vars.mk DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton DIRS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += octeontx +ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y) DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV) += dpaa +endif +ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy) DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2 +endif DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c index 00068015..5443ef56 100644 --- a/drivers/event/dpaa/dpaa_eventdev.c +++ b/drivers/event/dpaa/dpaa_eventdev.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include "dpaa_eventdev.h" @@ -571,7 +571,7 @@ dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev, return 0; } -static const struct rte_eventdev_ops dpaa_eventdev_ops = { +static struct rte_eventdev_ops dpaa_eventdev_ops = { .dev_infos_get = dpaa_event_dev_info_get, .dev_configure = dpaa_event_dev_configure, .dev_start = dpaa_event_dev_start, diff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h index 918fe35c..583e46ca 100644 --- a/drivers/event/dpaa/dpaa_eventdev.h +++ b/drivers/event/dpaa/dpaa_eventdev.h @@ -26,7 +26,7 @@ #define DPAA_EVENT_MAX_QUEUE_FLOWS 2048 #define DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS 8 #define DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS 0 -#define DPAA_EVENT_MAX_EVENT_PORT RTE_MAX_LCORE +#define DPAA_EVENT_MAX_EVENT_PORT RTE_MIN(RTE_MAX_LCORE, 
INT8_MAX) #define DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH 8 #define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS 100UL #define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID ((uint64_t)-1) diff --git a/drivers/event/dpaa/meson.build b/drivers/event/dpaa/meson.build new file mode 100644 index 00000000..0914f858 --- /dev/null +++ b/drivers/event/dpaa/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if host_machine.system() != 'linux' + build = false +endif +deps += ['pmd_dpaa'] +sources = files('dpaa_eventdev.c') + +allow_experimental_apis = true diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile index b26862cd..5e1a6320 100644 --- a/drivers/event/dpaa2/Makefile +++ b/drivers/event/dpaa2/Makefile @@ -18,7 +18,8 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2 CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal -LDLIBS += -lrte_eal -lrte_eventdev -lrte_bus_fslmc -lrte_pmd_dpaa2 +LDLIBS += -lrte_eal -lrte_eventdev +LDLIBS += -lrte_bus_fslmc -lrte_mempool_dpaa2 -lrte_pmd_dpaa2 LDLIBS += -lrte_bus_vdev CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc @@ -28,6 +29,9 @@ EXPORT_MAP := rte_pmd_dpaa2_event_version.map LIBABIVER := 1 +# depends on fslmc bus which uses experimental API +CFLAGS += -DALLOW_EXPERIMENTAL_API + # # all source are stored in SRCS-y # diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c index c3e6fbff..ea1e5cc6 100644 --- a/drivers/event/dpaa2/dpaa2_eventdev.c +++ b/drivers/event/dpaa2/dpaa2_eventdev.c @@ -72,7 +72,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[], if (unlikely(!DPAA2_PER_LCORE_DPIO)) { ret = dpaa2_affine_qbman_swp(); if (ret) { - DPAA2_EVENTDEV_ERR("Failure in affining portal\n"); + DPAA2_EVENTDEV_ERR("Failure in affining portal"); return 0; } } @@ -87,10 +87,10 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[], const struct rte_event *event = &ev[num_tx + loop]; if (event->sched_type != RTE_SCHED_TYPE_ATOMIC) - fqid = evq_info->dpci->queue[ + fqid = evq_info->dpci->rx_queue[ DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid; else - fqid = evq_info->dpci->queue[ + fqid = evq_info->dpci->rx_queue[ DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid; /* Prepare enqueue descriptor */ @@ -122,11 +122,12 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[], if (!loop) return num_tx; frames_to_send = loop; - DPAA2_EVENTDEV_ERR("Unable to allocate memory"); + DPAA2_EVENTDEV_ERR( + "Unable to allocate event object"); goto send_partial; } rte_memcpy(ev_temp, event, sizeof(struct rte_event)); - DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp); + DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp); DPAA2_SET_FD_LEN((&fd_arr[loop]), sizeof(struct rte_event)); } @@ -153,26 +154,12 @@ dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev) static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks) { struct epoll_event epoll_ev; - int ret, i = 0; qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL, QBMAN_SWP_INTERRUPT_DQRI); -RETRY: - ret = epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd, + epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd, &epoll_ev, 1, timeout_ticks); - if (ret < 1) { - /* sometimes due to some spurious interrupts epoll_wait fails - * with errno EINTR. so here we are retrying epoll_wait in such - * case to avoid the problem. 
- */ - if (errno == EINTR) { - DPAA2_EVENTDEV_DEBUG("epoll_wait fails\n"); - if (i++ > 10) - DPAA2_EVENTDEV_DEBUG("Dequeue burst Failed\n"); - goto RETRY; - } - } } static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp, @@ -182,7 +169,7 @@ static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp, struct rte_event *ev) { struct rte_event *ev_temp = - (struct rte_event *)DPAA2_GET_FD_ADDR(fd); + (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd); RTE_SET_USED(rxq); @@ -199,7 +186,7 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp, struct rte_event *ev) { struct rte_event *ev_temp = - (struct rte_event *)DPAA2_GET_FD_ADDR(fd); + (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd); uint8_t dqrr_index = qbman_get_dqrr_idx(dq); RTE_SET_USED(swp); @@ -227,7 +214,7 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[], if (unlikely(!DPAA2_PER_LCORE_DPIO)) { ret = dpaa2_affine_qbman_swp(); if (ret) { - DPAA2_EVENTDEV_ERR("Failure in affining portal\n"); + DPAA2_EVENTDEV_ERR("Failure in affining portal"); return 0; } } @@ -258,12 +245,12 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[], qbman_swp_prefetch_dqrr_next(swp); fd = qbman_result_DQ_fd(dq); - rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq); + rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq); if (rxq) { rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]); } else { qbman_swp_dqrr_consume(swp, dq); - DPAA2_EVENTDEV_ERR("Null Return VQ received\n"); + DPAA2_EVENTDEV_ERR("Null Return VQ received"); return 0; } @@ -335,7 +322,7 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev) priv->event_dev_cfg = conf->event_dev_cfg; DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d", - dev->data->dev_id); + dev->data->dev_id); return 0; } @@ -473,7 +460,6 @@ dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port, dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio, 0, dpaa2_portal->dpio_dev->token, evq_info->dpcon->dpcon_id); - evq_info->link = 0; } return (int)nb_unlinks; @@ -494,23 +480,20 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port, for (i = 0; i < nb_links; i++) { evq_info = &priv->evq_info[queues[i]]; - if (evq_info->link) - continue; ret = dpio_add_static_dequeue_channel( dpaa2_portal->dpio_dev->dpio, CMD_PRI_LOW, dpaa2_portal->dpio_dev->token, evq_info->dpcon->dpcon_id, &channel_index); if (ret < 0) { - DPAA2_EVENTDEV_ERR("Static dequeue cfg failed with ret: %d\n", - ret); + DPAA2_EVENTDEV_ERR( + "Static dequeue config failed: err(%d)", ret); goto err; } qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal, channel_index, 1); evq_info->dpcon->channel_index = channel_index; - evq_info->link = 1; } RTE_SET_USED(priorities); @@ -524,7 +507,6 @@ err: dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio, 0, dpaa2_portal->dpio_dev->token, evq_info->dpcon->dpcon_id); - evq_info->link = 0; } return ret; } @@ -587,8 +569,8 @@ dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev, ret = dpaa2_eth_eventq_attach(eth_dev, i, dpcon_id, queue_conf); if (ret) { - DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_attach failed: ret %d\n", - ret); + DPAA2_EVENTDEV_ERR( + "Event queue attach failed: err(%d)", ret); goto fail; } } @@ -620,7 +602,8 @@ dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev, ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id, dpcon_id, queue_conf); if (ret) { - DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_attach failed: ret: %d\n", ret); + DPAA2_EVENTDEV_ERR( + "Event queue attach failed: err(%d)", ret); 
return ret; } return 0; @@ -639,8 +622,8 @@ dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev, for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { ret = dpaa2_eth_eventq_detach(eth_dev, i); if (ret) { - DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_detach failed: ret %d\n", - ret); + DPAA2_EVENTDEV_ERR( + "Event queue detach failed: err(%d)", ret); return ret; } } @@ -662,7 +645,8 @@ dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev, ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id); if (ret) { - DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_detach failed: ret: %d\n", ret); + DPAA2_EVENTDEV_ERR( + "Event queue detach failed: err(%d)", ret); return ret; } @@ -693,7 +677,7 @@ dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev, return 0; } -static const struct rte_eventdev_ops dpaa2_eventdev_ops = { +static struct rte_eventdev_ops dpaa2_eventdev_ops = { .dev_infos_get = dpaa2_eventdev_info_get, .dev_configure = dpaa2_eventdev_configure, .dev_start = dpaa2_eventdev_start, @@ -730,20 +714,21 @@ dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev, rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id; rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO; - dpci_dev->queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb = + dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb = dpaa2_eventdev_process_parallel; - dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb = + dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb = dpaa2_eventdev_process_atomic; for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) { - rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]); + rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]); ret = dpci_set_rx_queue(&dpci_dev->dpci, CMD_PRI_LOW, dpci_dev->token, i, &rx_queue_cfg); if (ret) { DPAA2_EVENTDEV_ERR( - "set_rx_q failed with err code: %d", ret); + "DPCI Rx queue setup failed: err(%d)", + ret); return ret; } } @@ -763,7 +748,7 @@ dpaa2_eventdev_create(const char *name) sizeof(struct dpaa2_eventdev), rte_socket_id()); if (eventdev == NULL) { - DPAA2_EVENTDEV_ERR("Failed to create eventdev vdev %s", name); + DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name); goto fail; } @@ -798,7 +783,7 @@ dpaa2_eventdev_create(const char *name) ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev); if (ret) { DPAA2_EVENTDEV_ERR( - "dpci setup failed with err code: %d", ret); + "DPCI setup failed: err(%d)", ret); return ret; } priv->max_event_queues++; @@ -836,3 +821,10 @@ static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = { }; RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd); + +RTE_INIT(dpaa2_eventdev_init_log) +{ + dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2"); + if (dpaa2_logtype_event >= 0) + rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE); +} diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h index 91c8f2a3..229f66af 100644 --- a/drivers/event/dpaa2/dpaa2_eventdev.h +++ b/drivers/event/dpaa2/dpaa2_eventdev.h @@ -63,7 +63,6 @@ struct evq_info_t { struct dpaa2_dpci_dev *dpci; /* Configuration provided by the user */ uint32_t event_queue_cfg; - uint8_t link; }; struct dpaa2_eventdev { diff --git a/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/drivers/event/dpaa2/dpaa2_eventdev_logs.h index 7d250c3f..a2c2060c 100644 --- a/drivers/event/dpaa2/dpaa2_eventdev_logs.h +++ b/drivers/event/dpaa2/dpaa2_eventdev_logs.h @@ -9,13 +9,15 @@ extern int dpaa2_logtype_event; #define DPAA2_EVENTDEV_LOG(level, fmt, args...) 
\ - rte_log(RTE_LOG_ ## level, dpaa2_logtype_event, "%s(): " fmt "\n", \ - __func__, ##args) - -#define EVENTDEV_INIT_FUNC_TRACE() DPAA2_EVENTDEV_LOG(DEBUG, " >>") + rte_log(RTE_LOG_ ## level, dpaa2_logtype_event, "dpaa2_event: " \ + fmt "\n", ##args) #define DPAA2_EVENTDEV_DEBUG(fmt, args...) \ - DPAA2_EVENTDEV_LOG(DEBUG, fmt, ## args) + rte_log(RTE_LOG_DEBUG, dpaa2_logtype_event, "dpaa2_event: %s(): " \ + fmt "\n", __func__, ##args) + +#define EVENTDEV_INIT_FUNC_TRACE() DPAA2_EVENTDEV_DEBUG(" >>") + #define DPAA2_EVENTDEV_INFO(fmt, args...) \ DPAA2_EVENTDEV_LOG(INFO, fmt, ## args) #define DPAA2_EVENTDEV_ERR(fmt, args...) \ diff --git a/drivers/event/dpaa2/dpaa2_hw_dpcon.c b/drivers/event/dpaa2/dpaa2_hw_dpcon.c index f2377b98..d64e588a 100644 --- a/drivers/event/dpaa2/dpaa2_hw_dpcon.c +++ b/drivers/event/dpaa2/dpaa2_hw_dpcon.c @@ -20,11 +20,11 @@ #include #include -#include #include #include #include #include "dpaa2_eventdev.h" +#include "dpaa2_eventdev_logs.h" TAILQ_HEAD(dpcon_dev_list, dpaa2_dpcon_dev); static struct dpcon_dev_list dpcon_dev_list @@ -42,7 +42,8 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused, /* Allocate DPAA2 dpcon handle */ dpcon_node = rte_malloc(NULL, sizeof(struct dpaa2_dpcon_dev), 0); if (!dpcon_node) { - PMD_DRV_LOG(ERR, "Memory allocation failed for DPCON Device"); + DPAA2_EVENTDEV_ERR( + "Memory allocation failed for dpcon device"); return -1; } @@ -51,8 +52,8 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused, ret = dpcon_open(&dpcon_node->dpcon, CMD_PRI_LOW, dpcon_id, &dpcon_node->token); if (ret) { - PMD_DRV_LOG(ERR, "Resource alloc failure with err code: %d", - ret); + DPAA2_EVENTDEV_ERR("Unable to open dpcon device: err(%d)", + ret); rte_free(dpcon_node); return -1; } @@ -61,8 +62,8 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused, ret = dpcon_get_attributes(&dpcon_node->dpcon, CMD_PRI_LOW, dpcon_node->token, &attr); if (ret != 0) { - PMD_DRV_LOG(ERR, "Reading device failed with err code: %d", - ret); + DPAA2_EVENTDEV_ERR("dpcon attribute fetch failed: err(%d)", + ret); rte_free(dpcon_node); return -1; } @@ -75,8 +76,6 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused, TAILQ_INSERT_TAIL(&dpcon_dev_list, dpcon_node, next); - RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpcon.%d]\n", dpcon_id); - return 0; } diff --git a/drivers/event/dpaa2/meson.build b/drivers/event/dpaa2/meson.build new file mode 100644 index 00000000..de7a4615 --- /dev/null +++ b/drivers/event/dpaa2/meson.build @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if host_machine.system() != 'linux' + build = false +endif +deps += ['bus_vdev', 'pmd_dpaa2'] +sources = files('dpaa2_hw_dpcon.c', + 'dpaa2_eventdev.c') + +allow_experimental_apis = true diff --git a/drivers/event/meson.build b/drivers/event/meson.build index d7bc4854..e9511993 100644 --- a/drivers/event/meson.build +++ b/drivers/event/meson.build @@ -1,7 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation -drivers = ['skeleton', 'sw', 'octeontx'] +drivers = ['dpaa', 'dpaa2', 'octeontx', 'skeleton', 'sw'] std_deps = ['eventdev', 'kvargs'] config_flag_fmt = 'RTE_LIBRTE_@0@_EVENTDEV_PMD' driver_name_fmt = 'rte_pmd_@0@_event' diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile index 0e49efd8..90ad2217 100644 --- a/drivers/event/octeontx/Makefile +++ b/drivers/event/octeontx/Makefile @@ -10,10 +10,12 @@ include $(RTE_SDK)/mk/rte.vars.mk LIB = librte_pmd_octeontx_ssovf.a CFLAGS += $(WERROR_FLAGS) +CFLAGS += 
-I$(RTE_SDK)/drivers/common/octeontx/ CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/ CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/ +CFLAGS += -DALLOW_EXPERIMENTAL_API -LDLIBS += -lrte_eal -lrte_eventdev -lrte_mempool_octeontx -lrte_pmd_octeontx +LDLIBS += -lrte_eal -lrte_eventdev -lrte_common_octeontx -lrte_pmd_octeontx LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_mbuf -lrte_kvargs LDLIBS += -lrte_bus_vdev @@ -27,18 +29,26 @@ LIBABIVER := 1 SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_worker.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev_selftest.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_probe.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_worker.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_evdev.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_probe.c ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) CFLAGS_ssovf_worker.o += -fno-prefetch-loop-arrays +CFLAGS_timvf_worker.o += -fno-prefetch-loop-arrays ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1) CFLAGS_ssovf_worker.o += -Ofast +CFLAGS_timvf_worker.o += -Ofast else CFLAGS_ssovf_worker.o += -O3 -ffast-math +CFLAGS_timvf_worker.o += -O3 -ffast-math endif else CFLAGS_ssovf_worker.o += -Ofast +CFLAGS_timvf_worker.o += -Ofast endif include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/event/octeontx/meson.build b/drivers/event/octeontx/meson.build index 358fc9fc..04185533 100644 --- a/drivers/event/octeontx/meson.build +++ b/drivers/event/octeontx/meson.build @@ -3,7 +3,12 @@ sources = files('ssovf_worker.c', 'ssovf_evdev.c', - 'ssovf_evdev_selftest.c' + 'ssovf_evdev_selftest.c', + 'ssovf_probe.c', + 'timvf_worker.c', + 'timvf_evdev.c', + 'timvf_probe.c' ) -deps += ['mempool_octeontx', 'bus_vdev', 'pmd_octeontx'] +allow_experimental_apis = true +deps += ['common_octeontx', 'mempool_octeontx', 'bus_vdev', 'pmd_octeontx'] diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c index a1086077..16a3a04b 100644 --- a/drivers/event/octeontx/ssovf_evdev.c +++ b/drivers/event/octeontx/ssovf_evdev.c @@ -18,12 +18,12 @@ #include #include "ssovf_evdev.h" +#include "timvf_evdev.h" int otx_logtype_ssovf; +static uint8_t timvf_enable_stats; -RTE_INIT(otx_ssovf_init_log); -static void -otx_ssovf_init_log(void) +RTE_INIT(otx_ssovf_init_log) { otx_logtype_ssovf = rte_log_register("pmd.event.octeontx"); if (otx_logtype_ssovf >= 0) @@ -49,7 +49,7 @@ ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info) hdr.vfid = 0; memset(info, 0, len); - return octeontx_ssovf_mbox_send(&hdr, NULL, 0, info, len); + return octeontx_mbox_send(&hdr, NULL, 0, info, len); } struct ssovf_mbox_getwork_wait { @@ -69,7 +69,7 @@ ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns) hdr.vfid = 0; tmo_set.wait_ns = timeout_ns; - ret = octeontx_ssovf_mbox_send(&hdr, &tmo_set, len, NULL, 0); + ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0); if (ret) ssovf_log_err("Failed to set getwork timeout(%d)", ret); @@ -99,7 +99,7 @@ ssovf_mbox_priority_set(uint8_t queue, uint8_t prio) grp.affinity = 0xff; grp.priority = prio / 32; /* Normalize to 0 to 7 */ - ret = octeontx_ssovf_mbox_send(&hdr, &grp, len, NULL, 0); + ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0); if (ret) ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio); @@ -125,7 +125,7 @@ ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks) memset(&ns2iter, 0, len); ns2iter.wait_ns = ns; - ret = octeontx_ssovf_mbox_send(&hdr, &ns2iter, len, &ns2iter, len); + ret = 
octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len); if (ret < 0 || (ret != len)) { ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns); return -EIO; @@ -276,7 +276,7 @@ ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id, return -ENOMEM; } - ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0); + ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0); if (ws->base == NULL) { rte_free(ws); ssovf_log_err("Failed to get hws base addr port=%d", port_id); @@ -290,7 +290,7 @@ ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id, ws->port = port_id; for (q = 0; q < edev->nb_event_queues; q++) { - ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2); + ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2); if (ws->grps[q] == NULL) { rte_free(ws); ssovf_log_err("Failed to get grp%d base addr", q); @@ -474,14 +474,9 @@ static int ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev) { - int ret; - const struct octeontx_nic *nic = eth_dev->data->dev_private; RTE_SET_USED(dev); + RTE_SET_USED(eth_dev); - ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); - if (ret) - return 0; - octeontx_pki_port_start(nic->port_id); return 0; } @@ -490,14 +485,9 @@ static int ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev) { - int ret; - const struct octeontx_nic *nic = eth_dev->data->dev_private; RTE_SET_USED(dev); + RTE_SET_USED(eth_dev); - ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); - if (ret) - return 0; - octeontx_pki_port_stop(nic->port_id); return 0; } @@ -529,9 +519,9 @@ ssovf_start(struct rte_eventdev *dev) for (i = 0; i < edev->nb_event_queues; i++) { /* Consume all the events through HWS0 */ - ssows_flush_events(dev->data->ports[0], i); + ssows_flush_events(dev->data->ports[0], i, NULL, NULL); - base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0); + base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0); base += SSO_VHGRP_QCTL; ssovf_write64(1, base); /* Enable SSO group */ } @@ -540,6 +530,16 @@ ssovf_start(struct rte_eventdev *dev) return 0; } +static void +ssows_handle_event(void *arg, struct rte_event event) +{ + struct rte_eventdev *dev = arg; + + if (dev->dev_ops->dev_stop_flush != NULL) + dev->dev_ops->dev_stop_flush(dev->data->dev_id, event, + dev->data->dev_stop_flush_arg); +} + static void ssovf_stop(struct rte_eventdev *dev) { @@ -557,9 +557,10 @@ ssovf_stop(struct rte_eventdev *dev) for (i = 0; i < edev->nb_event_queues; i++) { /* Consume all the events through HWS0 */ - ssows_flush_events(dev->data->ports[0], i); + ssows_flush_events(dev->data->ports[0], i, + ssows_handle_event, dev); - base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0); + base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0); base += SSO_VHGRP_QCTL; ssovf_write64(0, base); /* Disable SSO group */ } @@ -590,8 +591,16 @@ ssovf_selftest(const char *key __rte_unused, const char *value, return 0; } +static int +ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags, + uint32_t *caps, const struct rte_event_timer_adapter_ops **ops) +{ + return timvf_timer_adapter_caps_get(dev, flags, caps, ops, + timvf_enable_stats); +} + /* Initialize and register event driver with DPDK Application */ -static const struct rte_eventdev_ops ssovf_ops = { +static struct rte_eventdev_ops ssovf_ops = { .dev_infos_get = ssovf_info_get, .dev_configure = ssovf_configure, .queue_def_conf = ssovf_queue_def_conf, @@ -610,6 +619,8 @@ static const struct rte_eventdev_ops ssovf_ops = { .eth_rx_adapter_start = ssovf_eth_rx_adapter_start, 
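/* 18.08 adds an event timer adapter op (timer_adapter_caps_get, below), backed by the new timvf code */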
.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop, + .timer_adapter_caps_get = ssovf_timvf_caps_get, + .dev_selftest = test_eventdev_octeontx, .dump = ssovf_dump, @@ -621,7 +632,7 @@ static const struct rte_eventdev_ops ssovf_ops = { static int ssovf_vdev_probe(struct rte_vdev_device *vdev) { - struct octeontx_ssovf_info oinfo; + struct ssovf_info oinfo; struct ssovf_mbox_dev_info info; struct ssovf_evdev *edev; struct rte_eventdev *eventdev; @@ -633,6 +644,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev) static const char *const args[] = { SSOVF_SELFTEST_ARG, + TIMVF_ENABLE_STATS_ARG, NULL }; @@ -660,6 +672,15 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev) rte_kvargs_free(kvlist); return ret; } + + ret = rte_kvargs_process(kvlist, + TIMVF_ENABLE_STATS_ARG, + ssovf_selftest, &timvf_enable_stats); + if (ret != 0) { + ssovf_log_err("%s: Error in timvf stats", name); + rte_kvargs_free(kvlist); + return ret; + } } rte_kvargs_free(kvlist); @@ -679,7 +700,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev) return 0; } - ret = octeontx_ssovf_info(&oinfo); + ret = ssovf_info(&oinfo); if (ret) { ssovf_log_err("Failed to probe and validate ssovfs %d", ret); goto error; diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h index d1825b4f..18293e96 100644 --- a/drivers/event/octeontx/ssovf_evdev.h +++ b/drivers/event/octeontx/ssovf_evdev.h @@ -119,6 +119,16 @@ do { \ } while (0) #endif +struct ssovf_info { + uint16_t domain; /* Domain id */ + uint8_t total_ssovfs; /* Total sso groups available in domain */ + uint8_t total_ssowvfs;/* Total sso hws available in domain */ +}; + +enum ssovf_type { + OCTEONTX_SSO_GROUP, /* SSO group vf */ + OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */ +}; struct ssovf_evdev { uint8_t max_event_queues; @@ -164,8 +174,13 @@ uint16_t ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks); uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); -void ssows_flush_events(struct ssows *ws, uint8_t queue_id); + +typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev); +void ssows_flush_events(struct ssows *ws, uint8_t queue_id, + ssows_handle_event_t fn, void *arg); void ssows_reset(struct ssows *ws); +int ssovf_info(struct ssovf_info *info); +void *ssovf_bar(enum ssovf_type, uint8_t id, uint8_t bar); int test_eventdev_octeontx(void); #endif /* __SSOVF_EVDEV_H__ */ diff --git a/drivers/event/octeontx/ssovf_evdev_selftest.c b/drivers/event/octeontx/ssovf_evdev_selftest.c index 5e012a95..239362fc 100644 --- a/drivers/event/octeontx/ssovf_evdev_selftest.c +++ b/drivers/event/octeontx/ssovf_evdev_selftest.c @@ -688,6 +688,40 @@ test_multi_queue_enq_multi_port_deq(void) nr_ports, 0xff /* invalid */); } +static +void flush(uint8_t dev_id, struct rte_event event, void *arg) +{ + unsigned int *count = arg; + + RTE_SET_USED(dev_id); + if (event.event_type == RTE_EVENT_TYPE_CPU) + *count = *count + 1; + +} + +static int +test_dev_stop_flush(void) +{ + unsigned int total_events = MAX_EVENTS, count = 0; + int ret; + + ret = generate_random_events(total_events); + if (ret) + return -1; + + ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count); + if (ret) + return -2; + rte_event_dev_stop(evdev); + ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL); + if (ret) + return -3; + RTE_TEST_ASSERT_EQUAL(total_events, count, + "count mismatch total_events=%d count=%d", + total_events, count); + return 0; +} + static int 
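/*
 * [Editor's note] test_dev_stop_flush() above exercises exactly that
 * callback path: it enqueues MAX_EVENTS random events, registers flush()
 * with a counter as its argument, stops the device, and asserts every
 * event was seen. Note also that the probe hunk earlier reuses
 * ssovf_selftest() as the rte_kvargs parser for the new "timvf_stats"
 * devarg; a parser of that shape generally looks like (parse_flag is a
 * hypothetical name):
 *
 *   static int
 *   parse_flag(const char *key, const char *value, void *opaque)
 *   {
 *           RTE_SET_USED(key);
 *           *(uint8_t *)opaque = !!atoi(value);
 *           return 0;
 *   }
 *
 * so the PMD could be launched with, illustratively,
 * --vdev="event_octeontx,timvf_stats=1".
 */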
validate_queue_to_port_single_link(uint32_t index, uint8_t port, struct rte_event *ev) @@ -1414,6 +1448,8 @@ test_eventdev_octeontx(void) test_simple_enqdeq_parallel); OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown, test_multi_queue_enq_single_port_deq); + OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown, + test_dev_stop_flush); OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown, test_multi_queue_enq_multi_port_deq); OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown, diff --git a/drivers/event/octeontx/ssovf_probe.c b/drivers/event/octeontx/ssovf_probe.c new file mode 100644 index 00000000..b3db596d --- /dev/null +++ b/drivers/event/octeontx/ssovf_probe.c @@ -0,0 +1,290 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include +#include +#include +#include +#include +#include + +#include "octeontx_mbox.h" +#include "ssovf_evdev.h" + +#define PCI_VENDOR_ID_CAVIUM 0x177D +#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B +#define PCI_DEVICE_ID_OCTEONTX_SSOWS_VF 0xA04D + +#define SSO_MAX_VHGRP (64) +#define SSO_MAX_VHWS (32) + +#define SSO_VHGRP_AQ_THR (0x1E0ULL) + +struct ssovf_res { + uint16_t domain; + uint16_t vfid; + void *bar0; + void *bar2; +}; + +struct ssowvf_res { + uint16_t domain; + uint16_t vfid; + void *bar0; + void *bar2; + void *bar4; +}; + +struct ssowvf_identify { + uint16_t domain; + uint16_t vfid; +}; + +struct ssodev { + uint8_t total_ssovfs; + uint8_t total_ssowvfs; + struct ssovf_res grp[SSO_MAX_VHGRP]; + struct ssowvf_res hws[SSO_MAX_VHWS]; +}; + +static struct ssodev sdev; + +/* Interface functions */ +int +ssovf_info(struct ssovf_info *info) +{ + uint8_t i; + uint16_t domain; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY || info == NULL) + return -EINVAL; + + if (sdev.total_ssovfs == 0 || sdev.total_ssowvfs == 0) + return -ENODEV; + + domain = sdev.grp[0].domain; + for (i = 0; i < sdev.total_ssovfs; i++) { + /* Check vfid's are contiguous and belong to same domain */ + if (sdev.grp[i].vfid != i || + sdev.grp[i].bar0 == NULL || + sdev.grp[i].domain != domain) { + mbox_log_err("GRP error, vfid=%d/%d domain=%d/%d %p", + i, sdev.grp[i].vfid, + domain, sdev.grp[i].domain, + sdev.grp[i].bar0); + return -EINVAL; + } + } + + for (i = 0; i < sdev.total_ssowvfs; i++) { + /* Check vfid's are contiguous and belong to same domain */ + if (sdev.hws[i].vfid != i || + sdev.hws[i].bar0 == NULL || + sdev.hws[i].domain != domain) { + mbox_log_err("HWS error, vfid=%d/%d domain=%d/%d %p", + i, sdev.hws[i].vfid, + domain, sdev.hws[i].domain, + sdev.hws[i].bar0); + return -EINVAL; + } + } + + info->domain = domain; + info->total_ssovfs = sdev.total_ssovfs; + info->total_ssowvfs = sdev.total_ssowvfs; + return 0; +} + +void* +ssovf_bar(enum ssovf_type type, uint8_t id, uint8_t bar) +{ + if (rte_eal_process_type() != RTE_PROC_PRIMARY || + type > OCTEONTX_SSO_HWS) + return NULL; + + if (type == OCTEONTX_SSO_GROUP) { + if (id >= sdev.total_ssovfs) + return NULL; + } else { + if (id >= sdev.total_ssowvfs) + return NULL; + } + + if (type == OCTEONTX_SSO_GROUP) { + switch (bar) { + case 0: + return sdev.grp[id].bar0; + case 2: + return sdev.grp[id].bar2; + default: + return NULL; + } + } else { + switch (bar) { + case 0: + return sdev.hws[id].bar0; + case 2: + return sdev.hws[id].bar2; + case 4: + return sdev.hws[id].bar4; + default: + return NULL; + } + } +} + +/* SSOWVF pcie device aka event port probe */ + +static int +ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + uint16_t vfid; + struct ssowvf_res *res; + 
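/*
 * [Editor's note] ssovf_info() above enforces two invariants before the
 * event device may come up: VF ids must be contiguous from zero, and
 * every VF must belong to the same resource-manager domain. ssovf_bar()
 * is then a pure lookup into the static sdev table filled in by the
 * probe callbacks below; a typical consumer, condensed from
 * ssovf_port_setup() earlier in this patch:
 *
 *   ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
 *   if (ws->base == NULL)
 *           return -EINVAL;   // VF not probed, or bad type/bar index
 */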
struct ssowvf_identify *id; + uint8_t *ram_mbox_base; + + RTE_SET_USED(pci_drv); + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (pci_dev->mem_resource[0].addr == NULL || + pci_dev->mem_resource[2].addr == NULL || + pci_dev->mem_resource[4].addr == NULL) { + mbox_log_err("Empty bars %p %p %p", + pci_dev->mem_resource[0].addr, + pci_dev->mem_resource[2].addr, + pci_dev->mem_resource[4].addr); + return -ENODEV; + } + + if (pci_dev->mem_resource[4].len != SSOW_BAR4_LEN) { + mbox_log_err("Bar4 len mismatch %d != %d", + SSOW_BAR4_LEN, (int)pci_dev->mem_resource[4].len); + return -EINVAL; + } + + id = pci_dev->mem_resource[4].addr; + vfid = id->vfid; + if (vfid >= SSO_MAX_VHWS) { + mbox_log_err("Invalid vfid(%d/%d)", vfid, SSO_MAX_VHWS); + return -EINVAL; + } + + res = &sdev.hws[vfid]; + res->vfid = vfid; + res->bar0 = pci_dev->mem_resource[0].addr; + res->bar2 = pci_dev->mem_resource[2].addr; + res->bar4 = pci_dev->mem_resource[4].addr; + res->domain = id->domain; + + sdev.total_ssowvfs++; + if (vfid == 0) { + ram_mbox_base = ssovf_bar(OCTEONTX_SSO_HWS, 0, 4); + if (octeontx_mbox_set_ram_mbox_base(ram_mbox_base)) { + mbox_log_err("Invalid Failed to set ram mbox base"); + return -EINVAL; + } + } + + rte_wmb(); + mbox_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain, + res->vfid, sdev.total_ssowvfs); + return 0; +} + +static const struct rte_pci_id pci_ssowvf_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_OCTEONTX_SSOWS_VF) + }, + { + .vendor_id = 0, + }, +}; + +static struct rte_pci_driver pci_ssowvf = { + .id_table = pci_ssowvf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = ssowvf_probe, +}; + +RTE_PMD_REGISTER_PCI(octeontx_ssowvf, pci_ssowvf); + +/* SSOVF pcie device aka event queue probe */ + +static int +ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + uint64_t val; + uint16_t vfid; + uint8_t *idreg; + struct ssovf_res *res; + uint8_t *reg; + + RTE_SET_USED(pci_drv); + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (pci_dev->mem_resource[0].addr == NULL || + pci_dev->mem_resource[2].addr == NULL) { + mbox_log_err("Empty bars %p %p", + pci_dev->mem_resource[0].addr, + pci_dev->mem_resource[2].addr); + return -ENODEV; + } + idreg = pci_dev->mem_resource[0].addr; + idreg += SSO_VHGRP_AQ_THR; + val = rte_read64(idreg); + + /* Write back the default value of aq_thr */ + rte_write64((1ULL << 33) - 1, idreg); + vfid = (val >> 16) & 0xffff; + if (vfid >= SSO_MAX_VHGRP) { + mbox_log_err("Invalid vfid (%d/%d)", vfid, SSO_MAX_VHGRP); + return -EINVAL; + } + + res = &sdev.grp[vfid]; + res->vfid = vfid; + res->bar0 = pci_dev->mem_resource[0].addr; + res->bar2 = pci_dev->mem_resource[2].addr; + res->domain = val & 0xffff; + + sdev.total_ssovfs++; + if (vfid == 0) { + reg = ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0); + reg += SSO_VHGRP_PF_MBOX(1); + if (octeontx_mbox_set_reg(reg)) { + mbox_log_err("Invalid Failed to set mbox_reg"); + return -EINVAL; + } + } + + rte_wmb(); + mbox_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain, + res->vfid, sdev.total_ssovfs); + return 0; +} + +static const struct rte_pci_id pci_ssovf_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF) + }, + { + .vendor_id = 0, + }, +}; + +static struct rte_pci_driver pci_ssovf = { + .id_table = pci_ssovf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + 
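/*
 * [Editor's note] Both probe files in this patch follow the same
 * registration shape: a PCI id table keyed on the Cavium vendor id, a
 * rte_pci_driver carrying RTE_PCI_DRV_NEED_MAPPING so EAL maps the BARs
 * before probe runs, and an RTE_PMD_REGISTER_PCI() constructor. The
 * skeleton, condensed for reference:
 *
 *   static const struct rte_pci_id pci_map[] = {
 *           { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
 *                            PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF) },
 *           { .vendor_id = 0 },   // zeroed entry terminates the table
 *   };
 *
 *   static struct rte_pci_driver drv = {
 *           .id_table  = pci_map,
 *           .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
 *           .probe     = ssovf_probe,
 *   };
 *   RTE_PMD_REGISTER_PCI(octeontx_ssovf, drv);
 */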
.probe = ssovf_probe, +}; + +RTE_PMD_REGISTER_PCI(octeontx_ssovf, pci_ssovf); diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c index 753c1e9f..fffa9024 100644 --- a/drivers/event/octeontx/ssovf_worker.c +++ b/drivers/event/octeontx/ssovf_worker.c @@ -198,13 +198,15 @@ ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events) } void -ssows_flush_events(struct ssows *ws, uint8_t queue_id) +ssows_flush_events(struct ssows *ws, uint8_t queue_id, + ssows_handle_event_t fn, void *arg) { uint32_t reg_off; - uint64_t aq_cnt = 1; - uint64_t cq_ds_cnt = 1; - uint64_t enable, get_work0, get_work1; - uint8_t *base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0); + struct rte_event ev; + uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1; + uint64_t get_work0, get_work1; + uint64_t sched_type_queue; + uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0); enable = ssovf_read64(base + SSO_VHGRP_QCTL); if (!enable) @@ -219,11 +221,23 @@ ssows_flush_events(struct ssows *ws, uint8_t queue_id) cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT); /* Extract cq and ds count */ cq_ds_cnt &= 0x1FFF1FFF0000; + ssovf_load_pair(get_work0, get_work1, ws->base + reg_off); - } - RTE_SET_USED(get_work0); - RTE_SET_USED(get_work1); + sched_type_queue = (get_work0 >> 32) & 0xfff; + ws->cur_tt = sched_type_queue & 0x3; + ws->cur_grp = sched_type_queue >> 2; + sched_type_queue = sched_type_queue << 38; + ev.event = sched_type_queue | (get_work0 & 0xffffffff); + if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV) + ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1, + (ev.event >> 20) & 0x7F); + else + ev.u64 = get_work1; + + if (fn != NULL && ev.u64 != 0) + fn(arg, ev); + } } void diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h index d55018a9..7c7306b5 100644 --- a/drivers/event/octeontx/ssovf_worker.h +++ b/drivers/event/octeontx/ssovf_worker.h @@ -28,11 +28,11 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info) { struct rte_mbuf *mbuf; octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work; - rte_prefetch_non_temporal(wqe); /* Get mbuf from wqe */ mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP); + rte_prefetch_non_temporal(mbuf); mbuf->packet_type = ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty]; mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr); diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c new file mode 100644 index 00000000..abbc9a77 --- /dev/null +++ b/drivers/event/octeontx/timvf_evdev.c @@ -0,0 +1,405 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include "timvf_evdev.h" + +int otx_logtype_timvf; + +RTE_INIT(otx_timvf_init_log) +{ + otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer"); + if (otx_logtype_timvf >= 0) + rte_log_set_level(otx_logtype_timvf, RTE_LOG_NOTICE); +} + +struct __rte_packed timvf_mbox_dev_info { + uint64_t ring_active[4]; + uint64_t clk_freq; +}; + +/* Response messages */ +enum { + MBOX_RET_SUCCESS, + MBOX_RET_INVALID, + MBOX_RET_INTERNAL_ERR, +}; + +static int +timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info) +{ + struct octeontx_mbox_hdr hdr = {0}; + uint16_t len = sizeof(struct timvf_mbox_dev_info); + + hdr.coproc = TIM_COPROC; + hdr.msg = TIM_GET_DEV_INFO; + hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. 
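 * (Editor's note: this query also returns clk_freq, which
 * timvf_ring_start() below feeds into the nanosecond-to-cycle
 * conversion. While in this area, the flush rework earlier in the patch
 * deserves a remark: ssows_flush_events() no longer discards
 * get_work0/get_work1 but rebuilds a struct rte_event from the raw
 * words,
 *
 *   sched_type_queue = (get_work0 >> 32) & 0xfff;
 *   ev.event = (sched_type_queue << 38) | (get_work0 & 0xffffffff);
 *   ev.u64   = get_work1;
 *
 * mirroring the normal dequeue decode, so residual events can reach the
 * dev_stop_flush callback.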
*/ + + memset(info, 0, len); + return octeontx_mbox_send(&hdr, NULL, 0, info, len); +} + +static void +timvf_ring_info_get(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer_adapter_info *adptr_info) +{ + struct timvf_ring *timr = adptr->data->adapter_priv; + adptr_info->max_tmo_ns = timr->max_tout; + adptr_info->min_resolution_ns = timr->tck_nsec; + rte_memcpy(&adptr_info->conf, &adptr->data->conf, + sizeof(struct rte_event_timer_adapter_conf)); +} + +static int +timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id) +{ + struct octeontx_mbox_hdr hdr = {0}; + uint16_t len = sizeof(struct timvf_ctrl_reg); + int ret; + + hdr.coproc = TIM_COPROC; + hdr.msg = TIM_SET_RING_INFO; + hdr.vfid = ring_id; + + ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0); + if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS) + return -EACCES; + return 0; +} + +static int +timvf_get_start_cyc(uint64_t *now, uint8_t ring_id) +{ + struct octeontx_mbox_hdr hdr = {0}; + + hdr.coproc = TIM_COPROC; + hdr.msg = TIM_RING_START_CYC_GET; + hdr.vfid = ring_id; + *now = 0; + return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t)); +} + +static int +optimize_bucket_parameters(struct timvf_ring *timr) +{ + uint32_t hbkts; + uint32_t lbkts; + uint64_t tck_nsec; + + hbkts = rte_align32pow2(timr->nb_bkts); + tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10); + + if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS)) + hbkts = 0; + + lbkts = rte_align32prevpow2(timr->nb_bkts); + tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10); + + if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS)) + lbkts = 0; + + if (!hbkts && !lbkts) + return 0; + + if (!hbkts) { + timr->nb_bkts = lbkts; + goto end; + } else if (!lbkts) { + timr->nb_bkts = hbkts; + goto end; + } + + timr->nb_bkts = (hbkts - timr->nb_bkts) < + (timr->nb_bkts - lbkts) ? hbkts : lbkts; +end: + timr->get_target_bkt = bkt_and; + timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / + (timr->nb_bkts - 1)), 10); + return 1; +} + +static int +timvf_ring_start(const struct rte_event_timer_adapter *adptr) +{ + int ret; + uint8_t use_fpa = 0; + uint64_t interval; + uintptr_t pool; + struct timvf_ctrl_reg rctrl; + struct timvf_mbox_dev_info dinfo; + struct timvf_ring *timr = adptr->data->adapter_priv; + + ret = timvf_mbox_dev_info_get(&dinfo); + if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info)) + return -EINVAL; + + /* Calculate the interval cycles according to clock source. */ + switch (timr->clk_src) { + case TIM_CLK_SRC_SCLK: + interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq); + break; + case TIM_CLK_SRC_GPIO: + /* GPIO doesn't work on tck_nsec. 
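 * For the remaining clock sources the interval below comes from
 * NSEC2CLK(ns, freq) = ns * freq / 1e9, defined in timvf_evdev.h; e.g. a
 * 1 us tick against an 800 MHz coprocessor clock (illustrative figure)
 * yields 1000 * 800000000 / 1e9 = 800 cycles per bucket traversal. GPIO
 * is edge driven, so no cycle interval applies and zero is programmed.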
*/ + interval = 0; + break; + case TIM_CLK_SRC_GTI: + interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq); + break; + case TIM_CLK_SRC_PTP: + interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq); + break; + default: + timvf_log_err("Unsupported clock source configured %d", + timr->clk_src); + return -EINVAL; + } + + if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf")) + use_fpa = 1; + + /*CTRL0 register.*/ + rctrl.rctrl0 = interval; + + /*CTRL1 register.*/ + rctrl.rctrl1 = (uint64_t)(timr->clk_src) << 51 | + 1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ | + 1ull << 47 /* ENA */ | + 1ull << 44 /* ENA_LDWB */ | + (timr->nb_bkts - 1); + + rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40; + + if (use_fpa) { + pool = (uintptr_t)((struct rte_mempool *) + timr->chunk_pool)->pool_id; + ret = octeontx_fpa_bufpool_gaura(pool); + if (ret < 0) { + timvf_log_dbg("Unable to get gaura id"); + ret = -ENOMEM; + goto error; + } + timvf_write64((uint64_t)ret, + (uint8_t *)timr->vbar0 + TIM_VRING_AURA); + } else { + rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */; + } + + timvf_write64((uintptr_t)timr->bkt, + (uint8_t *)timr->vbar0 + TIM_VRING_BASE); + timvf_set_chunk_refill(timr, use_fpa); + if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) { + ret = -EACCES; + goto error; + } + + if (timvf_get_start_cyc(&timr->ring_start_cyc, + timr->tim_ring_id) < 0) { + ret = -EACCES; + goto error; + } + timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz()); + timr->fast_div = rte_reciprocal_value_u64(timr->tck_int); + timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64"" + " maxtmo %"PRIu64"\n", + timr->nb_bkts, timr->tck_nsec, interval, + timr->max_tout); + + return 0; +error: + rte_free(timr->bkt); + rte_mempool_free(timr->chunk_pool); + return ret; +} + +static int +timvf_ring_stop(const struct rte_event_timer_adapter *adptr) +{ + struct timvf_ring *timr = adptr->data->adapter_priv; + struct timvf_ctrl_reg rctrl = {0}; + rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0); + rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1); + rctrl.rctrl1 &= ~(1ull << 47); /* Disable */ + rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2); + + if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) + return -EACCES; + return 0; +} + +static int +timvf_ring_create(struct rte_event_timer_adapter *adptr) +{ + char pool_name[25]; + int ret; + uint64_t nb_timers; + struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf; + struct timvf_ring *timr; + struct timvf_info tinfo; + const char *mempool_ops; + unsigned int mp_flags = 0; + + if (timvf_info(&tinfo) < 0) + return -ENODEV; + + if (adptr->data->id >= tinfo.total_timvfs) + return -ENODEV; + + timr = rte_zmalloc("octeontx_timvf_priv", + sizeof(struct timvf_ring), 0); + if (timr == NULL) + return -ENOMEM; + + adptr->data->adapter_priv = timr; + /* Check config parameters. 
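 * The ring geometry falls out of these parameters directly: tck_nsec is
 * timer_tick_ns rounded up to a multiple of 10, nb_bkts = max_tmo_ns /
 * tck_nsec, and nb_chunks = nb_timers / nb_chunk_slots. As a worked
 * example (illustrative numbers), max_tmo_ns = 10 ms with a 10 us tick
 * gives 1000 buckets; the F_ADJUST_RES branch below may then resize that
 * toward a power of two so the cheaper bkt_and() indexing can be used.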
*/ + if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) && + (!rcfg->timer_tick_ns || + rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) { + timvf_log_err("Too low timer ticks"); + goto cfg_err; + } + + timr->clk_src = (int) rcfg->clk_src; + timr->tim_ring_id = adptr->data->id; + timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10); + timr->max_tout = rcfg->max_tmo_ns; + timr->nb_bkts = (timr->max_tout / timr->tck_nsec); + timr->vbar0 = timvf_bar(timr->tim_ring_id, 0); + timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL; + nb_timers = rcfg->nb_timers; + timr->get_target_bkt = bkt_mod; + + timr->nb_chunks = nb_timers / nb_chunk_slots; + + /* Try to optimize the bucket parameters. */ + if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES) + && !rte_is_power_of_2(timr->nb_bkts)) { + if (optimize_bucket_parameters(timr)) { + timvf_log_info("Optimized configured values"); + timvf_log_dbg("nb_bkts : %"PRIu32"", timr->nb_bkts); + timvf_log_dbg("tck_nsec : %"PRIu64"", timr->tck_nsec); + } else + timvf_log_info("Failed to Optimize configured values"); + } + + if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) { + mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET; + timvf_log_info("Using single producer mode"); + } + + timr->bkt = rte_zmalloc("octeontx_timvf_bucket", + (timr->nb_bkts) * sizeof(struct tim_mem_bucket), + 0); + if (timr->bkt == NULL) + goto mem_err; + + snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d", + timr->tim_ring_id); + timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name, + timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(), + mp_flags); + + if (!timr->chunk_pool) { + rte_free(timr->bkt); + timvf_log_err("Unable to create chunkpool."); + return -ENOMEM; + } + + mempool_ops = rte_mbuf_best_mempool_ops(); + ret = rte_mempool_set_ops_byname(timr->chunk_pool, + mempool_ops, NULL); + + if (ret != 0) { + timvf_log_err("Unable to set chunkpool ops."); + goto mem_err; + } + + ret = rte_mempool_populate_default(timr->chunk_pool); + if (ret < 0) { + timvf_log_err("Unable to set populate chunkpool."); + goto mem_err; + } + timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE); + timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT); + timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S); + timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C); + timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S); + + return 0; +mem_err: + rte_free(timr); + return -ENOMEM; +cfg_err: + rte_free(timr); + return -EINVAL; +} + +static int +timvf_ring_free(struct rte_event_timer_adapter *adptr) +{ + struct timvf_ring *timr = adptr->data->adapter_priv; + rte_mempool_free(timr->chunk_pool); + rte_free(timr->bkt); + rte_free(adptr->data->adapter_priv); + return 0; +} + +static int +timvf_stats_get(const struct rte_event_timer_adapter *adapter, + struct rte_event_timer_adapter_stats *stats) +{ + struct timvf_ring *timr = adapter->data->adapter_priv; + uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc; + + stats->evtim_exp_count = timr->tim_arm_cnt; + stats->ev_enq_count = timr->tim_arm_cnt; + stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc, + &timr->fast_div); + return 0; +} + +static int +timvf_stats_reset(const struct rte_event_timer_adapter *adapter) +{ + struct timvf_ring *timr = adapter->data->adapter_priv; + + timr->tim_arm_cnt = 0; + return 0; +} + +static struct rte_event_timer_adapter_ops timvf_ops = { + .init = timvf_ring_create, + .uninit = timvf_ring_free, + .start = timvf_ring_start, + .stop = 
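/*
 * [Editor's note] timvf_stats_get() above converts elapsed TSC cycles to
 * adapter ticks without touching hardware: ring start caches
 * ring_start_cyc and precomputes fast_div = rte_reciprocal_value_u64()
 * of the cycles-per-tick, so the conversion is a multiply and shift:
 *
 *   uint64_t cycles = rte_rdtsc() - timr->ring_start_cyc;
 *   uint64_t ticks  = rte_reciprocal_divide_u64(cycles, &timr->fast_div);
 *
 * timvf_get_target_bucket() in timvf_worker.h reuses the same reciprocal
 * on every arm, which is why it is precomputed once at start.
 */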
timvf_ring_stop, + .get_info = timvf_ring_info_get, +}; + +int +timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags, + uint32_t *caps, const struct rte_event_timer_adapter_ops **ops, + uint8_t enable_stats) +{ + RTE_SET_USED(dev); + + if (enable_stats) { + timvf_ops.stats_get = timvf_stats_get; + timvf_ops.stats_reset = timvf_stats_reset; + } + + if (flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) + timvf_ops.arm_burst = enable_stats ? + timvf_timer_arm_burst_sp_stats : + timvf_timer_arm_burst_sp; + else + timvf_ops.arm_burst = enable_stats ? + timvf_timer_arm_burst_mp_stats : + timvf_timer_arm_burst_mp; + + timvf_ops.arm_tmo_tick_burst = enable_stats ? + timvf_timer_arm_tmo_brst_stats : + timvf_timer_arm_tmo_brst; + timvf_ops.cancel_burst = timvf_timer_cancel_burst; + *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT; + *ops = &timvf_ops; + return 0; +} diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h new file mode 100644 index 00000000..0185593f --- /dev/null +++ b/drivers/event/octeontx/timvf_evdev.h @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef __TIMVF_EVDEV_H__ +#define __TIMVF_EVDEV_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define timvf_log(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, otx_logtype_timvf, \ + "[%s] %s() " fmt "\n", \ + RTE_STR(event_timer_octeontx), __func__, ## args) + +#define timvf_log_info(fmt, ...) timvf_log(INFO, fmt, ##__VA_ARGS__) +#define timvf_log_dbg(fmt, ...) timvf_log(DEBUG, fmt, ##__VA_ARGS__) +#define timvf_log_err(fmt, ...) timvf_log(ERR, fmt, ##__VA_ARGS__) +#define timvf_func_trace timvf_log_dbg + +#define TIM_COPROC (8) +#define TIM_GET_DEV_INFO (1) +#define TIM_GET_RING_INFO (2) +#define TIM_SET_RING_INFO (3) +#define TIM_RING_START_CYC_GET (4) + +#define TIM_MAX_RINGS (64) +#define TIM_DEV_PER_NODE (1) +#define TIM_VF_PER_DEV (64) +#define TIM_RING_PER_DEV (TIM_VF_PER_DEV) +#define TIM_RING_NODE_SHIFT (6) +#define TIM_RING_MASK ((TIM_RING_PER_DEV) - 1) +#define TIM_RING_INVALID (-1) + +#define TIM_MIN_INTERVAL (1E3) +#define TIM_MAX_INTERVAL ((1ull << 32) - 1) +#define TIM_MAX_BUCKETS (1ull << 20) +#define TIM_CHUNK_SIZE (4096) +#define TIM_MAX_CHUNKS_PER_BUCKET (1ull << 32) + +#define TIMVF_MAX_BURST (8) + +/* TIM VF Control/Status registers (CSRs): */ +/* VF_BAR0: */ +#define TIM_VF_NRSPERR_INT (0x0) +#define TIM_VF_NRSPERR_INT_W1S (0x8) +#define TIM_VF_NRSPERR_ENA_W1C (0x10) +#define TIM_VF_NRSPERR_ENA_W1S (0x18) +#define TIM_VRING_FR_RN_CYCLES (0x20) +#define TIM_VRING_FR_RN_GPIOS (0x28) +#define TIM_VRING_FR_RN_GTI (0x30) +#define TIM_VRING_FR_RN_PTP (0x38) +#define TIM_VRING_CTL0 (0x40) +#define TIM_VRING_CTL1 (0x50) +#define TIM_VRING_CTL2 (0x60) +#define TIM_VRING_BASE (0x100) +#define TIM_VRING_AURA (0x108) +#define TIM_VRING_REL (0x110) + +#define TIM_CTL1_W0_S_BUCKET 20 +#define TIM_CTL1_W0_M_BUCKET ((1ull << (40 - 20)) - 1) + +#define TIM_BUCKET_W1_S_NUM_ENTRIES (0) /*Shift*/ +#define TIM_BUCKET_W1_M_NUM_ENTRIES ((1ull << (32 - 0)) - 1) +#define TIM_BUCKET_W1_S_SBT (32) +#define TIM_BUCKET_W1_M_SBT ((1ull << (33 - 32)) - 1) +#define TIM_BUCKET_W1_S_HBT (33) +#define TIM_BUCKET_W1_M_HBT ((1ull << (34 - 33)) - 1) +#define TIM_BUCKET_W1_S_BSK (34) +#define TIM_BUCKET_W1_M_BSK ((1ull << (35 - 34)) - 1) +#define TIM_BUCKET_W1_S_LOCK (40) +#define 
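/*
 * [Editor's note] The TIM_BUCKET_W1_* shift/mask pairs around this point
 * describe how a single 64-bit bucket word packs all per-bucket state,
 * which is what lets timvf_worker.h manipulate it with single atomic
 * read-modify-write operations:
 *
 *   bits  0..31  NUM_ENTRIES      events currently in the bucket
 *   bit      32  SBT
 *   bit      33  HBT              set while the bucket is being traversed
 *   bit      34  BSK
 *   bits 40..47  LOCK             8-bit lock count
 *   bits 48..63  CHUNK_REMAINDER  free slots left in the current chunk
 *
 * e.g. one __atomic_fetch_add of TIM_BUCKET_SEMA_WLOCK claims a slot and
 * takes the lock in the same instruction.
 */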
TIM_BUCKET_W1_M_LOCK ((1ull << (48 - 40)) - 1) +#define TIM_BUCKET_W1_S_CHUNK_REMAINDER (48) +#define TIM_BUCKET_W1_M_CHUNK_REMAINDER ((1ull << (64 - 48)) - 1) + +#define TIM_BUCKET_SEMA \ + (TIM_BUCKET_CHUNK_REMAIN) + +#define TIM_BUCKET_CHUNK_REMAIN \ + (TIM_BUCKET_W1_M_CHUNK_REMAINDER << TIM_BUCKET_W1_S_CHUNK_REMAINDER) + +#define TIM_BUCKET_LOCK \ + (TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK) + +#define TIM_BUCKET_SEMA_WLOCK \ + (TIM_BUCKET_CHUNK_REMAIN | (1ull << TIM_BUCKET_W1_S_LOCK)) + +#define NSEC_PER_SEC 1E9 +#define NSEC2CLK(__ns, __freq) (((__ns) * (__freq)) / NSEC_PER_SEC) +#define CLK2NSEC(__clk, __freq) (((__clk) * NSEC_PER_SEC) / (__freq)) + +#define timvf_read64 rte_read64_relaxed +#define timvf_write64 rte_write64_relaxed + +#define TIMVF_ENABLE_STATS_ARG ("timvf_stats") + +extern int otx_logtype_timvf; +static const uint16_t nb_chunk_slots = (TIM_CHUNK_SIZE / 16) - 1; + +struct timvf_info { + uint16_t domain; /* Domain id */ + uint8_t total_timvfs; /* Total timvf available in domain */ +}; + +enum timvf_clk_src { + TIM_CLK_SRC_SCLK = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, + TIM_CLK_SRC_GPIO = RTE_EVENT_TIMER_ADAPTER_EXT_CLK0, + TIM_CLK_SRC_GTI = RTE_EVENT_TIMER_ADAPTER_EXT_CLK1, + TIM_CLK_SRC_PTP = RTE_EVENT_TIMER_ADAPTER_EXT_CLK2, +}; + +/* TIM_MEM_BUCKET */ +struct tim_mem_bucket { + uint64_t first_chunk; + union { + uint64_t w1; + struct { + uint32_t nb_entry; + uint8_t sbt:1; + uint8_t hbt:1; + uint8_t bsk:1; + uint8_t rsvd:5; + uint8_t lock; + int16_t chunk_remainder; + }; + }; + uint64_t current_chunk; + uint64_t pad; +} __rte_packed __rte_aligned(8); + +struct tim_mem_entry { + uint64_t w0; + uint64_t wqe; +} __rte_packed; + +struct timvf_ctrl_reg { + uint64_t rctrl0; + uint64_t rctrl1; + uint64_t rctrl2; + uint8_t use_pmu; +} __rte_packed; + +struct timvf_ring; + +typedef uint32_t (*bkt_id)(const uint32_t bkt_tcks, const uint32_t nb_bkts); +typedef struct tim_mem_entry * (*refill_chunk)( + struct tim_mem_bucket * const bkt, + struct timvf_ring * const timr); + +struct timvf_ring { + bkt_id get_target_bkt; + refill_chunk refill_chunk; + struct rte_reciprocal_u64 fast_div; + uint64_t ring_start_cyc; + uint32_t nb_bkts; + struct tim_mem_bucket *bkt; + void *chunk_pool; + uint64_t tck_int; + volatile uint64_t tim_arm_cnt; + uint64_t tck_nsec; + void *vbar0; + void *bkt_pos; + uint64_t max_tout; + uint64_t nb_chunks; + enum timvf_clk_src clk_src; + uint16_t tim_ring_id; +} __rte_cache_aligned; + +static __rte_always_inline uint32_t +bkt_mod(const uint32_t rel_bkt, const uint32_t nb_bkts) +{ + return rel_bkt % nb_bkts; +} + +static __rte_always_inline uint32_t +bkt_and(uint32_t rel_bkt, uint32_t nb_bkts) +{ + return rel_bkt & (nb_bkts - 1); +} + +int timvf_info(struct timvf_info *tinfo); +void *timvf_bar(uint8_t id, uint8_t bar); +int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags, + uint32_t *caps, const struct rte_event_timer_adapter_ops **ops, + uint8_t enable_stats); +uint16_t timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers); +uint16_t timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers); +uint16_t timvf_timer_arm_burst_sp_stats( + const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers); +uint16_t timvf_timer_arm_burst_mp(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers); +uint16_t 
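/*
 * [Editor's note] bkt_mod() and bkt_and() above are the two
 * interchangeable strategies behind timr->get_target_bkt: a generic
 * modulo for arbitrary ring sizes versus a mask for power-of-two rings,
 * e.g.
 *
 *   bkt_mod(1034, 1000) == 34    // works for any nb_bkts, needs a divide
 *   bkt_and(1034, 1024) == 10    // nb_bkts must be a power of two
 *
 * optimize_bucket_parameters() in timvf_evdev.c switches a ring over to
 * bkt_and() when RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES lets it resize
 * nb_bkts.
 */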
timvf_timer_arm_burst_mp_stats( + const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers); +uint16_t timvf_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint64_t timeout_tick, + const uint16_t nb_timers); +uint16_t timvf_timer_arm_tmo_brst_stats( + const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint64_t timeout_tick, + const uint16_t nb_timers); +void timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa); + +#endif /* __TIMVF_EVDEV_H__ */ diff --git a/drivers/event/octeontx/timvf_probe.c b/drivers/event/octeontx/timvf_probe.c new file mode 100644 index 00000000..08dbd2be --- /dev/null +++ b/drivers/event/octeontx/timvf_probe.c @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include +#include +#include +#include + +#include + +#include "ssovf_evdev.h" +#include "timvf_evdev.h" + +#ifndef PCI_VENDOR_ID_CAVIUM +#define PCI_VENDOR_ID_CAVIUM (0x177D) +#endif + +#define PCI_DEVICE_ID_OCTEONTX_TIM_VF (0xA051) +#define TIM_MAX_RINGS (64) + +struct timvf_res { + uint16_t domain; + uint16_t vfid; + void *bar0; + void *bar2; + void *bar4; +}; + +struct timdev { + uint8_t total_timvfs; + struct timvf_res rings[TIM_MAX_RINGS]; +}; + +static struct timdev tdev; + +int +timvf_info(struct timvf_info *tinfo) +{ + int i; + struct ssovf_info info; + + if (tinfo == NULL) + return -EINVAL; + + if (!tdev.total_timvfs) + return -ENODEV; + + if (ssovf_info(&info) < 0) + return -EINVAL; + + for (i = 0; i < tdev.total_timvfs; i++) { + if (info.domain != tdev.rings[i].domain) { + timvf_log_err("GRP error, vfid=%d/%d domain=%d/%d %p", + i, tdev.rings[i].vfid, + info.domain, tdev.rings[i].domain, + tdev.rings[i].bar0); + return -EINVAL; + } + } + + tinfo->total_timvfs = tdev.total_timvfs; + tinfo->domain = info.domain; + return 0; +} + +void* +timvf_bar(uint8_t id, uint8_t bar) +{ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return NULL; + + if (id > tdev.total_timvfs) + return NULL; + + switch (bar) { + case 0: + return tdev.rings[id].bar0; + case 4: + return tdev.rings[id].bar4; + default: + return NULL; + } +} + +static int +timvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + uint64_t val; + uint16_t vfid; + struct timvf_res *res; + + RTE_SET_USED(pci_drv); + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (pci_dev->mem_resource[0].addr == NULL || + pci_dev->mem_resource[4].addr == NULL) { + timvf_log_err("Empty bars %p %p", + pci_dev->mem_resource[0].addr, + pci_dev->mem_resource[4].addr); + return -ENODEV; + } + + val = rte_read64((uint8_t *)pci_dev->mem_resource[0].addr + + 0x100 /* TIM_VRINGX_BASE */); + vfid = (val >> 23) & 0xff; + if (vfid >= TIM_MAX_RINGS) { + timvf_log_err("Invalid vfid(%d/%d)", vfid, TIM_MAX_RINGS); + return -EINVAL; + } + + res = &tdev.rings[tdev.total_timvfs]; + res->vfid = vfid; + res->bar0 = pci_dev->mem_resource[0].addr; + res->bar2 = pci_dev->mem_resource[2].addr; + res->bar4 = pci_dev->mem_resource[4].addr; + res->domain = (val >> 7) & 0xffff; + tdev.total_timvfs++; + rte_wmb(); + + timvf_log_dbg("Domain=%d VFid=%d bar0 %p total_timvfs=%d", res->domain, + res->vfid, pci_dev->mem_resource[0].addr, + tdev.total_timvfs); + return 0; +} + + +static const struct rte_pci_id pci_timvf_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + 
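/*
 * [Editor's note] Unlike the SSOW VF, which exposes an identify page in
 * BAR4, timvf_probe() above recovers its identity by reading the ring
 * base register at BAR0 + 0x100 (TIM_VRINGX_BASE) and unpacking
 * hardware-defined fields:
 *
 *   val    = rte_read64((uint8_t *)bar0 + 0x100);
 *   vfid   = (val >> 23) & 0xff;      // ring number within the domain
 *   domain = (val >> 7) & 0xffff;     // resource-manager domain id
 *
 * which is why an unmapped BAR0 is fatal (-ENODEV) before that read.
 */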
PCI_DEVICE_ID_OCTEONTX_TIM_VF) + }, + { + .vendor_id = 0, + }, +}; + +static struct rte_pci_driver pci_timvf = { + .id_table = pci_timvf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA, + .probe = timvf_probe, + .remove = NULL, +}; + +RTE_PMD_REGISTER_PCI(octeontx_timvf, pci_timvf); diff --git a/drivers/event/octeontx/timvf_worker.c b/drivers/event/octeontx/timvf_worker.c new file mode 100644 index 00000000..50790e19 --- /dev/null +++ b/drivers/event/octeontx/timvf_worker.c @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include "timvf_worker.h" + +static inline int +timvf_timer_reg_checks(const struct timvf_ring * const timr, + struct rte_event_timer * const tim) +{ + if (unlikely(tim->state)) { + tim->state = RTE_EVENT_TIMER_ERROR; + rte_errno = EALREADY; + goto fail; + } + + if (unlikely(!tim->timeout_ticks || + tim->timeout_ticks >= timr->nb_bkts)) { + tim->state = tim->timeout_ticks ? RTE_EVENT_TIMER_ERROR_TOOLATE + : RTE_EVENT_TIMER_ERROR_TOOEARLY; + rte_errno = EINVAL; + goto fail; + } + + return 0; +fail: + return -EINVAL; +} + +static inline void +timvf_format_event(const struct rte_event_timer * const tim, + struct tim_mem_entry * const entry) +{ + entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 | + (tim->ev.event & 0xFFFFFFFFF); + entry->wqe = tim->ev.u64; +} + +uint16_t +timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers) +{ + RTE_SET_USED(adptr); + int ret; + uint16_t index; + + for (index = 0; index < nb_timers; index++) { + if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) { + rte_errno = EALREADY; + break; + } + + if (tim[index]->state != RTE_EVENT_TIMER_ARMED) { + rte_errno = EINVAL; + break; + } + ret = timvf_rem_entry(tim[index]); + if (ret) { + rte_errno = -ret; + break; + } + } + return index; +} + +uint16_t +timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers) +{ + int ret; + uint16_t index; + struct tim_mem_entry entry; + struct timvf_ring *timr = adptr->data->adapter_priv; + for (index = 0; index < nb_timers; index++) { + if (timvf_timer_reg_checks(timr, tim[index])) + break; + + timvf_format_event(tim[index], &entry); + ret = timvf_add_entry_sp(timr, tim[index]->timeout_ticks, + tim[index], &entry); + if (unlikely(ret)) { + rte_errno = -ret; + break; + } + } + + return index; +} + +uint16_t +timvf_timer_arm_burst_sp_stats(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers) +{ + uint16_t ret; + struct timvf_ring *timr = adptr->data->adapter_priv; + + ret = timvf_timer_arm_burst_sp(adptr, tim, nb_timers); + timr->tim_arm_cnt += ret; + + return ret; +} + +uint16_t +timvf_timer_arm_burst_mp(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers) +{ + int ret; + uint16_t index; + struct tim_mem_entry entry; + struct timvf_ring *timr = adptr->data->adapter_priv; + for (index = 0; index < nb_timers; index++) { + if (timvf_timer_reg_checks(timr, tim[index])) + break; + timvf_format_event(tim[index], &entry); + ret = timvf_add_entry_mp(timr, tim[index]->timeout_ticks, + tim[index], &entry); + if (unlikely(ret)) { + rte_errno = -ret; + break; + } + } + + return index; +} + +uint16_t +timvf_timer_arm_burst_mp_stats(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers) +{ + uint16_t ret; + struct 
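/*
 * [Editor's note] All arm/cancel handlers in this file follow the
 * eventdev timer burst convention: process timers in order, stop at the
 * first failure, record the cause in rte_errno, and return how many
 * completed. A caller can therefore resume from the failure point:
 *
 *   uint16_t n = timvf_timer_arm_burst_sp(adptr, tims, nb);
 *   if (n != nb)
 *           report_failure(tims[n], rte_errno);  // hypothetical helper
 *
 * The *_stats variants, like the surrounding function, are thin wrappers
 * that add the armed count to timr->tim_arm_cnt when the "timvf_stats"
 * devarg is set.
 */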
timvf_ring *timr = adptr->data->adapter_priv; + + ret = timvf_timer_arm_burst_mp(adptr, tim, nb_timers); + timr->tim_arm_cnt += ret; + + return ret; +} + +uint16_t +timvf_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint64_t timeout_tick, + const uint16_t nb_timers) +{ + int ret; + uint16_t set_timers = 0; + uint16_t idx; + uint16_t arr_idx = 0; + struct timvf_ring *timr = adptr->data->adapter_priv; + struct tim_mem_entry entry[TIMVF_MAX_BURST] __rte_cache_aligned; + + if (unlikely(!timeout_tick || timeout_tick >= timr->nb_bkts)) { + const enum rte_event_timer_state state = timeout_tick ? + RTE_EVENT_TIMER_ERROR_TOOLATE : + RTE_EVENT_TIMER_ERROR_TOOEARLY; + for (idx = 0; idx < nb_timers; idx++) + tim[idx]->state = state; + rte_errno = EINVAL; + return 0; + } + + while (arr_idx < nb_timers) { + for (idx = 0; idx < TIMVF_MAX_BURST && (arr_idx < nb_timers); + idx++, arr_idx++) { + timvf_format_event(tim[arr_idx], &entry[idx]); + } + ret = timvf_add_entry_brst(timr, timeout_tick, &tim[set_timers], + entry, idx); + set_timers += ret; + if (ret != idx) + break; + } + + return set_timers; +} + + +uint16_t +timvf_timer_arm_tmo_brst_stats(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint64_t timeout_tick, + const uint16_t nb_timers) +{ + uint16_t set_timers; + struct timvf_ring *timr = adptr->data->adapter_priv; + + set_timers = timvf_timer_arm_tmo_brst(adptr, tim, timeout_tick, + nb_timers); + timr->tim_arm_cnt += set_timers; + + return set_timers; +} + +void +timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa) +{ + if (use_fpa) + timr->refill_chunk = timvf_refill_chunk_fpa; + else + timr->refill_chunk = timvf_refill_chunk_generic; +} diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h new file mode 100644 index 00000000..dede1a4a --- /dev/null +++ b/drivers/event/octeontx/timvf_worker.h @@ -0,0 +1,443 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include +#include + +#include "timvf_evdev.h" + +static inline int16_t +timr_bkt_fetch_rem(uint64_t w1) +{ + return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) & + TIM_BUCKET_W1_M_CHUNK_REMAINDER; +} + +static inline int16_t +timr_bkt_get_rem(struct tim_mem_bucket *bktp) +{ + return __atomic_load_n(&bktp->chunk_remainder, + __ATOMIC_ACQUIRE); +} + +static inline void +timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v) +{ + __atomic_store_n(&bktp->chunk_remainder, v, + __ATOMIC_RELEASE); +} + +static inline void +timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v) +{ + __atomic_fetch_sub(&bktp->chunk_remainder, v, + __ATOMIC_RELEASE); +} + +static inline uint8_t +timr_bkt_get_sbt(uint64_t w1) +{ + return (w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT; +} + +static inline uint64_t +timr_bkt_set_sbt(struct tim_mem_bucket *bktp) +{ + const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT; + return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL); +} + +static inline uint64_t +timr_bkt_clr_sbt(struct tim_mem_bucket *bktp) +{ + const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT); + return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL); +} + +static inline uint8_t +timr_bkt_get_shbt(uint64_t w1) +{ + return ((w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT) | + ((w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT); +} + +static inline uint8_t +timr_bkt_get_hbt(uint64_t w1) +{ + return (w1 >> TIM_BUCKET_W1_S_HBT) & 
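/*
 * [Editor's note] Every helper around here touches the shared bucket
 * word only through __atomic builtins: fetch_add to take the slot
 * semaphore and lock together, fetch_or/fetch_and for the traversal
 * flags, and acquire/release pairs on chunk_remainder. The payoff shows
 * in the producers below, where one read-modify-write claims a slot:
 *
 *   lock_sema = timr_bkt_fetch_sema_lock(bkt);  // single fetch_add
 *   rem = timr_bkt_fetch_rem(lock_sema);        // decode old remainder
 *
 * and the returned old value alone decides between copy, refill or
 * retry.
 */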
TIM_BUCKET_W1_M_HBT; +} + +static inline uint8_t +timr_bkt_get_bsk(uint64_t w1) +{ + return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK; +} + +static inline uint64_t +timr_bkt_clr_bsk(struct tim_mem_bucket *bktp) +{ + /*Clear everything except lock. */ + const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK; + return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL); +} + +static inline uint64_t +timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp) +{ + return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK, + __ATOMIC_ACQ_REL); +} + +static inline uint64_t +timr_bkt_fetch_sema(struct tim_mem_bucket *bktp) +{ + return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA, + __ATOMIC_RELAXED); +} + +static inline uint64_t +timr_bkt_inc_lock(struct tim_mem_bucket *bktp) +{ + const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK; + return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL); +} + +static inline void +timr_bkt_dec_lock(struct tim_mem_bucket *bktp) +{ + __atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_ACQ_REL); +} + +static inline uint32_t +timr_bkt_get_nent(uint64_t w1) +{ + return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) & + TIM_BUCKET_W1_M_NUM_ENTRIES; +} + +static inline void +timr_bkt_inc_nent(struct tim_mem_bucket *bktp) +{ + __atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED); +} + +static inline void +timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v) +{ + __atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED); +} + +static inline uint64_t +timr_bkt_clr_nent(struct tim_mem_bucket *bktp) +{ + const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES << + TIM_BUCKET_W1_S_NUM_ENTRIES); + return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL); +} + +static inline struct tim_mem_entry * +timr_clr_bkt(struct timvf_ring * const timr, struct tim_mem_bucket * const bkt) +{ + struct tim_mem_entry *chunk; + struct tim_mem_entry *pnext; + chunk = ((struct tim_mem_entry *)(uintptr_t)bkt->first_chunk); + chunk = (struct tim_mem_entry *)(uintptr_t)(chunk + nb_chunk_slots)->w0; + + while (chunk) { + pnext = (struct tim_mem_entry *)(uintptr_t) + ((chunk + nb_chunk_slots)->w0); + rte_mempool_put(timr->chunk_pool, chunk); + chunk = pnext; + } + return (struct tim_mem_entry *)(uintptr_t)bkt->first_chunk; +} + +static inline int +timvf_rem_entry(struct rte_event_timer *tim) +{ + uint64_t lock_sema; + struct tim_mem_entry *entry; + struct tim_mem_bucket *bkt; + if (tim->impl_opaque[1] == 0 || + tim->impl_opaque[0] == 0) + return -ENOENT; + + entry = (struct tim_mem_entry *)(uintptr_t)tim->impl_opaque[0]; + if (entry->wqe != tim->ev.u64) { + tim->impl_opaque[1] = tim->impl_opaque[0] = 0; + return -ENOENT; + } + bkt = (struct tim_mem_bucket *)(uintptr_t)tim->impl_opaque[1]; + lock_sema = timr_bkt_inc_lock(bkt); + if (timr_bkt_get_shbt(lock_sema) + || !timr_bkt_get_nent(lock_sema)) { + timr_bkt_dec_lock(bkt); + tim->impl_opaque[1] = tim->impl_opaque[0] = 0; + return -ENOENT; + } + + entry->w0 = entry->wqe = 0; + timr_bkt_dec_lock(bkt); + + tim->state = RTE_EVENT_TIMER_CANCELED; + tim->impl_opaque[1] = tim->impl_opaque[0] = 0; + return 0; +} + +static inline struct tim_mem_entry * +timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt, + struct timvf_ring * const timr) +{ + struct tim_mem_entry *chunk; + + if (bkt->nb_entry || !bkt->first_chunk) { + if (unlikely(rte_mempool_get(timr->chunk_pool, + (void **)&chunk))) { + return NULL; + } + if (bkt->nb_entry) { + *(uint64_t *)(((struct tim_mem_entry *)(uintptr_t) + bkt->current_chunk) + + nb_chunk_slots) = + (uintptr_t) chunk; + } 
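/*
 * [Editor's note] Chunks are TIM_CHUNK_SIZE (4 KB) blocks holding
 * nb_chunk_slots = 4096 / 16 - 1 = 255 entries; the 256th slot is
 * reserved as a 64-bit pointer to the next chunk, which is what the
 * "(chunk + nb_chunk_slots)" expressions in these refill helpers read
 * and write. Walking a bucket's chunk list is therefore:
 *
 *   struct tim_mem_entry *c =
 *           (struct tim_mem_entry *)(uintptr_t)bkt->first_chunk;
 *   while (c != NULL)
 *           c = (struct tim_mem_entry *)(uintptr_t)
 *                           (c + nb_chunk_slots)->w0;
 */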
else { + bkt->first_chunk = (uintptr_t) chunk; + } + } else { + chunk = timr_clr_bkt(timr, bkt); + bkt->first_chunk = (uintptr_t)chunk; + } + *(uint64_t *)(chunk + nb_chunk_slots) = 0; + + return chunk; +} + +static inline struct tim_mem_entry * +timvf_refill_chunk_fpa(struct tim_mem_bucket * const bkt, + struct timvf_ring * const timr) +{ + struct tim_mem_entry *chunk; + + if (unlikely(rte_mempool_get(timr->chunk_pool, (void **)&chunk))) + return NULL; + + *(uint64_t *)(chunk + nb_chunk_slots) = 0; + if (bkt->nb_entry) { + *(uint64_t *)(((struct tim_mem_entry *)(uintptr_t) + bkt->current_chunk) + + nb_chunk_slots) = + (uintptr_t) chunk; + } else { + bkt->first_chunk = (uintptr_t) chunk; + } + + return chunk; +} + +static inline struct tim_mem_bucket * +timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt) +{ + const uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc; + const uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc, + &timr->fast_div) + rel_bkt; + const uint32_t tbkt_id = timr->get_target_bkt(bucket, + timr->nb_bkts); + return &timr->bkt[tbkt_id]; +} + +/* Single producer functions. */ +static inline int +timvf_add_entry_sp(struct timvf_ring * const timr, const uint32_t rel_bkt, + struct rte_event_timer * const tim, + const struct tim_mem_entry * const pent) +{ + int16_t rem; + uint64_t lock_sema; + struct tim_mem_bucket *bkt; + struct tim_mem_entry *chunk; + + + bkt = timvf_get_target_bucket(timr, rel_bkt); +__retry: + /*Get Bucket sema*/ + lock_sema = timr_bkt_fetch_sema(bkt); + /* Bucket related checks. */ + if (unlikely(timr_bkt_get_hbt(lock_sema))) + goto __retry; + + /* Insert the work. */ + rem = timr_bkt_fetch_rem(lock_sema); + + if (!rem) { + chunk = timr->refill_chunk(bkt, timr); + if (unlikely(chunk == NULL)) { + timr_bkt_set_rem(bkt, 0); + tim->impl_opaque[0] = tim->impl_opaque[1] = 0; + tim->state = RTE_EVENT_TIMER_ERROR; + return -ENOMEM; + } + bkt->current_chunk = (uintptr_t) chunk; + timr_bkt_set_rem(bkt, nb_chunk_slots - 1); + } else { + chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk; + chunk += nb_chunk_slots - rem; + } + /* Copy work entry. */ + *chunk = *pent; + timr_bkt_inc_nent(bkt); + + tim->impl_opaque[0] = (uintptr_t)chunk; + tim->impl_opaque[1] = (uintptr_t)bkt; + tim->state = RTE_EVENT_TIMER_ARMED; + return 0; +} + +/* Multi producer functions. */ +static inline int +timvf_add_entry_mp(struct timvf_ring * const timr, const uint32_t rel_bkt, + struct rte_event_timer * const tim, + const struct tim_mem_entry * const pent) +{ + int16_t rem; + uint64_t lock_sema; + struct tim_mem_bucket *bkt; + struct tim_mem_entry *chunk; + +__retry: + bkt = timvf_get_target_bucket(timr, rel_bkt); + /* Bucket related checks. */ + /*Get Bucket sema*/ + lock_sema = timr_bkt_fetch_sema_lock(bkt); + if (unlikely(timr_bkt_get_shbt(lock_sema))) { + timr_bkt_dec_lock(bkt); + goto __retry; + } + + rem = timr_bkt_fetch_rem(lock_sema); + + if (rem < 0) { + /* goto diff bucket. */ + timr_bkt_dec_lock(bkt); + goto __retry; + } else if (!rem) { + /*Only one thread can be here*/ + chunk = timr->refill_chunk(bkt, timr); + if (unlikely(chunk == NULL)) { + timr_bkt_set_rem(bkt, 0); + timr_bkt_dec_lock(bkt); + tim->impl_opaque[0] = tim->impl_opaque[1] = 0; + tim->state = RTE_EVENT_TIMER_ERROR; + return -ENOMEM; + } + bkt->current_chunk = (uintptr_t) chunk; + timr_bkt_set_rem(bkt, nb_chunk_slots - 1); + } else { + chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk; + chunk += nb_chunk_slots - rem; + } + /* Copy work entry. 
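 * (Editor's note: by the time this copy runs, the MP path has already
 * serialised against other producers and the hardware: the single
 * fetch-add in timr_bkt_fetch_sema_lock() claimed a slot and raised the
 * lock in one step; a set traversal flag or a negative remainder in the
 * returned old value sends the producer back to __retry after dropping
 * the lock, and an exact zero remainder elects exactly one thread to
 * attach a fresh chunk.)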
*/ + *chunk = *pent; + timr_bkt_inc_nent(bkt); + timr_bkt_dec_lock(bkt); + + tim->impl_opaque[0] = (uintptr_t)chunk; + tim->impl_opaque[1] = (uintptr_t)bkt; + tim->state = RTE_EVENT_TIMER_ARMED; + return 0; +} + +static inline uint16_t +timvf_cpy_wrk(uint16_t index, uint16_t cpy_lmt, + struct tim_mem_entry *chunk, + struct rte_event_timer ** const tim, + const struct tim_mem_entry * const ents, + const struct tim_mem_bucket * const bkt) +{ + for (; index < cpy_lmt; index++) { + *chunk = *(ents + index); + tim[index]->impl_opaque[0] = (uintptr_t)chunk++; + tim[index]->impl_opaque[1] = (uintptr_t)bkt; + tim[index]->state = RTE_EVENT_TIMER_ARMED; + } + + return index; +} + +/* Burst mode functions */ +static inline int +timvf_add_entry_brst(struct timvf_ring * const timr, const uint16_t rel_bkt, + struct rte_event_timer ** const tim, + const struct tim_mem_entry *ents, + const uint16_t nb_timers) +{ + int16_t rem; + int16_t crem; + uint8_t lock_cnt; + uint16_t index = 0; + uint16_t chunk_remainder; + uint64_t lock_sema; + struct tim_mem_bucket *bkt; + struct tim_mem_entry *chunk; + +__retry: + bkt = timvf_get_target_bucket(timr, rel_bkt); + + /* Only one thread beyond this. */ + lock_sema = timr_bkt_inc_lock(bkt); + lock_cnt = (uint8_t) + ((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK); + + if (lock_cnt) { + timr_bkt_dec_lock(bkt); + goto __retry; + } + + /* Bucket related checks. */ + if (unlikely(timr_bkt_get_hbt(lock_sema))) { + timr_bkt_dec_lock(bkt); + goto __retry; + } + + chunk_remainder = timr_bkt_fetch_rem(lock_sema); + rem = chunk_remainder - nb_timers; + if (rem < 0) { + crem = nb_chunk_slots - chunk_remainder; + if (chunk_remainder && crem) { + chunk = ((struct tim_mem_entry *) + (uintptr_t)bkt->current_chunk) + crem; + + index = timvf_cpy_wrk(index, chunk_remainder, + chunk, tim, ents, bkt); + timr_bkt_sub_rem(bkt, chunk_remainder); + timr_bkt_add_nent(bkt, chunk_remainder); + } + rem = nb_timers - chunk_remainder; + ents = ents + chunk_remainder; + + chunk = timr->refill_chunk(bkt, timr); + if (unlikely(chunk == NULL)) { + timr_bkt_dec_lock(bkt); + rte_errno = ENOMEM; + tim[index]->state = RTE_EVENT_TIMER_ERROR; + return crem; + } + *(uint64_t *)(chunk + nb_chunk_slots) = 0; + bkt->current_chunk = (uintptr_t) chunk; + + index = timvf_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt); + timr_bkt_set_rem(bkt, nb_chunk_slots - rem); + timr_bkt_add_nent(bkt, rem); + } else { + chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk; + chunk += (nb_chunk_slots - chunk_remainder); + + index = timvf_cpy_wrk(index, nb_timers, + chunk, tim, ents, bkt); + timr_bkt_sub_rem(bkt, nb_timers); + timr_bkt_add_nent(bkt, nb_timers); + } + + timr_bkt_dec_lock(bkt); + return nb_timers; +} diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c index 77083691..a4f0bc8b 100644 --- a/drivers/event/opdl/opdl_evdev.c +++ b/drivers/event/opdl/opdl_evdev.c @@ -607,7 +607,7 @@ set_do_test(const char *key __rte_unused, const char *value, void *opaque) static int opdl_probe(struct rte_vdev_device *vdev) { - static const struct rte_eventdev_ops evdev_opdl_ops = { + static struct rte_eventdev_ops evdev_opdl_ops = { .dev_configure = opdl_dev_configure, .dev_infos_get = opdl_info_get, .dev_close = opdl_close, @@ -753,10 +753,7 @@ static struct rte_vdev_driver evdev_opdl_pmd_drv = { .remove = opdl_remove }; -RTE_INIT(opdl_init_log); - -static void -opdl_init_log(void) +RTE_INIT(opdl_init_log) { opdl_logtype_driver = rte_log_register("pmd.event.opdl.driver"); if 
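/*
 * [Editor's note] This hunk is part of a tree-wide 18.08 cleanup:
 * RTE_INIT previously required a forward declaration plus a separate
 * static function definition; the macro now emits the whole constructor,
 * so log registration collapses to the compact form used above:
 *
 *   RTE_INIT(opdl_init_log)
 *   {
 *           opdl_logtype_driver =
 *                   rte_log_register("pmd.event.opdl.driver");
 *   }
 */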
(opdl_logtype_driver >= 0) diff --git a/drivers/event/opdl/opdl_evdev_init.c b/drivers/event/opdl/opdl_evdev_init.c index 1454de53..582ad698 100644 --- a/drivers/event/opdl/opdl_evdev_init.c +++ b/drivers/event/opdl/opdl_evdev_init.c @@ -733,6 +733,9 @@ initialise_all_other_ports(struct rte_eventdev *dev) queue->ports[queue->nb_ports] = port; port->instance_id = queue->nb_ports; queue->nb_ports++; + opdl_stage_set_queue_id(stage_inst, + port->queue_id); + } else if (queue->q_pos == OPDL_Q_POS_END) { /* tx port */ diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c index eca7712b..8aca481c 100644 --- a/drivers/event/opdl/opdl_ring.c +++ b/drivers/event/opdl/opdl_ring.c @@ -25,7 +25,10 @@ #define OPDL_NAME_SIZE 64 -#define OPDL_EVENT_MASK (0xFFFF0000000FFFFFULL) +#define OPDL_EVENT_MASK (0x00000000000FFFFFULL) +#define OPDL_FLOWID_MASK (0xFFFFF) +#define OPDL_OPA_MASK (0xFF) +#define OPDL_OPA_OFFSET (0x38) int opdl_logtype_driver; @@ -86,7 +89,6 @@ struct opdl_stage { */ uint32_t available_seq; uint32_t head; /* Current head for single-thread operation */ - uint32_t shadow_head; /* Shadow head for single-thread operation */ uint32_t nb_instance; /* Number of instances */ uint32_t instance_id; /* ID of this stage instance */ uint16_t num_claimed; /* Number of slots claimed */ @@ -102,6 +104,9 @@ struct opdl_stage { /* For managing disclaims in multi-threaded processing stages */ struct claim_manager pending_disclaims[RTE_MAX_LCORE] __rte_cache_aligned; + uint32_t shadow_head; /* Shadow head for single-thread operation */ + uint32_t queue_id; /* ID of Queue which is assigned to this stage */ + uint32_t pos; /* Atomic scan position */ } __rte_cache_aligned; /* Context for opdl_ring */ @@ -494,6 +499,9 @@ opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries, uint32_t num_entries, uint32_t *seq, bool block, bool atomic) { uint32_t i = 0, j = 0, offset; + uint32_t opa_id = 0; + uint32_t flow_id = 0; + uint64_t event = 0; void *get_slots; struct rte_event *ev; RTE_SET_USED(seq); @@ -520,7 +528,17 @@ opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries, for (j = 0; j < num_entries; j++) { ev = (struct rte_event *)get_slot(t, s->head+j); - if ((ev->flow_id%s->nb_instance) == s->instance_id) { + + event = __atomic_load_n(&(ev->event), + __ATOMIC_ACQUIRE); + + opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET); + flow_id = OPDL_FLOWID_MASK & event; + + if (opa_id >= s->queue_id) + continue; + + if ((flow_id % s->nb_instance) == s->instance_id) { memcpy(entries_offset, ev, t->slot_size); entries_offset += t->slot_size; i++; @@ -531,6 +549,7 @@ opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries, s->head += num_entries; s->num_claimed = num_entries; s->num_event = i; + s->pos = 0; /* automatically disclaim entries if number of rte_events is zero */ if (unlikely(i == 0)) @@ -953,21 +972,26 @@ opdl_ring_get_slot(const struct opdl_ring *t, uint32_t index) } bool -opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev, +opdl_ring_cas_slot(struct opdl_stage *s, const struct rte_event *ev, uint32_t index, bool atomic) { - uint32_t i = 0, j = 0, offset; + uint32_t i = 0, offset; struct opdl_ring *t = s->t; struct rte_event *ev_orig = NULL; bool ev_updated = false; - uint64_t ev_temp = 0; + uint64_t ev_temp = 0; + uint64_t ev_update = 0; + + uint32_t opa_id = 0; + uint32_t flow_id = 0; + uint64_t event = 0; if (index > s->num_event) { PMD_DRV_LOG(ERR, "index is overflow"); return ev_updated; } - ev_temp = 
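/*
 * [Editor's note] The mask rework at the top of this file narrows
 * OPDL_EVENT_MASK to the low 20 flow-id bits and defines two companions
 * for fields packed into the 64-bit event word:
 *
 *   flow_id = event & OPDL_FLOWID_MASK;                    // bits 0..19
 *   opa_id  = (event >> OPDL_OPA_OFFSET) & OPDL_OPA_MASK;  // bits 56..63
 *
 * opa_id records the queue (stage) that last updated the slot; the new
 * "opa_id >= s->queue_id" checks skip slots this stage has already
 * handled, replacing the positional j == index matching removed below.
 */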
ev->event&OPDL_EVENT_MASK; + ev_temp = ev->event & OPDL_EVENT_MASK; if (!atomic) { offset = opdl_first_entry_id(s->seq, s->nb_instance, @@ -984,27 +1008,39 @@ opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev, } } else { - for (i = 0; i < s->num_claimed; i++) { + for (i = s->pos; i < s->num_claimed; i++) { ev_orig = (struct rte_event *) get_slot(t, s->shadow_head+i); - if ((ev_orig->flow_id%s->nb_instance) == - s->instance_id) { - - if (j == index) { - if ((ev_orig->event&OPDL_EVENT_MASK) != - ev_temp) { - ev_orig->event = ev->event; - ev_updated = true; - } - if (ev_orig->u64 != ev->u64) { - ev_orig->u64 = ev->u64; - ev_updated = true; - } - - break; + event = __atomic_load_n(&(ev_orig->event), + __ATOMIC_ACQUIRE); + + opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET); + flow_id = OPDL_FLOWID_MASK & event; + + if (opa_id >= s->queue_id) + continue; + + if ((flow_id % s->nb_instance) == s->instance_id) { + ev_update = s->queue_id; + ev_update = (ev_update << OPDL_OPA_OFFSET) + | ev->event; + + s->pos = i + 1; + + if ((event & OPDL_EVENT_MASK) != + ev_temp) { + __atomic_store_n(&(ev_orig->event), + ev_update, + __ATOMIC_RELEASE); + ev_updated = true; } - j++; + if (ev_orig->u64 != ev->u64) { + ev_orig->u64 = ev->u64; + ev_updated = true; + } + + break; } } @@ -1049,11 +1085,7 @@ check_deps(struct opdl_ring *t, struct opdl_stage *deps[], return -EINVAL; } } - if (num_deps > t->num_stages) { - PMD_DRV_LOG(ERR, "num_deps (%u) > number stages (%u)", - num_deps, t->num_stages); - return -EINVAL; - } + return 0; } @@ -1153,6 +1185,13 @@ opdl_stage_get_opdl_ring(const struct opdl_stage *s) return s->t; } +void +opdl_stage_set_queue_id(struct opdl_stage *s, + uint32_t queue_id) +{ + s->queue_id = queue_id; +} + void opdl_ring_dump(const struct opdl_ring *t, FILE *f) { diff --git a/drivers/event/opdl/opdl_ring.h b/drivers/event/opdl/opdl_ring.h index 9e8c33e6..751a59db 100644 --- a/drivers/event/opdl/opdl_ring.h +++ b/drivers/event/opdl/opdl_ring.h @@ -518,6 +518,20 @@ opdl_stage_find_num_available(struct opdl_stage *s, uint32_t num_entries); struct opdl_stage * opdl_stage_create(struct opdl_ring *t, bool threadsafe); + +/** + * Set the internal queue id for each stage instance. + * + * @param s + * The pointer of stage instance. + * + * @param queue_id + * The value of internal queue id. 
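+ * Stages record this id into the opa field of every slot they update;
+ * opdl_ring_cas_slot() then skips slots whose recorded id is already at
+ * or beyond the calling stage's queue_id, so an event is never processed
+ * twice by the same logical queue. A typical call site is
+ * initialise_all_other_ports() in opdl_evdev_init.c:
+ *
+ *   opdl_stage_set_queue_id(stage_inst, port->queue_id);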
+ */ +void +opdl_stage_set_queue_id(struct opdl_stage *s, + uint32_t queue_id); + /** * Prints information on opdl_ring instance and all its stages * @@ -590,7 +604,7 @@ opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe); */ bool -opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev, +opdl_ring_cas_slot(struct opdl_stage *s, const struct rte_event *ev, uint32_t index, bool atomic); #ifdef __cplusplus diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c index 7f467568..c889220e 100644 --- a/drivers/event/skeleton/skeleton_eventdev.c +++ b/drivers/event/skeleton/skeleton_eventdev.c @@ -319,7 +319,7 @@ skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f) /* Initialize and register event driver with DPDK Application */ -static const struct rte_eventdev_ops skeleton_eventdev_ops = { +static struct rte_eventdev_ops skeleton_eventdev_ops = { .dev_infos_get = skeleton_eventdev_info_get, .dev_configure = skeleton_eventdev_configure, .dev_start = skeleton_eventdev_start, diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c index 6672fd8e..a6bb9138 100644 --- a/drivers/event/sw/sw_evdev.c +++ b/drivers/event/sw/sw_evdev.c @@ -361,9 +361,99 @@ sw_init_qid_iqs(struct sw_evdev *sw) } } +static int +sw_qids_empty(struct sw_evdev *sw) +{ + unsigned int i, j; + + for (i = 0; i < sw->qid_count; i++) { + for (j = 0; j < SW_IQS_MAX; j++) { + if (iq_count(&sw->qids[i].iq[j])) + return 0; + } + } + + return 1; +} + +static int +sw_ports_empty(struct sw_evdev *sw) +{ + unsigned int i; + + for (i = 0; i < sw->port_count; i++) { + if ((rte_event_ring_count(sw->ports[i].rx_worker_ring)) || + rte_event_ring_count(sw->ports[i].cq_worker_ring)) + return 0; + } + + return 1; +} + static void -sw_clean_qid_iqs(struct sw_evdev *sw) +sw_drain_ports(struct rte_eventdev *dev) { + struct sw_evdev *sw = sw_pmd_priv(dev); + eventdev_stop_flush_t flush; + unsigned int i; + uint8_t dev_id; + void *arg; + + flush = dev->dev_ops->dev_stop_flush; + dev_id = dev->data->dev_id; + arg = dev->data->dev_stop_flush_arg; + + for (i = 0; i < sw->port_count; i++) { + struct rte_event ev; + + while (rte_event_dequeue_burst(dev_id, i, &ev, 1, 0)) { + if (flush) + flush(dev_id, ev, arg); + + ev.op = RTE_EVENT_OP_RELEASE; + rte_event_enqueue_burst(dev_id, i, &ev, 1); + } + } +} + +static void +sw_drain_queue(struct rte_eventdev *dev, struct sw_iq *iq) +{ + struct sw_evdev *sw = sw_pmd_priv(dev); + eventdev_stop_flush_t flush; + uint8_t dev_id; + void *arg; + + flush = dev->dev_ops->dev_stop_flush; + dev_id = dev->data->dev_id; + arg = dev->data->dev_stop_flush_arg; + + while (iq_count(iq) > 0) { + struct rte_event ev; + + iq_dequeue_burst(sw, iq, &ev, 1); + + if (flush) + flush(dev_id, ev, arg); + } +} + +static void +sw_drain_queues(struct rte_eventdev *dev) +{ + struct sw_evdev *sw = sw_pmd_priv(dev); + unsigned int i, j; + + for (i = 0; i < sw->qid_count; i++) { + for (j = 0; j < SW_IQS_MAX; j++) + sw_drain_queue(dev, &sw->qids[i].iq[j]); + } +} + +static void +sw_clean_qid_iqs(struct rte_eventdev *dev) +{ + struct sw_evdev *sw = sw_pmd_priv(dev); int i, j; /* Release the IQ memory of all configured qids */ @@ -464,6 +554,33 @@ sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev, return 0; } +static int +sw_timer_adapter_caps_get(const struct rte_eventdev *dev, + uint64_t flags, + uint32_t *caps, + const struct rte_event_timer_adapter_ops **ops) +{ + RTE_SET_USED(dev); + RTE_SET_USED(flags); + *caps = 0; + + /* Use default SW 
ops */ + *ops = NULL; + + return 0; +} + +static int +sw_crypto_adapter_caps_get(const struct rte_eventdev *dev, + const struct rte_cryptodev *cdev, + uint32_t *caps) +{ + RTE_SET_USED(dev); + RTE_SET_USED(cdev); + *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP; + return 0; +} + static void sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info) { @@ -702,10 +819,30 @@ static void sw_stop(struct rte_eventdev *dev) { struct sw_evdev *sw = sw_pmd_priv(dev); - sw_clean_qid_iqs(sw); + int32_t runstate; + + /* Stop the scheduler if it's running */ + runstate = rte_service_runstate_get(sw->service_id); + if (runstate == 1) + rte_service_runstate_set(sw->service_id, 0); + + while (rte_service_may_be_active(sw->service_id)) + rte_pause(); + + /* Flush all events out of the device */ + while (!(sw_qids_empty(sw) && sw_ports_empty(sw))) { + sw_event_schedule(dev); + sw_drain_ports(dev); + sw_drain_queues(dev); + } + + sw_clean_qid_iqs(dev); sw_xstats_uninit(sw); sw->started = 0; rte_smp_wmb(); + + if (runstate == 1) + rte_service_runstate_set(sw->service_id, 1); } static int @@ -772,7 +909,7 @@ static int32_t sw_sched_service_func(void *args) static int sw_probe(struct rte_vdev_device *vdev) { - static const struct rte_eventdev_ops evdev_sw_ops = { + static struct rte_eventdev_ops evdev_sw_ops = { .dev_configure = sw_dev_configure, .dev_infos_get = sw_info_get, .dev_close = sw_close, @@ -791,6 +928,10 @@ sw_probe(struct rte_vdev_device *vdev) .eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get, + .timer_adapter_caps_get = sw_timer_adapter_caps_get, + + .crypto_adapter_caps_get = sw_crypto_adapter_caps_get, + .xstats_get = sw_xstats_get, .xstats_get_names = sw_xstats_get_names, .xstats_get_by_name = sw_xstats_get_by_name, @@ -933,9 +1074,7 @@ RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "= " /* declared extern in header, for access from other .c files */ int eventdev_sw_log_level; -RTE_INIT(evdev_sw_init_log); -static void -evdev_sw_init_log(void) +RTE_INIT(evdev_sw_init_log) { eventdev_sw_log_level = rte_log_register("pmd.event.sw"); if (eventdev_sw_log_level >= 0) diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c index 3106eb33..e3a41e02 100644 --- a/drivers/event/sw/sw_evdev_scheduler.c +++ b/drivers/event/sw/sw_evdev_scheduler.c @@ -508,7 +508,7 @@ sw_event_schedule(struct rte_eventdev *dev) uint32_t i; sw->sched_called++; - if (!sw->started) + if (unlikely(!sw->started)) return; do { @@ -532,8 +532,7 @@ sw_event_schedule(struct rte_eventdev *dev) } while (in_pkts > 4 && (int)in_pkts_this_iteration < sched_quanta); - out_pkts = 0; - out_pkts += sw_schedule_qid_to_cq(sw); + out_pkts = sw_schedule_qid_to_cq(sw); out_pkts_total += out_pkts; in_pkts_total += in_pkts_this_iteration; @@ -541,6 +540,12 @@ sw_event_schedule(struct rte_eventdev *dev) break; } while ((int)out_pkts_total < sched_quanta); + sw->stats.tx_pkts += out_pkts_total; + sw->stats.rx_pkts += in_pkts_total; + + sw->sched_no_iq_enqueues += (in_pkts_total == 0); + sw->sched_no_cq_enqueues += (out_pkts_total == 0); + /* push all the internal buffered QEs in port->cq_ring to the * worker cores: aka, do the ring transfers batched. 
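The drain loop above guarantees that every event still inside the device when it is stopped reaches the application's stop-flush callback, if one is registered. A sketch of the application side, using the public API that the selftest below exercises (the app_* names are hypothetical):

#include <rte_eventdev.h>

static uint64_t n_flushed;

/* Matches eventdev_stop_flush_t: invoked once per event that was
 * still buffered in the device at stop time.
 */
static void
app_stop_flush(uint8_t dev_id __rte_unused,
	       struct rte_event ev __rte_unused, void *arg)
{
	(*(uint64_t *)arg)++;
}

static void
app_shutdown(uint8_t dev_id)
{
	rte_event_dev_stop_flush_callback_register(dev_id, app_stop_flush,
						   &n_flushed);
	rte_event_dev_stop(dev_id);
	/* n_flushed now counts the events drained by sw_stop() */
}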
*/ @@ -552,10 +557,4 @@ sw_event_schedule(struct rte_eventdev *dev) sw->ports[i].cq_buf_count = 0; } - sw->stats.tx_pkts += out_pkts_total; - sw->stats.rx_pkts += in_pkts_total; - - sw->sched_no_iq_enqueues += (in_pkts_total == 0); - sw->sched_no_cq_enqueues += (out_pkts_total == 0); - } diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c index 78d30e07..c40912db 100644 --- a/drivers/event/sw/sw_evdev_selftest.c +++ b/drivers/event/sw/sw_evdev_selftest.c @@ -28,6 +28,7 @@ #define MAX_PORTS 16 #define MAX_QIDS 16 #define NUM_PACKETS (1<<18) +#define DEQUEUE_DEPTH 128 static int evdev; @@ -147,7 +148,7 @@ init(struct test *t, int nb_queues, int nb_ports) .nb_event_ports = nb_ports, .nb_event_queue_flows = 1024, .nb_events_limit = 4096, - .nb_event_port_dequeue_depth = 128, + .nb_event_port_dequeue_depth = DEQUEUE_DEPTH, .nb_event_port_enqueue_depth = 128, }; int ret; @@ -2807,6 +2808,78 @@ err: return -1; } +static void +flush(uint8_t dev_id __rte_unused, struct rte_event event, void *arg) +{ + *((uint8_t *) arg) += (event.u64 == 0xCA11BACC) ? 1 : 0; +} + +static int +dev_stop_flush(struct test *t) /* test to check we can properly flush events */ +{ + const struct rte_event new_ev = { + .op = RTE_EVENT_OP_NEW, + .u64 = 0xCA11BACC, + .queue_id = 0 + }; + struct rte_event ev = new_ev; + uint8_t count = 0; + int i; + + if (init(t, 1, 1) < 0 || + create_ports(t, 1) < 0 || + create_atomic_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* Link the queue so *_start() doesn't error out */ + if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1) { + printf("%d: Error linking queue to port\n", __LINE__); + goto err; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + goto err; + } + + for (i = 0; i < DEQUEUE_DEPTH + 1; i++) { + if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) { + printf("%d: Error enqueuing events\n", __LINE__); + goto err; + } + } + + /* Schedule the events from the port to the IQ. At least one event + * should be remaining in the queue. 
+ */ + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + if (rte_event_dev_stop_flush_callback_register(evdev, flush, &count)) { + printf("%d: Error installing the flush callback\n", __LINE__); + goto err; + } + + cleanup(t); + + if (count == 0) { + printf("%d: Error executing the flush callback\n", __LINE__); + goto err; + } + + if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) { + printf("%d: Error uninstalling the flush callback\n", __LINE__); + goto err; + } + + return 0; +err: + rte_event_dev_dump(evdev, stdout); + cleanup(t); + return -1; +} + static int worker_loopback_worker_fn(void *arg) { @@ -3211,6 +3284,12 @@ test_sw_eventdev(void) printf("ERROR - Head-of-line-blocking test FAILED.\n"); goto test_fail; } + printf("*** Running Stop Flush test...\n"); + ret = dev_stop_flush(t); + if (ret != 0) { + printf("ERROR - Stop Flush test FAILED.\n"); + goto test_fail; + } if (rte_lcore_count() >= 3) { printf("*** Running Worker loopback test...\n"); ret = worker_loopback(t, 0); diff --git a/drivers/event/sw/sw_evdev_worker.c b/drivers/event/sw/sw_evdev_worker.c index 67151f77..063b919c 100644 --- a/drivers/event/sw/sw_evdev_worker.c +++ b/drivers/event/sw/sw_evdev_worker.c @@ -77,8 +77,10 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num) rte_atomic32_add(&sw->inflights, credit_update_quanta); p->inflight_credits += (credit_update_quanta); - if (p->inflight_credits < new) - return 0; + /* If there are fewer inflight credits than new events, limit + * the number of enqueued events. + */ + num = (p->inflight_credits < new) ? p->inflight_credits : new; } for (i = 0; i < num; i++) { diff --git a/drivers/mempool/Makefile b/drivers/mempool/Makefile index aae2cb10..28c2e836 100644 --- a/drivers/mempool/Makefile +++ b/drivers/mempool/Makefile @@ -3,8 +3,13 @@ include $(RTE_SDK)/mk/rte.vars.mk +DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_BUCKET) += bucket +ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y) DIRS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa +endif +ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy) DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2 +endif DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += ring DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += stack DIRS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx diff --git a/drivers/mempool/bucket/Makefile b/drivers/mempool/bucket/Makefile new file mode 100644 index 00000000..7364916b --- /dev/null +++ b/drivers/mempool/bucket/Makefile @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: BSD-3-Clause +# +# Copyright (c) 2017-2018 Solarflare Communications Inc. +# All rights reserved. +# +# This software was jointly developed between OKTET Labs (under contract +# for Solarflare) and Solarflare Communications, Inc. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_mempool_bucket.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +LDLIBS += -lrte_eal -lrte_mempool -lrte_ring + +EXPORT_MAP := rte_mempool_bucket_version.map + +LIBABIVER := 1 + +SRCS-$(CONFIG_RTE_DRIVER_MEMPOOL_BUCKET) += rte_mempool_bucket.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/mempool/bucket/meson.build b/drivers/mempool/bucket/meson.build new file mode 100644 index 00000000..618d7912 --- /dev/null +++ b/drivers/mempool/bucket/meson.build @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: BSD-3-Clause +# +# Copyright (c) 2017-2018 Solarflare Communications Inc. +# All rights reserved. +# +# This software was jointly developed between OKTET Labs (under contract +# for Solarflare) and Solarflare Communications, Inc. 
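One behavioural change worth noting from the sw_evdev_worker.c hunk above: when inflight credits run short, a burst of NEW events is now truncated instead of rejected outright, so rte_event_enqueue_burst() can legitimately return less than the requested count. Callers that must not drop events should retry the remainder, along these lines (illustrative helper, may spin while the device stays saturated):

#include <rte_eventdev.h>

static void
enqueue_all(uint8_t dev_id, uint8_t port_id,
	    const struct rte_event *evs, uint16_t nb)
{
	uint16_t sent = 0;

	/* Partial enqueue is not an error; loop on the remainder. */
	while (sent < nb)
		sent += rte_event_enqueue_burst(dev_id, port_id,
						&evs[sent], nb - sent);
}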
+ +sources = files('rte_mempool_bucket.c') diff --git a/drivers/mempool/bucket/rte_mempool_bucket.c b/drivers/mempool/bucket/rte_mempool_bucket.c new file mode 100644 index 00000000..78d2b9d0 --- /dev/null +++ b/drivers/mempool/bucket/rte_mempool_bucket.c @@ -0,0 +1,628 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2017-2018 Solarflare Communications Inc. + * All rights reserved. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#include <stdbool.h> +#include <stdio.h> +#include <string.h> + +#include <rte_errno.h> +#include <rte_ring.h> +#include <rte_mempool.h> +#include <rte_malloc.h> + +/* + * The general idea of the bucket mempool driver is as follows. + * We keep track of physically contiguous groups (buckets) of objects + * of a certain size. Every such group has a counter that is + * incremented every time an object from that group is enqueued. + * Until the bucket is full, no objects from it are eligible for allocation. + * If a request is made to dequeue a multiple of the bucket size, it is + * satisfied by returning whole buckets instead of separate objects. + */ + + +struct bucket_header { + unsigned int lcore_id; + uint8_t fill_cnt; +}; + +struct bucket_stack { + unsigned int top; + unsigned int limit; + void *objects[]; +}; + +struct bucket_data { + unsigned int header_size; + unsigned int total_elt_size; + unsigned int obj_per_bucket; + unsigned int bucket_stack_thresh; + uintptr_t bucket_page_mask; + struct rte_ring *shared_bucket_ring; + struct bucket_stack *buckets[RTE_MAX_LCORE]; + /* + * Multi-producer single-consumer ring to hold objects that are + * returned to the mempool at a different lcore than initially + * dequeued + */ + struct rte_ring *adoption_buffer_rings[RTE_MAX_LCORE]; + struct rte_ring *shared_orphan_ring; + struct rte_mempool *pool; + unsigned int bucket_mem_size; +}; + +static struct bucket_stack * +bucket_stack_create(const struct rte_mempool *mp, unsigned int n_elts) +{ + struct bucket_stack *stack; + + stack = rte_zmalloc_socket("bucket_stack", + sizeof(struct bucket_stack) + + n_elts * sizeof(void *), + RTE_CACHE_LINE_SIZE, + mp->socket_id); + if (stack == NULL) + return NULL; + stack->limit = n_elts; + stack->top = 0; + + return stack; +} + +static void +bucket_stack_push(struct bucket_stack *stack, void *obj) +{ + RTE_ASSERT(stack->top < stack->limit); + stack->objects[stack->top++] = obj; +} + +static void * +bucket_stack_pop_unsafe(struct bucket_stack *stack) +{ + RTE_ASSERT(stack->top > 0); + return stack->objects[--stack->top]; +} + +static void * +bucket_stack_pop(struct bucket_stack *stack) +{ + if (stack->top == 0) + return NULL; + return bucket_stack_pop_unsafe(stack); +} + +static int +bucket_enqueue_single(struct bucket_data *bd, void *obj) +{ + int rc = 0; + uintptr_t addr = (uintptr_t)obj; + struct bucket_header *hdr; + unsigned int lcore_id = rte_lcore_id(); + + addr &= bd->bucket_page_mask; + hdr = (struct bucket_header *)addr; + + if (likely(hdr->lcore_id == lcore_id)) { + if (hdr->fill_cnt < bd->obj_per_bucket - 1) { + hdr->fill_cnt++; + } else { + hdr->fill_cnt = 0; + /* Stack is big enough to put all buckets */ + bucket_stack_push(bd->buckets[lcore_id], hdr); + } + } else if (hdr->lcore_id != LCORE_ID_ANY) { + struct rte_ring *adopt_ring = + bd->adoption_buffer_rings[hdr->lcore_id]; + + rc = rte_ring_enqueue(adopt_ring, obj); + /* Ring is big enough to put all objects */ + RTE_ASSERT(rc == 0); + } else if (hdr->fill_cnt < bd->obj_per_bucket - 1) { + hdr->fill_cnt++; + } else { + hdr->fill_cnt = 0; + rc
= rte_ring_enqueue(bd->shared_bucket_ring, hdr); + /* Ring is big enough to put all buckets */ + RTE_ASSERT(rc == 0); + } + + return rc; +} + +static int +bucket_enqueue(struct rte_mempool *mp, void * const *obj_table, + unsigned int n) +{ + struct bucket_data *bd = mp->pool_data; + struct bucket_stack *local_stack = bd->buckets[rte_lcore_id()]; + unsigned int i; + int rc = 0; + + for (i = 0; i < n; i++) { + rc = bucket_enqueue_single(bd, obj_table[i]); + RTE_ASSERT(rc == 0); + } + if (local_stack->top > bd->bucket_stack_thresh) { + rte_ring_enqueue_bulk(bd->shared_bucket_ring, + &local_stack->objects + [bd->bucket_stack_thresh], + local_stack->top - + bd->bucket_stack_thresh, + NULL); + local_stack->top = bd->bucket_stack_thresh; + } + return rc; +} + +static void ** +bucket_fill_obj_table(const struct bucket_data *bd, void **pstart, + void **obj_table, unsigned int n) +{ + unsigned int i; + uint8_t *objptr = *pstart; + + for (objptr += bd->header_size, i = 0; i < n; + i++, objptr += bd->total_elt_size) + *obj_table++ = objptr; + *pstart = objptr; + return obj_table; +} + +static int +bucket_dequeue_orphans(struct bucket_data *bd, void **obj_table, + unsigned int n_orphans) +{ + unsigned int i; + int rc; + uint8_t *objptr; + + rc = rte_ring_dequeue_bulk(bd->shared_orphan_ring, obj_table, + n_orphans, NULL); + if (unlikely(rc != (int)n_orphans)) { + struct bucket_header *hdr; + + objptr = bucket_stack_pop(bd->buckets[rte_lcore_id()]); + hdr = (struct bucket_header *)objptr; + + if (objptr == NULL) { + rc = rte_ring_dequeue(bd->shared_bucket_ring, + (void **)&objptr); + if (rc != 0) { + rte_errno = ENOBUFS; + return -rte_errno; + } + hdr = (struct bucket_header *)objptr; + hdr->lcore_id = rte_lcore_id(); + } + hdr->fill_cnt = 0; + bucket_fill_obj_table(bd, (void **)&objptr, obj_table, + n_orphans); + for (i = n_orphans; i < bd->obj_per_bucket; i++, + objptr += bd->total_elt_size) { + rc = rte_ring_enqueue(bd->shared_orphan_ring, + objptr); + if (rc != 0) { + RTE_ASSERT(0); + rte_errno = -rc; + return rc; + } + } + } + + return 0; +} + +static int +bucket_dequeue_buckets(struct bucket_data *bd, void **obj_table, + unsigned int n_buckets) +{ + struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()]; + unsigned int n_buckets_from_stack = RTE_MIN(n_buckets, cur_stack->top); + void **obj_table_base = obj_table; + + n_buckets -= n_buckets_from_stack; + while (n_buckets_from_stack-- > 0) { + void *obj = bucket_stack_pop_unsafe(cur_stack); + + obj_table = bucket_fill_obj_table(bd, &obj, obj_table, + bd->obj_per_bucket); + } + while (n_buckets-- > 0) { + struct bucket_header *hdr; + + if (unlikely(rte_ring_dequeue(bd->shared_bucket_ring, + (void **)&hdr) != 0)) { + /* + * Return the already-dequeued buffers + * back to the mempool + */ + bucket_enqueue(bd->pool, obj_table_base, + obj_table - obj_table_base); + rte_errno = ENOBUFS; + return -rte_errno; + } + hdr->lcore_id = rte_lcore_id(); + obj_table = bucket_fill_obj_table(bd, (void **)&hdr, + obj_table, + bd->obj_per_bucket); + } + + return 0; +} + +static int +bucket_adopt_orphans(struct bucket_data *bd) +{ + int rc = 0; + struct rte_ring *adopt_ring = + bd->adoption_buffer_rings[rte_lcore_id()]; + + if (unlikely(!rte_ring_empty(adopt_ring))) { + void *orphan; + + while (rte_ring_sc_dequeue(adopt_ring, &orphan) == 0) { + rc = bucket_enqueue_single(bd, orphan); + RTE_ASSERT(rc == 0); + } + } + return rc; +} + +static int +bucket_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) +{ + struct bucket_data *bd = mp->pool_data; + 
unsigned int n_buckets = n / bd->obj_per_bucket; + unsigned int n_orphans = n - n_buckets * bd->obj_per_bucket; + int rc = 0; + + bucket_adopt_orphans(bd); + + if (unlikely(n_orphans > 0)) { + rc = bucket_dequeue_orphans(bd, obj_table + + (n_buckets * bd->obj_per_bucket), + n_orphans); + if (rc != 0) + return rc; + } + + if (likely(n_buckets > 0)) { + rc = bucket_dequeue_buckets(bd, obj_table, n_buckets); + if (unlikely(rc != 0) && n_orphans > 0) { + rte_ring_enqueue_bulk(bd->shared_orphan_ring, + obj_table + (n_buckets * + bd->obj_per_bucket), + n_orphans, NULL); + } + } + + return rc; +} + +static int +bucket_dequeue_contig_blocks(struct rte_mempool *mp, void **first_obj_table, + unsigned int n) +{ + struct bucket_data *bd = mp->pool_data; + const uint32_t header_size = bd->header_size; + struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()]; + unsigned int n_buckets_from_stack = RTE_MIN(n, cur_stack->top); + struct bucket_header *hdr; + void **first_objp = first_obj_table; + + bucket_adopt_orphans(bd); + + n -= n_buckets_from_stack; + while (n_buckets_from_stack-- > 0) { + hdr = bucket_stack_pop_unsafe(cur_stack); + *first_objp++ = (uint8_t *)hdr + header_size; + } + if (n > 0) { + if (unlikely(rte_ring_dequeue_bulk(bd->shared_bucket_ring, + first_objp, n, NULL) != n)) { + /* Return the already dequeued buckets */ + while (first_objp-- != first_obj_table) { + bucket_stack_push(cur_stack, + (uint8_t *)*first_objp - + header_size); + } + rte_errno = ENOBUFS; + return -rte_errno; + } + while (n-- > 0) { + hdr = (struct bucket_header *)*first_objp; + hdr->lcore_id = rte_lcore_id(); + *first_objp++ = (uint8_t *)hdr + header_size; + } + } + + return 0; +} + +static void +count_underfilled_buckets(struct rte_mempool *mp, + void *opaque, + struct rte_mempool_memhdr *memhdr, + __rte_unused unsigned int mem_idx) +{ + unsigned int *pcount = opaque; + const struct bucket_data *bd = mp->pool_data; + unsigned int bucket_page_sz = + (unsigned int)(~bd->bucket_page_mask + 1); + uintptr_t align; + uint8_t *iter; + + align = (uintptr_t)RTE_PTR_ALIGN_CEIL(memhdr->addr, bucket_page_sz) - + (uintptr_t)memhdr->addr; + + for (iter = (uint8_t *)memhdr->addr + align; + iter < (uint8_t *)memhdr->addr + memhdr->len; + iter += bucket_page_sz) { + struct bucket_header *hdr = (struct bucket_header *)iter; + + *pcount += hdr->fill_cnt; + } +} + +static unsigned int +bucket_get_count(const struct rte_mempool *mp) +{ + const struct bucket_data *bd = mp->pool_data; + unsigned int count = + bd->obj_per_bucket * rte_ring_count(bd->shared_bucket_ring) + + rte_ring_count(bd->shared_orphan_ring); + unsigned int i; + + for (i = 0; i < RTE_MAX_LCORE; i++) { + if (!rte_lcore_is_enabled(i)) + continue; + count += bd->obj_per_bucket * bd->buckets[i]->top + + rte_ring_count(bd->adoption_buffer_rings[i]); + } + + rte_mempool_mem_iter((struct rte_mempool *)(uintptr_t)mp, + count_underfilled_buckets, &count); + + return count; +} + +static int +bucket_alloc(struct rte_mempool *mp) +{ + int rg_flags = 0; + int rc = 0; + char rg_name[RTE_RING_NAMESIZE]; + struct bucket_data *bd; + unsigned int i; + unsigned int bucket_header_size; + + bd = rte_zmalloc_socket("bucket_pool", sizeof(*bd), + RTE_CACHE_LINE_SIZE, mp->socket_id); + if (bd == NULL) { + rc = -ENOMEM; + goto no_mem_for_data; + } + bd->pool = mp; + if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN) + bucket_header_size = sizeof(struct bucket_header); + else + bucket_header_size = RTE_CACHE_LINE_SIZE; + RTE_BUILD_BUG_ON(sizeof(struct bucket_header) > RTE_CACHE_LINE_SIZE); + 
bd->header_size = mp->header_size + bucket_header_size; + bd->total_elt_size = mp->header_size + mp->elt_size + mp->trailer_size; + bd->bucket_mem_size = RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB * 1024; + bd->obj_per_bucket = (bd->bucket_mem_size - bucket_header_size) / + bd->total_elt_size; + bd->bucket_page_mask = ~(rte_align64pow2(bd->bucket_mem_size) - 1); + /* eventually this should be a tunable parameter */ + bd->bucket_stack_thresh = (mp->size / bd->obj_per_bucket) * 4 / 3; + + if (mp->flags & MEMPOOL_F_SP_PUT) + rg_flags |= RING_F_SP_ENQ; + if (mp->flags & MEMPOOL_F_SC_GET) + rg_flags |= RING_F_SC_DEQ; + + for (i = 0; i < RTE_MAX_LCORE; i++) { + if (!rte_lcore_is_enabled(i)) + continue; + bd->buckets[i] = + bucket_stack_create(mp, mp->size / bd->obj_per_bucket); + if (bd->buckets[i] == NULL) { + rc = -ENOMEM; + goto no_mem_for_stacks; + } + rc = snprintf(rg_name, sizeof(rg_name), + RTE_MEMPOOL_MZ_FORMAT ".a%u", mp->name, i); + if (rc < 0 || rc >= (int)sizeof(rg_name)) { + rc = -ENAMETOOLONG; + goto no_mem_for_stacks; + } + bd->adoption_buffer_rings[i] = + rte_ring_create(rg_name, rte_align32pow2(mp->size + 1), + mp->socket_id, + rg_flags | RING_F_SC_DEQ); + if (bd->adoption_buffer_rings[i] == NULL) { + rc = -rte_errno; + goto no_mem_for_stacks; + } + } + + rc = snprintf(rg_name, sizeof(rg_name), + RTE_MEMPOOL_MZ_FORMAT ".0", mp->name); + if (rc < 0 || rc >= (int)sizeof(rg_name)) { + rc = -ENAMETOOLONG; + goto invalid_shared_orphan_ring; + } + bd->shared_orphan_ring = + rte_ring_create(rg_name, rte_align32pow2(mp->size + 1), + mp->socket_id, rg_flags); + if (bd->shared_orphan_ring == NULL) { + rc = -rte_errno; + goto cannot_create_shared_orphan_ring; + } + + rc = snprintf(rg_name, sizeof(rg_name), + RTE_MEMPOOL_MZ_FORMAT ".1", mp->name); + if (rc < 0 || rc >= (int)sizeof(rg_name)) { + rc = -ENAMETOOLONG; + goto invalid_shared_bucket_ring; + } + bd->shared_bucket_ring = + rte_ring_create(rg_name, + rte_align32pow2((mp->size + 1) / + bd->obj_per_bucket), + mp->socket_id, rg_flags); + if (bd->shared_bucket_ring == NULL) { + rc = -rte_errno; + goto cannot_create_shared_bucket_ring; + } + + mp->pool_data = bd; + + return 0; + +cannot_create_shared_bucket_ring: +invalid_shared_bucket_ring: + rte_ring_free(bd->shared_orphan_ring); +cannot_create_shared_orphan_ring: +invalid_shared_orphan_ring: +no_mem_for_stacks: + for (i = 0; i < RTE_MAX_LCORE; i++) { + rte_free(bd->buckets[i]); + rte_ring_free(bd->adoption_buffer_rings[i]); + } + rte_free(bd); +no_mem_for_data: + rte_errno = -rc; + return rc; +} + +static void +bucket_free(struct rte_mempool *mp) +{ + unsigned int i; + struct bucket_data *bd = mp->pool_data; + + if (bd == NULL) + return; + + for (i = 0; i < RTE_MAX_LCORE; i++) { + rte_free(bd->buckets[i]); + rte_ring_free(bd->adoption_buffer_rings[i]); + } + + rte_ring_free(bd->shared_orphan_ring); + rte_ring_free(bd->shared_bucket_ring); + + rte_free(bd); +} + +static ssize_t +bucket_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num, + __rte_unused uint32_t pg_shift, size_t *min_total_elt_size, + size_t *align) +{ + struct bucket_data *bd = mp->pool_data; + unsigned int bucket_page_sz; + + if (bd == NULL) + return -EINVAL; + + bucket_page_sz = rte_align32pow2(bd->bucket_mem_size); + *align = bucket_page_sz; + *min_total_elt_size = bucket_page_sz; + /* + * Each bucket occupies its own block aligned to + * bucket_page_sz, so the required amount of memory is + * a multiple of bucket_page_sz. 
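To make the geometry concrete: a worked example, assuming the default CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB=64 from config/common_base and a purely illustrative element size (the header and element sizes come from the mempool itself at runtime):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int bucket_mem_size = 64 * 1024;   /* 64 KB bucket */
	unsigned int bucket_header_size = 64;       /* one cache line */
	unsigned int total_elt_size = 2240;         /* example element */
	unsigned int obj_per_bucket =
		(bucket_mem_size - bucket_header_size) / total_elt_size;
	/* 64 KB is already a power of two, so the mask just clears the
	 * low 16 address bits; this is how bucket_enqueue_single()
	 * finds the bucket header from any object address.
	 */
	uintptr_t bucket_page_mask = ~((uintptr_t)bucket_mem_size - 1);

	printf("objects per bucket: %u\n", obj_per_bucket);  /* 29 */
	printf("bucket page mask: 0x%jx\n", (uintmax_t)bucket_page_mask);
	return 0;
}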
+ * We also need extra space for a bucket header + */ + return ((obj_num + bd->obj_per_bucket - 1) / + bd->obj_per_bucket) * bucket_page_sz; +} + +static int +bucket_populate(struct rte_mempool *mp, unsigned int max_objs, + void *vaddr, rte_iova_t iova, size_t len, + rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg) +{ + struct bucket_data *bd = mp->pool_data; + unsigned int bucket_page_sz; + unsigned int bucket_header_sz; + unsigned int n_objs; + uintptr_t align; + uint8_t *iter; + int rc; + + if (bd == NULL) + return -EINVAL; + + bucket_page_sz = rte_align32pow2(bd->bucket_mem_size); + align = RTE_PTR_ALIGN_CEIL((uintptr_t)vaddr, bucket_page_sz) - + (uintptr_t)vaddr; + + bucket_header_sz = bd->header_size - mp->header_size; + if (iova != RTE_BAD_IOVA) + iova += align + bucket_header_sz; + + for (iter = (uint8_t *)vaddr + align, n_objs = 0; + iter < (uint8_t *)vaddr + len && n_objs < max_objs; + iter += bucket_page_sz) { + struct bucket_header *hdr = (struct bucket_header *)iter; + unsigned int chunk_len = bd->bucket_mem_size; + + if ((size_t)(iter - (uint8_t *)vaddr) + chunk_len > len) + chunk_len = len - (iter - (uint8_t *)vaddr); + if (chunk_len <= bucket_header_sz) + break; + chunk_len -= bucket_header_sz; + + hdr->fill_cnt = 0; + hdr->lcore_id = LCORE_ID_ANY; + rc = rte_mempool_op_populate_default(mp, + RTE_MIN(bd->obj_per_bucket, + max_objs - n_objs), + iter + bucket_header_sz, + iova, chunk_len, + obj_cb, obj_cb_arg); + if (rc < 0) + return rc; + n_objs += rc; + if (iova != RTE_BAD_IOVA) + iova += bucket_page_sz; + } + + return n_objs; +} + +static int +bucket_get_info(const struct rte_mempool *mp, struct rte_mempool_info *info) +{ + struct bucket_data *bd = mp->pool_data; + + info->contig_block_size = bd->obj_per_bucket; + return 0; +} + + +static const struct rte_mempool_ops ops_bucket = { + .name = "bucket", + .alloc = bucket_alloc, + .free = bucket_free, + .enqueue = bucket_enqueue, + .dequeue = bucket_dequeue, + .get_count = bucket_get_count, + .calc_mem_size = bucket_calc_mem_size, + .populate = bucket_populate, + .get_info = bucket_get_info, + .dequeue_contig_blocks = bucket_dequeue_contig_blocks, +}; + + +MEMPOOL_REGISTER_OPS(ops_bucket); diff --git a/drivers/mempool/bucket/rte_mempool_bucket_version.map b/drivers/mempool/bucket/rte_mempool_bucket_version.map new file mode 100644 index 00000000..9b9ab1a4 --- /dev/null +++ b/drivers/mempool/bucket/rte_mempool_bucket_version.map @@ -0,0 +1,4 @@ +DPDK_18.05 { + + local: *; +}; diff --git a/drivers/mempool/dpaa/Makefile b/drivers/mempool/dpaa/Makefile index 4c0d7aaa..da8da1e9 100644 --- a/drivers/mempool/dpaa/Makefile +++ b/drivers/mempool/dpaa/Makefile @@ -22,6 +22,9 @@ EXPORT_MAP := rte_mempool_dpaa_version.map # Lbrary version LIBABIVER := 1 +# depends on dpaa bus which uses experimental API +CFLAGS += -DALLOW_EXPERIMENTAL_API + # all source are stored in SRCS-y # SRCS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa_mempool.c diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c index fb3b6ba0..10c536bf 100644 --- a/drivers/mempool/dpaa/dpaa_mempool.c +++ b/drivers/mempool/dpaa/dpaa_mempool.c @@ -27,6 +27,13 @@ #include +/* List of all the memseg information locally maintained in dpaa driver. This + * is to optimize the PA_to_VA searches until a better mechanism (algo) is + * available. 
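The lookup this list enables is a simple linear scan over {vaddr, iova, len} records, with head insertion keeping the most recently added chunks in front. A simplified sketch (struct seg stands in for the real struct dpaa_memseg, which lives in the dpaa bus headers):

#include <stdint.h>
#include <stddef.h>
#include <sys/queue.h>

struct seg {
	TAILQ_ENTRY(seg) next;
	void *vaddr;
	uint64_t iova;
	size_t len;
};
TAILQ_HEAD(seg_list, seg);

/* Translate a physical/IOVA address to its virtual mapping. */
static void *
pa_to_va(struct seg_list *list, uint64_t paddr)
{
	struct seg *ms;

	TAILQ_FOREACH(ms, list, next) {
		if (paddr >= ms->iova && paddr < ms->iova + ms->len)
			return (uint8_t *)ms->vaddr + (paddr - ms->iova);
	}
	return NULL; /* fall back to the generic memseg walk */
}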
+ */ +struct dpaa_memseg_list rte_dpaa_memsegs + = TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs); + struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS]; static int @@ -115,7 +122,8 @@ dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr) struct bm_buffer buf; int ret; - DPAA_MEMPOOL_DEBUG("Free 0x%lx to bpid: %d", addr, bp_info->bpid); + DPAA_MEMPOOL_DEBUG("Free 0x%" PRIx64 " to bpid: %d", + addr, bp_info->bpid); bm_buffer_set64(&buf, addr); retry: @@ -154,8 +162,7 @@ dpaa_mbuf_free_bulk(struct rte_mempool *pool, if (unlikely(!bp_info->ptov_off)) { /* buffers are from single mem segment */ if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) { - bp_info->ptov_off - = (uint64_t)obj_table[i] - phy; + bp_info->ptov_off = (size_t)obj_table[i] - phy; rte_dpaa_bpid_info[bp_info->bpid].ptov_off = bp_info->ptov_off; } @@ -264,10 +271,9 @@ dpaa_mbuf_get_count(const struct rte_mempool *mp) } static int -dpaa_register_memory_area(const struct rte_mempool *mp, - char *vaddr __rte_unused, - rte_iova_t paddr __rte_unused, - size_t len) +dpaa_populate(struct rte_mempool *mp, unsigned int max_objs, + void *vaddr, rte_iova_t paddr, size_t len, + rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg) { struct dpaa_bp_info *bp_info; unsigned int total_elt_sz; @@ -282,14 +288,40 @@ dpaa_register_memory_area(const struct rte_mempool *mp, bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size; - DPAA_MEMPOOL_DEBUG("Req size %lu vs Available %u\n", - len, total_elt_sz * mp->size); + DPAA_MEMPOOL_DEBUG("Req size %" PRIx64 " vs Available %u\n", + (uint64_t)len, total_elt_sz * mp->size); /* Detect pool area has sufficient space for elements in this memzone */ if (len >= total_elt_sz * mp->size) bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT; + struct dpaa_memseg *ms; + + /* For each memory chunk pinned to the Mempool, a linked list of the + * contained memsegs is created for searching when PA to VA + * conversion is required. + */ + ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0); + if (!ms) { + DPAA_MEMPOOL_ERR("Unable to allocate internal memory."); + DPAA_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available."); + /* If the element is not added, it would only lead to failure + * in searching for the element and the logic would Fallback + * to traditional DPDK memseg traversal code. So, this is not + * a blocking error - but, error would be printed on screen. + */ + return 0; + } - return 0; + ms->vaddr = vaddr; + ms->iova = paddr; + ms->len = len; + /* Head insertions are generally faster than tail insertions as the + * buffers pinned are picked from rear end. 
+ */ + TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next); + + return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len, + obj_cb, obj_cb_arg); } struct rte_mempool_ops dpaa_mpool_ops = { @@ -299,7 +331,7 @@ struct rte_mempool_ops dpaa_mpool_ops = { .enqueue = dpaa_mbuf_free_bulk, .dequeue = dpaa_mbuf_alloc_bulk, .get_count = dpaa_mbuf_get_count, - .register_memory_area = dpaa_register_memory_area, + .populate = dpaa_populate, }; MEMPOOL_REGISTER_OPS(dpaa_mpool_ops); diff --git a/drivers/mempool/dpaa/dpaa_mempool.h b/drivers/mempool/dpaa/dpaa_mempool.h index 9435dd2f..092f326c 100644 --- a/drivers/mempool/dpaa/dpaa_mempool.h +++ b/drivers/mempool/dpaa/dpaa_mempool.h @@ -46,7 +46,7 @@ static inline void * DPAA_MEMPOOL_PTOV(struct dpaa_bp_info *bp_info, uint64_t addr) { if (bp_info->ptov_off) - return ((void *)(addr + bp_info->ptov_off)); + return ((void *) (size_t)(addr + bp_info->ptov_off)); return rte_dpaa_mem_ptov(addr); } diff --git a/drivers/mempool/dpaa/meson.build b/drivers/mempool/dpaa/meson.build new file mode 100644 index 00000000..9163b3db --- /dev/null +++ b/drivers/mempool/dpaa/meson.build @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if host_machine.system() != 'linux' + build = false +endif + +deps += ['bus_dpaa'] +sources = files('dpaa_mempool.c') + +# depends on dpaa bus which uses experimental API +allow_experimental_apis = true diff --git a/drivers/mempool/dpaa/rte_mempool_dpaa_version.map b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map index d05f274d..60bf50b2 100644 --- a/drivers/mempool/dpaa/rte_mempool_dpaa_version.map +++ b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map @@ -2,6 +2,7 @@ DPDK_17.11 { global: rte_dpaa_bpid_info; + rte_dpaa_memsegs; local: *; }; diff --git a/drivers/mempool/dpaa2/Makefile b/drivers/mempool/dpaa2/Makefile index efaac96e..9e4c87d7 100644 --- a/drivers/mempool/dpaa2/Makefile +++ b/drivers/mempool/dpaa2/Makefile @@ -9,14 +9,8 @@ include $(RTE_SDK)/mk/rte.vars.mk # LIB = librte_mempool_dpaa2.a -ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y) -CFLAGS += -O0 -g -CFLAGS += "-Wno-error" -else CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) -endif - CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal @@ -27,6 +21,9 @@ EXPORT_MAP := rte_mempool_dpaa2_version.map # Lbrary version LIBABIVER := 1 +# depends on fslmc bus which uses experimental API +CFLAGS += -DALLOW_EXPERIMENTAL_API + # all source are stored in SRCS-y # SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c @@ -34,4 +31,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c LDLIBS += -lrte_bus_fslmc LDLIBS += -lrte_eal -lrte_mempool -lrte_ring +SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL)-include := rte_dpaa2_mempool.h + include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c index 2bd62e88..7d0435f5 100644 --- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c +++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c @@ -21,16 +21,28 @@ #include #include #include +#include "rte_dpaa2_mempool.h" #include #include #include #include #include "dpaa2_hw_mempool.h" +#include "dpaa2_hw_mempool_logs.h" struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID]; static struct dpaa2_bp_list *h_bp_list; +/* List of all the memseg information locally maintained in dpaa2 driver. This + * is to optimize the PA_to_VA searches until a better mechanism (algo) is + * available. 
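The switch from .register_memory_area to .populate lets a driver record each memory chunk and lay out the objects in a single callback. The general shape, distilled from dpaa_populate() above and dpaa2_populate() below (record_chunk is a placeholder for the memseg bookkeeping):

#include <rte_mempool.h>

static void
record_chunk(void *vaddr, rte_iova_t iova, size_t len)
{
	/* driver-specific bookkeeping, e.g. the memseg lists above */
	(void)vaddr;
	(void)iova;
	(void)len;
}

static int
my_populate(struct rte_mempool *mp, unsigned int max_objs, void *vaddr,
	    rte_iova_t iova, size_t len,
	    rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	record_chunk(vaddr, iova, len);
	/* Delegate object layout; returns the number of objects placed */
	return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova,
					       len, obj_cb, obj_cb_arg);
}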
+ */ +struct dpaa2_memseg_list rte_dpaa2_memsegs + = TAILQ_HEAD_INITIALIZER(rte_dpaa2_memsegs); + +/* Dynamic logging identified for mempool */ +int dpaa2_logtype_mempool; + static int rte_hw_mbuf_create_pool(struct rte_mempool *mp) { @@ -44,30 +56,30 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp) avail_dpbp = dpaa2_alloc_dpbp_dev(); if (!avail_dpbp) { - PMD_DRV_LOG(ERR, "DPAA2 resources not available"); + DPAA2_MEMPOOL_ERR("DPAA2 pool not available!"); return -ENOENT; } if (unlikely(!DPAA2_PER_LCORE_DPIO)) { ret = dpaa2_affine_qbman_swp(); if (ret) { - RTE_LOG(ERR, PMD, "Failure in affining portal\n"); + DPAA2_MEMPOOL_ERR("Failure in affining portal"); goto err1; } } ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token); if (ret != 0) { - PMD_INIT_LOG(ERR, "Resource enable failure with" - " err code: %d\n", ret); + DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d", + ret); goto err1; } ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token, &dpbp_attr); if (ret != 0) { - PMD_INIT_LOG(ERR, "Resource read failure with" - " err code: %d\n", ret); + DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d", + ret); goto err2; } @@ -75,7 +87,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp) sizeof(struct dpaa2_bp_info), RTE_CACHE_LINE_SIZE); if (!bp_info) { - PMD_INIT_LOG(ERR, "No heap memory available for bp_info"); + DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory"); ret = -ENOMEM; goto err2; } @@ -84,7 +96,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp) bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list), RTE_CACHE_LINE_SIZE); if (!bp_list) { - PMD_INIT_LOG(ERR, "No heap memory available"); + DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory"); ret = -ENOMEM; goto err3; } @@ -112,7 +124,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp) sizeof(struct dpaa2_bp_info)); mp->pool_data = (void *)bp_info; - PMD_INIT_LOG(DEBUG, "BP List created for bpid =%d", dpbp_attr.bpid); + DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid); h_bp_list = bp_list; return 0; @@ -134,7 +146,7 @@ rte_hw_mbuf_free_pool(struct rte_mempool *mp) struct dpaa2_dpbp_dev *dpbp_node; if (!mp->pool_data) { - PMD_DRV_LOG(ERR, "Not a valid dpaa22 pool"); + DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool"); return; } @@ -180,7 +192,7 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused, if (unlikely(!DPAA2_PER_LCORE_DPIO)) { ret = dpaa2_affine_qbman_swp(); if (ret != 0) { - RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n"); + DPAA2_MEMPOOL_ERR("Failed to allocate IO portal"); return; } } @@ -233,6 +245,35 @@ aligned: } } +uint16_t +rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp) +{ + struct dpaa2_bp_info *bp_info; + + bp_info = mempool_to_bpinfo(mp); + if (!(bp_info->bp_list)) { + RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n"); + return -ENOMEM; + } + + return bp_info->bpid; +} + +struct rte_mbuf * +rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr) +{ + struct dpaa2_bp_info *bp_info; + + bp_info = mempool_to_bpinfo(mp); + if (!(bp_info->bp_list)) { + RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n"); + return NULL; + } + + return (struct rte_mbuf *)((uint8_t *)buf_addr - + bp_info->meta_data_size); +} + int rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool, void **obj_table, unsigned int count) @@ -242,7 +283,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool, #endif struct qbman_swp *swp; uint16_t bpid; - uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL]; + size_t 
bufs[DPAA2_MBUF_MAX_ACQ_REL]; int i, ret; unsigned int n = 0; struct dpaa2_bp_info *bp_info; @@ -250,7 +291,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool, bp_info = mempool_to_bpinfo(pool); if (!(bp_info->bp_list)) { - RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n"); + DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured"); return -ENOENT; } @@ -259,7 +300,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool, if (unlikely(!DPAA2_PER_LCORE_DPIO)) { ret = dpaa2_affine_qbman_swp(); if (ret != 0) { - RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n"); + DPAA2_MEMPOOL_ERR("Failed to allocate IO portal"); return ret; } } @@ -270,18 +311,18 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool, * then the remainder. */ if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) { - ret = qbman_swp_acquire(swp, bpid, bufs, + ret = qbman_swp_acquire(swp, bpid, (void *)bufs, DPAA2_MBUF_MAX_ACQ_REL); } else { - ret = qbman_swp_acquire(swp, bpid, bufs, + ret = qbman_swp_acquire(swp, bpid, (void *)bufs, count - n); } /* In case of less than requested number of buffers available * in pool, qbman_swp_acquire returns 0 */ if (ret <= 0) { - PMD_TX_LOG(ERR, "Buffer acquire failed with" - " err code: %d", ret); + DPAA2_MEMPOOL_ERR("Buffer acquire failed with" + " err code: %d", ret); /* The API expect the exact number of requested bufs */ /* Releasing all buffers allocated */ rte_dpaa2_mbuf_release(pool, obj_table, bpid, @@ -290,10 +331,11 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool, } /* assigning mbuf from the acquired objects */ for (i = 0; (i < ret) && bufs[i]; i++) { - DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t); + DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t); obj_table[n] = (struct rte_mbuf *) (bufs[i] - bp_info->meta_data_size); - PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN", + DPAA2_MEMPOOL_DP_DEBUG( + "Acquired %p address %p from BMAN\n", (void *)bufs[i], (void *)obj_table[n]); n++; } @@ -301,8 +343,8 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool, #ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER alloc += n; - PMD_TX_LOG(DEBUG, "Total = %d , req = %d done = %d", - alloc, count, n); + DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n", + alloc, count, n); #endif return 0; } @@ -315,7 +357,7 @@ rte_hw_mbuf_free_bulk(struct rte_mempool *pool, bp_info = mempool_to_bpinfo(pool); if (!(bp_info->bp_list)) { - RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n"); + DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured"); return -ENOENT; } rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid, @@ -333,7 +375,7 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp) struct dpaa2_dpbp_dev *dpbp_node; if (!mp || !mp->pool_data) { - RTE_LOG(ERR, PMD, "Invalid mempool provided\n"); + DPAA2_MEMPOOL_ERR("Invalid mempool provided"); return 0; } @@ -343,16 +385,51 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp) ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token, &num_of_bufs); if (ret) { - RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)\n", - ret); + DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)", + ret); return 0; } - RTE_LOG(DEBUG, PMD, "Free bufs = %u\n", num_of_bufs); + DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs); return num_of_bufs; } +static int +dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs, + void *vaddr, rte_iova_t paddr, size_t len, + rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg) +{ + struct dpaa2_memseg *ms; + + /* For each memory chunk pinned to the Mempool, a linked list of the + * 
contained memsegs is created for searching when PA to VA + * conversion is required. + */ + ms = rte_zmalloc(NULL, sizeof(struct dpaa2_memseg), 0); + if (!ms) { + DPAA2_MEMPOOL_ERR("Unable to allocate internal memory."); + DPAA2_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available."); + /* If the element is not added, it would only lead to failure + * in searching for the element and the logic would Fallback + * to traditional DPDK memseg traversal code. So, this is not + * a blocking error - but, error would be printed on screen. + */ + return 0; + } + + ms->vaddr = vaddr; + ms->iova = paddr; + ms->len = len; + /* Head insertions are generally faster than tail insertions as the + * buffers pinned are picked from rear end. + */ + TAILQ_INSERT_HEAD(&rte_dpaa2_memsegs, ms, next); + + return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len, + obj_cb, obj_cb_arg); +} + struct rte_mempool_ops dpaa2_mpool_ops = { .name = DPAA2_MEMPOOL_OPS_NAME, .alloc = rte_hw_mbuf_create_pool, @@ -360,6 +437,14 @@ struct rte_mempool_ops dpaa2_mpool_ops = { .enqueue = rte_hw_mbuf_free_bulk, .dequeue = rte_dpaa2_mbuf_alloc_bulk, .get_count = rte_hw_mbuf_get_count, + .populate = dpaa2_populate, }; MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops); + +RTE_INIT(dpaa2_mempool_init_log) +{ + dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2"); + if (dpaa2_logtype_mempool >= 0) + rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_NOTICE); +} diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h new file mode 100644 index 00000000..c79b3d1c --- /dev/null +++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h @@ -0,0 +1,38 @@ +/*- + * SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 NXP + */ + +#ifndef _DPAA2_HW_MEMPOOL_LOGS_H_ +#define _DPAA2_HW_MEMPOOL_LOGS_H_ + +extern int dpaa2_logtype_mempool; + +#define DPAA2_MEMPOOL_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, dpaa2_logtype_mempool, \ + "mempool/dpaa2: " fmt "\n", ##args) + +/* Debug logs are with Function names */ +#define DPAA2_MEMPOOL_DEBUG(fmt, args...) \ + rte_log(RTE_LOG_DEBUG, dpaa2_logtype_mempool, \ + "mempool/dpaa2: %s(): " fmt "\n", __func__, ##args) + +#define DPAA2_MEMPOOL_INFO(fmt, args...) \ + DPAA2_MEMPOOL_LOG(INFO, fmt, ## args) +#define DPAA2_MEMPOOL_ERR(fmt, args...) \ + DPAA2_MEMPOOL_LOG(ERR, fmt, ## args) +#define DPAA2_MEMPOOL_WARN(fmt, args...) \ + DPAA2_MEMPOOL_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define DPAA2_MEMPOOL_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#define DPAA2_MEMPOOL_DP_DEBUG(fmt, args...) \ + DPAA2_MEMPOOL_DP_LOG(DEBUG, fmt, ## args) +#define DPAA2_MEMPOOL_DP_INFO(fmt, args...) \ + DPAA2_MEMPOOL_DP_LOG(INFO, fmt, ## args) +#define DPAA2_MEMPOOL_DP_WARN(fmt, args...) 
\ + DPAA2_MEMPOOL_DP_LOG(WARNING, fmt, ## args) + +#endif /* _DPAA2_HW_MEMPOOL_LOGS_H_ */ diff --git a/drivers/mempool/dpaa2/meson.build b/drivers/mempool/dpaa2/meson.build new file mode 100644 index 00000000..90bab606 --- /dev/null +++ b/drivers/mempool/dpaa2/meson.build @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if host_machine.system() != 'linux' + build = false +endif + +deps += ['bus_fslmc'] +sources = files('dpaa2_hw_mempool.c') + +# depends on fslmc bus which uses experimental API +allow_experimental_apis = true diff --git a/drivers/mempool/dpaa2/rte_dpaa2_mempool.h b/drivers/mempool/dpaa2/rte_dpaa2_mempool.h new file mode 100644 index 00000000..4a22b7c4 --- /dev/null +++ b/drivers/mempool/dpaa2/rte_dpaa2_mempool.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 NXP + */ + +#ifndef __RTE_DPAA2_MEMPOOL_H__ +#define __RTE_DPAA2_MEMPOOL_H__ + +/** + * @file + * + * NXP specific mempool related functions. + * + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/** + * Get BPID corresponding to the packet pool + * + * @param mp + * memory pool + * + * @return + * BPID of the buffer pool + */ +uint16_t +rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp); + +/** + * Get MBUF from the corresponding 'buf_addr' + * + * @param mp + * memory pool + * @param buf_addr + * The 'buf_addr' of the mbuf. This is the start buffer address + * of the packet buffer (mbuf). + * + * @return + * - MBUF pointer for success + * - NULL in case of error + */ +struct rte_mbuf * +rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr); + +#ifdef __cplusplus +} +#endif + +#endif /* __RTE_DPAA2_MEMPOOL_H__ */ diff --git a/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map index a8aa685c..b9d996a6 100644 --- a/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map +++ b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map @@ -3,6 +3,15 @@ DPDK_17.05 { rte_dpaa2_bpid_info; rte_dpaa2_mbuf_alloc_bulk; + rte_dpaa2_memsegs; local: *; }; + +DPDK_18.05 { + global: + + rte_dpaa2_mbuf_from_buf_addr; + rte_dpaa2_mbuf_pool_bpid; + +} DPDK_17.05; diff --git a/drivers/mempool/meson.build b/drivers/mempool/meson.build index 59918560..4527d980 100644 --- a/drivers/mempool/meson.build +++ b/drivers/mempool/meson.build @@ -1,7 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation -drivers = ['ring', 'stack', 'octeontx'] +drivers = ['bucket', 'dpaa', 'dpaa2', 'octeontx', 'ring', 'stack'] std_deps = ['mempool'] config_flag_fmt = 'RTE_LIBRTE_@0@_MEMPOOL' driver_name_fmt = 'rte_mempool_@0@' diff --git a/drivers/mempool/octeontx/Makefile b/drivers/mempool/octeontx/Makefile index dfc373e6..a3e1dce8 100644 --- a/drivers/mempool/octeontx/Makefile +++ b/drivers/mempool/octeontx/Makefile @@ -10,6 +10,7 @@ include $(RTE_SDK)/mk/rte.vars.mk LIB = librte_mempool_octeontx.a CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/ EXPORT_MAP := rte_mempool_octeontx_version.map LIBABIVER := 1 @@ -17,8 +18,6 @@ LIBABIVER := 1 # # all source are stored in SRCS-y # -SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_ssovf.c -SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_mbox.c SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_fpavf.c SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += rte_mempool_octeontx.c @@ -36,6 +35,6 @@ CFLAGS_rte_mempool_octeontx.o += -Ofast endif LDLIBS += -lrte_eal -lrte_mempool -lrte_ring -lrte_mbuf -LDLIBS += -lrte_bus_pci 
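A sketch of how an application might use the two exports declared in rte_dpaa2_mempool.h above (the consuming code is hypothetical):

#include <stdio.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_dpaa2_mempool.h>

/* Recover the mbuf behind a raw buffer address returned by DPAA2
 * hardware, and fetch the pool's backing bpid for descriptor setup.
 */
static void
inspect_hw_buffer(struct rte_mempool *mp, void *hw_buf_addr)
{
	uint16_t bpid = rte_dpaa2_mbuf_pool_bpid(mp);
	struct rte_mbuf *m = rte_dpaa2_mbuf_from_buf_addr(mp, hw_buf_addr);

	printf("bpid %u, mbuf %p\n", bpid, (void *)m);
}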
+LDLIBS += -lrte_bus_pci -lrte_common_octeontx include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/mempool/octeontx/meson.build b/drivers/mempool/octeontx/meson.build index 1e894a56..3baaf7db 100644 --- a/drivers/mempool/octeontx/meson.build +++ b/drivers/mempool/octeontx/meson.build @@ -1,10 +1,8 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Cavium, Inc -sources = files('octeontx_ssovf.c', - 'octeontx_mbox.c', - 'octeontx_fpavf.c', +sources = files('octeontx_fpavf.c', 'rte_mempool_octeontx.c' ) -deps += ['mbuf', 'bus_pci'] +deps += ['mbuf', 'bus_pci', 'common_octeontx'] diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c index 61c72c7c..4cf387e8 100644 --- a/drivers/mempool/octeontx/octeontx_fpavf.c +++ b/drivers/mempool/octeontx/octeontx_fpavf.c @@ -108,17 +108,11 @@ static struct octeontx_fpadev fpadev; int octeontx_logtype_fpavf; int octeontx_logtype_fpavf_mbox; -RTE_INIT(otx_pool_init_log); -static void -otx_pool_init_log(void) +RTE_INIT(otx_pool_init_log) { octeontx_logtype_fpavf = rte_log_register("pmd.mempool.octeontx"); if (octeontx_logtype_fpavf >= 0) rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_NOTICE); - - octeontx_logtype_fpavf_mbox = rte_log_register("pmd.mempool.octeontx.mbox"); - if (octeontx_logtype_fpavf_mbox >= 0) - rte_log_set_level(octeontx_logtype_fpavf_mbox, RTE_LOG_NOTICE); } /* lock is taken by caller */ @@ -247,13 +241,13 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size, POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN | POOL_ENA; - cfg.aid = 0; + cfg.aid = FPA_AURA_IDX(gpool); cfg.pool_cfg = reg; cfg.pool_stack_base = phys_addr; cfg.pool_stack_end = phys_addr + memsz; cfg.aura_cfg = (1 << 9); - ret = octeontx_ssovf_mbox_send(&hdr, &cfg, + ret = octeontx_mbox_send(&hdr, &cfg, sizeof(struct octeontx_mbox_fpa_cfg), &resp, sizeof(resp)); if (ret < 0) { @@ -298,7 +292,7 @@ octeontx_fpapf_pool_destroy(unsigned int gpool_index) cfg.pool_stack_end = 0; cfg.aura_cfg = 0; - ret = octeontx_ssovf_mbox_send(&hdr, &cfg, + ret = octeontx_mbox_send(&hdr, &cfg, sizeof(struct octeontx_mbox_fpa_cfg), &resp, sizeof(resp)); if (ret < 0) { @@ -331,15 +325,16 @@ octeontx_fpapf_aura_attach(unsigned int gpool_index) hdr.vfid = gpool_index; hdr.res_code = 0; memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg)); - cfg.aid = gpool_index; /* gpool is guara */ + cfg.aid = FPA_AURA_IDX(gpool_index); - ret = octeontx_ssovf_mbox_send(&hdr, &cfg, + ret = octeontx_mbox_send(&hdr, &cfg, sizeof(struct octeontx_mbox_fpa_cfg), &resp, sizeof(resp)); if (ret < 0) { fpavf_log_err("Could not attach fpa "); fpavf_log_err("aura %d to pool %d. Err=%d. 
FuncErr=%d\n", - gpool_index, gpool_index, ret, hdr.res_code); + FPA_AURA_IDX(gpool_index), gpool_index, ret, + hdr.res_code); ret = -EACCES; goto err; } @@ -359,14 +354,15 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index) goto err; } - cfg.aid = gpool_index; /* gpool is gaura */ + cfg.aid = FPA_AURA_IDX(gpool_index); hdr.coproc = FPA_COPROC; hdr.msg = FPA_DETACHAURA; hdr.vfid = gpool_index; - ret = octeontx_ssovf_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0); + ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0); if (ret < 0) { fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n", - gpool_index, ret, hdr.res_code); + FPA_AURA_IDX(gpool_index), ret, + hdr.res_code); ret = -EINVAL; } @@ -410,7 +406,7 @@ octeontx_fpapf_start_count(uint16_t gpool_index) hdr.coproc = FPA_COPROC; hdr.msg = FPA_START_COUNT; hdr.vfid = gpool_index; - ret = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0); + ret = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); if (ret < 0) { fpavf_log_err("Could not start buffer counting for "); fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n", @@ -473,6 +469,7 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle) { uint64_t cnt, limit, avail; uint8_t gpool; + uint16_t gaura; uintptr_t pool_bar; if (unlikely(!octeontx_fpa_handle_valid(handle))) @@ -480,14 +477,16 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle) /* get the gpool */ gpool = octeontx_fpa_bufpool_gpool(handle); + /* get the aura */ + gaura = octeontx_fpa_bufpool_gaura(handle); /* Get pool bar address from handle */ pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK; cnt = fpavf_read64((void *)((uintptr_t)pool_bar + - FPA_VF_VHAURA_CNT(gpool))); + FPA_VF_VHAURA_CNT(gaura))); limit = fpavf_read64((void *)((uintptr_t)pool_bar + - FPA_VF_VHAURA_CNT_LIMIT(gpool))); + FPA_VF_VHAURA_CNT_LIMIT(gaura))); avail = fpavf_read64((void *)((uintptr_t)pool_bar + FPA_VF_VHPOOL_AVAILABLE(gpool))); @@ -500,6 +499,7 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count, unsigned int buf_offset, int node_id) { unsigned int gpool; + unsigned int gaura; uintptr_t gpool_handle; uintptr_t pool_bar; int res; @@ -549,16 +549,18 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count, goto error_pool_destroy; } + gaura = FPA_AURA_IDX(gpool); + /* Release lock */ rte_spinlock_unlock(&fpadev.lock); /* populate AURA registers */ fpavf_write64(object_count, (void *)((uintptr_t)pool_bar + - FPA_VF_VHAURA_CNT(gpool))); + FPA_VF_VHAURA_CNT(gaura))); fpavf_write64(object_count, (void *)((uintptr_t)pool_bar + - FPA_VF_VHAURA_CNT_LIMIT(gpool))); + FPA_VF_VHAURA_CNT_LIMIT(gaura))); fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar + - FPA_VF_VHAURA_CNT_THRESHOLD(gpool))); + FPA_VF_VHAURA_CNT_THRESHOLD(gaura))); octeontx_fpapf_start_count(gpool); @@ -585,6 +587,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id) uint64_t sz; uint64_t cnt, avail; uint8_t gpool; + uint16_t gaura; uintptr_t pool_bar; int ret; @@ -598,13 +601,15 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id) /* get the pool */ gpool = octeontx_fpa_bufpool_gpool(handle); + /* get the aura */ + gaura = octeontx_fpa_bufpool_gaura(handle); /* Get pool bar address from handle */ pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK; /* Check for no outstanding buffers */ cnt = fpavf_read64((void *)((uintptr_t)pool_bar + - FPA_VF_VHAURA_CNT(gpool))); + FPA_VF_VHAURA_CNT(gaura))); if (cnt) { fpavf_log_dbg("buffer exist in pool cnt %" PRId64 "\n", cnt); return -EBUSY; @@ -617,9 +622,9 @@ 
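The gpool-to-gaura translation running through these hunks is a fixed shift: each pool now owns a block of 16 hardware aura indexes and uses the first entry of its block. The helpers below mirror FPA_AURA_IDX() and octeontx_fpa_bufpool_gaura() from octeontx_fpavf.h:

#include <stdint.h>

#define FPA_VF_MAX      32
#define FPA_GPOOL_MASK  (FPA_VF_MAX - 1)
#define FPA_GAURA_SHIFT 4

static inline uint8_t
handle_to_gpool(uintptr_t handle)
{
	return (uint8_t)handle & FPA_GPOOL_MASK;
}

/* aura index = gpool * 16, i.e. aura 0 of the pool's block */
static inline uint16_t
handle_to_gaura(uintptr_t handle)
{
	return handle_to_gpool(handle) << FPA_GAURA_SHIFT;
}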
octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id) /* Prepare to empty the entire POOL */ fpavf_write64(avail, (void *)((uintptr_t)pool_bar + - FPA_VF_VHAURA_CNT_LIMIT(gpool))); + FPA_VF_VHAURA_CNT_LIMIT(gaura))); fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar + - FPA_VF_VHAURA_CNT_THRESHOLD(gpool))); + FPA_VF_VHAURA_CNT_THRESHOLD(gaura))); /* Empty the pool */ /* Invalidate the POOL */ @@ -631,11 +636,11 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id) /* Yank a buffer from the pool */ node = (void *)(uintptr_t) fpavf_read64((void *) - (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool))); + (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura))); if (node == NULL) { fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n", - gpool, avail); + gaura, avail); break; } @@ -669,9 +674,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id) /* Deactivate the AURA */ fpavf_write64(0, (void *)((uintptr_t)pool_bar + - FPA_VF_VHAURA_CNT_LIMIT(gpool))); + FPA_VF_VHAURA_CNT_LIMIT(gaura))); fpavf_write64(0, (void *)((uintptr_t)pool_bar + - FPA_VF_VHAURA_CNT_THRESHOLD(gpool))); + FPA_VF_VHAURA_CNT_THRESHOLD(gaura))); ret = octeontx_fpapf_aura_detach(gpool); if (ret) { diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h index b76f40e7..b00be137 100644 --- a/drivers/mempool/octeontx/octeontx_fpavf.h +++ b/drivers/mempool/octeontx/octeontx_fpavf.h @@ -14,6 +14,7 @@ #define FPA_VF_MAX 32 #define FPA_GPOOL_MASK (FPA_VF_MAX-1) +#define FPA_GAURA_SHIFT 4 /* FPA VF register offsets */ #define FPA_VF_INT(x) (0x200ULL | ((x) << 22)) @@ -36,6 +37,7 @@ #define FPA_VF_FREE_ADDRS_S(x, y, z) \ ((x) | (((y) & 0x1ff) << 3) | ((((z) & 1)) << 14)) +#define FPA_AURA_IDX(gpool) (gpool << FPA_GAURA_SHIFT) /* FPA VF register offsets from VF_BAR4, size 2 MByte */ #define FPA_VF_MSIX_VEC_ADDR 0x00000 #define FPA_VF_MSIX_VEC_CTL 0x00008 @@ -102,4 +104,11 @@ octeontx_fpa_bufpool_gpool(uintptr_t handle) { return (uint8_t)handle & FPA_GPOOL_MASK; } + +static __rte_always_inline uint16_t +octeontx_fpa_bufpool_gaura(uintptr_t handle) +{ + return octeontx_fpa_bufpool_gpool(handle) << FPA_GAURA_SHIFT; +} + #endif /* __OCTEONTX_FPAVF_H__ */ diff --git a/drivers/mempool/octeontx/octeontx_mbox.c b/drivers/mempool/octeontx/octeontx_mbox.c deleted file mode 100644 index f8cb6a45..00000000 --- a/drivers/mempool/octeontx/octeontx_mbox.c +++ /dev/null @@ -1,214 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017 Cavium, Inc - */ - -#include - -#include -#include -#include -#include -#include - -#include "octeontx_mbox.h" -#include "octeontx_pool_logs.h" - -/* Mbox operation timeout in seconds */ -#define MBOX_WAIT_TIME_SEC 3 -#define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */) - -/* Mbox channel state */ -enum { - MBOX_CHAN_STATE_REQ = 1, - MBOX_CHAN_STATE_RES = 0, -}; - -/* Response messages */ -enum { - MBOX_RET_SUCCESS, - MBOX_RET_INVALID, - MBOX_RET_INTERNAL_ERR, -}; - -struct mbox { - int init_once; - uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */ - uint8_t *reg; /* Store to this register triggers PF mbox interrupt */ - uint16_t tag_own; /* Last tag which was written to own channel */ - rte_spinlock_t lock; -}; - -static struct mbox octeontx_mbox; - -/* - * Structure used for mbox synchronization - * This structure sits at the begin of Mbox RAM and used as main - * synchronization point for channel communication - */ -struct mbox_ram_hdr { - union { - uint64_t u64; - struct { - uint8_t chan_state : 1; - uint8_t 
coproc : 7; - uint8_t msg; - uint8_t vfid; - uint8_t res_code; - uint16_t tag; - uint16_t len; - }; - }; -}; - -static inline void -mbox_msgcpy(volatile uint8_t *d, volatile const uint8_t *s, uint16_t size) -{ - uint16_t i; - - for (i = 0; i < size; i++) - d[i] = s[i]; -} - -static inline void -mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr, - const void *txmsg, uint16_t txsize) -{ - struct mbox_ram_hdr old_hdr; - struct mbox_ram_hdr new_hdr = { {0} }; - uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base; - uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr); - - /* - * Initialize the channel with the tag left by last send. - * On success full mbox send complete, PF increments the tag by one. - * The sender can validate integrity of PF message with this scheme - */ - old_hdr.u64 = rte_read64(ram_mbox_hdr); - m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */ - - /* Copy msg body */ - if (txmsg) - mbox_msgcpy(ram_mbox_msg, txmsg, txsize); - - /* Prepare new hdr */ - new_hdr.chan_state = MBOX_CHAN_STATE_REQ; - new_hdr.coproc = hdr->coproc; - new_hdr.msg = hdr->msg; - new_hdr.vfid = hdr->vfid; - new_hdr.tag = m->tag_own; - new_hdr.len = txsize; - - /* Write the msg header */ - rte_write64(new_hdr.u64, ram_mbox_hdr); - rte_smp_wmb(); - /* Notify PF about the new msg - write to MBOX reg generates PF IRQ */ - rte_write64(0, m->reg); -} - -static inline int -mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr, - void *rxmsg, uint16_t rxsize) -{ - int res = 0, wait; - uint16_t len; - struct mbox_ram_hdr rx_hdr; - uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base; - uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr); - - /* Wait for response */ - wait = MBOX_WAIT_TIME_SEC * 1000 * 10; - while (wait > 0) { - rte_delay_us(100); - rx_hdr.u64 = rte_read64(ram_mbox_hdr); - if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES) - break; - --wait; - } - - hdr->res_code = rx_hdr.res_code; - m->tag_own++; - - /* Timeout */ - if (wait <= 0) { - res = -ETIMEDOUT; - goto error; - } - - /* Tag mismatch */ - if (m->tag_own != rx_hdr.tag) { - res = -EINVAL; - goto error; - } - - /* PF nacked the msg */ - if (rx_hdr.res_code != MBOX_RET_SUCCESS) { - res = -EBADMSG; - goto error; - } - - len = RTE_MIN(rx_hdr.len, rxsize); - if (rxmsg) - mbox_msgcpy(rxmsg, ram_mbox_msg, len); - - return len; - -error: - mbox_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)", - m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res, - hdr->res_code); - return res; -} - -static inline int -mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg, - uint16_t txsize, void *rxmsg, uint16_t rxsize) -{ - int res = -EINVAL; - - if (m->init_once == 0 || hdr == NULL || - txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) { - mbox_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d", - m->init_once, hdr, txsize, rxsize); - return res; - } - - rte_spinlock_lock(&m->lock); - - mbox_send_request(m, hdr, txmsg, txsize); - res = mbox_wait_response(m, hdr, rxmsg, rxsize); - - rte_spinlock_unlock(&m->lock); - return res; -} - -static inline int -mbox_setup(struct mbox *m) -{ - if (unlikely(m->init_once == 0)) { - rte_spinlock_init(&m->lock); - m->ram_mbox_base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, 0, 4); - m->reg = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0); - m->reg += SSO_VHGRP_PF_MBOX(1); - - if (m->ram_mbox_base == NULL || m->reg == NULL) { - mbox_log_err("Invalid ram_mbox_base=%p or reg=%p", - m->ram_mbox_base, m->reg); - 
return -EINVAL; - } - m->init_once = 1; - } - return 0; -} - -int -octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata, - uint16_t txlen, void *rxdata, uint16_t rxlen) -{ - struct mbox *m = &octeontx_mbox; - - RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8); - if (rte_eal_process_type() != RTE_PROC_PRIMARY || mbox_setup(m)) - return -EINVAL; - - return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen); -} diff --git a/drivers/mempool/octeontx/octeontx_mbox.h b/drivers/mempool/octeontx/octeontx_mbox.h deleted file mode 100644 index 1b056071..00000000 --- a/drivers/mempool/octeontx/octeontx_mbox.h +++ /dev/null @@ -1,36 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017 Cavium, Inc - */ - -#ifndef __OCTEONTX_MBOX_H__ -#define __OCTEONTX_MBOX_H__ - -#include - -#define SSOW_BAR4_LEN (64 * 1024) -#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3)) - -struct octeontx_ssovf_info { - uint16_t domain; /* Domain id */ - uint8_t total_ssovfs; /* Total sso groups available in domain */ - uint8_t total_ssowvfs;/* Total sso hws available in domain */ -}; - -enum octeontx_ssovf_type { - OCTEONTX_SSO_GROUP, /* SSO group vf */ - OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */ -}; - -struct octeontx_mbox_hdr { - uint16_t vfid; /* VF index or pf resource index local to the domain */ - uint8_t coproc; /* Coprocessor id */ - uint8_t msg; /* Message id */ - uint8_t res_code; /* Functional layer response code */ -}; - -int octeontx_ssovf_info(struct octeontx_ssovf_info *info); -void *octeontx_ssovf_bar(enum octeontx_ssovf_type, uint8_t id, uint8_t bar); -int octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr, - void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen); - -#endif /* __OCTEONTX_MBOX_H__ */ diff --git a/drivers/mempool/octeontx/octeontx_pool_logs.h b/drivers/mempool/octeontx/octeontx_pool_logs.h index 95865192..7b4e1b38 100644 --- a/drivers/mempool/octeontx/octeontx_pool_logs.h +++ b/drivers/mempool/octeontx/octeontx_pool_logs.h @@ -11,21 +11,12 @@ rte_log(RTE_LOG_ ## level, octeontx_logtype_fpavf,\ "%s() line %u: " fmt "\n", __func__, __LINE__, ## args) -#define MBOX_LOG(level, fmt, args...) \ - rte_log(RTE_LOG_ ## level, octeontx_logtype_fpavf_mbox,\ - "%s() line %u: " fmt "\n", __func__, __LINE__, ## args) - #define fpavf_log_info(fmt, ...) FPAVF_LOG(INFO, fmt, ##__VA_ARGS__) #define fpavf_log_dbg(fmt, ...) FPAVF_LOG(DEBUG, fmt, ##__VA_ARGS__) #define fpavf_log_err(fmt, ...) FPAVF_LOG(ERR, fmt, ##__VA_ARGS__) #define fpavf_func_trace fpavf_log_dbg -#define mbox_log_info(fmt, ...) MBOX_LOG(INFO, fmt, ##__VA_ARGS__) -#define mbox_log_dbg(fmt, ...) MBOX_LOG(DEBUG, fmt, ##__VA_ARGS__) -#define mbox_log_err(fmt, ...) 
MBOX_LOG(ERR, fmt, ##__VA_ARGS__) -#define mbox_func_trace mbox_log_dbg extern int octeontx_logtype_fpavf; -extern int octeontx_logtype_fpavf_mbox; #endif /* __OCTEONTX_POOL_LOGS_H__*/ diff --git a/drivers/mempool/octeontx/octeontx_ssovf.c b/drivers/mempool/octeontx/octeontx_ssovf.c deleted file mode 100644 index 97b24066..00000000 --- a/drivers/mempool/octeontx/octeontx_ssovf.c +++ /dev/null @@ -1,271 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017 Cavium, Inc - */ - -#include -#include -#include -#include -#include -#include - -#include "octeontx_mbox.h" -#include "octeontx_pool_logs.h" - -#define PCI_VENDOR_ID_CAVIUM 0x177D -#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B -#define PCI_DEVICE_ID_OCTEONTX_SSOWS_VF 0xA04D - -#define SSO_MAX_VHGRP (64) -#define SSO_MAX_VHWS (32) - -#define SSO_VHGRP_AQ_THR (0x1E0ULL) - -struct ssovf_res { - uint16_t domain; - uint16_t vfid; - void *bar0; - void *bar2; -}; - -struct ssowvf_res { - uint16_t domain; - uint16_t vfid; - void *bar0; - void *bar2; - void *bar4; -}; - -struct ssowvf_identify { - uint16_t domain; - uint16_t vfid; -}; - -struct ssodev { - uint8_t total_ssovfs; - uint8_t total_ssowvfs; - struct ssovf_res grp[SSO_MAX_VHGRP]; - struct ssowvf_res hws[SSO_MAX_VHWS]; -}; - -static struct ssodev sdev; - -/* Interface functions */ -int -octeontx_ssovf_info(struct octeontx_ssovf_info *info) -{ - uint8_t i; - uint16_t domain; - - if (rte_eal_process_type() != RTE_PROC_PRIMARY || info == NULL) - return -EINVAL; - - if (sdev.total_ssovfs == 0 || sdev.total_ssowvfs == 0) - return -ENODEV; - - domain = sdev.grp[0].domain; - for (i = 0; i < sdev.total_ssovfs; i++) { - /* Check vfid's are contiguous and belong to same domain */ - if (sdev.grp[i].vfid != i || - sdev.grp[i].bar0 == NULL || - sdev.grp[i].domain != domain) { - mbox_log_err("GRP error, vfid=%d/%d domain=%d/%d %p", - i, sdev.grp[i].vfid, - domain, sdev.grp[i].domain, - sdev.grp[i].bar0); - return -EINVAL; - } - } - - for (i = 0; i < sdev.total_ssowvfs; i++) { - /* Check vfid's are contiguous and belong to same domain */ - if (sdev.hws[i].vfid != i || - sdev.hws[i].bar0 == NULL || - sdev.hws[i].domain != domain) { - mbox_log_err("HWS error, vfid=%d/%d domain=%d/%d %p", - i, sdev.hws[i].vfid, - domain, sdev.hws[i].domain, - sdev.hws[i].bar0); - return -EINVAL; - } - } - - info->domain = domain; - info->total_ssovfs = sdev.total_ssovfs; - info->total_ssowvfs = sdev.total_ssowvfs; - return 0; -} - -void* -octeontx_ssovf_bar(enum octeontx_ssovf_type type, uint8_t id, uint8_t bar) -{ - if (rte_eal_process_type() != RTE_PROC_PRIMARY || - type > OCTEONTX_SSO_HWS) - return NULL; - - if (type == OCTEONTX_SSO_GROUP) { - if (id >= sdev.total_ssovfs) - return NULL; - } else { - if (id >= sdev.total_ssowvfs) - return NULL; - } - - if (type == OCTEONTX_SSO_GROUP) { - switch (bar) { - case 0: - return sdev.grp[id].bar0; - case 2: - return sdev.grp[id].bar2; - default: - return NULL; - } - } else { - switch (bar) { - case 0: - return sdev.hws[id].bar0; - case 2: - return sdev.hws[id].bar2; - case 4: - return sdev.hws[id].bar4; - default: - return NULL; - } - } -} - -/* SSOWVF pcie device aka event port probe */ - -static int -ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) -{ - uint16_t vfid; - struct ssowvf_res *res; - struct ssowvf_identify *id; - - RTE_SET_USED(pci_drv); - - /* For secondary processes, the primary has done all the work */ - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; - - if (pci_dev->mem_resource[0].addr == NULL || - 
pci_dev->mem_resource[2].addr == NULL || - pci_dev->mem_resource[4].addr == NULL) { - mbox_log_err("Empty bars %p %p %p", - pci_dev->mem_resource[0].addr, - pci_dev->mem_resource[2].addr, - pci_dev->mem_resource[4].addr); - return -ENODEV; - } - - if (pci_dev->mem_resource[4].len != SSOW_BAR4_LEN) { - mbox_log_err("Bar4 len mismatch %d != %d", - SSOW_BAR4_LEN, (int)pci_dev->mem_resource[4].len); - return -EINVAL; - } - - id = pci_dev->mem_resource[4].addr; - vfid = id->vfid; - if (vfid >= SSO_MAX_VHWS) { - mbox_log_err("Invalid vfid(%d/%d)", vfid, SSO_MAX_VHWS); - return -EINVAL; - } - - res = &sdev.hws[vfid]; - res->vfid = vfid; - res->bar0 = pci_dev->mem_resource[0].addr; - res->bar2 = pci_dev->mem_resource[2].addr; - res->bar4 = pci_dev->mem_resource[4].addr; - res->domain = id->domain; - - sdev.total_ssowvfs++; - rte_wmb(); - mbox_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain, - res->vfid, sdev.total_ssowvfs); - return 0; -} - -static const struct rte_pci_id pci_ssowvf_map[] = { - { - RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, - PCI_DEVICE_ID_OCTEONTX_SSOWS_VF) - }, - { - .vendor_id = 0, - }, -}; - -static struct rte_pci_driver pci_ssowvf = { - .id_table = pci_ssowvf_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING, - .probe = ssowvf_probe, -}; - -RTE_PMD_REGISTER_PCI(octeontx_ssowvf, pci_ssowvf); - -/* SSOVF pcie device aka event queue probe */ - -static int -ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) -{ - uint64_t val; - uint16_t vfid; - uint8_t *idreg; - struct ssovf_res *res; - - RTE_SET_USED(pci_drv); - - /* For secondary processes, the primary has done all the work */ - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; - - if (pci_dev->mem_resource[0].addr == NULL || - pci_dev->mem_resource[2].addr == NULL) { - mbox_log_err("Empty bars %p %p", - pci_dev->mem_resource[0].addr, - pci_dev->mem_resource[2].addr); - return -ENODEV; - } - idreg = pci_dev->mem_resource[0].addr; - idreg += SSO_VHGRP_AQ_THR; - val = rte_read64(idreg); - - /* Write back the default value of aq_thr */ - rte_write64((1ULL << 33) - 1, idreg); - vfid = (val >> 16) & 0xffff; - if (vfid >= SSO_MAX_VHGRP) { - mbox_log_err("Invalid vfid (%d/%d)", vfid, SSO_MAX_VHGRP); - return -EINVAL; - } - - res = &sdev.grp[vfid]; - res->vfid = vfid; - res->bar0 = pci_dev->mem_resource[0].addr; - res->bar2 = pci_dev->mem_resource[2].addr; - res->domain = val & 0xffff; - - sdev.total_ssovfs++; - rte_wmb(); - mbox_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain, - res->vfid, sdev.total_ssovfs); - return 0; -} - -static const struct rte_pci_id pci_ssovf_map[] = { - { - RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, - PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF) - }, - { - .vendor_id = 0, - }, -}; - -static struct rte_pci_driver pci_ssovf = { - .id_table = pci_ssovf_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING, - .probe = ssovf_probe, -}; - -RTE_PMD_REGISTER_PCI(octeontx_ssovf, pci_ssovf); diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c index d143d05c..ab94dfe9 100644 --- a/drivers/mempool/octeontx/rte_mempool_octeontx.c +++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c @@ -126,28 +126,66 @@ octeontx_fpavf_get_count(const struct rte_mempool *mp) return octeontx_fpa_bufpool_free_count(pool); } -static int -octeontx_fpavf_get_capabilities(const struct rte_mempool *mp, - unsigned int *flags) +static ssize_t +octeontx_fpavf_calc_mem_size(const struct rte_mempool *mp, + uint32_t obj_num, uint32_t pg_shift, + size_t *min_chunk_size, size_t 
*align) { - RTE_SET_USED(mp); - *flags |= (MEMPOOL_F_CAPA_PHYS_CONTIG | - MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS); - return 0; + ssize_t mem_size; + + /* + * Simply need space for one more object to be able to + * fulfil alignment requirements. + */ + mem_size = rte_mempool_op_calc_mem_size_default(mp, obj_num + 1, + pg_shift, + min_chunk_size, align); + if (mem_size >= 0) { + /* + * Memory area which contains objects must be physically + * contiguous. + */ + *min_chunk_size = mem_size; + } + + return mem_size; } static int -octeontx_fpavf_register_memory_area(const struct rte_mempool *mp, - char *vaddr, rte_iova_t paddr, size_t len) +octeontx_fpavf_populate(struct rte_mempool *mp, unsigned int max_objs, + void *vaddr, rte_iova_t iova, size_t len, + rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg) { - RTE_SET_USED(paddr); + size_t total_elt_sz; + size_t off; uint8_t gpool; uintptr_t pool_bar; + int ret; + + if (iova == RTE_BAD_IOVA) + return -EINVAL; + + total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size; + + /* align object start address to a multiple of total_elt_sz */ + off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz); + + if (len < off) + return -EINVAL; + + vaddr = (char *)vaddr + off; + iova += off; + len -= off; gpool = octeontx_fpa_bufpool_gpool(mp->pool_id); pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK; - return octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool); + ret = octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool); + if (ret < 0) + return ret; + + return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova, len, + obj_cb, obj_cb_arg); } static struct rte_mempool_ops octeontx_fpavf_ops = { @@ -157,8 +195,8 @@ static struct rte_mempool_ops octeontx_fpavf_ops = { .enqueue = octeontx_fpavf_enqueue, .dequeue = octeontx_fpavf_dequeue, .get_count = octeontx_fpavf_get_count, - .get_capabilities = octeontx_fpavf_get_capabilities, - .register_memory_area = octeontx_fpavf_register_memory_area, + .calc_mem_size = octeontx_fpavf_calc_mem_size, + .populate = octeontx_fpavf_populate, }; MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops); diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx_version.map b/drivers/mempool/octeontx/rte_mempool_octeontx_version.map index fe8cdeca..a7530317 100644 --- a/drivers/mempool/octeontx/rte_mempool_octeontx_version.map +++ b/drivers/mempool/octeontx/rte_mempool_octeontx_version.map @@ -1,9 +1,3 @@ DPDK_17.11 { - global: - - octeontx_ssovf_info; - octeontx_ssovf_bar; - octeontx_ssovf_mbox_send; - local: *; }; diff --git a/drivers/meson.build b/drivers/meson.build index b41a0f18..f94e2fe6 100644 --- a/drivers/meson.build +++ b/drivers/meson.build @@ -2,12 +2,19 @@ # Copyright(c) 2017 Intel Corporation # Defines the order in which the drivers are buit. -driver_classes = ['bus', - 'mempool', # depends on bus. - 'net', # depends on bus and mempool. - 'crypto', # depenss on bus, mempool (net in future). - 'event'] # depends on bus, mempool and net. - +driver_classes = ['common', + 'bus', + 'mempool', # depends on common and bus. + 'net', # depends on common, bus and mempool. + 'crypto', # depends on common, bus and mempool (net in future). + 'compress', # depends on common, bus, mempool. + 'event', # depends on common, bus, mempool and net. + 'raw'] # depends on common, bus, mempool, net and event. 
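[Editor's sketch, not part of the patch: the new octeontx_fpavf_populate() hunk above aligns the first object to a multiple of total_elt_sz before delegating to rte_mempool_op_populate_default(). The standalone C program below illustrates only that offset computation; the sizes and addresses in it are made-up demonstration values, not taken from the driver.]

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values only (hypothetical, not from the patch). */
	size_t total_elt_sz = 2176;     /* header + elt + trailer size */
	uintptr_t vaddr = 0x100010;     /* start of the memory chunk */
	size_t len = 1 << 20;           /* length of the memory chunk */

	/* Same computation as the patch: distance from vaddr up to the
	 * next multiple of total_elt_sz. An already-aligned vaddr yields
	 * off == total_elt_sz, i.e. one full element is skipped, which
	 * matches the driver's behaviour. */
	size_t off = total_elt_sz - (vaddr % total_elt_sz);

	if (len < off)
		return 1;               /* chunk too small, as in the patch */

	printf("skip %zu bytes; objects start at 0x%" PRIxPTR "\n",
	       off, vaddr + off);
	return 0;
}

[End of editor's sketch; the patch resumes below.]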
+ +default_cflags = machine_args +if cc.has_argument('-Wno-format-truncation') + default_cflags += '-Wno-format-truncation' +endif foreach class:driver_classes drivers = [] std_deps = [] @@ -28,7 +35,7 @@ foreach class:driver_classes allow_experimental_apis = false sources = [] objs = [] - cflags = machine_args + cflags = default_cflags includes = [include_directories(drv_path)] # set up internal deps. Drivers can append/override as necessary deps = std_deps @@ -55,6 +62,10 @@ foreach class:driver_classes shared_objs = [] static_objs = [] foreach d:deps + if not is_variable('shared_rte_' + d) + error('Missing dependency ' + d + + ' for driver ' + lib_name) + endif shared_objs += [get_variable('shared_rte_' + d)] static_objs += [get_variable('static_rte_' + d)] endforeach diff --git a/drivers/net/Makefile b/drivers/net/Makefile index e1127326..664398de 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -12,11 +12,16 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark DIRS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf DIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp +DIRS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding DIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe +ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y) DIRS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa +endif +ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy) DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2 +endif DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000 DIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic @@ -27,7 +32,8 @@ DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe DIRS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += liquidio DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4 DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5 -DIRS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl +DIRS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mvpp2 +DIRS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += netvsc DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp DIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += null @@ -53,9 +59,12 @@ endif # $(CONFIG_RTE_LIBRTE_SCHED) ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y) DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost +ifeq ($(CONFIG_RTE_EAL_VFIO),y) +DIRS-$(CONFIG_RTE_LIBRTE_IFC_PMD) += ifc +endif endif # $(CONFIG_RTE_LIBRTE_VHOST) -ifeq ($(CONFIG_RTE_LIBRTE_MRVL_PMD),y) +ifeq ($(CONFIG_RTE_LIBRTE_MVPP2_PMD),y) ifeq ($(CONFIG_RTE_LIBRTE_CFGFILE),n) $(error "RTE_LIBRTE_CFGFILE must be enabled in configuration!") endif diff --git a/drivers/net/af_packet/Makefile b/drivers/net/af_packet/Makefile index bb37d67a..39a1e0d2 100644 --- a/drivers/net/af_packet/Makefile +++ b/drivers/net/af_packet/Makefile @@ -1,35 +1,8 @@ -# BSD LICENSE -# -# Copyright(c) 2014 John W. Linville -# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. -# Copyright(c) 2014 6WIND S.A. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. 
-# * Neither the name of Intel Corporation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2014 John W. Linville +# Copyright(c) 2010-2014 Intel Corporation. +# Copyright(c) 2014 6WIND S.A. +# All rights reserved. include $(RTE_SDK)/mk/rte.vars.mk diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c index 57eccfd0..eb3cce3a 100644 --- a/drivers/net/af_packet/rte_eth_af_packet.c +++ b/drivers/net/af_packet/rte_eth_af_packet.c @@ -94,9 +94,15 @@ static struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_AUTONEG + .link_autoneg = ETH_LINK_FIXED, }; +static int af_packet_logtype; + +#define PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, af_packet_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + static uint16_t eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) { @@ -299,6 +305,7 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_rx_queues = (uint16_t)internals->nb_queues; dev_info->max_tx_queues = (uint16_t)internals->nb_queues; dev_info->min_rx_bufsize = 0; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP; } static int @@ -393,8 +400,8 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, data_size -= TPACKET2_HDRLEN - sizeof(struct sockaddr_ll); if (data_size > buf_size) { - RTE_LOG(ERR, PMD, - "%s: %d bytes will not fit in mbuf (%d bytes)\n", + PMD_LOG(ERR, + "%s: %d bytes will not fit in mbuf (%d bytes)", dev->device->name, data_size, buf_size); return -ENOMEM; } @@ -515,7 +522,7 @@ open_packet_iface(const char *key __rte_unused, /* Open an AF_PACKET socket... 
*/ *sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); if (*sockfd == -1) { - RTE_LOG(ERR, PMD, "Could not open AF_PACKET socket\n"); + PMD_LOG(ERR, "Could not open AF_PACKET socket"); return -1; } @@ -561,28 +568,20 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, break; } if (pair == NULL) { - RTE_LOG(ERR, PMD, - "%s: no interface specified for AF_PACKET ethdev\n", + PMD_LOG(ERR, + "%s: no interface specified for AF_PACKET ethdev", name); - goto error_early; + return -1; } - RTE_LOG(INFO, PMD, - "%s: creating AF_PACKET-backed ethdev on numa socket %u\n", + PMD_LOG(INFO, + "%s: creating AF_PACKET-backed ethdev on numa socket %u", name, numa_node); - /* - * now do all data allocation - for eth_dev structure, dummy pci driver - * and internal (private) data - */ - data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); - if (data == NULL) - goto error_early; - *internals = rte_zmalloc_socket(name, sizeof(**internals), 0, numa_node); if (*internals == NULL) - goto error_early; + return -1; for (q = 0; q < nb_queues; q++) { (*internals)->rx_queue[q].map = MAP_FAILED; @@ -601,27 +600,27 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, memcpy(ifr.ifr_name, pair->value, ifnamelen); ifr.ifr_name[ifnamelen] = '\0'; } else { - RTE_LOG(ERR, PMD, - "%s: I/F name too long (%s)\n", + PMD_LOG(ERR, + "%s: I/F name too long (%s)", name, pair->value); - goto error_early; + return -1; } if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) { - RTE_LOG(ERR, PMD, - "%s: ioctl failed (SIOCGIFINDEX)\n", + PMD_LOG(ERR, + "%s: ioctl failed (SIOCGIFINDEX)", name); - goto error_early; + return -1; } (*internals)->if_name = strdup(pair->value); if ((*internals)->if_name == NULL) - goto error_early; + return -1; (*internals)->if_index = ifr.ifr_ifindex; if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) { - RTE_LOG(ERR, PMD, - "%s: ioctl failed (SIOCGIFHWADDR)\n", + PMD_LOG(ERR, + "%s: ioctl failed (SIOCGIFHWADDR)", name); - goto error_early; + return -1; } memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN); @@ -642,8 +641,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, /* Open an AF_PACKET socket for this queue... 
*/ qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); if (qsockfd == -1) { - RTE_LOG(ERR, PMD, - "%s: could not open AF_PACKET socket\n", + PMD_LOG(ERR, + "%s: could not open AF_PACKET socket", name); return -1; } @@ -652,9 +651,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, rc = setsockopt(qsockfd, SOL_PACKET, PACKET_VERSION, &tpver, sizeof(tpver)); if (rc == -1) { - RTE_LOG(ERR, PMD, - "%s: could not set PACKET_VERSION on AF_PACKET " - "socket for %s\n", name, pair->value); + PMD_LOG(ERR, + "%s: could not set PACKET_VERSION on AF_PACKET socket for %s", + name, pair->value); goto error; } @@ -662,9 +661,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, rc = setsockopt(qsockfd, SOL_PACKET, PACKET_LOSS, &discard, sizeof(discard)); if (rc == -1) { - RTE_LOG(ERR, PMD, - "%s: could not set PACKET_LOSS on " - "AF_PACKET socket for %s\n", name, pair->value); + PMD_LOG(ERR, + "%s: could not set PACKET_LOSS on AF_PACKET socket for %s", + name, pair->value); goto error; } @@ -672,10 +671,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS, &qdisc_bypass, sizeof(qdisc_bypass)); if (rc == -1) { - RTE_LOG(ERR, PMD, - "%s: could not set PACKET_QDISC_BYPASS " - "on AF_PACKET socket for %s\n", name, - pair->value); + PMD_LOG(ERR, + "%s: could not set PACKET_QDISC_BYPASS on AF_PACKET socket for %s", + name, pair->value); goto error; } #else @@ -684,17 +682,17 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req)); if (rc == -1) { - RTE_LOG(ERR, PMD, - "%s: could not set PACKET_RX_RING on AF_PACKET " - "socket for %s\n", name, pair->value); + PMD_LOG(ERR, + "%s: could not set PACKET_RX_RING on AF_PACKET socket for %s", + name, pair->value); goto error; } rc = setsockopt(qsockfd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req)); if (rc == -1) { - RTE_LOG(ERR, PMD, + PMD_LOG(ERR, "%s: could not set PACKET_TX_RING on AF_PACKET " - "socket for %s\n", name, pair->value); + "socket for %s", name, pair->value); goto error; } @@ -705,8 +703,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, qsockfd, 0); if (rx_queue->map == MAP_FAILED) { - RTE_LOG(ERR, PMD, - "%s: call to mmap failed on AF_PACKET socket for %s\n", + PMD_LOG(ERR, + "%s: call to mmap failed on AF_PACKET socket for %s", name, pair->value); goto error; } @@ -742,8 +740,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr)); if (rc == -1) { - RTE_LOG(ERR, PMD, - "%s: could not bind AF_PACKET socket to %s\n", + PMD_LOG(ERR, + "%s: could not bind AF_PACKET socket to %s", name, pair->value); goto error; } @@ -752,9 +750,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, rc = setsockopt(qsockfd, SOL_PACKET, PACKET_FANOUT, &fanout_arg, sizeof(fanout_arg)); if (rc == -1) { - RTE_LOG(ERR, PMD, + PMD_LOG(ERR, "%s: could not set PACKET_FANOUT on AF_PACKET socket " - "for %s\n", name, pair->value); + "for %s", name, pair->value); goto error; } #endif @@ -775,14 +773,13 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, (*internals)->nb_queues = nb_queues; - rte_memcpy(data, (*eth_dev)->data, sizeof(*data)); + data = (*eth_dev)->data; data->dev_private = *internals; data->nb_rx_queues = (uint16_t)nb_queues; data->nb_tx_queues = (uint16_t)nb_queues; data->dev_link = pmd_link; data->mac_addrs = &(*internals)->eth_addr; - (*eth_dev)->data = data; (*eth_dev)->dev_ops = &ops; 
return 0; @@ -802,8 +799,6 @@ error: } free((*internals)->if_name); rte_free(*internals); -error_early: - rte_free(data); return -1; } @@ -837,8 +832,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev, qpairs = atoi(pair->value); if (qpairs < 1 || qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) { - RTE_LOG(ERR, PMD, - "%s: invalid qpairs value\n", + PMD_LOG(ERR, + "%s: invalid qpairs value", name); return -1; } @@ -847,8 +842,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev, if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) { blocksize = atoi(pair->value); if (!blocksize) { - RTE_LOG(ERR, PMD, - "%s: invalid blocksize value\n", + PMD_LOG(ERR, + "%s: invalid blocksize value", name); return -1; } @@ -857,8 +852,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev, if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) { framesize = atoi(pair->value); if (!framesize) { - RTE_LOG(ERR, PMD, - "%s: invalid framesize value\n", + PMD_LOG(ERR, + "%s: invalid framesize value", name); return -1; } @@ -867,8 +862,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev, if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) { framecount = atoi(pair->value); if (!framecount) { - RTE_LOG(ERR, PMD, - "%s: invalid framecount value\n", + PMD_LOG(ERR, + "%s: invalid framecount value", name); return -1; } @@ -877,8 +872,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev, if (strstr(pair->key, ETH_AF_PACKET_QDISC_BYPASS_ARG) != NULL) { qdisc_bypass = atoi(pair->value); if (qdisc_bypass > 1) { - RTE_LOG(ERR, PMD, - "%s: invalid bypass value\n", + PMD_LOG(ERR, + "%s: invalid bypass value", name); return -1; } @@ -887,24 +882,24 @@ rte_eth_from_packet(struct rte_vdev_device *dev, } if (framesize > blocksize) { - RTE_LOG(ERR, PMD, - "%s: AF_PACKET MMAP frame size exceeds block size!\n", + PMD_LOG(ERR, + "%s: AF_PACKET MMAP frame size exceeds block size!", name); return -1; } blockcount = framecount / (blocksize / framesize); if (!blockcount) { - RTE_LOG(ERR, PMD, - "%s: invalid AF_PACKET MMAP parameters\n", name); + PMD_LOG(ERR, + "%s: invalid AF_PACKET MMAP parameters", name); return -1; } - RTE_LOG(INFO, PMD, "%s: AF_PACKET MMAP parameters:\n", name); - RTE_LOG(INFO, PMD, "%s:\tblock size %d\n", name, blocksize); - RTE_LOG(INFO, PMD, "%s:\tblock count %d\n", name, blockcount); - RTE_LOG(INFO, PMD, "%s:\tframe size %d\n", name, framesize); - RTE_LOG(INFO, PMD, "%s:\tframe count %d\n", name, framecount); + PMD_LOG(INFO, "%s: AF_PACKET MMAP parameters:", name); + PMD_LOG(INFO, "%s:\tblock size %d", name, blocksize); + PMD_LOG(INFO, "%s:\tblock count %d", name, blockcount); + PMD_LOG(INFO, "%s:\tframe size %d", name, framesize); + PMD_LOG(INFO, "%s:\tframe count %d", name, framecount); if (rte_pmd_init_internals(dev, *sockfd, qpairs, blocksize, blockcount, @@ -917,6 +912,7 @@ rte_eth_from_packet(struct rte_vdev_device *dev, eth_dev->rx_pkt_burst = eth_af_packet_rx; eth_dev->tx_pkt_burst = eth_af_packet_tx; + rte_eth_dev_probing_finish(eth_dev); return 0; } @@ -926,9 +922,24 @@ rte_pmd_af_packet_probe(struct rte_vdev_device *dev) int ret = 0; struct rte_kvargs *kvlist; int sockfd = -1; + struct rte_eth_dev *eth_dev; + const char *name = rte_vdev_device_name(dev); + + PMD_LOG(INFO, "Initializing pmd_af_packet for %s", name); - RTE_LOG(INFO, PMD, "Initializing pmd_af_packet for %s\n", - rte_vdev_device_name(dev)); + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(dev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + PMD_LOG(ERR, "Failed to 
probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &ops; + eth_dev->device = &dev->device; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); if (kvlist == NULL) { @@ -966,8 +977,8 @@ rte_pmd_af_packet_remove(struct rte_vdev_device *dev) struct pmd_internals *internals; unsigned q; - RTE_LOG(INFO, PMD, "Closing AF_PACKET ethdev on numa socket %u\n", - rte_socket_id()); + PMD_LOG(INFO, "Closing AF_PACKET ethdev on numa socket %u", + rte_socket_id()); if (dev == NULL) return -1; @@ -985,7 +996,6 @@ rte_pmd_af_packet_remove(struct rte_vdev_device *dev) free(internals->if_name); rte_free(eth_dev->data->dev_private); - rte_free(eth_dev->data); rte_eth_dev_release_port(eth_dev); @@ -1006,3 +1016,10 @@ RTE_PMD_REGISTER_PARAM_STRING(net_af_packet, "framesz= " "framecnt= " "qdisc_bypass=<0|1>"); + +RTE_INIT(af_packet_init_log) +{ + af_packet_logtype = rte_log_register("pmd.net.packet"); + if (af_packet_logtype >= 0) + rte_log_set_level(af_packet_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/ark/Makefile b/drivers/net/ark/Makefile index f1433bd2..2e232be8 100644 --- a/drivers/net/ark/Makefile +++ b/drivers/net/ark/Makefile @@ -1,33 +1,5 @@ -# BSD LICENSE -# -# Copyright (c) 2015-2017 Atomic Rules LLC -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of copyright holder nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2015-2018 Atomic Rules LLC include $(RTE_SDK)/mk/rte.vars.mk diff --git a/drivers/net/ark/ark_ddm.c b/drivers/net/ark/ark_ddm.c index 929dc7d1..eea388a1 100644 --- a/drivers/net/ark/ark_ddm.c +++ b/drivers/net/ark/ark_ddm.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #include diff --git a/drivers/net/ark/ark_ddm.h b/drivers/net/ark/ark_ddm.h index f67ad012..b37d1e09 100644 --- a/drivers/net/ark/ark_ddm.h +++ b/drivers/net/ark/ark_ddm.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #ifndef _ARK_DDM_H_ diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c index ff87c20e..552ca01a 100644 --- a/drivers/net/ark/ark_ethdev.c +++ b/drivers/net/ark/ark_ethdev.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #include @@ -69,7 +40,7 @@ static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev); static int eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static void eth_ark_dev_stats_reset(struct rte_eth_dev *dev); -static void eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, +static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static int eth_ark_macaddr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, @@ -390,6 +361,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev) if (p == 0) { /* First port is already allocated by DPDK */ eth_dev = ark->eth_dev; + rte_eth_dev_probing_finish(eth_dev); continue; } @@ -422,6 +394,8 @@ eth_ark_dev_init(struct rte_eth_dev *dev) ark->user_data[eth_dev->data->port_id] = ark->user_ext.dev_init(dev, ark->a_bar, p); } + + rte_eth_dev_probing_finish(eth_dev); } return ret; @@ -771,7 +745,6 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); } static int @@ -887,16 +860,19 @@ eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) ark->user_data[dev->data->port_id]); } -static void +static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private; - if (ark->user_ext.mac_addr_set) + if (ark->user_ext.mac_addr_set) { ark->user_ext.mac_addr_set(dev, mac_addr, ark->user_data[dev->data->port_id]); + return 0; + } + return -ENOTSUP; } static int diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c index 987d085e..16f0d11e 100644 --- a/drivers/net/ark/ark_ethdev_rx.c +++ b/drivers/net/ark/ark_ethdev_rx.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #include diff --git a/drivers/net/ark/ark_ethdev_rx.h b/drivers/net/ark/ark_ethdev_rx.h index 14678711..0fdd29b1 100644 --- a/drivers/net/ark/ark_ethdev_rx.h +++ b/drivers/net/ark/ark_ethdev_rx.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #ifndef _ARK_ETHDEV_RX_H_ diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c index 4ef55d10..57188c24 100644 --- a/drivers/net/ark/ark_ethdev_tx.c +++ b/drivers/net/ark/ark_ethdev_tx.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #include diff --git a/drivers/net/ark/ark_ethdev_tx.h b/drivers/net/ark/ark_ethdev_tx.h index 657f895a..e448ce22 100644 --- a/drivers/net/ark/ark_ethdev_tx.h +++ b/drivers/net/ark/ark_ethdev_tx.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #ifndef _ARK_ETHDEV_TX_H_ diff --git a/drivers/net/ark/ark_ext.h b/drivers/net/ark/ark_ext.h index 031cfddc..f5af2153 100644 --- a/drivers/net/ark/ark_ext.h +++ b/drivers/net/ark/ark_ext.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #ifndef _ARK_EXT_H_ diff --git a/drivers/net/ark/ark_global.h b/drivers/net/ark/ark_global.h index 41eb260e..f820091d 100644 --- a/drivers/net/ark/ark_global.h +++ b/drivers/net/ark/ark_global.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #ifndef _ARK_GLOBAL_H_ diff --git a/drivers/net/ark/ark_logs.h b/drivers/net/ark/ark_logs.h index 8aff2963..b90e9f0a 100644 --- a/drivers/net/ark/ark_logs.h +++ b/drivers/net/ark/ark_logs.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #ifndef _ARK_DEBUG_H_ diff --git a/drivers/net/ark/ark_mpu.c b/drivers/net/ark/ark_mpu.c index d4ba6dc7..21f840f3 100644 --- a/drivers/net/ark/ark_mpu.c +++ b/drivers/net/ark/ark_mpu.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #include diff --git a/drivers/net/ark/ark_mpu.h b/drivers/net/ark/ark_mpu.h index f6f6c808..92c3e67c 100644 --- a/drivers/net/ark/ark_mpu.h +++ b/drivers/net/ark/ark_mpu.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #ifndef _ARK_MPU_H_ diff --git a/drivers/net/ark/ark_pktchkr.c b/drivers/net/ark/ark_pktchkr.c index 2cadaab8..c21003a0 100644 --- a/drivers/net/ark/ark_pktchkr.c +++ b/drivers/net/ark/ark_pktchkr.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #include diff --git a/drivers/net/ark/ark_pktchkr.h b/drivers/net/ark/ark_pktchkr.h index f4025dd6..a50f428b 100644 --- a/drivers/net/ark/ark_pktchkr.h +++ b/drivers/net/ark/ark_pktchkr.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #ifndef _ARK_PKTCHKR_H_ diff --git a/drivers/net/ark/ark_pktdir.c b/drivers/net/ark/ark_pktdir.c index eb47dedb..1f2c8182 100644 --- a/drivers/net/ark/ark_pktdir.c +++ b/drivers/net/ark/ark_pktdir.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #include diff --git a/drivers/net/ark/ark_pktdir.h b/drivers/net/ark/ark_pktdir.h index e13fe821..314e6dea 100644 --- a/drivers/net/ark/ark_pktdir.h +++ b/drivers/net/ark/ark_pktdir.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #ifndef _ARK_PKTDIR_H_ diff --git a/drivers/net/ark/ark_pktgen.c b/drivers/net/ark/ark_pktgen.c index d3c3dee1..2a2b428e 100644 --- a/drivers/net/ark/ark_pktgen.c +++ b/drivers/net/ark/ark_pktgen.c @@ -1,34 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #include diff --git a/drivers/net/ark/ark_pktgen.h b/drivers/net/ark/ark_pktgen.h index bf5a241b..0e5f76aa 100644 --- a/drivers/net/ark/ark_pktgen.h +++ b/drivers/net/ark/ark_pktgen.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #ifndef _ARK_PKTGEN_H_ diff --git a/drivers/net/ark/ark_rqp.c b/drivers/net/ark/ark_rqp.c index 41c497b0..bf1af4d6 100644 --- a/drivers/net/ark/ark_rqp.c +++ b/drivers/net/ark/ark_rqp.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #include diff --git a/drivers/net/ark/ark_rqp.h b/drivers/net/ark/ark_rqp.h index 0c380071..6c804606 100644 --- a/drivers/net/ark/ark_rqp.h +++ b/drivers/net/ark/ark_rqp.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #ifndef _ARK_RQP_H_ diff --git a/drivers/net/ark/ark_udm.c b/drivers/net/ark/ark_udm.c index 7a429ac7..03f1922c 100644 --- a/drivers/net/ark/ark_udm.c +++ b/drivers/net/ark/ark_udm.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #include diff --git a/drivers/net/ark/ark_udm.h b/drivers/net/ark/ark_udm.h index 915343fe..5846c825 100644 --- a/drivers/net/ark/ark_udm.h +++ b/drivers/net/ark/ark_udm.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015-2017 Atomic Rules LLC - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC */ #ifndef _ARK_UDM_H_ diff --git a/drivers/net/ark/meson.build b/drivers/net/ark/meson.build new file mode 100644 index 00000000..99151bba --- /dev/null +++ b/drivers/net/ark/meson.build @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +sources = files('ark_ddm.c', + 'ark_ethdev.c', + 'ark_ethdev_rx.c', + 'ark_ethdev_tx.c', + 'ark_mpu.c', + 'ark_pktchkr.c', + 'ark_pktdir.c', + 'ark_pktgen.c', + 'ark_rqp.c', + 'ark_udm.c') diff --git a/drivers/net/avf/avf_ethdev.c b/drivers/net/avf/avf_ethdev.c index 4df66170..3a2baaf2 100644 --- a/drivers/net/avf/avf_ethdev.c +++ b/drivers/net/avf/avf_ethdev.c @@ -65,7 +65,7 @@ static int avf_dev_rss_hash_update(struct rte_eth_dev *dev, static int avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); static int avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); -static void avf_dev_set_default_mac_addr(struct rte_eth_dev *dev, +static int avf_dev_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static int avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); @@ -339,17 +339,18 @@ static int avf_config_rx_queues_irqs(struct rte_eth_dev *dev, AVF_WRITE_FLUSH(hw); /* map all queues to the same interrupt */ for (i = 0; i < dev->data->nb_rx_queues; i++) - vf->rxq_map[0] |= 1 << i; + vf->rxq_map[vf->msix_base] |= 1 << i; } else { if (!rte_intr_allow_others(intr_handle)) { vf->nb_msix = 1; vf->msix_base = AVF_MISC_VEC_ID; for (i = 0; i < dev->data->nb_rx_queues; i++) { - vf->rxq_map[0] |= 1 << i; + vf->rxq_map[vf->msix_base] |= 1 << i; intr_handle->intr_vec[i] = AVF_MISC_VEC_ID; } PMD_DRV_LOG(DEBUG, - "vector 0 are mapping to all Rx queues"); + "vector %u are mapping to all Rx queues", + vf->msix_base); } else { /* If Rx interrupt is reuquired, and we can use * multi interrupts, then the vec is from 1 @@ -474,7 +475,7 @@ avf_dev_stop(struct rte_eth_dev *dev) { struct avf_adapter *adapter = AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); - struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev); + struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = dev->intr_handle; int ret, i; @@ -507,7 +508,6 @@ avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); memset(dev_info, 0, sizeof(*dev_info)); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs; dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs; 
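The avf_config_rx_queues_irqs hunks above replace the hard-coded vector index 0 with vf->msix_base, so the Rx-queue bitmap is attached to the MSI-X vector that was actually reserved, and the debug log now prints that vector. A minimal sketch of the mapping, assuming rxq_map[] is a per-vector bitmap of Rx queue ids as the diff suggests (the function name is hypothetical; only vf->msix_base and the bitmap indexing come from the hunk):

#include <stdint.h>

/* Hypothetical sketch: attach every Rx queue to one MSI-X vector.
 * Mirrors the fixed hunk: rxq_map[vector] |= 1 << i, not rxq_map[0].
 */
static void
map_all_rxqs_to_vector(uint16_t *rxq_map, uint16_t nb_rx_queues,
		       uint16_t vector)
{
	uint16_t i;

	for (i = 0; i < nb_rx_queues; i++)
		rxq_map[vector] |= (uint16_t)(1 << i);
}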
dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN; @@ -518,27 +518,41 @@ avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX; dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_QINQ_STRIP | DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_VLAN_FILTER; dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { .tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .offloads = 0, }; dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { @@ -608,7 +622,7 @@ avf_dev_link_update(struct rte_eth_dev *dev, new_link.link_duplex = ETH_LINK_FULL_DUPLEX; new_link.link_status = vf->link_up ? ETH_LINK_UP : ETH_LINK_DOWN; - new_link.link_autoneg = !!(dev->data->dev_conf.link_speeds & + new_link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); if (rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link, @@ -759,7 +773,7 @@ avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) /* Vlan stripping setting */ if (mask & ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping */ - if (dev_conf->rxmode.hw_vlan_strip) + if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) err = avf_enable_vlan_strip(adapter); else err = avf_disable_vlan_strip(adapter); @@ -926,7 +940,7 @@ avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return ret; } -static void +static int avf_dev_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { @@ -940,11 +954,11 @@ avf_dev_set_default_mac_addr(struct rte_eth_dev *dev, perm_addr = (struct ether_addr *)hw->mac.perm_addr; if (is_same_ether_addr(mac_addr, old_addr)) - return; + return 0; /* If the MAC address is configured by host, skip the setting */ if (is_valid_assigned_ether_addr(perm_addr)) - return; + return -EPERM; ret = avf_add_del_eth_addr(adapter, old_addr, FALSE); if (ret) @@ -968,7 +982,11 @@ avf_dev_set_default_mac_addr(struct rte_eth_dev *dev, mac_addr->addr_bytes[4], mac_addr->addr_bytes[5]); + if (ret) + return -EIO; + ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr); + return 0; } static int @@ -1339,9 +1357,7 @@ static struct rte_pci_driver rte_avf_pmd = { RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map); RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci"); -RTE_INIT(avf_init_log); -static void -avf_init_log(void) +RTE_INIT(avf_init_log) { avf_logtype_init = rte_log_register("pmd.net.avf.init"); if (avf_logtype_init >= 0) @@ -1365,8 +1381,8 @@ avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw, return AVF_ERR_PARAM; snprintf(z_name, sizeof(z_name), 
"avf_dma_%"PRIu64, rte_rand()); - mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0, - alignment, RTE_PGSIZE_2M); + mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M); if (!mz) return AVF_ERR_NO_MEMORY; diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c index d276d975..e03a136f 100644 --- a/drivers/net/avf/avf_rxtx.c +++ b/drivers/net/avf/avf_rxtx.c @@ -109,7 +109,7 @@ check_rx_vec_allow(struct avf_rx_queue *rxq) static inline bool check_tx_vec_allow(struct avf_tx_queue *txq) { - if ((txq->txq_flags & AVF_SIMPLE_FLAGS) == AVF_SIMPLE_FLAGS && + if (!(txq->offloads & AVF_NO_VECTOR_FLAGS) && txq->rs_thresh >= AVF_VPMD_TX_MAX_BURST && txq->rs_thresh <= AVF_VPMD_TX_MAX_FREE_BUF) { PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq."); @@ -435,9 +435,12 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint32_t ring_size; uint16_t tx_rs_thresh, tx_free_thresh; uint16_t i, base, bsf, tc_mapping; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + if (nb_desc % AVF_ALIGN_RING_DESC != 0 || nb_desc > AVF_MAX_RING_DESC || nb_desc < AVF_MIN_RING_DESC) { @@ -474,7 +477,7 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->free_thresh = tx_free_thresh; txq->queue_id = queue_idx; txq->port_id = dev->data->port_id; - txq->txq_flags = tx_conf->txq_flags; + txq->offloads = offloads; txq->tx_deferred_start = tx_conf->tx_deferred_start; /* Allocate software ring */ @@ -1831,7 +1834,7 @@ avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_free_thresh = txq->free_thresh; qinfo->conf.tx_rs_thresh = txq->rs_thresh; - qinfo->conf.txq_flags = txq->txq_flags; + qinfo->conf.offloads = txq->offloads; qinfo->conf.tx_deferred_start = txq->tx_deferred_start; } diff --git a/drivers/net/avf/avf_rxtx.h b/drivers/net/avf/avf_rxtx.h index d1701cd6..297d0776 100644 --- a/drivers/net/avf/avf_rxtx.h +++ b/drivers/net/avf/avf_rxtx.h @@ -22,8 +22,12 @@ #define AVF_VPMD_DESCS_PER_LOOP 4 #define AVF_VPMD_TX_MAX_FREE_BUF 64 -#define AVF_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ - ETH_TXQ_FLAGS_NOOFFLOADS) +#define AVF_NO_VECTOR_FLAGS ( \ + DEV_TX_OFFLOAD_MULTI_SEGS | \ + DEV_TX_OFFLOAD_VLAN_INSERT | \ + DEV_TX_OFFLOAD_SCTP_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_CKSUM) #define DEFAULT_TX_RS_THRESH 32 #define DEFAULT_TX_FREE_THRESH 32 @@ -125,7 +129,7 @@ struct avf_tx_queue { uint16_t port_id; uint16_t queue_id; - uint32_t txq_flags; + uint64_t offloads; uint16_t next_dd; /* next to set RS, for VPMD */ uint16_t next_rs; /* next to check DD, for VPMD */ diff --git a/drivers/net/avp/Makefile b/drivers/net/avp/Makefile index c29ecf45..c9db667f 100644 --- a/drivers/net/avp/Makefile +++ b/drivers/net/avp/Makefile @@ -1,34 +1,5 @@ -# BSD LICENSE -# -# Copyright(c) 2013-2017, Wind River Systems, Inc. All rights reserved. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. 
-# * Neither the name of Wind River Systems nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2013-2017, Wind River Systems, Inc. include $(RTE_SDK)/mk/rte.vars.mk diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c index dba99120..761f6c1c 100644 --- a/drivers/net/avp/avp_ethdev.c +++ b/drivers/net/avp/avp_ethdev.c @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (c) 2013-2017, Wind River Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1) Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2) Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * 3) Neither the name of Wind River Systems nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013-2017 Wind River Systems, Inc. 
*/ #include @@ -411,7 +383,7 @@ avp_dev_translate_address(struct rte_eth_dev *eth_dev, (host_phys_addr < (map->phys_addr + map->length))) { /* address is within this segment */ offset += (host_phys_addr - map->phys_addr); - addr = RTE_PTR_ADD(addr, offset); + addr = RTE_PTR_ADD(addr, (uintptr_t)offset); PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n", host_phys_addr, addr); @@ -1076,19 +1048,8 @@ static int eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { - struct rte_eth_dev *eth_dev; - int ret; - - eth_dev = rte_eth_dev_pci_allocate(pci_dev, - sizeof(struct avp_adapter)); - if (eth_dev == NULL) - return -ENOMEM; - - ret = eth_avp_dev_init(eth_dev); - if (ret) - rte_eth_dev_pci_release(eth_dev); - - return ret; + return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct avp_adapter), + eth_avp_dev_init); } static int @@ -2074,12 +2035,6 @@ avp_dev_start(struct rte_eth_dev *eth_dev) goto unlock; } - /* disable features that we do not support */ - eth_dev->data->dev_conf.rxmode.hw_ip_checksum = 0; - eth_dev->data->dev_conf.rxmode.hw_vlan_filter = 0; - eth_dev->data->dev_conf.rxmode.hw_vlan_extend = 0; - eth_dev->data->dev_conf.rxmode.hw_strip_crc = 0; - /* update link state */ ret = avp_dev_ctrl_set_link_state(eth_dev, 1); if (ret < 0) { @@ -2206,7 +2161,6 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev, { struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); dev_info->max_rx_queues = avp->max_rx_queues; dev_info->max_tx_queues = avp->max_tx_queues; dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE; @@ -2216,16 +2170,19 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev, dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT; } + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_CRC_STRIP; } static int avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) { struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf; + uint64_t offloads = dev_conf->rxmode.offloads; if (mask & ETH_VLAN_STRIP_MASK) { if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) { - if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip) + if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP) avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD; else avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD; @@ -2235,12 +2192,12 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) } if (mask & ETH_VLAN_FILTER_MASK) { - if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter) + if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n"); } if (mask & ETH_VLAN_EXTEND_MASK) { - if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend) + if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n"); } @@ -2314,9 +2271,7 @@ avp_dev_stats_reset(struct rte_eth_dev *eth_dev) RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map); -RTE_INIT(avp_init_log); -static void -avp_init_log(void) +RTE_INIT(avp_init_log) { avp_logtype_driver = rte_log_register("pmd.net.avp.driver"); if (avp_logtype_driver >= 0) diff --git a/drivers/net/avp/avp_logs.h b/drivers/net/avp/avp_logs.h index e29394d5..6e297c7a 100644 --- a/drivers/net/avp/avp_logs.h +++ b/drivers/net/avp/avp_logs.h @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (c) 2013-2015, Wind River Systems, Inc. 
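A pattern recurs across the avf and avp hunks above: fixed-function flags (rxmode.hw_vlan_strip, hw_vlan_filter, Tx txq_flags such as ETH_TXQ_FLAGS_NOMULTSEGS) give way to uint64_t offload masks tested against DEV_RX_OFFLOAD_*/DEV_TX_OFFLOAD_* bits, and queue setup ORs the per-queue request into the per-port one, as in the avf hunk: offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads. A self-contained sketch of the consumer side; the flag value mirrors this DPDK era's DEV_TX_OFFLOAD_MULTI_SEGS but should be treated as an assumption, and the function is illustrative, not driver code:

#include <stdint.h>
#include <stdbool.h>

#define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000ULL	/* assumed value */

/* Combine per-queue and per-port Tx offload requests, then test one bit. */
static bool
txq_needs_multiseg(uint64_t queue_offloads, uint64_t port_offloads)
{
	uint64_t offloads = queue_offloads | port_offloads;

	return (offloads & DEV_TX_OFFLOAD_MULTI_SEGS) != 0;
}

The same test drives check_tx_vec_allow() in the avf_rxtx.c hunk above: vector Tx stays enabled only when none of the AVF_NO_VECTOR_FLAGS bits are requested.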
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1) Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2) Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * 3) Neither the name of Wind River Systems nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013-2017 Wind River Systems, Inc. */ #ifndef _AVP_LOGS_H_ diff --git a/drivers/net/avp/meson.build b/drivers/net/avp/meson.build new file mode 100644 index 00000000..6076c31b --- /dev/null +++ b/drivers/net/avp/meson.build @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +sources = files('avp_ethdev.c') +install_headers('rte_avp_common.h', 'rte_avp_fifo.h') diff --git a/drivers/net/avp/rte_avp_common.h b/drivers/net/avp/rte_avp_common.h index 81dfe5ea..aa95159c 100644 --- a/drivers/net/avp/rte_avp_common.h +++ b/drivers/net/avp/rte_avp_common.h @@ -1,57 +1,6 @@ -/*- - * This file is provided under a dual BSD/LGPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GNU LESSER GENERAL PUBLIC LICENSE - * - * Copyright(c) 2010-2013 Intel Corporation. All rights reserved. - * Copyright(c) 2014-2017 Wind River Systems, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2.1 of the GNU Lesser General Public License - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * Contact Information: - * Wind River Systems, Inc. - * - * - * BSD LICENSE - * - * Copyright(c) 2010-2013 Intel Corporation. All rights reserved. - * Copyright(c) 2014-2017 Wind River Systems, Inc. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * +/* SPDX-License-Identifier: (BSD-3-Clause OR LGPL-2.1) + * Copyright(c) 2010-2013 Intel Corporation. + * Copyright(c) 2014-2017 Wind River Systems, Inc. */ #ifndef _RTE_AVP_COMMON_H_ diff --git a/drivers/net/avp/rte_avp_fifo.h b/drivers/net/avp/rte_avp_fifo.h index 803eb80a..c1658da6 100644 --- a/drivers/net/avp/rte_avp_fifo.h +++ b/drivers/net/avp/rte_avp_fifo.h @@ -1,57 +1,6 @@ -/*- - * This file is provided under a dual BSD/LGPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GNU LESSER GENERAL PUBLIC LICENSE - * - * Copyright(c) 2010-2013 Intel Corporation. All rights reserved. - * Copyright(c) 2014 Wind River Systems, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2.1 of the GNU Lesser General Public License - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * Contact Information: - * Wind River Systems, Inc. - * - * - * BSD LICENSE - * - * Copyright(c) 2010-2013 Intel Corporation. All rights reserved. - * Copyright(c) 2013-2017 Wind River Systems, Inc. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * +/* SPDX-License-Identifier: (BSD-3-Clause OR LGPL-2.1) + * Copyright(c) 2010-2013 Intel Corporation. + * Copyright(c) 2013-2017 Wind River Systems, Inc. */ #ifndef _RTE_AVP_FIFO_H_ diff --git a/drivers/net/axgbe/Makefile b/drivers/net/axgbe/Makefile new file mode 100644 index 00000000..72215aed --- /dev/null +++ b/drivers/net/axgbe/Makefile @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_axgbe.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_axgbe_version.map + +LIBABIVER := 1 + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool +LDLIBS += -lrte_pci -lrte_bus_pci +LDLIBS += -lrte_ethdev + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_dev.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_mdio.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_phy_impl.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_i2c.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx.c +ifeq ($(CONFIG_RTE_ARCH_X86),y) +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx_vec_sse.c +endif + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h new file mode 100644 index 00000000..d25d54ca --- /dev/null +++ b/drivers/net/axgbe/axgbe_common.h @@ -0,0 +1,1710 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
+ */ + +#ifndef __AXGBE_COMMON_H__ +#define __AXGBE_COMMON_H__ + +#include "axgbe_logs.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BIT(nr) (1 << (nr)) +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#endif + +#define AXGBE_HZ 250 + +/* DMA register offsets */ +#define DMA_MR 0x3000 +#define DMA_SBMR 0x3004 +#define DMA_ISR 0x3008 +#define DMA_AXIARCR 0x3010 +#define DMA_AXIAWCR 0x3018 +#define DMA_AXIAWRCR 0x301c +#define DMA_DSR0 0x3020 +#define DMA_DSR1 0x3024 +#define EDMA_TX_CONTROL 0x3040 +#define EDMA_RX_CONTROL 0x3044 + +/* DMA register entry bit positions and sizes */ +#define DMA_AXIARCR_DRC_INDEX 0 +#define DMA_AXIARCR_DRC_WIDTH 4 +#define DMA_AXIARCR_DRD_INDEX 4 +#define DMA_AXIARCR_DRD_WIDTH 2 +#define DMA_AXIARCR_TEC_INDEX 8 +#define DMA_AXIARCR_TEC_WIDTH 4 +#define DMA_AXIARCR_TED_INDEX 12 +#define DMA_AXIARCR_TED_WIDTH 2 +#define DMA_AXIARCR_THC_INDEX 16 +#define DMA_AXIARCR_THC_WIDTH 4 +#define DMA_AXIARCR_THD_INDEX 20 +#define DMA_AXIARCR_THD_WIDTH 2 +#define DMA_AXIAWCR_DWC_INDEX 0 +#define DMA_AXIAWCR_DWC_WIDTH 4 +#define DMA_AXIAWCR_DWD_INDEX 4 +#define DMA_AXIAWCR_DWD_WIDTH 2 +#define DMA_AXIAWCR_RPC_INDEX 8 +#define DMA_AXIAWCR_RPC_WIDTH 4 +#define DMA_AXIAWCR_RPD_INDEX 12 +#define DMA_AXIAWCR_RPD_WIDTH 2 +#define DMA_AXIAWCR_RHC_INDEX 16 +#define DMA_AXIAWCR_RHC_WIDTH 4 +#define DMA_AXIAWCR_RHD_INDEX 20 +#define DMA_AXIAWCR_RHD_WIDTH 2 +#define DMA_AXIAWCR_RDC_INDEX 24 +#define DMA_AXIAWCR_RDC_WIDTH 4 +#define DMA_AXIAWCR_RDD_INDEX 28 +#define DMA_AXIAWCR_RDD_WIDTH 2 +#define DMA_AXIAWRCR_TDWC_INDEX 0 +#define DMA_AXIAWRCR_TDWC_WIDTH 4 +#define DMA_AXIAWRCR_TDWD_INDEX 4 +#define DMA_AXIAWRCR_TDWD_WIDTH 4 +#define DMA_AXIAWRCR_RDRC_INDEX 8 +#define DMA_AXIAWRCR_RDRC_WIDTH 4 +#define DMA_ISR_MACIS_INDEX 17 +#define DMA_ISR_MACIS_WIDTH 1 +#define DMA_ISR_MTLIS_INDEX 16 +#define DMA_ISR_MTLIS_WIDTH 1 +#define DMA_MR_INTM_INDEX 12 +#define DMA_MR_INTM_WIDTH 2 +#define DMA_MR_SWR_INDEX 0 +#define DMA_MR_SWR_WIDTH 1 +#define DMA_SBMR_WR_OSR_INDEX 24 +#define DMA_SBMR_WR_OSR_WIDTH 6 +#define DMA_SBMR_RD_OSR_INDEX 16 +#define DMA_SBMR_RD_OSR_WIDTH 6 +#define DMA_SBMR_AAL_INDEX 12 +#define DMA_SBMR_AAL_WIDTH 1 +#define DMA_SBMR_EAME_INDEX 11 +#define DMA_SBMR_EAME_WIDTH 1 +#define DMA_SBMR_BLEN_256_INDEX 7 +#define DMA_SBMR_BLEN_256_WIDTH 1 +#define DMA_SBMR_BLEN_32_INDEX 4 +#define DMA_SBMR_BLEN_32_WIDTH 1 +#define DMA_SBMR_UNDEF_INDEX 0 +#define DMA_SBMR_UNDEF_WIDTH 1 + +/* DMA register values */ +#define DMA_DSR_RPS_WIDTH 4 +#define DMA_DSR_TPS_WIDTH 4 +#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH) +#define DMA_DSR0_RPS_START 8 +#define DMA_DSR0_TPS_START 12 +#define DMA_DSRX_FIRST_QUEUE 3 +#define DMA_DSRX_INC 4 +#define DMA_DSRX_QPR 4 +#define DMA_DSRX_RPS_START 0 +#define DMA_DSRX_TPS_START 4 +#define DMA_TPS_STOPPED 0x00 +#define DMA_TPS_SUSPENDED 0x06 + +/* DMA channel register offsets + * Multiple channels can be active. The first channel has registers + * that begin at 0x3100. Each subsequent channel has registers that + * are accessed using an offset of 0x80 from the previous channel. 
+ */ +#define DMA_CH_BASE 0x3100 +#define DMA_CH_INC 0x80 + +#define DMA_CH_CR 0x00 +#define DMA_CH_TCR 0x04 +#define DMA_CH_RCR 0x08 +#define DMA_CH_TDLR_HI 0x10 +#define DMA_CH_TDLR_LO 0x14 +#define DMA_CH_RDLR_HI 0x18 +#define DMA_CH_RDLR_LO 0x1c +#define DMA_CH_TDTR_LO 0x24 +#define DMA_CH_RDTR_LO 0x2c +#define DMA_CH_TDRLR 0x30 +#define DMA_CH_RDRLR 0x34 +#define DMA_CH_IER 0x38 +#define DMA_CH_RIWT 0x3c +#define DMA_CH_CATDR_LO 0x44 +#define DMA_CH_CARDR_LO 0x4c +#define DMA_CH_CATBR_HI 0x50 +#define DMA_CH_CATBR_LO 0x54 +#define DMA_CH_CARBR_HI 0x58 +#define DMA_CH_CARBR_LO 0x5c +#define DMA_CH_SR 0x60 + +/* DMA channel register entry bit positions and sizes */ +#define DMA_CH_CR_PBLX8_INDEX 16 +#define DMA_CH_CR_PBLX8_WIDTH 1 +#define DMA_CH_CR_SPH_INDEX 24 +#define DMA_CH_CR_SPH_WIDTH 1 +#define DMA_CH_IER_AIE_INDEX 14 +#define DMA_CH_IER_AIE_WIDTH 1 +#define DMA_CH_IER_FBEE_INDEX 12 +#define DMA_CH_IER_FBEE_WIDTH 1 +#define DMA_CH_IER_NIE_INDEX 15 +#define DMA_CH_IER_NIE_WIDTH 1 +#define DMA_CH_IER_RBUE_INDEX 7 +#define DMA_CH_IER_RBUE_WIDTH 1 +#define DMA_CH_IER_RIE_INDEX 6 +#define DMA_CH_IER_RIE_WIDTH 1 +#define DMA_CH_IER_RSE_INDEX 8 +#define DMA_CH_IER_RSE_WIDTH 1 +#define DMA_CH_IER_TBUE_INDEX 2 +#define DMA_CH_IER_TBUE_WIDTH 1 +#define DMA_CH_IER_TIE_INDEX 0 +#define DMA_CH_IER_TIE_WIDTH 1 +#define DMA_CH_IER_TXSE_INDEX 1 +#define DMA_CH_IER_TXSE_WIDTH 1 +#define DMA_CH_RCR_PBL_INDEX 16 +#define DMA_CH_RCR_PBL_WIDTH 6 +#define DMA_CH_RCR_RBSZ_INDEX 1 +#define DMA_CH_RCR_RBSZ_WIDTH 14 +#define DMA_CH_RCR_SR_INDEX 0 +#define DMA_CH_RCR_SR_WIDTH 1 +#define DMA_CH_RIWT_RWT_INDEX 0 +#define DMA_CH_RIWT_RWT_WIDTH 8 +#define DMA_CH_SR_FBE_INDEX 12 +#define DMA_CH_SR_FBE_WIDTH 1 +#define DMA_CH_SR_RBU_INDEX 7 +#define DMA_CH_SR_RBU_WIDTH 1 +#define DMA_CH_SR_RI_INDEX 6 +#define DMA_CH_SR_RI_WIDTH 1 +#define DMA_CH_SR_RPS_INDEX 8 +#define DMA_CH_SR_RPS_WIDTH 1 +#define DMA_CH_SR_TBU_INDEX 2 +#define DMA_CH_SR_TBU_WIDTH 1 +#define DMA_CH_SR_TI_INDEX 0 +#define DMA_CH_SR_TI_WIDTH 1 +#define DMA_CH_SR_TPS_INDEX 1 +#define DMA_CH_SR_TPS_WIDTH 1 +#define DMA_CH_TCR_OSP_INDEX 4 +#define DMA_CH_TCR_OSP_WIDTH 1 +#define DMA_CH_TCR_PBL_INDEX 16 +#define DMA_CH_TCR_PBL_WIDTH 6 +#define DMA_CH_TCR_ST_INDEX 0 +#define DMA_CH_TCR_ST_WIDTH 1 +#define DMA_CH_TCR_TSE_INDEX 12 +#define DMA_CH_TCR_TSE_WIDTH 1 + +/* DMA channel register values */ +#define DMA_OSP_DISABLE 0x00 +#define DMA_OSP_ENABLE 0x01 +#define DMA_PBL_1 1 +#define DMA_PBL_2 2 +#define DMA_PBL_4 4 +#define DMA_PBL_8 8 +#define DMA_PBL_16 16 +#define DMA_PBL_32 32 +#define DMA_PBL_64 64 /* 8 x 8 */ +#define DMA_PBL_128 128 /* 8 x 16 */ +#define DMA_PBL_256 256 /* 8 x 32 */ +#define DMA_PBL_X8_DISABLE 0x00 +#define DMA_PBL_X8_ENABLE 0x01 + +/* MAC register offsets */ +#define MAC_TCR 0x0000 +#define MAC_RCR 0x0004 +#define MAC_PFR 0x0008 +#define MAC_WTR 0x000c +#define MAC_HTR0 0x0010 +#define MAC_VLANTR 0x0050 +#define MAC_VLANHTR 0x0058 +#define MAC_VLANIR 0x0060 +#define MAC_IVLANIR 0x0064 +#define MAC_RETMR 0x006c +#define MAC_Q0TFCR 0x0070 +#define MAC_RFCR 0x0090 +#define MAC_RQC0R 0x00a0 +#define MAC_RQC1R 0x00a4 +#define MAC_RQC2R 0x00a8 +#define MAC_RQC3R 0x00ac +#define MAC_ISR 0x00b0 +#define MAC_IER 0x00b4 +#define MAC_RTSR 0x00b8 +#define MAC_PMTCSR 0x00c0 +#define MAC_RWKPFR 0x00c4 +#define MAC_LPICSR 0x00d0 +#define MAC_LPITCR 0x00d4 +#define MAC_VR 0x0110 +#define MAC_DR 0x0114 +#define MAC_HWF0R 0x011c +#define MAC_HWF1R 0x0120 +#define MAC_HWF2R 0x0124 +#define MAC_MDIOSCAR 0x0200 +#define MAC_MDIOSCCDR 0x0204 
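The comment in axgbe_common.h above spells out the channel register layout: channel n's registers start at DMA_CH_BASE + n * DMA_CH_INC (0x3100, stride 0x80), and the *_INDEX/*_WIDTH pairs describe bit-fields within those registers. A small sketch of how such definitions are typically consumed; the helper functions are illustrative and not part of the file, while the #define values are copied from it:

#include <stdint.h>

#define DMA_CH_BASE		0x3100	/* from axgbe_common.h above */
#define DMA_CH_INC		0x80
#define DMA_CH_IER_RIE_INDEX	6
#define DMA_CH_IER_RIE_WIDTH	1

/* Byte offset of per-channel register 'reg' for channel 'ch'. */
static inline uint32_t
dma_ch_reg(unsigned int ch, uint32_t reg)
{
	return DMA_CH_BASE + ch * DMA_CH_INC + reg;
}

/* Extract a field described by an (INDEX, WIDTH) pair. */
static inline uint32_t
get_bits(uint32_t val, unsigned int index, unsigned int width)
{
	return (val >> index) & ((1U << width) - 1);
}

/* Example: Rx-interrupt-enable bit of a DMA_CH_IER register value. */
static inline uint32_t
dma_ch_ier_rie(uint32_t ier)
{
	return get_bits(ier, DMA_CH_IER_RIE_INDEX, DMA_CH_IER_RIE_WIDTH);
}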
+#define MAC_MDIOISR 0x0214 +#define MAC_MDIOIER 0x0218 +#define MAC_MDIOCL22R 0x0220 +#define MAC_GPIOCR 0x0278 +#define MAC_GPIOSR 0x027c +#define MAC_MACA0HR 0x0300 +#define MAC_MACA0LR 0x0304 +#define MAC_MACA1HR 0x0308 +#define MAC_MACA1LR 0x030c +#define MAC_RSSCR 0x0c80 +#define MAC_RSSAR 0x0c88 +#define MAC_RSSDR 0x0c8c +#define MAC_TSCR 0x0d00 +#define MAC_SSIR 0x0d04 +#define MAC_STSR 0x0d08 +#define MAC_STNR 0x0d0c +#define MAC_STSUR 0x0d10 +#define MAC_STNUR 0x0d14 +#define MAC_TSAR 0x0d18 +#define MAC_TSSR 0x0d20 +#define MAC_TXSNR 0x0d30 +#define MAC_TXSSR 0x0d34 + +#define MAC_QTFCR_INC 4 +#define MAC_MACA_INC 4 +#define MAC_HTR_INC 4 + +#define MAC_RQC2_INC 4 +#define MAC_RQC2_Q_PER_REG 4 + +/* MAC register entry bit positions and sizes */ +#define MAC_HWF0R_ADDMACADRSEL_INDEX 18 +#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5 +#define MAC_HWF0R_ARPOFFSEL_INDEX 9 +#define MAC_HWF0R_ARPOFFSEL_WIDTH 1 +#define MAC_HWF0R_EEESEL_INDEX 13 +#define MAC_HWF0R_EEESEL_WIDTH 1 +#define MAC_HWF0R_GMIISEL_INDEX 1 +#define MAC_HWF0R_GMIISEL_WIDTH 1 +#define MAC_HWF0R_MGKSEL_INDEX 7 +#define MAC_HWF0R_MGKSEL_WIDTH 1 +#define MAC_HWF0R_MMCSEL_INDEX 8 +#define MAC_HWF0R_MMCSEL_WIDTH 1 +#define MAC_HWF0R_RWKSEL_INDEX 6 +#define MAC_HWF0R_RWKSEL_WIDTH 1 +#define MAC_HWF0R_RXCOESEL_INDEX 16 +#define MAC_HWF0R_RXCOESEL_WIDTH 1 +#define MAC_HWF0R_SAVLANINS_INDEX 27 +#define MAC_HWF0R_SAVLANINS_WIDTH 1 +#define MAC_HWF0R_SMASEL_INDEX 5 +#define MAC_HWF0R_SMASEL_WIDTH 1 +#define MAC_HWF0R_TSSEL_INDEX 12 +#define MAC_HWF0R_TSSEL_WIDTH 1 +#define MAC_HWF0R_TSSTSSEL_INDEX 25 +#define MAC_HWF0R_TSSTSSEL_WIDTH 2 +#define MAC_HWF0R_TXCOESEL_INDEX 14 +#define MAC_HWF0R_TXCOESEL_WIDTH 1 +#define MAC_HWF0R_VLHASH_INDEX 4 +#define MAC_HWF0R_VLHASH_WIDTH 1 +#define MAC_HWF1R_ADDR64_INDEX 14 +#define MAC_HWF1R_ADDR64_WIDTH 2 +#define MAC_HWF1R_ADVTHWORD_INDEX 13 +#define MAC_HWF1R_ADVTHWORD_WIDTH 1 +#define MAC_HWF1R_DBGMEMA_INDEX 19 +#define MAC_HWF1R_DBGMEMA_WIDTH 1 +#define MAC_HWF1R_DCBEN_INDEX 16 +#define MAC_HWF1R_DCBEN_WIDTH 1 +#define MAC_HWF1R_HASHTBLSZ_INDEX 24 +#define MAC_HWF1R_HASHTBLSZ_WIDTH 3 +#define MAC_HWF1R_L3L4FNUM_INDEX 27 +#define MAC_HWF1R_L3L4FNUM_WIDTH 4 +#define MAC_HWF1R_NUMTC_INDEX 21 +#define MAC_HWF1R_NUMTC_WIDTH 3 +#define MAC_HWF1R_RSSEN_INDEX 20 +#define MAC_HWF1R_RSSEN_WIDTH 1 +#define MAC_HWF1R_RXFIFOSIZE_INDEX 0 +#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5 +#define MAC_HWF1R_SPHEN_INDEX 17 +#define MAC_HWF1R_SPHEN_WIDTH 1 +#define MAC_HWF1R_TSOEN_INDEX 18 +#define MAC_HWF1R_TSOEN_WIDTH 1 +#define MAC_HWF1R_TXFIFOSIZE_INDEX 6 +#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5 +#define MAC_HWF2R_AUXSNAPNUM_INDEX 28 +#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3 +#define MAC_HWF2R_PPSOUTNUM_INDEX 24 +#define MAC_HWF2R_PPSOUTNUM_WIDTH 3 +#define MAC_HWF2R_RXCHCNT_INDEX 12 +#define MAC_HWF2R_RXCHCNT_WIDTH 4 +#define MAC_HWF2R_RXQCNT_INDEX 0 +#define MAC_HWF2R_RXQCNT_WIDTH 4 +#define MAC_HWF2R_TXCHCNT_INDEX 18 +#define MAC_HWF2R_TXCHCNT_WIDTH 4 +#define MAC_HWF2R_TXQCNT_INDEX 6 +#define MAC_HWF2R_TXQCNT_WIDTH 4 +#define MAC_IER_TSIE_INDEX 12 +#define MAC_IER_TSIE_WIDTH 1 +#define MAC_ISR_MMCRXIS_INDEX 9 +#define MAC_ISR_MMCRXIS_WIDTH 1 +#define MAC_ISR_MMCTXIS_INDEX 10 +#define MAC_ISR_MMCTXIS_WIDTH 1 +#define MAC_ISR_PMTIS_INDEX 4 +#define MAC_ISR_PMTIS_WIDTH 1 +#define MAC_ISR_SMI_INDEX 1 +#define MAC_ISR_SMI_WIDTH 1 +#define MAC_ISR_LSI_INDEX 0 +#define MAC_ISR_LSI_WIDTH 1 +#define MAC_ISR_LS_INDEX 24 +#define MAC_ISR_LS_WIDTH 2 +#define MAC_ISR_TSIS_INDEX 12 +#define MAC_ISR_TSIS_WIDTH 1 +#define 
MAC_MACA1HR_AE_INDEX 31 +#define MAC_MACA1HR_AE_WIDTH 1 +#define MAC_MDIOIER_SNGLCOMPIE_INDEX 12 +#define MAC_MDIOIER_SNGLCOMPIE_WIDTH 1 +#define MAC_MDIOISR_SNGLCOMPINT_INDEX 12 +#define MAC_MDIOISR_SNGLCOMPINT_WIDTH 1 +#define MAC_MDIOSCAR_DA_INDEX 21 +#define MAC_MDIOSCAR_DA_WIDTH 5 +#define MAC_MDIOSCAR_PA_INDEX 16 +#define MAC_MDIOSCAR_PA_WIDTH 5 +#define MAC_MDIOSCAR_RA_INDEX 0 +#define MAC_MDIOSCAR_RA_WIDTH 16 +#define MAC_MDIOSCAR_REG_INDEX 0 +#define MAC_MDIOSCAR_REG_WIDTH 21 +#define MAC_MDIOSCCDR_BUSY_INDEX 22 +#define MAC_MDIOSCCDR_BUSY_WIDTH 1 +#define MAC_MDIOSCCDR_CMD_INDEX 16 +#define MAC_MDIOSCCDR_CMD_WIDTH 2 +#define MAC_MDIOSCCDR_CR_INDEX 19 +#define MAC_MDIOSCCDR_CR_WIDTH 3 +#define MAC_MDIOSCCDR_DATA_INDEX 0 +#define MAC_MDIOSCCDR_DATA_WIDTH 16 +#define MAC_MDIOSCCDR_SADDR_INDEX 18 +#define MAC_MDIOSCCDR_SADDR_WIDTH 1 +#define MAC_PFR_HMC_INDEX 2 +#define MAC_PFR_HMC_WIDTH 1 +#define MAC_PFR_HPF_INDEX 10 +#define MAC_PFR_HPF_WIDTH 1 +#define MAC_PFR_HUC_INDEX 1 +#define MAC_PFR_HUC_WIDTH 1 +#define MAC_PFR_PM_INDEX 4 +#define MAC_PFR_PM_WIDTH 1 +#define MAC_PFR_PR_INDEX 0 +#define MAC_PFR_PR_WIDTH 1 +#define MAC_PFR_VTFE_INDEX 16 +#define MAC_PFR_VTFE_WIDTH 1 +#define MAC_PMTCSR_MGKPKTEN_INDEX 1 +#define MAC_PMTCSR_MGKPKTEN_WIDTH 1 +#define MAC_PMTCSR_PWRDWN_INDEX 0 +#define MAC_PMTCSR_PWRDWN_WIDTH 1 +#define MAC_PMTCSR_RWKFILTRST_INDEX 31 +#define MAC_PMTCSR_RWKFILTRST_WIDTH 1 +#define MAC_PMTCSR_RWKPKTEN_INDEX 2 +#define MAC_PMTCSR_RWKPKTEN_WIDTH 1 +#define MAC_Q0TFCR_PT_INDEX 16 +#define MAC_Q0TFCR_PT_WIDTH 16 +#define MAC_Q0TFCR_TFE_INDEX 1 +#define MAC_Q0TFCR_TFE_WIDTH 1 +#define MAC_RCR_ACS_INDEX 1 +#define MAC_RCR_ACS_WIDTH 1 +#define MAC_RCR_CST_INDEX 2 +#define MAC_RCR_CST_WIDTH 1 +#define MAC_RCR_DCRCC_INDEX 3 +#define MAC_RCR_DCRCC_WIDTH 1 +#define MAC_RCR_HDSMS_INDEX 12 +#define MAC_RCR_HDSMS_WIDTH 3 +#define MAC_RCR_IPC_INDEX 9 +#define MAC_RCR_IPC_WIDTH 1 +#define MAC_RCR_JE_INDEX 8 +#define MAC_RCR_JE_WIDTH 1 +#define MAC_RCR_LM_INDEX 10 +#define MAC_RCR_LM_WIDTH 1 +#define MAC_RCR_RE_INDEX 0 +#define MAC_RCR_RE_WIDTH 1 +#define MAC_RFCR_PFCE_INDEX 8 +#define MAC_RFCR_PFCE_WIDTH 1 +#define MAC_RFCR_RFE_INDEX 0 +#define MAC_RFCR_RFE_WIDTH 1 +#define MAC_RFCR_UP_INDEX 1 +#define MAC_RFCR_UP_WIDTH 1 +#define MAC_RQC0R_RXQ0EN_INDEX 0 +#define MAC_RQC0R_RXQ0EN_WIDTH 2 +#define MAC_RSSAR_ADDRT_INDEX 2 +#define MAC_RSSAR_ADDRT_WIDTH 1 +#define MAC_RSSAR_CT_INDEX 1 +#define MAC_RSSAR_CT_WIDTH 1 +#define MAC_RSSAR_OB_INDEX 0 +#define MAC_RSSAR_OB_WIDTH 1 +#define MAC_RSSAR_RSSIA_INDEX 8 +#define MAC_RSSAR_RSSIA_WIDTH 8 +#define MAC_RSSCR_IP2TE_INDEX 1 +#define MAC_RSSCR_IP2TE_WIDTH 1 +#define MAC_RSSCR_RSSE_INDEX 0 +#define MAC_RSSCR_RSSE_WIDTH 1 +#define MAC_RSSCR_TCP4TE_INDEX 2 +#define MAC_RSSCR_TCP4TE_WIDTH 1 +#define MAC_RSSCR_UDP4TE_INDEX 3 +#define MAC_RSSCR_UDP4TE_WIDTH 1 +#define MAC_RSSDR_DMCH_INDEX 0 +#define MAC_RSSDR_DMCH_WIDTH 4 +#define MAC_SSIR_SNSINC_INDEX 8 +#define MAC_SSIR_SNSINC_WIDTH 8 +#define MAC_SSIR_SSINC_INDEX 16 +#define MAC_SSIR_SSINC_WIDTH 8 +#define MAC_TCR_SS_INDEX 29 +#define MAC_TCR_SS_WIDTH 2 +#define MAC_TCR_TE_INDEX 0 +#define MAC_TCR_TE_WIDTH 1 +#define MAC_TSCR_AV8021ASMEN_INDEX 28 +#define MAC_TSCR_AV8021ASMEN_WIDTH 1 +#define MAC_TSCR_SNAPTYPSEL_INDEX 16 +#define MAC_TSCR_SNAPTYPSEL_WIDTH 2 +#define MAC_TSCR_TSADDREG_INDEX 5 +#define MAC_TSCR_TSADDREG_WIDTH 1 +#define MAC_TSCR_TSCFUPDT_INDEX 1 +#define MAC_TSCR_TSCFUPDT_WIDTH 1 +#define MAC_TSCR_TSCTRLSSR_INDEX 9 +#define MAC_TSCR_TSCTRLSSR_WIDTH 1 +#define 
MAC_TSCR_TSENA_INDEX 0 +#define MAC_TSCR_TSENA_WIDTH 1 +#define MAC_TSCR_TSENALL_INDEX 8 +#define MAC_TSCR_TSENALL_WIDTH 1 +#define MAC_TSCR_TSEVNTENA_INDEX 14 +#define MAC_TSCR_TSEVNTENA_WIDTH 1 +#define MAC_TSCR_TSINIT_INDEX 2 +#define MAC_TSCR_TSINIT_WIDTH 1 +#define MAC_TSCR_TSIPENA_INDEX 11 +#define MAC_TSCR_TSIPENA_WIDTH 1 +#define MAC_TSCR_TSIPV4ENA_INDEX 13 +#define MAC_TSCR_TSIPV4ENA_WIDTH 1 +#define MAC_TSCR_TSIPV6ENA_INDEX 12 +#define MAC_TSCR_TSIPV6ENA_WIDTH 1 +#define MAC_TSCR_TSMSTRENA_INDEX 15 +#define MAC_TSCR_TSMSTRENA_WIDTH 1 +#define MAC_TSCR_TSVER2ENA_INDEX 10 +#define MAC_TSCR_TSVER2ENA_WIDTH 1 +#define MAC_TSCR_TXTSSTSM_INDEX 24 +#define MAC_TSCR_TXTSSTSM_WIDTH 1 +#define MAC_TSSR_TXTSC_INDEX 15 +#define MAC_TSSR_TXTSC_WIDTH 1 +#define MAC_TXSNR_TXTSSTSMIS_INDEX 31 +#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1 +#define MAC_VLANHTR_VLHT_INDEX 0 +#define MAC_VLANHTR_VLHT_WIDTH 16 +#define MAC_VLANIR_VLTI_INDEX 20 +#define MAC_VLANIR_VLTI_WIDTH 1 +#define MAC_VLANIR_CSVL_INDEX 19 +#define MAC_VLANIR_CSVL_WIDTH 1 +#define MAC_VLANTR_DOVLTC_INDEX 20 +#define MAC_VLANTR_DOVLTC_WIDTH 1 +#define MAC_VLANTR_ERSVLM_INDEX 19 +#define MAC_VLANTR_ERSVLM_WIDTH 1 +#define MAC_VLANTR_ESVL_INDEX 18 +#define MAC_VLANTR_ESVL_WIDTH 1 +#define MAC_VLANTR_ETV_INDEX 16 +#define MAC_VLANTR_ETV_WIDTH 1 +#define MAC_VLANTR_EVLS_INDEX 21 +#define MAC_VLANTR_EVLS_WIDTH 2 +#define MAC_VLANTR_EVLRXS_INDEX 24 +#define MAC_VLANTR_EVLRXS_WIDTH 1 +#define MAC_VLANTR_VL_INDEX 0 +#define MAC_VLANTR_VL_WIDTH 16 +#define MAC_VLANTR_VTHM_INDEX 25 +#define MAC_VLANTR_VTHM_WIDTH 1 +#define MAC_VLANTR_VTIM_INDEX 17 +#define MAC_VLANTR_VTIM_WIDTH 1 +#define MAC_VR_DEVID_INDEX 8 +#define MAC_VR_DEVID_WIDTH 8 +#define MAC_VR_SNPSVER_INDEX 0 +#define MAC_VR_SNPSVER_WIDTH 8 +#define MAC_VR_USERVER_INDEX 16 +#define MAC_VR_USERVER_WIDTH 8 + +/* MMC register offsets */ +#define MMC_CR 0x0800 +#define MMC_RISR 0x0804 +#define MMC_TISR 0x0808 +#define MMC_RIER 0x080c +#define MMC_TIER 0x0810 +#define MMC_TXOCTETCOUNT_GB_LO 0x0814 +#define MMC_TXOCTETCOUNT_GB_HI 0x0818 +#define MMC_TXFRAMECOUNT_GB_LO 0x081c +#define MMC_TXFRAMECOUNT_GB_HI 0x0820 +#define MMC_TXBROADCASTFRAMES_G_LO 0x0824 +#define MMC_TXBROADCASTFRAMES_G_HI 0x0828 +#define MMC_TXMULTICASTFRAMES_G_LO 0x082c +#define MMC_TXMULTICASTFRAMES_G_HI 0x0830 +#define MMC_TX64OCTETS_GB_LO 0x0834 +#define MMC_TX64OCTETS_GB_HI 0x0838 +#define MMC_TX65TO127OCTETS_GB_LO 0x083c +#define MMC_TX65TO127OCTETS_GB_HI 0x0840 +#define MMC_TX128TO255OCTETS_GB_LO 0x0844 +#define MMC_TX128TO255OCTETS_GB_HI 0x0848 +#define MMC_TX256TO511OCTETS_GB_LO 0x084c +#define MMC_TX256TO511OCTETS_GB_HI 0x0850 +#define MMC_TX512TO1023OCTETS_GB_LO 0x0854 +#define MMC_TX512TO1023OCTETS_GB_HI 0x0858 +#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c +#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860 +#define MMC_TXUNICASTFRAMES_GB_LO 0x0864 +#define MMC_TXUNICASTFRAMES_GB_HI 0x0868 +#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c +#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870 +#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874 +#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878 +#define MMC_TXUNDERFLOWERROR_LO 0x087c +#define MMC_TXUNDERFLOWERROR_HI 0x0880 +#define MMC_TXOCTETCOUNT_G_LO 0x0884 +#define MMC_TXOCTETCOUNT_G_HI 0x0888 +#define MMC_TXFRAMECOUNT_G_LO 0x088c +#define MMC_TXFRAMECOUNT_G_HI 0x0890 +#define MMC_TXPAUSEFRAMES_LO 0x0894 +#define MMC_TXPAUSEFRAMES_HI 0x0898 +#define MMC_TXVLANFRAMES_G_LO 0x089c +#define MMC_TXVLANFRAMES_G_HI 0x08a0 +#define MMC_RXFRAMECOUNT_GB_LO 0x0900 +#define MMC_RXFRAMECOUNT_GB_HI 0x0904 +#define 
MMC_RXOCTETCOUNT_GB_LO 0x0908 +#define MMC_RXOCTETCOUNT_GB_HI 0x090c +#define MMC_RXOCTETCOUNT_G_LO 0x0910 +#define MMC_RXOCTETCOUNT_G_HI 0x0914 +#define MMC_RXBROADCASTFRAMES_G_LO 0x0918 +#define MMC_RXBROADCASTFRAMES_G_HI 0x091c +#define MMC_RXMULTICASTFRAMES_G_LO 0x0920 +#define MMC_RXMULTICASTFRAMES_G_HI 0x0924 +#define MMC_RXCRCERROR_LO 0x0928 +#define MMC_RXCRCERROR_HI 0x092c +#define MMC_RXRUNTERROR 0x0930 +#define MMC_RXJABBERERROR 0x0934 +#define MMC_RXUNDERSIZE_G 0x0938 +#define MMC_RXOVERSIZE_G 0x093c +#define MMC_RX64OCTETS_GB_LO 0x0940 +#define MMC_RX64OCTETS_GB_HI 0x0944 +#define MMC_RX65TO127OCTETS_GB_LO 0x0948 +#define MMC_RX65TO127OCTETS_GB_HI 0x094c +#define MMC_RX128TO255OCTETS_GB_LO 0x0950 +#define MMC_RX128TO255OCTETS_GB_HI 0x0954 +#define MMC_RX256TO511OCTETS_GB_LO 0x0958 +#define MMC_RX256TO511OCTETS_GB_HI 0x095c +#define MMC_RX512TO1023OCTETS_GB_LO 0x0960 +#define MMC_RX512TO1023OCTETS_GB_HI 0x0964 +#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968 +#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c +#define MMC_RXUNICASTFRAMES_G_LO 0x0970 +#define MMC_RXUNICASTFRAMES_G_HI 0x0974 +#define MMC_RXLENGTHERROR_LO 0x0978 +#define MMC_RXLENGTHERROR_HI 0x097c +#define MMC_RXOUTOFRANGETYPE_LO 0x0980 +#define MMC_RXOUTOFRANGETYPE_HI 0x0984 +#define MMC_RXPAUSEFRAMES_LO 0x0988 +#define MMC_RXPAUSEFRAMES_HI 0x098c +#define MMC_RXFIFOOVERFLOW_LO 0x0990 +#define MMC_RXFIFOOVERFLOW_HI 0x0994 +#define MMC_RXVLANFRAMES_GB_LO 0x0998 +#define MMC_RXVLANFRAMES_GB_HI 0x099c +#define MMC_RXWATCHDOGERROR 0x09a0 + +/* MMC register entry bit positions and sizes */ +#define MMC_CR_CR_INDEX 0 +#define MMC_CR_CR_WIDTH 1 +#define MMC_CR_CSR_INDEX 1 +#define MMC_CR_CSR_WIDTH 1 +#define MMC_CR_ROR_INDEX 2 +#define MMC_CR_ROR_WIDTH 1 +#define MMC_CR_MCF_INDEX 3 +#define MMC_CR_MCF_WIDTH 1 +#define MMC_CR_MCT_INDEX 4 +#define MMC_CR_MCT_WIDTH 2 +#define MMC_RIER_ALL_INTERRUPTS_INDEX 0 +#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23 +#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0 +#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1 +#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1 +#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1 +#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2 +#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1 +#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3 +#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1 +#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4 +#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1 +#define MMC_RISR_RXCRCERROR_INDEX 5 +#define MMC_RISR_RXCRCERROR_WIDTH 1 +#define MMC_RISR_RXRUNTERROR_INDEX 6 +#define MMC_RISR_RXRUNTERROR_WIDTH 1 +#define MMC_RISR_RXJABBERERROR_INDEX 7 +#define MMC_RISR_RXJABBERERROR_WIDTH 1 +#define MMC_RISR_RXUNDERSIZE_G_INDEX 8 +#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1 +#define MMC_RISR_RXOVERSIZE_G_INDEX 9 +#define MMC_RISR_RXOVERSIZE_G_WIDTH 1 +#define MMC_RISR_RX64OCTETS_GB_INDEX 10 +#define MMC_RISR_RX64OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11 +#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12 +#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13 +#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14 +#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15 +#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1 +#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16 +#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1 +#define MMC_RISR_RXLENGTHERROR_INDEX 17 +#define MMC_RISR_RXLENGTHERROR_WIDTH 1 +#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18 +#define 
MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1 +#define MMC_RISR_RXPAUSEFRAMES_INDEX 19 +#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1 +#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20 +#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1 +#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21 +#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1 +#define MMC_RISR_RXWATCHDOGERROR_INDEX 22 +#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1 +#define MMC_TIER_ALL_INTERRUPTS_INDEX 0 +#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18 +#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0 +#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1 +#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1 +#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1 +#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2 +#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1 +#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3 +#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1 +#define MMC_TISR_TX64OCTETS_GB_INDEX 4 +#define MMC_TISR_TX64OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5 +#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6 +#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7 +#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8 +#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9 +#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1 +#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10 +#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1 +#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11 +#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1 +#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12 +#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1 +#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13 +#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1 +#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14 +#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1 +#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15 +#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1 +#define MMC_TISR_TXPAUSEFRAMES_INDEX 16 +#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1 +#define MMC_TISR_TXVLANFRAMES_G_INDEX 17 +#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1 + +/* MTL register offsets */ +#define MTL_OMR 0x1000 +#define MTL_FDCR 0x1008 +#define MTL_FDSR 0x100c +#define MTL_FDDR 0x1010 +#define MTL_ISR 0x1020 +#define MTL_RQDCM0R 0x1030 +#define MTL_TCPM0R 0x1040 +#define MTL_TCPM1R 0x1044 + +#define MTL_RQDCM_INC 4 +#define MTL_RQDCM_Q_PER_REG 4 +#define MTL_TCPM_INC 4 +#define MTL_TCPM_TC_PER_REG 4 + +/* MTL register entry bit positions and sizes */ +#define MTL_OMR_ETSALG_INDEX 5 +#define MTL_OMR_ETSALG_WIDTH 2 +#define MTL_OMR_RAA_INDEX 2 +#define MTL_OMR_RAA_WIDTH 1 + +/* MTL queue register offsets + * Multiple queues can be active. The first queue has registers + * that begin at 0x1100. Each subsequent queue has registers that + * are accessed using an offset of 0x80 from the previous queue. 
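+ *
+ * For example, queue 2's Tx queue operation mode register sits at
+ * MTL_Q_BASE + 2 * MTL_Q_INC + MTL_Q_TQOMR = 0x1100 + 0x100 + 0x00 =
+ * 0x1200, which is exactly what the AXGMAC_MTL_IOREAD()/
+ * AXGMAC_MTL_IOWRITE() helpers defined later in this file compute.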
+ */
+#define MTL_Q_BASE 0x1100
+#define MTL_Q_INC 0x80
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_TQUR 0x04
+#define MTL_Q_TQDR 0x08
+#define MTL_Q_RQOMR 0x40
+#define MTL_Q_RQMPOCR 0x44
+#define MTL_Q_RQDR 0x48
+#define MTL_Q_RQFCR 0x50
+#define MTL_Q_IER 0x70
+#define MTL_Q_ISR 0x74
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQDR_PRXQ_INDEX 16
+#define MTL_Q_RQDR_PRXQ_WIDTH 14
+#define MTL_Q_RQDR_RXQSTS_INDEX 4
+#define MTL_Q_RQDR_RXQSTS_WIDTH 2
+#define MTL_Q_RQFCR_RFA_INDEX 1
+#define MTL_Q_RQFCR_RFA_WIDTH 6
+#define MTL_Q_RQFCR_RFD_INDEX 17
+#define MTL_Q_RQFCR_RFD_WIDTH 6
+#define MTL_Q_RQOMR_EHFC_INDEX 7
+#define MTL_Q_RQOMR_EHFC_WIDTH 1
+#define MTL_Q_RQOMR_RQS_INDEX 16
+#define MTL_Q_RQOMR_RQS_WIDTH 9
+#define MTL_Q_RQOMR_RSF_INDEX 5
+#define MTL_Q_RQOMR_RSF_WIDTH 1
+#define MTL_Q_RQOMR_RTC_INDEX 0
+#define MTL_Q_RQOMR_RTC_WIDTH 2
+#define MTL_Q_TQDR_TRCSTS_INDEX 1
+#define MTL_Q_TQDR_TRCSTS_WIDTH 2
+#define MTL_Q_TQDR_TXQSTS_INDEX 4
+#define MTL_Q_TQDR_TXQSTS_WIDTH 1
+#define MTL_Q_TQOMR_FTQ_INDEX 0
+#define MTL_Q_TQOMR_FTQ_WIDTH 1
+#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
+#define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3
+#define MTL_Q_TQOMR_TQS_INDEX 16
+#define MTL_Q_TQOMR_TQS_WIDTH 10
+#define MTL_Q_TQOMR_TSF_INDEX 1
+#define MTL_Q_TQOMR_TSF_WIDTH 1
+#define MTL_Q_TQOMR_TTC_INDEX 4
+#define MTL_Q_TQOMR_TTC_WIDTH 3
+#define MTL_Q_TQOMR_TXQEN_INDEX 2
+#define MTL_Q_TQOMR_TXQEN_WIDTH 2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_32 0x01
+#define MTL_TX_THRESHOLD_64 0x00
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_ENABLED 0x02
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first class has registers
+ * that begin at 0x1100. Each subsequent traffic class has registers that
+ * are accessed using an offset of 0x80 from the previous traffic class.
+ */ +#define MTL_TC_BASE MTL_Q_BASE +#define MTL_TC_INC MTL_Q_INC + +#define MTL_TC_ETSCR 0x10 +#define MTL_TC_ETSSR 0x14 +#define MTL_TC_QWR 0x18 + +/* MTL traffic class register entry bit positions and sizes */ +#define MTL_TC_ETSCR_TSA_INDEX 0 +#define MTL_TC_ETSCR_TSA_WIDTH 2 +#define MTL_TC_QWR_QW_INDEX 0 +#define MTL_TC_QWR_QW_WIDTH 21 + +/* MTL traffic class register value */ +#define MTL_TSA_SP 0x00 +#define MTL_TSA_ETS 0x02 + +/* PCS register offsets */ +#define PCS_V1_WINDOW_SELECT 0x03fc +#define PCS_V2_WINDOW_DEF 0x9060 +#define PCS_V2_WINDOW_SELECT 0x9064 + +/* PCS register entry bit positions and sizes */ +#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6 +#define PCS_V2_WINDOW_DEF_OFFSET_WIDTH 14 +#define PCS_V2_WINDOW_DEF_SIZE_INDEX 2 +#define PCS_V2_WINDOW_DEF_SIZE_WIDTH 4 + +/* SerDes integration register offsets */ +#define SIR0_KR_RT_1 0x002c +#define SIR0_STATUS 0x0040 +#define SIR1_SPEED 0x0000 + +/* SerDes integration register entry bit positions and sizes */ +#define SIR0_KR_RT_1_RESET_INDEX 11 +#define SIR0_KR_RT_1_RESET_WIDTH 1 +#define SIR0_STATUS_RX_READY_INDEX 0 +#define SIR0_STATUS_RX_READY_WIDTH 1 +#define SIR0_STATUS_TX_READY_INDEX 8 +#define SIR0_STATUS_TX_READY_WIDTH 1 +#define SIR1_SPEED_CDR_RATE_INDEX 12 +#define SIR1_SPEED_CDR_RATE_WIDTH 4 +#define SIR1_SPEED_DATARATE_INDEX 4 +#define SIR1_SPEED_DATARATE_WIDTH 2 +#define SIR1_SPEED_PLLSEL_INDEX 3 +#define SIR1_SPEED_PLLSEL_WIDTH 1 +#define SIR1_SPEED_RATECHANGE_INDEX 6 +#define SIR1_SPEED_RATECHANGE_WIDTH 1 +#define SIR1_SPEED_TXAMP_INDEX 8 +#define SIR1_SPEED_TXAMP_WIDTH 4 +#define SIR1_SPEED_WORDMODE_INDEX 0 +#define SIR1_SPEED_WORDMODE_WIDTH 3 + +/* SerDes RxTx register offsets */ +#define RXTX_REG6 0x0018 +#define RXTX_REG20 0x0050 +#define RXTX_REG22 0x0058 +#define RXTX_REG114 0x01c8 +#define RXTX_REG129 0x0204 + +/* SerDes RxTx register entry bit positions and sizes */ +#define RXTX_REG6_RESETB_RXD_INDEX 8 +#define RXTX_REG6_RESETB_RXD_WIDTH 1 +#define RXTX_REG20_BLWC_ENA_INDEX 2 +#define RXTX_REG20_BLWC_ENA_WIDTH 1 +#define RXTX_REG114_PQ_REG_INDEX 9 +#define RXTX_REG114_PQ_REG_WIDTH 7 +#define RXTX_REG129_RXDFE_CONFIG_INDEX 14 +#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2 + +/* MAC Control register offsets */ +#define XP_PROP_0 0x0000 +#define XP_PROP_1 0x0004 +#define XP_PROP_2 0x0008 +#define XP_PROP_3 0x000c +#define XP_PROP_4 0x0010 +#define XP_PROP_5 0x0014 +#define XP_MAC_ADDR_LO 0x0020 +#define XP_MAC_ADDR_HI 0x0024 +#define XP_ECC_ISR 0x0030 +#define XP_ECC_IER 0x0034 +#define XP_ECC_CNT0 0x003c +#define XP_ECC_CNT1 0x0040 +#define XP_DRIVER_INT_REQ 0x0060 +#define XP_DRIVER_INT_RO 0x0064 +#define XP_DRIVER_SCRATCH_0 0x0068 +#define XP_DRIVER_SCRATCH_1 0x006c +#define XP_INT_EN 0x0078 +#define XP_I2C_MUTEX 0x0080 +#define XP_MDIO_MUTEX 0x0084 + +/* MAC Control register entry bit positions and sizes */ +#define XP_DRIVER_INT_REQ_REQUEST_INDEX 0 +#define XP_DRIVER_INT_REQ_REQUEST_WIDTH 1 +#define XP_DRIVER_INT_RO_STATUS_INDEX 0 +#define XP_DRIVER_INT_RO_STATUS_WIDTH 1 +#define XP_DRIVER_SCRATCH_0_COMMAND_INDEX 0 +#define XP_DRIVER_SCRATCH_0_COMMAND_WIDTH 8 +#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_INDEX 8 +#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_WIDTH 8 +#define XP_ECC_CNT0_RX_DED_INDEX 24 +#define XP_ECC_CNT0_RX_DED_WIDTH 8 +#define XP_ECC_CNT0_RX_SEC_INDEX 16 +#define XP_ECC_CNT0_RX_SEC_WIDTH 8 +#define XP_ECC_CNT0_TX_DED_INDEX 8 +#define XP_ECC_CNT0_TX_DED_WIDTH 8 +#define XP_ECC_CNT0_TX_SEC_INDEX 0 +#define XP_ECC_CNT0_TX_SEC_WIDTH 8 +#define XP_ECC_CNT1_DESC_DED_INDEX 8 +#define 
XP_ECC_CNT1_DESC_DED_WIDTH 8 +#define XP_ECC_CNT1_DESC_SEC_INDEX 0 +#define XP_ECC_CNT1_DESC_SEC_WIDTH 8 +#define XP_ECC_IER_DESC_DED_INDEX 0 +#define XP_ECC_IER_DESC_DED_WIDTH 1 +#define XP_ECC_IER_DESC_SEC_INDEX 1 +#define XP_ECC_IER_DESC_SEC_WIDTH 1 +#define XP_ECC_IER_RX_DED_INDEX 2 +#define XP_ECC_IER_RX_DED_WIDTH 1 +#define XP_ECC_IER_RX_SEC_INDEX 3 +#define XP_ECC_IER_RX_SEC_WIDTH 1 +#define XP_ECC_IER_TX_DED_INDEX 4 +#define XP_ECC_IER_TX_DED_WIDTH 1 +#define XP_ECC_IER_TX_SEC_INDEX 5 +#define XP_ECC_IER_TX_SEC_WIDTH 1 +#define XP_ECC_ISR_DESC_DED_INDEX 0 +#define XP_ECC_ISR_DESC_DED_WIDTH 1 +#define XP_ECC_ISR_DESC_SEC_INDEX 1 +#define XP_ECC_ISR_DESC_SEC_WIDTH 1 +#define XP_ECC_ISR_RX_DED_INDEX 2 +#define XP_ECC_ISR_RX_DED_WIDTH 1 +#define XP_ECC_ISR_RX_SEC_INDEX 3 +#define XP_ECC_ISR_RX_SEC_WIDTH 1 +#define XP_ECC_ISR_TX_DED_INDEX 4 +#define XP_ECC_ISR_TX_DED_WIDTH 1 +#define XP_ECC_ISR_TX_SEC_INDEX 5 +#define XP_ECC_ISR_TX_SEC_WIDTH 1 +#define XP_I2C_MUTEX_BUSY_INDEX 31 +#define XP_I2C_MUTEX_BUSY_WIDTH 1 +#define XP_I2C_MUTEX_ID_INDEX 29 +#define XP_I2C_MUTEX_ID_WIDTH 2 +#define XP_I2C_MUTEX_ACTIVE_INDEX 0 +#define XP_I2C_MUTEX_ACTIVE_WIDTH 1 +#define XP_MAC_ADDR_HI_VALID_INDEX 31 +#define XP_MAC_ADDR_HI_VALID_WIDTH 1 +#define XP_PROP_0_CONN_TYPE_INDEX 28 +#define XP_PROP_0_CONN_TYPE_WIDTH 3 +#define XP_PROP_0_MDIO_ADDR_INDEX 16 +#define XP_PROP_0_MDIO_ADDR_WIDTH 5 +#define XP_PROP_0_PORT_ID_INDEX 0 +#define XP_PROP_0_PORT_ID_WIDTH 8 +#define XP_PROP_0_PORT_MODE_INDEX 8 +#define XP_PROP_0_PORT_MODE_WIDTH 4 +#define XP_PROP_0_PORT_SPEEDS_INDEX 23 +#define XP_PROP_0_PORT_SPEEDS_WIDTH 4 +#define XP_PROP_1_MAX_RX_DMA_INDEX 24 +#define XP_PROP_1_MAX_RX_DMA_WIDTH 5 +#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8 +#define XP_PROP_1_MAX_RX_QUEUES_WIDTH 5 +#define XP_PROP_1_MAX_TX_DMA_INDEX 16 +#define XP_PROP_1_MAX_TX_DMA_WIDTH 5 +#define XP_PROP_1_MAX_TX_QUEUES_INDEX 0 +#define XP_PROP_1_MAX_TX_QUEUES_WIDTH 5 +#define XP_PROP_2_RX_FIFO_SIZE_INDEX 16 +#define XP_PROP_2_RX_FIFO_SIZE_WIDTH 16 +#define XP_PROP_2_TX_FIFO_SIZE_INDEX 0 +#define XP_PROP_2_TX_FIFO_SIZE_WIDTH 16 +#define XP_PROP_3_GPIO_MASK_INDEX 28 +#define XP_PROP_3_GPIO_MASK_WIDTH 4 +#define XP_PROP_3_GPIO_MOD_ABS_INDEX 20 +#define XP_PROP_3_GPIO_MOD_ABS_WIDTH 4 +#define XP_PROP_3_GPIO_RATE_SELECT_INDEX 16 +#define XP_PROP_3_GPIO_RATE_SELECT_WIDTH 4 +#define XP_PROP_3_GPIO_RX_LOS_INDEX 24 +#define XP_PROP_3_GPIO_RX_LOS_WIDTH 4 +#define XP_PROP_3_GPIO_TX_FAULT_INDEX 12 +#define XP_PROP_3_GPIO_TX_FAULT_WIDTH 4 +#define XP_PROP_3_GPIO_ADDR_INDEX 8 +#define XP_PROP_3_GPIO_ADDR_WIDTH 3 +#define XP_PROP_3_MDIO_RESET_INDEX 0 +#define XP_PROP_3_MDIO_RESET_WIDTH 2 +#define XP_PROP_3_MDIO_RESET_I2C_ADDR_INDEX 8 +#define XP_PROP_3_MDIO_RESET_I2C_ADDR_WIDTH 3 +#define XP_PROP_3_MDIO_RESET_I2C_GPIO_INDEX 12 +#define XP_PROP_3_MDIO_RESET_I2C_GPIO_WIDTH 4 +#define XP_PROP_3_MDIO_RESET_INT_GPIO_INDEX 4 +#define XP_PROP_3_MDIO_RESET_INT_GPIO_WIDTH 2 +#define XP_PROP_4_MUX_ADDR_HI_INDEX 8 +#define XP_PROP_4_MUX_ADDR_HI_WIDTH 5 +#define XP_PROP_4_MUX_ADDR_LO_INDEX 0 +#define XP_PROP_4_MUX_ADDR_LO_WIDTH 3 +#define XP_PROP_4_MUX_CHAN_INDEX 4 +#define XP_PROP_4_MUX_CHAN_WIDTH 3 +#define XP_PROP_4_REDRV_ADDR_INDEX 16 +#define XP_PROP_4_REDRV_ADDR_WIDTH 7 +#define XP_PROP_4_REDRV_IF_INDEX 23 +#define XP_PROP_4_REDRV_IF_WIDTH 1 +#define XP_PROP_4_REDRV_LANE_INDEX 24 +#define XP_PROP_4_REDRV_LANE_WIDTH 3 +#define XP_PROP_4_REDRV_MODEL_INDEX 28 +#define XP_PROP_4_REDRV_MODEL_WIDTH 3 +#define XP_PROP_4_REDRV_PRESENT_INDEX 31 +#define 
XP_PROP_4_REDRV_PRESENT_WIDTH 1 + +/* I2C Control register offsets */ +#define IC_CON 0x0000 +#define IC_TAR 0x0004 +#define IC_DATA_CMD 0x0010 +#define IC_INTR_STAT 0x002c +#define IC_INTR_MASK 0x0030 +#define IC_RAW_INTR_STAT 0x0034 +#define IC_CLR_INTR 0x0040 +#define IC_CLR_TX_ABRT 0x0054 +#define IC_CLR_STOP_DET 0x0060 +#define IC_ENABLE 0x006c +#define IC_TXFLR 0x0074 +#define IC_RXFLR 0x0078 +#define IC_TX_ABRT_SOURCE 0x0080 +#define IC_ENABLE_STATUS 0x009c +#define IC_COMP_PARAM_1 0x00f4 + +/* I2C Control register entry bit positions and sizes */ +#define IC_COMP_PARAM_1_MAX_SPEED_MODE_INDEX 2 +#define IC_COMP_PARAM_1_MAX_SPEED_MODE_WIDTH 2 +#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_INDEX 8 +#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_WIDTH 8 +#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_INDEX 16 +#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_WIDTH 8 +#define IC_CON_MASTER_MODE_INDEX 0 +#define IC_CON_MASTER_MODE_WIDTH 1 +#define IC_CON_RESTART_EN_INDEX 5 +#define IC_CON_RESTART_EN_WIDTH 1 +#define IC_CON_RX_FIFO_FULL_HOLD_INDEX 9 +#define IC_CON_RX_FIFO_FULL_HOLD_WIDTH 1 +#define IC_CON_SLAVE_DISABLE_INDEX 6 +#define IC_CON_SLAVE_DISABLE_WIDTH 1 +#define IC_CON_SPEED_INDEX 1 +#define IC_CON_SPEED_WIDTH 2 +#define IC_DATA_CMD_CMD_INDEX 8 +#define IC_DATA_CMD_CMD_WIDTH 1 +#define IC_DATA_CMD_STOP_INDEX 9 +#define IC_DATA_CMD_STOP_WIDTH 1 +#define IC_ENABLE_ABORT_INDEX 1 +#define IC_ENABLE_ABORT_WIDTH 1 +#define IC_ENABLE_EN_INDEX 0 +#define IC_ENABLE_EN_WIDTH 1 +#define IC_ENABLE_STATUS_EN_INDEX 0 +#define IC_ENABLE_STATUS_EN_WIDTH 1 +#define IC_INTR_MASK_TX_EMPTY_INDEX 4 +#define IC_INTR_MASK_TX_EMPTY_WIDTH 1 +#define IC_RAW_INTR_STAT_RX_FULL_INDEX 2 +#define IC_RAW_INTR_STAT_RX_FULL_WIDTH 1 +#define IC_RAW_INTR_STAT_STOP_DET_INDEX 9 +#define IC_RAW_INTR_STAT_STOP_DET_WIDTH 1 +#define IC_RAW_INTR_STAT_TX_ABRT_INDEX 6 +#define IC_RAW_INTR_STAT_TX_ABRT_WIDTH 1 +#define IC_RAW_INTR_STAT_TX_EMPTY_INDEX 4 +#define IC_RAW_INTR_STAT_TX_EMPTY_WIDTH 1 + +/* I2C Control register value */ +#define IC_TX_ABRT_7B_ADDR_NOACK 0x0001 +#define IC_TX_ABRT_ARB_LOST 0x1000 + +/* Descriptor/Packet entry bit positions and sizes */ +#define RX_PACKET_ERRORS_CRC_INDEX 2 +#define RX_PACKET_ERRORS_CRC_WIDTH 1 +#define RX_PACKET_ERRORS_FRAME_INDEX 3 +#define RX_PACKET_ERRORS_FRAME_WIDTH 1 +#define RX_PACKET_ERRORS_LENGTH_INDEX 0 +#define RX_PACKET_ERRORS_LENGTH_WIDTH 1 +#define RX_PACKET_ERRORS_OVERRUN_INDEX 1 +#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1 + +#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0 +#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 +#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 +#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 +#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 + +#define RX_NORMAL_DESC0_OVT_INDEX 0 +#define RX_NORMAL_DESC0_OVT_WIDTH 16 +#define RX_NORMAL_DESC2_HL_INDEX 0 +#define RX_NORMAL_DESC2_HL_WIDTH 10 +#define RX_NORMAL_DESC3_CDA_INDEX 27 +#define RX_NORMAL_DESC3_CDA_WIDTH 1 +#define RX_NORMAL_DESC3_CTXT_INDEX 30 +#define RX_NORMAL_DESC3_CTXT_WIDTH 1 +#define RX_NORMAL_DESC3_ES_INDEX 15 +#define RX_NORMAL_DESC3_ES_WIDTH 1 +#define RX_NORMAL_DESC3_ETLT_INDEX 16 
+#define RX_NORMAL_DESC3_ETLT_WIDTH 4 +#define RX_NORMAL_DESC3_FD_INDEX 29 +#define RX_NORMAL_DESC3_FD_WIDTH 1 +#define RX_NORMAL_DESC3_INTE_INDEX 30 +#define RX_NORMAL_DESC3_INTE_WIDTH 1 +#define RX_NORMAL_DESC3_L34T_INDEX 20 +#define RX_NORMAL_DESC3_L34T_WIDTH 4 +#define RX_NORMAL_DESC3_LD_INDEX 28 +#define RX_NORMAL_DESC3_LD_WIDTH 1 +#define RX_NORMAL_DESC3_OWN_INDEX 31 +#define RX_NORMAL_DESC3_OWN_WIDTH 1 +#define RX_NORMAL_DESC3_PL_INDEX 0 +#define RX_NORMAL_DESC3_PL_WIDTH 14 +#define RX_NORMAL_DESC3_RSV_INDEX 26 +#define RX_NORMAL_DESC3_RSV_WIDTH 1 + +#define RX_DESC3_L34T_IPV4_TCP 1 +#define RX_DESC3_L34T_IPV4_UDP 2 +#define RX_DESC3_L34T_IPV4_ICMP 3 +#define RX_DESC3_L34T_IPV6_TCP 9 +#define RX_DESC3_L34T_IPV6_UDP 10 +#define RX_DESC3_L34T_IPV6_ICMP 11 + +#define RX_CONTEXT_DESC3_TSA_INDEX 4 +#define RX_CONTEXT_DESC3_TSA_WIDTH 1 +#define RX_CONTEXT_DESC3_TSD_INDEX 6 +#define RX_CONTEXT_DESC3_TSD_WIDTH 1 + +#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0 +#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1 +#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2 +#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3 +#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1 + +#define TX_CONTEXT_DESC2_MSS_INDEX 0 +#define TX_CONTEXT_DESC2_MSS_WIDTH 15 +#define TX_CONTEXT_DESC3_CTXT_INDEX 30 +#define TX_CONTEXT_DESC3_CTXT_WIDTH 1 +#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26 +#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1 +#define TX_CONTEXT_DESC3_VLTV_INDEX 16 +#define TX_CONTEXT_DESC3_VLTV_WIDTH 1 +#define TX_CONTEXT_DESC3_VT_INDEX 0 +#define TX_CONTEXT_DESC3_VT_WIDTH 16 + +#define TX_NORMAL_DESC2_HL_B1L_INDEX 0 +#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14 +#define TX_NORMAL_DESC2_IC_INDEX 31 +#define TX_NORMAL_DESC2_IC_WIDTH 1 +#define TX_NORMAL_DESC2_TTSE_INDEX 30 +#define TX_NORMAL_DESC2_TTSE_WIDTH 1 +#define TX_NORMAL_DESC2_VTIR_INDEX 14 +#define TX_NORMAL_DESC2_VTIR_WIDTH 2 +#define TX_NORMAL_DESC3_CIC_INDEX 16 +#define TX_NORMAL_DESC3_CIC_WIDTH 2 +#define TX_NORMAL_DESC3_CPC_INDEX 26 +#define TX_NORMAL_DESC3_CPC_WIDTH 2 +#define TX_NORMAL_DESC3_CTXT_INDEX 30 +#define TX_NORMAL_DESC3_CTXT_WIDTH 1 +#define TX_NORMAL_DESC3_FD_INDEX 29 +#define TX_NORMAL_DESC3_FD_WIDTH 1 +#define TX_NORMAL_DESC3_FL_INDEX 0 +#define TX_NORMAL_DESC3_FL_WIDTH 15 +#define TX_NORMAL_DESC3_LD_INDEX 28 +#define TX_NORMAL_DESC3_LD_WIDTH 1 +#define TX_NORMAL_DESC3_OWN_INDEX 31 +#define TX_NORMAL_DESC3_OWN_WIDTH 1 +#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19 +#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4 +#define TX_NORMAL_DESC3_TCPPL_INDEX 0 +#define TX_NORMAL_DESC3_TCPPL_WIDTH 18 +#define TX_NORMAL_DESC3_TSE_INDEX 18 +#define TX_NORMAL_DESC3_TSE_WIDTH 1 + +#define TX_NORMAL_DESC2_VLAN_INSERT 0x2 + +/* MDIO undefined or vendor specific registers */ +#ifndef MDIO_PMA_10GBR_PMD_CTRL +#define MDIO_PMA_10GBR_PMD_CTRL 0x0096 +#endif + +#ifndef MDIO_PMA_10GBR_FECCTRL +#define MDIO_PMA_10GBR_FECCTRL 0x00ab +#endif + +#ifndef MDIO_PCS_DIG_CTRL +#define MDIO_PCS_DIG_CTRL 0x8000 +#endif + +#ifndef MDIO_AN_XNP +#define MDIO_AN_XNP 0x0016 +#endif + +#ifndef MDIO_AN_LPX +#define MDIO_AN_LPX 0x0019 +#endif + +#ifndef MDIO_AN_COMP_STAT +#define MDIO_AN_COMP_STAT 0x0030 +#endif + +#ifndef MDIO_AN_INTMASK +#define MDIO_AN_INTMASK 0x8001 +#endif + +#ifndef MDIO_AN_INT +#define MDIO_AN_INT 0x8002 +#endif + +#ifndef MDIO_VEND2_AN_ADVERTISE +#define MDIO_VEND2_AN_ADVERTISE 0x0004 +#endif + +#ifndef MDIO_VEND2_AN_LP_ABILITY +#define 
MDIO_VEND2_AN_LP_ABILITY 0x0005 +#endif + +#ifndef MDIO_VEND2_AN_CTRL +#define MDIO_VEND2_AN_CTRL 0x8001 +#endif + +#ifndef MDIO_VEND2_AN_STAT +#define MDIO_VEND2_AN_STAT 0x8002 +#endif + +#ifndef MDIO_VEND2_PMA_CDR_CONTROL +#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056 +#endif + +#ifndef MDIO_CTRL1_SPEED1G +#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) +#endif + +#ifndef MDIO_VEND2_CTRL1_AN_ENABLE +#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12) +#endif + +#ifndef MDIO_VEND2_CTRL1_AN_RESTART +#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9) +#endif + +#ifndef MDIO_VEND2_CTRL1_SS6 +#define MDIO_VEND2_CTRL1_SS6 BIT(6) +#endif + +#ifndef MDIO_VEND2_CTRL1_SS13 +#define MDIO_VEND2_CTRL1_SS13 BIT(13) +#endif + +/* MDIO mask values */ +#define AXGBE_AN_CL73_INT_CMPLT BIT(0) +#define AXGBE_AN_CL73_INC_LINK BIT(1) +#define AXGBE_AN_CL73_PG_RCV BIT(2) +#define AXGBE_AN_CL73_INT_MASK 0x07 + +#define AXGBE_XNP_MCF_NULL_MESSAGE 0x001 +#define AXGBE_XNP_ACK_PROCESSED BIT(12) +#define AXGBE_XNP_MP_FORMATTED BIT(13) +#define AXGBE_XNP_NP_EXCHANGE BIT(15) + +#define AXGBE_KR_TRAINING_START BIT(0) +#define AXGBE_KR_TRAINING_ENABLE BIT(1) + +#define AXGBE_PCS_CL37_BP BIT(12) + +#define AXGBE_AN_CL37_INT_CMPLT BIT(0) +#define AXGBE_AN_CL37_INT_MASK 0x01 + +#define AXGBE_AN_CL37_HD_MASK 0x40 +#define AXGBE_AN_CL37_FD_MASK 0x20 + +#define AXGBE_AN_CL37_PCS_MODE_MASK 0x06 +#define AXGBE_AN_CL37_PCS_MODE_BASEX 0x00 +#define AXGBE_AN_CL37_PCS_MODE_SGMII 0x04 +#define AXGBE_AN_CL37_TX_CONFIG_MASK 0x08 + +#define AXGBE_PMA_CDR_TRACK_EN_MASK 0x01 +#define AXGBE_PMA_CDR_TRACK_EN_OFF 0x00 +#define AXGBE_PMA_CDR_TRACK_EN_ON 0x01 + +/*generic*/ +#define __iomem + +#define rmb() rte_rmb() /* dpdk rte provided rmb */ +#define wmb() rte_wmb() /* dpdk rte provided wmb */ + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 + +typedef unsigned char u8; +typedef unsigned short u16; +typedef unsigned int u32; +typedef unsigned long long u64; +typedef unsigned long long dma_addr_t; + +static inline uint32_t low32_value(uint64_t addr) +{ + return (addr) & 0x0ffffffff; +} + +static inline uint32_t high32_value(uint64_t addr) +{ + return (addr >> 32) & 0x0ffffffff; +} + +/*END*/ + +/* Bit setting and getting macros + * The get macro will extract the current bit field value from within + * the variable + * + * The set macro will clear the current bit field value within the + * variable and then set the bit field of the variable to the + * specified value + */ +#define GET_BITS(_var, _index, _width) \ + (((_var) >> (_index)) & ((0x1 << (_width)) - 1)) + +#define SET_BITS(_var, _index, _width, _val) \ +do { \ + (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \ + (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \ +} while (0) + +#define GET_BITS_LE(_var, _index, _width) \ + ((rte_le_to_cpu_32((_var)) >> (_index)) & ((0x1 << (_width)) - 1)) + +#define SET_BITS_LE(_var, _index, _width, _val) \ +do { \ + (_var) &= rte_cpu_to_le_32(~(((0x1 << (_width)) - 1) << (_index)));\ + (_var) |= rte_cpu_to_le_32((((_val) & \ + ((0x1 << (_width)) - 1)) << (_index))); \ +} while (0) + +/* Bit setting and getting macros based on register fields + * The get macro uses the bit field definitions formed using the input + * names to extract the current bit field value from within the + * variable + * + * The set macro uses the bit field definitions formed using the input + * names to set the bit field of the variable to the specified value + */ +#define AXGMAC_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + 
_prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define AXGMAC_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define AXGMAC_GET_BITS_LE(_var, _prefix, _field) \ + GET_BITS_LE((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define AXGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \ + SET_BITS_LE((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +/* Macros for reading or writing registers + * The ioread macros will get bit fields or full values using the + * register definitions formed using the input names + * + * The iowrite macros will set bit fields or full values using the + * register definitions formed using the input names + */ +#define AXGMAC_IOREAD(_pdata, _reg) \ + rte_read32((uint8_t *)((_pdata)->xgmac_regs) + (_reg)) + +#define AXGMAC_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(AXGMAC_IOREAD((_pdata), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define AXGMAC_IOWRITE(_pdata, _reg, _val) \ + rte_write32((_val), \ + (uint8_t *)((_pdata)->xgmac_regs) + (_reg)) + +#define AXGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u32 reg_val = AXGMAC_IOREAD((_pdata), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + AXGMAC_IOWRITE((_pdata), _reg, reg_val); \ +} while (0) + +/* Macros for reading or writing MTL queue or traffic class registers + * Similar to the standard read and write macros except that the + * base register value is calculated by the queue or traffic class number + */ +#define AXGMAC_MTL_IOREAD(_pdata, _n, _reg) \ + rte_read32((uint8_t *)((_pdata)->xgmac_regs) + \ + MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg)) + +#define AXGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \ + GET_BITS(AXGMAC_MTL_IOREAD((_pdata), (_n), (_reg)), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define AXGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \ + rte_write32((_val), (uint8_t *)((_pdata)->xgmac_regs) +\ + MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg)) + +#define AXGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \ +do { \ + u32 reg_val = AXGMAC_MTL_IOREAD((_pdata), (_n), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + AXGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \ +} while (0) + +/* Macros for reading or writing DMA channel registers + * Similar to the standard read and write macros except that the + * base register value is obtained from the ring + */ +#define AXGMAC_DMA_IOREAD(_channel, _reg) \ + rte_read32((uint8_t *)((_channel)->dma_regs) + (_reg)) + +#define AXGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \ + GET_BITS(AXGMAC_DMA_IOREAD((_channel), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define AXGMAC_DMA_IOWRITE(_channel, _reg, _val) \ + rte_write32((_val), \ + (uint8_t *)((_channel)->dma_regs) + (_reg)) + +#define AXGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \ +do { \ + u32 reg_val = AXGMAC_DMA_IOREAD((_channel), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + AXGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * within the register values of XPCS registers. 
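+ *
+ * The indirect PCS access in axgbe_dev.c pairs these accessors: a
+ * window is selected with
+ * XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index) and the
+ * 16-bit MMD value is then read with XPCS16_IOREAD(pdata, offset)
+ * (see axgbe_read_mmd_regs_v2()).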
+ */ +#define XPCS_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XPCS_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XPCS32_IOWRITE(_pdata, _off, _val) \ + rte_write32(_val, \ + (uint8_t *)((_pdata)->xpcs_regs) + (_off)) + +#define XPCS32_IOREAD(_pdata, _off) \ + rte_read32((uint8_t *)((_pdata)->xpcs_regs) + (_off)) + +#define XPCS16_IOWRITE(_pdata, _off, _val) \ + rte_write16(_val, \ + (uint8_t *)((_pdata)->xpcs_regs) + (_off)) + +#define XPCS16_IOREAD(_pdata, _off) \ + rte_read16((uint8_t *)((_pdata)->xpcs_regs) + (_off)) + +/* Macros for building, reading or writing register values or bits + * within the register values of SerDes integration registers. + */ +#define XSIR_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XSIR_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XSIR0_IOREAD(_pdata, _reg) \ + rte_read16((uint8_t *)((_pdata)->sir0_regs) + (_reg)) + +#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XSIR0_IOREAD((_pdata), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XSIR0_IOWRITE(_pdata, _reg, _val) \ + rte_write16((_val), \ + (uint8_t *)((_pdata)->sir0_regs) + (_reg)) + +#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XSIR0_IOWRITE((_pdata), _reg, reg_val); \ +} while (0) + +#define XSIR1_IOREAD(_pdata, _reg) \ + rte_read16((uint8_t *)((_pdata)->sir1_regs) + _reg) + +#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XSIR1_IOREAD((_pdata), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XSIR1_IOWRITE(_pdata, _reg, _val) \ + rte_write16((_val), \ + (uint8_t *)((_pdata)->sir1_regs) + (_reg)) + +#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XSIR1_IOWRITE((_pdata), _reg, reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * within the register values of SerDes RxTx registers. + */ +#define XRXTX_IOREAD(_pdata, _reg) \ + rte_read16((uint8_t *)((_pdata)->rxtx_regs) + (_reg)) + +#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XRXTX_IOREAD((_pdata), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XRXTX_IOWRITE(_pdata, _reg, _val) \ + rte_write16((_val), \ + (uint8_t *)((_pdata)->rxtx_regs) + (_reg)) + +#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XRXTX_IOWRITE((_pdata), _reg, reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * within the register values of MAC Control registers. 
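+ *
+ * For example, XP_IOREAD_BITS(pdata, XP_PROP_0, PORT_SPEEDS) extracts
+ * the four-bit supported-speeds field (bits 23-26) defined by
+ * XP_PROP_0_PORT_SPEEDS_INDEX/_WIDTH above.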
+ */ +#define XP_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XP_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XP_IOREAD(_pdata, _reg) \ + rte_read32((uint8_t *)((_pdata)->xprop_regs) + (_reg)) + +#define XP_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XP_IOREAD((_pdata), (_reg)), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XP_IOWRITE(_pdata, _reg, _val) \ + rte_write32((_val), \ + (uint8_t *)((_pdata)->xprop_regs) + (_reg)) + +#define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u32 reg_val = XP_IOREAD((_pdata), (_reg)); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XP_IOWRITE((_pdata), (_reg), reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * within the register values of I2C Control registers. + */ +#define XI2C_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XI2C_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XI2C_IOREAD(_pdata, _reg) \ + rte_read32((uint8_t *)((_pdata)->xi2c_regs) + (_reg)) + +#define XI2C_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XI2C_IOWRITE(_pdata, _reg, _val) \ + rte_write32((_val), \ + (uint8_t *)((_pdata)->xi2c_regs) + (_reg)) + +#define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u32 reg_val = XI2C_IOREAD((_pdata), (_reg)); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XI2C_IOWRITE((_pdata), (_reg), reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * using MDIO. Different from above because of the use of standardized + * Linux include values. No shifting is performed with the bit + * operations, everything works on mask values. + */ +#define XMDIO_READ(_pdata, _mmd, _reg) \ + ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \ + MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff))) + +#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \ + (XMDIO_READ((_pdata), _mmd, _reg) & _mask) + +#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \ + ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \ + MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff), (_val))) + +#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \ +do { \ + u32 mmd_val = XMDIO_READ((_pdata), (_mmd), (_reg)); \ + mmd_val &= ~(_mask); \ + mmd_val |= (_val); \ + XMDIO_WRITE((_pdata), (_mmd), (_reg), (mmd_val)); \ +} while (0) + +/* + * time_after(a,b) returns true if the time a is after time b. + * + * Do this with "<0" and ">=0" to only test the sign of the result. A + * good compiler would generate better code (and a really good compiler + * wouldn't care). Gcc is currently neither. 
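+ *
+ * For example, the MDIO poll loops in axgbe_dev.c use
+ *   timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+ *   while (time_before(rte_get_timer_cycles(), timeout)) { ... }
+ * to wait roughly one second; the subtraction-based comparison stays
+ * correct even if the cycle counter wraps around.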
+ */
+#define time_after(a, b) ((long)((b) - (a)) < 0)
+#define time_before(a, b) time_after(b, a)
+
+#define time_after_eq(a, b) ((long)((a) - (b)) >= 0)
+#define time_before_eq(a, b) time_after_eq(b, a)
+
+/* Bitmap support APIs */
+static inline int axgbe_test_bit(int nr, volatile unsigned long *addr)
+{
+	int res;
+
+	rte_mb();
+	res = ((*addr) & (1UL << nr)) != 0;
+	rte_mb();
+	return res;
+}
+
+static inline void axgbe_set_bit(unsigned int nr, volatile unsigned long *addr)
+{
+	__sync_fetch_and_or(addr, (1UL << nr));
+}
+
+static inline void axgbe_clear_bit(int nr, volatile unsigned long *addr)
+{
+	__sync_fetch_and_and(addr, ~(1UL << nr));
+}
+
+static inline int axgbe_test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = (1UL << nr);
+
+	return __sync_fetch_and_and(addr, ~mask) & mask;
+}
+
+static inline unsigned long msecs_to_timer_cycles(unsigned int m)
+{
+	/* Multiply before dividing so that sub-second values do not
+	 * truncate to zero cycles.
+	 */
+	return (rte_get_timer_hz() * m) / 1000;
+}
+
+#endif /* __AXGBE_COMMON_H__ */
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
new file mode 100644
index 00000000..707f1ee9
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -0,0 +1,1103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+#include "axgbe_rxtx.h"
+
+static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
+{
+	return pdata->eth_dev->data->mtu + ETHER_HDR_LEN +
+		ETHER_CRC_LEN + VLAN_HLEN;
+}
+
+/* Query the MDIO busy bit */
+static int mdio_complete(struct axgbe_port *pdata)
+{
+	if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
+		return 1;
+
+	return 0;
+}
+
+static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr,
+				    int reg, u16 val)
+{
+	unsigned int mdio_sca, mdio_sccd;
+	uint64_t timeout;
+
+	mdio_sca = 0;
+	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+	mdio_sccd = 0;
+	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
+	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
+	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+	timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+	while (time_before(rte_get_timer_cycles(), timeout)) {
+		rte_delay_us(100);
+		if (mdio_complete(pdata))
+			return 0;
+	}
+
+	PMD_DRV_LOG(ERR, "MDIO write operation timed out\n");
+	return -ETIMEDOUT;
+}
+
+static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr,
+				   int reg)
+{
+	unsigned int mdio_sca, mdio_sccd;
+	uint64_t timeout;
+
+	mdio_sca = 0;
+	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+	mdio_sccd = 0;
+	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
+	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+	timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+
+	while (time_before(rte_get_timer_cycles(), timeout)) {
+		rte_delay_us(100);
+		if (mdio_complete(pdata))
+			goto success;
+	}
+
+	PMD_DRV_LOG(ERR, "MDIO read operation timed out\n");
+	return -ETIMEDOUT;
+
+success:
+	return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
+}
+
+static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
+				  enum axgbe_mdio_mode
mode) +{ + unsigned int reg_val = 0; + + switch (mode) { + case AXGBE_MDIO_MODE_CL22: + if (port > AXGMAC_MAX_C22_PORT) + return -EINVAL; + reg_val |= (1 << port); + break; + case AXGBE_MDIO_MODE_CL45: + break; + default: + return -EINVAL; + } + AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val); + + return 0; +} + +static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata, + int prtad __rte_unused, int mmd_reg) +{ + unsigned int mmd_address, index, offset; + int mmd_data; + + if (mmd_reg & MII_ADDR_C45) + mmd_address = mmd_reg & ~MII_ADDR_C45; + else + mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); + + /* The PCS registers are accessed using mmio. The underlying + * management interface uses indirect addressing to access the MMD + * register sets. This requires accessing of the PCS register in two + * phases, an address phase and a data phase. + * + * The mmio interface is based on 16-bit offsets and values. All + * register offsets must therefore be adjusted by left shifting the + * offset 1 bit and reading 16 bits of data. + */ + mmd_address <<= 1; + index = mmd_address & ~pdata->xpcs_window_mask; + offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); + + pthread_mutex_lock(&pdata->xpcs_mutex); + + XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); + mmd_data = XPCS16_IOREAD(pdata, offset); + + pthread_mutex_unlock(&pdata->xpcs_mutex); + + return mmd_data; +} + +static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata, + int prtad __rte_unused, + int mmd_reg, int mmd_data) +{ + unsigned int mmd_address, index, offset; + + if (mmd_reg & MII_ADDR_C45) + mmd_address = mmd_reg & ~MII_ADDR_C45; + else + mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); + + /* The PCS registers are accessed using mmio. The underlying + * management interface uses indirect addressing to access the MMD + * register sets. This requires accessing of the PCS register in two + * phases, an address phase and a data phase. + * + * The mmio interface is based on 16-bit offsets and values. All + * register offsets must therefore be adjusted by left shifting the + * offset 1 bit and writing 16 bits of data. 
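+ *
+ * Illustrative example: MMD 7, register 0x0005 yields the flat address
+ * (7 << 16) | 0x0005; after the left shift, the bits above
+ * xpcs_window_mask select the window index and the bits below it give
+ * the offset within the currently mapped window.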
+ */ + mmd_address <<= 1; + index = mmd_address & ~pdata->xpcs_window_mask; + offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); + + pthread_mutex_lock(&pdata->xpcs_mutex); + + XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); + XPCS16_IOWRITE(pdata, offset, mmd_data); + + pthread_mutex_unlock(&pdata->xpcs_mutex); +} + +static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad, + int mmd_reg) +{ + switch (pdata->vdata->xpcs_access) { + case AXGBE_XPCS_ACCESS_V1: + PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n"); + return -1; + case AXGBE_XPCS_ACCESS_V2: + default: + return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg); + } +} + +static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad, + int mmd_reg, int mmd_data) +{ + switch (pdata->vdata->xpcs_access) { + case AXGBE_XPCS_ACCESS_V1: + PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n"); + return; + case AXGBE_XPCS_ACCESS_V2: + default: + return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data); + } +} + +static int axgbe_set_speed(struct axgbe_port *pdata, int speed) +{ + unsigned int ss; + + switch (speed) { + case SPEED_1000: + ss = 0x03; + break; + case SPEED_2500: + ss = 0x02; + break; + case SPEED_10000: + ss = 0x00; + break; + default: + return -EINVAL; + } + + if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss) + AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss); + + return 0; +} + +static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata) +{ + unsigned int max_q_count, q_count; + unsigned int reg, reg_val; + unsigned int i; + + /* Clear MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); + + /* Clear MAC flow control */ + max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; + q_count = RTE_MIN(pdata->tx_q_count, + max_q_count); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + reg_val = AXGMAC_IOREAD(pdata, reg); + AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0); + AXGMAC_IOWRITE(pdata, reg, reg_val); + + reg += MAC_QTFCR_INC; + } + + return 0; +} + +static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata) +{ + unsigned int max_q_count, q_count; + unsigned int reg, reg_val; + unsigned int i; + + /* Set MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) { + unsigned int ehfc = 0; + + /* Flow control thresholds are established */ + if (pdata->rx_rfd[i]) + ehfc = 1; + + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc); + } + + /* Set MAC flow control */ + max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; + q_count = RTE_MIN(pdata->tx_q_count, + max_q_count); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + reg_val = AXGMAC_IOREAD(pdata, reg); + + /* Enable transmit flow control */ + AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1); + /* Set pause time */ + AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff); + + AXGMAC_IOWRITE(pdata, reg, reg_val); + + reg += MAC_QTFCR_INC; + } + + return 0; +} + +static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata) +{ + AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0); + + return 0; +} + +static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata) +{ + AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1); + + return 0; +} + +static int axgbe_config_tx_flow_control(struct axgbe_port *pdata) +{ + if (pdata->tx_pause) + axgbe_enable_tx_flow_control(pdata); + else + axgbe_disable_tx_flow_control(pdata); + + return 0; +} + +static int axgbe_config_rx_flow_control(struct axgbe_port *pdata) +{ + if (pdata->rx_pause) + 
axgbe_enable_rx_flow_control(pdata); + else + axgbe_disable_rx_flow_control(pdata); + + return 0; +} + +static void axgbe_config_flow_control(struct axgbe_port *pdata) +{ + axgbe_config_tx_flow_control(pdata); + axgbe_config_rx_flow_control(pdata); + + AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0); +} + +static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata, + unsigned int queue, + unsigned int q_fifo_size) +{ + unsigned int frame_fifo_size; + unsigned int rfa, rfd; + + frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata)); + + /* This path deals with just maximum frame sizes which are + * limited to a jumbo frame of 9,000 (plus headers, etc.) + * so we can never exceed the maximum allowable RFA/RFD + * values. + */ + if (q_fifo_size <= 2048) { + /* rx_rfd to zero to signal no flow control */ + pdata->rx_rfa[queue] = 0; + pdata->rx_rfd[queue] = 0; + return; + } + + if (q_fifo_size <= 4096) { + /* Between 2048 and 4096 */ + pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */ + pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */ + return; + } + + if (q_fifo_size <= frame_fifo_size) { + /* Between 4096 and max-frame */ + pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */ + pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */ + return; + } + + if (q_fifo_size <= (frame_fifo_size * 3)) { + /* Between max-frame and 3 max-frames, + * trigger if we get just over a frame of data and + * resume when we have just under half a frame left. + */ + rfa = q_fifo_size - frame_fifo_size; + rfd = rfa + (frame_fifo_size / 2); + } else { + /* Above 3 max-frames - trigger when just over + * 2 frames of space available + */ + rfa = frame_fifo_size * 2; + rfa += AXGMAC_FLOW_CONTROL_UNIT; + rfd = rfa + frame_fifo_size; + } + + pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa); + pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd); +} + +static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata) +{ + unsigned int q_fifo_size; + unsigned int i; + + for (i = 0; i < pdata->rx_q_count; i++) { + q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT; + + axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size); + } +} + +static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata) +{ + unsigned int i; + + for (i = 0; i < pdata->rx_q_count; i++) { + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, + pdata->rx_rfa[i]); + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, + pdata->rx_rfd[i]); + } +} + +static int __axgbe_exit(struct axgbe_port *pdata) +{ + unsigned int count = 2000; + + /* Issue a software reset */ + AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1); + rte_delay_us(10); + + /* Poll Until Poll Condition */ + while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) + rte_delay_us(500); + + if (!count) + return -EBUSY; + + return 0; +} + +static int axgbe_exit(struct axgbe_port *pdata) +{ + int ret; + + /* To guard against possible incorrectly generated interrupts, + * issue the software reset twice. 
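+ * Each attempt polls the DMA_MR SWR bit for up to 2000 * 500us
+ * (about one second) before failing with -EBUSY.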
+ */
+	ret = __axgbe_exit(pdata);
+	if (ret)
+		return ret;
+
+	return __axgbe_exit(pdata);
+}
+
+static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
+{
+	unsigned int i, count;
+
+	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+		return 0;
+
+	for (i = 0; i < pdata->tx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
+
+	/* Poll Until Poll Condition */
+	for (i = 0; i < pdata->tx_q_count; i++) {
+		count = 2000;
+		while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
+							 MTL_Q_TQOMR, FTQ))
+			rte_delay_us(500);
+
+		if (!count)
+			return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void axgbe_config_dma_bus(struct axgbe_port *pdata)
+{
+	/* Set enhanced addressing mode */
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+
+	/* Outstanding read/write requests */
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);
+
+	/* Set the System Bus mode */
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
+}
+
+static void axgbe_config_dma_cache(struct axgbe_port *pdata)
+{
+	unsigned int arcache, awcache, arwcache;
+
+	arcache = 0;
+	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);
+	AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+
+	awcache = 0;
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);
+	AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+
+	arwcache = 0;
+	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);
+	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);
+	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3);
+	AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
+}
+
+static void axgbe_config_edma_control(struct axgbe_port *pdata)
+{
+	AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
+	AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
+}
+
+static int axgbe_config_osp_mode(struct axgbe_port *pdata)
+{
+	/* Force DMA to operate on second packet before closing descriptors
+	 * of first packet
+	 */
+	struct axgbe_tx_queue *txq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
+					pdata->tx_osp_mode);
+	}
+
+	return 0;
+}
+
+static int axgbe_config_pblx8(struct axgbe_port *pdata)
+{
+	struct axgbe_tx_queue *txq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
+					pdata->pblx8);
+	}
+	return 0;
+}
+
+static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
+{
+	struct axgbe_tx_queue *txq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
+					pdata->tx_pbl);
+	}
+
+	return 0;
+}
+
+static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
+{
+	struct axgbe_rx_queue *rxq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+		rxq = pdata->eth_dev->data->rx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
+					pdata->rx_pbl);
+	}
+
+	return 0;
+}
+
+static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
+{
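+	/* Derive each Rx queue's buffer size from its mempool data room
+	 * (minus headroom), round it up to AXGBE_RX_BUF_ALIGN, and track
+	 * the largest value in pdata->rx_buf_size before programming the
+	 * channel's RBSZ field.
+	 */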
+	struct axgbe_rx_queue *rxq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+		rxq = pdata->eth_dev->data->rx_queues[i];
+
+		rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
+			RTE_PKTMBUF_HEADROOM;
+		rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
+			~(AXGBE_RX_BUF_ALIGN - 1);
+
+		if (rxq->buf_size > pdata->rx_buf_size)
+			pdata->rx_buf_size = rxq->buf_size;
+
+		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
+					rxq->buf_size);
+	}
+}
+
+static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
+			       unsigned int index, unsigned int val)
+{
+	unsigned int wait;
+
+	if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+		return -EBUSY;
+
+	AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+	wait = 1000;
+	while (wait--) {
+		if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+			return 0;
+
+		rte_delay_us(1500);
+	}
+
+	return -EBUSY;
+}
+
+static int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
+{
+	struct rte_eth_rss_conf *rss_conf;
+	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+	unsigned int *key;
+	int ret;
+
+	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+
+	if (!rss_conf->rss_key)
+		key = (unsigned int *)&pdata->rss_key;
+	else
+		key = (unsigned int *)rss_conf->rss_key;
+
+	while (key_regs--) {
+		ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
+					  key_regs, *key++);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+		ret = axgbe_write_rss_reg(pdata,
+					  AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
+					  pdata->rss_table[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int axgbe_enable_rss(struct axgbe_port *pdata)
+{
+	int ret;
+
+	/* Program the hash key */
+	ret = axgbe_write_rss_hash_key(pdata);
+	if (ret)
+		return ret;
+
+	/* Program the lookup table */
+	ret = axgbe_write_rss_lookup_table(pdata);
+	if (ret)
+		return ret;
+
+	/* Set the RSS options */
+	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+	/* Enable RSS */
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+	return 0;
+}
+
+static void axgbe_rss_options(struct axgbe_port *pdata)
+{
+	struct rte_eth_rss_conf *rss_conf;
+	uint64_t rss_hf;
+
+	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+	rss_hf = rss_conf->rss_hf;
+
+	if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+}
+
+static int axgbe_config_rss(struct axgbe_port *pdata)
+{
+	uint32_t i;
+
+	if (pdata->rss_enable) {
+		/* Initialize RSS hash key and lookup table */
+		uint32_t *key = (uint32_t *)pdata->rss_key;
+
+		for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
+			*key++ = (uint32_t)rte_rand();
+		for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
+			AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+					i % pdata->eth_dev->data->nb_rx_queues);
+		axgbe_rss_options(pdata);
+		if (axgbe_enable_rss(pdata)) {
+			PMD_DRV_LOG(ERR, "Error in enabling RSS support\n");
+			return -1;
+		}
+	} else {
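+		/* RSS not requested: make sure the MAC has it disabled */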
AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0); + } + + return 0; +} + +static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata) +{ + struct axgbe_tx_queue *txq; + unsigned int dma_ch_isr, dma_ch_ier; + unsigned int i; + + for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) { + txq = pdata->eth_dev->data->tx_queues[i]; + + /* Clear all the interrupts which are set */ + dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR); + AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr); + + /* Clear all interrupt enable bits */ + dma_ch_ier = 0; + + /* Enable following interrupts + * NIE - Normal Interrupt Summary Enable + * AIE - Abnormal Interrupt Summary Enable + * FBEE - Fatal Bus Error Enable + */ + AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0); + AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1); + AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); + + /* Enable following Rx interrupts + * RBUE - Receive Buffer Unavailable Enable + * RIE - Receive Interrupt Enable (unless using + * per channel interrupts in edge triggered + * mode) + */ + AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0); + + AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier); + } +} + +static void wrapper_tx_desc_init(struct axgbe_port *pdata) +{ + struct axgbe_tx_queue *txq; + unsigned int i; + + for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) { + txq = pdata->eth_dev->data->tx_queues[i]; + txq->cur = 0; + txq->dirty = 0; + /* Update the total number of Tx descriptors */ + AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1); + /* Update the starting address of descriptor ring */ + AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI, + high32_value(txq->ring_phys_addr)); + AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO, + low32_value(txq->ring_phys_addr)); + } +} + +static int wrapper_rx_desc_init(struct axgbe_port *pdata) +{ + struct axgbe_rx_queue *rxq; + struct rte_mbuf *mbuf; + volatile union axgbe_rx_desc *desc; + unsigned int i, j; + + for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) { + rxq = pdata->eth_dev->data->rx_queues[i]; + + /* Initialize software ring entries */ + rxq->mbuf_alloc = 0; + rxq->cur = 0; + rxq->dirty = 0; + desc = AXGBE_GET_DESC_PT(rxq, 0); + + for (j = 0; j < rxq->nb_desc; j++) { + mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); + if (mbuf == NULL) { + PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n", + (unsigned int)rxq->queue_id, j); + axgbe_dev_rx_queue_release(rxq); + return -ENOMEM; + } + rxq->sw_ring[j] = mbuf; + /* Mbuf populate */ + mbuf->next = NULL; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->port = rxq->port_id; + desc->read.baddr = + rte_cpu_to_le_64( + rte_mbuf_data_iova_default(mbuf)); + rte_wmb(); + AXGMAC_SET_BITS_LE(desc->read.desc3, + RX_NORMAL_DESC3, OWN, 1); + rte_wmb(); + rxq->mbuf_alloc++; + desc++; + } + /* Update the total number of Rx descriptors */ + AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR, + rxq->nb_desc - 1); + /* Update the starting address of descriptor ring */ + AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI, + high32_value(rxq->ring_phys_addr)); + AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO, + low32_value(rxq->ring_phys_addr)); + /* Update the Rx Descriptor Tail Pointer */ + AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO, + low32_value(rxq->ring_phys_addr + + (rxq->nb_desc - 1) * + sizeof(union axgbe_rx_desc))); + } + return 0; +} + +static void axgbe_config_mtl_mode(struct axgbe_port *pdata) +{ + unsigned int i; + + /* Set Tx to weighted round robin scheduling algorithm */ + AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); + + /* Set Tx traffic 
classes to use WRR algorithm with equal weights */
+	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+					MTL_TSA_ETS);
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
+	}
+
+	/* Set Rx to strict priority algorithm */
+	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+}
+
+static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->tx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
+
+	return 0;
+}
+
+static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->rx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
+
+	return 0;
+}
+
+static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
+				     unsigned int val)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->tx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
+
+	return 0;
+}
+
+static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
+				     unsigned int val)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->rx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
+
+	return 0;
+}
+
+/* Distributing fifo size */
+static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
+{
+	unsigned int fifo_size;
+	unsigned int q_fifo_size;
+	unsigned int p_fifo, i;
+
+	fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
+			    pdata->hw_feat.rx_fifo_size);
+	q_fifo_size = fifo_size / pdata->rx_q_count;
+
+	/* Calculate the fifo setting by dividing the queue's fifo size
+	 * by the fifo allocation increment (with 0 representing the
+	 * base allocation increment so decrement the result
+	 * by 1).
+	 */
+	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
+	if (p_fifo)
+		p_fifo--;
+
+	for (i = 0; i < pdata->rx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
+	pdata->fifo = p_fifo;
+
+	/* Calculate and configure the flow control thresholds */
+	axgbe_calculate_flow_control_threshold(pdata);
+	axgbe_config_flow_control_threshold(pdata);
+}
+
+static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
+{
+	unsigned int fifo_size;
+	unsigned int q_fifo_size;
+	unsigned int p_fifo, i;
+
+	fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
+			    pdata->hw_feat.tx_fifo_size);
+	q_fifo_size = fifo_size / pdata->tx_q_count;
+
+	/* Calculate the fifo setting by dividing the queue's fifo size
+	 * by the fifo allocation increment (with 0 representing the
+	 * base allocation increment so decrement the result
+	 * by 1).
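+	 * For example (illustrative numbers): a 65536-byte Tx fifo shared
+	 * by 4 queues gives q_fifo_size = 16384; with AXGMAC_FIFO_UNIT =
+	 * 256 this yields p_fifo = 16384 / 256 - 1 = 63, the value that
+	 * is written to TQS.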
+	 */
+	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
+	if (p_fifo)
+		p_fifo--;
+
+	for (i = 0; i < pdata->tx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);
+}
+
+static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
+{
+	unsigned int qptc, qptc_extra, queue;
+	unsigned int i, j, reg, reg_val;
+
+	/* Map the MTL Tx Queues to Traffic Classes
+	 * Note: Tx Queues >= Traffic Classes
+	 */
+	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+		for (j = 0; j < qptc; j++)
+			AXGMAC_MTL_IOWRITE_BITS(pdata, queue++, MTL_Q_TQOMR,
+						Q2TCMAP, i);
+		if (i < qptc_extra)
+			AXGMAC_MTL_IOWRITE_BITS(pdata, queue++, MTL_Q_TQOMR,
+						Q2TCMAP, i);
+	}
+
+	if (pdata->rss_enable) {
+		/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
+		reg = MTL_RQDCM0R;
+		reg_val = 0;
+		for (i = 0; i < pdata->rx_q_count;) {
+			reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
+
+			if ((i % MTL_RQDCM_Q_PER_REG) &&
+			    (i != pdata->rx_q_count))
+				continue;
+
+			AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+			reg += MTL_RQDCM_INC;
+			reg_val = 0;
+		}
+	}
+}
+
+static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
+{
+	unsigned int mtl_q_isr;
+	unsigned int q_count, i;
+
+	q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+	for (i = 0; i < q_count; i++) {
+		/* Clear all the interrupts which are set */
+		mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
+		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
+
+		/* No MTL interrupts to be enabled */
+		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
+	}
+}
+
+static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
+{
+	unsigned int mac_addr_hi, mac_addr_lo;
+
+	mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+		(addr[1] << 8) | (addr[0] << 0);
+
+	AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
+	AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
+
+	return 0;
+}
+
+static void axgbe_config_mac_address(struct axgbe_port *pdata)
+{
+	axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
+}
+
+static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
+{
+	unsigned int val;
+
+	val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ?
1 : 0; + + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); +} + +static void axgbe_config_mac_speed(struct axgbe_port *pdata) +{ + axgbe_set_speed(pdata, pdata->phy_speed); +} + +static void axgbe_config_checksum_offload(struct axgbe_port *pdata) +{ + if (pdata->rx_csum_enable) + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1); + else + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0); +} + +static int axgbe_init(struct axgbe_port *pdata) +{ + int ret; + + /* Flush Tx queues */ + ret = axgbe_flush_tx_queues(pdata); + if (ret) + return ret; + /* Initialize DMA related features */ + axgbe_config_dma_bus(pdata); + axgbe_config_dma_cache(pdata); + axgbe_config_edma_control(pdata); + axgbe_config_osp_mode(pdata); + axgbe_config_pblx8(pdata); + axgbe_config_tx_pbl_val(pdata); + axgbe_config_rx_pbl_val(pdata); + axgbe_config_rx_buffer_size(pdata); + axgbe_config_rss(pdata); + wrapper_tx_desc_init(pdata); + ret = wrapper_rx_desc_init(pdata); + if (ret) + return ret; + axgbe_enable_dma_interrupts(pdata); + + /* Initialize MTL related features */ + axgbe_config_mtl_mode(pdata); + axgbe_config_queue_mapping(pdata); + axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); + axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); + axgbe_config_tx_threshold(pdata, pdata->tx_threshold); + axgbe_config_rx_threshold(pdata, pdata->rx_threshold); + axgbe_config_tx_fifo_size(pdata); + axgbe_config_rx_fifo_size(pdata); + + axgbe_enable_mtl_interrupts(pdata); + + /* Initialize MAC related features */ + axgbe_config_mac_address(pdata); + axgbe_config_jumbo_enable(pdata); + axgbe_config_flow_control(pdata); + axgbe_config_mac_speed(pdata); + axgbe_config_checksum_offload(pdata); + + return 0; +} + +void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if) +{ + hw_if->exit = axgbe_exit; + hw_if->config_flow_control = axgbe_config_flow_control; + + hw_if->init = axgbe_init; + + hw_if->read_mmd_regs = axgbe_read_mmd_regs; + hw_if->write_mmd_regs = axgbe_write_mmd_regs; + + hw_if->set_speed = axgbe_set_speed; + + hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode; + hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs; + hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs; + /* For FLOW ctrl */ + hw_if->config_tx_flow_control = axgbe_config_tx_flow_control; + hw_if->config_rx_flow_control = axgbe_config_rx_flow_control; +} diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c new file mode 100644 index 00000000..9ae9f063 --- /dev/null +++ b/drivers/net/axgbe/axgbe_ethdev.c @@ -0,0 +1,770 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
+ */ + +#include "axgbe_rxtx.h" +#include "axgbe_ethdev.h" +#include "axgbe_common.h" +#include "axgbe_phy.h" + +static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev); +static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev); +static int axgbe_dev_configure(struct rte_eth_dev *dev); +static int axgbe_dev_start(struct rte_eth_dev *dev); +static void axgbe_dev_stop(struct rte_eth_dev *dev); +static void axgbe_dev_interrupt_handler(void *param); +static void axgbe_dev_close(struct rte_eth_dev *dev); +static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); +static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); +static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); +static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int axgbe_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static int axgbe_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static void axgbe_dev_stats_reset(struct rte_eth_dev *dev); +static void axgbe_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); + +/* The set of PCI devices this driver supports */ +#define AMD_PCI_VENDOR_ID 0x1022 +#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458 +#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459 + +int axgbe_logtype_init; +int axgbe_logtype_driver; + +static const struct rte_pci_id pci_id_axgbe_map[] = { + {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)}, + {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)}, + { .vendor_id = 0, }, +}; + +static struct axgbe_version_data axgbe_v2a = { + .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2, + .xpcs_access = AXGBE_XPCS_ACCESS_V2, + .mmc_64bit = 1, + .tx_max_fifo_size = 229376, + .rx_max_fifo_size = 229376, + .tx_tstamp_workaround = 1, + .ecc_support = 1, + .i2c_support = 1, + .an_cdr_workaround = 1, +}; + +static struct axgbe_version_data axgbe_v2b = { + .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2, + .xpcs_access = AXGBE_XPCS_ACCESS_V2, + .mmc_64bit = 1, + .tx_max_fifo_size = 65536, + .rx_max_fifo_size = 65536, + .tx_tstamp_workaround = 1, + .ecc_support = 1, + .i2c_support = 1, + .an_cdr_workaround = 1, +}; + +static const struct rte_eth_desc_lim rx_desc_lim = { + .nb_max = AXGBE_MAX_RING_DESC, + .nb_min = AXGBE_MIN_RING_DESC, + .nb_align = 8, +}; + +static const struct rte_eth_desc_lim tx_desc_lim = { + .nb_max = AXGBE_MAX_RING_DESC, + .nb_min = AXGBE_MIN_RING_DESC, + .nb_align = 8, +}; + +static const struct eth_dev_ops axgbe_eth_dev_ops = { + .dev_configure = axgbe_dev_configure, + .dev_start = axgbe_dev_start, + .dev_stop = axgbe_dev_stop, + .dev_close = axgbe_dev_close, + .promiscuous_enable = axgbe_dev_promiscuous_enable, + .promiscuous_disable = axgbe_dev_promiscuous_disable, + .allmulticast_enable = axgbe_dev_allmulticast_enable, + .allmulticast_disable = axgbe_dev_allmulticast_disable, + .link_update = axgbe_dev_link_update, + .stats_get = axgbe_dev_stats_get, + .stats_reset = axgbe_dev_stats_reset, + .dev_infos_get = axgbe_dev_info_get, + .rx_queue_setup = axgbe_dev_rx_queue_setup, + .rx_queue_release = axgbe_dev_rx_queue_release, + .tx_queue_setup = axgbe_dev_tx_queue_setup, + .tx_queue_release = axgbe_dev_tx_queue_release, +}; + +static int axgbe_phy_reset(struct axgbe_port *pdata) +{ + pdata->phy_link = -1; + pdata->phy_speed = SPEED_UNKNOWN; + return pdata->phy_if.phy_reset(pdata); +} + +/* + * Interrupt handler triggered by NIC for handling + * specific interrupt. 
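+ * The handler first lets the PHY layer service the auto-negotiation
+ * interrupt, then acknowledges any DMA channel 0 interrupt by writing
+ * DMA_CH_SR back to itself, and finally re-enables the device
+ * interrupt, which is disabled after generation.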
+ *
+ * @param handle
+ *  Pointer to interrupt handle.
+ * @param param
+ *  The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ *  void
+ */
+static void
+axgbe_dev_interrupt_handler(void *param)
+{
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+	struct axgbe_port *pdata = dev->data->dev_private;
+	unsigned int dma_isr, dma_ch_isr;
+
+	pdata->phy_if.an_isr(pdata);
+	/* DMA related interrupts */
+	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
+	if (dma_isr) {
+		if (dma_isr & 1) {
+			dma_ch_isr =
+				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
+						  pdata->rx_queues[0],
+						  DMA_CH_SR);
+			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
+					   pdata->rx_queues[0],
+					   DMA_CH_SR, dma_ch_isr);
+		}
+	}
+	/* Enable interrupts since disabled after generation */
+	rte_intr_enable(&pdata->pci_dev->intr_handle);
+}
+
+/*
+ * Configure device link speed and setup link.
+ * It returns 0 on success.
+ */
+static int
+axgbe_dev_configure(struct rte_eth_dev *dev)
+{
+	struct axgbe_port *pdata = dev->data->dev_private;
+	/* Checksum offload to hardware */
+	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
+		DEV_RX_OFFLOAD_CHECKSUM;
+	return 0;
+}
+
+static int
+axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
+{
+	struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+
+	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+		pdata->rss_enable = 1;
+	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+		pdata->rss_enable = 0;
+	else
+		return -1;
+	return 0;
+}
+
+static int
+axgbe_dev_start(struct rte_eth_dev *dev)
+{
+	PMD_INIT_FUNC_TRACE();
+	struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+	int ret;
+
+	/* Multiqueue RSS */
+	ret = axgbe_dev_rx_mq_config(dev);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
+		return ret;
+	}
+	ret = axgbe_phy_reset(pdata);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "phy reset failed\n");
+		return ret;
+	}
+	ret = pdata->hw_if.init(pdata);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "dev_init failed\n");
+		return ret;
+	}
+
+	/* enable uio/vfio intr/eventfd mapping */
+	rte_intr_enable(&pdata->pci_dev->intr_handle);
+
+	/* phy start */
+	pdata->phy_if.phy_start(pdata);
+	axgbe_dev_enable_tx(dev);
+	axgbe_dev_enable_rx(dev);
+
+	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
+	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
+	return 0;
+}
+
+/* Stop device: disable rx and tx functions to allow for reconfiguring. */
+static void
+axgbe_dev_stop(struct rte_eth_dev *dev)
+{
+	PMD_INIT_FUNC_TRACE();
+	struct axgbe_port *pdata = dev->data->dev_private;
+
+	rte_intr_disable(&pdata->pci_dev->intr_handle);
+
+	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
+		return;
+
+	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+	axgbe_dev_disable_tx(dev);
+	axgbe_dev_disable_rx(dev);
+
+	pdata->phy_if.phy_stop(pdata);
+	pdata->hw_if.exit(pdata);
+	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
+	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
+}
+
+/* Clear all resources like TX/RX queues.
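+ * This backs the .dev_close ethdev op; the queue memory itself is
+ * released through axgbe_dev_clear_queues().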
*/ +static void +axgbe_dev_close(struct rte_eth_dev *dev) +{ + axgbe_dev_clear_queues(dev); +} + +static void +axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + struct axgbe_port *pdata = dev->data->dev_private; + + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1); +} + +static void +axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + struct axgbe_port *pdata = dev->data->dev_private; + + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0); +} + +static void +axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + struct axgbe_port *pdata = dev->data->dev_private; + + if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM)) + return; + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1); +} + +static void +axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + struct axgbe_port *pdata = dev->data->dev_private; + + if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM)) + return; + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0); +} + +/* return 0 means link status changed, -1 means not changed */ +static int +axgbe_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete __rte_unused) +{ + struct axgbe_port *pdata = dev->data->dev_private; + struct rte_eth_link link; + int ret = 0; + + PMD_INIT_FUNC_TRACE(); + rte_delay_ms(800); + + pdata->phy_if.phy_status(pdata); + + memset(&link, 0, sizeof(struct rte_eth_link)); + link.link_duplex = pdata->phy.duplex; + link.link_status = pdata->phy_link; + link.link_speed = pdata->phy_speed; + link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + ret = rte_eth_linkstatus_set(dev, &link); + if (ret == -1) + PMD_DRV_LOG(ERR, "No change in link status\n"); + + return ret; +} + +static int +axgbe_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + struct axgbe_rx_queue *rxq; + struct axgbe_tx_queue *txq; + unsigned int i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + stats->q_ipackets[i] = rxq->pkts; + stats->ipackets += rxq->pkts; + stats->q_ibytes[i] = rxq->bytes; + stats->ibytes += rxq->bytes; + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + stats->q_opackets[i] = txq->pkts; + stats->opackets += txq->pkts; + stats->q_obytes[i] = txq->bytes; + stats->obytes += txq->bytes; + } + + return 0; +} + +static void +axgbe_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct axgbe_rx_queue *rxq; + struct axgbe_tx_queue *txq; + unsigned int i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->pkts = 0; + rxq->bytes = 0; + rxq->errors = 0; + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + txq->pkts = 0; + txq->bytes = 0; + txq->errors = 0; + } +} + +static void +axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct axgbe_port *pdata = dev->data->dev_private; + + dev_info->max_rx_queues = pdata->rx_ring_count; + dev_info->max_tx_queues = pdata->tx_ring_count; + dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE; + dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE; + dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS; + dev_info->speed_capa = ETH_LINK_SPEED_10G; + + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_KEEP_CRC; + + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + 
DEV_TX_OFFLOAD_TCP_CKSUM; + + if (pdata->hw_feat.rss) { + dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD; + dev_info->reta_size = pdata->hw_feat.hash_table_size; + dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE; + } + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_free_thresh = AXGBE_RX_FREE_THRESH, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_free_thresh = AXGBE_TX_FREE_THRESH, + }; +} + +static void axgbe_get_all_hw_features(struct axgbe_port *pdata) +{ + unsigned int mac_hfr0, mac_hfr1, mac_hfr2; + struct axgbe_hw_features *hw_feat = &pdata->hw_feat; + + mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R); + mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R); + mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R); + + memset(hw_feat, 0, sizeof(*hw_feat)); + + hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR); + + /* Hardware feature register 0 */ + hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); + hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); + hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL); + hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL); + hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL); + hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL); + hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL); + hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL); + hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL); + hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL); + hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL); + hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, + ADDMACADRSEL); + hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL); + hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS); + + /* Hardware feature register 1 */ + hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + RXFIFOSIZE); + hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + TXFIFOSIZE); + hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1, + MAC_HWF1R, ADVTHWORD); + hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64); + hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN); + hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); + hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); + hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); + hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN); + hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); + hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + HASHTBLSZ); + hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + L3L4FNUM); + + /* Hardware feature register 2 */ + hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT); + hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT); + hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT); + hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT); + hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM); + hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, + AUXSNAPNUM); + + /* Translate the Hash Table size into actual number */ + switch (hw_feat->hash_table_size) { + case 0: + break; + case 1: + hw_feat->hash_table_size = 64; + break; + case 2: + hw_feat->hash_table_size = 128; + break; + case 3: + hw_feat->hash_table_size = 256; + break; + } + + /* Translate the address width setting 
into actual number */ + switch (hw_feat->dma_width) { + case 0: + hw_feat->dma_width = 32; + break; + case 1: + hw_feat->dma_width = 40; + break; + case 2: + hw_feat->dma_width = 48; + break; + default: + hw_feat->dma_width = 32; + } + + /* The Queue, Channel and TC counts are zero based so increment them + * to get the actual number + */ + hw_feat->rx_q_cnt++; + hw_feat->tx_q_cnt++; + hw_feat->rx_ch_cnt++; + hw_feat->tx_ch_cnt++; + hw_feat->tc_cnt++; + + /* Translate the fifo sizes into actual numbers */ + hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7); + hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7); +} + +static void axgbe_init_all_fptrs(struct axgbe_port *pdata) +{ + axgbe_init_function_ptrs_dev(&pdata->hw_if); + axgbe_init_function_ptrs_phy(&pdata->phy_if); + axgbe_init_function_ptrs_i2c(&pdata->i2c_if); + pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if); +} + +static void axgbe_set_counts(struct axgbe_port *pdata) +{ + /* Set all the function pointers */ + axgbe_init_all_fptrs(pdata); + + /* Populate the hardware features */ + axgbe_get_all_hw_features(pdata); + + /* Set default max values if not provided */ + if (!pdata->tx_max_channel_count) + pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt; + if (!pdata->rx_max_channel_count) + pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt; + + if (!pdata->tx_max_q_count) + pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt; + if (!pdata->rx_max_q_count) + pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt; + + /* Calculate the number of Tx and Rx rings to be created + * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set + * the number of Tx queues to the number of Tx channels + * enabled + * -Rx (DMA) Channels do not map 1-to-1 so use the actual + * number of Rx queues or maximum allowed + */ + pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt, + pdata->tx_max_channel_count); + pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count, + pdata->tx_max_q_count); + + pdata->tx_q_count = pdata->tx_ring_count; + + pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt, + pdata->rx_max_channel_count); + + pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt, + pdata->rx_max_q_count); +} + +static void axgbe_default_config(struct axgbe_port *pdata) +{ + pdata->pblx8 = DMA_PBL_X8_ENABLE; + pdata->tx_sf_mode = MTL_TSF_ENABLE; + pdata->tx_threshold = MTL_TX_THRESHOLD_64; + pdata->tx_pbl = DMA_PBL_32; + pdata->tx_osp_mode = DMA_OSP_ENABLE; + pdata->rx_sf_mode = MTL_RSF_ENABLE; + pdata->rx_threshold = MTL_RX_THRESHOLD_64; + pdata->rx_pbl = DMA_PBL_32; + pdata->pause_autoneg = 1; + pdata->tx_pause = 0; + pdata->rx_pause = 0; + pdata->phy_speed = SPEED_UNKNOWN; + pdata->power_down = 0; +} + +/* + * It returns 0 on success. + */ +static int +eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) +{ + PMD_INIT_FUNC_TRACE(); + struct axgbe_port *pdata; + struct rte_pci_device *pci_dev; + uint32_t reg, mac_lo, mac_hi; + int ret; + + eth_dev->dev_ops = &axgbe_eth_dev_ops; + eth_dev->rx_pkt_burst = &axgbe_recv_pkts; + + /* + * For secondary processes, we don't initialise any further as primary + * has already done this work. 
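+ * Only eth_dev_ops and the Rx burst function were assigned above; the
+ * register mapping and per-port state below is set up by the primary
+ * process only.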
+ */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	pdata = (struct axgbe_port *)eth_dev->data->dev_private;
+	/* initial state */
+	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
+	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+	pdata->eth_dev = eth_dev;
+
+	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+	pdata->pci_dev = pci_dev;
+
+	pdata->xgmac_regs =
+		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
+	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs +
+				     AXGBE_MAC_PROP_OFFSET);
+	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs +
+				    AXGBE_I2C_CTRL_OFFSET);
+	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
+
+	/* version specific driver data */
+	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
+		pdata->vdata = &axgbe_v2a;
+	else
+		pdata->vdata = &axgbe_v2b;
+
+	/* Configure the PCS indirect addressing support */
+	reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
+	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
+	pdata->xpcs_window <<= 6;
+	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
+	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
+	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
+	pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+	pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+	PMD_INIT_LOG(DEBUG,
+		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
+		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
+	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
+
+	/* Retrieve the MAC address */
+	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
+	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
+	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
+	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
+	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
+	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
+	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
+	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
+
+	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
+					       ETHER_ADDR_LEN, 0);
+	if (!eth_dev->data->mac_addrs) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to alloc %u bytes needed to store MAC addr tbl",
+			     ETHER_ADDR_LEN);
+		return -ENOMEM;
+	}
+
+	if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
+		eth_random_addr(pdata->mac_addr.addr_bytes);
+
+	/* Copy the permanent MAC address */
+	ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
+
+	/* Clock settings */
+	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
+	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
+
+	/* Set the DMA coherency values */
+	pdata->coherent = 1;
+	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
+	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
+	pdata->awcache = AXGBE_DMA_OS_AWCACHE;
+
+	/* Set the maximum channels and queues */
+	reg = XP_IOREAD(pdata, XP_PROP_1);
+	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
+	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
+	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
+	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
+
+	/* Set the hardware channel and queue counts */
+	axgbe_set_counts(pdata);
+
+	/* Set the maximum fifo amounts */
+	reg = XP_IOREAD(pdata, XP_PROP_2);
+	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
+	pdata->tx_max_fifo_size *= 16384;
+	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
+					  pdata->vdata->tx_max_fifo_size);
+	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
+	pdata->rx_max_fifo_size *= 16384;
+	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
+					  pdata->vdata->rx_max_fifo_size);
+	/* Issue software reset to DMA */
+	ret = pdata->hw_if.exit(pdata);
+	if (ret)
+		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");
+
+	/* Set default configuration data */
+	axgbe_default_config(pdata);
+
+	/* Set default max values if not provided */
+	if (!pdata->tx_max_fifo_size)
+		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
+	if (!pdata->rx_max_fifo_size)
+		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
+
+	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
+	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
+	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
+	pthread_mutex_init(&pdata->i2c_mutex, NULL);
+	pthread_mutex_init(&pdata->an_mutex, NULL);
+	pthread_mutex_init(&pdata->phy_mutex, NULL);
+
+	ret = pdata->phy_if.phy_init(pdata);
+	if (ret) {
+		rte_free(eth_dev->data->mac_addrs);
+		return ret;
+	}
+
+	rte_intr_callback_register(&pci_dev->intr_handle,
+				   axgbe_dev_interrupt_handler,
+				   (void *)eth_dev);
+	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+		     eth_dev->data->port_id, pci_dev->id.vendor_id,
+		     pci_dev->id.device_id);
+
+	return 0;
+}
+
+static int
+eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+	/* Free MAC addresses */
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+	eth_dev->dev_ops = NULL;
+	eth_dev->rx_pkt_burst = NULL;
+	eth_dev->tx_pkt_burst = NULL;
+	axgbe_dev_clear_queues(eth_dev);
+
+	/* disable uio intr before callback unregister */
+	rte_intr_disable(&pci_dev->intr_handle);
+	rte_intr_callback_unregister(&pci_dev->intr_handle,
+				     axgbe_dev_interrupt_handler,
+				     (void *)eth_dev);
+
+	return 0;
+}
+
+static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			       struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev,
+					     sizeof(struct axgbe_port),
+					     eth_axgbe_dev_init);
+}
+
+static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
+}
+
+static struct rte_pci_driver rte_axgbe_pmd = {
+	.id_table = pci_id_axgbe_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = eth_axgbe_pci_probe,
+	.remove = eth_axgbe_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(axgbe_init_log)
+{
+	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
+	if (axgbe_logtype_init >= 0)
+		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
+	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
+	if (axgbe_logtype_driver >= 0)
+		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
new file mode 100644
index 00000000..b1cd2980
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -0,0 +1,586 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef RTE_ETH_AXGBE_H_
+#define RTE_ETH_AXGBE_H_
+
+#include <rte_mempool.h>
+#include <rte_lcore.h>
+#include "axgbe_common.h"
+
+#define IRQ				0xff
+#define VLAN_HLEN			4
+
+#define AXGBE_TX_MAX_BUF_SIZE		(0x3fff & ~(64 - 1))
+#define AXGBE_RX_MAX_BUF_SIZE		(0x3fff & ~(64 - 1))
+#define AXGBE_RX_MIN_BUF_SIZE		(ETHER_MAX_LEN + VLAN_HLEN)
+#define AXGBE_MAX_MAC_ADDRS		1
+
+#define AXGBE_RX_BUF_ALIGN		64
+
+#define AXGBE_MAX_DMA_CHANNELS		16
+#define AXGBE_MAX_QUEUES		16
+#define AXGBE_PRIORITY_QUEUES		8
+#define AXGBE_DMA_STOP_TIMEOUT		1
+
+/* DMA cache settings - Outer sharable, write-back, write-allocate */
+#define AXGBE_DMA_OS_AXDOMAIN		0x2
+#define AXGBE_DMA_OS_ARCACHE		0xb
+#define AXGBE_DMA_OS_AWCACHE		0xf
+
+/* DMA cache settings - System, no caches used */
+#define AXGBE_DMA_SYS_AXDOMAIN		0x3
+#define AXGBE_DMA_SYS_ARCACHE		0x0
+#define AXGBE_DMA_SYS_AWCACHE		0x0
+
+/* DMA channel interrupt modes */
+#define AXGBE_IRQ_MODE_EDGE		0
+#define AXGBE_IRQ_MODE_LEVEL		1
+
+#define AXGBE_DMA_INTERRUPT_MASK	0x31c7
+
+#define AXGMAC_MIN_PACKET		60
+#define AXGMAC_STD_PACKET_MTU		1500
+#define AXGMAC_MAX_STD_PACKET		1518
+#define AXGMAC_JUMBO_PACKET_MTU		9000
+#define AXGMAC_MAX_JUMBO_PACKET		9018
+/* Inter-frame gap + preamble */
+#define AXGMAC_ETH_PREAMBLE		(12 + 8)
+
+#define AXGMAC_PFC_DATA_LEN		46
+#define AXGMAC_PFC_DELAYS		14000
+
+/* PCI BAR mapping */
+#define AXGBE_AXGMAC_BAR		0
+#define AXGBE_XPCS_BAR			1
+#define AXGBE_MAC_PROP_OFFSET		0x1d000
+#define AXGBE_I2C_CTRL_OFFSET		0x1e000
+
+/* PCI clock frequencies */
+#define AXGBE_V2_DMA_CLOCK_FREQ		500000000
+#define AXGBE_V2_PTP_CLOCK_FREQ		125000000
+
+#define AXGMAC_FIFO_MIN_ALLOC		2048
+#define AXGMAC_FIFO_UNIT		256
+#define AXGMAC_FIFO_ALIGN(_x)				\
+	(((_x) + AXGMAC_FIFO_UNIT - 1) & ~(AXGMAC_FIFO_UNIT - 1))
+#define AXGMAC_FIFO_FC_OFF		2048
+#define AXGMAC_FIFO_FC_MIN		4096
+
+#define AXGBE_TC_MIN_QUANTUM		10
+
+/* Flow control queue count */
+#define AXGMAC_MAX_FLOW_CONTROL_QUEUES	8
+
+/* Flow control threshold units */
+#define AXGMAC_FLOW_CONTROL_UNIT	512
+#define AXGMAC_FLOW_CONTROL_ALIGN(_x)			\
+	(((_x) + AXGMAC_FLOW_CONTROL_UNIT - 1) &	\
+	 ~(AXGMAC_FLOW_CONTROL_UNIT - 1))
+#define AXGMAC_FLOW_CONTROL_VALUE(_x)			\
+	(((_x) < 1024) ? 0 : ((_x) / AXGMAC_FLOW_CONTROL_UNIT) - 2)
+#define AXGMAC_FLOW_CONTROL_MAX		33280
+
+/* Maximum MAC address hash table size (256 bits = 8 bytes) */
+#define AXGBE_MAC_HASH_TABLE_SIZE	8
+
+/* Receive Side Scaling */
+#define AXGBE_RSS_OFFLOAD  ( \
+	ETH_RSS_IPV4 | \
+	ETH_RSS_NONFRAG_IPV4_TCP | \
+	ETH_RSS_NONFRAG_IPV4_UDP | \
+	ETH_RSS_IPV6 | \
+	ETH_RSS_NONFRAG_IPV6_TCP | \
+	ETH_RSS_NONFRAG_IPV6_UDP)
+
+#define AXGBE_RSS_HASH_KEY_SIZE		40
+#define AXGBE_RSS_MAX_TABLE_SIZE	256
+#define AXGBE_RSS_LOOKUP_TABLE_TYPE	0
+#define AXGBE_RSS_HASH_KEY_TYPE		1
+
+/* Auto-negotiation */
+#define AXGBE_AN_MS_TIMEOUT		500
+#define AXGBE_LINK_TIMEOUT		5
+
+#define AXGBE_SGMII_AN_LINK_STATUS	BIT(1)
+#define AXGBE_SGMII_AN_LINK_SPEED	(BIT(2) | BIT(3))
+#define AXGBE_SGMII_AN_LINK_SPEED_100	0x04
+#define AXGBE_SGMII_AN_LINK_SPEED_1000	0x08
+#define AXGBE_SGMII_AN_LINK_DUPLEX	BIT(4)
+
+/* ECC correctable error notification window (seconds) */
+#define AXGBE_ECC_LIMIT			60
+
+/* MDIO port types */
+#define AXGMAC_MAX_C22_PORT		3
+
+/* Helper macro for descriptor handling
+ * Always use AXGBE_GET_DESC_DATA to access the descriptor data
+ * since the index is free-running and needs to be and-ed
+ * with the descriptor count value of the ring to index to
+ * the proper descriptor data.
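+ *
+ * For example (illustrative values), with rdesc_count = 512 a
+ * free-running index of 515 wraps to entry 3:
+ *
+ *   rdata = AXGBE_GET_DESC_DATA(ring, 515);   since 515 & 511 == 3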
+ */ +#define AXGBE_GET_DESC_DATA(_ring, _idx) \ + ((_ring)->rdata + \ + ((_idx) & ((_ring)->rdesc_count - 1))) + +struct axgbe_port; + +enum axgbe_state { + AXGBE_DOWN, + AXGBE_LINK_INIT, + AXGBE_LINK_ERR, + AXGBE_STOPPED, +}; + +enum axgbe_int { + AXGMAC_INT_DMA_CH_SR_TI, + AXGMAC_INT_DMA_CH_SR_TPS, + AXGMAC_INT_DMA_CH_SR_TBU, + AXGMAC_INT_DMA_CH_SR_RI, + AXGMAC_INT_DMA_CH_SR_RBU, + AXGMAC_INT_DMA_CH_SR_RPS, + AXGMAC_INT_DMA_CH_SR_TI_RI, + AXGMAC_INT_DMA_CH_SR_FBE, + AXGMAC_INT_DMA_ALL, +}; + +enum axgbe_int_state { + AXGMAC_INT_STATE_SAVE, + AXGMAC_INT_STATE_RESTORE, +}; + +enum axgbe_ecc_sec { + AXGBE_ECC_SEC_TX, + AXGBE_ECC_SEC_RX, + AXGBE_ECC_SEC_DESC, +}; + +enum axgbe_speed { + AXGBE_SPEED_1000 = 0, + AXGBE_SPEED_2500, + AXGBE_SPEED_10000, + AXGBE_SPEEDS, +}; + +enum axgbe_xpcs_access { + AXGBE_XPCS_ACCESS_V1 = 0, + AXGBE_XPCS_ACCESS_V2, +}; + +enum axgbe_an_mode { + AXGBE_AN_MODE_CL73 = 0, + AXGBE_AN_MODE_CL73_REDRV, + AXGBE_AN_MODE_CL37, + AXGBE_AN_MODE_CL37_SGMII, + AXGBE_AN_MODE_NONE, +}; + +enum axgbe_an { + AXGBE_AN_READY = 0, + AXGBE_AN_PAGE_RECEIVED, + AXGBE_AN_INCOMPAT_LINK, + AXGBE_AN_COMPLETE, + AXGBE_AN_NO_LINK, + AXGBE_AN_ERROR, +}; + +enum axgbe_rx { + AXGBE_RX_BPA = 0, + AXGBE_RX_XNP, + AXGBE_RX_COMPLETE, + AXGBE_RX_ERROR, +}; + +enum axgbe_mode { + AXGBE_MODE_KX_1000 = 0, + AXGBE_MODE_KX_2500, + AXGBE_MODE_KR, + AXGBE_MODE_X, + AXGBE_MODE_SGMII_100, + AXGBE_MODE_SGMII_1000, + AXGBE_MODE_SFI, + AXGBE_MODE_UNKNOWN, +}; + +enum axgbe_speedset { + AXGBE_SPEEDSET_1000_10000 = 0, + AXGBE_SPEEDSET_2500_10000, +}; + +enum axgbe_mdio_mode { + AXGBE_MDIO_MODE_NONE = 0, + AXGBE_MDIO_MODE_CL22, + AXGBE_MDIO_MODE_CL45, +}; + +struct axgbe_phy { + uint32_t supported; + uint32_t advertising; + uint32_t lp_advertising; + + int address; + + int autoneg; + int speed; + int duplex; + + int link; + + int pause_autoneg; + int tx_pause; + int rx_pause; +}; + +enum axgbe_i2c_cmd { + AXGBE_I2C_CMD_READ = 0, + AXGBE_I2C_CMD_WRITE, +}; + +struct axgbe_i2c_op { + enum axgbe_i2c_cmd cmd; + + unsigned int target; + + uint8_t *buf; + unsigned int len; +}; + +struct axgbe_i2c_op_state { + struct axgbe_i2c_op *op; + + unsigned int tx_len; + unsigned char *tx_buf; + + unsigned int rx_len; + unsigned char *rx_buf; + + unsigned int tx_abort_source; + + int ret; +}; + +struct axgbe_i2c { + unsigned int started; + unsigned int max_speed_mode; + unsigned int rx_fifo_size; + unsigned int tx_fifo_size; + + struct axgbe_i2c_op_state op_state; +}; + +struct axgbe_hw_if { + void (*config_flow_control)(struct axgbe_port *); + int (*config_rx_mode)(struct axgbe_port *); + + int (*init)(struct axgbe_port *); + + int (*read_mmd_regs)(struct axgbe_port *, int, int); + void (*write_mmd_regs)(struct axgbe_port *, int, int, int); + int (*set_speed)(struct axgbe_port *, int); + + int (*set_ext_mii_mode)(struct axgbe_port *, unsigned int, + enum axgbe_mdio_mode); + int (*read_ext_mii_regs)(struct axgbe_port *, int, int); + int (*write_ext_mii_regs)(struct axgbe_port *, int, int, uint16_t); + + /* For FLOW ctrl */ + int (*config_tx_flow_control)(struct axgbe_port *); + int (*config_rx_flow_control)(struct axgbe_port *); + + int (*exit)(struct axgbe_port *); +}; + +/* This structure represents implementation specific routines for an + * implementation of a PHY. All routines are required unless noted below. 
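+ * The v2 hardware implementations of these routines are installed by
+ * axgbe_init_function_ptrs_phy_v2() through
+ * vdata->init_function_ptrs_phy_impl().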
+ * Optional routines: + * kr_training_pre, kr_training_post + */ +struct axgbe_phy_impl_if { + /* Perform Setup/teardown actions */ + int (*init)(struct axgbe_port *); + void (*exit)(struct axgbe_port *); + + /* Perform start/stop specific actions */ + int (*reset)(struct axgbe_port *); + int (*start)(struct axgbe_port *); + void (*stop)(struct axgbe_port *); + + /* Return the link status */ + int (*link_status)(struct axgbe_port *, int *); + + /* Indicate if a particular speed is valid */ + int (*valid_speed)(struct axgbe_port *, int); + + /* Check if the specified mode can/should be used */ + bool (*use_mode)(struct axgbe_port *, enum axgbe_mode); + /* Switch the PHY into various modes */ + void (*set_mode)(struct axgbe_port *, enum axgbe_mode); + /* Retrieve mode needed for a specific speed */ + enum axgbe_mode (*get_mode)(struct axgbe_port *, int); + /* Retrieve new/next mode when trying to auto-negotiate */ + enum axgbe_mode (*switch_mode)(struct axgbe_port *); + /* Retrieve current mode */ + enum axgbe_mode (*cur_mode)(struct axgbe_port *); + + /* Retrieve current auto-negotiation mode */ + enum axgbe_an_mode (*an_mode)(struct axgbe_port *); + + /* Configure auto-negotiation settings */ + int (*an_config)(struct axgbe_port *); + + /* Set/override auto-negotiation advertisement settings */ + unsigned int (*an_advertising)(struct axgbe_port *port); + + /* Process results of auto-negotiation */ + enum axgbe_mode (*an_outcome)(struct axgbe_port *); + + /* Pre/Post auto-negotiation support */ + void (*an_pre)(struct axgbe_port *port); + void (*an_post)(struct axgbe_port *port); + + /* Pre/Post KR training enablement support */ + void (*kr_training_pre)(struct axgbe_port *); + void (*kr_training_post)(struct axgbe_port *); +}; + +struct axgbe_phy_if { + /* For PHY setup/teardown */ + int (*phy_init)(struct axgbe_port *); + void (*phy_exit)(struct axgbe_port *); + + /* For PHY support when setting device up/down */ + int (*phy_reset)(struct axgbe_port *); + int (*phy_start)(struct axgbe_port *); + void (*phy_stop)(struct axgbe_port *); + + /* For PHY support while device is up */ + void (*phy_status)(struct axgbe_port *); + int (*phy_config_aneg)(struct axgbe_port *); + + /* For PHY settings validation */ + int (*phy_valid_speed)(struct axgbe_port *, int); + /* For single interrupt support */ + void (*an_isr)(struct axgbe_port *); + /* PHY implementation specific services */ + struct axgbe_phy_impl_if phy_impl; +}; + +struct axgbe_i2c_if { + /* For initial I2C setup */ + int (*i2c_init)(struct axgbe_port *); + + /* For I2C support when setting device up/down */ + int (*i2c_start)(struct axgbe_port *); + void (*i2c_stop)(struct axgbe_port *); + + /* For performing I2C operations */ + int (*i2c_xfer)(struct axgbe_port *, struct axgbe_i2c_op *); +}; + +/* This structure contains flags that indicate what hardware features + * or configurations are present in the device. 
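+ * The fields are decoded from the MAC_HWF0R/MAC_HWF1R/MAC_HWF2R
+ * registers by axgbe_get_all_hw_features() during device init.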
+ */ +struct axgbe_hw_features { + /* HW Version */ + unsigned int version; + + /* HW Feature Register0 */ + unsigned int gmii; /* 1000 Mbps support */ + unsigned int vlhash; /* VLAN Hash Filter */ + unsigned int sma; /* SMA(MDIO) Interface */ + unsigned int rwk; /* PMT remote wake-up packet */ + unsigned int mgk; /* PMT magic packet */ + unsigned int mmc; /* RMON module */ + unsigned int aoe; /* ARP Offload */ + unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */ + unsigned int eee; /* Energy Efficient Ethernet */ + unsigned int tx_coe; /* Tx Checksum Offload */ + unsigned int rx_coe; /* Rx Checksum Offload */ + unsigned int addn_mac; /* Additional MAC Addresses */ + unsigned int ts_src; /* Timestamp Source */ + unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */ + + /* HW Feature Register1 */ + unsigned int rx_fifo_size; /* MTL Receive FIFO Size */ + unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */ + unsigned int adv_ts_hi; /* Advance Timestamping High Word */ + unsigned int dma_width; /* DMA width */ + unsigned int dcb; /* DCB Feature */ + unsigned int sph; /* Split Header Feature */ + unsigned int tso; /* TCP Segmentation Offload */ + unsigned int dma_debug; /* DMA Debug Registers */ + unsigned int rss; /* Receive Side Scaling */ + unsigned int tc_cnt; /* Number of Traffic Classes */ + unsigned int hash_table_size; /* Hash Table Size */ + unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */ + + /* HW Feature Register2 */ + unsigned int rx_q_cnt; /* Number of MTL Receive Queues */ + unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */ + unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */ + unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */ + unsigned int pps_out_num; /* Number of PPS outputs */ + unsigned int aux_snap_num; /* Number of Aux snapshot inputs */ +}; + +struct axgbe_version_data { + void (*init_function_ptrs_phy_impl)(struct axgbe_phy_if *); + enum axgbe_xpcs_access xpcs_access; + unsigned int mmc_64bit; + unsigned int tx_max_fifo_size; + unsigned int rx_max_fifo_size; + unsigned int tx_tstamp_workaround; + unsigned int ecc_support; + unsigned int i2c_support; + unsigned int an_cdr_workaround; +}; + +/* + * Structure to store private data for each port. 
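+ * One instance lives in eth_dev->data->dev_private; the allocation is
+ * sized with sizeof(struct axgbe_port) in eth_axgbe_pci_probe().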
+ */
+struct axgbe_port {
+	/* Ethdev where port belongs */
+	struct rte_eth_dev *eth_dev;
+	/* Pci dev info */
+	const struct rte_pci_device *pci_dev;
+	/* Version related data */
+	struct axgbe_version_data *vdata;
+
+	/* AXGMAC/XPCS related mmio registers */
+	void *xgmac_regs;	/* AXGMAC CSRs */
+	void *xpcs_regs;	/* XPCS MMD registers */
+	void *xprop_regs;	/* AXGBE property registers */
+	void *xi2c_regs;	/* AXGBE I2C CSRs */
+
+	bool cdr_track_early;
+	/* XPCS indirect addressing lock */
+	unsigned int xpcs_window_def_reg;
+	unsigned int xpcs_window_sel_reg;
+	unsigned int xpcs_window;
+	unsigned int xpcs_window_size;
+	unsigned int xpcs_window_mask;
+
+	/* Flags representing axgbe_state */
+	unsigned long dev_state;
+
+	struct axgbe_hw_if hw_if;
+	struct axgbe_phy_if phy_if;
+	struct axgbe_i2c_if i2c_if;
+
+	/* AXI DMA settings */
+	unsigned int coherent;
+	unsigned int axdomain;
+	unsigned int arcache;
+	unsigned int awcache;
+
+	unsigned int tx_max_channel_count;
+	unsigned int rx_max_channel_count;
+	unsigned int channel_count;
+	unsigned int tx_ring_count;
+	unsigned int tx_desc_count;
+	unsigned int rx_ring_count;
+	unsigned int rx_desc_count;
+
+	unsigned int tx_max_q_count;
+	unsigned int rx_max_q_count;
+	unsigned int tx_q_count;
+	unsigned int rx_q_count;
+
+	/* Tx/Rx common settings */
+	unsigned int pblx8;
+
+	/* Tx settings */
+	unsigned int tx_sf_mode;
+	unsigned int tx_threshold;
+	unsigned int tx_pbl;
+	unsigned int tx_osp_mode;
+	unsigned int tx_max_fifo_size;
+
+	/* Rx settings */
+	unsigned int rx_sf_mode;
+	unsigned int rx_threshold;
+	unsigned int rx_pbl;
+	unsigned int rx_max_fifo_size;
+	unsigned int rx_buf_size;
+
+	/* Device clocks */
+	unsigned long sysclk_rate;
+	unsigned long ptpclk_rate;
+
+	/* Keeps track of power mode */
+	unsigned int power_down;
+
+	/* Current PHY settings */
+	int phy_link;
+	int phy_speed;
+
+	pthread_mutex_t xpcs_mutex;
+	pthread_mutex_t i2c_mutex;
+	pthread_mutex_t an_mutex;
+	pthread_mutex_t phy_mutex;
+
+	/* Flow control settings */
+	unsigned int pause_autoneg;
+	unsigned int tx_pause;
+	unsigned int rx_pause;
+	unsigned int rx_rfa[AXGBE_MAX_QUEUES];
+	unsigned int rx_rfd[AXGBE_MAX_QUEUES];
+	unsigned int fifo;
+
+	/* Receive Side Scaling settings */
+	u8 rss_key[AXGBE_RSS_HASH_KEY_SIZE];
+	uint32_t rss_table[AXGBE_RSS_MAX_TABLE_SIZE];
+	uint32_t rss_options;
+	int rss_enable;
+
+	/* Hardware features of the device */
+	struct axgbe_hw_features hw_feat;
+
+	struct ether_addr mac_addr;
+
+	/* Software Tx/Rx structure pointers */
+	void **rx_queues;
+	void **tx_queues;
+
+	/* MDIO/PHY related settings */
+	unsigned int phy_started;
+	void *phy_data;
+	struct axgbe_phy phy;
+	int mdio_mmd;
+	unsigned long link_check;
+	volatile int mdio_completion;
+
+	unsigned int kr_redrv;
+
+	/* Auto-negotiation state machine support */
+	unsigned int an_int;
+	unsigned int an_status;
+	enum axgbe_an an_result;
+	enum axgbe_an an_state;
+	enum axgbe_rx kr_state;
+	enum axgbe_rx kx_state;
+	unsigned int an_supported;
+	unsigned int parallel_detect;
+	unsigned int fec_ability;
+	unsigned long an_start;
+	enum axgbe_an_mode an_mode;
+
+	/* I2C support */
+	struct axgbe_i2c i2c;
+	volatile int i2c_complete;
+
+	/* CRC stripping by H/w for Rx packet */
+	int crc_strip_enable;
+	/* csum enable to hardware */
+	uint32_t rx_csum_enable;
+};
+
+void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if);
+void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if);
+void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if);
+void
axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if); + +#endif /* RTE_ETH_AXGBE_H_ */ diff --git a/drivers/net/axgbe/axgbe_i2c.c b/drivers/net/axgbe/axgbe_i2c.c new file mode 100644 index 00000000..204ec367 --- /dev/null +++ b/drivers/net/axgbe/axgbe_i2c.c @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. + */ + +#include "axgbe_ethdev.h" +#include "axgbe_common.h" + +#define AXGBE_ABORT_COUNT 500 +#define AXGBE_DISABLE_COUNT 1000 + +#define AXGBE_STD_SPEED 1 + +#define AXGBE_INTR_RX_FULL BIT(IC_RAW_INTR_STAT_RX_FULL_INDEX) +#define AXGBE_INTR_TX_EMPTY BIT(IC_RAW_INTR_STAT_TX_EMPTY_INDEX) +#define AXGBE_INTR_TX_ABRT BIT(IC_RAW_INTR_STAT_TX_ABRT_INDEX) +#define AXGBE_INTR_STOP_DET BIT(IC_RAW_INTR_STAT_STOP_DET_INDEX) +#define AXGBE_DEFAULT_INT_MASK (AXGBE_INTR_RX_FULL | \ + AXGBE_INTR_TX_EMPTY | \ + AXGBE_INTR_TX_ABRT | \ + AXGBE_INTR_STOP_DET) + +#define AXGBE_I2C_READ BIT(8) +#define AXGBE_I2C_STOP BIT(9) + +static int axgbe_i2c_abort(struct axgbe_port *pdata) +{ + unsigned int wait = AXGBE_ABORT_COUNT; + + /* Must be enabled to recognize the abort request */ + XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, 1); + + /* Issue the abort */ + XI2C_IOWRITE_BITS(pdata, IC_ENABLE, ABORT, 1); + + while (wait--) { + if (!XI2C_IOREAD_BITS(pdata, IC_ENABLE, ABORT)) + return 0; + rte_delay_us(500); + } + + return -EBUSY; +} + +static int axgbe_i2c_set_enable(struct axgbe_port *pdata, bool enable) +{ + unsigned int wait = AXGBE_DISABLE_COUNT; + unsigned int mode = enable ? 1 : 0; + + while (wait--) { + XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, mode); + if (XI2C_IOREAD_BITS(pdata, IC_ENABLE_STATUS, EN) == mode) + return 0; + + rte_delay_us(100); + } + + return -EBUSY; +} + +static int axgbe_i2c_disable(struct axgbe_port *pdata) +{ + unsigned int ret; + + ret = axgbe_i2c_set_enable(pdata, false); + if (ret) { + /* Disable failed, try an abort */ + ret = axgbe_i2c_abort(pdata); + if (ret) + return ret; + + /* Abort succeeded, try to disable again */ + ret = axgbe_i2c_set_enable(pdata, false); + } + + return ret; +} + +static int axgbe_i2c_enable(struct axgbe_port *pdata) +{ + return axgbe_i2c_set_enable(pdata, true); +} + +static void axgbe_i2c_clear_all_interrupts(struct axgbe_port *pdata) +{ + XI2C_IOREAD(pdata, IC_CLR_INTR); +} + +static void axgbe_i2c_disable_interrupts(struct axgbe_port *pdata) +{ + XI2C_IOWRITE(pdata, IC_INTR_MASK, 0); +} + +static void axgbe_i2c_enable_interrupts(struct axgbe_port *pdata) +{ + XI2C_IOWRITE(pdata, IC_INTR_MASK, AXGBE_DEFAULT_INT_MASK); +} + +static void axgbe_i2c_write(struct axgbe_port *pdata) +{ + struct axgbe_i2c_op_state *state = &pdata->i2c.op_state; + unsigned int tx_slots; + unsigned int cmd; + + /* Configured to never receive Rx overflows, so fill up Tx fifo */ + tx_slots = pdata->i2c.tx_fifo_size - XI2C_IOREAD(pdata, IC_TXFLR); + while (tx_slots && state->tx_len) { + if (state->op->cmd == AXGBE_I2C_CMD_READ) + cmd = AXGBE_I2C_READ; + else + cmd = *state->tx_buf++; + + if (state->tx_len == 1) + XI2C_SET_BITS(cmd, IC_DATA_CMD, STOP, 1); + + XI2C_IOWRITE(pdata, IC_DATA_CMD, cmd); + + tx_slots--; + state->tx_len--; + } + + /* No more Tx operations, so ignore TX_EMPTY and return */ + if (!state->tx_len) + XI2C_IOWRITE_BITS(pdata, IC_INTR_MASK, TX_EMPTY, 0); +} + +static void axgbe_i2c_read(struct axgbe_port *pdata) +{ + struct axgbe_i2c_op_state *state = &pdata->i2c.op_state; + unsigned int rx_slots; + + /* Anything to be 
+
+static void axgbe_i2c_read(struct axgbe_port *pdata)
+{
+	struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+	unsigned int rx_slots;
+
+	/* Anything to be read? */
+	if (state->op->cmd != AXGBE_I2C_CMD_READ)
+		return;
+
+	rx_slots = XI2C_IOREAD(pdata, IC_RXFLR);
+	while (rx_slots && state->rx_len) {
+		*state->rx_buf++ = XI2C_IOREAD(pdata, IC_DATA_CMD);
+		state->rx_len--;
+		rx_slots--;
+	}
+}
+
+static void axgbe_i2c_clear_isr_interrupts(struct axgbe_port *pdata,
+					   unsigned int isr)
+{
+	struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+
+	if (isr & AXGBE_INTR_TX_ABRT) {
+		state->tx_abort_source = XI2C_IOREAD(pdata, IC_TX_ABRT_SOURCE);
+		XI2C_IOREAD(pdata, IC_CLR_TX_ABRT);
+	}
+
+	if (isr & AXGBE_INTR_STOP_DET)
+		XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
+}
+
+static int axgbe_i2c_isr(struct axgbe_port *pdata)
+{
+	struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+	unsigned int isr;
+
+	isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT);
+
+	axgbe_i2c_clear_isr_interrupts(pdata, isr);
+
+	if (isr & AXGBE_INTR_TX_ABRT) {
+		axgbe_i2c_disable_interrupts(pdata);
+
+		state->ret = -EIO;
+		goto out;
+	}
+
+	/* Check for data in the Rx fifo */
+	axgbe_i2c_read(pdata);
+
+	/* Fill up the Tx fifo next */
+	axgbe_i2c_write(pdata);
+
+out:
+	/* Complete on an error or STOP condition */
+	if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET))
+		return 1;
+
+	return 0;
+}
+
+static void axgbe_i2c_set_mode(struct axgbe_port *pdata)
+{
+	unsigned int reg;
+
+	reg = XI2C_IOREAD(pdata, IC_CON);
+	XI2C_SET_BITS(reg, IC_CON, MASTER_MODE, 1);
+	XI2C_SET_BITS(reg, IC_CON, SLAVE_DISABLE, 1);
+	XI2C_SET_BITS(reg, IC_CON, RESTART_EN, 1);
+	XI2C_SET_BITS(reg, IC_CON, SPEED, AXGBE_STD_SPEED);
+	XI2C_SET_BITS(reg, IC_CON, RX_FIFO_FULL_HOLD, 1);
+	XI2C_IOWRITE(pdata, IC_CON, reg);
+}
+
+static void axgbe_i2c_get_features(struct axgbe_port *pdata)
+{
+	struct axgbe_i2c *i2c = &pdata->i2c;
+	unsigned int reg;
+
+	reg = XI2C_IOREAD(pdata, IC_COMP_PARAM_1);
+	i2c->max_speed_mode = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+					    MAX_SPEED_MODE);
+	i2c->rx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+					  RX_BUFFER_DEPTH);
+	i2c->tx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+					  TX_BUFFER_DEPTH);
+}
+
+static void axgbe_i2c_set_target(struct axgbe_port *pdata, unsigned int addr)
+{
+	XI2C_IOWRITE(pdata, IC_TAR, addr);
+}
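axgbe_i2c_xfer() below is the single entry point the PHY layer uses; it drives the controller by polling IC_RAW_INTR_STAT and calling the ISR by hand rather than from a real interrupt. A minimal sketch of a caller, assuming a one-byte register read from an SFP EEPROM at target 0x50 (the two-step write/read pattern mirrors axgbe_phy_i2c_read() later in this patch):

    uint8_t offset = 0, byte;
    struct axgbe_i2c_op op;

    op.cmd = AXGBE_I2C_CMD_WRITE;	/* step 1: set the EEPROM offset */
    op.target = 0x50;
    op.len = sizeof(offset);
    op.buf = &offset;
    if (pdata->i2c_if.i2c_xfer(pdata, &op))
            return -EIO;

    op.cmd = AXGBE_I2C_CMD_READ;	/* step 2: clock the byte back in */
    op.len = sizeof(byte);
    op.buf = &byte;
    return pdata->i2c_if.i2c_xfer(pdata, &op);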
+
+static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
+{
+	struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+	int ret;
+	uint64_t timeout;
+
+	pthread_mutex_lock(&pdata->i2c_mutex);
+	ret = axgbe_i2c_disable(pdata);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
+		goto unlock;
+	}
+
+	axgbe_i2c_set_target(pdata, op->target);
+
+	memset(state, 0, sizeof(*state));
+	state->op = op;
+	state->tx_len = op->len;
+	state->tx_buf = (unsigned char *)op->buf;
+	state->rx_len = op->len;
+	state->rx_buf = (unsigned char *)op->buf;
+
+	axgbe_i2c_clear_all_interrupts(pdata);
+	ret = axgbe_i2c_enable(pdata);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "failed to enable i2c master\n");
+		goto unlock;
+	}
+
+	/* Enabling the interrupts will cause the TX FIFO empty interrupt to
+	 * fire and begin to process the command via the ISR.
+	 */
+	axgbe_i2c_enable_interrupts(pdata);
+	timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+
+	while (time_before(rte_get_timer_cycles(), timeout)) {
+		rte_delay_us(100);
+		if (XI2C_IOREAD(pdata, IC_RAW_INTR_STAT)) {
+			if (axgbe_i2c_isr(pdata))
+				goto success;
+		}
+	}
+
+	PMD_DRV_LOG(ERR, "i2c operation timed out\n");
+	axgbe_i2c_disable_interrupts(pdata);
+	axgbe_i2c_disable(pdata);
+	ret = -ETIMEDOUT;
+	goto unlock;
+
+success:
+	ret = state->ret;
+	if (ret) {
+		if (state->tx_abort_source & IC_TX_ABRT_7B_ADDR_NOACK)
+			ret = -ENOTCONN;
+		else if (state->tx_abort_source & IC_TX_ABRT_ARB_LOST)
+			ret = -EAGAIN;
+	}
+
+unlock:
+	pthread_mutex_unlock(&pdata->i2c_mutex);
+	return ret;
+}
+
+static void axgbe_i2c_stop(struct axgbe_port *pdata)
+{
+	if (!pdata->i2c.started)
+		return;
+
+	pdata->i2c.started = 0;
+	axgbe_i2c_disable_interrupts(pdata);
+	axgbe_i2c_disable(pdata);
+	axgbe_i2c_clear_all_interrupts(pdata);
+}
+
+static int axgbe_i2c_start(struct axgbe_port *pdata)
+{
+	if (pdata->i2c.started)
+		return 0;
+
+	pdata->i2c.started = 1;
+
+	return 0;
+}
+
+static int axgbe_i2c_init(struct axgbe_port *pdata)
+{
+	int ret;
+
+	axgbe_i2c_disable_interrupts(pdata);
+
+	ret = axgbe_i2c_disable(pdata);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
+		return ret;
+	}
+
+	axgbe_i2c_get_features(pdata);
+
+	axgbe_i2c_set_mode(pdata);
+
+	axgbe_i2c_clear_all_interrupts(pdata);
+
+	return 0;
+}
+
+void axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if)
+{
+	i2c_if->i2c_init = axgbe_i2c_init;
+	i2c_if->i2c_start = axgbe_i2c_start;
+	i2c_if->i2c_stop = axgbe_i2c_stop;
+	i2c_if->i2c_xfer = axgbe_i2c_xfer;
+}
diff --git a/drivers/net/axgbe/axgbe_logs.h b/drivers/net/axgbe/axgbe_logs.h
new file mode 100644
index 00000000..d1487017
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_logs.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _AXGBE_LOGS_H_
+#define _AXGBE_LOGS_H_
+
+#include <rte_log.h>
+
+extern int axgbe_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, axgbe_logtype_init, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+#ifdef RTE_LIBRTE_AXGBE_PMD_DEBUG
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+extern int axgbe_logtype_driver;
+#define PMD_DRV_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, axgbe_logtype_driver, "%s(): " fmt, \
+		__func__, ## args)
+
+#endif /* _AXGBE_LOGS_H_ */
diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
new file mode 100644
index 00000000..2721e5cc
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_mdio.c
@@ -0,0 +1,1066 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */ + +#include "axgbe_ethdev.h" +#include "axgbe_common.h" +#include "axgbe_phy.h" + +static void axgbe_an37_clear_interrupts(struct axgbe_port *pdata) +{ + int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT); + reg &= ~AXGBE_AN_CL37_INT_MASK; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg); +} + +static void axgbe_an37_disable_interrupts(struct axgbe_port *pdata) +{ + int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL); + reg &= ~AXGBE_AN_CL37_INT_MASK; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); + + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL); + reg &= ~AXGBE_PCS_CL37_BP; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg); +} + +static void axgbe_an73_clear_interrupts(struct axgbe_port *pdata) +{ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); +} + +static void axgbe_an73_disable_interrupts(struct axgbe_port *pdata) +{ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); +} + +static void axgbe_an73_enable_interrupts(struct axgbe_port *pdata) +{ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, + AXGBE_AN_CL73_INT_MASK); +} + +static void axgbe_an_enable_interrupts(struct axgbe_port *pdata) +{ + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_enable_interrupts(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + PMD_DRV_LOG(ERR, "Unsupported AN_MOD_37\n"); + break; + default: + break; + } +} + +static void axgbe_an_clear_interrupts_all(struct axgbe_port *pdata) +{ + axgbe_an73_clear_interrupts(pdata); + axgbe_an37_clear_interrupts(pdata); +} + +static void axgbe_an73_enable_kr_training(struct axgbe_port *pdata) +{ + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + + reg |= AXGBE_KR_TRAINING_ENABLE; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); +} + +static void axgbe_an73_disable_kr_training(struct axgbe_port *pdata) +{ + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + + reg &= ~AXGBE_KR_TRAINING_ENABLE; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); +} + +static void axgbe_kr_mode(struct axgbe_port *pdata) +{ + /* Enable KR training */ + axgbe_an73_enable_kr_training(pdata); + + /* Set MAC to 10G speed */ + pdata->hw_if.set_speed(pdata, SPEED_10000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KR); +} + +static void axgbe_kx_2500_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 2.5G speed */ + pdata->hw_if.set_speed(pdata, SPEED_2500); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_2500); +} + +static void axgbe_kx_1000_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_1000); +} + +static void axgbe_sfi_mode(struct axgbe_port *pdata) +{ + /* If a KR re-driver is present, change to KR mode instead */ + if (pdata->kr_redrv) + return axgbe_kr_mode(pdata); + + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 10G speed */ + pdata->hw_if.set_speed(pdata, SPEED_10000); + + /* Call PHY 
implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SFI); +} + +static void axgbe_x_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_X); +} + +static void axgbe_sgmii_1000_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_1000); +} + +static void axgbe_sgmii_100_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_100); +} + +static enum axgbe_mode axgbe_cur_mode(struct axgbe_port *pdata) +{ + return pdata->phy_if.phy_impl.cur_mode(pdata); +} + +static bool axgbe_in_kr_mode(struct axgbe_port *pdata) +{ + return axgbe_cur_mode(pdata) == AXGBE_MODE_KR; +} + +static void axgbe_change_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_KX_1000: + axgbe_kx_1000_mode(pdata); + break; + case AXGBE_MODE_KX_2500: + axgbe_kx_2500_mode(pdata); + break; + case AXGBE_MODE_KR: + axgbe_kr_mode(pdata); + break; + case AXGBE_MODE_SGMII_100: + axgbe_sgmii_100_mode(pdata); + break; + case AXGBE_MODE_SGMII_1000: + axgbe_sgmii_1000_mode(pdata); + break; + case AXGBE_MODE_X: + axgbe_x_mode(pdata); + break; + case AXGBE_MODE_SFI: + axgbe_sfi_mode(pdata); + break; + case AXGBE_MODE_UNKNOWN: + break; + default: + PMD_DRV_LOG(ERR, "invalid operation mode requested (%u)\n", mode); + } +} + +static void axgbe_switch_mode(struct axgbe_port *pdata) +{ + axgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata)); +} + +static void axgbe_set_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + if (mode == axgbe_cur_mode(pdata)) + return; + + axgbe_change_mode(pdata, mode); +} + +static bool axgbe_use_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + return pdata->phy_if.phy_impl.use_mode(pdata, mode); +} + +static void axgbe_an37_set(struct axgbe_port *pdata, bool enable, + bool restart) +{ + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1); + reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE; + + if (enable) + reg |= MDIO_VEND2_CTRL1_AN_ENABLE; + + if (restart) + reg |= MDIO_VEND2_CTRL1_AN_RESTART; + + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg); +} + +static void axgbe_an37_disable(struct axgbe_port *pdata) +{ + axgbe_an37_set(pdata, false, false); + axgbe_an37_disable_interrupts(pdata); +} + +static void axgbe_an73_set(struct axgbe_port *pdata, bool enable, + bool restart) +{ + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1); + reg &= ~MDIO_AN_CTRL1_ENABLE; + + if (enable) + reg |= MDIO_AN_CTRL1_ENABLE; + + if (restart) + reg |= MDIO_AN_CTRL1_RESTART; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg); +} + +static void axgbe_an73_restart(struct axgbe_port *pdata) +{ + axgbe_an73_enable_interrupts(pdata); + axgbe_an73_set(pdata, true, true); +} + +static void axgbe_an73_disable(struct axgbe_port *pdata) +{ + 
axgbe_an73_set(pdata, false, false); + axgbe_an73_disable_interrupts(pdata); + pdata->an_start = 0; +} + +static void axgbe_an_restart(struct axgbe_port *pdata) +{ + if (pdata->phy_if.phy_impl.an_pre) + pdata->phy_if.phy_impl.an_pre(pdata); + + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_restart(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n"); + break; + default: + break; + } +} + +static void axgbe_an_disable(struct axgbe_port *pdata) +{ + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_disable(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n"); + break; + default: + break; + } +} + +static void axgbe_an_disable_all(struct axgbe_port *pdata) +{ + axgbe_an73_disable(pdata); + axgbe_an37_disable(pdata); +} + +static enum axgbe_an axgbe_an73_tx_training(struct axgbe_port *pdata, + enum axgbe_rx *state) +{ + unsigned int ad_reg, lp_reg, reg; + + *state = AXGBE_RX_COMPLETE; + + /* If we're not in KR mode then we're done */ + if (!axgbe_in_kr_mode(pdata)) + return AXGBE_AN_PAGE_RECEIVED; + + /* Enable/Disable FEC */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); + + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL); + reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE); + if ((ad_reg & 0xc000) && (lp_reg & 0xc000)) + reg |= pdata->fec_ability; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg); + + /* Start KR training */ + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + if (reg & AXGBE_KR_TRAINING_ENABLE) { + if (pdata->phy_if.phy_impl.kr_training_pre) + pdata->phy_if.phy_impl.kr_training_pre(pdata); + + reg |= AXGBE_KR_TRAINING_START; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, + reg); + + if (pdata->phy_if.phy_impl.kr_training_post) + pdata->phy_if.phy_impl.kr_training_post(pdata); + } + + return AXGBE_AN_PAGE_RECEIVED; +} + +static enum axgbe_an axgbe_an73_tx_xnp(struct axgbe_port *pdata, + enum axgbe_rx *state) +{ + u16 msg; + + *state = AXGBE_RX_XNP; + + msg = AXGBE_XNP_MCF_NULL_MESSAGE; + msg |= AXGBE_XNP_MP_FORMATTED; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0); + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0); + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg); + + return AXGBE_AN_PAGE_RECEIVED; +} + +static enum axgbe_an axgbe_an73_rx_bpa(struct axgbe_port *pdata, + enum axgbe_rx *state) +{ + unsigned int link_support; + unsigned int reg, ad_reg, lp_reg; + + /* Read Base Ability register 2 first */ + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); + + /* Check for a supported mode, otherwise restart in a different one */ + link_support = axgbe_in_kr_mode(pdata) ? 0x80 : 0x20; + if (!(reg & link_support)) + return AXGBE_AN_INCOMPAT_LINK; + + /* Check Extended Next Page support */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); + + return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) || + (lp_reg & AXGBE_XNP_NP_EXCHANGE)) + ? 
axgbe_an73_tx_xnp(pdata, state) + : axgbe_an73_tx_training(pdata, state); +} + +static enum axgbe_an axgbe_an73_rx_xnp(struct axgbe_port *pdata, + enum axgbe_rx *state) +{ + unsigned int ad_reg, lp_reg; + + /* Check Extended Next Page support */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX); + + return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) || + (lp_reg & AXGBE_XNP_NP_EXCHANGE)) + ? axgbe_an73_tx_xnp(pdata, state) + : axgbe_an73_tx_training(pdata, state); +} + +static enum axgbe_an axgbe_an73_page_received(struct axgbe_port *pdata) +{ + enum axgbe_rx *state; + unsigned long an_timeout; + enum axgbe_an ret; + unsigned long ticks; + + if (!pdata->an_start) { + pdata->an_start = rte_get_timer_cycles(); + } else { + an_timeout = pdata->an_start + + msecs_to_timer_cycles(AXGBE_AN_MS_TIMEOUT); + ticks = rte_get_timer_cycles(); + if (time_after(ticks, an_timeout)) { + /* Auto-negotiation timed out, reset state */ + pdata->kr_state = AXGBE_RX_BPA; + pdata->kx_state = AXGBE_RX_BPA; + + pdata->an_start = rte_get_timer_cycles(); + } + } + + state = axgbe_in_kr_mode(pdata) ? &pdata->kr_state + : &pdata->kx_state; + + switch (*state) { + case AXGBE_RX_BPA: + ret = axgbe_an73_rx_bpa(pdata, state); + break; + case AXGBE_RX_XNP: + ret = axgbe_an73_rx_xnp(pdata, state); + break; + default: + ret = AXGBE_AN_ERROR; + } + + return ret; +} + +static enum axgbe_an axgbe_an73_incompat_link(struct axgbe_port *pdata) +{ + /* Be sure we aren't looping trying to negotiate */ + if (axgbe_in_kr_mode(pdata)) { + pdata->kr_state = AXGBE_RX_ERROR; + + if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) && + !(pdata->phy.advertising & ADVERTISED_2500baseX_Full)) + return AXGBE_AN_NO_LINK; + + if (pdata->kx_state != AXGBE_RX_BPA) + return AXGBE_AN_NO_LINK; + } else { + pdata->kx_state = AXGBE_RX_ERROR; + + if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full)) + return AXGBE_AN_NO_LINK; + + if (pdata->kr_state != AXGBE_RX_BPA) + return AXGBE_AN_NO_LINK; + } + + axgbe_an_disable(pdata); + axgbe_switch_mode(pdata); + axgbe_an_restart(pdata); + + return AXGBE_AN_INCOMPAT_LINK; +} + +static void axgbe_an73_state_machine(struct axgbe_port *pdata) +{ + enum axgbe_an cur_state = pdata->an_state; + + if (!pdata->an_int) + return; + +next_int: + if (pdata->an_int & AXGBE_AN_CL73_PG_RCV) { + pdata->an_state = AXGBE_AN_PAGE_RECEIVED; + pdata->an_int &= ~AXGBE_AN_CL73_PG_RCV; + } else if (pdata->an_int & AXGBE_AN_CL73_INC_LINK) { + pdata->an_state = AXGBE_AN_INCOMPAT_LINK; + pdata->an_int &= ~AXGBE_AN_CL73_INC_LINK; + } else if (pdata->an_int & AXGBE_AN_CL73_INT_CMPLT) { + pdata->an_state = AXGBE_AN_COMPLETE; + pdata->an_int &= ~AXGBE_AN_CL73_INT_CMPLT; + } else { + pdata->an_state = AXGBE_AN_ERROR; + } + +again: + cur_state = pdata->an_state; + + switch (pdata->an_state) { + case AXGBE_AN_READY: + pdata->an_supported = 0; + break; + case AXGBE_AN_PAGE_RECEIVED: + pdata->an_state = axgbe_an73_page_received(pdata); + pdata->an_supported++; + break; + case AXGBE_AN_INCOMPAT_LINK: + pdata->an_supported = 0; + pdata->parallel_detect = 0; + pdata->an_state = axgbe_an73_incompat_link(pdata); + break; + case AXGBE_AN_COMPLETE: + pdata->parallel_detect = pdata->an_supported ? 
0 : 1; + break; + case AXGBE_AN_NO_LINK: + break; + default: + pdata->an_state = AXGBE_AN_ERROR; + } + + if (pdata->an_state == AXGBE_AN_NO_LINK) { + pdata->an_int = 0; + axgbe_an73_clear_interrupts(pdata); + pdata->eth_dev->data->dev_link.link_status = + ETH_LINK_DOWN; + } else if (pdata->an_state == AXGBE_AN_ERROR) { + PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n", + cur_state); + pdata->an_int = 0; + axgbe_an73_clear_interrupts(pdata); + } + + if (pdata->an_state >= AXGBE_AN_COMPLETE) { + pdata->an_result = pdata->an_state; + pdata->an_state = AXGBE_AN_READY; + pdata->kr_state = AXGBE_RX_BPA; + pdata->kx_state = AXGBE_RX_BPA; + pdata->an_start = 0; + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + } + + if (cur_state != pdata->an_state) + goto again; + + if (pdata->an_int) + goto next_int; + + axgbe_an73_enable_interrupts(pdata); +} + +static void axgbe_an73_isr(struct axgbe_port *pdata) +{ + /* Disable AN interrupts */ + axgbe_an73_disable_interrupts(pdata); + + /* Save the interrupt(s) that fired */ + pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT); + + if (pdata->an_int) { + /* Clear the interrupt(s) that fired and process them */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int); + pthread_mutex_lock(&pdata->an_mutex); + axgbe_an73_state_machine(pdata); + pthread_mutex_unlock(&pdata->an_mutex); + } else { + /* Enable AN interrupts */ + axgbe_an73_enable_interrupts(pdata); + } +} + +static void axgbe_an_isr(struct axgbe_port *pdata) +{ + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_isr(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + PMD_DRV_LOG(ERR, "AN_MODE_37 not supported\n"); + break; + default: + break; + } +} + +static void axgbe_an_combined_isr(struct axgbe_port *pdata) +{ + axgbe_an_isr(pdata); +} + +static void axgbe_an73_init(struct axgbe_port *pdata) +{ + unsigned int advertising, reg; + + advertising = pdata->phy_if.phy_impl.an_advertising(pdata); + + /* Set up Advertisement register 3 first */ + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); + if (advertising & ADVERTISED_10000baseR_FEC) + reg |= 0xc000; + else + reg &= ~0xc000; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg); + + /* Set up Advertisement register 2 next */ + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); + if (advertising & ADVERTISED_10000baseKR_Full) + reg |= 0x80; + else + reg &= ~0x80; + + if ((advertising & ADVERTISED_1000baseKX_Full) || + (advertising & ADVERTISED_2500baseX_Full)) + reg |= 0x20; + else + reg &= ~0x20; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg); + + /* Set up Advertisement register 1 last */ + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); + if (advertising & ADVERTISED_Pause) + reg |= 0x400; + else + reg &= ~0x400; + + if (advertising & ADVERTISED_Asym_Pause) + reg |= 0x800; + else + reg &= ~0x800; + + /* We don't intend to perform XNP */ + reg &= ~AXGBE_XNP_NP_EXCHANGE; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg); +} + +static void axgbe_an_init(struct axgbe_port *pdata) +{ + /* Set up advertisement registers based on current settings */ + pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata); + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_init(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + PMD_DRV_LOG(ERR, "Unsupported AN_CL37\n"); + break; + default: 
+		break;
+	}
+}
+
+static void axgbe_phy_adjust_link(struct axgbe_port *pdata)
+{
+	if (pdata->phy.link) {
+		/* Flow control support */
+		pdata->pause_autoneg = pdata->phy.pause_autoneg;
+
+		if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause) {
+			pdata->hw_if.config_tx_flow_control(pdata);
+			pdata->tx_pause = pdata->phy.tx_pause;
+		}
+
+		if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause) {
+			pdata->hw_if.config_rx_flow_control(pdata);
+			pdata->rx_pause = pdata->phy.rx_pause;
+		}
+
+		/* Speed support */
+		if (pdata->phy_speed != pdata->phy.speed)
+			pdata->phy_speed = pdata->phy.speed;
+		if (pdata->phy_link != pdata->phy.link)
+			pdata->phy_link = pdata->phy.link;
+	} else if (pdata->phy_link) {
+		pdata->phy_link = 0;
+		pdata->phy_speed = SPEED_UNKNOWN;
+	}
+}
+
+static int axgbe_phy_config_fixed(struct axgbe_port *pdata)
+{
+	enum axgbe_mode mode;
+
+	/* Disable auto-negotiation */
+	axgbe_an_disable(pdata);
+
+	/* Set specified mode for specified speed */
+	mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed);
+	switch (mode) {
+	case AXGBE_MODE_KX_1000:
+	case AXGBE_MODE_KX_2500:
+	case AXGBE_MODE_KR:
+	case AXGBE_MODE_SGMII_100:
+	case AXGBE_MODE_SGMII_1000:
+	case AXGBE_MODE_X:
+	case AXGBE_MODE_SFI:
+		break;
+	case AXGBE_MODE_UNKNOWN:
+	default:
+		return -EINVAL;
+	}
+
+	/* Validate duplex mode */
+	if (pdata->phy.duplex != DUPLEX_FULL)
+		return -EINVAL;
+
+	axgbe_set_mode(pdata, mode);
+
+	return 0;
+}
+
+static int __axgbe_phy_config_aneg(struct axgbe_port *pdata)
+{
+	int ret;
+
+	axgbe_set_bit(AXGBE_LINK_INIT, &pdata->dev_state);
+	pdata->link_check = rte_get_timer_cycles();
+
+	ret = pdata->phy_if.phy_impl.an_config(pdata);
+	if (ret)
+		return ret;
+
+	if (pdata->phy.autoneg != AUTONEG_ENABLE) {
+		ret = axgbe_phy_config_fixed(pdata);
+		if (ret || !pdata->kr_redrv)
+			return ret;
+	}
+
+	/* Disable auto-negotiation interrupt */
+	rte_intr_disable(&pdata->pci_dev->intr_handle);
+
+	/* Start auto-negotiation in a supported mode */
+	if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) {
+		axgbe_set_mode(pdata, AXGBE_MODE_KR);
+	} else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) {
+		axgbe_set_mode(pdata, AXGBE_MODE_KX_2500);
+	} else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) {
+		axgbe_set_mode(pdata, AXGBE_MODE_KX_1000);
+	} else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) {
+		axgbe_set_mode(pdata, AXGBE_MODE_SFI);
+	} else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) {
+		axgbe_set_mode(pdata, AXGBE_MODE_X);
+	} else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) {
+		axgbe_set_mode(pdata, AXGBE_MODE_SGMII_1000);
+	} else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) {
+		axgbe_set_mode(pdata, AXGBE_MODE_SGMII_100);
+	} else {
+		rte_intr_enable(&pdata->pci_dev->intr_handle);
+		return -EINVAL;
+	}
+
+	/* Disable and stop any in-progress auto-negotiation */
+	axgbe_an_disable_all(pdata);
+
+	/* Clear any auto-negotiation interrupts */
+	axgbe_an_clear_interrupts_all(pdata);
+
+	pdata->an_result = AXGBE_AN_READY;
+	pdata->an_state = AXGBE_AN_READY;
+	pdata->kr_state = AXGBE_RX_BPA;
+	pdata->kx_state = AXGBE_RX_BPA;
+
+	/* Re-enable auto-negotiation interrupt */
+	rte_intr_enable(&pdata->pci_dev->intr_handle);
+
+	axgbe_an_init(pdata);
+	axgbe_an_restart(pdata);
+
+	return 0;
+}
+
+static int axgbe_phy_config_aneg(struct axgbe_port *pdata)
+{
+	int ret;
+
+	pthread_mutex_lock(&pdata->an_mutex);
+
+	ret = __axgbe_phy_config_aneg(pdata);
+	if (ret)
+		axgbe_set_bit(AXGBE_LINK_ERR, &pdata->dev_state);
+	else
+		axgbe_clear_bit(AXGBE_LINK_ERR, &pdata->dev_state);
+
+	pthread_mutex_unlock(&pdata->an_mutex);
+
+	return ret;
+}
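axgbe_phy_config_aneg() above is the locked wrapper the rest of the driver calls: it serializes on an_mutex and latches AXGBE_LINK_ERR in dev_state on failure. A rough sketch of how a port-start and link-poll path might drive these entry points through the phy_if table (call sites are illustrative):

    /* at port start: kick off auto-negotiation */
    if (pdata->phy_if.phy_config_aneg(pdata))
            PMD_DRV_LOG(ERR, "failed to start auto-negotiation\n");

    /* periodically, e.g. from an alarm callback */
    pdata->phy_if.phy_status(pdata);
    if (pdata->phy_link)
            PMD_DRV_LOG(DEBUG, "link up, speed %d\n", pdata->phy_speed);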
+
+static bool axgbe_phy_aneg_done(struct axgbe_port *pdata)
+{
+	return pdata->an_result == AXGBE_AN_COMPLETE;
+}
+
+static void axgbe_check_link_timeout(struct axgbe_port *pdata)
+{
+	unsigned long link_timeout;
+	unsigned long ticks;
+
+	link_timeout = pdata->link_check + (AXGBE_LINK_TIMEOUT *
+					    2 * rte_get_timer_hz());
+	ticks = rte_get_timer_cycles();
+	if (time_after(ticks, link_timeout))
+		axgbe_phy_config_aneg(pdata);
+}
+
+static enum axgbe_mode axgbe_phy_status_aneg(struct axgbe_port *pdata)
+{
+	return pdata->phy_if.phy_impl.an_outcome(pdata);
+}
+
+static void axgbe_phy_status_result(struct axgbe_port *pdata)
+{
+	enum axgbe_mode mode;
+
+	pdata->phy.lp_advertising = 0;
+
+	if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
+		mode = axgbe_cur_mode(pdata);
+	else
+		mode = axgbe_phy_status_aneg(pdata);
+
+	switch (mode) {
+	case AXGBE_MODE_SGMII_100:
+		pdata->phy.speed = SPEED_100;
+		break;
+	case AXGBE_MODE_X:
+	case AXGBE_MODE_KX_1000:
+	case AXGBE_MODE_SGMII_1000:
+		pdata->phy.speed = SPEED_1000;
+		break;
+	case AXGBE_MODE_KX_2500:
+		pdata->phy.speed = SPEED_2500;
+		break;
+	case AXGBE_MODE_KR:
+	case AXGBE_MODE_SFI:
+		pdata->phy.speed = SPEED_10000;
+		break;
+	case AXGBE_MODE_UNKNOWN:
+	default:
+		pdata->phy.speed = SPEED_UNKNOWN;
+	}
+
+	pdata->phy.duplex = DUPLEX_FULL;
+
+	axgbe_set_mode(pdata, mode);
+}
+
+static void axgbe_phy_status(struct axgbe_port *pdata)
+{
+	unsigned int link_aneg;
+	int an_restart;
+
+	if (axgbe_test_bit(AXGBE_LINK_ERR, &pdata->dev_state)) {
+		pdata->phy.link = 0;
+		goto adjust_link;
+	}
+
+	link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+
+	pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
+							     &an_restart);
+	if (an_restart) {
+		axgbe_phy_config_aneg(pdata);
+		return;
+	}
+
+	if (pdata->phy.link) {
+		if (link_aneg && !axgbe_phy_aneg_done(pdata)) {
+			axgbe_check_link_timeout(pdata);
+			return;
+		}
+		axgbe_phy_status_result(pdata);
+		if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state))
+			axgbe_clear_bit(AXGBE_LINK_INIT, &pdata->dev_state);
+	} else {
+		if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state)) {
+			axgbe_check_link_timeout(pdata);
+
+			if (link_aneg)
+				return;
+		}
+		axgbe_phy_status_result(pdata);
+	}
+
+adjust_link:
+	axgbe_phy_adjust_link(pdata);
+}
+
+static void axgbe_phy_stop(struct axgbe_port *pdata)
+{
+	if (!pdata->phy_started)
+		return;
+	/* Indicate the PHY is down */
+	pdata->phy_started = 0;
+	/* Disable auto-negotiation */
+	axgbe_an_disable_all(pdata);
+	pdata->phy_if.phy_impl.stop(pdata);
+	pdata->phy.link = 0;
+	axgbe_phy_adjust_link(pdata);
+}
+
+static int axgbe_phy_start(struct axgbe_port *pdata)
+{
+	int ret;
+
+	ret = pdata->phy_if.phy_impl.start(pdata);
+	if (ret)
+		return ret;
+	/* Set initial mode - call the mode setting routines
+	 * directly to ensure we are properly configured
+	 */
+	if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) {
+		axgbe_kr_mode(pdata);
+	} else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) {
+		axgbe_kx_2500_mode(pdata);
+	} else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) {
+		axgbe_kx_1000_mode(pdata);
+	} else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) {
+		axgbe_sfi_mode(pdata);
+	} else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) {
+		axgbe_x_mode(pdata);
+	} else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) {
+		axgbe_sgmii_1000_mode(pdata);
+	} else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) {
+		axgbe_sgmii_100_mode(pdata);
+	} else {
+		ret = -EINVAL;
+		goto err_stop;
+	}
+	/* Indicate the
PHY is up and running */ + pdata->phy_started = 1; + axgbe_an_init(pdata); + axgbe_an_enable_interrupts(pdata); + return axgbe_phy_config_aneg(pdata); + +err_stop: + pdata->phy_if.phy_impl.stop(pdata); + + return ret; +} + +static int axgbe_phy_reset(struct axgbe_port *pdata) +{ + int ret; + + ret = pdata->phy_if.phy_impl.reset(pdata); + if (ret) + return ret; + + /* Disable auto-negotiation for now */ + axgbe_an_disable_all(pdata); + + /* Clear auto-negotiation interrupts */ + axgbe_an_clear_interrupts_all(pdata); + + return 0; +} + +static int axgbe_phy_best_advertised_speed(struct axgbe_port *pdata) +{ + if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) + return SPEED_10000; + else if (pdata->phy.advertising & ADVERTISED_10000baseT_Full) + return SPEED_10000; + else if (pdata->phy.advertising & ADVERTISED_2500baseX_Full) + return SPEED_2500; + else if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full) + return SPEED_1000; + else if (pdata->phy.advertising & ADVERTISED_1000baseT_Full) + return SPEED_1000; + else if (pdata->phy.advertising & ADVERTISED_100baseT_Full) + return SPEED_100; + + return SPEED_UNKNOWN; +} + +static int axgbe_phy_init(struct axgbe_port *pdata) +{ + int ret; + + pdata->mdio_mmd = MDIO_MMD_PCS; + + /* Check for FEC support */ + pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, + MDIO_PMA_10GBR_FECABLE); + pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE | + MDIO_PMA_10GBR_FECABLE_ERRABLE); + + /* Setup the phy (including supported features) */ + ret = pdata->phy_if.phy_impl.init(pdata); + if (ret) + return ret; + pdata->phy.advertising = pdata->phy.supported; + + pdata->phy.address = 0; + + if (pdata->phy.advertising & ADVERTISED_Autoneg) { + pdata->phy.autoneg = AUTONEG_ENABLE; + pdata->phy.speed = SPEED_UNKNOWN; + pdata->phy.duplex = DUPLEX_UNKNOWN; + } else { + pdata->phy.autoneg = AUTONEG_DISABLE; + pdata->phy.speed = axgbe_phy_best_advertised_speed(pdata); + pdata->phy.duplex = DUPLEX_FULL; + } + + pdata->phy.link = 0; + + pdata->phy.pause_autoneg = pdata->pause_autoneg; + pdata->phy.tx_pause = pdata->tx_pause; + pdata->phy.rx_pause = pdata->rx_pause; + + /* Fix up Flow Control advertising */ + pdata->phy.advertising &= ~ADVERTISED_Pause; + pdata->phy.advertising &= ~ADVERTISED_Asym_Pause; + + if (pdata->rx_pause) { + pdata->phy.advertising |= ADVERTISED_Pause; + pdata->phy.advertising |= ADVERTISED_Asym_Pause; + } + + if (pdata->tx_pause) + pdata->phy.advertising ^= ADVERTISED_Asym_Pause; + return 0; +} + +void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if) +{ + phy_if->phy_init = axgbe_phy_init; + phy_if->phy_reset = axgbe_phy_reset; + phy_if->phy_start = axgbe_phy_start; + phy_if->phy_stop = axgbe_phy_stop; + phy_if->phy_status = axgbe_phy_status; + phy_if->phy_config_aneg = axgbe_phy_config_aneg; + phy_if->an_isr = axgbe_an_combined_isr; +} diff --git a/drivers/net/axgbe/axgbe_phy.h b/drivers/net/axgbe/axgbe_phy.h new file mode 100644 index 00000000..77ee20a3 --- /dev/null +++ b/drivers/net/axgbe/axgbe_phy.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. + */ + +#ifndef __AXGBE_PHY_H__ +#define __AXGBE_PHY_H__ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define SPEED_10000 10000 + + +/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit + * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. 
+ */ +#define MII_ADDR_C45 (1 << 30) + +/* Basic mode status register. */ +#define BMSR_LSTATUS 0x0004 /* Link status */ + +/* Status register 1. */ +#define MDIO_STAT1_LSTATUS BMSR_LSTATUS + +/* Generic MII registers. */ +#define MII_BMCR 0x00 /* Basic mode control register */ +#define MII_BMSR 0x01 /* Basic mode status register */ +#define MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define MII_LPA 0x05 /* Link partner ability reg */ +#define MII_EXPANSION 0x06 /* Expansion register */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +#define MII_MMD_CTRL 0x0d /* MMD Access Control Register */ +#define MII_MMD_DATA 0x0e /* MMD Access Data Register */ +#define MII_ESTATUS 0x0f /* Extended Status */ +#define MII_DCOUNTER 0x12 /* Disconnect counter */ +#define MII_FCSCOUNTER 0x13 /* False carrier counter */ +#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */ +#define MII_RERRCOUNTER 0x15 /* Receive error counter */ +#define MII_SREVISION 0x16 /* Silicon revision */ +#define MII_RESV1 0x17 /* Reserved... */ +#define MII_LBRERROR 0x18 /* Lpback, rx, bypass error */ +#define MII_PHYADDR 0x19 /* PHY address */ +#define MII_RESV2 0x1a /* Reserved... */ +#define MII_TPISTATUS 0x1b /* TPI status for 10mbps */ +#define MII_NCONFIG 0x1c /* Network interface config */ + +/* Basic mode control register. */ +#define BMCR_RESV 0x003f /* Unused... */ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ +#define BMCR_CTST 0x0080 /* Collision test */ +#define BMCR_FULLDPLX 0x0100 /* Full duplex */ +#define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */ +#define BMCR_ISOLATE 0x0400 /* Isolate data paths from MII */ +#define BMCR_PDOWN 0x0800 /* Enable low power state */ +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ +#define BMCR_SPEED100 0x2000 /* Select 100Mbps */ +#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */ +#define BMCR_RESET 0x8000 /* Reset to default state */ +#define BMCR_SPEED10 0x0000 /* Select 10Mbps */ + + +/* MDIO Manageable Devices (MMDs). */ +#define MDIO_MMD_PMAPMD 1 /* Physical Medium Attachment + * Physical Medium Dependent + */ +#define MDIO_MMD_WIS 2 /* WAN Interface Sublayer */ +#define MDIO_MMD_PCS 3 /* Physical Coding Sublayer */ +#define MDIO_MMD_PHYXS 4 /* PHY Extender Sublayer */ +#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */ +#define MDIO_MMD_TC 6 /* Transmission Convergence */ +#define MDIO_MMD_AN 7 /* Auto-Negotiation */ +#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */ +#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */ +#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */ + +/* Generic MDIO registers. 
*/ +#define MDIO_CTRL1 MII_BMCR +#define MDIO_STAT1 MII_BMSR +#define MDIO_DEVID1 MII_PHYSID1 +#define MDIO_DEVID2 MII_PHYSID2 +#define MDIO_SPEED 4 /* Speed ability */ +#define MDIO_DEVS1 5 /* Devices in package */ +#define MDIO_DEVS2 6 +#define MDIO_CTRL2 7 /* 10G control 2 */ +#define MDIO_STAT2 8 /* 10G status 2 */ +#define MDIO_PMA_TXDIS 9 /* 10G PMA/PMD transmit disable */ +#define MDIO_PMA_RXDET 10 /* 10G PMA/PMD receive signal detect */ +#define MDIO_PMA_EXTABLE 11 /* 10G PMA/PMD extended ability */ +#define MDIO_PKGID1 14 /* Package identifier */ +#define MDIO_PKGID2 15 +#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */ +#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */ +#define MDIO_PCS_EEE_ABLE 20 /* EEE Capability register */ +#define MDIO_PCS_EEE_WK_ERR 22 /* EEE wake error counter */ +#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */ +#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */ +#define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */ + +/* Media-dependent registers. */ +#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */ +#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */ +#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A. + * Lanes B-D are numbered 134-136. + */ +#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */ +#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */ +#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */ +#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */ +#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */ +#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */ + +/* Control register 1. */ +/* Enable extended speed selection */ +#define MDIO_CTRL1_SPEEDSELEXT (BMCR_SPEED1000 | BMCR_SPEED100) +/* All speed selection bits */ +#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x003c) +#define MDIO_CTRL1_FULLDPLX BMCR_FULLDPLX +#define MDIO_CTRL1_LPOWER BMCR_PDOWN +#define MDIO_CTRL1_RESET BMCR_RESET +#define MDIO_PMA_CTRL1_LOOPBACK 0x0001 +#define MDIO_PMA_CTRL1_SPEED1000 BMCR_SPEED1000 +#define MDIO_PMA_CTRL1_SPEED100 BMCR_SPEED100 +#define MDIO_PCS_CTRL1_LOOPBACK BMCR_LOOPBACK +#define MDIO_PHYXS_CTRL1_LOOPBACK BMCR_LOOPBACK +#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART +#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE +#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */ +#define MDIO_PCS_CTRL1_CLKSTOP_EN 0x400 /* Stop the clock during LPI */ + + + + + +/* PMA 10GBASE-R FEC ability register. */ +#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */ +#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. 
ability */
+
+
+/* Autoneg related */
+#define ADVERTISED_Autoneg	(1 << 6)
+#define SUPPORTED_Autoneg	(1 << 6)
+#define AUTONEG_DISABLE		0x00
+#define AUTONEG_ENABLE		0x01
+
+#define ADVERTISED_Pause	(1 << 13)
+#define ADVERTISED_Asym_Pause	(1 << 14)
+
+#define SUPPORTED_Pause		(1 << 13)
+#define SUPPORTED_Asym_Pause	(1 << 14)
+
+#define SUPPORTED_Backplane	(1 << 16)
+#define SUPPORTED_TP		(1 << 7)
+
+#define ADVERTISED_10000baseR_FEC	(1 << 20)
+
+#define SUPPORTED_10000baseR_FEC	(1 << 20)
+
+#define SUPPORTED_FIBRE		(1 << 10)
+
+#define ADVERTISED_10000baseKR_Full	(1 << 19)
+#define ADVERTISED_10000baseT_Full	(1 << 12)
+#define ADVERTISED_2500baseX_Full	(1 << 15)
+#define ADVERTISED_1000baseKX_Full	(1 << 17)
+#define ADVERTISED_1000baseT_Full	(1 << 5)
+#define ADVERTISED_100baseT_Full	(1 << 3)
+#define ADVERTISED_TP			(1 << 7)
+#define ADVERTISED_FIBRE		(1 << 10)
+#define ADVERTISED_Backplane		(1 << 16)
+
+#define SUPPORTED_1000baseKX_Full	(1 << 17)
+#define SUPPORTED_10000baseKR_Full	(1 << 19)
+#define SUPPORTED_2500baseX_Full	(1 << 15)
+#define SUPPORTED_100baseT_Full		(1 << 3)
+#define SUPPORTED_1000baseT_Full	(1 << 5)
+#define SUPPORTED_10000baseT_Full	(1 << 12)
+
+
+#define SPEED_UNKNOWN		-1
+
+/* Duplex, half or full. */
+#define DUPLEX_HALF	0x00
+#define DUPLEX_FULL	0x01
+#define DUPLEX_UNKNOWN	0xff
+
+#endif /* __AXGBE_PHY_H__ */
diff --git a/drivers/net/axgbe/axgbe_phy_impl.c b/drivers/net/axgbe/axgbe_phy_impl.c
new file mode 100644
index 00000000..973177f6
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_phy_impl.c
@@ -0,0 +1,2191 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+
+#define AXGBE_PHY_PORT_SPEED_100	BIT(0)
+#define AXGBE_PHY_PORT_SPEED_1000	BIT(1)
+#define AXGBE_PHY_PORT_SPEED_2500	BIT(2)
+#define AXGBE_PHY_PORT_SPEED_10000	BIT(3)
+
+#define AXGBE_MUTEX_RELEASE		0x80000000
+
+#define AXGBE_SFP_DIRECT		7
+
+/* I2C target addresses */
+#define AXGBE_SFP_SERIAL_ID_ADDRESS	0x50
+#define AXGBE_SFP_DIAG_INFO_ADDRESS	0x51
+#define AXGBE_SFP_PHY_ADDRESS		0x56
+#define AXGBE_GPIO_ADDRESS_PCA9555	0x20
+
+/* SFP sideband signal indicators */
+#define AXGBE_GPIO_NO_TX_FAULT		BIT(0)
+#define AXGBE_GPIO_NO_RATE_SELECT	BIT(1)
+#define AXGBE_GPIO_NO_MOD_ABSENT	BIT(2)
+#define AXGBE_GPIO_NO_RX_LOS		BIT(3)
+
+/* Rate-change complete wait/retry count */
+#define AXGBE_RATECHANGE_COUNT		500
+
+/* CDR delay values for KR support (in usec) */
+#define AXGBE_CDR_DELAY_INIT		10000
+#define AXGBE_CDR_DELAY_INC		10000
+#define AXGBE_CDR_DELAY_MAX		100000
+
+enum axgbe_port_mode {
+	AXGBE_PORT_MODE_RSVD = 0,
+	AXGBE_PORT_MODE_BACKPLANE,
+	AXGBE_PORT_MODE_BACKPLANE_2500,
+	AXGBE_PORT_MODE_1000BASE_T,
+	AXGBE_PORT_MODE_1000BASE_X,
+	AXGBE_PORT_MODE_NBASE_T,
+	AXGBE_PORT_MODE_10GBASE_T,
+	AXGBE_PORT_MODE_10GBASE_R,
+	AXGBE_PORT_MODE_SFP,
+	AXGBE_PORT_MODE_MAX,
+};
+
+enum axgbe_conn_type {
+	AXGBE_CONN_TYPE_NONE = 0,
+	AXGBE_CONN_TYPE_SFP,
+	AXGBE_CONN_TYPE_MDIO,
+	AXGBE_CONN_TYPE_RSVD1,
+	AXGBE_CONN_TYPE_BACKPLANE,
+	AXGBE_CONN_TYPE_MAX,
+};
+
+/* SFP/SFP+ related definitions */
+enum axgbe_sfp_comm {
+	AXGBE_SFP_COMM_DIRECT = 0,
+	AXGBE_SFP_COMM_PCA9545,
+};
+
+enum axgbe_sfp_cable {
+	AXGBE_SFP_CABLE_UNKNOWN = 0,
+	AXGBE_SFP_CABLE_ACTIVE,
+	AXGBE_SFP_CABLE_PASSIVE,
+};
+
+enum axgbe_sfp_base {
+	AXGBE_SFP_BASE_UNKNOWN = 0,
+	AXGBE_SFP_BASE_1000_T,
+	AXGBE_SFP_BASE_1000_SX,
AXGBE_SFP_BASE_1000_LX, + AXGBE_SFP_BASE_1000_CX, + AXGBE_SFP_BASE_10000_SR, + AXGBE_SFP_BASE_10000_LR, + AXGBE_SFP_BASE_10000_LRM, + AXGBE_SFP_BASE_10000_ER, + AXGBE_SFP_BASE_10000_CR, +}; + +enum axgbe_sfp_speed { + AXGBE_SFP_SPEED_UNKNOWN = 0, + AXGBE_SFP_SPEED_100_1000, + AXGBE_SFP_SPEED_1000, + AXGBE_SFP_SPEED_10000, +}; + +/* SFP Serial ID Base ID values relative to an offset of 0 */ +#define AXGBE_SFP_BASE_ID 0 +#define AXGBE_SFP_ID_SFP 0x03 + +#define AXGBE_SFP_BASE_EXT_ID 1 +#define AXGBE_SFP_EXT_ID_SFP 0x04 + +#define AXGBE_SFP_BASE_10GBE_CC 3 +#define AXGBE_SFP_BASE_10GBE_CC_SR BIT(4) +#define AXGBE_SFP_BASE_10GBE_CC_LR BIT(5) +#define AXGBE_SFP_BASE_10GBE_CC_LRM BIT(6) +#define AXGBE_SFP_BASE_10GBE_CC_ER BIT(7) + +#define AXGBE_SFP_BASE_1GBE_CC 6 +#define AXGBE_SFP_BASE_1GBE_CC_SX BIT(0) +#define AXGBE_SFP_BASE_1GBE_CC_LX BIT(1) +#define AXGBE_SFP_BASE_1GBE_CC_CX BIT(2) +#define AXGBE_SFP_BASE_1GBE_CC_T BIT(3) + +#define AXGBE_SFP_BASE_CABLE 8 +#define AXGBE_SFP_BASE_CABLE_PASSIVE BIT(2) +#define AXGBE_SFP_BASE_CABLE_ACTIVE BIT(3) + +#define AXGBE_SFP_BASE_BR 12 +#define AXGBE_SFP_BASE_BR_1GBE_MIN 0x0a +#define AXGBE_SFP_BASE_BR_1GBE_MAX 0x0d +#define AXGBE_SFP_BASE_BR_10GBE_MIN 0x64 +#define AXGBE_SFP_BASE_BR_10GBE_MAX 0x68 + +#define AXGBE_SFP_BASE_CU_CABLE_LEN 18 + +#define AXGBE_SFP_BASE_VENDOR_NAME 20 +#define AXGBE_SFP_BASE_VENDOR_NAME_LEN 16 +#define AXGBE_SFP_BASE_VENDOR_PN 40 +#define AXGBE_SFP_BASE_VENDOR_PN_LEN 16 +#define AXGBE_SFP_BASE_VENDOR_REV 56 +#define AXGBE_SFP_BASE_VENDOR_REV_LEN 4 + +#define AXGBE_SFP_BASE_CC 63 + +/* SFP Serial ID Extended ID values relative to an offset of 64 */ +#define AXGBE_SFP_BASE_VENDOR_SN 4 +#define AXGBE_SFP_BASE_VENDOR_SN_LEN 16 + +#define AXGBE_SFP_EXTD_DIAG 28 +#define AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2) + +#define AXGBE_SFP_EXTD_SFF_8472 30 + +#define AXGBE_SFP_EXTD_CC 31 + +struct axgbe_sfp_eeprom { + u8 base[64]; + u8 extd[32]; + u8 vendor[32]; +}; + +#define AXGBE_BEL_FUSE_VENDOR "BEL-FUSE" +#define AXGBE_BEL_FUSE_PARTNO "1GBT-SFP06" + +struct axgbe_sfp_ascii { + union { + char vendor[AXGBE_SFP_BASE_VENDOR_NAME_LEN + 1]; + char partno[AXGBE_SFP_BASE_VENDOR_PN_LEN + 1]; + char rev[AXGBE_SFP_BASE_VENDOR_REV_LEN + 1]; + char serno[AXGBE_SFP_BASE_VENDOR_SN_LEN + 1]; + } u; +}; + +/* MDIO PHY reset types */ +enum axgbe_mdio_reset { + AXGBE_MDIO_RESET_NONE = 0, + AXGBE_MDIO_RESET_I2C_GPIO, + AXGBE_MDIO_RESET_INT_GPIO, + AXGBE_MDIO_RESET_MAX, +}; + +/* Re-driver related definitions */ +enum axgbe_phy_redrv_if { + AXGBE_PHY_REDRV_IF_MDIO = 0, + AXGBE_PHY_REDRV_IF_I2C, + AXGBE_PHY_REDRV_IF_MAX, +}; + +enum axgbe_phy_redrv_model { + AXGBE_PHY_REDRV_MODEL_4223 = 0, + AXGBE_PHY_REDRV_MODEL_4227, + AXGBE_PHY_REDRV_MODEL_MAX, +}; + +enum axgbe_phy_redrv_mode { + AXGBE_PHY_REDRV_MODE_CX = 5, + AXGBE_PHY_REDRV_MODE_SR = 9, +}; + +#define AXGBE_PHY_REDRV_MODE_REG 0x12b0 + +/* PHY related configuration information */ +struct axgbe_phy_data { + enum axgbe_port_mode port_mode; + + unsigned int port_id; + + unsigned int port_speeds; + + enum axgbe_conn_type conn_type; + + enum axgbe_mode cur_mode; + enum axgbe_mode start_mode; + + unsigned int rrc_count; + + unsigned int mdio_addr; + + unsigned int comm_owned; + + /* SFP Support */ + enum axgbe_sfp_comm sfp_comm; + unsigned int sfp_mux_address; + unsigned int sfp_mux_channel; + + unsigned int sfp_gpio_address; + unsigned int sfp_gpio_mask; + unsigned int sfp_gpio_rx_los; + unsigned int sfp_gpio_tx_fault; + unsigned int sfp_gpio_mod_absent; + unsigned int sfp_gpio_rate_select; + + unsigned 
int sfp_rx_los;
+	unsigned int sfp_tx_fault;
+	unsigned int sfp_mod_absent;
+	unsigned int sfp_diags;
+	unsigned int sfp_changed;
+	unsigned int sfp_phy_avail;
+	unsigned int sfp_cable_len;
+	enum axgbe_sfp_base sfp_base;
+	enum axgbe_sfp_cable sfp_cable;
+	enum axgbe_sfp_speed sfp_speed;
+	struct axgbe_sfp_eeprom sfp_eeprom;
+
+	/* External PHY support */
+	enum axgbe_mdio_mode phydev_mode;
+	enum axgbe_mdio_reset mdio_reset;
+	unsigned int mdio_reset_addr;
+	unsigned int mdio_reset_gpio;
+
+	/* Re-driver support */
+	unsigned int redrv;
+	unsigned int redrv_if;
+	unsigned int redrv_addr;
+	unsigned int redrv_lane;
+	unsigned int redrv_model;
+
+	/* KR AN support */
+	unsigned int phy_cdr_notrack;
+	unsigned int phy_cdr_delay;
+};
+
+static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata);
+
+static int axgbe_phy_i2c_xfer(struct axgbe_port *pdata,
+			      struct axgbe_i2c_op *i2c_op)
+{
+	struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+	/* Be sure we own the bus */
+	if (!phy_data->comm_owned)
+		return -EIO;
+
+	return pdata->i2c_if.i2c_xfer(pdata, i2c_op);
+}
+
+static int axgbe_phy_redrv_write(struct axgbe_port *pdata, unsigned int reg,
+				 unsigned int val)
+{
+	struct axgbe_phy_data *phy_data = pdata->phy_data;
+	struct axgbe_i2c_op i2c_op;
+	uint16_t *redrv_val;
+	u8 redrv_data[5], csum;
+	unsigned int i, retry;
+	int ret;
+
+	/* High byte of register contains read/write indicator */
+	redrv_data[0] = ((reg >> 8) & 0xff) << 1;
+	redrv_data[1] = reg & 0xff;
+	redrv_val = (uint16_t *)&redrv_data[2];
+	*redrv_val = rte_cpu_to_be_16(val);
+
+	/* Calculate 1-byte checksum */
+	csum = 0;
+	for (i = 0; i < 4; i++) {
+		csum += redrv_data[i];
+		if (redrv_data[i] > csum)
+			csum++;
+	}
+	redrv_data[4] = ~csum;
+
+	retry = 1;
+again1:
+	i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+	i2c_op.target = phy_data->redrv_addr;
+	i2c_op.len = sizeof(redrv_data);
+	i2c_op.buf = redrv_data;
+	ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+	if (ret) {
+		if ((ret == -EAGAIN) && retry--)
+			goto again1;
+
+		return ret;
+	}
+
+	retry = 1;
+again2:
+	i2c_op.cmd = AXGBE_I2C_CMD_READ;
+	i2c_op.target = phy_data->redrv_addr;
+	i2c_op.len = 1;
+	i2c_op.buf = redrv_data;
+	ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+	if (ret) {
+		if ((ret == -EAGAIN) && retry--)
+			goto again2;
+
+		return ret;
+	}
+
+	if (redrv_data[0] != 0xff) {
+		PMD_DRV_LOG(ERR, "Redriver write checksum error\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static int axgbe_phy_i2c_read(struct axgbe_port *pdata, unsigned int target,
+			      void *reg, unsigned int reg_len,
+			      void *val, unsigned int val_len)
+{
+	struct axgbe_i2c_op i2c_op;
+	int retry, ret;
+
+	retry = 1;
+again1:
+	/* Set the specified register to read */
+	i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+	i2c_op.target = target;
+	i2c_op.len = reg_len;
+	i2c_op.buf = reg;
+	ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+	if (ret) {
+		if ((ret == -EAGAIN) && retry--)
+			goto again1;
+
+		return ret;
+	}
+
+	retry = 1;
+again2:
+	/* Read the specified register */
+	i2c_op.cmd = AXGBE_I2C_CMD_READ;
+	i2c_op.target = target;
+	i2c_op.len = val_len;
+	i2c_op.buf = val;
+	ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+	if ((ret == -EAGAIN) && retry--)
+		goto again2;
+
+	return ret;
+}
+
+static int axgbe_phy_sfp_put_mux(struct axgbe_port *pdata)
+{
+	struct axgbe_phy_data *phy_data = pdata->phy_data;
+	struct axgbe_i2c_op i2c_op;
+	uint8_t mux_channel;
+
+	if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
+		return 0;
+
+	/* Select no mux channels */
+	mux_channel = 0;
+	i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+	i2c_op.target =
phy_data->sfp_mux_address; + i2c_op.len = sizeof(mux_channel); + i2c_op.buf = &mux_channel; + + return axgbe_phy_i2c_xfer(pdata, &i2c_op); +} + +static int axgbe_phy_sfp_get_mux(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + struct axgbe_i2c_op i2c_op; + u8 mux_channel; + + if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT) + return 0; + + /* Select desired mux channel */ + mux_channel = 1 << phy_data->sfp_mux_channel; + i2c_op.cmd = AXGBE_I2C_CMD_WRITE; + i2c_op.target = phy_data->sfp_mux_address; + i2c_op.len = sizeof(mux_channel); + i2c_op.buf = &mux_channel; + + return axgbe_phy_i2c_xfer(pdata, &i2c_op); +} + +static void axgbe_phy_put_comm_ownership(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + phy_data->comm_owned = 0; + + pthread_mutex_unlock(&pdata->phy_mutex); +} + +static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + uint64_t timeout; + unsigned int mutex_id; + + if (phy_data->comm_owned) + return 0; + + /* The I2C and MDIO/GPIO bus is multiplexed between multiple devices, + * the driver needs to take the software mutex and then the hardware + * mutexes before being able to use the busses. + */ + pthread_mutex_lock(&pdata->phy_mutex); + + /* Clear the mutexes */ + XP_IOWRITE(pdata, XP_I2C_MUTEX, AXGBE_MUTEX_RELEASE); + XP_IOWRITE(pdata, XP_MDIO_MUTEX, AXGBE_MUTEX_RELEASE); + + /* Mutex formats are the same for I2C and MDIO/GPIO */ + mutex_id = 0; + XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id); + XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1); + + timeout = rte_get_timer_cycles() + (rte_get_timer_hz() * 5); + while (time_before(rte_get_timer_cycles(), timeout)) { + /* Must be all zeroes in order to obtain the mutex */ + if (XP_IOREAD(pdata, XP_I2C_MUTEX) || + XP_IOREAD(pdata, XP_MDIO_MUTEX)) { + rte_delay_us(100); + continue; + } + + /* Obtain the mutex */ + XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id); + XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id); + + phy_data->comm_owned = 1; + return 0; + } + + pthread_mutex_unlock(&pdata->phy_mutex); + + PMD_DRV_LOG(ERR, "unable to obtain hardware mutexes\n"); + + return -ETIMEDOUT; +} + +static void axgbe_phy_sfp_phy_settings(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + if (phy_data->sfp_mod_absent) { + pdata->phy.speed = SPEED_UNKNOWN; + pdata->phy.duplex = DUPLEX_UNKNOWN; + pdata->phy.autoneg = AUTONEG_ENABLE; + pdata->phy.advertising = pdata->phy.supported; + } + + pdata->phy.advertising &= ~ADVERTISED_Autoneg; + pdata->phy.advertising &= ~ADVERTISED_TP; + pdata->phy.advertising &= ~ADVERTISED_FIBRE; + pdata->phy.advertising &= ~ADVERTISED_100baseT_Full; + pdata->phy.advertising &= ~ADVERTISED_1000baseT_Full; + pdata->phy.advertising &= ~ADVERTISED_10000baseT_Full; + pdata->phy.advertising &= ~ADVERTISED_10000baseR_FEC; + + switch (phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: + case AXGBE_SFP_BASE_1000_SX: + case AXGBE_SFP_BASE_1000_LX: + case AXGBE_SFP_BASE_1000_CX: + pdata->phy.speed = SPEED_UNKNOWN; + pdata->phy.duplex = DUPLEX_UNKNOWN; + pdata->phy.autoneg = AUTONEG_ENABLE; + pdata->phy.advertising |= ADVERTISED_Autoneg; + break; + case AXGBE_SFP_BASE_10000_SR: + case AXGBE_SFP_BASE_10000_LR: + case AXGBE_SFP_BASE_10000_LRM: + case AXGBE_SFP_BASE_10000_ER: + case AXGBE_SFP_BASE_10000_CR: + default: + pdata->phy.speed = SPEED_10000; + pdata->phy.duplex = DUPLEX_FULL; + pdata->phy.autoneg = AUTONEG_DISABLE; + break; + } + + switch 
(phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: + case AXGBE_SFP_BASE_1000_CX: + case AXGBE_SFP_BASE_10000_CR: + pdata->phy.advertising |= ADVERTISED_TP; + break; + default: + pdata->phy.advertising |= ADVERTISED_FIBRE; + } + + switch (phy_data->sfp_speed) { + case AXGBE_SFP_SPEED_100_1000: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) + pdata->phy.advertising |= ADVERTISED_100baseT_Full; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) + pdata->phy.advertising |= ADVERTISED_1000baseT_Full; + break; + case AXGBE_SFP_SPEED_1000: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) + pdata->phy.advertising |= ADVERTISED_1000baseT_Full; + break; + case AXGBE_SFP_SPEED_10000: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) + pdata->phy.advertising |= ADVERTISED_10000baseT_Full; + break; + default: + /* Choose the fastest supported speed */ + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) + pdata->phy.advertising |= ADVERTISED_10000baseT_Full; + else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) + pdata->phy.advertising |= ADVERTISED_1000baseT_Full; + else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) + pdata->phy.advertising |= ADVERTISED_100baseT_Full; + } +} + +static bool axgbe_phy_sfp_bit_rate(struct axgbe_sfp_eeprom *sfp_eeprom, + enum axgbe_sfp_speed sfp_speed) +{ + u8 *sfp_base, min, max; + + sfp_base = sfp_eeprom->base; + + switch (sfp_speed) { + case AXGBE_SFP_SPEED_1000: + min = AXGBE_SFP_BASE_BR_1GBE_MIN; + max = AXGBE_SFP_BASE_BR_1GBE_MAX; + break; + case AXGBE_SFP_SPEED_10000: + min = AXGBE_SFP_BASE_BR_10GBE_MIN; + max = AXGBE_SFP_BASE_BR_10GBE_MAX; + break; + default: + return false; + } + + return ((sfp_base[AXGBE_SFP_BASE_BR] >= min) && + (sfp_base[AXGBE_SFP_BASE_BR] <= max)); +} + +static void axgbe_phy_sfp_external_phy(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + if (!phy_data->sfp_changed) + return; + + phy_data->sfp_phy_avail = 0; + + if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T) + return; +} + +static bool axgbe_phy_belfuse_parse_quirks(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; + + if (memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_NAME], + AXGBE_BEL_FUSE_VENDOR, strlen(AXGBE_BEL_FUSE_VENDOR))) + return false; + + if (!memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_PN], + AXGBE_BEL_FUSE_PARTNO, strlen(AXGBE_BEL_FUSE_PARTNO))) { + phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX; + phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE; + phy_data->sfp_speed = AXGBE_SFP_SPEED_1000; + return true; + } + + return false; +} + +static bool axgbe_phy_sfp_parse_quirks(struct axgbe_port *pdata) +{ + if (axgbe_phy_belfuse_parse_quirks(pdata)) + return true; + + return false; +} + +static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; + uint8_t *sfp_base; + + sfp_base = sfp_eeprom->base; + + if (sfp_base[AXGBE_SFP_BASE_ID] != AXGBE_SFP_ID_SFP) + return; + + if (sfp_base[AXGBE_SFP_BASE_EXT_ID] != AXGBE_SFP_EXT_ID_SFP) + return; + + if (axgbe_phy_sfp_parse_quirks(pdata)) + return; + + /* Assume ACTIVE cable unless told it is PASSIVE */ + if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_PASSIVE) { + phy_data->sfp_cable = AXGBE_SFP_CABLE_PASSIVE; + phy_data->sfp_cable_len = sfp_base[AXGBE_SFP_BASE_CU_CABLE_LEN]; + } else { + phy_data->sfp_cable = 
AXGBE_SFP_CABLE_ACTIVE; + } + + /* Determine the type of SFP */ + if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_SR; + else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_LR) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_LR; + else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & + AXGBE_SFP_BASE_10GBE_CC_LRM) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_LRM; + else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_ER) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_ER; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_SX) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_LX) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_LX; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_CX) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_CX; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_T) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_T; + else if ((phy_data->sfp_cable == AXGBE_SFP_CABLE_PASSIVE) && + axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000)) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR; + + switch (phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: + phy_data->sfp_speed = AXGBE_SFP_SPEED_100_1000; + break; + case AXGBE_SFP_BASE_1000_SX: + case AXGBE_SFP_BASE_1000_LX: + case AXGBE_SFP_BASE_1000_CX: + phy_data->sfp_speed = AXGBE_SFP_SPEED_1000; + break; + case AXGBE_SFP_BASE_10000_SR: + case AXGBE_SFP_BASE_10000_LR: + case AXGBE_SFP_BASE_10000_LRM: + case AXGBE_SFP_BASE_10000_ER: + case AXGBE_SFP_BASE_10000_CR: + phy_data->sfp_speed = AXGBE_SFP_SPEED_10000; + break; + default: + break; + } +} + +static bool axgbe_phy_sfp_verify_eeprom(uint8_t cc_in, uint8_t *buf, + unsigned int len) +{ + uint8_t cc; + + for (cc = 0; len; buf++, len--) + cc += *buf; + + return (cc == cc_in) ? 
true : false; +} + +static int axgbe_phy_sfp_read_eeprom(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + struct axgbe_sfp_eeprom sfp_eeprom; + uint8_t eeprom_addr; + int ret; + + ret = axgbe_phy_sfp_get_mux(pdata); + if (ret) { + PMD_DRV_LOG(ERR, "I2C error setting SFP MUX\n"); + return ret; + } + + /* Read the SFP serial ID eeprom */ + eeprom_addr = 0; + ret = axgbe_phy_i2c_read(pdata, AXGBE_SFP_SERIAL_ID_ADDRESS, + &eeprom_addr, sizeof(eeprom_addr), + &sfp_eeprom, sizeof(sfp_eeprom)); + if (ret) { + PMD_DRV_LOG(ERR, "I2C error reading SFP EEPROM\n"); + goto put; + } + + /* Validate the contents read */ + if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[AXGBE_SFP_BASE_CC], + sfp_eeprom.base, + sizeof(sfp_eeprom.base) - 1)) { + ret = -EINVAL; + goto put; + } + + if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[AXGBE_SFP_EXTD_CC], + sfp_eeprom.extd, + sizeof(sfp_eeprom.extd) - 1)) { + ret = -EINVAL; + goto put; + } + + /* Check for an added or changed SFP */ + if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) { + phy_data->sfp_changed = 1; + memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom)); + + if (sfp_eeprom.extd[AXGBE_SFP_EXTD_SFF_8472]) { + uint8_t diag_type; + diag_type = sfp_eeprom.extd[AXGBE_SFP_EXTD_DIAG]; + + if (!(diag_type & AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE)) + phy_data->sfp_diags = 1; + } + } else { + phy_data->sfp_changed = 0; + } + +put: + axgbe_phy_sfp_put_mux(pdata); + + return ret; +} + +static void axgbe_phy_sfp_signals(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int gpio_input; + u8 gpio_reg, gpio_ports[2]; + int ret; + + /* Read the input port registers */ + gpio_reg = 0; + ret = axgbe_phy_i2c_read(pdata, phy_data->sfp_gpio_address, + &gpio_reg, sizeof(gpio_reg), + gpio_ports, sizeof(gpio_ports)); + if (ret) { + PMD_DRV_LOG(ERR, "I2C error reading SFP GPIOs\n"); + return; + } + + gpio_input = (gpio_ports[1] << 8) | gpio_ports[0]; + + if (phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_MOD_ABSENT) { + /* No GPIO, just assume the module is present for now */ + phy_data->sfp_mod_absent = 0; + } else { + if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent))) + phy_data->sfp_mod_absent = 0; + } + + if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_RX_LOS) && + (gpio_input & (1 << phy_data->sfp_gpio_rx_los))) + phy_data->sfp_rx_los = 1; + + if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_TX_FAULT) && + (gpio_input & (1 << phy_data->sfp_gpio_tx_fault))) + phy_data->sfp_tx_fault = 1; +} + +static void axgbe_phy_sfp_mod_absent(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + phy_data->sfp_mod_absent = 1; + phy_data->sfp_phy_avail = 0; + memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom)); +} + +static void axgbe_phy_sfp_reset(struct axgbe_phy_data *phy_data) +{ + phy_data->sfp_rx_los = 0; + phy_data->sfp_tx_fault = 0; + phy_data->sfp_mod_absent = 1; + phy_data->sfp_diags = 0; + phy_data->sfp_base = AXGBE_SFP_BASE_UNKNOWN; + phy_data->sfp_cable = AXGBE_SFP_CABLE_UNKNOWN; + phy_data->sfp_speed = AXGBE_SFP_SPEED_UNKNOWN; +} + +static void axgbe_phy_sfp_detect(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + /* Reset the SFP signals and info */ + axgbe_phy_sfp_reset(phy_data); + + ret = axgbe_phy_get_comm_ownership(pdata); + if (ret) + return; + + /* Read the SFP signals and check for module presence */ + axgbe_phy_sfp_signals(pdata); + if (phy_data->sfp_mod_absent) { + 
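/* No module detected: clear the cached SFP state and EEPROM image + * so that a later module insertion is seen as an EEPROM change. + */ + 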
axgbe_phy_sfp_mod_absent(pdata); + goto put; + } + + ret = axgbe_phy_sfp_read_eeprom(pdata); + if (ret) { + /* Treat any error as if there isn't an SFP plugged in */ + axgbe_phy_sfp_reset(phy_data); + axgbe_phy_sfp_mod_absent(pdata); + goto put; + } + + axgbe_phy_sfp_parse_eeprom(pdata); + axgbe_phy_sfp_external_phy(pdata); + +put: + axgbe_phy_sfp_phy_settings(pdata); + axgbe_phy_put_comm_ownership(pdata); +} + +static void axgbe_phy_phydev_flowctrl(struct axgbe_port *pdata) +{ + pdata->phy.tx_pause = 0; + pdata->phy.rx_pause = 0; +} + +static enum axgbe_mode axgbe_phy_an73_redrv_outcome(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + enum axgbe_mode mode; + unsigned int ad_reg, lp_reg; + + pdata->phy.lp_advertising |= ADVERTISED_Autoneg; + pdata->phy.lp_advertising |= ADVERTISED_Backplane; + + /* Use external PHY to determine flow control */ + if (pdata->phy.pause_autoneg) + axgbe_phy_phydev_flowctrl(pdata); + + /* Compare Advertisement and Link Partner register 2 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); + if (lp_reg & 0x80) + pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full; + if (lp_reg & 0x20) + pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full; + + ad_reg &= lp_reg; + if (ad_reg & 0x80) { + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + mode = AXGBE_MODE_KR; + break; + default: + mode = AXGBE_MODE_SFI; + break; + } + } else if (ad_reg & 0x20) { + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + mode = AXGBE_MODE_KX_1000; + break; + case AXGBE_PORT_MODE_1000BASE_X: + mode = AXGBE_MODE_X; + break; + case AXGBE_PORT_MODE_SFP: + switch (phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: + mode = AXGBE_MODE_SGMII_1000; + break; + case AXGBE_SFP_BASE_1000_SX: + case AXGBE_SFP_BASE_1000_LX: + case AXGBE_SFP_BASE_1000_CX: + default: + mode = AXGBE_MODE_X; + break; + } + break; + default: + mode = AXGBE_MODE_SGMII_1000; + break; + } + } else { + mode = AXGBE_MODE_UNKNOWN; + } + + /* Compare Advertisement and Link Partner register 3 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); + if (lp_reg & 0xc000) + pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC; + + return mode; +} + +static enum axgbe_mode axgbe_phy_an73_outcome(struct axgbe_port *pdata) +{ + enum axgbe_mode mode; + unsigned int ad_reg, lp_reg; + + pdata->phy.lp_advertising |= ADVERTISED_Autoneg; + pdata->phy.lp_advertising |= ADVERTISED_Backplane; + + /* Compare Advertisement and Link Partner register 1 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); + if (lp_reg & 0x400) + pdata->phy.lp_advertising |= ADVERTISED_Pause; + if (lp_reg & 0x800) + pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause; + + if (pdata->phy.pause_autoneg) { + /* Set flow control based on auto-negotiation result */ + pdata->phy.tx_pause = 0; + pdata->phy.rx_pause = 0; + + if (ad_reg & lp_reg & 0x400) { + pdata->phy.tx_pause = 1; + pdata->phy.rx_pause = 1; + } else if (ad_reg & lp_reg & 0x800) { + if (ad_reg & 0x400) + pdata->phy.rx_pause = 1; + else if (lp_reg & 0x400) + pdata->phy.tx_pause = 1; + } + } + + /* Compare Advertisement and Link Partner register 2 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); + if (lp_reg & 0x80) + 
pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full; + if (lp_reg & 0x20) + pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full; + + ad_reg &= lp_reg; + if (ad_reg & 0x80) + mode = AXGBE_MODE_KR; + else if (ad_reg & 0x20) + mode = AXGBE_MODE_KX_1000; + else + mode = AXGBE_MODE_UNKNOWN; + + /* Compare Advertisement and Link Partner register 3 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); + if (lp_reg & 0xc000) + pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC; + + return mode; +} + +static enum axgbe_mode axgbe_phy_an_outcome(struct axgbe_port *pdata) +{ + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + return axgbe_phy_an73_outcome(pdata); + case AXGBE_AN_MODE_CL73_REDRV: + return axgbe_phy_an73_redrv_outcome(pdata); + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static unsigned int axgbe_phy_an_advertising(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int advertising; + + /* Without a re-driver, just return current advertising */ + if (!phy_data->redrv) + return pdata->phy.advertising; + + /* With the KR re-driver we need to advertise a single speed */ + advertising = pdata->phy.advertising; + advertising &= ~ADVERTISED_1000baseKX_Full; + advertising &= ~ADVERTISED_10000baseKR_Full; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + advertising |= ADVERTISED_10000baseKR_Full; + break; + case AXGBE_PORT_MODE_BACKPLANE_2500: + advertising |= ADVERTISED_1000baseKX_Full; + break; + case AXGBE_PORT_MODE_1000BASE_T: + case AXGBE_PORT_MODE_1000BASE_X: + case AXGBE_PORT_MODE_NBASE_T: + advertising |= ADVERTISED_1000baseKX_Full; + break; + case AXGBE_PORT_MODE_10GBASE_T: + PMD_DRV_LOG(ERR, "10GBASE_T mode is not supported\n"); + break; + case AXGBE_PORT_MODE_10GBASE_R: + advertising |= ADVERTISED_10000baseKR_Full; + break; + case AXGBE_PORT_MODE_SFP: + switch (phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: + case AXGBE_SFP_BASE_1000_SX: + case AXGBE_SFP_BASE_1000_LX: + case AXGBE_SFP_BASE_1000_CX: + advertising |= ADVERTISED_1000baseKX_Full; + break; + default: + advertising |= ADVERTISED_10000baseKR_Full; + break; + } + break; + default: + advertising |= ADVERTISED_10000baseKR_Full; + break; + } + + return advertising; +} + +static int axgbe_phy_an_config(struct axgbe_port *pdata __rte_unused) +{ + /* Dummy API since there is no case to support + * external PHY devices registered through kernel APIs + */ + return 0; +} + +static enum axgbe_an_mode axgbe_phy_an_sfp_mode(struct axgbe_phy_data *phy_data) +{ + switch (phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: + return AXGBE_AN_MODE_CL37_SGMII; + case AXGBE_SFP_BASE_1000_SX: + case AXGBE_SFP_BASE_1000_LX: + case AXGBE_SFP_BASE_1000_CX: + return AXGBE_AN_MODE_CL37; + default: + return AXGBE_AN_MODE_NONE; + } +} + +static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + /* A KR re-driver will always require CL73 AN */ + if (phy_data->redrv) + return AXGBE_AN_MODE_CL73_REDRV; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + return AXGBE_AN_MODE_CL73; + case AXGBE_PORT_MODE_BACKPLANE_2500: + return AXGBE_AN_MODE_NONE; + case AXGBE_PORT_MODE_1000BASE_T: + return AXGBE_AN_MODE_CL37_SGMII; + case AXGBE_PORT_MODE_1000BASE_X: + return AXGBE_AN_MODE_CL37; + case AXGBE_PORT_MODE_NBASE_T: + return AXGBE_AN_MODE_CL37_SGMII; 
+ case AXGBE_PORT_MODE_10GBASE_T: + return AXGBE_AN_MODE_CL73; + case AXGBE_PORT_MODE_10GBASE_R: + return AXGBE_AN_MODE_NONE; + case AXGBE_PORT_MODE_SFP: + return axgbe_phy_an_sfp_mode(phy_data); + default: + return AXGBE_AN_MODE_NONE; + } +} + +static int axgbe_phy_set_redrv_mode_mdio(struct axgbe_port *pdata, + enum axgbe_phy_redrv_mode mode) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + u16 redrv_reg, redrv_val; + + redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000); + redrv_val = (u16)mode; + + return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr, + redrv_reg, redrv_val); +} + +static int axgbe_phy_set_redrv_mode_i2c(struct axgbe_port *pdata, + enum axgbe_phy_redrv_mode mode) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int redrv_reg; + int ret; + + /* Calculate the register to write */ + redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000); + + ret = axgbe_phy_redrv_write(pdata, redrv_reg, mode); + + return ret; +} + +static void axgbe_phy_set_redrv_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + enum axgbe_phy_redrv_mode mode; + int ret; + + if (!phy_data->redrv) + return; + + mode = AXGBE_PHY_REDRV_MODE_CX; + if ((phy_data->port_mode == AXGBE_PORT_MODE_SFP) && + (phy_data->sfp_base != AXGBE_SFP_BASE_1000_CX) && + (phy_data->sfp_base != AXGBE_SFP_BASE_10000_CR)) + mode = AXGBE_PHY_REDRV_MODE_SR; + + ret = axgbe_phy_get_comm_ownership(pdata); + if (ret) + return; + + if (phy_data->redrv_if) + axgbe_phy_set_redrv_mode_i2c(pdata, mode); + else + axgbe_phy_set_redrv_mode_mdio(pdata, mode); + + axgbe_phy_put_comm_ownership(pdata); +} + +static void axgbe_phy_start_ratechange(struct axgbe_port *pdata) +{ + if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) + return; +} + +static void axgbe_phy_complete_ratechange(struct axgbe_port *pdata) +{ + unsigned int wait; + + /* Wait for command to complete */ + wait = AXGBE_RATECHANGE_COUNT; + while (wait--) { + if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) + return; + + rte_delay_us(1500); + } +} + +static void axgbe_phy_rrc(struct axgbe_port *pdata) +{ + unsigned int s0; + + axgbe_phy_start_ratechange(pdata); + + /* Receiver Reset Cycle */ + s0 = 0; + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 5); + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0); + + /* Call FW to make the change */ + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + + axgbe_phy_complete_ratechange(pdata); +} + +static void axgbe_phy_power_off(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + axgbe_phy_start_ratechange(pdata); + + /* Call FW to make the change */ + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, 0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + axgbe_phy_complete_ratechange(pdata); + phy_data->cur_mode = AXGBE_MODE_UNKNOWN; +} + +static void axgbe_phy_sfi_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int s0; + + axgbe_phy_set_redrv_mode(pdata); + + axgbe_phy_start_ratechange(pdata); + + /* 10G/SFI */ + s0 = 0; + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 3); + if (phy_data->sfp_cable != AXGBE_SFP_CABLE_PASSIVE) { + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0); + } else { + if (phy_data->sfp_cable_len <= 1) + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1); + else if 
(phy_data->sfp_cable_len <= 3) + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2); + else + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3); + } + + /* Call FW to make the change */ + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + axgbe_phy_complete_ratechange(pdata); + phy_data->cur_mode = AXGBE_MODE_SFI; +} + +static void axgbe_phy_kr_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int s0; + + axgbe_phy_set_redrv_mode(pdata); + + axgbe_phy_start_ratechange(pdata); + + /* 10G/KR */ + s0 = 0; + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 4); + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0); + + /* Call FW to make the change */ + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + axgbe_phy_complete_ratechange(pdata); + phy_data->cur_mode = AXGBE_MODE_KR; +} + +static enum axgbe_mode axgbe_phy_cur_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + return phy_data->cur_mode; +} + +static enum axgbe_mode axgbe_phy_switch_baset_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + /* No switching if not 10GBase-T */ + if (phy_data->port_mode != AXGBE_PORT_MODE_10GBASE_T) + return axgbe_phy_cur_mode(pdata); + + switch (axgbe_phy_cur_mode(pdata)) { + case AXGBE_MODE_SGMII_100: + case AXGBE_MODE_SGMII_1000: + return AXGBE_MODE_KR; + case AXGBE_MODE_KR: + default: + return AXGBE_MODE_SGMII_1000; + } +} + +static enum axgbe_mode axgbe_phy_switch_bp_2500_mode(struct axgbe_port *pdata + __rte_unused) +{ + return AXGBE_MODE_KX_2500; +} + +static enum axgbe_mode axgbe_phy_switch_bp_mode(struct axgbe_port *pdata) +{ + /* If we are in KR switch to KX, and vice-versa */ + switch (axgbe_phy_cur_mode(pdata)) { + case AXGBE_MODE_KX_1000: + return AXGBE_MODE_KR; + case AXGBE_MODE_KR: + default: + return AXGBE_MODE_KX_1000; + } +} + +static enum axgbe_mode axgbe_phy_switch_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + return axgbe_phy_switch_bp_mode(pdata); + case AXGBE_PORT_MODE_BACKPLANE_2500: + return axgbe_phy_switch_bp_2500_mode(pdata); + case AXGBE_PORT_MODE_1000BASE_T: + case AXGBE_PORT_MODE_NBASE_T: + case AXGBE_PORT_MODE_10GBASE_T: + return axgbe_phy_switch_baset_mode(pdata); + case AXGBE_PORT_MODE_1000BASE_X: + case AXGBE_PORT_MODE_10GBASE_R: + case AXGBE_PORT_MODE_SFP: + /* No switching, so just return current mode */ + return axgbe_phy_cur_mode(pdata); + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_basex_mode(struct axgbe_phy_data *phy_data + __rte_unused, + int speed) +{ + switch (speed) { + case SPEED_1000: + return AXGBE_MODE_X; + case SPEED_10000: + return AXGBE_MODE_KR; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_baset_mode(struct axgbe_phy_data *phy_data + __rte_unused, + int speed) +{ + switch (speed) { + case SPEED_100: + return AXGBE_MODE_SGMII_100; + case SPEED_1000: + return AXGBE_MODE_SGMII_1000; + case SPEED_10000: + return AXGBE_MODE_KR; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_sfp_mode(struct axgbe_phy_data *phy_data, + int speed) +{ + switch (speed) { + case SPEED_100: + return AXGBE_MODE_SGMII_100; + case 
SPEED_1000: + if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T) + return AXGBE_MODE_SGMII_1000; + else + return AXGBE_MODE_X; + case SPEED_10000: + case SPEED_UNKNOWN: + return AXGBE_MODE_SFI; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_bp_2500_mode(int speed) +{ + switch (speed) { + case SPEED_2500: + return AXGBE_MODE_KX_2500; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_bp_mode(int speed) +{ + switch (speed) { + case SPEED_1000: + return AXGBE_MODE_KX_1000; + case SPEED_10000: + return AXGBE_MODE_KR; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_mode(struct axgbe_port *pdata, + int speed) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + return axgbe_phy_get_bp_mode(speed); + case AXGBE_PORT_MODE_BACKPLANE_2500: + return axgbe_phy_get_bp_2500_mode(speed); + case AXGBE_PORT_MODE_1000BASE_T: + case AXGBE_PORT_MODE_NBASE_T: + case AXGBE_PORT_MODE_10GBASE_T: + return axgbe_phy_get_baset_mode(phy_data, speed); + case AXGBE_PORT_MODE_1000BASE_X: + case AXGBE_PORT_MODE_10GBASE_R: + return axgbe_phy_get_basex_mode(phy_data, speed); + case AXGBE_PORT_MODE_SFP: + return axgbe_phy_get_sfp_mode(phy_data, speed); + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static void axgbe_phy_set_mode(struct axgbe_port *pdata, enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_KR: + axgbe_phy_kr_mode(pdata); + break; + case AXGBE_MODE_SFI: + axgbe_phy_sfi_mode(pdata); + break; + default: + break; + } +} + +static bool axgbe_phy_check_mode(struct axgbe_port *pdata, + enum axgbe_mode mode, u32 advert) +{ + if (pdata->phy.autoneg == AUTONEG_ENABLE) { + if (pdata->phy.advertising & advert) + return true; + } else { + enum axgbe_mode cur_mode; + + cur_mode = axgbe_phy_get_mode(pdata, pdata->phy.speed); + if (cur_mode == mode) + return true; + } + + return false; +} + +static bool axgbe_phy_use_basex_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_X: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseT_Full); + case AXGBE_MODE_KR: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_10000baseT_Full); + default: + return false; + } +} + +static bool axgbe_phy_use_baset_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_SGMII_100: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_100baseT_Full); + case AXGBE_MODE_SGMII_1000: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseT_Full); + case AXGBE_MODE_KR: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_10000baseT_Full); + default: + return false; + } +} + +static bool axgbe_phy_use_sfp_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (mode) { + case AXGBE_MODE_X: + if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T) + return false; + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseT_Full); + case AXGBE_MODE_SGMII_100: + if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T) + return false; + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_100baseT_Full); + case AXGBE_MODE_SGMII_1000: + if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T) + return false; + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseT_Full); + case AXGBE_MODE_SFI: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_10000baseT_Full); + default: + return false; + } 
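+ /* Note: the SFI case above is validated against the 10000baseT + * advertisement bit, which axgbe_phy_sfp_phy_settings() sets for + * 10G SFP modules, so the check stays consistent with SFP parsing. + */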
+} + +static bool axgbe_phy_use_bp_2500_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_KX_2500: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_2500baseX_Full); + default: + return false; + } +} + +static bool axgbe_phy_use_bp_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_KX_1000: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseKX_Full); + case AXGBE_MODE_KR: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_10000baseKR_Full); + default: + return false; + } +} + +static bool axgbe_phy_use_mode(struct axgbe_port *pdata, enum axgbe_mode mode) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + return axgbe_phy_use_bp_mode(pdata, mode); + case AXGBE_PORT_MODE_BACKPLANE_2500: + return axgbe_phy_use_bp_2500_mode(pdata, mode); + case AXGBE_PORT_MODE_1000BASE_T: + case AXGBE_PORT_MODE_NBASE_T: + case AXGBE_PORT_MODE_10GBASE_T: + return axgbe_phy_use_baset_mode(pdata, mode); + case AXGBE_PORT_MODE_1000BASE_X: + case AXGBE_PORT_MODE_10GBASE_R: + return axgbe_phy_use_basex_mode(pdata, mode); + case AXGBE_PORT_MODE_SFP: + return axgbe_phy_use_sfp_mode(pdata, mode); + default: + return false; + } +} + +static int axgbe_phy_link_status(struct axgbe_port *pdata, int *an_restart) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg; + + *an_restart = 0; + + if (phy_data->port_mode == AXGBE_PORT_MODE_SFP) { + /* Check SFP signals */ + axgbe_phy_sfp_detect(pdata); + + if (phy_data->sfp_changed) { + *an_restart = 1; + return 0; + } + + if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los) + return 0; + } + + /* Link status is latched low, so read once to clear + * and then read again to get current state + */ + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + if (reg & MDIO_STAT1_LSTATUS) + return 1; + + /* No link, attempt a receiver reset cycle */ + if (phy_data->rrc_count++) { + phy_data->rrc_count = 0; + axgbe_phy_rrc(pdata); + } + + return 0; +} + +static void axgbe_phy_sfp_gpio_setup(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg; + + reg = XP_IOREAD(pdata, XP_PROP_3); + + phy_data->sfp_gpio_address = AXGBE_GPIO_ADDRESS_PCA9555 + + XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR); + + phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK); + + phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3, + GPIO_RX_LOS); + phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3, + GPIO_TX_FAULT); + phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3, + GPIO_MOD_ABS); + phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3, + GPIO_RATE_SELECT); +} + +static void axgbe_phy_sfp_comm_setup(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg, mux_addr_hi, mux_addr_lo; + + reg = XP_IOREAD(pdata, XP_PROP_4); + + mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI); + mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO); + if (mux_addr_lo == AXGBE_SFP_DIRECT) + return; + + phy_data->sfp_comm = AXGBE_SFP_COMM_PCA9545; + phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo; + phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN); +} + +static void axgbe_phy_sfp_setup(struct axgbe_port *pdata) +{ + axgbe_phy_sfp_comm_setup(pdata); + axgbe_phy_sfp_gpio_setup(pdata); +} + +static bool 
axgbe_phy_redrv_error(struct axgbe_phy_data *phy_data) +{ + if (!phy_data->redrv) + return false; + + if (phy_data->redrv_if >= AXGBE_PHY_REDRV_IF_MAX) + return true; + + switch (phy_data->redrv_model) { + case AXGBE_PHY_REDRV_MODEL_4223: + if (phy_data->redrv_lane > 3) + return true; + break; + case AXGBE_PHY_REDRV_MODEL_4227: + if (phy_data->redrv_lane > 1) + return true; + break; + default: + return true; + } + + return false; +} + +static int axgbe_phy_mdio_reset_setup(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg; + + if (phy_data->conn_type != AXGBE_CONN_TYPE_MDIO) + return 0; + reg = XP_IOREAD(pdata, XP_PROP_3); + phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET); + switch (phy_data->mdio_reset) { + case AXGBE_MDIO_RESET_NONE: + case AXGBE_MDIO_RESET_I2C_GPIO: + case AXGBE_MDIO_RESET_INT_GPIO: + break; + default: + PMD_DRV_LOG(ERR, "unsupported MDIO reset (%#x)\n", + phy_data->mdio_reset); + return -EINVAL; + } + if (phy_data->mdio_reset == AXGBE_MDIO_RESET_I2C_GPIO) { + phy_data->mdio_reset_addr = AXGBE_GPIO_ADDRESS_PCA9555 + + XP_GET_BITS(reg, XP_PROP_3, + MDIO_RESET_I2C_ADDR); + phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3, + MDIO_RESET_I2C_GPIO); + } else if (phy_data->mdio_reset == AXGBE_MDIO_RESET_INT_GPIO) { + phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3, + MDIO_RESET_INT_GPIO); + } + + return 0; +} + +static bool axgbe_phy_port_mode_mismatch(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)) + return false; + break; + case AXGBE_PORT_MODE_BACKPLANE_2500: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500) + return false; + break; + case AXGBE_PORT_MODE_1000BASE_T: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)) + return false; + break; + case AXGBE_PORT_MODE_1000BASE_X: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) + return false; + break; + case AXGBE_PORT_MODE_NBASE_T: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500)) + return false; + break; + case AXGBE_PORT_MODE_10GBASE_T: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)) + return false; + break; + case AXGBE_PORT_MODE_10GBASE_R: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) + return false; + break; + case AXGBE_PORT_MODE_SFP: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)) + return false; + break; + default: + break; + } + + return true; +} + +static bool axgbe_phy_conn_type_mismatch(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + case AXGBE_PORT_MODE_BACKPLANE_2500: + if (phy_data->conn_type == AXGBE_CONN_TYPE_BACKPLANE) + return false; + break; + case AXGBE_PORT_MODE_1000BASE_T: + case AXGBE_PORT_MODE_1000BASE_X: + case AXGBE_PORT_MODE_NBASE_T: + case AXGBE_PORT_MODE_10GBASE_T: + case AXGBE_PORT_MODE_10GBASE_R: + if (phy_data->conn_type == 
AXGBE_CONN_TYPE_MDIO) + return false; + break; + case AXGBE_PORT_MODE_SFP: + if (phy_data->conn_type == AXGBE_CONN_TYPE_SFP) + return false; + break; + default: + break; + } + + return true; +} + +static bool axgbe_phy_port_enabled(struct axgbe_port *pdata) +{ + unsigned int reg; + + reg = XP_IOREAD(pdata, XP_PROP_0); + if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS)) + return false; + if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE)) + return false; + + return true; +} + +static void axgbe_phy_cdr_track(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + if (!pdata->vdata->an_cdr_workaround) + return; + + if (!phy_data->phy_cdr_notrack) + return; + + rte_delay_us(phy_data->phy_cdr_delay + 400); + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + AXGBE_PMA_CDR_TRACK_EN_MASK, + AXGBE_PMA_CDR_TRACK_EN_ON); + + phy_data->phy_cdr_notrack = 0; +} + +static void axgbe_phy_cdr_notrack(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + if (!pdata->vdata->an_cdr_workaround) + return; + + if (phy_data->phy_cdr_notrack) + return; + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + AXGBE_PMA_CDR_TRACK_EN_MASK, + AXGBE_PMA_CDR_TRACK_EN_OFF); + + axgbe_phy_rrc(pdata); + + phy_data->phy_cdr_notrack = 1; +} + +static void axgbe_phy_kr_training_post(struct axgbe_port *pdata) +{ + if (!pdata->cdr_track_early) + axgbe_phy_cdr_track(pdata); +} + +static void axgbe_phy_kr_training_pre(struct axgbe_port *pdata) +{ + if (pdata->cdr_track_early) + axgbe_phy_cdr_track(pdata); +} + +static void axgbe_phy_an_post(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != AXGBE_MODE_KR) + break; + + axgbe_phy_cdr_track(pdata); + + switch (pdata->an_result) { + case AXGBE_AN_READY: + case AXGBE_AN_COMPLETE: + break; + default: + if (phy_data->phy_cdr_delay < AXGBE_CDR_DELAY_MAX) + phy_data->phy_cdr_delay += AXGBE_CDR_DELAY_INC; + break; + } + break; + default: + break; + } +} + +static void axgbe_phy_an_pre(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != AXGBE_MODE_KR) + break; + + axgbe_phy_cdr_notrack(pdata); + break; + default: + break; + } +} + +static void axgbe_phy_stop(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + /* Reset SFP data */ + axgbe_phy_sfp_reset(phy_data); + axgbe_phy_sfp_mod_absent(pdata); + + /* Reset CDR support */ + axgbe_phy_cdr_track(pdata); + + /* Power off the PHY */ + axgbe_phy_power_off(pdata); + + /* Stop the I2C controller */ + pdata->i2c_if.i2c_stop(pdata); +} + +static int axgbe_phy_start(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + /* Start the I2C controller */ + ret = pdata->i2c_if.i2c_start(pdata); + if (ret) + return ret; + + /* Start in highest supported mode */ + axgbe_phy_set_mode(pdata, phy_data->start_mode); + + /* Reset CDR support */ + axgbe_phy_cdr_track(pdata); + + /* After starting the I2C controller, we can check for an SFP */ + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_SFP: + axgbe_phy_sfp_detect(pdata); + break; + default: + break; + } + + return ret; +} + +static int axgbe_phy_reset(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + enum 
axgbe_mode cur_mode; + + /* Reset by power cycling the PHY */ + cur_mode = phy_data->cur_mode; + axgbe_phy_power_off(pdata); + /* First time reset is done with passed unknown mode*/ + axgbe_phy_set_mode(pdata, cur_mode); + return 0; +} + +static int axgbe_phy_init(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data; + unsigned int reg; + int ret; + + /* Check if enabled */ + if (!axgbe_phy_port_enabled(pdata)) { + PMD_DRV_LOG(ERR, "device is not enabled\n"); + return -ENODEV; + } + + /* Initialize the I2C controller */ + ret = pdata->i2c_if.i2c_init(pdata); + if (ret) + return ret; + + phy_data = rte_zmalloc("phy_data memory", sizeof(*phy_data), 0); + if (!phy_data) { + PMD_DRV_LOG(ERR, "phy_data allocation failed\n"); + return -ENOMEM; + } + pdata->phy_data = phy_data; + + reg = XP_IOREAD(pdata, XP_PROP_0); + phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE); + phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID); + phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS); + phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE); + phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR); + + reg = XP_IOREAD(pdata, XP_PROP_4); + phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT); + phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF); + phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR); + phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE); + phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL); + + /* Validate the connection requested */ + if (axgbe_phy_conn_type_mismatch(pdata)) { + PMD_DRV_LOG(ERR, "phy mode/connection mismatch (%#x/%#x)\n", + phy_data->port_mode, phy_data->conn_type); + return -EINVAL; + } + + /* Validate the mode requested */ + if (axgbe_phy_port_mode_mismatch(pdata)) { + PMD_DRV_LOG(ERR, "phy mode/speed mismatch (%#x/%#x)\n", + phy_data->port_mode, phy_data->port_speeds); + return -EINVAL; + } + + /* Check for and validate MDIO reset support */ + ret = axgbe_phy_mdio_reset_setup(pdata); + if (ret) + return ret; + + /* Validate the re-driver information */ + if (axgbe_phy_redrv_error(phy_data)) { + PMD_DRV_LOG(ERR, "phy re-driver settings error\n"); + return -EINVAL; + } + pdata->kr_redrv = phy_data->redrv; + + /* Indicate current mode is unknown */ + phy_data->cur_mode = AXGBE_MODE_UNKNOWN; + + /* Initialize supported features */ + pdata->phy.supported = 0; + + switch (phy_data->port_mode) { + /* Backplane support */ + case AXGBE_PORT_MODE_BACKPLANE: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_Backplane; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) { + pdata->phy.supported |= SUPPORTED_1000baseKX_Full; + phy_data->start_mode = AXGBE_MODE_KX_1000; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) { + pdata->phy.supported |= SUPPORTED_10000baseKR_Full; + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) + pdata->phy.supported |= + SUPPORTED_10000baseR_FEC; + phy_data->start_mode = AXGBE_MODE_KR; + } + + phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE; + break; + case AXGBE_PORT_MODE_BACKPLANE_2500: + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_Backplane; + pdata->phy.supported |= SUPPORTED_2500baseX_Full; + phy_data->start_mode = AXGBE_MODE_KX_2500; + + phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE; + break; + + /* MDIO 1GBase-T support */ + case AXGBE_PORT_MODE_1000BASE_T: + 
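/* In each port mode below, start_mode is overwritten as each faster + * enabled speed is found, so the fastest configured port speed + * becomes the starting mode. + */ + 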
pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) { + pdata->phy.supported |= SUPPORTED_100baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) { + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_1000; + } + + phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22; + break; + + /* MDIO Base-X support */ + case AXGBE_PORT_MODE_1000BASE_X: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_FIBRE; + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_X; + + phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22; + break; + + /* MDIO NBase-T support */ + case AXGBE_PORT_MODE_NBASE_T: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) { + pdata->phy.supported |= SUPPORTED_100baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) { + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_1000; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500) { + pdata->phy.supported |= SUPPORTED_2500baseX_Full; + phy_data->start_mode = AXGBE_MODE_KX_2500; + } + + phy_data->phydev_mode = AXGBE_MDIO_MODE_CL45; + break; + + /* 10GBase-T support */ + case AXGBE_PORT_MODE_10GBASE_T: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) { + pdata->phy.supported |= SUPPORTED_100baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) { + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_1000; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) { + pdata->phy.supported |= SUPPORTED_10000baseT_Full; + phy_data->start_mode = AXGBE_MODE_KR; + } + + phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE; + break; + + /* 10GBase-R support */ + case AXGBE_PORT_MODE_10GBASE_R: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + pdata->phy.supported |= SUPPORTED_10000baseT_Full; + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) + pdata->phy.supported |= SUPPORTED_10000baseR_FEC; + phy_data->start_mode = AXGBE_MODE_SFI; + + phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE; + break; + + /* SFP support */ + case AXGBE_PORT_MODE_SFP: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + pdata->phy.supported |= SUPPORTED_FIBRE; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) { + pdata->phy.supported |= SUPPORTED_100baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) { + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_1000; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) { + pdata->phy.supported |= 
SUPPORTED_10000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SFI; + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) + pdata->phy.supported |= + SUPPORTED_10000baseR_FEC; + } + + phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22; + + axgbe_phy_sfp_setup(pdata); + break; + default: + return -EINVAL; + } + + if ((phy_data->conn_type & AXGBE_CONN_TYPE_MDIO) && + (phy_data->phydev_mode != AXGBE_MDIO_MODE_NONE)) { + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr, + phy_data->phydev_mode); + if (ret) { + PMD_DRV_LOG(ERR, "mdio port/clause not compatible (%d/%u)\n", + phy_data->mdio_addr, phy_data->phydev_mode); + return -EINVAL; + } + } + + if (phy_data->redrv && !phy_data->redrv_if) { + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr, + AXGBE_MDIO_MODE_CL22); + if (ret) { + PMD_DRV_LOG(ERR, "redriver mdio port not compatible (%u)\n", + phy_data->redrv_addr); + return -EINVAL; + } + } + + phy_data->phy_cdr_delay = AXGBE_CDR_DELAY_INIT; + return 0; +} +void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if) +{ + struct axgbe_phy_impl_if *phy_impl = &phy_if->phy_impl; + + phy_impl->init = axgbe_phy_init; + phy_impl->reset = axgbe_phy_reset; + phy_impl->start = axgbe_phy_start; + phy_impl->stop = axgbe_phy_stop; + phy_impl->link_status = axgbe_phy_link_status; + phy_impl->use_mode = axgbe_phy_use_mode; + phy_impl->set_mode = axgbe_phy_set_mode; + phy_impl->get_mode = axgbe_phy_get_mode; + phy_impl->switch_mode = axgbe_phy_switch_mode; + phy_impl->cur_mode = axgbe_phy_cur_mode; + phy_impl->an_mode = axgbe_phy_an_mode; + phy_impl->an_config = axgbe_phy_an_config; + phy_impl->an_advertising = axgbe_phy_an_advertising; + phy_impl->an_outcome = axgbe_phy_an_outcome; + + phy_impl->an_pre = axgbe_phy_an_pre; + phy_impl->an_post = axgbe_phy_an_post; + + phy_impl->kr_training_pre = axgbe_phy_kr_training_pre; + phy_impl->kr_training_post = axgbe_phy_kr_training_post; +} diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c new file mode 100644 index 00000000..c5fd5f41 --- /dev/null +++ b/drivers/net/axgbe/axgbe_rxtx.c @@ -0,0 +1,674 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
+ */ + +#include "axgbe_ethdev.h" +#include "axgbe_rxtx.h" +#include "axgbe_phy.h" + +#include <rte_time.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> + +static void +axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue) +{ + uint16_t i; + struct rte_mbuf **sw_ring; + + if (rx_queue) { + sw_ring = rx_queue->sw_ring; + if (sw_ring) { + for (i = 0; i < rx_queue->nb_desc; i++) { + if (sw_ring[i]) + rte_pktmbuf_free(sw_ring[i]); + } + rte_free(sw_ring); + } + rte_free(rx_queue); + } +} + +void axgbe_dev_rx_queue_release(void *rxq) +{ + axgbe_rx_queue_release(rxq); +} + +int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + PMD_INIT_FUNC_TRACE(); + uint32_t size; + const struct rte_memzone *dma; + struct axgbe_rx_queue *rxq; + uint32_t rx_desc = nb_desc; + struct axgbe_port *pdata = dev->data->dev_private; + + /* + * Validate the Rx descriptor count: it must be a power of 2 + * and no greater than the h/w supported maximum + */ + if ((!rte_is_power_of_2(rx_desc)) || + rx_desc > pdata->rx_desc_count) + return -EINVAL; + /* First allocate the rx queue data structure */ + rxq = rte_zmalloc_socket("ethdev RX queue", + sizeof(struct axgbe_rx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (!rxq) { + PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!"); + return -ENOMEM; + } + + rxq->cur = 0; + rxq->dirty = 0; + rxq->pdata = pdata; + rxq->mb_pool = mp; + rxq->queue_id = queue_idx; + rxq->port_id = dev->data->port_id; + rxq->nb_desc = rx_desc; + rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE + + (DMA_CH_INC * rxq->queue_id)); + rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs + + DMA_CH_RDTR_LO); + if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) + rxq->crc_len = ETHER_CRC_LEN; + else + rxq->crc_len = 0; + + /* CRC stripping in AXGBE is supported per port, not per queue */ + pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0; + rxq->free_thresh = rx_conf->rx_free_thresh ? + rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH; + if (rxq->free_thresh > rxq->nb_desc) + rxq->free_thresh = rxq->nb_desc >> 3; + + /* Allocate RX ring hardware descriptors */ + size = rxq->nb_desc * sizeof(union axgbe_rx_desc); + dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128, + socket_id); + if (!dma) { + PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n"); + axgbe_rx_queue_release(rxq); + return -ENOMEM; + } + rxq->ring_phys_addr = (uint64_t)dma->phys_addr; + rxq->desc = (volatile union axgbe_rx_desc *)dma->addr; + memset((void *)rxq->desc, 0, size); + /* Allocate software ring */ + size = rxq->nb_desc * sizeof(struct rte_mbuf *); + rxq->sw_ring = rte_zmalloc_socket("sw_ring", size, + RTE_CACHE_LINE_SIZE, + socket_id); + if (!rxq->sw_ring) { + PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n"); + axgbe_rx_queue_release(rxq); + return -ENOMEM; + } + dev->data->rx_queues[queue_idx] = rxq; + if (!pdata->rx_queues) + pdata->rx_queues = dev->data->rx_queues; + + return 0; +} + +static void axgbe_prepare_rx_stop(struct axgbe_port *pdata, + unsigned int queue) +{ + unsigned int rx_status; + unsigned long rx_timeout; + + /* The Rx engine cannot be stopped if it is actively processing + * packets. Wait for the Rx queue to empty the Rx FIFO. Don't + * wait forever though... 
+ */ + rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT * + rte_get_timer_hz()); + + while (time_before(rte_get_timer_cycles(), rx_timeout)) { + rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR); + if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) && + (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0)) + break; + + rte_delay_us(900); + } + + if (!time_before(rte_get_timer_cycles(), rx_timeout)) + PMD_DRV_LOG(ERR, + "timed out waiting for Rx queue %u to empty\n", + queue); +} + +void axgbe_dev_disable_rx(struct rte_eth_dev *dev) +{ + struct axgbe_rx_queue *rxq; + struct axgbe_port *pdata = dev->data->dev_private; + unsigned int i; + + /* Disable MAC Rx */ + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0); + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0); + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); + + /* Prepare for Rx DMA channel stop */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + axgbe_prepare_rx_stop(pdata, i); + } + /* Disable each Rx queue */ + AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + /* Disable Rx DMA channel */ + AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0); + } +} + +void axgbe_dev_enable_rx(struct rte_eth_dev *dev) +{ + struct axgbe_rx_queue *rxq; + struct axgbe_port *pdata = dev->data->dev_private; + unsigned int i; + unsigned int reg_val = 0; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + /* Enable Rx DMA channel */ + AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1); + } + + reg_val = 0; + for (i = 0; i < pdata->rx_q_count; i++) + reg_val |= (0x02 << (i << 1)); + AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); + + /* Enable MAC Rx */ + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1); + /* The frame is forwarded to the application after CRC stripping */ + if (pdata->crc_strip_enable) { + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1); + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1); + } + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1); +} + +/* Rx function with one-to-one descriptor refresh */ +uint16_t +axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + PMD_INIT_FUNC_TRACE(); + uint16_t nb_rx = 0; + struct axgbe_rx_queue *rxq = rx_queue; + volatile union axgbe_rx_desc *desc; + uint64_t old_dirty = rxq->dirty; + struct rte_mbuf *mbuf, *tmbuf; + unsigned int err; + uint32_t error_status; + uint16_t idx, pidx, pkt_len; + + idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur); + while (nb_rx < nb_pkts) { + if (unlikely(idx == rxq->nb_desc)) + idx = 0; + + desc = &rxq->desc[idx]; + + if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN)) + break; + tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool); + if (unlikely(!tmbuf)) { + PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u" + " queue_id = %u\n", + (unsigned int)rxq->port_id, + (unsigned int)rxq->queue_id); + rte_eth_devices[ + rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + pidx = idx + 1; + if (unlikely(pidx == rxq->nb_desc)) + pidx = 0; + + rte_prefetch0(rxq->sw_ring[pidx]); + if ((pidx & 0x3) == 0) { + rte_prefetch0(&rxq->desc[pidx]); + rte_prefetch0(&rxq->sw_ring[pidx]); + } + + mbuf = rxq->sw_ring[idx]; + /* Check for any errors and free the mbuf */ + err = AXGMAC_GET_BITS_LE(desc->write.desc3, + RX_NORMAL_DESC3, ES); + error_status = 0; + if (unlikely(err)) { + error_status = desc->write.desc3 & AXGBE_ERR_STATUS; + if ((error_status != AXGBE_L3_CSUM_ERR) && + (error_status != AXGBE_L4_CSUM_ERR)) { + 
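/* Any error other than an L3/L4 checksum error is fatal for + * this descriptor: count it and drop the mbuf. Checksum errors + * are instead reported through ol_flags further below. + */ + 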
rxq->errors++; + rte_pktmbuf_free(mbuf); + goto err_set; + } + } + if (rxq->pdata->rx_csum_enable) { + mbuf->ol_flags = 0; + mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) { + mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; + } else if ( + unlikely(error_status == AXGBE_L4_CSUM_ERR)) { + mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + } + } + rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *)); + /* Get the RSS hash */ + if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV)) + mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1); + pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, + PL) - rxq->crc_len; + /* Mbuf populate */ + mbuf->next = NULL; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->port = rxq->port_id; + mbuf->pkt_len = pkt_len; + mbuf->data_len = pkt_len; + rxq->bytes += pkt_len; + rx_pkts[nb_rx++] = mbuf; +err_set: + rxq->cur++; + rxq->sw_ring[idx++] = tmbuf; + desc->read.baddr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf)); + memset((void *)(&desc->read.desc2), 0, 8); + AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1); + rxq->dirty++; + } + rxq->pkts += nb_rx; + if (rxq->dirty != old_dirty) { + rte_wmb(); + idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1); + AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO, + low32_value(rxq->ring_phys_addr + + (idx * sizeof(union axgbe_rx_desc)))); + } + + return nb_rx; +} + +/* Tx Apis */ +static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue) +{ + uint16_t i; + struct rte_mbuf **sw_ring; + + if (tx_queue) { + sw_ring = tx_queue->sw_ring; + if (sw_ring) { + for (i = 0; i < tx_queue->nb_desc; i++) { + if (sw_ring[i]) + rte_pktmbuf_free(sw_ring[i]); + } + rte_free(sw_ring); + } + rte_free(tx_queue); + } +} + +void axgbe_dev_tx_queue_release(void *txq) +{ + axgbe_tx_queue_release(txq); +} + +int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + PMD_INIT_FUNC_TRACE(); + uint32_t tx_desc; + struct axgbe_port *pdata; + struct axgbe_tx_queue *txq; + unsigned int tsize; + const struct rte_memzone *tz; + + tx_desc = nb_desc; + pdata = (struct axgbe_port *)dev->data->dev_private; + + /* + * validate tx descriptors count + * should be power of 2 and less than h/w supported + */ + if ((!rte_is_power_of_2(tx_desc)) || + tx_desc > pdata->tx_desc_count || + tx_desc < AXGBE_MIN_RING_DESC) + return -EINVAL; + + /* First allocate the tx queue data structure */ + txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue), + RTE_CACHE_LINE_SIZE); + if (!txq) + return -ENOMEM; + txq->pdata = pdata; + + txq->nb_desc = tx_desc; + txq->free_thresh = tx_conf->tx_free_thresh ? 
+ tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH; + if (txq->free_thresh > txq->nb_desc) + txq->free_thresh = (txq->nb_desc >> 1); + txq->free_batch_cnt = txq->free_thresh; + + /* For the vector Tx path the queue size must be a multiple of + * the free threshold + */ + if (txq->nb_desc % txq->free_thresh != 0) + txq->vector_disable = 1; + + if (tx_conf->offloads != 0) + txq->vector_disable = 1; + + /* Allocate TX ring hardware descriptors */ + tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc); + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, + tsize, AXGBE_DESC_ALIGN, socket_id); + if (!tz) { + axgbe_tx_queue_release(txq); + return -ENOMEM; + } + memset(tz->addr, 0, tsize); + txq->ring_phys_addr = (uint64_t)tz->phys_addr; + txq->desc = tz->addr; + txq->queue_id = queue_idx; + txq->port_id = dev->data->port_id; + txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE + + (DMA_CH_INC * txq->queue_id)); + txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs + + DMA_CH_TDTR_LO); + txq->cur = 0; + txq->dirty = 0; + txq->nb_desc_free = txq->nb_desc; + /* Allocate software ring */ + tsize = txq->nb_desc * sizeof(struct rte_mbuf *); + txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize, + RTE_CACHE_LINE_SIZE); + if (!txq->sw_ring) { + axgbe_tx_queue_release(txq); + return -ENOMEM; + } + dev->data->tx_queues[queue_idx] = txq; + if (!pdata->tx_queues) + pdata->tx_queues = dev->data->tx_queues; + + if (txq->vector_disable) + dev->tx_pkt_burst = &axgbe_xmit_pkts; + else +#ifdef RTE_ARCH_X86 + dev->tx_pkt_burst = &axgbe_xmit_pkts_vec; +#else + dev->tx_pkt_burst = &axgbe_xmit_pkts; +#endif + + return 0; +} + +static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata, + unsigned int queue) +{ + unsigned int tx_status; + unsigned long tx_timeout; + + /* The Tx engine cannot be stopped if it is actively processing + * packets. Wait for the Tx queue to empty the Tx FIFO. Don't + * wait forever though... + */ + tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT * + rte_get_timer_hz()); + while (time_before(rte_get_timer_cycles(), tx_timeout)) { + tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR); + if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) && + (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0)) + break; + + rte_delay_us(900); + } + + if (!time_before(rte_get_timer_cycles(), tx_timeout)) + PMD_DRV_LOG(ERR, + "timed out waiting for Tx queue %u to empty\n", + queue); +} + +static void axgbe_prepare_tx_stop(struct axgbe_port *pdata, + unsigned int queue) +{ + unsigned int tx_dsr, tx_pos, tx_qidx; + unsigned int tx_status; + unsigned long tx_timeout; + + if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20) + return axgbe_txq_prepare_tx_stop(pdata, queue); + + /* Calculate the status register to read and the position within */ + if (queue < DMA_DSRX_FIRST_QUEUE) { + tx_dsr = DMA_DSR0; + tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START; + } else { + tx_qidx = queue - DMA_DSRX_FIRST_QUEUE; + + tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); + tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) + + DMA_DSRX_TPS_START; + } + + /* The Tx engine cannot be stopped if it is actively processing + * descriptors. Wait for the Tx engine to enter the stopped or + * suspended state. Don't wait forever though... 
+ */ + tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT * + rte_get_timer_hz()); + while (time_before(rte_get_timer_cycles(), tx_timeout)) { + tx_status = AXGMAC_IOREAD(pdata, tx_dsr); + tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH); + if ((tx_status == DMA_TPS_STOPPED) || + (tx_status == DMA_TPS_SUSPENDED)) + break; + + rte_delay_us(900); + } + + if (!time_before(rte_get_timer_cycles(), tx_timeout)) + PMD_DRV_LOG(ERR, + "timed out waiting for Tx DMA channel %u to stop\n", + queue); +} + +void axgbe_dev_disable_tx(struct rte_eth_dev *dev) +{ + struct axgbe_tx_queue *txq; + struct axgbe_port *pdata = dev->data->dev_private; + unsigned int i; + + /* Prepare for stopping DMA channel */ + for (i = 0; i < pdata->tx_q_count; i++) { + txq = dev->data->tx_queues[i]; + axgbe_prepare_tx_stop(pdata, i); + } + /* Disable MAC Tx */ + AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); + /* Disable each Tx queue*/ + for (i = 0; i < pdata->tx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, + 0); + /* Disable each Tx DMA channel */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0); + } +} + +void axgbe_dev_enable_tx(struct rte_eth_dev *dev) +{ + struct axgbe_tx_queue *txq; + struct axgbe_port *pdata = dev->data->dev_private; + unsigned int i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + /* Enable Tx DMA channel */ + AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1); + } + /* Enable Tx queue*/ + for (i = 0; i < pdata->tx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, + MTL_Q_ENABLED); + /* Enable MAC Tx */ + AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); +} + +/* Free Tx conformed mbufs */ +static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq) +{ + volatile struct axgbe_tx_desc *desc; + uint16_t idx; + + idx = AXGBE_GET_DESC_IDX(txq, txq->dirty); + while (txq->cur != txq->dirty) { + if (unlikely(idx == txq->nb_desc)) + idx = 0; + desc = &txq->desc[idx]; + /* Check for ownership */ + if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN)) + return; + memset((void *)&desc->desc2, 0, 8); + /* Free mbuf */ + rte_pktmbuf_free(txq->sw_ring[idx]); + txq->sw_ring[idx++] = NULL; + txq->dirty++; + } +} + +/* Tx Descriptor formation + * Considering each mbuf requires one desc + * mbuf is linear + */ +static int axgbe_xmit_hw(struct axgbe_tx_queue *txq, + struct rte_mbuf *mbuf) +{ + volatile struct axgbe_tx_desc *desc; + uint16_t idx; + uint64_t mask; + + idx = AXGBE_GET_DESC_IDX(txq, txq->cur); + desc = &txq->desc[idx]; + + /* Update buffer address and length */ + desc->baddr = rte_mbuf_data_iova(mbuf); + AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L, + mbuf->pkt_len); + /* Total msg length to transmit */ + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL, + mbuf->pkt_len); + /* Mark it as First and Last Descriptor */ + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1); + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1); + /* Mark it as a NORMAL descriptor */ + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0); + /* configure h/w Offload */ + mask = mbuf->ol_flags & PKT_TX_L4_MASK; + if ((mask == PKT_TX_TCP_CKSUM) || (mask == PKT_TX_UDP_CKSUM)) + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3); + else if (mbuf->ol_flags & PKT_TX_IP_CKSUM) + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1); + rte_wmb(); + + /* Set OWN bit */ + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, 
OWN, 1); + rte_wmb(); + + /* Save mbuf */ + txq->sw_ring[idx] = mbuf; + /* Update current index*/ + txq->cur++; + /* Update stats */ + txq->bytes += mbuf->pkt_len; + + return 0; +} + +/* Eal supported tx wrapper*/ +uint16_t +axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + PMD_INIT_FUNC_TRACE(); + + if (unlikely(nb_pkts == 0)) + return nb_pkts; + + struct axgbe_tx_queue *txq; + uint16_t nb_desc_free; + uint16_t nb_pkt_sent = 0; + uint16_t idx; + uint32_t tail_addr; + struct rte_mbuf *mbuf; + + txq = (struct axgbe_tx_queue *)tx_queue; + nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty); + + if (unlikely(nb_desc_free <= txq->free_thresh)) { + axgbe_xmit_cleanup(txq); + nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty); + if (unlikely(nb_desc_free == 0)) + return 0; + } + nb_pkts = RTE_MIN(nb_desc_free, nb_pkts); + while (nb_pkts--) { + mbuf = *tx_pkts++; + if (axgbe_xmit_hw(txq, mbuf)) + goto out; + nb_pkt_sent++; + } +out: + /* Sync read and write */ + rte_mb(); + idx = AXGBE_GET_DESC_IDX(txq, txq->cur); + tail_addr = low32_value(txq->ring_phys_addr + + idx * sizeof(struct axgbe_tx_desc)); + /* Update tail reg with next immediate address to kick Tx DMA channel*/ + AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr); + txq->pkts += nb_pkt_sent; + return nb_pkt_sent; +} + +void axgbe_dev_clear_queues(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + uint8_t i; + struct axgbe_rx_queue *rxq; + struct axgbe_tx_queue *txq; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + + if (rxq) { + axgbe_rx_queue_release(rxq); + dev->data->rx_queues[i] = NULL; + } + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + + if (txq) { + axgbe_tx_queue_release(txq); + dev->data->tx_queues[i] = NULL; + } + } +} diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h new file mode 100644 index 00000000..917da58c --- /dev/null +++ b/drivers/net/axgbe/axgbe_rxtx.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
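The descriptor hand-off in axgbe_xmit_hw() above follows the usual producer protocol for DMA rings: fill every field except the ownership bit, fence, flip OWN, fence again, then ring the tail doorbell once per burst rather than per packet. A reduced sketch of that ordering; the struct layout, DEMO_OWN and the doorbell argument are illustrative stand-ins, not the driver's real definitions:

#include <rte_atomic.h> /* rte_wmb() */
#include <rte_io.h>     /* rte_write32() */

struct demo_desc { uint64_t addr; uint32_t len; uint32_t flags; };
#define DEMO_OWN (1u << 31)

static void
publish_desc(volatile struct demo_desc *desc, uint64_t iova, uint32_t len,
	     volatile uint32_t *doorbell, uint32_t tail)
{
	desc->addr = iova;
	desc->len = len;
	rte_wmb();                   /* payload visible before OWN flips */
	desc->flags |= DEMO_OWN;     /* hardware now owns the descriptor */
	rte_wmb();
	rte_write32(tail, doorbell); /* one doorbell write per burst */
}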
+ */ + +#ifndef _AXGBE_RXTX_H_ +#define _AXGBE_RXTX_H_ + +/* to suppress gcc warnings related to descriptor casting*/ +#ifdef RTE_TOOLCHAIN_GCC +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +#ifdef RTE_TOOLCHAIN_CLANG +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +/* Descriptor related defines */ +#define AXGBE_MAX_RING_DESC 4096 /*should be power of 2*/ +#define AXGBE_TX_DESC_MIN_FREE (AXGBE_MAX_RING_DESC >> 3) +#define AXGBE_TX_DESC_MAX_PROC (AXGBE_MAX_RING_DESC >> 1) +#define AXGBE_MIN_RING_DESC 32 +#define RTE_AXGBE_DESCS_PER_LOOP 4 +#define RTE_AXGBE_MAX_RX_BURST 32 + +#define AXGBE_RX_FREE_THRESH 32 +#define AXGBE_TX_FREE_THRESH 32 + +#define AXGBE_DESC_ALIGN 128 +#define AXGBE_DESC_OWN 0x80000000 +#define AXGBE_ERR_STATUS 0x000f0000 +#define AXGBE_L3_CSUM_ERR 0x00050000 +#define AXGBE_L4_CSUM_ERR 0x00060000 + +#include "axgbe_common.h" + +#define AXGBE_GET_DESC_PT(_queue, _idx) \ + (((_queue)->desc) + \ + ((_idx) & ((_queue)->nb_desc - 1))) + +#define AXGBE_GET_DESC_IDX(_queue, _idx) \ + ((_idx) & ((_queue)->nb_desc - 1)) \ + +/* Rx desc format */ +union axgbe_rx_desc { + struct { + uint64_t baddr; + uint32_t desc2; + uint32_t desc3; + } read; + struct { + uint32_t desc0; + uint32_t desc1; + uint32_t desc2; + uint32_t desc3; + } write; +}; + +struct axgbe_rx_queue { + /* membuf pool for rx buffers */ + struct rte_mempool *mb_pool; + /* H/w Rx buffer size configured in DMA */ + unsigned int buf_size; + /* CRC h/w offload */ + uint16_t crc_len; + /* address of s/w rx buffers */ + struct rte_mbuf **sw_ring; + /* Port private data */ + struct axgbe_port *pdata; + /* Number of Rx descriptors in queue */ + uint16_t nb_desc; + /* max free RX desc to hold */ + uint16_t free_thresh; + /* Index of descriptor to check for packet availability */ + uint64_t cur; + /* Index of descriptor to check for buffer reallocation */ + uint64_t dirty; + /* Software Rx descriptor ring*/ + volatile union axgbe_rx_desc *desc; + /* Ring physical address */ + uint64_t ring_phys_addr; + /* Dma Channel register address */ + void *dma_regs; + /* Dma channel tail register address*/ + volatile uint32_t *dma_tail_reg; + /* DPDK queue index */ + uint16_t queue_id; + /* dpdk port id*/ + uint16_t port_id; + /* queue stats */ + uint64_t pkts; + uint64_t bytes; + uint64_t errors; + /* Number of mbufs allocated from pool*/ + uint64_t mbuf_alloc; + +} __rte_cache_aligned; + +/*Tx descriptor format */ +struct axgbe_tx_desc { + phys_addr_t baddr; + uint32_t desc2; + uint32_t desc3; +}; + +struct axgbe_tx_queue { + /* Port private data reference */ + struct axgbe_port *pdata; + /* Number of Tx descriptors in queue*/ + uint16_t nb_desc; + /* Start freeing TX buffers if there are less free descriptors than + * this value + */ + uint16_t free_thresh; + /* Available descriptors for Tx processing*/ + uint16_t nb_desc_free; + /* Batch of mbufs/descs to release */ + uint16_t free_batch_cnt; + /* Flag for vector support */ + uint16_t vector_disable; + /* Index of descriptor to be used for current transfer */ + uint64_t cur; + /* Index of descriptor to check for transfer complete */ + uint64_t dirty; + /* Virtual address of ring */ + volatile struct axgbe_tx_desc *desc; + /* Physical address of ring */ + uint64_t ring_phys_addr; + /* Dma channel register space */ + void *dma_regs; + /* Dma tail register address of ring*/ + volatile uint32_t *dma_tail_reg; + /* Tx queue index/id*/ + uint16_t queue_id; + /* Reference to hold Tx mbufs mapped to Tx descriptors freed + * after transmission confirmation + */ + struct 
rte_mbuf **sw_ring; + /* dpdk port id*/ + uint16_t port_id; + /* queue stats */ + uint64_t pkts; + uint64_t bytes; + uint64_t errors; + +} __rte_cache_aligned; + +/*Queue related APIs */ + +/* + * RX/TX function prototypes + */ + + +void axgbe_dev_tx_queue_release(void *txq); +int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +void axgbe_dev_enable_tx(struct rte_eth_dev *dev); +void axgbe_dev_disable_tx(struct rte_eth_dev *dev); +int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); +int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); + +uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + + +void axgbe_dev_rx_queue_release(void *rxq); +int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); +void axgbe_dev_enable_rx(struct rte_eth_dev *dev); +void axgbe_dev_disable_rx(struct rte_eth_dev *dev); +int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); +uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +void axgbe_dev_clear_queues(struct rte_eth_dev *dev); + +#endif /* _AXGBE_RXTX_H_ */ diff --git a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c new file mode 100644 index 00000000..9be70371 --- /dev/null +++ b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
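The cur/dirty fields declared above are 64-bit monotonic counters, not ring indices: they are only masked into a slot on access (AXGBE_GET_DESC_IDX), which requires nb_desc to be a power of two but makes the occupancy math a plain subtraction with no full-versus-empty ambiguity. A standalone sketch of the same bookkeeping, with hypothetical names:

#include <stdint.h>

struct demo_ring {
	uint64_t cur;     /* next slot to produce */
	uint64_t dirty;   /* next slot to reclaim */
	uint16_t nb_desc; /* must be a power of two */
};

static inline uint16_t
ring_slot(const struct demo_ring *r, uint64_t idx)
{
	return idx & (r->nb_desc - 1); /* same trick as AXGBE_GET_DESC_IDX */
}

static inline uint16_t
ring_free(const struct demo_ring *r)
{
	return r->nb_desc - (uint16_t)(r->cur - r->dirty);
}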
+ */ + +#include "axgbe_ethdev.h" +#include "axgbe_rxtx.h" +#include "axgbe_phy.h" + +#include +#include +#include + +/* Useful to avoid shifting for every descriptor prepration*/ +#define TX_DESC_CTRL_FLAGS 0xb000000000000000 +#define TX_FREE_BULK 8 +#define TX_FREE_BULK_CHECK (TX_FREE_BULK - 1) + +static inline void +axgbe_vec_tx(volatile struct axgbe_tx_desc *desc, + struct rte_mbuf *mbuf) +{ + __m128i descriptor = _mm_set_epi64x((uint64_t)mbuf->pkt_len << 32 | + TX_DESC_CTRL_FLAGS | mbuf->data_len, + mbuf->buf_iova + + mbuf->data_off); + _mm_store_si128((__m128i *)desc, descriptor); +} + +static void +axgbe_xmit_cleanup_vec(struct axgbe_tx_queue *txq) +{ + volatile struct axgbe_tx_desc *desc; + int idx, i; + + idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt + - 1); + desc = &txq->desc[idx]; + if (desc->desc3 & AXGBE_DESC_OWN) + return; + /* memset avoided for desc ctrl fields since in vec_tx path + * all 128 bits are populated + */ + for (i = 0; i < txq->free_batch_cnt; i++, idx--) + rte_pktmbuf_free_seg(txq->sw_ring[idx]); + + + txq->dirty += txq->free_batch_cnt; + txq->nb_desc_free += txq->free_batch_cnt; +} + +uint16_t +axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + PMD_INIT_FUNC_TRACE(); + + struct axgbe_tx_queue *txq; + uint16_t idx, nb_commit, loop, i; + uint32_t tail_addr; + + txq = (struct axgbe_tx_queue *)tx_queue; + if (txq->nb_desc_free < txq->free_thresh) { + axgbe_xmit_cleanup_vec(txq); + if (unlikely(txq->nb_desc_free == 0)) + return 0; + } + nb_pkts = RTE_MIN(txq->nb_desc_free, nb_pkts); + nb_commit = nb_pkts; + idx = AXGBE_GET_DESC_IDX(txq, txq->cur); + loop = txq->nb_desc - idx; + if (nb_commit >= loop) { + for (i = 0; i < loop; ++i, ++idx, ++tx_pkts) { + axgbe_vec_tx(&txq->desc[idx], *tx_pkts); + txq->sw_ring[idx] = *tx_pkts; + } + nb_commit -= loop; + idx = 0; + } + for (i = 0; i < nb_commit; ++i, ++idx, ++tx_pkts) { + axgbe_vec_tx(&txq->desc[idx], *tx_pkts); + txq->sw_ring[idx] = *tx_pkts; + } + txq->cur += nb_pkts; + tail_addr = (uint32_t)(txq->ring_phys_addr + + idx * sizeof(struct axgbe_tx_desc)); + /* Update tail reg with next immediate address to kick Tx DMA channel*/ + rte_write32(tail_addr, (void *)txq->dma_tail_reg); + txq->pkts += nb_pkts; + txq->nb_desc_free -= nb_pkts; + + return nb_pkts; +} diff --git a/drivers/net/axgbe/meson.build b/drivers/net/axgbe/meson.build new file mode 100644 index 00000000..548ffff7 --- /dev/null +++ b/drivers/net/axgbe/meson.build @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved. 
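axgbe_vec_tx() above works because the Tx descriptor is exactly 16 bytes, so address, lengths and control flags (OWN included) can be composed in registers and written with a single _mm_store_si128() instead of four read-modify-write macro updates. A sketch of the same idea; the field packing mirrors the driver's, but the names are placeholders and rte_vect.h is assumed to pull in the SSE intrinsics on x86:

#include <stdint.h>
#include <rte_vect.h>

static inline void
store_desc16(volatile void *slot, uint64_t iova, uint32_t pkt_len,
	     uint64_t ctrl_flags)
{
	/* high qword: total length, control bits, buffer length;
	 * low qword: buffer IOVA */
	__m128i d = _mm_set_epi64x(((uint64_t)pkt_len << 32) |
				   ctrl_flags | pkt_len,
				   iova);
	_mm_store_si128((__m128i *)(uintptr_t)slot, d);
}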
+ +if host_machine.system() != 'linux' + build = false +endif + +sources = files('axgbe_ethdev.c', + 'axgbe_dev.c', + 'axgbe_mdio.c', + 'axgbe_phy_impl.c', + 'axgbe_i2c.c', + 'axgbe_rxtx.c') + +cflags += '-Wno-cast-qual' + +if arch_subdir == 'x86' + sources += files('axgbe_rxtx_vec_sse.c') +endif diff --git a/drivers/net/axgbe/rte_pmd_axgbe_version.map b/drivers/net/axgbe/rte_pmd_axgbe_version.map new file mode 100644 index 00000000..b26efa67 --- /dev/null +++ b/drivers/net/axgbe/rte_pmd_axgbe_version.map @@ -0,0 +1,4 @@ +DPDK_18.05 { + + local: *; +}; diff --git a/drivers/net/bnx2x/LICENSE.bnx2x_pmd b/drivers/net/bnx2x/LICENSE.bnx2x_pmd deleted file mode 100644 index 96c7c1e1..00000000 --- a/drivers/net/bnx2x/LICENSE.bnx2x_pmd +++ /dev/null @@ -1,28 +0,0 @@ -/* - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Broadcom Corporation nor the name of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written consent. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - */ diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile index 90ff8b1e..55d1ad6e 100644 --- a/drivers/net/bnx2x/Makefile +++ b/drivers/net/bnx2x/Makefile @@ -1,3 +1,7 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2014 - 2018 Cavium Inc. +# All rights reserved. +# www.cavium.com include $(RTE_SDK)/mk/rte.vars.mk # @@ -17,10 +21,6 @@ EXPORT_MAP := rte_pmd_bnx2x_version.map LIBABIVER := 1 -ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) -CFLAGS += -wd188 #188: enumerated type mixed with another type -endif - # # all source are stored in SRCS-y # diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c index fb02d0f3..4904eaf3 100644 --- a/drivers/net/bnx2x/bnx2x.c +++ b/drivers/net/bnx2x/bnx2x.c @@ -1,4 +1,4 @@ -/*- +/* SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 2007-2013 Broadcom Corporation. * * Eric Davis @@ -6,11 +6,9 @@ * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. 
+ * www.cavium.com */ #define BNX2X_DRIVER_VERSION "1.78.18" @@ -31,7 +29,7 @@ #define BNX2X_PMD_VER_PREFIX "BNX2X PMD" #define BNX2X_PMD_VERSION_MAJOR 1 #define BNX2X_PMD_VERSION_MINOR 0 -#define BNX2X_PMD_VERSION_REVISION 5 +#define BNX2X_PMD_VERSION_REVISION 6 #define BNX2X_PMD_VERSION_PATCH 1 static inline const char * @@ -125,7 +123,6 @@ int bnx2x_nic_load(struct bnx2x_softc *sc); static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc); static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp); -static void bnx2x_periodic_stop(struct bnx2x_softc *sc); static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t storm, uint16_t index, uint8_t op, uint8_t update); @@ -170,16 +167,16 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma, dma->sc = sc; if (IS_PF(sc)) - sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg, + snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg, rte_get_timer_cycles()); else - sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg, + snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg, rte_get_timer_cycles()); /* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */ - z = rte_memzone_reserve_aligned(mz_name, (uint64_t) (size), + z = rte_memzone_reserve_aligned(mz_name, (uint64_t)size, SOCKET_ID_ANY, - 0, align); + RTE_MEMZONE_IOVA_CONTIG, align); if (z == NULL) { PMD_DRV_LOG(ERR, "DMA alloc failed for %s", msg); return -ENOMEM; @@ -1969,9 +1966,6 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link PMD_DRV_LOG(DEBUG, "Starting NIC unload..."); - /* stop the periodic callout */ - bnx2x_periodic_stop(sc); - /* mark driver as unloaded in shmem2 */ if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); @@ -4490,6 +4484,8 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp) struct bnx2x_softc *sc = fp->sc; uint8_t more_rx = FALSE; + PMD_DRV_LOG(DEBUG, "---> FP TASK QUEUE (%d) <--", fp->index); + /* update the fastpath index */ bnx2x_update_fp_sb_idx(fp); @@ -4506,7 +4502,7 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp) } bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, - le16toh(fp->fp_hc_idx), IGU_INT_DISABLE, 1); + le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); } /* @@ -6997,16 +6993,6 @@ void bnx2x_link_status_update(struct bnx2x_softc *sc) } } -static void bnx2x_periodic_start(struct bnx2x_softc *sc) -{ - atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); -} - -static void bnx2x_periodic_stop(struct bnx2x_softc *sc) -{ - atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); -} - static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode) { int rc, cfg_idx = bnx2x_get_link_cfg_idx(sc); @@ -7041,10 +7027,6 @@ static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode) bnx2x_link_report(sc); } - if (!CHIP_REV_IS_SLOW(sc)) { - bnx2x_periodic_start(sc); - } - sc->link_params.req_line_speed[cfg_idx] = req_line_speed; return rc; } @@ -8285,16 +8267,6 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc) REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); } - -/* - * Enable internal target-read (in case we are probed after PF - * FLR). Must be done prior to any BAR read access. 
Only for - * 57712 and up - */ - if (!CHIP_IS_E1x(sc)) { - REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, - 1); - } } /* get the nvram size */ @@ -9671,7 +9643,17 @@ int bnx2x_attach(struct bnx2x_softc *sc) bnx2x_init_rte(sc); if (IS_PF(sc)) { -/* get device info and set params */ + /* Enable internal target-read (in case we are probed after PF + * FLR). Must be done prior to any BAR read access. Only for + * 57712 and up + */ + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, + 1); + DELAY(200000); + } + + /* get device info and set params */ if (bnx2x_get_device_info(sc) != 0) { PMD_DRV_LOG(NOTICE, "getting device info"); return -ENXIO; @@ -9680,7 +9662,7 @@ int bnx2x_attach(struct bnx2x_softc *sc) /* get phy settings from shmem and 'and' against admin settings */ bnx2x_get_phy_info(sc); } else { -/* Left mac of VF unfilled, PF should set it for VF */ + /* Left mac of VF unfilled, PF should set it for VF */ memset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN); } diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 17075d38..0f6024fb 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -1,4 +1,4 @@ -/*- +/* SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 2007-2013 Broadcom Corporation. * * Eric Davis @@ -6,11 +6,9 @@ * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __BNX2X_H__ @@ -1930,6 +1928,7 @@ void bnx2x_link_status_update(struct bnx2x_softc *sc); int bnx2x_complete_sp(struct bnx2x_softc *sc); int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc); void bnx2x_periodic_callout(struct bnx2x_softc *sc); +void bnx2x_periodic_stop(void *param); int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count); void bnx2x_vf_close(struct bnx2x_softc *sc); diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c index 483d5a17..575271a8 100644 --- a/drivers/net/bnx2x/bnx2x_ethdev.c +++ b/drivers/net/bnx2x/bnx2x_ethdev.c @@ -1,11 +1,8 @@ -/* +/* SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. 
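The bnx2x_ethdev.c hunks that follow replace the removed periodic start/stop flag machinery with an EAL alarm callback that re-arms itself, so the slow-path poll keeps running once per second without a dedicated thread. The shape of that pattern, reduced to a sketch with a hypothetical do_work() (error handling elided; the real code checks rte_eal_alarm_set()'s return value):

#include <rte_alarm.h>

#define PERIOD_US 1000000 /* 1 s, as BNX2X_SP_TIMER_PERIOD below */

static void do_work(void *arg) { (void)arg; /* poll link, ack SBs, ... */ }

static void
periodic_cb(void *arg)
{
	do_work(arg);
	rte_eal_alarm_set(PERIOD_US, periodic_cb, arg); /* re-arm */
}

/* start: rte_eal_alarm_set(PERIOD_US, periodic_cb, dev);
 * stop:  rte_eal_alarm_cancel(periodic_cb, dev); cancels a pending arm */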
+ * www.cavium.com */ #include "bnx2x.h" @@ -13,6 +10,7 @@ #include #include +#include int bnx2x_logtype_init; int bnx2x_logtype_driver; @@ -81,26 +79,31 @@ static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = { offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)} }; -static void +static int bnx2x_link_update(struct rte_eth_dev *dev) { struct bnx2x_softc *sc = dev->data->dev_private; + struct rte_eth_link link; PMD_INIT_FUNC_TRACE(); + bnx2x_link_status_update(sc); + memset(&link, 0, sizeof(link)); mb(); - dev->data->dev_link.link_speed = sc->link_vars.line_speed; + link.link_speed = sc->link_vars.line_speed; switch (sc->link_vars.duplex) { case DUPLEX_FULL: - dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_duplex = ETH_LINK_FULL_DUPLEX; break; case DUPLEX_HALF: - dev->data->dev_link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_duplex = ETH_LINK_HALF_DUPLEX; break; } - dev->data->dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & + link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); - dev->data->dev_link.link_status = sc->link_vars.link_up; + link.link_status = sc->link_vars.link_up; + + return rte_eth_linkstatus_set(dev, &link); } static void @@ -109,8 +112,6 @@ bnx2x_interrupt_action(struct rte_eth_dev *dev) struct bnx2x_softc *sc = dev->data->dev_private; uint32_t link_status; - PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled"); - bnx2x_intr_legacy(sc, 0); if (sc->periodic_flags & PERIODIC_GO) @@ -128,10 +129,41 @@ bnx2x_interrupt_handler(void *param) struct rte_eth_dev *dev = (struct rte_eth_dev *)param; struct bnx2x_softc *sc = dev->data->dev_private; + PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled"); + bnx2x_interrupt_action(dev); rte_intr_enable(&sc->pci_dev->intr_handle); } +static void bnx2x_periodic_start(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct bnx2x_softc *sc = dev->data->dev_private; + int ret = 0; + + atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); + bnx2x_interrupt_action(dev); + if (IS_PF(sc)) { + ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD, + bnx2x_periodic_start, (void *)dev); + if (ret) { + PMD_DRV_LOG(ERR, "Unable to start periodic" + " timer rc %d", ret); + assert(false && "Unable to start periodic timer"); + } + } +} + +void bnx2x_periodic_stop(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct bnx2x_softc *sc = dev->data->dev_private; + + atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); + + rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev); +} + /* * Devops - helper functions can be called from user application */ @@ -140,11 +172,13 @@ static int bnx2x_dev_configure(struct rte_eth_dev *dev) { struct bnx2x_softc *sc = dev->data->dev_private; + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF); PMD_INIT_FUNC_TRACE(); - if (dev->data->dev_conf.rxmode.jumbo_frame) + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len; if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) { @@ -185,6 +219,10 @@ bnx2x_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); + /* start the periodic callout */ + if (sc->periodic_flags & PERIODIC_STOP) + bnx2x_periodic_start(dev); + ret = bnx2x_init(sc); if (ret) { PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret); @@ -225,6 +263,9 @@ bnx2x_dev_stop(struct rte_eth_dev *dev) bnx2x_interrupt_handler, (void *)dev); } + /* stop the periodic callout */ + 
bnx2x_periodic_stop(dev); + ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE); if (ret) { PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret); @@ -307,20 +348,16 @@ bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete { PMD_INIT_FUNC_TRACE(); - int old_link_status = dev->data->dev_link.link_status; - - bnx2x_link_update(dev); - - return old_link_status == dev->data->dev_link.link_status ? -1 : 0; + return bnx2x_link_update(dev); } static int bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) { - int old_link_status = dev->data->dev_link.link_status; struct bnx2x_softc *sc = dev->data->dev_private; + int ret = 0; - bnx2x_link_update(dev); + ret = bnx2x_link_update(dev); bnx2x_check_bull(sc); if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) { @@ -329,7 +366,7 @@ bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_comple dev->data->dev_link.link_status = ETH_LINK_DOWN; } - return old_link_status == dev->data->dev_link.link_status ? -1 : 0; + return ret; } static int @@ -447,13 +484,13 @@ static void bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct bnx2x_softc *sc = dev->data->dev_private; - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->max_rx_queues = sc->max_rx_queues; dev_info->max_tx_queues = sc->max_tx_queues; dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE; dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN; dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS; dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME; } static int @@ -583,6 +620,17 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf) return ret; } + /* schedule periodic poll for slowpath link events */ + if (IS_PF(sc)) { + ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD, + bnx2x_periodic_start, (void *)eth_dev); + if (ret) { + PMD_DRV_LOG(ERR, "Unable to start periodic" + " timer rc %d", ret); + return -EINVAL; + } + } + eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr; PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d", @@ -597,18 +645,20 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf) if (IS_VF(sc)) { rte_spinlock_init(&sc->vf2pf_lock); - if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg), - &sc->vf2pf_mbox_mapping, "vf2pf_mbox", - RTE_CACHE_LINE_SIZE) != 0) - return -ENOMEM; + ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg), + &sc->vf2pf_mbox_mapping, "vf2pf_mbox", + RTE_CACHE_LINE_SIZE); + if (ret) + goto out; sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *) sc->vf2pf_mbox_mapping.vaddr; - if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin), - &sc->pf2vf_bulletin_mapping, "vf2pf_bull", - RTE_CACHE_LINE_SIZE) != 0) - return -ENOMEM; + ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin), + &sc->pf2vf_bulletin_mapping, "vf2pf_bull", + RTE_CACHE_LINE_SIZE); + if (ret) + goto out; sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *) sc->pf2vf_bulletin_mapping.vaddr; @@ -616,10 +666,14 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf) ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues, sc->max_rx_queues); if (ret) - return ret; + goto out; } return 0; + +out: + bnx2x_periodic_stop(eth_dev); + return ret; } static int @@ -642,24 +696,14 @@ static struct rte_pci_driver rte_bnx2xvf_pmd; static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) { - struct rte_eth_dev *eth_dev; - int ret; - - eth_dev = 
rte_eth_dev_pci_allocate(pci_dev, sizeof(struct bnx2x_softc)); - if (!eth_dev) - return -ENOMEM; - if (pci_drv == &rte_bnx2x_pmd) - ret = eth_bnx2x_dev_init(eth_dev); + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct bnx2x_softc), eth_bnx2x_dev_init); else if (pci_drv == &rte_bnx2xvf_pmd) - ret = eth_bnx2xvf_dev_init(eth_dev); + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct bnx2x_softc), eth_bnx2xvf_dev_init); else - ret = -EINVAL; - - if (ret) - rte_eth_dev_pci_release(eth_dev); - - return ret; + return -EINVAL; } static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev) @@ -691,14 +735,12 @@ RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map); RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci"); -RTE_INIT(bnx2x_init_log); -static void -bnx2x_init_log(void) +RTE_INIT(bnx2x_init_log) { - bnx2x_logtype_init = rte_log_register("pmd.bnx2x.init"); + bnx2x_logtype_init = rte_log_register("pmd.net.bnx2x.init"); if (bnx2x_logtype_init >= 0) rte_log_set_level(bnx2x_logtype_init, RTE_LOG_NOTICE); - bnx2x_logtype_driver = rte_log_register("pmd.bnx2x.driver"); + bnx2x_logtype_driver = rte_log_register("pmd.net.bnx2x.driver"); if (bnx2x_logtype_driver >= 0) rte_log_set_level(bnx2x_logtype_driver, RTE_LOG_NOTICE); } diff --git a/drivers/net/bnx2x/bnx2x_ethdev.h b/drivers/net/bnx2x/bnx2x_ethdev.h index 37cac158..807ba178 100644 --- a/drivers/net/bnx2x/bnx2x_ethdev.h +++ b/drivers/net/bnx2x/bnx2x_ethdev.h @@ -1,11 +1,8 @@ -/* +/* SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef PMD_BNX2X_ETHDEV_H @@ -58,7 +55,6 @@ #define wmb() rte_wmb() #define rmb() rte_rmb() - #define MAX_QUEUES sysconf(_SC_NPROCESSORS_CONF) #define BNX2X_MIN_RX_BUF_SIZE 1024 @@ -72,6 +68,8 @@ /* Maximum number of Rx packets to process at a time */ #define BNX2X_RX_BUDGET 0xffffffff +#define BNX2X_SP_TIMER_PERIOD US_PER_S /* 1 second */ + #endif /* MAC address operations */ diff --git a/drivers/net/bnx2x/bnx2x_logs.h b/drivers/net/bnx2x/bnx2x_logs.h index 08c1b764..9e232a9b 100644 --- a/drivers/net/bnx2x/bnx2x_logs.h +++ b/drivers/net/bnx2x/bnx2x_logs.h @@ -1,11 +1,8 @@ -/* +/* SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef _PMD_LOGS_H_ diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c index a0d4ac92..d9a4127d 100644 --- a/drivers/net/bnx2x/bnx2x_rxtx.c +++ b/drivers/net/bnx2x/bnx2x_rxtx.c @@ -1,11 +1,8 @@ -/* +/* SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. 
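The probe rewrite above drops the hand-rolled allocate/init/release sequence in favor of rte_eth_dev_pci_generic_probe(), which performs all three steps and unwinds the allocation if the init callback fails. The PMD supplies only its private-data size and that callback; a minimal sketch with placeholder names:

#include <rte_common.h>
#include <rte_ethdev_pci.h>

struct my_private { int dummy; };

static int
my_dev_init(struct rte_eth_dev *eth_dev)
{
	(void)eth_dev; /* device-specific setup would go here */
	return 0;
}

static int
my_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
			sizeof(struct my_private), my_dev_init);
}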
+ * www.cavium.com */ #include "bnx2x.h" @@ -26,7 +23,8 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, if (mz) return mz; - return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, BNX2X_PAGE_SIZE); + return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, + RTE_MEMZONE_IOVA_CONTIG, BNX2X_PAGE_SIZE); } static void @@ -140,7 +138,8 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, return -ENOMEM; } rxq->sw_ring[idx] = mbuf; - rxq->rx_ring[idx] = mbuf->buf_iova; + rxq->rx_ring[idx] = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); } rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; @@ -400,7 +399,8 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rx_mb = rxq->sw_ring[bd_cons]; rxq->sw_ring[bd_cons] = new_mb; - rxq->rx_ring[bd_prod] = new_mb->buf_iova; + rxq->rx_ring[bd_prod] = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb)); rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq); rte_prefetch0(rxq->sw_ring[rx_pref]); @@ -409,7 +409,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rte_prefetch0(&rxq->sw_ring[rx_pref]); } - rx_mb->data_off = pad; + rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM; rx_mb->nb_segs = 1; rx_mb->next = NULL; rx_mb->pkt_len = rx_mb->data_len = len; diff --git a/drivers/net/bnx2x/bnx2x_rxtx.h b/drivers/net/bnx2x/bnx2x_rxtx.h index 9600e0f1..6ad4928c 100644 --- a/drivers/net/bnx2x/bnx2x_rxtx.h +++ b/drivers/net/bnx2x/bnx2x_rxtx.h @@ -1,11 +1,8 @@ -/* +/* SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef _BNX2X_RXTX_H_ diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index b9b85963..edc86ccc 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c @@ -1,16 +1,14 @@ -/*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. * * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #include "bnx2x.h" diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h index 3396de31..635412bd 100644 --- a/drivers/net/bnx2x/bnx2x_stats.h +++ b/drivers/net/bnx2x/bnx2x_stats.h @@ -1,16 +1,14 @@ -/*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. * * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. 
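The ring_dma_zone_reserve() change above swaps flag 0 for RTE_MEMZONE_IOVA_CONTIG: after the 18.05 memory rework a plain reservation may be physically fragmented, which a DMA engine walking a descriptor ring cannot tolerate. The minimal shape of such a reservation, with placeholder names:

#include <rte_memzone.h>

static const struct rte_memzone *
ring_reserve(const char *name, size_t size, int socket_id, size_t align)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) /* reuse an existing zone, as the driver does */
		return mz;
	return rte_memzone_reserve_aligned(name, size, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG, align);
}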
+ * www.cavium.com */ #ifndef BNX2X_STATS_H diff --git a/drivers/net/bnx2x/bnx2x_vfpf.c b/drivers/net/bnx2x/bnx2x_vfpf.c index 3c08f2a2..50099d46 100644 --- a/drivers/net/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/bnx2x/bnx2x_vfpf.c @@ -1,11 +1,8 @@ -/* +/* SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #include "bnx2x.h" diff --git a/drivers/net/bnx2x/bnx2x_vfpf.h b/drivers/net/bnx2x/bnx2x_vfpf.h index d7cc11be..cc6fef95 100644 --- a/drivers/net/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/bnx2x/bnx2x_vfpf.h @@ -1,11 +1,8 @@ -/* +/* SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef BNX2X_VFPF_H diff --git a/drivers/net/bnx2x/ecore_fw_defs.h b/drivers/net/bnx2x/ecore_fw_defs.h index ab490efa..5984acd9 100644 --- a/drivers/net/bnx2x/ecore_fw_defs.h +++ b/drivers/net/bnx2x/ecore_fw_defs.h @@ -1,15 +1,13 @@ -/*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. * * Eric Davis * David Christensen * Gary Zambrano * - * Copyright (c) 2014-2015 QLogic Corporation. + * Copyright (c) 2014-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef ECORE_FW_DEFS_H diff --git a/drivers/net/bnx2x/ecore_hsi.h b/drivers/net/bnx2x/ecore_hsi.h index 5cce6647..57085ebb 100644 --- a/drivers/net/bnx2x/ecore_hsi.h +++ b/drivers/net/bnx2x/ecore_hsi.h @@ -1,15 +1,13 @@ -/*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. * * Eric Davis * David Christensen * Gary Zambrano * - * Copyright (c) 2014-2015 QLogic Corporation. + * Copyright (c) 2014-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef ECORE_HSI_H diff --git a/drivers/net/bnx2x/ecore_init.h b/drivers/net/bnx2x/ecore_init.h index 4576c565..f2de07e5 100644 --- a/drivers/net/bnx2x/ecore_init.h +++ b/drivers/net/bnx2x/ecore_init.h @@ -1,16 +1,14 @@ -/*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. * * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef ECORE_INIT_H diff --git a/drivers/net/bnx2x/ecore_init_ops.h b/drivers/net/bnx2x/ecore_init_ops.h index b6f98324..2b003afb 100644 --- a/drivers/net/bnx2x/ecore_init_ops.h +++ b/drivers/net/bnx2x/ecore_init_ops.h @@ -1,16 +1,14 @@ -/*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. * * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef ECORE_INIT_OPS_H diff --git a/drivers/net/bnx2x/ecore_mfw_req.h b/drivers/net/bnx2x/ecore_mfw_req.h index 57529097..fe945048 100644 --- a/drivers/net/bnx2x/ecore_mfw_req.h +++ b/drivers/net/bnx2x/ecore_mfw_req.h @@ -1,15 +1,13 @@ -/*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. * * Eric Davis * David Christensen * Gary Zambrano * - * Copyright (c) 2014-2015 QLogic Corporation. + * Copyright (c) 2014-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef ECORE_MFW_REQ_H diff --git a/drivers/net/bnx2x/ecore_reg.h b/drivers/net/bnx2x/ecore_reg.h index d8203b45..ae8a93bb 100644 --- a/drivers/net/bnx2x/ecore_reg.h +++ b/drivers/net/bnx2x/ecore_reg.h @@ -1,15 +1,13 @@ -/*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. * * Eric Davis * David Christensen * Gary Zambrano * - * Copyright (c) 2014-2015 QLogic Corporation. + * Copyright (c) 2014-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef ECORE_REG_H diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c index a75a7fa4..0c8685c6 100644 --- a/drivers/net/bnx2x/ecore_sp.c +++ b/drivers/net/bnx2x/ecore_sp.c @@ -1,16 +1,14 @@ -/*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. * * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #include "bnx2x.h" diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h index ff40413c..6b65a496 100644 --- a/drivers/net/bnx2x/ecore_sp.h +++ b/drivers/net/bnx2x/ecore_sp.h @@ -1,16 +1,14 @@ -/*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. * * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef ECORE_SP_H diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c index 9d0f3136..b63fd23e 100644 --- a/drivers/net/bnx2x/elink.c +++ b/drivers/net/bnx2x/elink.c @@ -1,16 +1,14 @@ -/* - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. 
* * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #include "bnx2x.h" @@ -4143,9 +4141,9 @@ static void elink_sfp_e3_set_transmitter(struct elink_params *params, elink_set_cfg_pin(sc, cfg_pin + 3, tx_en ^ 1); } -static void elink_warpcore_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_warpcore_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; uint32_t serdes_net_if; @@ -4222,7 +4220,7 @@ static void elink_warpcore_config_init(struct elink_phy *phy, case PORT_HW_CFG_NET_SERDES_IF_DXGXS: if (vars->line_speed != ELINK_SPEED_20000) { PMD_DRV_LOG(DEBUG, "Speed not supported yet"); - return; + return 0; } PMD_DRV_LOG(DEBUG, "Setting 20G DXGXS"); elink_warpcore_set_20G_DXGXS(sc, phy, lane); @@ -4242,13 +4240,15 @@ static void elink_warpcore_config_init(struct elink_phy *phy, PMD_DRV_LOG(DEBUG, "Unsupported Serdes Net Interface 0x%x", serdes_net_if); - return; + return 0; } } /* Take lane out of reset after configuration is finished */ elink_warpcore_reset_lane(sc, phy, 0); PMD_DRV_LOG(DEBUG, "Exit config init"); + + return 0; } static void elink_warpcore_link_reset(struct elink_phy *phy, @@ -5226,9 +5226,9 @@ static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy, return ELINK_STATUS_OK; } -static elink_status_t elink_link_settings_status(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_link_settings_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; @@ -5299,9 +5299,9 @@ static elink_status_t elink_link_settings_status(struct elink_phy *phy, return rc; } -static elink_status_t elink_warpcore_read_status(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_warpcore_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; uint8_t lane; @@ -5520,9 +5520,9 @@ static void elink_set_preemphasis(struct elink_phy *phy, } } -static void elink_xgxs_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_xgxs_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { uint8_t enable_cl73 = (ELINK_SINGLE_MEDIA_DIRECT(params) || (params->loopback_mode == ELINK_LOOPBACK_XGXS)); @@ -5567,6 +5567,8 @@ static void elink_xgxs_config_init(struct elink_phy *phy, elink_initialize_sgmii_process(phy, params, vars); } + + return 0; } static elink_status_t elink_prepare_xgxs(struct elink_phy *phy, @@ -5751,8 +5753,8 @@ static void elink_link_int_ack(struct elink_params *params, } } -static elink_status_t elink_format_ver(uint32_t num, uint8_t * str, - uint16_t * len) +static uint8_t elink_format_ver(uint32_t num, uint8_t * str, + uint16_t * len) { uint8_t *str_ptr = str; uint32_t mask = 0xf0000000; @@ -5790,8 +5792,8 @@ static elink_status_t elink_format_ver(uint32_t num, uint8_t * str, return ELINK_STATUS_OK; } -static elink_status_t elink_null_format_ver(__rte_unused uint32_t spirom_ver, - uint8_t * str, uint16_t * len) +static uint8_t 
elink_null_format_ver(__rte_unused uint32_t spirom_ver, + uint8_t * str, uint16_t * len) { str[0] = '\0'; (*len)--; @@ -6802,9 +6804,9 @@ static void elink_8073_specific_func(struct elink_phy *phy, } } -static elink_status_t elink_8073_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_8073_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; uint16_t val = 0, tmp1; @@ -7097,9 +7099,9 @@ static void elink_8073_link_reset(__rte_unused struct elink_phy *phy, /******************************************************************/ /* BNX2X8705 PHY SECTION */ /******************************************************************/ -static elink_status_t elink_8705_config_init(struct elink_phy *phy, - struct elink_params *params, - __rte_unused struct elink_vars +static uint8_t elink_8705_config_init(struct elink_phy *phy, + struct elink_params *params, + __rte_unused struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; @@ -8403,9 +8405,9 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy, return ELINK_STATUS_OK; } -static elink_status_t elink_8706_read_status(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_8706_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { return elink_8706_8726_read_status(phy, params, vars); } @@ -8477,9 +8479,9 @@ static uint8_t elink_8726_read_status(struct elink_phy *phy, return link_up; } -static elink_status_t elink_8726_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_8726_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; PMD_DRV_LOG(DEBUG, "Initializing BNX2X8726"); @@ -8684,9 +8686,9 @@ static void elink_8727_config_speed(struct elink_phy *phy, } } -static elink_status_t elink_8727_config_init(struct elink_phy *phy, - struct elink_params *params, - __rte_unused struct elink_vars +static uint8_t elink_8727_config_init(struct elink_phy *phy, + struct elink_params *params, + __rte_unused struct elink_vars *vars) { uint32_t tx_en_mode; @@ -9291,7 +9293,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy, return ELINK_STATUS_OK; } -static elink_status_t elink_8481_config_init(struct elink_phy *phy, +static uint8_t elink_8481_config_init(struct elink_phy *phy, struct elink_params *params, struct elink_vars *vars) { @@ -9442,8 +9444,8 @@ static uint8_t elink_84833_get_reset_gpios(struct bnx2x_softc *sc, return reset_gpios; } -static elink_status_t elink_84833_hw_reset_phy(struct elink_phy *phy, - struct elink_params *params) +static void elink_84833_hw_reset_phy(struct elink_phy *phy, + struct elink_params *params) { struct bnx2x_softc *sc = params->sc; uint8_t reset_gpios; @@ -9471,8 +9473,6 @@ static elink_status_t elink_84833_hw_reset_phy(struct elink_phy *phy, MISC_REGISTERS_GPIO_OUTPUT_LOW); DELAY(10); PMD_DRV_LOG(DEBUG, "84833 hw reset on pin values 0x%x", reset_gpios); - - return ELINK_STATUS_OK; } static elink_status_t elink_8483x_disable_eee(struct elink_phy *phy, @@ -9513,9 +9513,9 @@ static elink_status_t elink_8483x_enable_eee(struct elink_phy *phy, } #define PHY84833_CONSTANT_LATENCY 1193 -static elink_status_t elink_848x3_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t 
elink_848x3_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; uint8_t port, initialize = 1; @@ -9819,7 +9819,7 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy, return link_up; } -static elink_status_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str, +static uint8_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str, uint16_t * len) { elink_status_t status = ELINK_STATUS_OK; @@ -10146,9 +10146,9 @@ static void elink_54618se_specific_func(struct elink_phy *phy, } } -static elink_status_t elink_54618se_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_54618se_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; uint8_t port; @@ -10542,9 +10542,9 @@ static void elink_7101_config_loopback(struct elink_phy *phy, MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100); } -static elink_status_t elink_7101_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_7101_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { uint16_t fw_ver1, fw_ver2, val; struct bnx2x_softc *sc = params->sc; @@ -10614,8 +10614,8 @@ static uint8_t elink_7101_read_status(struct elink_phy *phy, return link_up; } -static elink_status_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str, - uint16_t * len) +static uint8_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str, + uint16_t * len) { if (*len < 5) return ELINK_STATUS_ERROR; @@ -10680,14 +10680,14 @@ static const struct elink_phy phy_null = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) NULL, - .read_status = (read_status_t) NULL, - .link_reset = (link_reset_t) NULL, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) NULL, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = NULL, + .read_status = NULL, + .link_reset = NULL, + .config_loopback = NULL, + .format_fw_ver = NULL, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct elink_phy phy_serdes = { @@ -10714,14 +10714,14 @@ static const struct elink_phy phy_serdes = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_xgxs_config_init, - .read_status = (read_status_t) elink_link_settings_status, - .link_reset = (link_reset_t) elink_int_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) NULL, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = elink_xgxs_config_init, + .read_status = elink_link_settings_status, + .link_reset = elink_int_link_reset, + .config_loopback = NULL, + .format_fw_ver = NULL, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct elink_phy phy_xgxs = { @@ -10749,14 +10749,14 @@ static const struct elink_phy phy_xgxs = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_xgxs_config_init, - .read_status = (read_status_t) elink_link_settings_status, - .link_reset = (link_reset_t) elink_int_link_reset, - .config_loopback = (config_loopback_t) elink_set_xgxs_loopback, - .format_fw_ver = (format_fw_ver_t) 
NULL, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) elink_xgxs_specific_func + .config_init = elink_xgxs_config_init, + .read_status = elink_link_settings_status, + .link_reset = elink_int_link_reset, + .config_loopback = elink_set_xgxs_loopback, + .format_fw_ver = NULL, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = elink_xgxs_specific_func }; static const struct elink_phy phy_warpcore = { @@ -10785,14 +10785,14 @@ static const struct elink_phy phy_warpcore = { .speed_cap_mask = 0, /* req_duplex = */ 0, /* rsrv = */ 0, - .config_init = (config_init_t) elink_warpcore_config_init, - .read_status = (read_status_t) elink_warpcore_read_status, - .link_reset = (link_reset_t) elink_warpcore_link_reset, - .config_loopback = (config_loopback_t) elink_set_warpcore_loopback, - .format_fw_ver = (format_fw_ver_t) NULL, - .hw_reset = (hw_reset_t) elink_warpcore_hw_reset, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = elink_warpcore_config_init, + .read_status = elink_warpcore_read_status, + .link_reset = elink_warpcore_link_reset, + .config_loopback = elink_set_warpcore_loopback, + .format_fw_ver = NULL, + .hw_reset = elink_warpcore_hw_reset, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct elink_phy phy_7101 = { @@ -10814,14 +10814,14 @@ static const struct elink_phy phy_7101 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_7101_config_init, - .read_status = (read_status_t) elink_7101_read_status, - .link_reset = (link_reset_t) elink_common_ext_link_reset, - .config_loopback = (config_loopback_t) elink_7101_config_loopback, - .format_fw_ver = (format_fw_ver_t) elink_7101_format_ver, - .hw_reset = (hw_reset_t) elink_7101_hw_reset, - .set_link_led = (set_link_led_t) elink_7101_set_link_led, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = elink_7101_config_init, + .read_status = elink_7101_read_status, + .link_reset = elink_common_ext_link_reset, + .config_loopback = elink_7101_config_loopback, + .format_fw_ver = elink_7101_format_ver, + .hw_reset = elink_7101_hw_reset, + .set_link_led = elink_7101_set_link_led, + .phy_specific_func = NULL }; static const struct elink_phy phy_8073 = { @@ -10845,14 +10845,14 @@ static const struct elink_phy phy_8073 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_8073_config_init, - .read_status = (read_status_t) elink_8073_read_status, - .link_reset = (link_reset_t) elink_8073_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_format_ver, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) elink_8073_specific_func + .config_init = elink_8073_config_init, + .read_status = elink_8073_read_status, + .link_reset = elink_8073_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_format_ver, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = elink_8073_specific_func }; static const struct elink_phy phy_8705 = { @@ -10873,14 +10873,14 @@ static const struct elink_phy phy_8705 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_8705_config_init, - .read_status = (read_status_t) elink_8705_read_status, - .link_reset = (link_reset_t) elink_common_ext_link_reset, - .config_loopback = (config_loopback_t) NULL, - 
.format_fw_ver = (format_fw_ver_t) elink_null_format_ver, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = elink_8705_config_init, + .read_status = elink_8705_read_status, + .link_reset = elink_common_ext_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_null_format_ver, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct elink_phy phy_8706 = { @@ -10902,14 +10902,14 @@ static const struct elink_phy phy_8706 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_8706_config_init, - .read_status = (read_status_t) elink_8706_read_status, - .link_reset = (link_reset_t) elink_common_ext_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_format_ver, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = elink_8706_config_init, + .read_status = elink_8706_read_status, + .link_reset = elink_common_ext_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_format_ver, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct elink_phy phy_8726 = { @@ -10932,14 +10932,14 @@ static const struct elink_phy phy_8726 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_8726_config_init, - .read_status = (read_status_t) elink_8726_read_status, - .link_reset = (link_reset_t) elink_8726_link_reset, - .config_loopback = (config_loopback_t) elink_8726_config_loopback, - .format_fw_ver = (format_fw_ver_t) elink_format_ver, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = elink_8726_config_init, + .read_status = elink_8726_read_status, + .link_reset = elink_8726_link_reset, + .config_loopback = elink_8726_config_loopback, + .format_fw_ver = elink_format_ver, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct elink_phy phy_8727 = { @@ -10961,14 +10961,14 @@ static const struct elink_phy phy_8727 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_8727_config_init, - .read_status = (read_status_t) elink_8727_read_status, - .link_reset = (link_reset_t) elink_8727_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_format_ver, - .hw_reset = (hw_reset_t) elink_8727_hw_reset, - .set_link_led = (set_link_led_t) elink_8727_set_link_led, - .phy_specific_func = (phy_specific_func_t) elink_8727_specific_func + .config_init = elink_8727_config_init, + .read_status = elink_8727_read_status, + .link_reset = elink_8727_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_format_ver, + .hw_reset = elink_8727_hw_reset, + .set_link_led = elink_8727_set_link_led, + .phy_specific_func = elink_8727_specific_func }; static const struct elink_phy phy_8481 = { @@ -10996,14 +10996,14 @@ static const struct elink_phy phy_8481 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_8481_config_init, - .read_status = (read_status_t) elink_848xx_read_status, - .link_reset = (link_reset_t) elink_8481_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver, - .hw_reset = (hw_reset_t) elink_8481_hw_reset, - 
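The long run of elink changes above is a single cleanup: the callbacks are given the exact return type the struct fields expect (uint8_t, or void where nothing meaningful was returned), after which the (config_init_t), (read_status_t) and similar casts can simply be deleted. Calling a function through a mismatched pointer type is undefined behavior, and newer compilers (e.g. gcc 8's -Wcast-function-type) warn on such casts. The pattern reduced to a sketch, with stand-in types:

#include <stdint.h>

struct phy;
typedef uint8_t (*config_init_t)(struct phy *);

static uint8_t
demo_config_init(struct phy *p)
{
	(void)p;
	return 0;
}

static const struct demo_phy_ops {
	config_init_t config_init;
} demo_ops = {
	.config_init = demo_config_init, /* types match: no cast needed */
};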
.set_link_led = (set_link_led_t) elink_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = elink_8481_config_init, + .read_status = elink_848xx_read_status, + .link_reset = elink_8481_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_848xx_format_ver, + .hw_reset = elink_8481_hw_reset, + .set_link_led = elink_848xx_set_link_led, + .phy_specific_func = NULL }; static const struct elink_phy phy_84823 = { @@ -11031,14 +11031,14 @@ static const struct elink_phy phy_84823 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_848x3_config_init, - .read_status = (read_status_t) elink_848xx_read_status, - .link_reset = (link_reset_t) elink_848x3_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) elink_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func + .config_init = elink_848x3_config_init, + .read_status = elink_848xx_read_status, + .link_reset = elink_848x3_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_848xx_format_ver, + .hw_reset = NULL, + .set_link_led = elink_848xx_set_link_led, + .phy_specific_func = elink_848xx_specific_func }; static const struct elink_phy phy_84833 = { @@ -11065,14 +11065,14 @@ static const struct elink_phy phy_84833 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_848x3_config_init, - .read_status = (read_status_t) elink_848xx_read_status, - .link_reset = (link_reset_t) elink_848x3_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver, - .hw_reset = (hw_reset_t) elink_84833_hw_reset_phy, - .set_link_led = (set_link_led_t) elink_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func + .config_init = elink_848x3_config_init, + .read_status = elink_848xx_read_status, + .link_reset = elink_848x3_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_848xx_format_ver, + .hw_reset = elink_84833_hw_reset_phy, + .set_link_led = elink_848xx_set_link_led, + .phy_specific_func = elink_848xx_specific_func }; static const struct elink_phy phy_84834 = { @@ -11098,14 +11098,14 @@ static const struct elink_phy phy_84834 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_848x3_config_init, - .read_status = (read_status_t) elink_848xx_read_status, - .link_reset = (link_reset_t) elink_848x3_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver, - .hw_reset = (hw_reset_t) elink_84833_hw_reset_phy, - .set_link_led = (set_link_led_t) elink_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func + .config_init = elink_848x3_config_init, + .read_status = elink_848xx_read_status, + .link_reset = elink_848x3_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_848xx_format_ver, + .hw_reset = elink_84833_hw_reset_phy, + .set_link_led = elink_848xx_set_link_led, + .phy_specific_func = elink_848xx_specific_func }; static const struct elink_phy phy_54618se = { @@ -11131,14 +11131,14 @@ static const struct elink_phy phy_54618se = { .speed_cap_mask = 0, /* req_duplex = */ 0, /* rsrv = */ 0, - .config_init = (config_init_t) elink_54618se_config_init, - .read_status = (read_status_t) elink_54618se_read_status, - .link_reset = 
(link_reset_t) elink_54618se_link_reset, - .config_loopback = (config_loopback_t) elink_54618se_config_loopback, - .format_fw_ver = (format_fw_ver_t) NULL, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) elink_5461x_set_link_led, - .phy_specific_func = (phy_specific_func_t) elink_54618se_specific_func + .config_init = elink_54618se_config_init, + .read_status = elink_54618se_read_status, + .link_reset = elink_54618se_link_reset, + .config_loopback = elink_54618se_config_loopback, + .format_fw_ver = NULL, + .hw_reset = NULL, + .set_link_led = elink_5461x_set_link_led, + .phy_specific_func = elink_54618se_specific_func }; /*****************************************************************/ @@ -12919,7 +12919,7 @@ static void elink_check_kr2_wa(struct elink_params *params, */ not_kr2_device = (((base_page & 0x8000) == 0) || (((base_page & 0x8000) && - ((next_page & 0xe0) == 0x2)))); + ((next_page & 0xe0) == 0x20)))); /* In case KR2 is already disabled, check if we need to re-enable it */ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { diff --git a/drivers/net/bnx2x/elink.h b/drivers/net/bnx2x/elink.h index 9401b7cd..40000c24 100644 --- a/drivers/net/bnx2x/elink.h +++ b/drivers/net/bnx2x/elink.h @@ -1,16 +1,14 @@ -/* - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. * * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.bnx2x_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef ELINK_H diff --git a/drivers/net/bnx2x/meson.build b/drivers/net/bnx2x/meson.build new file mode 100644 index 00000000..e3c68886 --- /dev/null +++ b/drivers/net/bnx2x/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +dep = cc.find_library('z', required: false) +build = dep.found() +ext_deps += dep +cflags += '-DZLIB_CONST' +sources = files('bnx2x.c', + 'bnx2x_ethdev.c', + 'bnx2x_rxtx.c', + 'bnx2x_stats.c', + 'bnx2x_vfpf.c', + 'ecore_sp.c', + 'elink.c') diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile index 2aa04411..8be3cb0e 100644 --- a/drivers/net/bnxt/Makefile +++ b/drivers/net/bnxt/Makefile @@ -1,35 +1,8 @@ -# BSD LICENSE -# -# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. -# Copyright(c) 2014 6WIND S.A. -# Copyright(c) Broadcom Limited. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Intel Corporation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation. +# Copyright(c) 2014 6WIND S.A. +# Copyright(c) Broadcom Limited. +# All rights reserved. include $(RTE_SDK)/mk/rte.vars.mk @@ -56,6 +29,7 @@ EXPORT_MAP := rte_pmd_bnxt_version.map SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_cpr.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ethdev.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_flow.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_hwrm.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ring.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxq.c @@ -65,6 +39,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txq.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txr.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_vnic.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_irq.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_util.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += rte_pmd_bnxt.c # diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h index b5a0badf..db5c4eb0 100644 --- a/drivers/net/bnxt/bnxt.h +++ b/drivers/net/bnxt/bnxt.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_H_ @@ -50,7 +22,24 @@ #define BNXT_MAX_MTU 9500 #define VLAN_TAG_SIZE 4 +#define BNXT_VF_RSV_NUM_RSS_CTX 1 +#define BNXT_VF_RSV_NUM_L2_CTX 4 +/* TODO: For now, do not support VMDq/RFS on VFs. */ +#define BNXT_VF_RSV_NUM_VNIC 1 #define BNXT_MAX_LED 4 +#define BNXT_NUM_VLANS 2 +#define BNXT_MIN_RING_DESC 16 +#define BNXT_MAX_TX_RING_DESC 4096 +#define BNXT_MAX_RX_RING_DESC 8192 +#define BNXT_DB_SIZE 0x80 + +#define BNXT_INT_LAT_TMR_MIN 75 +#define BNXT_INT_LAT_TMR_MAX 150 +#define BNXT_NUM_CMPL_AGGR_INT 36 +#define BNXT_CMPL_AGGR_DMA_TMR 37 +#define BNXT_NUM_CMPL_DMA_AGGR 36 +#define BNXT_CMPL_AGGR_DMA_TMR_DURING_INT 50 +#define BNXT_NUM_CMPL_DMA_AGGR_DURING_INT 12 struct bnxt_led_info { uint8_t led_id; @@ -125,6 +114,7 @@ struct bnxt_child_vf_info { struct bnxt_pf_info { #define BNXT_FIRST_PF_FID 1 #define BNXT_MAX_VFS(bp) (bp->pf.max_vfs) +#define BNXT_TOTAL_VFS(bp) ((bp)->pf.total_vfs) #define BNXT_FIRST_VF_FID 128 #define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp) #define BNXT_PF_RINGS_AVAIL(bp) (bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp)) @@ -132,6 +122,9 @@ struct bnxt_pf_info { uint16_t first_vf_id; uint16_t active_vfs; uint16_t max_vfs; + uint16_t total_vfs; /* Total VFs possible. + * Not necessarily enabled. + */ uint32_t func_cfg_flags; void *vf_req_buf; rte_iova_t vf_req_buf_dma_addr; @@ -229,6 +222,16 @@ struct bnxt_ptp_cfg { uint32_t tx_mapped_regs[BNXT_PTP_TX_REGS]; }; +struct bnxt_coal { + uint16_t num_cmpl_aggr_int; + uint16_t num_cmpl_dma_aggr; + uint16_t num_cmpl_dma_aggr_during_int; + uint16_t int_lat_tmr_max; + uint16_t int_lat_tmr_min; + uint16_t cmpl_aggr_dma_tmr; + uint16_t cmpl_aggr_dma_tmr_during_int; +}; + #define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) struct bnxt { void *bar0; @@ -236,6 +239,7 @@ struct bnxt { struct rte_eth_dev *eth_dev; struct rte_eth_rss_conf rss_conf; struct rte_pci_device *pdev; + void *doorbell_base; uint32_t flags; #define BNXT_FLAG_REGISTERED (1 << 0) @@ -246,6 +250,7 @@ struct bnxt { #define BNXT_FLAG_UPDATE_HASH (1 << 5) #define BNXT_FLAG_PTP_SUPPORTED (1 << 6) #define BNXT_FLAG_MULTI_HOST (1 << 7) +#define BNXT_FLAG_NEW_RM (1 << 30) #define BNXT_FLAG_INIT_DONE (1 << 31) #define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) #define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) @@ -300,6 +305,7 @@ struct bnxt { struct bnxt_link_info link_info; struct bnxt_cos_queue_info cos_queue[BNXT_COS_QUEUE_COUNT]; + uint8_t tx_cosq_id; uint16_t fw_fid; uint8_t dflt_mac_addr[ETHER_ADDR_LEN]; @@ -321,19 +327,19 @@ struct bnxt { uint16_t vxlan_fw_dst_port_id; uint16_t geneve_fw_dst_port_id; uint32_t fw_ver; - rte_atomic64_t rx_mbuf_alloc_fail; + uint32_t hwrm_spec_code; struct bnxt_led_info leds[BNXT_MAX_LED]; uint8_t num_leds; struct bnxt_ptp_cfg *ptp_cfg; + uint16_t vf_resv_strategy; }; int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete); int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg); -#define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG 0x6 - bool is_bnxt_supported(struct rte_eth_dev *dev); +bool bnxt_stratus_device(struct bnxt *bp); extern const struct rte_flow_ops bnxt_flow_ops; extern int bnxt_logtype_driver; diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c index 737bb060..ff20b6fd 100644 --- a/drivers/net/bnxt/bnxt_cpr.c +++ b/drivers/net/bnxt/bnxt_cpr.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #include @@ -55,6 +27,7 @@ void bnxt_handle_async_event(struct bnxt *bp, case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: + /* FALLTHROUGH */ bnxt_link_update_op(bp->eth_dev, 1); break; case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: @@ -159,69 +132,31 @@ reject: return; } -/* For the default completion ring only */ -int bnxt_alloc_def_cp_ring(struct bnxt *bp) +int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp) { - struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; - struct bnxt_ring *cp_ring = cpr->cp_ring_struct; - int rc; - - rc = bnxt_hwrm_ring_alloc(bp, cp_ring, - HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, - 0, HWRM_NA_SIGNATURE, - HWRM_NA_SIGNATURE); - if (rc) - goto err_out; - cpr->cp_doorbell = bp->pdev->mem_resource[2].addr; - B_CP_DIS_DB(cpr, cpr->cp_raw_cons); - if (BNXT_PF(bp)) - rc = bnxt_hwrm_func_cfg_def_cp(bp); - else - rc = bnxt_hwrm_vf_func_cfg_def_cp(bp); - -err_out: - return rc; -} + bool evt = 0; -void bnxt_free_def_cp_ring(struct bnxt *bp) -{ - struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; - - if (cpr == NULL) - return; + if (bp == NULL || cmp == NULL) { + PMD_DRV_LOG(ERR, "invalid NULL argument\n"); + return evt; + } - bnxt_free_ring(cpr->cp_ring_struct); - cpr->cp_ring_struct = NULL; - rte_free(cpr->cp_ring_struct); - rte_free(cpr); - bp->def_cp_ring = NULL; -} + switch (CMP_TYPE(cmp)) { + case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: + /* Handle any async event */ + bnxt_handle_async_event(bp, cmp); + evt = 1; + break; + case CMPL_BASE_TYPE_HWRM_FWD_RESP: + /* Handle HWRM forwarded responses */ + bnxt_handle_fwd_req(bp, cmp); + evt = 1; + break; + default: + /* Ignore any other events */ + PMD_DRV_LOG(INFO, "Ignoring %02x completion\n", CMP_TYPE(cmp)); + break; + } -/* For the default completion ring only */ -int 
bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id) -{ - struct bnxt_cp_ring_info *cpr; - struct bnxt_ring *ring; - - cpr = rte_zmalloc_socket("cpr", - sizeof(struct bnxt_cp_ring_info), - RTE_CACHE_LINE_SIZE, socket_id); - if (cpr == NULL) - return -ENOMEM; - bp->def_cp_ring = cpr; - - ring = rte_zmalloc_socket("bnxt_cp_ring_struct", - sizeof(struct bnxt_ring), - RTE_CACHE_LINE_SIZE, socket_id); - if (ring == NULL) - return -ENOMEM; - cpr->cp_ring_struct = ring; - ring->bd = (void *)cpr->cp_desc_ring; - ring->bd_dma = cpr->cp_desc_mapping; - ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE); - ring->ring_mask = ring->ring_size - 1; - ring->vmem_size = 0; - ring->vmem = NULL; - - return 0; + return evt; } diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h index ce2b0cb8..c7af5698 100644 --- a/drivers/net/bnxt/bnxt_cpr.h +++ b/drivers/net/bnxt/bnxt_cpr.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_CPR_H_ @@ -50,12 +22,20 @@ #define ADV_RAW_CMP(idx, n) ((idx) + (n)) #define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1) #define RING_CMP(ring, idx) ((idx) & (ring)->ring_mask) +#define RING_CMPL(ring_mask, idx) ((idx) & (ring_mask)) #define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) #define FLIP_VALID(cons, mask, val) ((cons) >= (mask) ? 
!(val) : (val)) #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) +#define NEXT_CMPL(cpr, idx, v, inc) do { \ + (idx) += (inc); \ + if (unlikely((idx) == (cpr)->cp_ring_struct->ring_size)) { \ + (v) = !(v); \ + (idx) = 0; \ + } \ +} while (0) #define B_CP_DB_REARM(cpr, raw_cons) \ rte_write32((DB_CP_REARM_FLAGS | \ RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \ @@ -78,6 +58,10 @@ rte_write32((DB_CP_FLAGS | \ RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \ ((cpr)->cp_doorbell)) +#define B_CP_DB(cpr, raw_cons, ring_mask) \ + rte_write32((DB_CP_FLAGS | \ + RING_CMPL((ring_mask), raw_cons)), \ + ((cpr)->cp_doorbell)) struct bnxt_ring; struct bnxt_cp_ring_info { @@ -100,12 +84,9 @@ struct bnxt_cp_ring_info { #define RX_CMP_L2_ERRORS \ (RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_PKT_CMPL_ERRORS_CRC_ERROR) - struct bnxt; -int bnxt_alloc_def_cp_ring(struct bnxt *bp); -void bnxt_free_def_cp_ring(struct bnxt *bp); -int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id); void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp); void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp); +int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp); #endif diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index 21c46f83..cc7e4391 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
*/ #include @@ -54,15 +26,17 @@ #include "bnxt_vnic.h" #include "hsi_struct_def_dpdk.h" #include "bnxt_nvm_defs.h" +#include "bnxt_util.h" #define DRV_MODULE_NAME "bnxt" static const char bnxt_version[] = - "Broadcom Cumulus driver " DRV_MODULE_NAME "\n"; + "Broadcom NetXtreme driver " DRV_MODULE_NAME "\n"; int bnxt_logtype_driver; #define PCI_VENDOR_ID_BROADCOM 0x14E4 -#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609 +#define BROADCOM_DEV_ID_STRATUS_NIC_VF1 0x1606 +#define BROADCOM_DEV_ID_STRATUS_NIC_VF2 0x1609 #define BROADCOM_DEV_ID_STRATUS_NIC 0x1614 #define BROADCOM_DEV_ID_57414_VF 0x16c1 #define BROADCOM_DEV_ID_57301 0x16c8 @@ -97,10 +71,16 @@ int bnxt_logtype_driver; #define BROADCOM_DEV_ID_57407_MF 0x16ea #define BROADCOM_DEV_ID_57414_MF 0x16ec #define BROADCOM_DEV_ID_57416_MF 0x16ee +#define BROADCOM_DEV_ID_58802 0xd802 +#define BROADCOM_DEV_ID_58804 0xd804 +#define BROADCOM_DEV_ID_58808 0x16f0 +#define BROADCOM_DEV_ID_58802_VF 0xd800 static const struct rte_pci_id bnxt_pci_id_map[] = { { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, - BROADCOM_DEV_ID_STRATUS_NIC_VF) }, + BROADCOM_DEV_ID_STRATUS_NIC_VF1) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, + BROADCOM_DEV_ID_STRATUS_NIC_VF2) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) }, @@ -135,6 +115,10 @@ static const struct rte_pci_id bnxt_pci_id_map[] = { { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) }, { .vendor_id = 0, /* sentinel */ }, }; @@ -146,8 +130,33 @@ static const struct rte_pci_id bnxt_pci_id_map[] = { ETH_RSS_NONFRAG_IPV6_TCP | \ ETH_RSS_NONFRAG_IPV6_UDP) +#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \ + DEV_TX_OFFLOAD_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_TCP_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_TSO | \ + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \ + DEV_TX_OFFLOAD_GRE_TNL_TSO | \ + DEV_TX_OFFLOAD_IPIP_TNL_TSO | \ + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \ + DEV_TX_OFFLOAD_MULTI_SEGS) + +#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \ + DEV_RX_OFFLOAD_VLAN_STRIP | \ + DEV_RX_OFFLOAD_IPV4_CKSUM | \ + DEV_RX_OFFLOAD_UDP_CKSUM | \ + DEV_RX_OFFLOAD_TCP_CKSUM | \ + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \ + DEV_RX_OFFLOAD_JUMBO_FRAME | \ + DEV_RX_OFFLOAD_CRC_STRIP | \ + DEV_RX_OFFLOAD_KEEP_CRC | \ + DEV_RX_OFFLOAD_TCP_LRO) + static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); static void bnxt_print_link_info(struct rte_eth_dev *eth_dev); +static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu); +static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); /***********************/ @@ -164,23 +173,12 @@ static void bnxt_free_mem(struct bnxt *bp) bnxt_free_stats(bp); bnxt_free_tx_rings(bp); bnxt_free_rx_rings(bp); - bnxt_free_def_cp_ring(bp); } static int bnxt_alloc_mem(struct bnxt *bp) { int rc; - /* Default completion ring */ - rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY); - if (rc) - goto alloc_mem_err; - - rc = 
bnxt_alloc_rings(bp, 0, NULL, NULL, - bp->def_cp_ring, "def_cp"); - if (rc) - goto alloc_mem_err; - rc = bnxt_alloc_vnic_mem(bp); if (rc) goto alloc_mem_err; @@ -202,23 +200,26 @@ alloc_mem_err: static int bnxt_init_chip(struct bnxt *bp) { - unsigned int i; + struct bnxt_rx_queue *rxq; struct rte_eth_link new; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t intr_vector = 0; uint32_t queue_id, base = BNXT_MISC_VEC_ID; uint32_t vec = BNXT_MISC_VEC_ID; + unsigned int i, j; int rc; /* disable uio/vfio intr/eventfd mapping */ rte_intr_disable(intr_handle); if (bp->eth_dev->data->mtu > ETHER_MTU) { - bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; + bp->eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; bp->flags |= BNXT_FLAG_JUMBO; } else { - bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; + bp->eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; bp->flags &= ~BNXT_FLAG_JUMBO; } @@ -248,7 +249,19 @@ static int bnxt_init_chip(struct bnxt *bp) /* VNIC configuration */ for (i = 0; i < bp->nr_vnics; i++) { + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps; + + vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0); + if (!vnic->fw_grp_ids) { + PMD_DRV_LOG(ERR, + "Failed to alloc %d bytes for group ids\n", + size); + rc = -ENOMEM; + goto err_out; + } + memset(vnic->fw_grp_ids, -1, size); rc = bnxt_hwrm_vnic_alloc(bp, vnic); if (rc) { @@ -257,12 +270,15 @@ static int bnxt_init_chip(struct bnxt *bp) goto err_out; } - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic); - if (rc) { - PMD_DRV_LOG(ERR, - "HWRM vnic %d ctx alloc failure rc: %x\n", - i, rc); - goto err_out; + /* Alloc RSS context only if RSS mode is enabled */ + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) { + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic); + if (rc) { + PMD_DRV_LOG(ERR, + "HWRM vnic %d ctx alloc failure rc: %x\n", + i, rc); + goto err_out; + } } rc = bnxt_hwrm_vnic_cfg(bp, vnic); @@ -280,6 +296,13 @@ static int bnxt_init_chip(struct bnxt *bp) goto err_out; } + for (j = 0; j < bp->rx_nr_rings; j++) { + rxq = bp->eth_dev->data->rx_queues[j]; + + if (rxq->rx_deferred_start) + rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; + } + rc = bnxt_vnic_rss_configure(bp, vnic); if (rc) { PMD_DRV_LOG(ERR, @@ -289,7 +312,8 @@ static int bnxt_init_chip(struct bnxt *bp) bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); - if (bp->eth_dev->data->dev_conf.rxmode.enable_lro) + if (bp->eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_TCP_LRO) bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1); else bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0); @@ -389,10 +413,6 @@ static int bnxt_init_nic(struct bnxt *bp) bnxt_init_vnics(bp); bnxt_init_filters(bp); - rc = bnxt_init_chip(bp); - if (rc) - return rc; - return 0; } @@ -407,8 +427,6 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, uint16_t max_vnics, i, j, vpool, vrxq; unsigned int max_rx_rings; - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); - /* MAC Specifics */ dev_info->max_mac_addrs = bp->max_l2_ctx; dev_info->max_hash_mac_addrs = 0; @@ -416,13 +434,11 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, /* PF/VF specifics */ if (BNXT_PF(bp)) dev_info->max_vfs = bp->pdev->max_vfs; - max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx, - RTE_MIN(bp->max_rsscos_ctx, - bp->max_stat_ctx))); + max_rx_rings = RTE_MIN(bp->max_vnics, 
bp->max_stat_ctx); /* For the sake of symmetry, max_rx_queues = max_tx_queues */ dev_info->max_rx_queues = max_rx_rings; dev_info->max_tx_queues = max_rx_rings; - dev_info->reta_size = bp->max_rsscos_ctx; + dev_info->reta_size = HW_HASH_INDEX_SIZE; dev_info->hash_key_size = 40; max_vnics = bp->max_vnics; @@ -430,21 +446,12 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, dev_info->min_rx_bufsize = 1; dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE; - dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; - dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO | - DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_TX_OFFLOAD_VXLAN_TNL_TSO | - DEV_TX_OFFLOAD_GRE_TNL_TSO | - DEV_TX_OFFLOAD_IPIP_TNL_TSO | - DEV_TX_OFFLOAD_GENEVE_TNL_TSO; + + dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT; + if (bp->flags & BNXT_FLAG_PTP_SUPPORTED) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; + dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT; + dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; /* *INDENT-OFF* */ dev_info->default_rxconf = (struct rte_eth_rxconf) { @@ -454,7 +461,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, .wthresh = 0, }, .rx_free_thresh = 32, - .rx_drop_en = 0, + /* If no descriptors available, pkts are dropped by default */ + .rx_drop_en = 1, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -465,12 +473,14 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, }, .tx_free_thresh = 32, .tx_rs_thresh = 32, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, }; eth_dev->data->dev_conf.intr_conf.lsc = 1; eth_dev->data->dev_conf.intr_conf.rxq = 1; + dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; + dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; + dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC; + dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC; /* *INDENT-ON* */ @@ -510,18 +520,45 @@ found: static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; + int rc; bp->rx_queues = (void *)eth_dev->data->rx_queues; bp->tx_queues = (void *)eth_dev->data->tx_queues; + bp->tx_nr_rings = eth_dev->data->nb_tx_queues; + bp->rx_nr_rings = eth_dev->data->nb_rx_queues; + + if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) { + rc = bnxt_hwrm_check_vf_rings(bp); + if (rc) { + PMD_DRV_LOG(ERR, "HWRM insufficient resources\n"); + return -ENOSPC; + } + + rc = bnxt_hwrm_func_reserve_vf_resc(bp, false); + if (rc) { + PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc); + return -ENOSPC; + } + } else { + /* legacy driver needs to get updated values */ + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) { + PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc); + return rc; + } + } /* Inherit new configurations */ if (eth_dev->data->nb_rx_queues > bp->max_rx_rings || eth_dev->data->nb_tx_queues > bp->max_tx_rings || - eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues + 1 > + eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues > bp->max_cp_rings || eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues > bp->max_stat_ctx || - (uint32_t)(eth_dev->data->nb_rx_queues + 1) > bp->max_ring_grps) { + 
(uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps || + (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) && + bp->max_vnics < eth_dev->data->nb_rx_queues)) { PMD_DRV_LOG(ERR, "Insufficient resources to support requested config\n"); PMD_DRV_LOG(ERR, @@ -529,21 +566,22 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) eth_dev->data->nb_tx_queues, eth_dev->data->nb_rx_queues); PMD_DRV_LOG(ERR, - "Res available: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d\n", + "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n", bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings, - bp->max_stat_ctx, bp->max_ring_grps); + bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics); return -ENOSPC; } - bp->rx_nr_rings = eth_dev->data->nb_rx_queues; - bp->tx_nr_rings = eth_dev->data->nb_tx_queues; bp->rx_cp_nr_rings = bp->rx_nr_rings; bp->tx_cp_nr_rings = bp->tx_nr_rings; - if (eth_dev->data->dev_conf.rxmode.jumbo_frame) + if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { eth_dev->data->mtu = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - - ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE; + ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * + BNXT_NUM_VLANS; + bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu); + } return 0; } @@ -571,6 +609,7 @@ static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev) static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; int vlan_mask = 0; int rc; @@ -581,15 +620,15 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) } bp->dev_stopped = 0; - rc = bnxt_init_nic(bp); + rc = bnxt_init_chip(bp); if (rc) goto error; bnxt_link_update_op(eth_dev, 1); - if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) vlan_mask |= ETH_VLAN_FILTER_MASK; - if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip) + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) vlan_mask |= ETH_VLAN_STRIP_MASK; rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); if (rc) @@ -635,13 +674,15 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + bp->flags &= ~BNXT_FLAG_INIT_DONE; if (bp->eth_dev->data->dev_started) { /* TBD: STOP HW queues DMA */ eth_dev->data->dev_link.link_status = 0; } bnxt_set_hwrm_link_config(bp, false); bnxt_hwrm_port_clr_stats(bp); - bp->flags &= ~BNXT_FLAG_INIT_DONE; + bnxt_free_tx_mbufs(bp); + bnxt_free_rx_mbufs(bp); bnxt_shutdown_nic(bp); bp->dev_stopped = 1; } @@ -653,8 +694,6 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) if (bp->dev_stopped == 0) bnxt_dev_stop_op(eth_dev); - bnxt_free_tx_mbufs(bp); - bnxt_free_rx_mbufs(bp); bnxt_free_mem(bp); if (eth_dev->data->mac_addrs != NULL) { rte_free(eth_dev->data->mac_addrs); @@ -664,6 +703,8 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) rte_free(bp->grp_info); bp->grp_info = NULL; } + + bnxt_dev_uninit(eth_dev); } static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, @@ -771,6 +812,11 @@ out: new.link_speed != eth_dev->data->dev_link.link_speed) { memcpy(&eth_dev->data->dev_link, &new, sizeof(struct rte_eth_link)); + + _rte_eth_dev_callback_process(eth_dev, + RTE_ETH_EVENT_INTR_LSC, + NULL); + bnxt_print_link_info(eth_dev); } @@ -1282,9 +1328,9 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) struct bnxt_vnic_info *vnic; unsigned int i; int rc = 0; - uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN | -
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK; - uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN; + uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; + uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; /* Cycle through all VNICs */ for (i = 0; i < bp->nr_vnics; i++) { @@ -1331,8 +1377,8 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) memcpy(new_filter->l2_addr, filter->l2_addr, ETHER_ADDR_LEN); /* MAC + VLAN ID filter */ - new_filter->l2_ovlan = vlan_id; - new_filter->l2_ovlan_mask = 0xF000; + new_filter->l2_ivlan = vlan_id; + new_filter->l2_ivlan_mask = 0xF000; new_filter->enables |= en; rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, @@ -1366,30 +1412,31 @@ static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) { struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; unsigned int i; if (mask & ETH_VLAN_FILTER_MASK) { - if (!dev->data->dev_conf.rxmode.hw_vlan_filter) { + if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { /* Remove any VLAN filters programmed */ for (i = 0; i < 4095; i++) bnxt_del_vlan_filter(bp, i); } PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", - dev->data->dev_conf.rxmode.hw_vlan_filter); + !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)); } if (mask & ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping */ for (i = 0; i < bp->nr_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; - if (dev->data->dev_conf.rxmode.hw_vlan_strip) + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) vnic->vlan_strip = true; else vnic->vlan_strip = false; bnxt_hwrm_vnic_cfg(bp, vnic); } PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", - dev->data->dev_conf.rxmode.hw_vlan_strip); + !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)); } if (mask & ETH_VLAN_EXTEND_MASK) @@ -1398,7 +1445,7 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) return 0; } -static void +static int bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) { struct bnxt *bp = (struct bnxt *)dev->data->dev_private; @@ -1408,7 +1455,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) int rc; if (BNXT_VF(bp)) - return; + return -EPERM; memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr)); @@ -1418,7 +1465,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) continue; rc = bnxt_hwrm_clear_l2_filter(bp, filter); if (rc) - break; + return rc; memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN); memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN); filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; @@ -1427,10 +1474,12 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK; rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); if (rc) - break; + return rc; filter->mac_index = 0; PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); } + + return 0; } static int @@ -1515,7 +1564,6 @@ bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_free_thresh = txq->tx_free_thresh; qinfo->conf.tx_rs_thresh = 0; - qinfo->conf.txq_flags = txq->txq_flags; qinfo->conf.tx_deferred_start = txq->tx_deferred_start; } @@ -1540,9 +1588,11 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) if (new_mtu > ETHER_MTU) { bp->flags |= BNXT_FLAG_JUMBO; - eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; + 
bp->eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; } else { - eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; + bp->eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; bp->flags &= ~BNXT_FLAG_JUMBO; } @@ -1554,6 +1604,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) for (i = 0; i < bp->nr_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + uint16_t size = 0; vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; @@ -1561,9 +1612,14 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) if (rc) break; - rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); - if (rc) - return rc; + size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); + size -= RTE_PKTMBUF_HEADROOM; + + if (size < new_mtu) { + rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); + if (rc) + return rc; + } } return rc; @@ -2358,7 +2414,8 @@ bnxt_parse_fdir_filter(struct bnxt *bp, } static struct bnxt_filter_info * -bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf) +bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf, + struct bnxt_vnic_info **mvnic) { struct bnxt_filter_info *mf = NULL; int i; @@ -2396,8 +2453,11 @@ bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf) !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, sizeof(nf->dst_ipaddr)) && !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, - sizeof(nf->dst_ipaddr_mask))) + sizeof(nf->dst_ipaddr_mask))) { + if (mvnic) + *mvnic = vnic; return mf; + } } } return NULL; @@ -2411,7 +2471,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev, struct bnxt *bp = (struct bnxt *)dev->data->dev_private; struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg; struct bnxt_filter_info *filter, *match; - struct bnxt_vnic_info *vnic; + struct bnxt_vnic_info *vnic, *mvnic; int ret = 0, i; if (filter_op == RTE_ETH_FILTER_NOP) @@ -2436,11 +2496,31 @@ bnxt_fdir_filter(struct rte_eth_dev *dev, goto free_filter; filter->filter_type = HWRM_CFA_NTUPLE_FILTER; - match = bnxt_match_fdir(bp, filter); + if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) + vnic = STAILQ_FIRST(&bp->ff_pool[0]); + else + vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]); + + match = bnxt_match_fdir(bp, filter, &mvnic); if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) { - PMD_DRV_LOG(ERR, "Flow already exists.\n"); - ret = -EEXIST; - goto free_filter; + if (match->dst_id == vnic->fw_vnic_id) { + PMD_DRV_LOG(ERR, "Flow already exists.\n"); + ret = -EEXIST; + goto free_filter; + } else { + match->dst_id = vnic->fw_vnic_id; + ret = bnxt_hwrm_set_ntuple_filter(bp, + match->dst_id, + match); + STAILQ_REMOVE(&mvnic->filter, match, + bnxt_filter_info, next); + STAILQ_INSERT_TAIL(&vnic->filter, match, next); + PMD_DRV_LOG(ERR, + "Filter with matching pattern exist\n"); + PMD_DRV_LOG(ERR, + "Updated it to new destination q\n"); + goto free_filter; + } } if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) { PMD_DRV_LOG(ERR, "Flow does not exist.\n"); @@ -2448,12 +2528,6 @@ bnxt_fdir_filter(struct rte_eth_dev *dev, goto free_filter; } - if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) - vnic = STAILQ_FIRST(&bp->ff_pool[0]); - else - vnic = - STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]); - if (filter_op == RTE_ETH_FILTER_ADD) { ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, @@ -2489,7 +2563,6 @@ bnxt_fdir_filter(struct rte_eth_dev *dev, case RTE_ETH_FILTER_UPDATE: case RTE_ETH_FILTER_STATS: case RTE_ETH_FILTER_INFO: - /* FALLTHROUGH */ 
PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op); break; default: @@ -2876,6 +2949,7 @@ static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) case BNX_DIR_TYPE_KONG_PATCH: case BNX_DIR_TYPE_BONO_FW: case BNX_DIR_TYPE_BONO_PATCH: + /* FALLTHROUGH */ return true; } @@ -2894,6 +2968,7 @@ static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) case BNX_DIR_TYPE_ISCSI_BOOT: case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: + /* FALLTHROUGH */ return true; } @@ -3032,7 +3107,20 @@ static bool bnxt_vf_pciid(uint16_t id) id == BROADCOM_DEV_ID_5731X_VF || id == BROADCOM_DEV_ID_5741X_VF || id == BROADCOM_DEV_ID_57414_VF || - id == BROADCOM_DEV_ID_STRATUS_NIC_VF) + id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 || + id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 || + id == BROADCOM_DEV_ID_58802_VF) + return true; + return false; +} + +bool bnxt_stratus_device(struct bnxt *bp) +{ + uint16_t id = bp->pdev->id.device_id; + + if (id == BROADCOM_DEV_ID_STRATUS_NIC || + id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 || + id == BROADCOM_DEV_ID_STRATUS_NIC_VF2) return true; return false; } @@ -3060,18 +3148,29 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev) rc = -ENOMEM; goto init_err_release; } + + if (!pci_dev->mem_resource[2].addr) { + PMD_DRV_LOG(ERR, + "Cannot find PCI device BAR 2 address, aborting\n"); + rc = -ENODEV; + goto init_err_release; + } else { + bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; + } + return 0; init_err_release: if (bp->bar0) bp->bar0 = NULL; + if (bp->doorbell_base) + bp->doorbell_base = NULL; init_err_disable: return rc; } -static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); #define ALLOW_FUNC(x) \ { \ @@ -3098,7 +3197,6 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev) bp = eth_dev->data->dev_private; - rte_atomic64_init(&bp->rx_mbuf_alloc_fail); bp->dev_stopped = 1; if (rte_eal_process_type() != RTE_PROC_PRIMARY) @@ -3115,12 +3213,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev) } skip_init: eth_dev->dev_ops = &bnxt_dev_ops; - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; eth_dev->rx_pkt_burst = &bnxt_recv_pkts; eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; - if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) { + if (pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) { snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid, @@ -3131,9 +3229,10 @@ skip_init: sizeof(struct rx_port_stats) + 512); if (!mz) { mz = rte_memzone_reserve(mz_name, total_alloc_len, - SOCKET_ID_ANY, - RTE_MEMZONE_2MB | - RTE_MEMZONE_SIZE_HINT_ONLY); + SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY | + RTE_MEMZONE_IOVA_CONTIG); if (mz == NULL) return -ENOMEM; } @@ -3165,10 +3264,12 @@ skip_init: total_alloc_len = RTE_CACHE_LINE_ROUNDUP( sizeof(struct tx_port_stats) + 512); if (!mz) { - mz = rte_memzone_reserve(mz_name, total_alloc_len, - SOCKET_ID_ANY, - RTE_MEMZONE_2MB | - RTE_MEMZONE_SIZE_HINT_ONLY); + mz = rte_memzone_reserve(mz_name, + total_alloc_len, + SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY | + RTE_MEMZONE_IOVA_CONTIG); if (mz == NULL) return -ENOMEM; } @@ -3236,7 +3337,7 @@ skip_init: goto error_free; } - if (check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) { + if (bnxt_check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) { PMD_DRV_LOG(ERR, "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n", bp->dflt_mac_addr[0], bp->dflt_mac_addr[1], @@ -3344,17 
+3445,13 @@ skip_init: if (rc) goto error_free_int; - rc = bnxt_alloc_def_cp_ring(bp); - if (rc) - goto error_free_int; - bnxt_enable_int(bp); + bnxt_init_nic(bp); return 0; error_free_int: bnxt_disable_int(bp); - bnxt_free_def_cp_ring(bp); bnxt_hwrm_func_buf_unrgtr(bp); bnxt_free_int(bp); bnxt_free_mem(bp); @@ -3365,13 +3462,15 @@ error: } static int -bnxt_dev_uninit(struct rte_eth_dev *eth_dev) { +bnxt_dev_uninit(struct rte_eth_dev *eth_dev) +{ struct bnxt *bp = eth_dev->data->dev_private; int rc; if (rte_eal_process_type() != RTE_PROC_PRIMARY) return -EPERM; + PMD_DRV_LOG(DEBUG, "Calling Device uninit\n"); bnxt_disable_int(bp); bnxt_free_int(bp); bnxt_free_mem(bp); @@ -3385,8 +3484,17 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) { } rc = bnxt_hwrm_func_driver_unregister(bp, 0); bnxt_free_hwrm_resources(bp); - rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); - rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); + + if (bp->tx_mem_zone) { + rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); + bp->tx_mem_zone = NULL; + } + + if (bp->rx_mem_zone) { + rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); + bp->rx_mem_zone = NULL; + } + if (bp->dev_stopped == 0) bnxt_dev_close_op(eth_dev); if (bp->pf.vf_info) @@ -3432,13 +3540,11 @@ bool is_bnxt_supported(struct rte_eth_dev *dev) return is_device_supported(dev, &bnxt_rte_pmd); } -RTE_INIT(bnxt_init_log); -static void -bnxt_init_log(void) +RTE_INIT(bnxt_init_log) { bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver"); if (bnxt_logtype_driver >= 0) - rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE); + rte_log_set_level(bnxt_logtype_driver, RTE_LOG_INFO); } RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd); diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c index 032e8eed..1038941e 100644 --- a/drivers/net/bnxt/bnxt_filter.c +++ b/drivers/net/bnxt/bnxt_filter.c @@ -1,38 +1,11 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #include +#include #include #include #include @@ -96,9 +69,9 @@ void bnxt_init_filters(struct bnxt *bp) STAILQ_INIT(&bp->free_filter_list); for (i = 0; i < max_filters; i++) { filter = &bp->filter_info[i]; - filter->fw_l2_filter_id = -1; - filter->fw_em_filter_id = -1; - filter->fw_ntuple_filter_id = -1; + filter->fw_l2_filter_id = UINT64_MAX; + filter->fw_em_filter_id = UINT64_MAX; + filter->fw_ntuple_filter_id = UINT64_MAX; STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next); } } @@ -144,21 +117,42 @@ void bnxt_free_filter_mem(struct bnxt *bp) max_filters = bp->max_l2_ctx; for (i = 0; i < max_filters; i++) { filter = &bp->filter_info[i]; - if (filter->fw_l2_filter_id != ((uint64_t)-1)) { - PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n"); + if (filter->fw_l2_filter_id != ((uint64_t)-1) && + filter->filter_type == HWRM_CFA_L2_FILTER) { + PMD_DRV_LOG(ERR, "L2 filter is not free\n"); /* Call HWRM to try to free filter again */ rc = bnxt_hwrm_clear_l2_filter(bp, filter); if (rc) PMD_DRV_LOG(ERR, - "HWRM filter cannot be freed rc = %d\n", - rc); + "Cannot free L2 filter: %d\n", + rc); } filter->fw_l2_filter_id = UINT64_MAX; + + if (filter->fw_ntuple_filter_id != ((uint64_t)-1) && + filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { + PMD_DRV_LOG(ERR, "NTUPLE filter is not free\n"); + /* Call HWRM to try to free filter again */ + rc = bnxt_hwrm_clear_ntuple_filter(bp, filter); + if (rc) + PMD_DRV_LOG(ERR, + "Cannot free NTUPLE filter: %d\n", + rc); + } + filter->fw_ntuple_filter_id = UINT64_MAX; } STAILQ_INIT(&bp->free_filter_list); rte_free(bp->filter_info); bp->filter_info = NULL; + + for (i = 0; i < bp->pf.max_vfs; i++) { + STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) { + rte_free(filter); + STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter, + bnxt_filter_info, next); + } + } } int bnxt_alloc_filter_mem(struct bnxt *bp) @@ -199,1046 +193,3 @@ void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter) { STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next); } - -static int -bnxt_flow_agrs_validate(const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) -{ - if (!pattern) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM_NUM, - NULL, "NULL pattern."); - return -rte_errno; - } - - if (!actions) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_NUM, - NULL, "NULL action."); - return -rte_errno; - } - - if (!attr) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR, - NULL, "NULL attribute."); - return -rte_errno; - } - - return 0; -} - -static const struct rte_flow_item * -nxt_non_void_pattern(const struct rte_flow_item *cur) -{ - while (1) { - if (cur->type != RTE_FLOW_ITEM_TYPE_VOID) - return cur; - cur++; - } -} - -static const struct rte_flow_action * -nxt_non_void_action(const struct rte_flow_action *cur) -{ - while (1) { - if (cur->type != RTE_FLOW_ACTION_TYPE_VOID) - return cur; - cur++; - } -} - -int check_zero_bytes(const uint8_t *bytes, int len) -{ - int i; - for (i = 0; i < len; i++) - if (bytes[i] != 0x00) - return 0; - return 1; -} - -static int -bnxt_filter_type_check(const struct rte_flow_item pattern[], - struct rte_flow_error *error __rte_unused) -{ - const struct rte_flow_item *item = nxt_non_void_pattern(pattern); - int use_ntuple = 1; - - while (item->type != RTE_FLOW_ITEM_TYPE_END) { - switch (item->type) { - 
case RTE_FLOW_ITEM_TYPE_ETH: - use_ntuple = 1; - break; - case RTE_FLOW_ITEM_TYPE_VLAN: - use_ntuple = 0; - break; - case RTE_FLOW_ITEM_TYPE_IPV4: - case RTE_FLOW_ITEM_TYPE_IPV6: - case RTE_FLOW_ITEM_TYPE_TCP: - case RTE_FLOW_ITEM_TYPE_UDP: - /* FALLTHROUGH */ - /* need ntuple match, reset exact match */ - if (!use_ntuple) { - PMD_DRV_LOG(ERR, - "VLAN flow cannot use NTUPLE filter\n"); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Cannot use VLAN with NTUPLE"); - return -rte_errno; - } - use_ntuple |= 1; - break; - default: - PMD_DRV_LOG(ERR, "Unknown Flow type"); - use_ntuple |= 1; - } - item++; - } - return use_ntuple; -} - -static int -bnxt_validate_and_parse_flow_type(struct bnxt *bp, - const struct rte_flow_item pattern[], - struct rte_flow_error *error, - struct bnxt_filter_info *filter) -{ - const struct rte_flow_item *item = nxt_non_void_pattern(pattern); - const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; - const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; - const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; - const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; - const struct rte_flow_item_udp *udp_spec, *udp_mask; - const struct rte_flow_item_eth *eth_spec, *eth_mask; - const struct rte_flow_item_nvgre *nvgre_spec; - const struct rte_flow_item_nvgre *nvgre_mask; - const struct rte_flow_item_vxlan *vxlan_spec; - const struct rte_flow_item_vxlan *vxlan_mask; - uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF}; - uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF}; - const struct rte_flow_item_vf *vf_spec; - uint32_t tenant_id_be = 0; - bool vni_masked = 0; - bool tni_masked = 0; - uint32_t vf = 0; - int use_ntuple; - uint32_t en = 0; - int dflt_vnic; - - use_ntuple = bnxt_filter_type_check(pattern, error); - PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple); - if (use_ntuple < 0) - return use_ntuple; - - filter->filter_type = use_ntuple ? - HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER; - - while (item->type != RTE_FLOW_ITEM_TYPE_END) { - if (item->last) { - /* last or range is NOT supported as match criteria */ - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "No support for range"); - return -rte_errno; - } - if (!item->spec || !item->mask) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "spec/mask is NULL"); - return -rte_errno; - } - switch (item->type) { - case RTE_FLOW_ITEM_TYPE_ETH: - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; - - /* Source MAC address mask cannot be partially set. - * Should be All 0's or all 1's. - * Destination MAC address mask must not be partially - * set. Should be all 1's or all 0's. - */ - if ((!is_zero_ether_addr(&eth_mask->src) && - !is_broadcast_ether_addr(&eth_mask->src)) || - (!is_zero_ether_addr(&eth_mask->dst) && - !is_broadcast_ether_addr(&eth_mask->dst))) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "MAC_addr mask not valid"); - return -rte_errno; - } - - /* Mask is not allowed. Only exact matches are */ - if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "ethertype mask not valid"); - return -rte_errno; - } - - if (is_broadcast_ether_addr(&eth_mask->dst)) { - rte_memcpy(filter->dst_macaddr, - &eth_spec->dst, 6); - en |= use_ntuple ?
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR : - EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR; - } - if (is_broadcast_ether_addr(&eth_mask->src)) { - rte_memcpy(filter->src_macaddr, - &eth_spec->src, 6); - en |= use_ntuple ? - NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR : - EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR; - } /* - * else { - * RTE_LOG(ERR, PMD, "Handle this condition\n"); - * } - */ - if (eth_spec->type) { - filter->ethertype = - rte_be_to_cpu_16(eth_spec->type); - en |= use_ntuple ? - NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE : - EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE; - } - - break; - case RTE_FLOW_ITEM_TYPE_VLAN: - vlan_spec = - (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = - (const struct rte_flow_item_vlan *)item->mask; - if (vlan_mask->tci & 0xFFFF && !vlan_mask->tpid) { - /* Only the VLAN ID can be matched. */ - filter->l2_ovlan = - rte_be_to_cpu_16(vlan_spec->tci & - 0xFFF); - en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; - } else { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "VLAN mask is invalid"); - return -rte_errno; - } - - break; - case RTE_FLOW_ITEM_TYPE_IPV4: - /* If mask is not involved, we could use EM filters. */ - ipv4_spec = - (const struct rte_flow_item_ipv4 *)item->spec; - ipv4_mask = - (const struct rte_flow_item_ipv4 *)item->mask; - /* Only IP DST and SRC fields are maskable. */ - if (ipv4_mask->hdr.version_ihl || - ipv4_mask->hdr.type_of_service || - ipv4_mask->hdr.total_length || - ipv4_mask->hdr.packet_id || - ipv4_mask->hdr.fragment_offset || - ipv4_mask->hdr.time_to_live || - ipv4_mask->hdr.next_proto_id || - ipv4_mask->hdr.hdr_checksum) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid IPv4 mask."); - return -rte_errno; - } - filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr; - filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr; - if (use_ntuple) - en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; - else - en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | - EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; - if (ipv4_mask->hdr.src_addr) { - filter->src_ipaddr_mask[0] = - ipv4_mask->hdr.src_addr; - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; - } - if (ipv4_mask->hdr.dst_addr) { - filter->dst_ipaddr_mask[0] = - ipv4_mask->hdr.dst_addr; - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; - } - filter->ip_addr_type = use_ntuple ? - HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 : - HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; - if (ipv4_spec->hdr.next_proto_id) { - filter->ip_protocol = - ipv4_spec->hdr.next_proto_id; - if (use_ntuple) - en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; - else - en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO; - } - break; - case RTE_FLOW_ITEM_TYPE_IPV6: - ipv6_spec = - (const struct rte_flow_item_ipv6 *)item->spec; - ipv6_mask = - (const struct rte_flow_item_ipv6 *)item->mask; - - /* Only IP DST and SRC fields are maskable.
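/*
 * A standalone sketch (illustrative names, not the real HWRM
 * constants) of the "enables" accumulation pattern the parser above
 * repeats for every matched field: each field ORs in the enable bit
 * corresponding to the chosen filter type, ntuple or exact-match.
 */
#include <stdint.h>

#define EN_SRC_IPADDR_NTUPLE	(1u << 0)	/* hypothetical bit values */
#define EN_SRC_IPADDR_EM	(1u << 1)

static uint32_t enable_src_ipaddr(uint32_t en, int use_ntuple)
{
	return en | (use_ntuple ? EN_SRC_IPADDR_NTUPLE : EN_SRC_IPADDR_EM);
}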
*/ - if (ipv6_mask->hdr.vtc_flow || - ipv6_mask->hdr.payload_len || - ipv6_mask->hdr.proto || - ipv6_mask->hdr.hop_limits) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid IPv6 mask."); - return -rte_errno; - } - - if (use_ntuple) - en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; - else - en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | - EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; - rte_memcpy(filter->src_ipaddr, - ipv6_spec->hdr.src_addr, 16); - rte_memcpy(filter->dst_ipaddr, - ipv6_spec->hdr.dst_addr, 16); - if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) { - rte_memcpy(filter->src_ipaddr_mask, - ipv6_mask->hdr.src_addr, 16); - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; - } - if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) { - rte_memcpy(filter->dst_ipaddr_mask, - ipv6_mask->hdr.dst_addr, 16); - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; - } - filter->ip_addr_type = use_ntuple ? - NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 : - EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; - break; - case RTE_FLOW_ITEM_TYPE_TCP: - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; - - /* Check TCP mask. Only DST & SRC ports are maskable */ - if (tcp_mask->hdr.sent_seq || - tcp_mask->hdr.recv_ack || - tcp_mask->hdr.data_off || - tcp_mask->hdr.tcp_flags || - tcp_mask->hdr.rx_win || - tcp_mask->hdr.cksum || - tcp_mask->hdr.tcp_urp) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid TCP mask"); - return -rte_errno; - } - filter->src_port = tcp_spec->hdr.src_port; - filter->dst_port = tcp_spec->hdr.dst_port; - if (use_ntuple) - en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; - else - en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | - EM_FLOW_ALLOC_INPUT_EN_DST_PORT; - if (tcp_mask->hdr.dst_port) { - filter->dst_port_mask = tcp_mask->hdr.dst_port; - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; - } - if (tcp_mask->hdr.src_port) { - filter->src_port_mask = tcp_mask->hdr.src_port; - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; - } - break; - case RTE_FLOW_ITEM_TYPE_UDP: - udp_spec = (const struct rte_flow_item_udp *)item->spec; - udp_mask = (const struct rte_flow_item_udp *)item->mask; - - if (udp_mask->hdr.dgram_len || - udp_mask->hdr.dgram_cksum) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid UDP mask"); - return -rte_errno; - } - - filter->src_port = udp_spec->hdr.src_port; - filter->dst_port = udp_spec->hdr.dst_port; - if (use_ntuple) - en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; - else - en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | - EM_FLOW_ALLOC_INPUT_EN_DST_PORT; - - if (udp_mask->hdr.dst_port) { - filter->dst_port_mask = udp_mask->hdr.dst_port; - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; - } - if (udp_mask->hdr.src_port) { - filter->src_port_mask = udp_mask->hdr.src_port; - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; - } - break; - case RTE_FLOW_ITEM_TYPE_VXLAN: - vxlan_spec = - (const struct rte_flow_item_vxlan *)item->spec; - vxlan_mask = - (const struct rte_flow_item_vxlan *)item->mask; - /* Check if VXLAN item is used to describe protocol. - * If yes, both spec and mask should be NULL. - * If no, both spec and mask shouldn't be NULL. 
- */ - if ((!vxlan_spec && vxlan_mask) || - (vxlan_spec && !vxlan_mask)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid VXLAN item"); - return -rte_errno; - } - - if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] || - vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] || - vxlan_spec->flags != 0x8) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid VXLAN item"); - return -rte_errno; - } - - /* Check if VNI is masked. */ - if (vxlan_spec && vxlan_mask) { - vni_masked = - !!memcmp(vxlan_mask->vni, vni_mask, - RTE_DIM(vni_mask)); - if (vni_masked) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid VNI mask"); - return -rte_errno; - } - - rte_memcpy(((uint8_t *)&tenant_id_be + 1), - vxlan_spec->vni, 3); - filter->vni = - rte_be_to_cpu_32(tenant_id_be); - filter->tunnel_type = - CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; - } - break; - case RTE_FLOW_ITEM_TYPE_NVGRE: - nvgre_spec = - (const struct rte_flow_item_nvgre *)item->spec; - nvgre_mask = - (const struct rte_flow_item_nvgre *)item->mask; - /* Check if NVGRE item is used to describe protocol. - * If yes, both spec and mask should be NULL. - * If no, both spec and mask shouldn't be NULL. - */ - if ((!nvgre_spec && nvgre_mask) || - (nvgre_spec && !nvgre_mask)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid NVGRE item"); - return -rte_errno; - } - - if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 || - nvgre_spec->protocol != 0x6558) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid NVGRE item"); - return -rte_errno; - } - - if (nvgre_spec && nvgre_mask) { - tni_masked = - !!memcmp(nvgre_mask->tni, tni_mask, - RTE_DIM(tni_mask)); - if (tni_masked) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid TNI mask"); - return -rte_errno; - } - rte_memcpy(((uint8_t *)&tenant_id_be + 1), - nvgre_spec->tni, 3); - filter->vni = - rte_be_to_cpu_32(tenant_id_be); - filter->tunnel_type = - CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; - } - break; - case RTE_FLOW_ITEM_TYPE_VF: - vf_spec = (const struct rte_flow_item_vf *)item->spec; - vf = vf_spec->id; - if (!BNXT_PF(bp)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Configuring on a VF!"); - return -rte_errno; - } - - if (vf >= bp->pdev->max_vfs) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Incorrect VF id!"); - return -rte_errno; - } - - filter->mirror_vnic_id = - dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); - if (dflt_vnic < 0) { - /* This simply indicates there's no driver - * loaded. This is not an error. 
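/*
 * A standalone sketch of the 24-bit tunnel-id handling above: the
 * VXLAN VNI (or NVGRE TNI) arrives as three big-endian bytes, is
 * copied into bytes 1..3 of a zeroed 32-bit value, and one byte swap
 * yields the host-order id.  ntohl() stands in for rte_be_to_cpu_32().
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

static uint32_t vni24_to_host(const uint8_t vni[3])
{
	uint32_t tenant_id_be = 0;

	memcpy((uint8_t *)&tenant_id_be + 1, vni, 3);
	return ntohl(tenant_id_be);
}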
- */ - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Unable to get default VNIC for VF"); - return -rte_errno; - } - filter->mirror_vnic_id = dflt_vnic; - en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; - break; - default: - break; - } - item++; - } - filter->enables = en; - - return 0; -} - -/* Parse attributes */ -static int -bnxt_flow_parse_attr(const struct rte_flow_attr *attr, - struct rte_flow_error *error) -{ - /* Must be input direction */ - if (!attr->ingress) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, - attr, "Only support ingress."); - return -rte_errno; - } - - /* Not supported */ - if (attr->egress) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, - attr, "No support for egress."); - return -rte_errno; - } - - /* Not supported */ - if (attr->priority) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, - attr, "No support for priority."); - return -rte_errno; - } - - /* Not supported */ - if (attr->group) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_GROUP, - attr, "No support for group."); - return -rte_errno; - } - - return 0; -} - -struct bnxt_filter_info * -bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, - struct bnxt_vnic_info *vnic) -{ - struct bnxt_filter_info *filter1, *f0; - struct bnxt_vnic_info *vnic0; - int rc; - - vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); - f0 = STAILQ_FIRST(&vnic0->filter); - - //This flow has same DST MAC as the port/l2 filter. - if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0) - return f0; - - //This flow needs DST MAC which is not same as port/l2 - PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n"); - filter1 = bnxt_get_unused_filter(bp); - if (filter1 == NULL) - return NULL; - filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; - filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | - L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK; - memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN); - memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN); - rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, - filter1); - if (rc) { - bnxt_free_filter(bp, filter1); - return NULL; - } - return filter1; -} - -static int -bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - const struct rte_flow_attr *attr, - struct rte_flow_error *error, - struct bnxt_filter_info *filter) -{ - const struct rte_flow_action *act = nxt_non_void_action(actions); - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; - const struct rte_flow_action_queue *act_q; - const struct rte_flow_action_vf *act_vf; - struct bnxt_vnic_info *vnic, *vnic0; - struct bnxt_filter_info *filter1; - uint32_t vf = 0; - int dflt_vnic; - int rc; - - if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) { - PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n"); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Cannot create flow on RSS queues"); - rc = -rte_errno; - goto ret; - } - - rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter); - if (rc != 0) - goto ret; - - rc = bnxt_flow_parse_attr(attr, error); - if (rc != 0) - goto ret; - //Since we support ingress attribute only - right now. - filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX; - - switch (act->type) { - case RTE_FLOW_ACTION_TYPE_QUEUE: - /* Allow this flow. Redirect to a VNIC. 
*/ - act_q = (const struct rte_flow_action_queue *)act->conf; - if (act_q->index >= bp->rx_nr_rings) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, act, - "Invalid queue ID."); - rc = -rte_errno; - goto ret; - } - PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index); - - vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); - vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]); - if (vnic == NULL) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, act, - "No matching VNIC for queue ID."); - rc = -rte_errno; - goto ret; - } - filter->dst_id = vnic->fw_vnic_id; - filter1 = bnxt_get_l2_filter(bp, filter, vnic); - if (filter1 == NULL) { - rc = -ENOSPC; - goto ret; - } - filter->fw_l2_filter_id = filter1->fw_l2_filter_id; - PMD_DRV_LOG(DEBUG, "VNIC found\n"); - break; - case RTE_FLOW_ACTION_TYPE_DROP: - vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); - filter1 = bnxt_get_l2_filter(bp, filter, vnic0); - if (filter1 == NULL) { - rc = -ENOSPC; - goto ret; - } - filter->fw_l2_filter_id = filter1->fw_l2_filter_id; - if (filter->filter_type == HWRM_CFA_EM_FILTER) - filter->flags = - HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP; - else - filter->flags = - HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; - break; - case RTE_FLOW_ACTION_TYPE_COUNT: - vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); - filter1 = bnxt_get_l2_filter(bp, filter, vnic0); - if (filter1 == NULL) { - rc = -ENOSPC; - goto ret; - } - filter->fw_l2_filter_id = filter1->fw_l2_filter_id; - filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER; - break; - case RTE_FLOW_ACTION_TYPE_VF: - act_vf = (const struct rte_flow_action_vf *)act->conf; - vf = act_vf->id; - if (!BNXT_PF(bp)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - act, - "Configuring on a VF!"); - rc = -rte_errno; - goto ret; - } - - if (vf >= bp->pdev->max_vfs) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - act, - "Incorrect VF id!"); - rc = -rte_errno; - goto ret; - } - - filter->mirror_vnic_id = - dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); - if (dflt_vnic < 0) { - /* This simply indicates there's no driver loaded. - * This is not an error. 
- */ - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - act, - "Unable to get default VNIC for VF"); - rc = -rte_errno; - goto ret; - } - filter->mirror_vnic_id = dflt_vnic; - filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; - - vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); - filter1 = bnxt_get_l2_filter(bp, filter, vnic0); - if (filter1 == NULL) { - rc = -ENOSPC; - goto ret; - } - filter->fw_l2_filter_id = filter1->fw_l2_filter_id; - break; - - default: - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, act, - "Invalid action."); - rc = -rte_errno; - goto ret; - } - - if (filter1) { - bnxt_free_filter(bp, filter1); - filter1->fw_l2_filter_id = -1; - } - - act = nxt_non_void_action(++act); - if (act->type != RTE_FLOW_ACTION_TYPE_END) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - act, "Invalid action."); - rc = -rte_errno; - goto ret; - } -ret: - return rc; -} - -static int -bnxt_flow_validate(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) -{ - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; - struct bnxt_filter_info *filter; - int ret = 0; - - ret = bnxt_flow_agrs_validate(attr, pattern, actions, error); - if (ret != 0) - return ret; - - filter = bnxt_get_unused_filter(bp); - if (filter == NULL) { - PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n"); - return -ENOMEM; - } - - ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, - error, filter); - /* No need to hold on to this filter if we are just validating flow */ - filter->fw_l2_filter_id = -1; - bnxt_free_filter(bp, filter); - - return ret; -} - -static int -bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf) -{ - struct bnxt_filter_info *mf; - struct rte_flow *flow; - int i; - - for (i = bp->nr_vnics - 1; i >= 0; i--) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; - - STAILQ_FOREACH(flow, &vnic->flow_list, next) { - mf = flow->filter; - - if (mf->filter_type == nf->filter_type && - mf->flags == nf->flags && - mf->src_port == nf->src_port && - mf->src_port_mask == nf->src_port_mask && - mf->dst_port == nf->dst_port && - mf->dst_port_mask == nf->dst_port_mask && - mf->ip_protocol == nf->ip_protocol && - mf->ip_addr_type == nf->ip_addr_type && - mf->ethertype == nf->ethertype && - mf->vni == nf->vni && - mf->tunnel_type == nf->tunnel_type && - mf->l2_ovlan == nf->l2_ovlan && - mf->l2_ovlan_mask == nf->l2_ovlan_mask && - mf->l2_ivlan == nf->l2_ivlan && - mf->l2_ivlan_mask == nf->l2_ivlan_mask && - !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) && - !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, - ETHER_ADDR_LEN) && - !memcmp(mf->src_macaddr, nf->src_macaddr, - ETHER_ADDR_LEN) && - !memcmp(mf->dst_macaddr, nf->dst_macaddr, - ETHER_ADDR_LEN) && - !memcmp(mf->src_ipaddr, nf->src_ipaddr, - sizeof(nf->src_ipaddr)) && - !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, - sizeof(nf->src_ipaddr_mask)) && - !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, - sizeof(nf->dst_ipaddr)) && - !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, - sizeof(nf->dst_ipaddr_mask))) { - if (mf->dst_id == nf->dst_id) - return -EEXIST; - /* Same Flow, Different queue - * Clear the old ntuple filter - */ - if (nf->filter_type == HWRM_CFA_EM_FILTER) - bnxt_hwrm_clear_em_filter(bp, mf); - if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER) - bnxt_hwrm_clear_ntuple_filter(bp, mf); - /* Free the old filter, update flow - * with new filter - */ - 
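/*
 * A sketch of the return contract bnxt_match_filter() implements and
 * bnxt_flow_create() relies on; the mapping itself comes from the
 * code above, the helper is illustrative only.
 */
#include <errno.h>

static const char *match_result(int rc)
{
	switch (rc) {
	case 0:
		return "no duplicate, program the new filter";
	case -EEXIST:
		return "same pattern and destination, reject";
	case -EXDEV:
		return "same pattern, new destination, update the flow";
	default:
		return "unexpected";
	}
}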
bnxt_free_filter(bp, mf); - flow->filter = nf; - return -EXDEV; - } - } - } - return 0; -} - -static struct rte_flow * -bnxt_flow_create(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) -{ - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; - struct bnxt_filter_info *filter; - struct bnxt_vnic_info *vnic = NULL; - bool update_flow = false; - struct rte_flow *flow; - unsigned int i; - int ret = 0; - - flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0); - if (!flow) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to allocate memory"); - return flow; - } - - ret = bnxt_flow_agrs_validate(attr, pattern, actions, error); - if (ret != 0) { - PMD_DRV_LOG(ERR, "Not a validate flow.\n"); - goto free_flow; - } - - filter = bnxt_get_unused_filter(bp); - if (filter == NULL) { - PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n"); - goto free_flow; - } - - ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, - error, filter); - if (ret != 0) - goto free_filter; - - ret = bnxt_match_filter(bp, filter); - if (ret == -EEXIST) { - PMD_DRV_LOG(DEBUG, "Flow already exists.\n"); - /* Clear the filter that was created as part of - * validate_and_parse_flow() above - */ - bnxt_hwrm_clear_l2_filter(bp, filter); - goto free_filter; - } else if (ret == -EXDEV) { - PMD_DRV_LOG(DEBUG, "Flow with same pattern exists"); - PMD_DRV_LOG(DEBUG, "Updating with different destination\n"); - update_flow = true; - } - - if (filter->filter_type == HWRM_CFA_EM_FILTER) { - filter->enables |= - HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID; - ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter); - } - if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { - filter->enables |= - HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; - ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter); - } - - for (i = 0; i < bp->nr_vnics; i++) { - vnic = &bp->vnic_info[i]; - if (filter->dst_id == vnic->fw_vnic_id) - break; - } - - if (!ret) { - flow->filter = filter; - flow->vnic = vnic; - if (update_flow) { - ret = -EXDEV; - goto free_flow; - } - PMD_DRV_LOG(ERR, "Successfully created flow.\n"); - STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next); - return flow; - } -free_filter: - bnxt_free_filter(bp, filter); -free_flow: - if (ret == -EEXIST) - rte_flow_error_set(error, ret, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Matching Flow exists."); - else if (ret == -EXDEV) - rte_flow_error_set(error, ret, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Flow with pattern exists, updating destination queue"); - else - rte_flow_error_set(error, -ret, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to create flow."); - rte_free(flow); - flow = NULL; - return flow; -} - -static int -bnxt_flow_destroy(struct rte_eth_dev *dev, - struct rte_flow *flow, - struct rte_flow_error *error) -{ - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; - struct bnxt_filter_info *filter = flow->filter; - struct bnxt_vnic_info *vnic = flow->vnic; - int ret = 0; - - ret = bnxt_match_filter(bp, filter); - if (ret == 0) - PMD_DRV_LOG(ERR, "Could not find matching flow\n"); - if (filter->filter_type == HWRM_CFA_EM_FILTER) - ret = bnxt_hwrm_clear_em_filter(bp, filter); - if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) - ret = bnxt_hwrm_clear_ntuple_filter(bp, filter); - - bnxt_hwrm_clear_l2_filter(bp, filter); - if (!ret) { - STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, 
next); - rte_free(flow); - } else { - rte_flow_error_set(error, -ret, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to destroy flow."); - } - - return ret; -} - -static int -bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) -{ - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; - struct bnxt_vnic_info *vnic; - struct rte_flow *flow; - unsigned int i; - int ret = 0; - - for (i = 0; i < bp->nr_vnics; i++) { - vnic = &bp->vnic_info[i]; - STAILQ_FOREACH(flow, &vnic->flow_list, next) { - struct bnxt_filter_info *filter = flow->filter; - - if (filter->filter_type == HWRM_CFA_EM_FILTER) - ret = bnxt_hwrm_clear_em_filter(bp, filter); - if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) - ret = bnxt_hwrm_clear_ntuple_filter(bp, filter); - - if (ret) { - rte_flow_error_set(error, -ret, - RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, - "Failed to flush flow in HW."); - return -rte_errno; - } - - STAILQ_REMOVE(&vnic->flow_list, flow, - rte_flow, next); - rte_free(flow); - } - } - - return ret; -} - -const struct rte_flow_ops bnxt_flow_ops = { - .validate = bnxt_flow_validate, - .create = bnxt_flow_create, - .destroy = bnxt_flow_destroy, - .flush = bnxt_flow_flush, -}; diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h index a3c702df..a1ecfb19 100644 --- a/drivers/net/bnxt/bnxt_filter.h +++ b/drivers/net/bnxt/bnxt_filter.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
*/ #ifndef _BNXT_FILTER_H_ @@ -97,7 +69,6 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp); void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter); struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic); -int check_zero_bytes(const uint8_t *bytes, int len); #define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c new file mode 100644 index 00000000..ac765674 --- /dev/null +++ b/drivers/net/bnxt/bnxt_flow.c @@ -0,0 +1,1171 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#include + +#include +#include +#include +#include +#include + +#include "bnxt.h" +#include "bnxt_filter.h" +#include "bnxt_hwrm.h" +#include "bnxt_vnic.h" +#include "bnxt_util.h" +#include "hsi_struct_def_dpdk.h" + +static int +bnxt_flow_args_validate(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + if (!pattern) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, + "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, + "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, + "NULL attribute."); + return -rte_errno; + } + + return 0; +} + +static const struct rte_flow_item * +bnxt_flow_non_void_item(const struct rte_flow_item *cur) +{ + while (1) { + if (cur->type != RTE_FLOW_ITEM_TYPE_VOID) + return cur; + cur++; + } +} + +static const struct rte_flow_action * +bnxt_flow_non_void_action(const struct rte_flow_action *cur) +{ + while (1) { + if (cur->type != RTE_FLOW_ACTION_TYPE_VOID) + return cur; + cur++; + } +} + +static int +bnxt_filter_type_check(const struct rte_flow_item pattern[], + struct rte_flow_error *error __rte_unused) +{ + const struct rte_flow_item *item = + bnxt_flow_non_void_item(pattern); + int use_ntuple = 1; + + while (item->type != RTE_FLOW_ITEM_TYPE_END) { + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + use_ntuple = 1; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + use_ntuple = 0; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + case RTE_FLOW_ITEM_TYPE_IPV6: + case RTE_FLOW_ITEM_TYPE_TCP: + case RTE_FLOW_ITEM_TYPE_UDP: + /* FALLTHROUGH */ + /* need ntuple match, reset exact match */ + if (!use_ntuple) { + PMD_DRV_LOG(ERR, + "VLAN flow cannot use NTUPLE filter\n"); + rte_flow_error_set + (error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Cannot use VLAN with NTUPLE"); + return -rte_errno; + } + use_ntuple |= 1; + break; + default: + PMD_DRV_LOG(ERR, "Unknown Flow type\n"); + use_ntuple |= 1; + } + item++; + } + return use_ntuple; +} + +static int +bnxt_validate_and_parse_flow_type(struct bnxt *bp, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + struct rte_flow_error *error, + struct bnxt_filter_info *filter) +{ + const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern); + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; + const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; + const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; + const struct rte_flow_item_udp *udp_spec, *udp_mask; + const struct rte_flow_item_eth 
*eth_spec, *eth_mask; + const struct rte_flow_item_nvgre *nvgre_spec; + const struct rte_flow_item_nvgre *nvgre_mask; + const struct rte_flow_item_vxlan *vxlan_spec; + const struct rte_flow_item_vxlan *vxlan_mask; + uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF}; + uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF}; + const struct rte_flow_item_vf *vf_spec; + uint32_t tenant_id_be = 0; + bool vni_masked = 0; + bool tni_masked = 0; + uint32_t vf = 0; + int use_ntuple; + uint32_t en = 0; + uint32_t en_ethertype; + int dflt_vnic; + + use_ntuple = bnxt_filter_type_check(pattern, error); + PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple); + if (use_ntuple < 0) + return use_ntuple; + + filter->filter_type = use_ntuple ? + HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER; + en_ethertype = use_ntuple ? + NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE : + EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE; + + while (item->type != RTE_FLOW_ITEM_TYPE_END) { + if (item->last) { + /* last or range is NOT supported as match criteria */ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "No support for range"); + return -rte_errno; + } + + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "spec/mask is NULL"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth_spec = item->spec; + eth_mask = item->mask; + + /* Source MAC address mask cannot be partially set. + * Should be All 0's or all 1's. + * Destination MAC address mask must not be partially + * set. Should be all 1's or all 0's. + */ + if ((!is_zero_ether_addr(&eth_mask->src) && + !is_broadcast_ether_addr(&eth_mask->src)) || + (!is_zero_ether_addr(&eth_mask->dst) && + !is_broadcast_ether_addr(&eth_mask->dst))) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "MAC_addr mask not valid"); + return -rte_errno; + } + + /* Mask is not allowed. Only exact matches are */ + if (eth_mask->type && + eth_mask->type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ethertype mask not valid"); + return -rte_errno; + } + + if (is_broadcast_ether_addr(&eth_mask->dst)) { + rte_memcpy(filter->dst_macaddr, + &eth_spec->dst, 6); + en |= use_ntuple ? + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR : + EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR; + } + + if (is_broadcast_ether_addr(&eth_mask->src)) { + rte_memcpy(filter->src_macaddr, + &eth_spec->src, 6); + en |= use_ntuple ? + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR : + EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR; + } /* + * else { + * PMD_DRV_LOG(ERR, "Handle this condition\n"); + * } + */ + if (eth_mask->type) { + filter->ethertype = + rte_be_to_cpu_16(eth_spec->type); + en |= en_ethertype; + } + + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + vlan_spec = item->spec; + vlan_mask = item->mask; + if (en & en_ethertype) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VLAN TPID matching is not" + " supported"); + return -rte_errno; + } + if (vlan_mask->tci && + vlan_mask->tci == RTE_BE16(0x0fff)) { + /* Only the VLAN ID can be matched.
*/ + filter->l2_ovlan = + rte_be_to_cpu_16(vlan_spec->tci & + RTE_BE16(0x0fff)); + en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; + } else { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VLAN mask is invalid"); + return -rte_errno; + } + if (vlan_mask->inner_type && + vlan_mask->inner_type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "inner ethertype mask not" + " valid"); + return -rte_errno; + } + if (vlan_mask->inner_type) { + filter->ethertype = + rte_be_to_cpu_16(vlan_spec->inner_type); + en |= en_ethertype; + } + + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + /* If mask is not involved, we could use EM filters. */ + ipv4_spec = item->spec; + ipv4_mask = item->mask; + /* Only IP DST and SRC fields are maskable. */ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.next_proto_id || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 mask."); + return -rte_errno; + } + + filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr; + filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr; + + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | + EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; + + if (ipv4_mask->hdr.src_addr) { + filter->src_ipaddr_mask[0] = + ipv4_mask->hdr.src_addr; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + } + + if (ipv4_mask->hdr.dst_addr) { + filter->dst_ipaddr_mask[0] = + ipv4_mask->hdr.dst_addr; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + } + + filter->ip_addr_type = use_ntuple ? + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 : + HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; + + if (ipv4_spec->hdr.next_proto_id) { + filter->ip_protocol = + ipv4_spec->hdr.next_proto_id; + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + else + en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + ipv6_spec = item->spec; + ipv6_mask = item->mask; + + /* Only IP DST and SRC fields are maskable. */ + if (ipv6_mask->hdr.vtc_flow || + ipv6_mask->hdr.payload_len || + ipv6_mask->hdr.proto || + ipv6_mask->hdr.hop_limits) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv6 mask."); + return -rte_errno; + } + + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | + EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; + + rte_memcpy(filter->src_ipaddr, + ipv6_spec->hdr.src_addr, 16); + rte_memcpy(filter->dst_ipaddr, + ipv6_spec->hdr.dst_addr, 16); + + if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr, + 16)) { + rte_memcpy(filter->src_ipaddr_mask, + ipv6_mask->hdr.src_addr, 16); + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + } + + if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr, + 16)) { + rte_memcpy(filter->dst_ipaddr_mask, + ipv6_mask->hdr.dst_addr, 16); + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + } + + filter->ip_addr_type = use_ntuple ? 
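/*
 * A standalone sketch of the VLAN handling above: the 16-bit TCI
 * packs PCP(3) | DEI(1) | VID(12), and the parser accepts only a
 * mask selecting exactly the twelve VID bits (big-endian 0x0fff).
 * ntohs()/htons() stand in for the rte_be16 helpers.
 */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

static bool vlan_mask_ok(uint16_t tci_mask_be)
{
	return tci_mask_be == htons(0x0fff);	/* VID-only match */
}

static uint16_t tci_to_vid(uint16_t tci_be)
{
	return ntohs(tci_be) & 0x0fff;
}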
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 : + EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + tcp_spec = item->spec; + tcp_mask = item->mask; + + /* Check TCP mask. Only DST & SRC ports are maskable */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid TCP mask"); + return -rte_errno; + } + + filter->src_port = tcp_spec->hdr.src_port; + filter->dst_port = tcp_spec->hdr.dst_port; + + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | + EM_FLOW_ALLOC_INPUT_EN_DST_PORT; + + if (tcp_mask->hdr.dst_port) { + filter->dst_port_mask = tcp_mask->hdr.dst_port; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + } + + if (tcp_mask->hdr.src_port) { + filter->src_port_mask = tcp_mask->hdr.src_port; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + } + break; + case RTE_FLOW_ITEM_TYPE_UDP: + udp_spec = item->spec; + udp_mask = item->mask; + + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid UDP mask"); + return -rte_errno; + } + + filter->src_port = udp_spec->hdr.src_port; + filter->dst_port = udp_spec->hdr.dst_port; + + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | + EM_FLOW_ALLOC_INPUT_EN_DST_PORT; + + if (udp_mask->hdr.dst_port) { + filter->dst_port_mask = udp_mask->hdr.dst_port; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + } + + if (udp_mask->hdr.src_port) { + filter->src_port_mask = udp_mask->hdr.src_port; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + } + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + vxlan_spec = item->spec; + vxlan_mask = item->mask; + /* Check if VXLAN item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!vxlan_spec && vxlan_mask) || + (vxlan_spec && !vxlan_mask)) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VXLAN item"); + return -rte_errno; + } + + if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] || + vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] || + vxlan_spec->flags != 0x8) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VXLAN item"); + return -rte_errno; + } + + /* Check if VNI is masked. */ + if (vxlan_spec && vxlan_mask) { + vni_masked = + !!memcmp(vxlan_mask->vni, vni_mask, + RTE_DIM(vni_mask)); + if (vni_masked) { + rte_flow_error_set + (error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VNI mask"); + return -rte_errno; + } + + rte_memcpy(((uint8_t *)&tenant_id_be + 1), + vxlan_spec->vni, 3); + filter->vni = + rte_be_to_cpu_32(tenant_id_be); + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + } + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + nvgre_spec = item->spec; + nvgre_mask = item->mask; + /* Check if NVGRE item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. 
+ */ + if ((!nvgre_spec && nvgre_mask) || + (nvgre_spec && !nvgre_mask)) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + + if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 || + nvgre_spec->protocol != 0x6558) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + + if (nvgre_spec && nvgre_mask) { + tni_masked = + !!memcmp(nvgre_mask->tni, tni_mask, + RTE_DIM(tni_mask)); + if (tni_masked) { + rte_flow_error_set + (error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid TNI mask"); + return -rte_errno; + } + rte_memcpy(((uint8_t *)&tenant_id_be + 1), + nvgre_spec->tni, 3); + filter->vni = + rte_be_to_cpu_32(tenant_id_be); + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; + } + break; + case RTE_FLOW_ITEM_TYPE_VF: + vf_spec = item->spec; + vf = vf_spec->id; + + if (!BNXT_PF(bp)) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Configuring on a VF!"); + return -rte_errno; + } + + if (vf >= bp->pdev->max_vfs) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Incorrect VF id!"); + return -rte_errno; + } + + if (!attr->transfer) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Matching VF traffic without" + " affecting it (transfer attribute)" + " is unsupported"); + return -rte_errno; + } + + filter->mirror_vnic_id = + dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); + if (dflt_vnic < 0) { + /* This simply indicates there's no driver + * loaded. This is not an error. + */ + rte_flow_error_set + (error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unable to get default VNIC for VF"); + return -rte_errno; + } + + filter->mirror_vnic_id = dflt_vnic; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; + break; + default: + break; + } + item++; + } + filter->enables = en; + + return 0; +} + +/* Parse attributes */ +static int +bnxt_flow_parse_attr(const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + /* Must be input direction */ + if (!attr->ingress) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, + "Only support ingress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->egress) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, + "No support for egress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->priority) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, + "No support for priority."); + return -rte_errno; + } + + /* Not supported */ + if (attr->group) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, + "No support for group."); + return -rte_errno; + } + + return 0; +} + +struct bnxt_filter_info * +bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, + struct bnxt_vnic_info *vnic) +{ + struct bnxt_filter_info *filter1, *f0; + struct bnxt_vnic_info *vnic0; + int rc; + + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + f0 = STAILQ_FIRST(&vnic0->filter); + + /* This flow has same DST MAC as the port/l2 filter. 
*/ + if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0) + return f0; + + /* This flow needs DST MAC which is not same as port/l2 */ + PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n"); + filter1 = bnxt_get_unused_filter(bp); + if (filter1 == NULL) + return NULL; + + filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; + filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | + L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK; + memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN); + memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN); + rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, + filter1); + if (rc) { + bnxt_free_filter(bp, filter1); + return NULL; + } + return filter1; +} + +static int +bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + const struct rte_flow_attr *attr, + struct rte_flow_error *error, + struct bnxt_filter_info *filter) +{ + const struct rte_flow_action *act = + bnxt_flow_non_void_action(actions); + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_vf *act_vf; + struct bnxt_vnic_info *vnic, *vnic0; + struct bnxt_filter_info *filter1; + uint32_t vf = 0; + int dflt_vnic; + int rc; + + if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) { + PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n"); + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Cannot create flow on RSS queues"); + rc = -rte_errno; + goto ret; + } + + rc = + bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter); + if (rc != 0) + goto ret; + + rc = bnxt_flow_parse_attr(attr, error); + if (rc != 0) + goto ret; + + /* Since we support ingress attribute only - right now. */ + if (filter->filter_type == HWRM_CFA_EM_FILTER) + filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX; + + switch (act->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + /* Allow this flow. Redirect to a VNIC. 
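/*
 * A standalone sketch of the decision bnxt_get_l2_filter() makes
 * above: when the flow's destination MAC equals the port's default
 * L2 filter address, that filter is reused; otherwise a dedicated
 * L2 filter has to be allocated and programmed first.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAC_LEN 6	/* ETHER_ADDR_LEN */

static bool reuse_default_l2_filter(const uint8_t port_mac[MAC_LEN],
				    const uint8_t flow_dst_mac[MAC_LEN])
{
	return memcmp(port_mac, flow_dst_mac, MAC_LEN) == 0;
}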
*/ + act_q = (const struct rte_flow_action_queue *)act->conf; + if (act_q->index >= bp->rx_nr_rings) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Invalid queue ID."); + rc = -rte_errno; + goto ret; + } + PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index); + + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]); + if (vnic == NULL) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "No matching VNIC for queue ID."); + rc = -rte_errno; + goto ret; + } + + filter->dst_id = vnic->fw_vnic_id; + filter1 = bnxt_get_l2_filter(bp, filter, vnic); + if (filter1 == NULL) { + rc = -ENOSPC; + goto ret; + } + + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + PMD_DRV_LOG(DEBUG, "VNIC found\n"); + break; + case RTE_FLOW_ACTION_TYPE_DROP: + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + if (filter1 == NULL) { + rc = -ENOSPC; + goto ret; + } + + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + if (filter->filter_type == HWRM_CFA_EM_FILTER) + filter->flags = + HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP; + else + filter->flags = + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + if (filter1 == NULL) { + rc = -ENOSPC; + goto ret; + } + + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER; + break; + case RTE_FLOW_ACTION_TYPE_VF: + act_vf = (const struct rte_flow_action_vf *)act->conf; + vf = act_vf->id; + + if (!BNXT_PF(bp)) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Configuring on a VF!"); + rc = -rte_errno; + goto ret; + } + + if (vf >= bp->pdev->max_vfs) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Incorrect VF id!"); + rc = -rte_errno; + goto ret; + } + + filter->mirror_vnic_id = + dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); + if (dflt_vnic < 0) { + /* This simply indicates there's no driver loaded. + * This is not an error. 
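/*
 * A standalone sketch of the QUEUE action check above: the queue
 * index must name a configured Rx ring, since each Rx ring maps onto
 * a VNIC from which the filter's destination id is taken.
 */
#include <stdbool.h>
#include <stdint.h>

static bool queue_action_valid(uint32_t index, uint32_t rx_nr_rings)
{
	return index < rx_nr_rings;
}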
+ */ + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Unable to get default VNIC for VF"); + rc = -rte_errno; + goto ret; + } + + filter->mirror_vnic_id = dflt_vnic; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; + + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + if (filter1 == NULL) { + rc = -ENOSPC; + goto ret; + } + + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Invalid action."); + rc = -rte_errno; + goto ret; + } + + if (filter1) { + bnxt_free_filter(bp, filter1); + filter1->fw_l2_filter_id = -1; + } + + act = bnxt_flow_non_void_action(++act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Invalid action."); + rc = -rte_errno; + goto ret; + } +ret: + return rc; +} + +static int +bnxt_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt_filter_info *filter; + int ret = 0; + + ret = bnxt_flow_args_validate(attr, pattern, actions, error); + if (ret != 0) + return ret; + + filter = bnxt_get_unused_filter(bp); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n"); + return -ENOMEM; + } + + ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, + error, filter); + /* No need to hold on to this filter if we are just validating flow */ + filter->fw_l2_filter_id = UINT64_MAX; + bnxt_free_filter(bp, filter); + + return ret; +} + +static int +bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf) +{ + struct bnxt_filter_info *mf; + struct rte_flow *flow; + int i; + + for (i = bp->nr_vnics - 1; i >= 0; i--) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + STAILQ_FOREACH(flow, &vnic->flow_list, next) { + mf = flow->filter; + + if (mf->filter_type == nf->filter_type && + mf->flags == nf->flags && + mf->src_port == nf->src_port && + mf->src_port_mask == nf->src_port_mask && + mf->dst_port == nf->dst_port && + mf->dst_port_mask == nf->dst_port_mask && + mf->ip_protocol == nf->ip_protocol && + mf->ip_addr_type == nf->ip_addr_type && + mf->ethertype == nf->ethertype && + mf->vni == nf->vni && + mf->tunnel_type == nf->tunnel_type && + mf->l2_ovlan == nf->l2_ovlan && + mf->l2_ovlan_mask == nf->l2_ovlan_mask && + mf->l2_ivlan == nf->l2_ivlan && + mf->l2_ivlan_mask == nf->l2_ivlan_mask && + !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) && + !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, + ETHER_ADDR_LEN) && + !memcmp(mf->src_macaddr, nf->src_macaddr, + ETHER_ADDR_LEN) && + !memcmp(mf->dst_macaddr, nf->dst_macaddr, + ETHER_ADDR_LEN) && + !memcmp(mf->src_ipaddr, nf->src_ipaddr, + sizeof(nf->src_ipaddr)) && + !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, + sizeof(nf->src_ipaddr_mask)) && + !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, + sizeof(nf->dst_ipaddr)) && + !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, + sizeof(nf->dst_ipaddr_mask))) { + if (mf->dst_id == nf->dst_id) + return -EEXIST; + /* + * Same Flow, Different queue + * Clear the old ntuple filter + * Reuse the matching L2 filter + * ID for the new filter + */ + nf->fw_l2_filter_id = mf->fw_l2_filter_id; + if (nf->filter_type == HWRM_CFA_EM_FILTER) + bnxt_hwrm_clear_em_filter(bp, mf); + if (nf->filter_type == 
HWRM_CFA_NTUPLE_FILTER) + bnxt_hwrm_clear_ntuple_filter(bp, mf); + /* Free the old filter, update flow + * with new filter + */ + bnxt_free_filter(bp, mf); + flow->filter = nf; + return -EXDEV; + } + } + } + return 0; +} + +static struct rte_flow * +bnxt_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt_filter_info *filter; + struct bnxt_vnic_info *vnic = NULL; + bool update_flow = false; + struct rte_flow *flow; + unsigned int i; + int ret = 0; + + flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0); + if (!flow) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate memory"); + return flow; + } + + ret = bnxt_flow_args_validate(attr, pattern, actions, error); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Not a validate flow.\n"); + goto free_flow; + } + + filter = bnxt_get_unused_filter(bp); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n"); + goto free_flow; + } + + ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, + error, filter); + if (ret != 0) + goto free_filter; + + ret = bnxt_match_filter(bp, filter); + if (ret == -EEXIST) { + PMD_DRV_LOG(DEBUG, "Flow already exists.\n"); + /* Clear the filter that was created as part of + * validate_and_parse_flow() above + */ + bnxt_hwrm_clear_l2_filter(bp, filter); + goto free_filter; + } else if (ret == -EXDEV) { + PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n"); + PMD_DRV_LOG(DEBUG, "Updating with different destination\n"); + update_flow = true; + } + + if (filter->filter_type == HWRM_CFA_EM_FILTER) { + filter->enables |= + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter); + } + + if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { + filter->enables |= + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter); + } + + for (i = 0; i < bp->nr_vnics; i++) { + vnic = &bp->vnic_info[i]; + if (filter->dst_id == vnic->fw_vnic_id) + break; + } + + if (!ret) { + flow->filter = filter; + flow->vnic = vnic; + if (update_flow) { + ret = -EXDEV; + goto free_flow; + } + PMD_DRV_LOG(ERR, "Successfully created flow.\n"); + STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next); + return flow; + } +free_filter: + bnxt_free_filter(bp, filter); +free_flow: + if (ret == -EEXIST) + rte_flow_error_set(error, ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Matching Flow exists."); + else if (ret == -EXDEV) + rte_flow_error_set(error, ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Flow with pattern exists, updating destination queue"); + else + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow."); + rte_free(flow); + flow = NULL; + return flow; +} + +static int +bnxt_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt_filter_info *filter = flow->filter; + struct bnxt_vnic_info *vnic = flow->vnic; + int ret = 0; + + ret = bnxt_match_filter(bp, filter); + if (ret == 0) + PMD_DRV_LOG(ERR, "Could not find matching flow\n"); + if (filter->filter_type == HWRM_CFA_EM_FILTER) + ret = bnxt_hwrm_clear_em_filter(bp, filter); + if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + ret = 
bnxt_hwrm_clear_ntuple_filter(bp, filter); + else + ret = bnxt_hwrm_clear_l2_filter(bp, filter); + if (!ret) { + STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next); + rte_free(flow); + } else { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to destroy flow."); + } + + return ret; +} + +static int +bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt_vnic_info *vnic; + struct rte_flow *flow; + unsigned int i; + int ret = 0; + + for (i = 0; i < bp->nr_vnics; i++) { + vnic = &bp->vnic_info[i]; + STAILQ_FOREACH(flow, &vnic->flow_list, next) { + struct bnxt_filter_info *filter = flow->filter; + + if (filter->filter_type == HWRM_CFA_EM_FILTER) + ret = bnxt_hwrm_clear_em_filter(bp, filter); + if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + ret = bnxt_hwrm_clear_ntuple_filter(bp, filter); + + if (ret) { + rte_flow_error_set + (error, + -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Failed to flush flow in HW."); + return -rte_errno; + } + + STAILQ_REMOVE(&vnic->flow_list, flow, + rte_flow, next); + rte_free(flow); + } + } + + return ret; +} + +const struct rte_flow_ops bnxt_flow_ops = { + .validate = bnxt_flow_validate, + .create = bnxt_flow_create, + .destroy = bnxt_flow_destroy, + .flush = bnxt_flow_flush, +}; diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c index b7843afe..c682488a 100644 --- a/drivers/net/bnxt/bnxt_hwrm.c +++ b/drivers/net/bnxt/bnxt_hwrm.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
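/*
 * The flush loop above frees list elements from inside
 * STAILQ_FOREACH(); the conventional safe idiom re-reads the head on
 * every iteration.  A standalone sketch with the generic
 * <sys/queue.h> macros (not driver code):
 */
#include <stdlib.h>
#include <sys/queue.h>

struct node {
	STAILQ_ENTRY(node) next;
};
STAILQ_HEAD(node_list, node);

static void drain(struct node_list *head)
{
	struct node *n;

	while ((n = STAILQ_FIRST(head)) != NULL) {
		STAILQ_REMOVE_HEAD(head, next);
		free(n);
	}
}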
*/ #include @@ -55,6 +27,8 @@ #include #define HWRM_CMD_TIMEOUT 10000 +#define HWRM_SPEC_CODE_1_8_3 0x10803 +#define HWRM_VERSION_1_9_1 0x10901 struct bnxt_plcmodes_cfg { uint32_t flags; @@ -115,7 +89,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, short_input.req_type = rte_cpu_to_le_16(req->req_type); short_input.signature = rte_cpu_to_le_16( - HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD); + HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD); short_input.size = rte_cpu_to_le_16(msg_len); short_input.req_addr = rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr); @@ -192,10 +166,26 @@ err_ret: req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \ } while (0) +#define HWRM_CHECK_RESULT_SILENT() do {\ + if (rc) { \ + rte_spinlock_unlock(&bp->hwrm_lock); \ + return rc; \ + } \ + if (resp->error_code) { \ + rc = rte_le_to_cpu_16(resp->error_code); \ + rte_spinlock_unlock(&bp->hwrm_lock); \ + return rc; \ + } \ +} while (0) + #define HWRM_CHECK_RESULT() do {\ if (rc) { \ PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \ rte_spinlock_unlock(&bp->hwrm_lock); \ + if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \ + rc = -EACCES; \ + else if (rc > 0) \ + rc = -EINVAL; \ return rc; \ } \ if (resp->error_code) { \ @@ -214,6 +204,10 @@ err_ret: PMD_DRV_LOG(ERR, "error %d\n", rc); \ } \ rte_spinlock_unlock(&bp->hwrm_lock); \ + if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \ + rc = -EACCES; \ + else if (rc > 0) \ + rc = -EINVAL; \ return rc; \ } \ } while (0) @@ -248,6 +242,9 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr; uint32_t mask = 0; + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + return rc; + HWRM_PREP(req, CFA_L2_SET_RX_MASK); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); @@ -344,7 +341,7 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp, HWRM_CHECK_RESULT(); HWRM_UNLOCK(); - filter->fw_l2_filter_id = -1; + filter->fw_l2_filter_id = UINT64_MAX; return 0; } @@ -399,13 +396,13 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp, req.l2_ovlan = filter->l2_ovlan; if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN) - req.l2_ovlan = filter->l2_ivlan; + req.l2_ivlan = filter->l2_ivlan; if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK) req.l2_ovlan_mask = filter->l2_ovlan_mask; if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK) - req.l2_ovlan_mask = filter->l2_ivlan_mask; + req.l2_ivlan_mask = filter->l2_ivlan_mask; if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID) req.src_id = rte_cpu_to_le_32(filter->src_id); if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE) @@ -436,16 +433,18 @@ int bnxt_hwrm_ptp_cfg(struct bnxt *bp) HWRM_PREP(req, PORT_MAC_CFG); if (ptp->rx_filter) - flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; + flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; else - flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE; + flags |= + HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE; if (ptp->tx_tstamp_en) - flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE; + flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE; else - flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE; + flags |= + HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE; req.flags = rte_cpu_to_le_32(flags); - req.enables = - rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); + req.enables = rte_cpu_to_le_32 + (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); req.rx_ts_capture_ptp_msg_type = 
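/*
 * A standalone sketch of the errno mapping the reworked
 * HWRM_CHECK_RESULT()/HWRM_CHECK_RESULT_SILENT() macros above apply:
 * positive firmware status codes are turned into negative errnos
 * before being returned to callers.  The numeric value of the
 * access-denied code is assumed here for illustration.
 */
#include <errno.h>

#define FW_ERR_RESOURCE_ACCESS_DENIED 5	/* hypothetical value */

static int fw_status_to_errno(int rc)
{
	if (rc == FW_ERR_RESOURCE_ACCESS_DENIED)
		return -EACCES;
	if (rc > 0)
		return -EINVAL;
	return rc;	/* 0 or an already-negative errno */
}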
rte_cpu_to_le_16(ptp->rxctl); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -473,7 +472,7 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp) HWRM_CHECK_RESULT(); - if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) + if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) return 0; ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0); @@ -505,7 +504,7 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp) return 0; } -int bnxt_hwrm_func_qcaps(struct bnxt *bp) +static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) { int rc = 0; struct hwrm_func_qcaps_input req = {.req_type = 0 }; @@ -527,6 +526,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) if (BNXT_PF(bp)) { bp->pf.port_id = resp->port_id; bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id); + bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs); new_max_vfs = bp->pdev->max_vfs; if (new_max_vfs != bp->pf.max_vfs) { if (bp->pf.vf_info) @@ -595,6 +595,20 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) return rc; } +int bnxt_hwrm_func_qcaps(struct bnxt *bp) +{ + int rc; + + rc = __bnxt_hwrm_func_qcaps(bp); + if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) { + rc = bnxt_hwrm_func_resc_qcaps(bp); + if (!rc) + bp->flags |= BNXT_FLAG_NEW_RM; + } + + return rc; +} + int bnxt_hwrm_func_reset(struct bnxt *bp) { int rc = 0; @@ -631,10 +645,19 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) if (BNXT_PF(bp)) { req.enables |= rte_cpu_to_le_32( - HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD); + HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD); memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd, RTE_MIN(sizeof(req.vf_req_fwd), sizeof(bp->pf.vf_req_fwd))); + + /* + * PF can sniff HWRM API issued by VF. This can be set up by + * linux driver and inherited by the DPDK PF driver. Clear + * this HWRM sniffer list in FW because DPDK PF driver does + * not support this. 
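The reworked HWRM_CHECK_RESULT() earlier in this file folds positive firmware status codes into negative errno values before returning. A standalone sketch of that mapping; the numeric value of the error code is assumed here, not taken from this patch:

#include <errno.h>

/* Assumed value, for the sketch only; the real constant comes from the
 * generated HSI header. */
#define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3

static int hwrm_status_to_errno(int rc)
{
	if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED)
		return -EACCES;	/* firmware refused the request */
	if (rc > 0)
		return -EINVAL;	/* any other firmware status */
	return rc;		/* 0 on success, or an existing -errno */
}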
+ */ + req.flags = + rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE); } req.async_event_fwd[0] |= @@ -655,6 +678,105 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) return rc; } +int bnxt_hwrm_check_vf_rings(struct bnxt *bp) +{ + if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM))) + return 0; + + return bnxt_hwrm_func_reserve_vf_resc(bp, true); +} + +int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test) +{ + int rc; + uint32_t flags = 0; + uint32_t enables; + struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_vf_cfg_input req = {0}; + + HWRM_PREP(req, FUNC_VF_CFG); + + req.enables = rte_cpu_to_le_32 + (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS); + + req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings); + req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings * + AGG_RING_MULTIPLIER); + req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings); + req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings + + bp->tx_nr_rings); + req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings); + req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings); + if (bp->vf_resv_strategy == + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) { + enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS; + req.enables |= rte_cpu_to_le_32(enables); + req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX); + req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX); + req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC); + } + + if (test) + flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST; + + req.flags = rte_cpu_to_le_32(flags); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + if (test) + HWRM_CHECK_RESULT_SILENT(); + else + HWRM_CHECK_RESULT(); + + HWRM_UNLOCK(); + return rc; +} + +int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp) +{ + int rc; + struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_resource_qcaps_input req = {0}; + + HWRM_PREP(req, FUNC_RESOURCE_QCAPS); + req.fid = rte_cpu_to_le_16(0xffff); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT(); + + if (BNXT_VF(bp)) { + bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx); + bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings); + bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings); + bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings); + bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps); + bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs); + bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics); + bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); + } + bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy); + if (bp->vf_resv_strategy > + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) + bp->vf_resv_strategy = + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL; + + HWRM_UNLOCK(); + return rc; +} + int 
bnxt_hwrm_ver_get(struct bnxt *bp) { int rc = 0; @@ -678,11 +800,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) HWRM_CHECK_RESULT(); PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n", - resp->hwrm_intf_maj, resp->hwrm_intf_min, - resp->hwrm_intf_upd, - resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld); - bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) | - (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd; + resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, + resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b, + resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b); + bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) | + (resp->hwrm_fw_min_8b << 16) | + (resp->hwrm_fw_bld_8b << 8) | + resp->hwrm_fw_rsvd_8b; PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n", HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE); @@ -690,11 +814,12 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) my_version |= HWRM_VERSION_MINOR << 8; my_version |= HWRM_VERSION_UPDATE; - fw_version = resp->hwrm_intf_maj << 16; - fw_version |= resp->hwrm_intf_min << 8; - fw_version |= resp->hwrm_intf_upd; + fw_version = resp->hwrm_intf_maj_8b << 16; + fw_version |= resp->hwrm_intf_min_8b << 8; + fw_version |= resp->hwrm_intf_upd_8b; + bp->hwrm_spec_code = fw_version; - if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) { + if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) { PMD_DRV_LOG(ERR, "Unsupported firmware API version\n"); rc = -EINVAL; goto error; @@ -750,7 +875,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && (dev_caps_cfg & - HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) { + HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) { PMD_DRV_LOG(DEBUG, "Short command supported\n"); rte_free(bp->hwrm_short_cmd_req_addr); @@ -919,9 +1044,15 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) int rc = 0; struct hwrm_queue_qportcfg_input req = {.req_type = 0 }; struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; + int i; HWRM_PREP(req, QUEUE_QPORTCFG); + req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX; + /* HWRM Version >= 1.9.1 */ + if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1) + req.drv_qmap_cap = + HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); HWRM_CHECK_RESULT(); @@ -941,6 +1072,20 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) HWRM_UNLOCK(); + if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) { + bp->tx_cosq_id = bp->cos_queue[0].id; + } else { + /* iterate and find the COSq profile to use for Tx */ + for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) { + if (bp->cos_queue[i].profile == + HWRM_QUEUE_SERVICE_PROFILE_LOSSY) { + bp->tx_cosq_id = bp->cos_queue[i].id; + break; + } + } + } + PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id); + return rc; } @@ -964,7 +1109,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, switch (ring_type) { case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX: - req.queue_id = bp->cos_queue[0].id; + req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id); /* FALLTHROUGH */ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX: req.ring_type = ring_type; @@ -1182,8 +1327,9 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) /* map ring groups to this vnic */ PMD_DRV_LOG(DEBUG, "Alloc VNIC. 
Start %x, End %x\n", vnic->start_grp_id, vnic->end_grp_id); - for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) + for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++) vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id; + vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id; vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE; vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE; @@ -1193,7 +1339,8 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) HWRM_PREP(req, VNIC_ALLOC); if (vnic->func_default) - req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT; + req.flags = + rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); HWRM_CHECK_RESULT(); @@ -1214,7 +1361,7 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp, HWRM_PREP(req, VNIC_PLCMODES_QCFG); - req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -1242,7 +1389,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp, HWRM_PREP(req, VNIC_PLCMODES_CFG); - req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); req.flags = rte_cpu_to_le_32(pmode->flags); req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh); req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset); @@ -1451,6 +1598,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, HWRM_PREP(req, VNIC_RSS_CFG); req.hash_type = rte_cpu_to_le_32(vnic->hash_type); + req.hash_mode_flags = vnic->hash_mode; req.ring_grp_tbl_addr = rte_cpu_to_le_64(vnic->rss_table_dma_addr); @@ -1474,6 +1622,11 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp, struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr; uint16_t size; + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { + PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id); + return rc; + } + HWRM_PREP(req, VNIC_PLCMODES_CFG); req.flags = rte_cpu_to_le_32( @@ -1486,7 +1639,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp, size -= RTE_PKTMBUF_HEADROOM; req.jumbo_thresh = rte_cpu_to_le_16(size); - req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -1517,12 +1670,12 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp, HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO | HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN | HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ); - req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); req.max_agg_segs = rte_cpu_to_le_16(5); req.max_aggs = rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX); req.min_agg_len = rte_cpu_to_le_32(512); } + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -1607,10 +1760,9 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid, stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes); stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes); - stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts); - stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts); - - stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts); + stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts); + stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts); + stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts); HWRM_UNLOCK(); @@ -1732,8 +1884,7 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp) return rc; } -static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, - unsigned int idx 
__rte_unused) +static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) { struct bnxt_ring *cp_ring = cpr->cp_ring_struct; @@ -1745,17 +1896,52 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, cpr->cp_raw_cons = 0; } +void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index) +{ + struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index]; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + struct bnxt_ring *ring = rxr->rx_ring_struct; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + bnxt_hwrm_ring_free(bp, ring, + HWRM_RING_FREE_INPUT_RING_TYPE_RX); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID; + memset(rxr->rx_desc_ring, 0, + rxr->rx_ring_struct->ring_size * + sizeof(*rxr->rx_desc_ring)); + memset(rxr->rx_buf_ring, 0, + rxr->rx_ring_struct->ring_size * + sizeof(*rxr->rx_buf_ring)); + rxr->rx_prod = 0; + } + ring = rxr->ag_ring_struct; + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + bnxt_hwrm_ring_free(bp, ring, + HWRM_RING_FREE_INPUT_RING_TYPE_RX); + ring->fw_ring_id = INVALID_HW_RING_ID; + memset(rxr->ag_buf_ring, 0, + rxr->ag_ring_struct->ring_size * + sizeof(*rxr->ag_buf_ring)); + rxr->ag_prod = 0; + bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID; + } + if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) + bnxt_free_cp_ring(bp, cpr); + + bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID; +} + int bnxt_free_all_hwrm_rings(struct bnxt *bp) { unsigned int i; - int rc = 0; for (i = 0; i < bp->tx_cp_nr_rings; i++) { struct bnxt_tx_queue *txq = bp->tx_queues[i]; struct bnxt_tx_ring_info *txr = txq->tx_ring; struct bnxt_ring *ring = txr->tx_ring_struct; struct bnxt_cp_ring_info *cpr = txq->cp_ring; - unsigned int idx = bp->rx_cp_nr_rings + i + 1; if (ring->fw_ring_id != INVALID_HW_RING_ID) { bnxt_hwrm_ring_free(bp, ring, @@ -1771,60 +1957,15 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp) txr->tx_cons = 0; } if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_free_cp_ring(bp, cpr, idx); - cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID; - } - } - - for (i = 0; i < bp->rx_cp_nr_rings; i++) { - struct bnxt_rx_queue *rxq = bp->rx_queues[i]; - struct bnxt_rx_ring_info *rxr = rxq->rx_ring; - struct bnxt_ring *ring = rxr->rx_ring_struct; - struct bnxt_cp_ring_info *cpr = rxq->cp_ring; - unsigned int idx = i + 1; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_hwrm_ring_free(bp, ring, - HWRM_RING_FREE_INPUT_RING_TYPE_RX); - ring->fw_ring_id = INVALID_HW_RING_ID; - bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID; - memset(rxr->rx_desc_ring, 0, - rxr->rx_ring_struct->ring_size * - sizeof(*rxr->rx_desc_ring)); - memset(rxr->rx_buf_ring, 0, - rxr->rx_ring_struct->ring_size * - sizeof(*rxr->rx_buf_ring)); - rxr->rx_prod = 0; - } - ring = rxr->ag_ring_struct; - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_hwrm_ring_free(bp, ring, - HWRM_RING_FREE_INPUT_RING_TYPE_RX); - ring->fw_ring_id = INVALID_HW_RING_ID; - memset(rxr->ag_buf_ring, 0, - rxr->ag_ring_struct->ring_size * - sizeof(*rxr->ag_buf_ring)); - rxr->ag_prod = 0; - bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID; - } - if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_free_cp_ring(bp, cpr, idx); - bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; + bnxt_free_cp_ring(bp, cpr); cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID; } } - /* Default completion ring */ - { - struct 
bnxt_cp_ring_info *cpr = bp->def_cp_ring; + for (i = 0; i < bp->rx_cp_nr_rings; i++) + bnxt_free_hwrm_rx_ring(bp, i); - if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_free_cp_ring(bp, cpr, 0); - cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID; - } - } - - return rc; + return 0; } int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp) @@ -1887,6 +2028,7 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) rc = bnxt_hwrm_clear_ntuple_filter(bp, filter); else rc = bnxt_hwrm_clear_l2_filter(bp, filter); + STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next); //if (rc) //break; } @@ -1974,6 +2116,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp) bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false); bnxt_hwrm_vnic_free(bp, vnic); + + rte_free(vnic->fw_grp_ids); } /* Ring resources */ bnxt_free_all_hwrm_rings(bp); @@ -1992,6 +2136,7 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed) switch (conf_link_speed) { case ETH_LINK_SPEED_10M_HD: case ETH_LINK_SPEED_100M_HD: + /* FALLTHROUGH */ return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF; } return hw_link_duplex; @@ -2012,6 +2157,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed) switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) { case ETH_LINK_SPEED_100M: case ETH_LINK_SPEED_100M_HD: + /* FALLTHROUGH */ eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB; break; @@ -2176,6 +2322,7 @@ static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex) switch (hw_link_duplex) { case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH: case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL: + /* FALLTHROUGH */ eth_link_duplex = ETH_LINK_FULL_DUPLEX; break; case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF: @@ -2305,6 +2452,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp) case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0: case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5: case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0: + /* FALLTHROUGH */ bp->port_partition_type = resp->port_partition_type; break; default: @@ -2362,7 +2510,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings) req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags); req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU); req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN + - ETHER_CRC_LEN + VLAN_TAG_SIZE); + ETHER_CRC_LEN + VLAN_TAG_SIZE * + BNXT_NUM_VLANS); req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx); req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx); req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings); @@ -2399,9 +2548,11 @@ static void populate_vf_func_cfg_req(struct bnxt *bp, HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS); req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN + - ETHER_CRC_LEN + VLAN_TAG_SIZE); + ETHER_CRC_LEN + VLAN_TAG_SIZE * + BNXT_NUM_VLANS); req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN + - ETHER_CRC_LEN + VLAN_TAG_SIZE); + ETHER_CRC_LEN + VLAN_TAG_SIZE * + BNXT_NUM_VLANS); req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx / (num_vfs + 1)); req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1)); @@ -2766,9 +2917,9 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) req.req_buf_page_size = rte_cpu_to_le_16( page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN)); req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN); - req.req_buf_page_addr[0] = + req.req_buf_page_addr0 = rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf)); - if (req.req_buf_page_addr[0] == 0) { + if 
(req.req_buf_page_addr0 == 0) { PMD_DRV_LOG(ERR, "unable to map buffer address to physical memory\n"); return -ENOMEM; @@ -2915,6 +3066,18 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf) return rc; } +int bnxt_hwrm_set_async_event_cr(struct bnxt *bp) +{ + int rc; + + if (BNXT_PF(bp)) + rc = bnxt_hwrm_func_cfg_def_cp(bp); + else + rc = bnxt_hwrm_vf_func_cfg_def_cp(bp); + + return rc; +} + int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id, void *encaped, size_t ec_size) { @@ -3029,9 +3192,6 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp) struct bnxt_pf_info *pf = &bp->pf; int rc; - if (!(bp->flags & BNXT_FLAG_PORT_STATS)) - return 0; - HWRM_PREP(req, PORT_QSTATS); req.port_id = rte_cpu_to_le_16(pf->port_id); @@ -3052,7 +3212,9 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp) struct bnxt_pf_info *pf = &bp->pf; int rc; - if (!(bp->flags & BNXT_FLAG_PORT_STATS)) + /* Not allowed on NS2 device, NPAR, MultiHost, VF */ + if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) || + BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp)) return 0; HWRM_PREP(req, PORT_CLR_STATS); @@ -3199,13 +3361,12 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data) req.host_dest_addr = rte_cpu_to_le_64(dma_handle); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT(); - HWRM_UNLOCK(); - if (rc == 0) memcpy(data, buf, len > buflen ? buflen : len); rte_free(buf); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -3237,12 +3398,13 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index, req.offset = rte_cpu_to_le_32(offset); req.len = rte_cpu_to_le_32(length); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT(); - HWRM_UNLOCK(); if (rc == 0) memcpy(data, buf, length); rte_free(buf); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + return rc; } @@ -3273,14 +3435,6 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, rte_iova_t dma_handle; uint8_t *buf; - HWRM_PREP(req, NVM_WRITE); - - req.dir_type = rte_cpu_to_le_16(dir_type); - req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal); - req.dir_ext = rte_cpu_to_le_16(dir_ext); - req.dir_attr = rte_cpu_to_le_16(dir_attr); - req.dir_data_length = rte_cpu_to_le_32(data_len); - buf = rte_malloc("nvm_write", data_len, 0); rte_mem_lock_page(buf); if (!buf) @@ -3293,14 +3447,22 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, return -ENOMEM; } memcpy(buf, data, data_len); + + HWRM_PREP(req, NVM_WRITE); + + req.dir_type = rte_cpu_to_le_16(dir_type); + req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal); + req.dir_ext = rte_cpu_to_le_16(dir_ext); + req.dir_attr = rte_cpu_to_le_16(dir_attr); + req.dir_data_length = rte_cpu_to_le_32(data_len); req.host_src_addr = rte_cpu_to_le_64(dma_handle); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rte_free(buf); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); - rte_free(buf); return rc; } @@ -3588,8 +3750,8 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter) HWRM_CHECK_RESULT(); HWRM_UNLOCK(); - filter->fw_em_filter_id = -1; - filter->fw_l2_filter_id = -1; + filter->fw_em_filter_id = UINT64_MAX; + filter->fw_l2_filter_id = UINT64_MAX; return 0; } @@ -3700,7 +3862,7 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp, HWRM_CHECK_RESULT(); HWRM_UNLOCK(); - filter->fw_ntuple_filter_id = -1; + filter->fw_ntuple_filter_id = UINT64_MAX; return 0; } @@ -3732,3 +3894,54 @@ int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic) } return 0; } + +static void bnxt_hwrm_set_coal_params(struct 
bnxt_coal *hw_coal, + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) +{ + uint16_t flags; + + req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int); + + /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ + req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr); + + /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ + req->num_cmpl_dma_aggr_during_int = + rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int); + + req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max); + + /* min timer set to 1/2 of interrupt timer */ + req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min); + + /* buf timer set to 1/4 of interrupt timer */ + req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr); + + req->cmpl_aggr_dma_tmr_during_int = + rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int); + + flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET | + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE; + req->flags = rte_cpu_to_le_16(flags); +} + +int bnxt_hwrm_set_ring_coal(struct bnxt *bp, + struct bnxt_coal *coal, uint16_t ring_id) +{ + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp = + bp->hwrm_cmd_resp_addr; + int rc; + + /* Set ring coalesce parameters only for Stratus 100G NIC */ + if (!bnxt_stratus_device(bp)) + return 0; + + HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS); + bnxt_hwrm_set_coal_params(coal, &req); + req.ring_id = rte_cpu_to_le_16(ring_id); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + return 0; +} diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h index f11e72a3..379aac6e 100644 --- a/drivers/net/bnxt/bnxt_hwrm.h +++ b/drivers/net/bnxt/bnxt_hwrm.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
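bnxt_hwrm_set_coal_params() above converts every 16-bit field with rte_cpu_to_le_16() because HWRM request structures are little-endian on the wire. As a sketch of what that conversion amounts to (the real macro lives in rte_byteorder.h):

#include <stdint.h>

/* Identity on little-endian hosts, a byte swap on big-endian ones. */
static inline uint16_t cpu_to_le_16(uint16_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return (uint16_t)((v << 8) | (v >> 8));
#else
	return v;
#endif
}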
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_HWRM_H_ @@ -54,6 +26,12 @@ struct bnxt_cp_ring_info; #define ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE \ (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE - 32)) +#define HWRM_QUEUE_SERVICE_PROFILE_LOSSY \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY + +#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC \ + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC + int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic); int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic, @@ -88,6 +66,7 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp); int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); +int bnxt_hwrm_set_async_event_cr(struct bnxt *bp); int bnxt_hwrm_ring_alloc(struct bnxt *bp, struct bnxt_ring *ring, uint32_t ring_type, uint32_t map_index, @@ -131,10 +110,13 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic); int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic); void bnxt_free_all_hwrm_resources(struct bnxt *bp); void bnxt_free_hwrm_resources(struct bnxt *bp); +void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index); int bnxt_alloc_hwrm_resources(struct bnxt *bp); int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link); int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up); int bnxt_hwrm_func_qcfg(struct bnxt *bp); +int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp); +int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test); int bnxt_hwrm_allocate_pf_only(struct bnxt *bp); int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs); int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, @@ -189,4 +171,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, int bnxt_hwrm_ptp_cfg(struct bnxt *bp); int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_set_ring_coal(struct bnxt *bp, + struct bnxt_coal *coal, uint16_t ring_id); +int bnxt_hwrm_check_vf_rings(struct bnxt *bp); #endif diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c index 8ab98693..7ef7023e 100644 --- a/drivers/net/bnxt/bnxt_irq.c +++ b/drivers/net/bnxt/bnxt_irq.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2015 Broadcom Corporation. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #include @@ -68,30 +40,10 @@ static void bnxt_int_handler(void *param) if (!CMP_VALID(cmp, raw_cons, cpr->cp_ring_struct)) break; - switch (CMP_TYPE(cmp)) { - case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: - /* Handle any async event */ - bnxt_handle_async_event(bp, cmp); - break; - case CMPL_BASE_TYPE_HWRM_FWD_REQ: - /* Handle HWRM forwarded responses */ - bnxt_handle_fwd_req(bp, cmp); - break; - default: - /* Ignore any other events */ - if (cmp->type & rte_cpu_to_le_16(0x01)) { - if (!CMP_VALID(cmp, raw_cons, - cpr->cp_ring_struct)) - goto no_more; - } - PMD_DRV_LOG(INFO, - "Ignoring %02x completion\n", CMP_TYPE(cmp)); - break; - } + bnxt_event_hwrm_resp_handler(bp, cmp); raw_cons = NEXT_RAW_CMP(raw_cons); - }; -no_more: + cpr->cp_raw_cons = raw_cons; B_CP_DB_REARM(cpr, cpr->cp_raw_cons); } @@ -127,7 +79,9 @@ void bnxt_enable_int(struct bnxt *bp) { struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; - B_CP_DB_ARM(cpr); + /* Only the default completion ring */ + if (cpr != NULL && cpr->cp_doorbell != NULL) + B_CP_DB_ARM(cpr); } int bnxt_setup_int(struct bnxt *bp) diff --git a/drivers/net/bnxt/bnxt_irq.h b/drivers/net/bnxt/bnxt_irq.h index 4d2f7af9..75ba2135 100644 --- a/drivers/net/bnxt/bnxt_irq.h +++ b/drivers/net/bnxt/bnxt_irq.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2015 Broadcom Corporation. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
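The interrupt handler above now hands every completion to bnxt_event_hwrm_resp_handler() instead of dispatching inline, while keeping the usual valid-bit ring walk: consume while the entry's valid bit matches the current phase, then publish the consumer index and re-arm the doorbell. A generic sketch of that walk, with illustrative names rather than the driver's structures:

#include <stdbool.h>
#include <stdint.h>

struct cmpl { uint16_t type_valid; };	/* bit 0: valid-phase bit */

struct cp_ring {
	struct cmpl *entries;
	uint32_t size;			/* power of two */
	uint32_t raw_cons;		/* free-running consumer index */
};

static bool cmp_valid(const struct cp_ring *r, uint32_t raw_cons)
{
	const struct cmpl *c = &r->entries[raw_cons & (r->size - 1)];
	bool expect = !(raw_cons & r->size);	/* phase flips on each wrap */

	return ((c->type_valid & 1) != 0) == expect;
}

static void poll_ring(struct cp_ring *r, void (*handler)(const struct cmpl *))
{
	uint32_t raw_cons = r->raw_cons;

	while (cmp_valid(r, raw_cons)) {
		handler(&r->entries[raw_cons & (r->size - 1)]);
		raw_cons++;
	}
	r->raw_cons = raw_cons;		/* caller then re-arms the doorbell */
}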
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_IRQ_H_ diff --git a/drivers/net/bnxt/bnxt_nvm_defs.h b/drivers/net/bnxt/bnxt_nvm_defs.h index c5ccc9bc..ea9d4a9d 100644 --- a/drivers/net/bnxt/bnxt_nvm_defs.h +++ b/drivers/net/bnxt/bnxt_nvm_defs.h @@ -1,11 +1,6 @@ -/* Broadcom NetXtreme-C/E network driver. - * - * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2017 Broadcom Limited - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_NVM_DEFS_H_ diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c index 8fb89721..fcbd6bc6 100644 --- a/drivers/net/bnxt/bnxt_ring.c +++ b/drivers/net/bnxt/bnxt_ring.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #include @@ -52,11 +24,14 @@ void bnxt_free_ring(struct bnxt_ring *ring) { + if (!ring) + return; + if (ring->vmem_size && *ring->vmem) { memset((char *)*ring->vmem, 0, ring->vmem_size); *ring->vmem = NULL; } - rte_memzone_free((const struct rte_memzone *)ring->mem_zone); + ring->mem_zone = NULL; } /* @@ -89,22 +64,26 @@ int bnxt_init_ring_grps(struct bnxt *bp) * rx bd ring - Only non-zero length if rx_ring_info is not NULL */ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, - struct bnxt_tx_ring_info *tx_ring_info, - struct bnxt_rx_ring_info *rx_ring_info, + struct bnxt_tx_queue *txq, + struct bnxt_rx_queue *rxq, struct bnxt_cp_ring_info *cp_ring_info, const char *suffix) { struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct; + struct bnxt_rx_ring_info *rx_ring_info = rxq ? 
rxq->rx_ring : NULL; + struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL; struct bnxt_ring *tx_ring; struct bnxt_ring *rx_ring; struct rte_pci_device *pdev = bp->pdev; + uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads; const struct rte_memzone *mz = NULL; char mz_name[RTE_MEMZONE_NAMESIZE]; rte_iova_t mz_phys_addr; int sz; int stats_len = (tx_ring_info || rx_ring_info) ? - RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0; + RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) - + sizeof (struct hwrm_resp_hdr)) : 0; int cp_vmem_start = stats_len; int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size); @@ -155,7 +134,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, sizeof(struct bnxt_tpa_info)) : 0; int total_alloc_len = tpa_info_start; - if (bp->eth_dev->data->dev_conf.rxmode.enable_lro) + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) total_alloc_len += tpa_info_len; snprintf(mz_name, RTE_MEMZONE_NAMESIZE, @@ -166,10 +145,11 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, mz = rte_memzone_lookup(mz_name); if (!mz) { mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len, - SOCKET_ID_ANY, - RTE_MEMZONE_2MB | - RTE_MEMZONE_SIZE_HINT_ONLY, - getpagesize()); + SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY | + RTE_MEMZONE_IOVA_CONTIG, + getpagesize()); if (mz == NULL) return -ENOMEM; } @@ -191,6 +171,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, } if (tx_ring_info) { + txq->mz = mz; tx_ring = tx_ring_info->tx_ring_struct; tx_ring->bd = ((char *)mz->addr + tx_ring_start); @@ -210,6 +191,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, } if (rx_ring_info) { + rxq->mz = mz; rx_ring = rx_ring_info->rx_ring_struct; rx_ring->bd = ((char *)mz->addr + rx_ring_start); @@ -252,7 +234,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, ag_bitmap_start, ag_bitmap_len); /* TPA info */ - if (bp->eth_dev->data->dev_conf.rxmode.enable_lro) + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) rx_ring_info->tpa_info = ((struct bnxt_tpa_info *)((char *)mz->addr + tpa_info_start)); @@ -276,6 +258,116 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, return 0; } +static void bnxt_init_dflt_coal(struct bnxt_coal *coal) +{ + /* Tick values in micro seconds. + * 1 coal_buf x bufs_per_record = 1 completion record. 
+ */ + coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT; + /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ + coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR; + /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ + coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT; + coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX; + /* min timer set to 1/2 of interrupt timer */ + coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN; + /* buf timer set to 1/4 of interrupt timer */ + coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR; + coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT; +} + +int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index) +{ + struct rte_pci_device *pci_dev = bp->pdev; + struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index]; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + struct bnxt_ring *ring = rxr->rx_ring_struct; + unsigned int map_idx = queue_index + bp->rx_cp_nr_rings; + int rc = 0; + + bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id; + + /* Rx cmpl */ + rc = bnxt_hwrm_ring_alloc(bp, cp_ring, + HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, + queue_index, HWRM_NA_SIGNATURE, + HWRM_NA_SIGNATURE); + if (rc) + goto err_out; + + cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr + + queue_index * BNXT_DB_SIZE; + bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id; + B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + + if (!queue_index) { + /* + * In order to save completion resources, use the first + * completion ring from PF or VF as the default completion ring + * for async event and HWRM forward response handling. + */ + bp->def_cp_ring = cpr; + rc = bnxt_hwrm_set_async_event_cr(bp); + if (rc) + goto err_out; + } + /* Rx ring */ + rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, + queue_index, cpr->hw_stats_ctx_id, + cp_ring->fw_ring_id); + if (rc) + goto err_out; + + rxr->rx_prod = 0; + rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr + + queue_index * BNXT_DB_SIZE; + bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id; + B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + + ring = rxr->ag_ring_struct; + /* Agg ring */ + if (!ring) + PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n"); + + rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, + map_idx, HWRM_NA_SIGNATURE, + cp_ring->fw_ring_id); + if (rc) + goto err_out; + + PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n"); + rxr->ag_prod = 0; + rxr->ag_doorbell = (char *)pci_dev->mem_resource[2].addr + + map_idx * BNXT_DB_SIZE; + bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id; + B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); + + rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN + + ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE); + + if (bp->eth_dev->data->rx_queue_state[queue_index] == + RTE_ETH_QUEUE_STATE_STARTED) { + if (bnxt_init_one_rx_ring(rxq)) { + RTE_LOG(ERR, PMD, + "bnxt_init_one_rx_ring failed!\n"); + bnxt_rx_queue_release_op(rxq); + rc = -ENOMEM; + goto err_out; + } + B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); + } + rxq->index = queue_index; + PMD_DRV_LOG(INFO, + "queue %d, rx_deferred_start %d, state %d!\n", + queue_index, rxq->rx_deferred_start, + bp->eth_dev->data->rx_queue_state[queue_index]); + +err_out: + return rc; +} /* ring_grp usage: * [0] = default completion ring * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings @@ -283,43 +375,61 @@ int 
bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, */ int bnxt_alloc_hwrm_rings(struct bnxt *bp) { - struct rte_pci_device *pci_dev = bp->pdev; + struct bnxt_coal coal; unsigned int i; int rc = 0; + bnxt_init_dflt_coal(&coal); + for (i = 0; i < bp->rx_cp_nr_rings; i++) { struct bnxt_rx_queue *rxq = bp->rx_queues[i]; struct bnxt_cp_ring_info *cpr = rxq->cp_ring; struct bnxt_ring *cp_ring = cpr->cp_ring_struct; struct bnxt_rx_ring_info *rxr = rxq->rx_ring; struct bnxt_ring *ring = rxr->rx_ring_struct; - unsigned int idx = i + 1; - unsigned int map_idx = idx + bp->rx_cp_nr_rings; + unsigned int map_idx = i + bp->rx_cp_nr_rings; bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; /* Rx cmpl */ - rc = bnxt_hwrm_ring_alloc(bp, cp_ring, - HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, - idx, HWRM_NA_SIGNATURE, - HWRM_NA_SIGNATURE); + rc = bnxt_hwrm_ring_alloc + (bp, + cp_ring, + HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, + i, + HWRM_NA_SIGNATURE, + HWRM_NA_SIGNATURE); if (rc) goto err_out; - cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr + - idx * 0x80; + cpr->cp_doorbell = (char *)bp->doorbell_base + i * 0x80; bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id; B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id); + + if (!i) { + /* + * In order to save completion resource, use the first + * completion ring from PF or VF as the default + * completion ring for async event & HWRM + * forward response handling. + */ + bp->def_cp_ring = cpr; + rc = bnxt_hwrm_set_async_event_cr(bp); + if (rc) + goto err_out; + } /* Rx ring */ - rc = bnxt_hwrm_ring_alloc(bp, ring, - HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, - idx, cpr->hw_stats_ctx_id, - cp_ring->fw_ring_id); + rc = bnxt_hwrm_ring_alloc(bp, + ring, + HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, + i, + cpr->hw_stats_ctx_id, + cp_ring->fw_ring_id); if (rc) goto err_out; rxr->rx_prod = 0; - rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr + - idx * 0x80; + rxr->rx_doorbell = (char *)bp->doorbell_base + i * 0x80; bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id; B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); @@ -338,9 +448,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) goto err_out; PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n"); rxr->ag_prod = 0; - rxr->ag_doorbell = - (char *)pci_dev->mem_resource[2].addr + - map_idx * 0x80; + rxr->ag_doorbell = (char *)bp->doorbell_base + map_idx * 0x80; bp->grp_info[i].ag_fw_ring_id = ring->fw_ring_id; B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); @@ -353,7 +461,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) } B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); - rxq->index = idx; + rxq->index = i; } for (i = 0; i < bp->tx_cp_nr_rings; i++) { @@ -362,7 +470,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) struct bnxt_ring *cp_ring = cpr->cp_ring_struct; struct bnxt_tx_ring_info *txr = txq->tx_ring; struct bnxt_ring *ring = txr->tx_ring_struct; - unsigned int idx = i + 1 + bp->rx_cp_nr_rings; + unsigned int idx = i + bp->rx_cp_nr_rings; /* Tx cmpl */ rc = bnxt_hwrm_ring_alloc(bp, cp_ring, @@ -372,8 +480,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) if (rc) goto err_out; - cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr + - idx * 0x80; + cpr->cp_doorbell = (char *)bp->doorbell_base + idx * 0x80; B_CP_DIS_DB(cpr, cpr->cp_raw_cons); /* Tx ring */ @@ -384,9 +491,9 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) if (rc) goto err_out; - txr->tx_doorbell = (char *)pci_dev->mem_resource[2].addr + - idx * 0x80; + txr->tx_doorbell = (char *)bp->doorbell_base + idx * 0x80; 
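Throughout this hunk the per-ring doorbell addresses are derived as bp->doorbell_base plus the ring index times 0x80; the new bnxt_alloc_hwrm_rx_ring() uses BNXT_DB_SIZE, which is assumed here to be the same 0x80 stride. The address math, as a one-liner:

#include <stdint.h>

#define DB_STRIDE 0x80	/* stride used above; BNXT_DB_SIZE assumed equal */

static inline volatile void *ring_doorbell(char *doorbell_base,
					   unsigned int ring_idx)
{
	return (volatile void *)(doorbell_base +
				 (uintptr_t)ring_idx * DB_STRIDE);
}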
txq->index = idx; + bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id); } err_out: diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h index ebf7228e..1446d784 100644 --- a/drivers/net/bnxt/bnxt_ring.h +++ b/drivers/net/bnxt/bnxt_ring.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_RING_H_ @@ -56,6 +28,7 @@ #define BNXT_TPA_MAX 64 #define AGG_RING_SIZE_FACTOR 2 +#define AGG_RING_MULTIPLIER 2 /* These assume 4k pages */ #define MAX_RX_DESC_CNT (8 * 1024) @@ -93,10 +66,11 @@ struct bnxt_cp_ring_info; void bnxt_free_ring(struct bnxt_ring *ring); int bnxt_init_ring_grps(struct bnxt *bp); int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, - struct bnxt_tx_ring_info *tx_ring_info, - struct bnxt_rx_ring_info *rx_ring_info, + struct bnxt_tx_queue *txq, + struct bnxt_rx_queue *rxq, struct bnxt_cp_ring_info *cp_ring_info, const char *suffix); +int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index); int bnxt_alloc_hwrm_rings(struct bnxt *bp); #endif diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c index d49f3546..832fc9ec 100644 --- a/drivers/net/bnxt/bnxt_rxq.c +++ b/drivers/net/bnxt/bnxt_rxq.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #include @@ -51,10 +23,8 @@ void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq) { - struct bnxt_cp_ring_info *cpr = rxq->cp_ring; - - if (cpr->hw_stats) - cpr->hw_stats = NULL; + if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats) + rxq->cp_ring->hw_stats = NULL; } int bnxt_mq_rx_configure(struct bnxt *bp) @@ -107,6 +77,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp) switch (dev_conf->rxmode.mq_mode) { case ETH_MQ_RX_VMDQ_RSS: case ETH_MQ_RX_VMDQ_ONLY: + /* FALLTHROUGH */ /* ETH_8/64_POOLs */ pools = conf->nb_queue_pools; /* For each pool, allocate MACVLAN CFA rule & VNIC */ @@ -228,16 +199,19 @@ err_out: return rc; } -static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq) +void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq) { struct bnxt_sw_rx_bd *sw_ring; struct bnxt_tpa_info *tpa_info; uint16_t i; + rte_spinlock_lock(&rxq->lock); + if (rxq) { sw_ring = rxq->rx_ring->rx_buf_ring; if (sw_ring) { - for (i = 0; i < rxq->nb_rx_desc; i++) { + for (i = 0; + i < rxq->rx_ring->rx_ring_struct->ring_size; i++) { if (sw_ring[i].mbuf) { rte_pktmbuf_free_seg(sw_ring[i].mbuf); sw_ring[i].mbuf = NULL; @@ -247,7 +221,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq) /* Free up mbufs in Agg ring */ sw_ring = rxq->rx_ring->ag_buf_ring; if (sw_ring) { - for (i = 0; i < rxq->nb_rx_desc; i++) { + for (i = 0; + i < rxq->rx_ring->ag_ring_struct->ring_size; i++) { if (sw_ring[i].mbuf) { rte_pktmbuf_free_seg(sw_ring[i].mbuf); sw_ring[i].mbuf = NULL; @@ -266,6 +241,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq) } } } + + rte_spinlock_unlock(&rxq->lock); } void bnxt_free_rx_mbufs(struct bnxt *bp) @@ -295,6 +272,8 @@ void bnxt_rx_queue_release_op(void *rx_queue) bnxt_free_ring(rxq->cp_ring->cp_ring_struct); bnxt_free_rxq_stats(rxq); + rte_memzone_free(rxq->mz); + rxq->mz = NULL; rte_free(rxq); } @@ -308,14 +287,16 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, struct rte_mempool *mp) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; struct bnxt_rx_queue *rxq; int rc = 0; + uint8_t queue_state; if (queue_idx >= bp->max_rx_rings) { PMD_DRV_LOG(ERR, "Cannot create Rx ring %d. 
Only %d rings available\n", queue_idx, bp->max_rx_rings); - return -ENOSPC; + return -EINVAL; } if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) { @@ -350,12 +331,12 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, rxq->queue_id = queue_idx; rxq->port_id = eth_dev->data->port_id; - rxq->crc_len = (uint8_t)((eth_dev->data->dev_conf.rxmode.hw_strip_crc) ? - 0 : ETHER_CRC_LEN); + rxq->crc_len = rte_eth_dev_must_keep_crc(rx_offloads) ? + ETHER_CRC_LEN : 0; eth_dev->data->rx_queues[queue_idx] = rxq; /* Allocate RX ring hardware descriptors */ - if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring, + if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, "rxr")) { PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!\n"); @@ -363,7 +344,13 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, rc = -ENOMEM; goto out; } + rte_atomic64_init(&rxq->rx_mbuf_alloc_fail); + rxq->rx_deferred_start = rx_conf->rx_deferred_start; + queue_state = rxq->rx_deferred_start ? RTE_ETH_QUEUE_STATE_STOPPED : + RTE_ETH_QUEUE_STATE_STARTED; + eth_dev->data->rx_queue_state[queue_idx] = queue_state; + rte_spinlock_init(&rxq->lock); out: return rc; } @@ -412,6 +399,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id]; struct bnxt_vnic_info *vnic = NULL; + int rc = 0; if (rxq == NULL) { PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id); @@ -419,28 +407,47 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) } dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; - rxq->rx_deferred_start = false; + + bnxt_free_hwrm_rx_ring(bp, rx_queue_id); + bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id); PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id); + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { vnic = rxq->vnic; + if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID) return 0; - PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n", - vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id); + + PMD_DRV_LOG(DEBUG, + "vnic = %p fw_grp_id = %d\n", + vnic, bp->grp_info[rx_queue_id].fw_grp_id); + vnic->fw_grp_ids[rx_queue_id] = - bp->grp_info[rx_queue_id + 1].fw_grp_id; - return bnxt_vnic_rss_configure(bp, vnic); + bp->grp_info[rx_queue_id].fw_grp_id; + rc = bnxt_vnic_rss_configure(bp, vnic); } - return 0; + if (rc == 0) + rxq->rx_deferred_start = false; + + return rc; } int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct bnxt *bp = (struct bnxt *)dev->data->dev_private; struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; - struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id]; struct bnxt_vnic_info *vnic = NULL; + struct bnxt_rx_queue *rxq = NULL; + int rc = 0; + + /* Rx CQ 0 also works as Default CQ for async notifications */ + if (!rx_queue_id) { + PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id); + return -EINVAL; + } + + rxq = bp->rx_queues[rx_queue_id]; if (rxq == NULL) { PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id); @@ -454,7 +461,11 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { vnic = rxq->vnic; vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID; - return bnxt_vnic_rss_configure(bp, vnic); + rc = bnxt_vnic_rss_configure(bp, vnic); } - return 0; + + if (rc == 0) + bnxt_rx_queue_release_mbufs(rxq); + + return rc; } diff --git a/drivers/net/bnxt/bnxt_rxq.h 
b/drivers/net/bnxt/bnxt_rxq.h index c7acaa75..e5d6001d 100644 --- a/drivers/net/bnxt/bnxt_rxq.h +++ b/drivers/net/bnxt/bnxt_rxq.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_RQX_H_ @@ -38,6 +10,9 @@ struct bnxt; struct bnxt_rx_ring_info; struct bnxt_cp_ring_info; struct bnxt_rx_queue { + rte_spinlock_t lock; /* Synchronize between rx_queue_stop + * and fast path + */ struct rte_mempool *mb_pool; /* mbuf pool for RX ring */ struct rte_mbuf *pkt_first_seg; /* 1st seg of pkt */ struct rte_mbuf *pkt_last_seg; /* Last seg of pkt */ @@ -60,6 +35,8 @@ struct bnxt_rx_queue { uint32_t rx_buf_use_size; /* useable size */ struct bnxt_rx_ring_info *rx_ring; struct bnxt_cp_ring_info *cp_ring; + rte_atomic64_t rx_mbuf_alloc_fail; + const struct rte_memzone *mz; }; void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq); @@ -80,4 +57,5 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); +void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq); #endif diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c index aae9a635..c7bc8848 100644 --- a/drivers/net/bnxt/bnxt_rxr.c +++ b/drivers/net/bnxt/bnxt_rxr.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
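The rx_mbuf_alloc_fail counter moves from the per-device struct into struct bnxt_rx_queue above, so each queue counts its own buffer-allocation failures without sharing state across queues. The same pattern, sketched with C11 atomics in place of rte_atomic64_t:

#include <stdatomic.h>
#include <stdint.h>

struct rxq_stats {
	atomic_uint_fast64_t rx_mbuf_alloc_fail;	/* per-queue counter */
};

static void on_alloc_fail(struct rxq_stats *s)
{
	/* Relaxed ordering suffices for a statistics counter. */
	atomic_fetch_add_explicit(&s->rx_mbuf_alloc_fail, 1,
				  memory_order_relaxed);
}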
- * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #include @@ -69,13 +41,14 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq, mbuf = __bnxt_alloc_rx_data(rxq->mb_pool); if (!mbuf) { - rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail); + rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail); return -ENOMEM; } rx_buf->mbuf = mbuf; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; - rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); return 0; } @@ -90,7 +63,7 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq, mbuf = __bnxt_alloc_rx_data(rxq->mb_pool); if (!mbuf) { - rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail); + rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail); return -ENOMEM; } @@ -101,8 +74,9 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq, rx_buf->mbuf = mbuf; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; - rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); return 0; } @@ -123,7 +97,7 @@ static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr, prod_bd = &rxr->rx_desc_ring[prod]; - prod_bd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); rxr->rx_prod = prod; } @@ -143,7 +117,7 @@ static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons, prod_bd = &rxr->ag_desc_ring[prod]; cons_bd = &rxr->ag_desc_ring[cons]; - prod_bd->addr = cons_bd->addr; + prod_bd->address = cons_bd->addr; } #endif @@ -327,7 +301,7 @@ static inline struct rte_mbuf *bnxt_tpa_end( struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool); RTE_ASSERT(new_data != NULL); if (!new_data) { - rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail); + rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail); return NULL; } tpa_info->mbuf = new_data; @@ -490,11 +464,15 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt, if (likely(RX_CMP_IP_CS_OK(rxcmp1))) mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + else if (likely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN; else mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; if (likely(RX_CMP_L4_CS_OK(rxcmp1))) mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + else if (likely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; else mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; @@ -560,9 +538,12 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct 
rte_mbuf **rx_pkts, uint16_t prod = rxr->rx_prod; uint16_t ag_prod = rxr->ag_prod; int rc = 0; + bool evt = false; - /* If Rx Q was stopped return */ - if (rxq->rx_deferred_start) + /* If Rx Q was stopped return. RxQ0 cannot be stopped. */ + if (unlikely(((rxq->rx_deferred_start || + !rte_spinlock_trylock(&rxq->lock)) && + rxq->queue_id))) return 0; /* Handle RX burst request */ @@ -584,25 +565,37 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, nb_rx_pkts++; if (rc == -EBUSY) /* partial completion */ break; + } else { + evt = + bnxt_event_hwrm_resp_handler(rxq->bp, + (struct cmpl_base *)rxcmp); } + raw_cons = NEXT_RAW_CMP(raw_cons); - if (nb_rx_pkts == nb_pkts) + if (nb_rx_pkts == nb_pkts || evt) break; + /* Post some Rx buf early in case of larger burst processing */ + if (nb_rx_pkts == BNXT_RX_POST_THRESH) + B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); } cpr->cp_raw_cons = raw_cons; - if (prod == rxr->rx_prod && ag_prod == rxr->ag_prod) { + if (!nb_rx_pkts && !evt) { /* * For PMD, there is no need to keep on pushing to REARM * the doorbell if there are no new completions */ - return nb_rx_pkts; + goto done; } - B_CP_DIS_DB(cpr, cpr->cp_raw_cons); - B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + if (prod != rxr->rx_prod) + B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + /* Ring the AGG ring DB */ - B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); + if (ag_prod != rxr->ag_prod) + B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); + + B_CP_DIS_DB(cpr, cpr->cp_raw_cons); /* Attempt to alloc Rx buf in case of a previous allocation failure. */ if (rc == -ENOMEM) { @@ -627,16 +620,22 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, } } +done: + rte_spinlock_unlock(&rxq->lock); + return nb_rx_pkts; } void bnxt_free_rx_rings(struct bnxt *bp) { int i; + struct bnxt_rx_queue *rxq; - for (i = 0; i < (int)bp->rx_nr_rings; i++) { - struct bnxt_rx_queue *rxq = bp->rx_queues[i]; + if (!bp->rx_queues) + return; + for (i = 0; i < (int)bp->rx_nr_rings; i++) { + rxq = bp->rx_queues[i]; if (!rxq) continue; @@ -755,7 +754,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq) if (rxq->rx_buf_use_size <= size) size = rxq->rx_buf_use_size; - type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT; + type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD; rxr = rxq->rx_ring; ring = rxr->rx_ring_struct; @@ -795,7 +794,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq) rxr->tpa_info[i].mbuf = __bnxt_alloc_rx_data(rxq->mb_pool); if (!rxr->tpa_info[i].mbuf) { - rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail); + rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail); return -ENOMEM; } } diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h index f3ed49bd..3815a219 100644 --- a/drivers/net/bnxt/bnxt_rxr.h +++ b/drivers/net/bnxt/bnxt_rxr.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
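The receive-path change above is worth restating: bnxt_recv_pkts() now refuses to run for a stopped queue and synchronizes with bnxt_rx_queue_stop() through the new per-queue spinlock, while queue 0 is always polled because it doubles as the default completion queue for asynchronous firmware events. A minimal self-contained sketch of that gating follows; the queue structure is reduced to the three fields the control flow needs, so this is an illustration, not the driver's actual code:

#include <stdint.h>
#include <rte_spinlock.h>

struct rx_queue_sketch {
	rte_spinlock_t lock;       /* held by queue stop, tried by the burst */
	uint8_t rx_deferred_start; /* queue configured but not yet started */
	uint16_t queue_id;
};

static uint16_t rx_burst_sketch(struct rx_queue_sketch *q)
{
	uint16_t nb_rx = 0;
	int locked = rte_spinlock_trylock(&q->lock);

	/*
	 * Queue 0 doubles as the default completion queue for async
	 * firmware notifications, so it is always polled; any other
	 * queue is skipped while it is stopped or while the control
	 * path holds the lock to release its mbufs safely.
	 */
	if ((q->rx_deferred_start || !locked) && q->queue_id != 0) {
		if (locked)
			rte_spinlock_unlock(&q->lock);
		return 0;
	}

	/* ... poll the completion ring and fill the burst here ... */

	if (locked)
		rte_spinlock_unlock(&q->lock);
	return nb_rx;
}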
- * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_RXR_H_ @@ -52,22 +24,38 @@ #define BNXT_TPA_OUTER_L3_OFF(hdr_info) \ ((hdr_info) & 0x1ff) -#define RX_CMP_L4_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC) +#define RX_CMP_L4_CS_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC) -#define RX_CMP_L4_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR) +#define RX_CMP_L4_CS_ERR_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR | \ + RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR) #define RX_CMP_L4_CS_OK(rxcmp1) \ (((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) && \ !((rxcmp1)->errors_v2 & RX_CMP_L4_CS_ERR_BITS)) -#define RX_CMP_IP_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR) +#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \ + !((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) -#define RX_CMP_IP_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC) +#define RX_CMP_IP_CS_ERR_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR | \ + RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR) + +#define RX_CMP_IP_CS_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) #define RX_CMP_IP_CS_OK(rxcmp1) \ (((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) && \ !((rxcmp1)->errors_v2 & RX_CMP_IP_CS_ERR_BITS)) +#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \ + !((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) + +#define BNXT_RX_POST_THRESH 32 + enum pkt_hash_types { PKT_HASH_TYPE_NONE, /* Undefined type */ PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */ diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c index bd93cc83..a5d3c866 100644 --- a/drivers/net/bnxt/bnxt_stats.c +++ b/drivers/net/bnxt/bnxt_stats.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
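The reworked checksum macros above give the completion parser three outcomes instead of two: the hardware either validated the checksum (GOOD), validated it and found an error (BAD), or never calculated it at all (UNKNOWN), for example on tunnelled traffic it could not parse. A sketch of that classification, assuming the DPDK 18.08 mbuf flag names:

#include <stdint.h>
#include <rte_mbuf.h>

/* Sketch: map "calculated" and "error" bits to the three mbuf states. */
static uint64_t l4_csum_flags_sketch(uint32_t flags2, uint32_t errors_v2,
				     uint32_t cs_bits, uint32_t cs_err_bits)
{
	if (!(flags2 & cs_bits))
		return PKT_RX_L4_CKSUM_UNKNOWN; /* HW never checked it */
	if (errors_v2 & cs_err_bits)
		return PKT_RX_L4_CKSUM_BAD;     /* checked and failed */
	return PKT_RX_L4_CKSUM_GOOD;            /* checked and passed */
}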
- * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #include @@ -172,8 +144,8 @@ static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = { tx_mcast_pkts)}, {"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_output, tx_bcast_pkts)}, - {"tx_err_pkts", offsetof(struct hwrm_func_qstats_output, - tx_err_pkts)}, + {"tx_discard_pkts", offsetof(struct hwrm_func_qstats_output, + tx_discard_pkts)}, {"tx_drop_pkts", offsetof(struct hwrm_func_qstats_output, tx_drop_pkts)}, {"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_output, @@ -188,8 +160,8 @@ static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = { rx_mcast_pkts)}, {"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_output, rx_bcast_pkts)}, - {"rx_err_pkts", offsetof(struct hwrm_func_qstats_output, - rx_err_pkts)}, + {"rx_discard_pkts", offsetof(struct hwrm_func_qstats_output, + rx_discard_pkts)}, {"rx_drop_pkts", offsetof(struct hwrm_func_qstats_output, rx_drop_pkts)}, {"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_output, @@ -238,7 +210,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev, memset(bnxt_stats, 0, sizeof(*bnxt_stats)); if (!(bp->flags & BNXT_FLAG_INIT_DONE)) { PMD_DRV_LOG(ERR, "Device Initialization not complete!\n"); - return 0; + return -1; } for (i = 0; i < bp->rx_cp_nr_rings; i++) { @@ -249,6 +221,8 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev, bnxt_stats, 1); if (unlikely(rc)) return rc; + bnxt_stats->rx_nombuf += + rte_atomic64_read(&rxq->rx_mbuf_alloc_fail); } for (i = 0; i < bp->tx_cp_nr_rings; i++) { @@ -263,13 +237,13 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev, rc = bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats); if (unlikely(rc)) return rc; - bnxt_stats->rx_nombuf = rte_atomic64_read(&bp->rx_mbuf_alloc_fail); return rc; } void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + unsigned int i; if (!(bp->flags & BNXT_FLAG_INIT_DONE)) { PMD_DRV_LOG(ERR, "Device Initialization not complete!\n"); @@ -277,7 +251,11 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev) } bnxt_clear_all_hwrm_stat_ctxs(bp); - rte_atomic64_clear(&bp->rx_mbuf_alloc_fail); + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[i]; + + rte_atomic64_clear(&rxq->rx_mbuf_alloc_fail); + } } int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, @@ -288,11 +266,6 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, unsigned int count, i; uint64_t 
tx_drop_pkts; - if (!(bp->flags & BNXT_FLAG_PORT_STATS)) { - PMD_DRV_LOG(ERR, "xstats not supported for VF\n"); - return 0; - } - bnxt_hwrm_port_qstats(bp); bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_drop_pkts); @@ -305,6 +278,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, count = 0; for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) { uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats; + xstats[count].id = count; xstats[count].value = rte_le_to_cpu_64( *(uint64_t *)((char *)rx_stats + bnxt_rx_stats_strings[i].offset)); @@ -313,6 +287,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) { uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats; + xstats[count].id = count; xstats[count].value = rte_le_to_cpu_64( *(uint64_t *)((char *)tx_stats + bnxt_tx_stats_strings[i].offset)); @@ -320,6 +295,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, } /* The Tx drop pkts aka the Anti spoof counter */ + xstats[count].id = count; xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts); count++; diff --git a/drivers/net/bnxt/bnxt_stats.h b/drivers/net/bnxt/bnxt_stats.h index c1c83d57..b0f135a5 100644 --- a/drivers/net/bnxt/bnxt_stats.h +++ b/drivers/net/bnxt/bnxt_stats.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2015 Broadcom Corporation. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_STATS_H_ diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c index 53524346..b9b975e4 100644 --- a/drivers/net/bnxt/bnxt_txq.c +++ b/drivers/net/bnxt/bnxt_txq.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved.
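The statistics hunks above move the allocation-failure counter from the per-device struct bnxt into each Rx queue, so bnxt_stats_get_op() now sums the per-queue values into rx_nombuf and bnxt_stats_reset_op() clears them queue by queue. Reduced to its essentials (the queue type is simplified here to the one relevant field):

#include <rte_atomic.h>
#include <rte_ethdev.h>

struct rxq_stats_sketch {
	rte_atomic64_t rx_mbuf_alloc_fail; /* bumped on each failed alloc */
};

/* Sketch: aggregate the per-queue counters into the device stats. */
static void sum_rx_nombuf_sketch(struct rte_eth_stats *stats,
				 struct rxq_stats_sketch **rxqs,
				 unsigned int nb_rxq)
{
	unsigned int i;

	for (i = 0; i < nb_rxq; i++)
		stats->rx_nombuf +=
			rte_atomic64_read(&rxqs[i]->rx_mbuf_alloc_fail);
}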
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #include @@ -47,10 +19,8 @@ void bnxt_free_txq_stats(struct bnxt_tx_queue *txq) { - struct bnxt_cp_ring_info *cpr = txq->cp_ring; - - if (cpr->hw_stats) - cpr->hw_stats = NULL; + if (txq && txq->cp_ring && txq->cp_ring->hw_stats) + txq->cp_ring->hw_stats = NULL; } static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq) @@ -58,6 +28,9 @@ static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq) struct bnxt_sw_tx_bd *sw_ring; uint16_t i; + if (!txq) + return; + sw_ring = txq->tx_ring->tx_buf_ring; if (sw_ring) { for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) { @@ -93,6 +66,8 @@ void bnxt_tx_queue_release_op(void *tx_queue) bnxt_free_ring(txq->cp_ring->cp_ring_struct); bnxt_free_txq_stats(txq); + rte_memzone_free(txq->mz); + txq->mz = NULL; rte_free(txq); } @@ -112,7 +87,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, PMD_DRV_LOG(ERR, "Cannot create Tx ring %d. Only %d rings available\n", queue_idx, bp->max_tx_rings); - return -ENOSPC; + return -EINVAL; } if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) { @@ -147,7 +122,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, txq->port_id = eth_dev->data->port_id; /* Allocate TX ring hardware descriptors */ - if (bnxt_alloc_rings(bp, queue_idx, txq->tx_ring, NULL, txq->cp_ring, + if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring, "txr")) { PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!"); bnxt_tx_queue_release_op(txq); diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h index e27c34fa..f2c712a7 100644 --- a/drivers/net/bnxt/bnxt_txq.h +++ b/drivers/net/bnxt/bnxt_txq.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. 
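bnxt_tx_queue_release_op() above also settles on an explicit teardown order: per-slot mbufs first, then the ring structures, then the queue statistics, and finally the new backing memzone before the queue itself is freed. The shape of that sequence as a sketch, with the queue type pared down to the one new field:

#include <rte_malloc.h>
#include <rte_memzone.h>

struct txq_release_sketch {
	const struct rte_memzone *mz; /* backing store for the rings */
};

/* Sketch: rings and stats must be gone before the memzone they live in. */
static void tx_queue_release_sketch(struct txq_release_sketch *txq)
{
	if (txq == NULL)
		return;
	/* 1. free the mbufs still held by the ring slots */
	/* 2. free the ring structures */
	/* 3. drop the stats context reference */
	rte_memzone_free(txq->mz); /* 4. release the backing memzone */
	txq->mz = NULL;
	rte_free(txq);
}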
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_TXQ_H_ @@ -50,9 +22,9 @@ struct bnxt_tx_queue { uint8_t pthresh; /* Prefetch threshold register */ uint8_t hthresh; /* Host threshold register */ uint8_t wthresh; /* Write-back threshold reg */ - uint32_t txq_flags; /* Holds flags for this TXq */ uint32_t ctx_curr; /* Hardware context states */ uint8_t tx_deferred_start; /* not in global dev start */ + uint8_t cmpl_next; /* Next BD to trigger a compl */ struct bnxt *bp; int index; @@ -61,6 +33,7 @@ struct bnxt_tx_queue { unsigned int cp_nr_rings; struct bnxt_cp_ring_info *cp_ring; + const struct rte_memzone *mz; }; void bnxt_free_txq_stats(struct bnxt_tx_queue *txq); diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c index 2c81a37c..67bb35e0 100644 --- a/drivers/net/bnxt/bnxt_txr.c +++ b/drivers/net/bnxt/bnxt_txr.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #include @@ -142,7 +114,9 @@ static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr) } static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, - struct bnxt_tx_queue *txq) + struct bnxt_tx_queue *txq, + uint16_t *coal_pkts, + uint16_t *cmpl_next) { struct bnxt_tx_ring_info *txr = txq->tx_ring; struct tx_bd_long *txbd; @@ -161,7 +135,9 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM | - PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM)) + PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM | + PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN | + PKT_TX_TUNNEL_GENEVE)) long_bd = true; tx_buf = &txr->tx_buf_ring[txr->tx_prod]; @@ -174,14 +150,21 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, return -ENOMEM; txbd = &txr->tx_desc_ring[txr->tx_prod]; - txbd->opaque = txr->tx_prod; + txbd->opaque = *coal_pkts; txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT; + txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW; + if (!*cmpl_next) { + txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL; + } else { + *coal_pkts = 0; + *cmpl_next = false; + } txbd->len = tx_pkt->data_len; - if (txbd->len >= 2014) + if (tx_pkt->pkt_len >= 2014) txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K; else - txbd->flags_type |= lhint_arr[txbd->len >> 9]; - txbd->addr = rte_cpu_to_le_32(rte_mbuf_data_iova(tx_buf->mbuf)); + txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9]; + txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf)); if (long_bd) { txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG; @@ -222,16 +205,46 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, /* Outer IP, Inner IP, Inner TCP/UDP CSO */ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM; txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) == + PKT_TX_OIP_IIP_TCP_CKSUM) { + /* Outer IP, Inner IP, Inner TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) == + PKT_TX_OIP_IIP_UDP_CKSUM) { + /* Outer IP, Inner IP, Inner TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) == PKT_TX_IIP_TCP_UDP_CKSUM) { /* (Inner) IP, (Inner) TCP/UDP CSO */ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM; txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) == + PKT_TX_IIP_UDP_CKSUM) { + /* (Inner) IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) == + PKT_TX_IIP_TCP_CKSUM) { + /* (Inner) IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) == PKT_TX_OIP_TCP_UDP_CKSUM) { /* Outer IP, (Inner) TCP/UDP CSO */ 
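/*
 * Note on the pattern used throughout this chain: every branch tests
 * for an exact match, e.g. (ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) ==
 * PKT_TX_OIP_IIP_UDP_CKSUM, rather than a bare bitwise AND. A bare
 * AND would let a packet carrying a superset of offload flags fall
 * into an earlier, narrower branch and program an incomplete offload;
 * the exact comparison keeps each flag combination on its own branch.
 */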
txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM; txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) == + PKT_TX_OIP_UDP_CKSUM) { + /* Outer IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) == + PKT_TX_OIP_TCP_CKSUM) { + /* Outer IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM; + txbd1->mss = 0; } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) == PKT_TX_OIP_IIP_CKSUM) { /* Outer IP, Inner IP CSO */ @@ -242,11 +255,23 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, /* TCP/UDP CSO */ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM; txbd1->mss = 0; - } else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) { + } else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) == + PKT_TX_TCP_CKSUM) { + /* TCP/UDP CSO */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) == + PKT_TX_UDP_CKSUM) { + /* TCP/UDP CSO */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) == + PKT_TX_IP_CKSUM) { /* IP CSO */ txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM; txbd1->mss = 0; - } else if (tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) { + } else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) == + PKT_TX_OUTER_IP_CKSUM) { /* IP CSO */ txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM; txbd1->mss = 0; @@ -262,14 +287,15 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, tx_buf = &txr->tx_buf_ring[txr->tx_prod]; txbd = &txr->tx_desc_ring[txr->tx_prod]; - txbd->addr = rte_cpu_to_le_32(rte_mbuf_data_iova(m_seg)); - txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT; + txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg)); + txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT; txbd->len = m_seg->data_len; m_seg = m_seg->next; } txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END; + txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags); txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod); @@ -306,35 +332,44 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq) struct bnxt_cp_ring_info *cpr = txq->cp_ring; uint32_t raw_cons = cpr->cp_raw_cons; uint32_t cons; - int nb_tx_pkts = 0; + uint32_t nb_tx_pkts = 0; struct tx_cmpl *txcmp; + struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring; + struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct; + uint32_t ring_mask = cp_ring_struct->ring_mask; + uint32_t opaque = 0; - if ((txq->tx_ring->tx_ring_struct->ring_size - - (bnxt_tx_avail(txq->tx_ring))) > - txq->tx_free_thresh) { - while (1) { - cons = RING_CMP(cpr->cp_ring_struct, raw_cons); - txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons]; - - if (!CMP_VALID(txcmp, raw_cons, cpr->cp_ring_struct)) - break; - cpr->valid = FLIP_VALID(cons, - cpr->cp_ring_struct->ring_mask, - cpr->valid); - - if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) - nb_tx_pkts++; - else - RTE_LOG_DP(DEBUG, PMD, - "Unhandled CMP type %02x\n", - CMP_TYPE(txcmp)); - raw_cons = NEXT_RAW_CMP(raw_cons); - } - if (nb_tx_pkts) - bnxt_tx_cmp(txq, nb_tx_pkts); + if (((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) & + txq->tx_ring->tx_ring_struct->ring_mask) < txq->tx_free_thresh) + return 0; + + do { + cons = RING_CMPL(ring_mask, raw_cons); + txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons]; + rte_prefetch_non_temporal(&cp_desc_ring[(cons + 2) & + ring_mask]); + + if (!CMPL_VALID(txcmp, cpr->valid)) + break; + opaque = rte_cpu_to_le_32(txcmp->opaque); + NEXT_CMPL(cpr, cons, cpr->valid, 
1); + rte_prefetch0(&cp_desc_ring[cons]); + + if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) + nb_tx_pkts += opaque; + else + RTE_LOG_DP(ERR, PMD, + "Unhandled CMP type %02x\n", + CMP_TYPE(txcmp)); + raw_cons = cons; + } while (nb_tx_pkts < ring_mask); + + if (nb_tx_pkts) { + bnxt_tx_cmp(txq, nb_tx_pkts); cpr->cp_raw_cons = raw_cons; - B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + B_CP_DB(cpr, cpr->cp_raw_cons, ring_mask); } + return nb_tx_pkts; } @@ -343,8 +378,8 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, { struct bnxt_tx_queue *txq = tx_queue; uint16_t nb_tx_pkts = 0; - uint16_t db_mask = txq->tx_ring->tx_ring_struct->ring_size >> 2; - uint16_t last_db_mask = 0; + uint16_t coal_pkts = 0; + uint16_t cmpl_next = txq->cmpl_next; /* Handle TX completions */ bnxt_handle_tx_cp(txq); @@ -354,16 +389,25 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n"); return 0; } + + txq->cmpl_next = 0; /* Handle TX burst request */ for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) { - if (bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq)) { + int rc; + + /* Request a completion on first and last packet */ + cmpl_next |= (nb_pkts == nb_tx_pkts + 1); + coal_pkts++; + rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq, + &coal_pkts, &cmpl_next); + + if (unlikely(rc)) { + /* Request a completion in next cycle */ + txq->cmpl_next = 1; break; - } else if ((nb_tx_pkts & db_mask) != last_db_mask) { - B_TX_DB(txq->tx_ring->tx_doorbell, - txq->tx_ring->tx_prod); - last_db_mask = nb_tx_pkts & db_mask; } } + if (nb_tx_pkts) B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod); diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h index d88b15ab..7f3c7cdb 100644 --- a/drivers/net/bnxt/bnxt_txr.h +++ b/drivers/net/bnxt/bnxt_txr.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
*/ #ifndef _BNXT_TXR_H_ @@ -73,10 +45,20 @@ int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); #define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \ + PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \ + PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM) #define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ PKT_TX_IP_CKSUM) +#define PKT_TX_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM) +#define PKT_TX_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM) #define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \ + PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \ + PKT_TX_OUTER_IP_CKSUM) #define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \ PKT_TX_OUTER_IP_CKSUM) #define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM) diff --git a/drivers/net/bnxt/bnxt_util.c b/drivers/net/bnxt/bnxt_util.c new file mode 100644 index 00000000..7d334271 --- /dev/null +++ b/drivers/net/bnxt/bnxt_util.c @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#include + +#include "bnxt_util.h" + +int bnxt_check_zero_bytes(const uint8_t *bytes, int len) +{ + int i; + + for (i = 0; i < len; i++) + if (bytes[i] != 0x00) + return 0; + return 1; +} diff --git a/drivers/net/bnxt/bnxt_util.h b/drivers/net/bnxt/bnxt_util.h new file mode 100644 index 00000000..2378833c --- /dev/null +++ b/drivers/net/bnxt/bnxt_util.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#ifndef _BNXT_UTIL_H_ +#define _BNXT_UTIL_H_ + +int bnxt_check_zero_bytes(const uint8_t *bytes, int len); + +#endif /* _BNXT_UTIL_H_ */ diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c index d4aeb4ca..c0577cd7 100644 --- a/drivers/net/bnxt/bnxt_vnic.c +++ b/drivers/net/bnxt/bnxt_vnic.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2015 Broadcom Corporation. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
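The new bnxt_util.c helper above returns 1 only when every byte in the buffer is zero, which makes it a natural guard against unset addresses. A hypothetical caller (the wrapper name is illustrative, not part of the driver):

#include <rte_ether.h>
#include "bnxt_util.h"

/* Hypothetical: treat an all-zero MAC as unset/invalid. */
static int mac_is_set_sketch(const struct ether_addr *mac)
{
	return !bnxt_check_zero_bytes(mac->addr_bytes, ETHER_ADDR_LEN);
}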
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #include @@ -67,7 +39,7 @@ void bnxt_init_vnics(struct bnxt *bp) { struct bnxt_vnic_info *vnic; uint16_t max_vnics; - int i, j; + int i; max_vnics = bp->max_vnics; STAILQ_INIT(&bp->free_vnic_list); @@ -77,9 +49,8 @@ void bnxt_init_vnics(struct bnxt *bp) vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE; vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE; vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE; - - for (j = 0; j < MAX_QUEUES_PER_VNIC; j++) - vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE; + vnic->hash_mode = + HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT; prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE); STAILQ_INIT(&vnic->filter); @@ -185,10 +156,10 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp) mz = rte_memzone_lookup(mz_name); if (!mz) { mz = rte_memzone_reserve(mz_name, - entry_length * max_vnics, - SOCKET_ID_ANY, - RTE_MEMZONE_2MB | - RTE_MEMZONE_SIZE_HINT_ONLY); + entry_length * max_vnics, SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY | + RTE_MEMZONE_IOVA_CONTIG); if (!mz) return -ENOMEM; } diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h index d8d35c7d..9029f78c 100644 --- a/drivers/net/bnxt/bnxt_vnic.h +++ b/drivers/net/bnxt/bnxt_vnic.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2015 Broadcom Corporation. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
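bnxt_alloc_vnic_attributes() above now passes RTE_MEMZONE_IOVA_CONTIG so the VNIC RSS tables and hash keys land in IOVA-contiguous memory the hardware can DMA to. The lookup-or-reserve pattern it relies on, as a standalone sketch (the zone name and length are placeholders):

#include <rte_memzone.h>

static const struct rte_memzone *
reserve_dma_zone_sketch(const char *name, size_t len)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL)
		return mz; /* already reserved by an earlier init pass */

	return rte_memzone_reserve(name, len, SOCKET_ID_ANY,
				   RTE_MEMZONE_2MB |
				   RTE_MEMZONE_SIZE_HINT_ONLY |
				   RTE_MEMZONE_IOVA_CONTIG);
}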
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_VNIC_H_ @@ -43,16 +15,13 @@ struct bnxt_vnic_info { uint16_t fw_vnic_id; /* returned by Chimp during alloc */ uint16_t rss_rule; -#define MAX_NUM_TRAFFIC_CLASSES 8 -#define MAX_NUM_RSS_QUEUES_PER_VNIC 16 -#define MAX_QUEUES_PER_VNIC (MAX_NUM_RSS_QUEUES_PER_VNIC + \ - MAX_NUM_TRAFFIC_CLASSES) uint16_t start_grp_id; uint16_t end_grp_id; - uint16_t fw_grp_ids[MAX_QUEUES_PER_VNIC]; + uint16_t *fw_grp_ids; uint16_t dflt_ring_grp; uint16_t mru; uint16_t hash_type; + uint8_t hash_mode; rte_iova_t rss_table_dma_addr; uint16_t *rss_table; rte_iova_t rss_hash_key_dma_addr; diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h index 1e9c39f4..f5c7b422 100644 --- a/drivers/net/bnxt/hsi_struct_def_dpdk.h +++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h @@ -1,12516 +1,28211 @@ -/*- - * BSD LICENSE +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2014-2018 Broadcom Limited + * All rights reserved. * - * Copyright(c) 2001-2017 Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * DO NOT MODIFY!!! This file is automatically generated. */ -#ifndef _HSI_STRUCT_DEF_DPDK_ -#define _HSI_STRUCT_DEF_DPDK_ -/* HSI and HWRM Specification 1.8.2 */ -#define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 8 -#define HWRM_VERSION_UPDATE 2 +#ifndef _HSI_STRUCT_DEF_DPDK_H_ +#define _HSI_STRUCT_DEF_DPDK_H_ -#define HWRM_VERSION_RSVD 0 /* non-zero means beta version */ +/* This is the HWRM command header. */ +/* hwrm_cmd_hdr (size:128b/16B) */ +struct hwrm_cmd_hdr { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __attribute__((packed)); -#define HWRM_VERSION_STR "1.8.2.0" -/* - * Following is the signature for HWRM message field that indicates not - * applicable (All F's). Need to cast it the size of the field if needed. - */ -#define HWRM_NA_SIGNATURE ((uint32_t)(-1)) -#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */ -#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */ -#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */ -#define HW_HASH_KEY_SIZE 40 -#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */ -#define HWRM_ROCE_SP_HSI_VERSION_MAJOR 1 -#define HWRM_ROCE_SP_HSI_VERSION_MINOR 8 -#define HWRM_ROCE_SP_HSI_VERSION_UPDATE 2 +/* This is the HWRM response header. */ +/* hwrm_resp_hdr (size:64b/8B) */ +struct hwrm_resp_hdr { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; +} __attribute__((packed)); /* - * Request types + * TLV encapsulated message. Use the TLV type field of the + * TLV to determine the type of message encapsulated. 
*/ -#define HWRM_VER_GET (UINT32_C(0x0)) -#define HWRM_FUNC_BUF_UNRGTR (UINT32_C(0xe)) -#define HWRM_FUNC_VF_CFG (UINT32_C(0xf)) - /* Reserved for future use */ -#define RESERVED1 (UINT32_C(0x10)) -#define HWRM_FUNC_RESET (UINT32_C(0x11)) -#define HWRM_FUNC_GETFID (UINT32_C(0x12)) -#define HWRM_FUNC_VF_ALLOC (UINT32_C(0x13)) -#define HWRM_FUNC_VF_FREE (UINT32_C(0x14)) -#define HWRM_FUNC_QCAPS (UINT32_C(0x15)) -#define HWRM_FUNC_QCFG (UINT32_C(0x16)) -#define HWRM_FUNC_CFG (UINT32_C(0x17)) -#define HWRM_FUNC_QSTATS (UINT32_C(0x18)) -#define HWRM_FUNC_CLR_STATS (UINT32_C(0x19)) -#define HWRM_FUNC_DRV_UNRGTR (UINT32_C(0x1a)) -#define HWRM_FUNC_VF_RESC_FREE (UINT32_C(0x1b)) -#define HWRM_FUNC_VF_VNIC_IDS_QUERY (UINT32_C(0x1c)) -#define HWRM_FUNC_DRV_RGTR (UINT32_C(0x1d)) -#define HWRM_FUNC_DRV_QVER (UINT32_C(0x1e)) -#define HWRM_FUNC_BUF_RGTR (UINT32_C(0x1f)) -#define HWRM_PORT_PHY_CFG (UINT32_C(0x20)) -#define HWRM_PORT_MAC_CFG (UINT32_C(0x21)) -#define HWRM_PORT_QSTATS (UINT32_C(0x23)) -#define HWRM_PORT_LPBK_QSTATS (UINT32_C(0x24)) -#define HWRM_PORT_CLR_STATS (UINT32_C(0x25)) -#define HWRM_PORT_PHY_QCFG (UINT32_C(0x27)) -#define HWRM_PORT_MAC_QCFG (UINT32_C(0x28)) -#define HWRM_PORT_MAC_PTP_QCFG (UINT32_C(0x29)) -#define HWRM_PORT_PHY_QCAPS (UINT32_C(0x2a)) -#define HWRM_PORT_LED_CFG (UINT32_C(0x2d)) -#define HWRM_PORT_LED_QCFG (UINT32_C(0x2e)) -#define HWRM_PORT_LED_QCAPS (UINT32_C(0x2f)) -#define HWRM_QUEUE_QPORTCFG (UINT32_C(0x30)) -#define HWRM_QUEUE_QCFG (UINT32_C(0x31)) -#define HWRM_QUEUE_CFG (UINT32_C(0x32)) -#define HWRM_FUNC_VLAN_CFG (UINT32_C(0x33)) -#define HWRM_FUNC_VLAN_QCFG (UINT32_C(0x34)) -#define HWRM_QUEUE_PFCENABLE_QCFG (UINT32_C(0x35)) -#define HWRM_QUEUE_PFCENABLE_CFG (UINT32_C(0x36)) -#define HWRM_QUEUE_PRI2COS_QCFG (UINT32_C(0x37)) -#define HWRM_QUEUE_PRI2COS_CFG (UINT32_C(0x38)) -#define HWRM_QUEUE_COS2BW_QCFG (UINT32_C(0x39)) -#define HWRM_QUEUE_COS2BW_CFG (UINT32_C(0x3a)) -#define HWRM_VNIC_ALLOC (UINT32_C(0x40)) -#define HWRM_VNIC_ALLOC (UINT32_C(0x40)) -#define HWRM_VNIC_FREE (UINT32_C(0x41)) -#define HWRM_VNIC_CFG (UINT32_C(0x42)) -#define HWRM_VNIC_QCFG (UINT32_C(0x43)) -#define HWRM_VNIC_TPA_CFG (UINT32_C(0x44)) -#define HWRM_VNIC_RSS_CFG (UINT32_C(0x46)) -#define HWRM_VNIC_RSS_QCFG (UINT32_C(0x47)) -#define HWRM_VNIC_PLCMODES_CFG (UINT32_C(0x48)) -#define HWRM_VNIC_PLCMODES_QCFG (UINT32_C(0x49)) -#define HWRM_VNIC_QCAPS (UINT32_C(0x4a)) -#define HWRM_RING_ALLOC (UINT32_C(0x50)) -#define HWRM_RING_FREE (UINT32_C(0x51)) -#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (UINT32_C(0x52)) -#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (UINT32_C(0x53)) -#define HWRM_RING_RESET (UINT32_C(0x5e)) -#define HWRM_RING_GRP_ALLOC (UINT32_C(0x60)) -#define HWRM_RING_GRP_FREE (UINT32_C(0x61)) -#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (UINT32_C(0x70)) -#define HWRM_VNIC_RSS_COS_LB_CTX_FREE (UINT32_C(0x71)) -#define HWRM_CFA_L2_FILTER_ALLOC (UINT32_C(0x90)) -#define HWRM_CFA_L2_FILTER_FREE (UINT32_C(0x91)) -#define HWRM_CFA_L2_FILTER_CFG (UINT32_C(0x92)) -#define HWRM_CFA_L2_SET_RX_MASK (UINT32_C(0x93)) - /* Reserved for future use */ -#define HWRM_CFA_VLAN_ANTISPOOF_CFG (UINT32_C(0x94)) -#define HWRM_CFA_TUNNEL_FILTER_ALLOC (UINT32_C(0x95)) -#define HWRM_CFA_TUNNEL_FILTER_FREE (UINT32_C(0x96)) -#define HWRM_CFA_NTUPLE_FILTER_ALLOC (UINT32_C(0x99)) -#define HWRM_CFA_NTUPLE_FILTER_FREE (UINT32_C(0x9a)) -#define HWRM_CFA_NTUPLE_FILTER_CFG (UINT32_C(0x9b)) -#define HWRM_CFA_EM_FLOW_ALLOC (UINT32_C(0x9c)) -#define HWRM_CFA_EM_FLOW_FREE (UINT32_C(0x9d)) -#define HWRM_CFA_EM_FLOW_CFG 
(UINT32_C(0x9e)) -#define HWRM_TUNNEL_DST_PORT_QUERY (UINT32_C(0xa0)) -#define HWRM_TUNNEL_DST_PORT_ALLOC (UINT32_C(0xa1)) -#define HWRM_TUNNEL_DST_PORT_FREE (UINT32_C(0xa2)) -#define HWRM_STAT_CTX_ALLOC (UINT32_C(0xb0)) -#define HWRM_STAT_CTX_FREE (UINT32_C(0xb1)) -#define HWRM_STAT_CTX_QUERY (UINT32_C(0xb2)) -#define HWRM_STAT_CTX_CLR_STATS (UINT32_C(0xb3)) -#define HWRM_FW_RESET (UINT32_C(0xc0)) -#define HWRM_FW_QSTATUS (UINT32_C(0xc1)) -#define HWRM_EXEC_FWD_RESP (UINT32_C(0xd0)) -#define HWRM_REJECT_FWD_RESP (UINT32_C(0xd1)) -#define HWRM_FWD_RESP (UINT32_C(0xd2)) -#define HWRM_FWD_ASYNC_EVENT_CMPL (UINT32_C(0xd3)) -#define HWRM_TEMP_MONITOR_QUERY (UINT32_C(0xe0)) -#define HWRM_WOL_FILTER_ALLOC (UINT32_C(0xf0)) -#define HWRM_WOL_FILTER_FREE (UINT32_C(0xf1)) -#define HWRM_WOL_FILTER_QCFG (UINT32_C(0xf2)) -#define HWRM_WOL_REASON_QCFG (UINT32_C(0xf3)) -#define HWRM_DBG_DUMP (UINT32_C(0xff14)) -#define HWRM_NVM_VALIDATE_OPTION (UINT32_C(0xffef)) -#define HWRM_NVM_FLUSH (UINT32_C(0xfff0)) -#define HWRM_NVM_GET_VARIABLE (UINT32_C(0xfff1)) -#define HWRM_NVM_SET_VARIABLE (UINT32_C(0xfff2)) -#define HWRM_NVM_INSTALL_UPDATE (UINT32_C(0xfff3)) -#define HWRM_NVM_MODIFY (UINT32_C(0xfff4)) -#define HWRM_NVM_VERIFY_UPDATE (UINT32_C(0xfff5)) -#define HWRM_NVM_GET_DEV_INFO (UINT32_C(0xfff6)) -#define HWRM_NVM_ERASE_DIR_ENTRY (UINT32_C(0xfff7)) -#define HWRM_NVM_MOD_DIR_ENTRY (UINT32_C(0xfff8)) -#define HWRM_NVM_FIND_DIR_ENTRY (UINT32_C(0xfff9)) -#define HWRM_NVM_GET_DIR_ENTRIES (UINT32_C(0xfffa)) -#define HWRM_NVM_GET_DIR_INFO (UINT32_C(0xfffb)) -#define HWRM_NVM_RAW_DUMP (UINT32_C(0xfffc)) -#define HWRM_NVM_READ (UINT32_C(0xfffd)) -#define HWRM_NVM_WRITE (UINT32_C(0xfffe)) -#define HWRM_NVM_RAW_WRITE_BLK (UINT32_C(0xffff)) +#define CMD_DISCR_TLV_ENCAP UINT32_C(0x8000) +#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP -/* - * Note: The Host Software Interface (HSI) and Hardware Resource Manager (HWRM) - * specification describes the data structures used in Ethernet packet or RDMA - * message data transfers as well as an abstract interface for managing Ethernet - * NIC hardware resources. - */ -/* Ethernet Data path Host Structures */ -/* - * Description: The following three sections document the host structures used - * between device and software drivers for communicating Ethernet packets. - */ -/* BD Ring Structures */ -/* - * Description: This structure is used to inform the NIC of a location for and - * an aggregation buffer that will be used for packet data that is received. An - * aggregation buffer creates a different kind of completion operation for a - * packet where a variable number of BDs may be used to place the packet in the - * host. RX Rings that have aggregation buffers are known as aggregation rings - * and must contain only aggregation buffers. - */ -/* Short TX BD (16 bytes) */ -struct tx_bd_short { - uint16_t flags_type; + +/* HWRM request message */ +#define TLV_TYPE_HWRM_REQUEST UINT32_C(0x1) +/* HWRM response message */ +#define TLV_TYPE_HWRM_RESPONSE UINT32_C(0x2) +/* RoCE slow path command */ +#define TLV_TYPE_ROCE_SP_COMMAND UINT32_C(0x3) +/* Engine CKV - The device's serial number. */ +#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER UINT32_C(0x8001) +/* Engine CKV - Per-function random nonce data. */ +#define TLV_TYPE_ENGINE_CKV_NONCE UINT32_C(0x8002) +/* Engine CKV - Initialization vector. */ +#define TLV_TYPE_ENGINE_CKV_IV UINT32_C(0x8003) +/* Engine CKV - Authentication tag. */ +#define TLV_TYPE_ENGINE_CKV_AUTH_TAG UINT32_C(0x8004) +/* Engine CKV - The encrypted data. 
*/ +#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT UINT32_C(0x8005) +/* Engine CKV - Supported algorithms. */ +#define TLV_TYPE_ENGINE_CKV_ALGORITHMS UINT32_C(0x8006) +/* Engine CKV - The EC curve name and ECC public key information. */ +#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY UINT32_C(0x8007) +/* Engine CKV - The ECDSA signature. */ +#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE UINT32_C(0x8008) +#define TLV_TYPE_LAST \ + TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE + + +/* tlv (size:64b/8B) */ +struct tlv { + /* + * The command discriminator is used to differentiate between various + * types of HWRM messages. This includes legacy HWRM and RoCE slowpath + * command messages as well as newer TLV encapsulated HWRM commands. + * + * For TLV encapsulated messages this field must be 0x8000. + */ + uint16_t cmd_discr; + uint8_t reserved_8b; + uint8_t flags; + /* + * Indicates the presence of additional TLV encapsulated data + * follows this TLV. + */ + #define TLV_FLAGS_MORE UINT32_C(0x1) + /* Last TLV in a sequence of TLVs. */ + #define TLV_FLAGS_MORE_LAST UINT32_C(0x0) + /* More TLVs follow this TLV. */ + #define TLV_FLAGS_MORE_NOT_LAST UINT32_C(0x1) + /* + * When an HWRM receiver detects a TLV type that it does not + * support with the TLV required flag set, the receiver must + * reject the HWRM message with an error code indicating an + * unsupported TLV type. + */ + #define TLV_FLAGS_REQUIRED UINT32_C(0x2) + /* No */ + #define TLV_FLAGS_REQUIRED_NO (UINT32_C(0x0) << 1) + /* Yes */ + #define TLV_FLAGS_REQUIRED_YES (UINT32_C(0x1) << 1) + #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES + /* + * This field defines the TLV type value which is divided into + * two ranges to differentiate between global and local TLV types. + * Global TLV types must be unique across all defined TLV types. + * Local TLV types are valid only for extensions to a given + * HWRM message and may be repeated across different HWRM message + * types. There is a direct correlation of each HWRM message type + * to a single global TLV type value. + * + * Global TLV range: `0 - (63k-1)` + * + * Local TLV range: `63k - (64k-1)` + */ + uint16_t tlv_type; + /* + * Length of the message data encapsulated by this TLV in bytes. + * This length does not include the size of the TLV header itself + * and it must be an integer multiple of 8B. + */ + uint16_t length; +} __attribute__((packed)); + +/* Input */ +/* input (size:128b/16B) */ +struct input { /* - * All bits in this field must be valid on the first BD of a - * packet. Only the packet_end bit must be valid for the - * remaining BDs of a packet. + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. */ - /* This value identifies the type of buffer descriptor. */ - #define TX_BD_SHORT_TYPE_MASK UINT32_C(0x3f) - #define TX_BD_SHORT_TYPE_SFT 0 + uint16_t req_type; /* - * Indicates that this BD is 16B long and is - * used for normal L2 packet transmission. + * This value indicates what completion ring the request will + * be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. */ - #define TX_BD_SHORT_TYPE_TX_BD_SHORT UINT32_C(0x0) + uint16_t cmpl_ring; + /* This value indicates the command sequence number. */ + uint16_t seq_id; /* - * If set to 1, the packet ends with the data in the buffer - * pointed to by this descriptor. This flag must be valid on - * every BD. + * Target ID of this command.
 /*
- * If set to 1, the packet ends with the data in the buffer
- * pointed to by this descriptor. This flag must be valid on
- * every BD.
+ * Target ID of this command.
+ *
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
 */
- #define TX_BD_SHORT_FLAGS_PACKET_END UINT32_C(0x40)
+ uint16_t target_id;
 /*
- * If set to 1, the device will not generate a completion for
- * this transmit packet unless there is an error in it's
- * processing. If this bit is set to 0, then the packet will be
- * completed normally. This bit must be valid only on the first
- * BD of a packet.
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
 */
- #define TX_BD_SHORT_FLAGS_NO_CMPL UINT32_C(0x80)
- /*
- * This value indicates how many 16B BD locations are consumed
- * in the ring by this packet. A value of 1 indicates that this
- * BD is the only BD (and that the it is a short BD). A value of
- * 3 indicates either 3 short BDs or 1 long BD and one short BD
- * in the packet. A value of 0 indicates that there are 32 BD
- * locations in the packet (the maximum). This field is valid
- * only on the first BD of a packet.
- */
- #define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
- #define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8
- /*
- * This value is a hint for the length of the entire packet. It
- * is used by the chip to optimize internal processing. The
- * packet will be dropped if the hint is too short. This field
- * is valid only on the first BD of a packet.
- */
- #define TX_BD_SHORT_FLAGS_LHINT_MASK UINT32_C(0x6000)
- #define TX_BD_SHORT_FLAGS_LHINT_SFT 13
- /* indicates packet length < 512B */
- #define TX_BD_SHORT_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
- /* indicates 512 <= packet length < 1KB */
- #define TX_BD_SHORT_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
- /* indicates 1KB <= packet length < 2KB */
- #define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
- /* indicates packet length >= 2KB */
- #define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
- #define TX_BD_SHORT_FLAGS_LHINT_LAST \
- TX_BD_SHORT_FLAGS_LHINT_GTE2K
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* Output */
+/* output (size:64b/8B) */
+struct output {
 /*
- * If set to 1, the device immediately updates the Send Consumer
- * Index after the buffer associated with this descriptor has
- * been transferred via DMA to NIC memory from host memory. An
- * interrupt may or may not be generated according to the state
- * of the interrupt avoidance mechanisms. If this bit is set to
- * 0, then the Consumer Index is only updated as soon as one of
- * the host interrupt coalescing conditions has been met. This
- * bit must be valid on the first BD of a packet.
+ * Pass/Fail or error type
+ *
+ * Note: the receiver is expected to verify the input parameters
+ * and fail the call with an error when appropriate
 */
- #define TX_BD_SHORT_FLAGS_COAL_NOW UINT32_C(0x8000)
+ uint16_t error_code;
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
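A minimal sketch (invented helper name; rte_cpu_to_le_*() byte-order conversions omitted) of how the common request header above might be filled for a request that wants no completion-ring entry and targets the HWRM itself:

static void
hwrm_input_init(struct input *req, uint16_t req_type,
                uint16_t seq_id, uint64_t resp_dma_addr)
{
        req->req_type = req_type;
        /* -1 requests that no CR completion be generated. */
        req->cmpl_ring = (uint16_t)-1;
        req->seq_id = seq_id;
        /* 0xFFFF addresses the HWRM rather than a function ID. */
        req->target_id = UINT16_C(0xffff);
        /* Must point at a 16B-aligned, pre-zeroed response area. */
        req->resp_addr = resp_dma_addr;
}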
 /*
- * All bits in this field must be valid on the first BD of a
- * packet. Only the packet_end bit must be valid for the
- * remaining BDs of a packet.
+ * This field is the length of the response in bytes. The
+ * last byte of the response is a valid flag that will read
+ * as '1' when the command has been completely written to
+ * memory.
 */
- #define TX_BD_SHORT_FLAGS_MASK UINT32_C(0xffc0)
- #define TX_BD_SHORT_FLAGS_SFT 6
- uint16_t len;
+ uint16_t resp_len;
+} __attribute__((packed));
+
+/* Short Command Structure */
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
 /*
- * This is the length of the host physical buffer this BD
- * describes in bytes. This field must be valid on all BDs of a
- * packet.
+ * This field indicates the type of request in the request buffer.
+ * The format for the rest of the command (request) is determined
+ * by this field.
 */
- uint32_t opaque;
+ uint16_t req_type;
 /*
- * The opaque data field is pass through to the completion and
- * can be used for any data that the driver wants to associate
- * with the transmit BD. This field must be valid on the first
- * BD of a packet.
+ * This field indicates a signature that is used to identify the
+ * short form of the command listed here. This field shall be set
+ * to 17185 (0x4321).
 */
- uint64_t addr;
+ uint16_t signature;
+ /* Signature indicating this is a short form of HWRM command */
+ #define HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD UINT32_C(0x4321)
+ #define HWRM_SHORT_INPUT_SIGNATURE_LAST \
+ HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD
+ /* Reserved for future use. */
+ uint16_t unused_0;
+ /* This value indicates the length of the request. */
+ uint16_t size;
 /*
- * This is the host physical address for the portion of the
- * packet described by this TX BD. This value must be valid on
- * all BDs of a packet.
+ * This is the host address where the request was written.
+ * This area must be 16B aligned.
 */
+ uint64_t req_addr;
 } __attribute__((packed));
-/* Long TX BD (32 bytes split to 2 16-byte struct) */
-struct tx_bd_long {
- uint16_t flags_type;
- /*
- * All bits in this field must be valid on the first BD of a
- * packet. Only the packet_end bit must be valid for the
- * remaining BDs of a packet.
+/*
+ * Command numbering
+ * # NOTE - definitions already in hwrm_req_type, in hwrm_types.yaml
+ * # So only structure definition is provided here.
+ */
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+ /*
+ * This version of the specification defines the commands listed in
+ * the table below. The following are general implementation
+ * requirements for these commands:
+ *
+ * # All commands listed below that are marked neither
+ * reserved nor experimental shall be implemented by the HWRM.
+ * # A HWRM client compliant with this specification should not use
+ * commands outside of the list below.
+ * # A HWRM client compliant with this specification should not use
+ * command numbers marked reserved below.
+ * # A command marked experimental below may not be implemented
+ * by the HWRM.
+ * # A command marked experimental may change in a
+ * future version of the HWRM specification.
+ * # A command not listed below may be implemented by the HWRM.
+ * The behavior of commands that are not listed below is outside
+ * the scope of this specification.
 */
- /* This value identifies the type of buffer descriptor. */
- #define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f)
- #define TX_BD_LONG_TYPE_SFT 0
+ uint16_t req_type;
+ #define HWRM_VER_GET UINT32_C(0x0)
+ #define HWRM_FUNC_BUF_UNRGTR UINT32_C(0xe)
+ #define HWRM_FUNC_VF_CFG UINT32_C(0xf)
+ /* Reserved for future use.
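To make the short-command flow concrete, here is a sketch (illustrative names, byte-order conversions elided) of building the 16B hwrm_short_input descriptor above for a full request already written to a DMA buffer:

static void
hwrm_short_cmd_init(struct hwrm_short_input *short_req, uint16_t req_type,
                    uint16_t req_len, uint64_t req_dma_addr)
{
        short_req->req_type = req_type;
        /* The fixed 0x4321 signature marks the short form. */
        short_req->signature = HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD;
        short_req->unused_0 = 0;
        short_req->size = req_len;
        /* The full request lives in a 16B-aligned host buffer. */
        short_req->req_addr = req_dma_addr;
}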
*/ + #define HWRM_RESERVED1 UINT32_C(0x10) + #define HWRM_FUNC_RESET UINT32_C(0x11) + #define HWRM_FUNC_GETFID UINT32_C(0x12) + #define HWRM_FUNC_VF_ALLOC UINT32_C(0x13) + #define HWRM_FUNC_VF_FREE UINT32_C(0x14) + #define HWRM_FUNC_QCAPS UINT32_C(0x15) + #define HWRM_FUNC_QCFG UINT32_C(0x16) + #define HWRM_FUNC_CFG UINT32_C(0x17) + #define HWRM_FUNC_QSTATS UINT32_C(0x18) + #define HWRM_FUNC_CLR_STATS UINT32_C(0x19) + #define HWRM_FUNC_DRV_UNRGTR UINT32_C(0x1a) + #define HWRM_FUNC_VF_RESC_FREE UINT32_C(0x1b) + #define HWRM_FUNC_VF_VNIC_IDS_QUERY UINT32_C(0x1c) + #define HWRM_FUNC_DRV_RGTR UINT32_C(0x1d) + #define HWRM_FUNC_DRV_QVER UINT32_C(0x1e) + #define HWRM_FUNC_BUF_RGTR UINT32_C(0x1f) + #define HWRM_PORT_PHY_CFG UINT32_C(0x20) + #define HWRM_PORT_MAC_CFG UINT32_C(0x21) + /* Experimental */ + #define HWRM_PORT_TS_QUERY UINT32_C(0x22) + #define HWRM_PORT_QSTATS UINT32_C(0x23) + #define HWRM_PORT_LPBK_QSTATS UINT32_C(0x24) + /* Experimental */ + #define HWRM_PORT_CLR_STATS UINT32_C(0x25) + /* Experimental */ + #define HWRM_PORT_LPBK_CLR_STATS UINT32_C(0x26) + #define HWRM_PORT_PHY_QCFG UINT32_C(0x27) + #define HWRM_PORT_MAC_QCFG UINT32_C(0x28) + /* Experimental */ + #define HWRM_PORT_MAC_PTP_QCFG UINT32_C(0x29) + #define HWRM_PORT_PHY_QCAPS UINT32_C(0x2a) + #define HWRM_PORT_PHY_I2C_WRITE UINT32_C(0x2b) + #define HWRM_PORT_PHY_I2C_READ UINT32_C(0x2c) + #define HWRM_PORT_LED_CFG UINT32_C(0x2d) + #define HWRM_PORT_LED_QCFG UINT32_C(0x2e) + #define HWRM_PORT_LED_QCAPS UINT32_C(0x2f) + #define HWRM_QUEUE_QPORTCFG UINT32_C(0x30) + #define HWRM_QUEUE_QCFG UINT32_C(0x31) + #define HWRM_QUEUE_CFG UINT32_C(0x32) + #define HWRM_FUNC_VLAN_CFG UINT32_C(0x33) + #define HWRM_FUNC_VLAN_QCFG UINT32_C(0x34) + #define HWRM_QUEUE_PFCENABLE_QCFG UINT32_C(0x35) + #define HWRM_QUEUE_PFCENABLE_CFG UINT32_C(0x36) + #define HWRM_QUEUE_PRI2COS_QCFG UINT32_C(0x37) + #define HWRM_QUEUE_PRI2COS_CFG UINT32_C(0x38) + #define HWRM_QUEUE_COS2BW_QCFG UINT32_C(0x39) + #define HWRM_QUEUE_COS2BW_CFG UINT32_C(0x3a) + /* Experimental */ + #define HWRM_QUEUE_DSCP_QCAPS UINT32_C(0x3b) + /* Experimental */ + #define HWRM_QUEUE_DSCP2PRI_QCFG UINT32_C(0x3c) + /* Experimental */ + #define HWRM_QUEUE_DSCP2PRI_CFG UINT32_C(0x3d) + #define HWRM_VNIC_ALLOC UINT32_C(0x40) + #define HWRM_VNIC_FREE UINT32_C(0x41) + #define HWRM_VNIC_CFG UINT32_C(0x42) + #define HWRM_VNIC_QCFG UINT32_C(0x43) + #define HWRM_VNIC_TPA_CFG UINT32_C(0x44) + /* Experimental */ + #define HWRM_VNIC_TPA_QCFG UINT32_C(0x45) + #define HWRM_VNIC_RSS_CFG UINT32_C(0x46) + #define HWRM_VNIC_RSS_QCFG UINT32_C(0x47) + #define HWRM_VNIC_PLCMODES_CFG UINT32_C(0x48) + #define HWRM_VNIC_PLCMODES_QCFG UINT32_C(0x49) + #define HWRM_VNIC_QCAPS UINT32_C(0x4a) + #define HWRM_RING_ALLOC UINT32_C(0x50) + #define HWRM_RING_FREE UINT32_C(0x51) + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS UINT32_C(0x52) + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS UINT32_C(0x53) + #define HWRM_RING_RESET UINT32_C(0x5e) + #define HWRM_RING_GRP_ALLOC UINT32_C(0x60) + #define HWRM_RING_GRP_FREE UINT32_C(0x61) + /* Reserved for future use. */ + #define HWRM_RESERVED5 UINT32_C(0x64) + /* Reserved for future use. 
*/ + #define HWRM_RESERVED6 UINT32_C(0x65) + #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC UINT32_C(0x70) + #define HWRM_VNIC_RSS_COS_LB_CTX_FREE UINT32_C(0x71) + #define HWRM_CFA_L2_FILTER_ALLOC UINT32_C(0x90) + #define HWRM_CFA_L2_FILTER_FREE UINT32_C(0x91) + #define HWRM_CFA_L2_FILTER_CFG UINT32_C(0x92) + #define HWRM_CFA_L2_SET_RX_MASK UINT32_C(0x93) + #define HWRM_CFA_VLAN_ANTISPOOF_CFG UINT32_C(0x94) + #define HWRM_CFA_TUNNEL_FILTER_ALLOC UINT32_C(0x95) + #define HWRM_CFA_TUNNEL_FILTER_FREE UINT32_C(0x96) + /* Experimental */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC UINT32_C(0x97) + /* Experimental */ + #define HWRM_CFA_ENCAP_RECORD_FREE UINT32_C(0x98) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC UINT32_C(0x99) + #define HWRM_CFA_NTUPLE_FILTER_FREE UINT32_C(0x9a) + #define HWRM_CFA_NTUPLE_FILTER_CFG UINT32_C(0x9b) + /* Experimental */ + #define HWRM_CFA_EM_FLOW_ALLOC UINT32_C(0x9c) + /* Experimental */ + #define HWRM_CFA_EM_FLOW_FREE UINT32_C(0x9d) + /* Experimental */ + #define HWRM_CFA_EM_FLOW_CFG UINT32_C(0x9e) + #define HWRM_TUNNEL_DST_PORT_QUERY UINT32_C(0xa0) + #define HWRM_TUNNEL_DST_PORT_ALLOC UINT32_C(0xa1) + #define HWRM_TUNNEL_DST_PORT_FREE UINT32_C(0xa2) + #define HWRM_STAT_CTX_ALLOC UINT32_C(0xb0) + #define HWRM_STAT_CTX_FREE UINT32_C(0xb1) + #define HWRM_STAT_CTX_QUERY UINT32_C(0xb2) + #define HWRM_STAT_CTX_CLR_STATS UINT32_C(0xb3) + #define HWRM_PORT_QSTATS_EXT UINT32_C(0xb4) + #define HWRM_FW_RESET UINT32_C(0xc0) + #define HWRM_FW_QSTATUS UINT32_C(0xc1) + /* Experimental */ + #define HWRM_FW_SET_TIME UINT32_C(0xc8) + /* Experimental */ + #define HWRM_FW_GET_TIME UINT32_C(0xc9) + /* Experimental */ + #define HWRM_FW_SET_STRUCTURED_DATA UINT32_C(0xca) + /* Experimental */ + #define HWRM_FW_GET_STRUCTURED_DATA UINT32_C(0xcb) + /* Experimental */ + #define HWRM_FW_IPC_MAILBOX UINT32_C(0xcc) + #define HWRM_EXEC_FWD_RESP UINT32_C(0xd0) + #define HWRM_REJECT_FWD_RESP UINT32_C(0xd1) + #define HWRM_FWD_RESP UINT32_C(0xd2) + #define HWRM_FWD_ASYNC_EVENT_CMPL UINT32_C(0xd3) + #define HWRM_OEM_CMD UINT32_C(0xd4) + #define HWRM_TEMP_MONITOR_QUERY UINT32_C(0xe0) + #define HWRM_WOL_FILTER_ALLOC UINT32_C(0xf0) + #define HWRM_WOL_FILTER_FREE UINT32_C(0xf1) + #define HWRM_WOL_FILTER_QCFG UINT32_C(0xf2) + #define HWRM_WOL_REASON_QCFG UINT32_C(0xf3) + /* Experimental */ + #define HWRM_CFA_METER_PROFILE_ALLOC UINT32_C(0xf5) + /* Experimental */ + #define HWRM_CFA_METER_PROFILE_FREE UINT32_C(0xf6) + /* Experimental */ + #define HWRM_CFA_METER_PROFILE_CFG UINT32_C(0xf7) + /* Experimental */ + #define HWRM_CFA_METER_INSTANCE_ALLOC UINT32_C(0xf8) + /* Experimental */ + #define HWRM_CFA_METER_INSTANCE_FREE UINT32_C(0xf9) + /* Experimental */ + #define HWRM_CFA_VFR_ALLOC UINT32_C(0xfd) + /* Experimental */ + #define HWRM_CFA_VFR_FREE UINT32_C(0xfe) + /* Experimental */ + #define HWRM_CFA_VF_PAIR_ALLOC UINT32_C(0x100) + /* Experimental */ + #define HWRM_CFA_VF_PAIR_FREE UINT32_C(0x101) + /* Experimental */ + #define HWRM_CFA_VF_PAIR_INFO UINT32_C(0x102) + /* Experimental */ + #define HWRM_CFA_FLOW_ALLOC UINT32_C(0x103) + /* Experimental */ + #define HWRM_CFA_FLOW_FREE UINT32_C(0x104) + /* Experimental */ + #define HWRM_CFA_FLOW_FLUSH UINT32_C(0x105) + /* Experimental */ + #define HWRM_CFA_FLOW_STATS UINT32_C(0x106) + /* Experimental */ + #define HWRM_CFA_FLOW_INFO UINT32_C(0x107) + /* Experimental */ + #define HWRM_CFA_DECAP_FILTER_ALLOC UINT32_C(0x108) + /* Experimental */ + #define HWRM_CFA_DECAP_FILTER_FREE UINT32_C(0x109) + #define HWRM_CFA_VLAN_ANTISPOOF_QCFG UINT32_C(0x10a) + #define 
HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC UINT32_C(0x10b) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE UINT32_C(0x10c) + /* Experimental */ + #define HWRM_CFA_PAIR_ALLOC UINT32_C(0x10d) + /* Experimental */ + #define HWRM_CFA_PAIR_FREE UINT32_C(0x10e) + /* Experimental */ + #define HWRM_CFA_PAIR_INFO UINT32_C(0x10f) + /* Experimental */ + #define HWRM_FW_IPC_MSG UINT32_C(0x110) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO UINT32_C(0x111) + /* Engine CKV - Ping the device and SRT firmware to get the public key. */ + #define HWRM_ENGINE_CKV_HELLO UINT32_C(0x12d) + /* Engine CKV - Get the current allocation status of keys provisioned in the key vault. */ + #define HWRM_ENGINE_CKV_STATUS UINT32_C(0x12e) + /* Engine CKV - Add a new CKEK used to encrypt keys. */ + #define HWRM_ENGINE_CKV_CKEK_ADD UINT32_C(0x12f) + /* Engine CKV - Delete a previously added CKEK. */ + #define HWRM_ENGINE_CKV_CKEK_DELETE UINT32_C(0x130) + /* Engine CKV - Add a new key to the key vault. */ + #define HWRM_ENGINE_CKV_KEY_ADD UINT32_C(0x131) + /* Engine CKV - Delete a key from the key vault. */ + #define HWRM_ENGINE_CKV_KEY_DELETE UINT32_C(0x132) + /* Engine CKV - Delete all keys from the key vault. */ + #define HWRM_ENGINE_CKV_FLUSH UINT32_C(0x133) + /* Engine CKV - Get random data. */ + #define HWRM_ENGINE_CKV_RNG_GET UINT32_C(0x134) + /* Engine CKV - Generate and encrypt a new AES key. */ + #define HWRM_ENGINE_CKV_KEY_GEN UINT32_C(0x135) + /* Engine - Query the available queue groups configuration. */ + #define HWRM_ENGINE_QG_CONFIG_QUERY UINT32_C(0x13c) + /* Engine - Query the queue groups assigned to a function. */ + #define HWRM_ENGINE_QG_QUERY UINT32_C(0x13d) + /* Engine - Query the available queue group meter profile configuration. */ + #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY UINT32_C(0x13e) + /* Engine - Query the configuration of a queue group meter profile. */ + #define HWRM_ENGINE_QG_METER_PROFILE_QUERY UINT32_C(0x13f) + /* Engine - Allocate a queue group meter profile. */ + #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC UINT32_C(0x140) + /* Engine - Free a queue group meter profile. */ + #define HWRM_ENGINE_QG_METER_PROFILE_FREE UINT32_C(0x141) + /* Engine - Query the meters assigned to a queue group. */ + #define HWRM_ENGINE_QG_METER_QUERY UINT32_C(0x142) + /* Engine - Bind a queue group meter profile to a queue group. */ + #define HWRM_ENGINE_QG_METER_BIND UINT32_C(0x143) + /* Engine - Unbind a queue group meter profile from a queue group. */ + #define HWRM_ENGINE_QG_METER_UNBIND UINT32_C(0x144) + /* Engine - Bind a queue group to a function. */ + #define HWRM_ENGINE_QG_FUNC_BIND UINT32_C(0x145) + /* Engine - Query the scheduling group configuration. */ + #define HWRM_ENGINE_SG_CONFIG_QUERY UINT32_C(0x146) + /* Engine - Query the queue groups assigned to a scheduling group. */ + #define HWRM_ENGINE_SG_QUERY UINT32_C(0x147) + /* Engine - Query the configuration of a scheduling group's meter profiles. */ + #define HWRM_ENGINE_SG_METER_QUERY UINT32_C(0x148) + /* Engine - Configure a scheduling group's meter profiles. */ + #define HWRM_ENGINE_SG_METER_CONFIG UINT32_C(0x149) + /* Engine - Bind a queue group to a scheduling group. */ + #define HWRM_ENGINE_SG_QG_BIND UINT32_C(0x14a) + /* Engine - Unbind a queue group from its scheduling group. */ + #define HWRM_ENGINE_QG_SG_UNBIND UINT32_C(0x14b) + /* Engine - Query the Engine configuration. */ + #define HWRM_ENGINE_CONFIG_QUERY UINT32_C(0x154) + /* Engine - Configure the statistics accumulator for an Engine. 
*/ + #define HWRM_ENGINE_STATS_CONFIG UINT32_C(0x155) + /* Engine - Clear the statistics accumulator for an Engine. */ + #define HWRM_ENGINE_STATS_CLEAR UINT32_C(0x156) + /* Engine - Query the statistics accumulator for an Engine. */ + #define HWRM_ENGINE_STATS_QUERY UINT32_C(0x157) + /* Engine - Allocate an Engine RQ. */ + #define HWRM_ENGINE_RQ_ALLOC UINT32_C(0x15e) + /* Engine - Free an Engine RQ. */ + #define HWRM_ENGINE_RQ_FREE UINT32_C(0x15f) + /* Engine - Allocate an Engine CQ. */ + #define HWRM_ENGINE_CQ_ALLOC UINT32_C(0x160) + /* Engine - Free an Engine CQ. */ + #define HWRM_ENGINE_CQ_FREE UINT32_C(0x161) + /* Engine - Allocate an NQ. */ + #define HWRM_ENGINE_NQ_ALLOC UINT32_C(0x162) + /* Engine - Free an NQ. */ + #define HWRM_ENGINE_NQ_FREE UINT32_C(0x163) + /* Engine - Set the on-die RQE credit update location. */ + #define HWRM_ENGINE_ON_DIE_RQE_CREDITS UINT32_C(0x164) + /* Experimental */ + #define HWRM_FUNC_RESOURCE_QCAPS UINT32_C(0x190) + /* Experimental */ + #define HWRM_FUNC_VF_RESOURCE_CFG UINT32_C(0x191) + /* Experimental */ + #define HWRM_FUNC_BACKING_STORE_QCAPS UINT32_C(0x192) + /* Experimental */ + #define HWRM_FUNC_BACKING_STORE_CFG UINT32_C(0x193) + /* Experimental */ + #define HWRM_FUNC_BACKING_STORE_QCFG UINT32_C(0x194) + /* Experimental */ + #define HWRM_SELFTEST_QLIST UINT32_C(0x200) + /* Experimental */ + #define HWRM_SELFTEST_EXEC UINT32_C(0x201) + /* Experimental */ + #define HWRM_SELFTEST_IRQ UINT32_C(0x202) + /* Experimental */ + #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA UINT32_C(0x203) + /* Experimental */ + #define HWRM_PCIE_QSTATS UINT32_C(0x204) + /* Experimental */ + #define HWRM_DBG_READ_DIRECT UINT32_C(0xff10) + /* Experimental */ + #define HWRM_DBG_READ_INDIRECT UINT32_C(0xff11) + /* Experimental */ + #define HWRM_DBG_WRITE_DIRECT UINT32_C(0xff12) + /* Experimental */ + #define HWRM_DBG_WRITE_INDIRECT UINT32_C(0xff13) + #define HWRM_DBG_DUMP UINT32_C(0xff14) + /* Experimental */ + #define HWRM_DBG_ERASE_NVM UINT32_C(0xff15) + /* Experimental */ + #define HWRM_DBG_CFG UINT32_C(0xff16) + /* Experimental */ + #define HWRM_DBG_COREDUMP_LIST UINT32_C(0xff17) + /* Experimental */ + #define HWRM_DBG_COREDUMP_INITIATE UINT32_C(0xff18) + /* Experimental */ + #define HWRM_DBG_COREDUMP_RETRIEVE UINT32_C(0xff19) + /* */ + #define HWRM_DBG_I2C_CMD UINT32_C(0xff1b) + /* Experimental */ + #define HWRM_NVM_FACTORY_DEFAULTS UINT32_C(0xffee) + #define HWRM_NVM_VALIDATE_OPTION UINT32_C(0xffef) + #define HWRM_NVM_FLUSH UINT32_C(0xfff0) + #define HWRM_NVM_GET_VARIABLE UINT32_C(0xfff1) + #define HWRM_NVM_SET_VARIABLE UINT32_C(0xfff2) + #define HWRM_NVM_INSTALL_UPDATE UINT32_C(0xfff3) + #define HWRM_NVM_MODIFY UINT32_C(0xfff4) + #define HWRM_NVM_VERIFY_UPDATE UINT32_C(0xfff5) + #define HWRM_NVM_GET_DEV_INFO UINT32_C(0xfff6) + #define HWRM_NVM_ERASE_DIR_ENTRY UINT32_C(0xfff7) + #define HWRM_NVM_MOD_DIR_ENTRY UINT32_C(0xfff8) + #define HWRM_NVM_FIND_DIR_ENTRY UINT32_C(0xfff9) + #define HWRM_NVM_GET_DIR_ENTRIES UINT32_C(0xfffa) + #define HWRM_NVM_GET_DIR_INFO UINT32_C(0xfffb) + #define HWRM_NVM_RAW_DUMP UINT32_C(0xfffc) + #define HWRM_NVM_READ UINT32_C(0xfffd) + #define HWRM_NVM_WRITE UINT32_C(0xfffe) + #define HWRM_NVM_RAW_WRITE_BLK UINT32_C(0xffff) + #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK + uint16_t unused_0[3]; +} __attribute__((packed)); + +/* Return Codes */ +/* ret_codes (size:64b/8B) */ +struct ret_codes { + uint16_t error_code; + /* Request was successfully executed by the HWRM. 
*/ + #define HWRM_ERR_CODE_SUCCESS UINT32_C(0x0) + /* The HWRM failed to execute the request. */ + #define HWRM_ERR_CODE_FAIL UINT32_C(0x1) /* - * Indicates that this BD is 32B long and is - * used for normal L2 packet transmission. + * The request contains invalid argument(s) or input + * parameters. */ - #define TX_BD_LONG_TYPE_TX_BD_LONG UINT32_C(0x10) + #define HWRM_ERR_CODE_INVALID_PARAMS UINT32_C(0x2) /* - * If set to 1, the packet ends with the data in the buffer - * pointed to by this descriptor. This flag must be valid on - * every BD. + * The requester is not allowed to access the requested + * resource. This error code shall be provided in a + * response to a request to query or modify an existing + * resource that is not accessible by the requester. */ - #define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40) + #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED UINT32_C(0x3) /* - * If set to 1, the device will not generate a completion for - * this transmit packet unless there is an error in it's - * processing. If this bit is set to 0, then the packet will be - * completed normally. This bit must be valid only on the first - * BD of a packet. + * The HWRM is unable to allocate the requested resource. + * This code only applies to requests for HWRM resource + * allocations. */ - #define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80) - /* - * This value indicates how many 16B BD locations are consumed - * in the ring by this packet. A value of 1 indicates that this - * BD is the only BD (and that the it is a short BD). A value of - * 3 indicates either 3 short BDs or 1 long BD and one short BD - * in the packet. A value of 0 indicates that there are 32 BD - * locations in the packet (the maximum). This field is valid - * only on the first BD of a packet. - */ - #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00) - #define TX_BD_LONG_FLAGS_BD_CNT_SFT 8 - /* - * This value is a hint for the length of the entire packet. It - * is used by the chip to optimize internal processing. The - * packet will be dropped if the hint is too short. This field - * is valid only on the first BD of a packet. - */ - #define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000) - #define TX_BD_LONG_FLAGS_LHINT_SFT 13 - /* indicates packet length < 512B */ - #define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13) - /* indicates 512 <= packet length < 1KB */ - #define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13) - /* indicates 1KB <= packet length < 2KB */ - #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13) - /* indicates packet length >= 2KB */ - #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13) - #define TX_BD_LONG_FLAGS_LHINT_LAST \ - TX_BD_LONG_FLAGS_LHINT_GTE2K + #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR UINT32_C(0x4) /* - * If set to 1, the device immediately updates the Send Consumer - * Index after the buffer associated with this descriptor has - * been transferred via DMA to NIC memory from host memory. An - * interrupt may or may not be generated according to the state - * of the interrupt avoidance mechanisms. If this bit is set to - * 0, then the Consumer Index is only updated as soon as one of - * the host interrupt coalescing conditions has been met. This - * bit must be valid on the first BD of a packet. + * Invalid combination of flags is specified in the + * request. */ - #define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000) + #define HWRM_ERR_CODE_INVALID_FLAGS UINT32_C(0x5) /* - * All bits in this field must be valid on the first BD of a - * packet. 
Only the packet_end bit must be valid for the
- * remaining BDs of a packet.
+ * Invalid combination of enables fields is specified in
+ * the request.
 */
- #define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0)
- #define TX_BD_LONG_FLAGS_SFT 6
- uint16_t len;
+ #define HWRM_ERR_CODE_INVALID_ENABLES UINT32_C(0x6)
 /*
- * This is the length of the host physical buffer this BD
- * describes in bytes. This field must be valid on all BDs of a
- * packet.
+ * Request contains a required TLV that is not supported by
+ * the installed version of firmware.
 */
- uint32_t opaque;
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV UINT32_C(0x7)
 /*
- * The opaque data field is pass through to the completion and
- * can be used for any data that the driver wants to associate
- * with the transmit BD. This field must be valid on the first
- * BD of a packet.
+ * No firmware buffer available to accept the request. The driver
+ * should retry the request.
 */
- uint64_t addr;
+ #define HWRM_ERR_CODE_NO_BUFFER UINT32_C(0x8)
 /*
- * This is the host physical address for the portion of the
- * packet described by this TX BD. This value must be valid on
- * all BDs of a packet.
+ * Generic HWRM execution error that represents an
+ * internal error.
 */
+ #define HWRM_ERR_CODE_HWRM_ERROR UINT32_C(0xf)
+ /* Unknown error */
+ #define HWRM_ERR_CODE_UNKNOWN_ERR UINT32_C(0xfffe)
+ /* Unsupported or invalid command */
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED UINT32_C(0xffff)
+ #define HWRM_ERR_CODE_LAST \
+ HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ uint16_t unused_0[3];
 } __attribute__((packed));
-/* last 16 bytes of Long TX BD */
-struct tx_bd_long_hi {
- uint16_t lflags;
- /*
- * All bits in this field must be valid on the first BD of a
- * packet. Their value on other BDs of the packet will be
- * ignored.
- */
- /*
- * If set to 1, the controller replaces the TCP/UPD checksum
- * fields of normal TCP/UPD checksum, or the inner TCP/UDP
- * checksum field of the encapsulated TCP/UDP packets with the
- * hardware calculated TCP/UDP checksum for the packet
- * associated with this descriptor. The flag is ignored if the
- * LSO flag is set. This bit must be valid on the first BD of a
- * packet.
- */
- #define TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
+/* Output */
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
 /*
- * If set to 1, the controller replaces the IP checksum of the
- * normal packets, or the inner IP checksum of the encapsulated
- * packets with the hardware calculated IP checksum for the
- * packet associated with this descriptor. This bit must be
- * valid on the first BD of a packet.
+ * Pass/Fail or error type
+ *
+ * Note: the receiver is expected to verify the input parameters
+ * and fail the call with an error when appropriate
 */
- #define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2)
+ uint16_t error_code;
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
 /*
- * If set to 1, the controller will not append an Ethernet CRC
- * to the end of the frame. This bit must be valid on the first
- * BD of a packet. Packet must be 64B or longer when this flag
- * is set. It is not useful to use this bit with any form of TX
- * offload such as CSO or LSO. The intent is that the packet
- * from the host already has a valid Ethernet CRC on the packet.
+ * This field is the length of the response in bytes. The
+ * last byte of the response is a valid flag that will read
+ * as '1' when the command has been completely written to
+ * memory.
 */
- #define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4)
+ uint16_t resp_len;
+ /* debug info for this error response. */
+ uint32_t opaque_0;
+ /* debug info for this error response. */
+ uint16_t opaque_1;
 /*
- * If set to 1, the device will record the time at which the
- * packet was actually transmitted at the TX MAC. This bit must
- * be valid on the first BD of a packet.
+ * In the case of an error response, a command specific error
+ * code is returned in this field.
 */
- #define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8)
+ uint8_t cmd_err;
 /*
- * If set to 1, The controller replaces the tunnel IP checksum
- * field with hardware calculated IP checksum for the IP header
- * of the packet associated with this descriptor. For outer UDP
- * checksum, global outer UDP checksum TE_NIC register needs to
- * be enabled. If the global outer UDP checksum TE_NIC register
- * bit is set, outer UDP checksum will be calculated for the
- * following cases: 1. Packets with tcp_udp_chksum flag set to
- * offload checksum for inner packet AND the inner packet is
- * TCP/UDP. If the inner packet is ICMP for example (non-
- * TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP
- * checksum will not be calculated. 2. Packets with lso flag set
- * which implies inner TCP checksum calculation as part of LSO
- * operation.
- */
- #define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
- /*
- * If set to 1, the device will treat this packet with LSO(Large
- * Send Offload) processing for both normal or encapsulated
- * packets, which is a form of TCP segmentation. When this bit
- * is 1, the hdr_size and mss fields must be valid. The driver
- * doesn't need to set t_ip_chksum, ip_chksum, and
- * tcp_udp_chksum flags since the controller will replace the
- * appropriate checksum fields for segmented packets. When this
- * bit is 1, the hdr_size and mss fields must be valid.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
- #define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20)
+ uint8_t valid;
+} __attribute__((packed));
+/*
+ * Following is the signature for HWRM message field that indicates not
+ * applicable (All F's). Cast it to the size of the field if needed.
+ */
+#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
+/* hwrm_func_buf_rgtr */
+#define HWRM_MAX_REQ_LEN 128
+/* hwrm_selftest_qlist */
+#define HWRM_MAX_RESP_LEN 280
+/* 7 bit indirection table index. */
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+/* valid key for HWRM response */
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 9
+#define HWRM_VERSION_UPDATE 2
+/* non-zero means beta version */
+#define HWRM_VERSION_RSVD 9
+#define HWRM_VERSION_STR "1.9.2.9"
+
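The error codes and the trailing valid byte suggest the following response-handling sketch. It is a simplified illustration assuming the 16B hwrm_err_output layout: a real driver bounds the poll with a timeout, adds the needed read barriers, and checks the valid byte at resp_len - 1 for longer responses; the errno mapping is an invented policy, not part of the spec.

#include <errno.h>

static int
hwrm_check_response(volatile struct hwrm_err_output *resp)
{
        /* The HWRM writes the valid byte last. */
        while (resp->valid != HWRM_RESP_VALID_KEY)
                ; /* spin; a real driver would time out here */

        switch (resp->error_code) {
        case HWRM_ERR_CODE_SUCCESS:
                return 0;
        case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
                return -ENOMEM;
        case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        default:
                return -EIO;
        }
}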
+/****************
+ * hwrm_ver_get *
+ ****************/
+
+
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * If set to zero when LSO is '1', then the IPID will be treated
- * as a 16b number and will be wrapped if it exceeds a value of
- * 0xffff. If set to one when LSO is '1', then the IPID will be
- * treated as a 15b number and will be wrapped if it exceeds a
- * value 0f 0x7fff.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
 */
- #define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40)
+ uint16_t cmpl_ring;
 /*
- * If set to zero when LSO is '1', then the IPID of the tunnel
- * IP header will not be modified during LSO operations. If set
- * to one when LSO is '1', then the IPID of the tunnel IP header
- * will be incremented for each subsequent segment of an LSO
- * operation. The flag is ignored if the LSO packet is a normal
- * (non-tunneled) TCP packet.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
 */
- #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80)
+ uint16_t seq_id;
 /*
- * If set to '1', then the RoCE ICRC will be appended to the
- * packet. Packet must be a valid RoCE format packet.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100)
+ uint16_t target_id;
 /*
- * If set to '1', then the FCoE CRC will be appended to the
- * packet. Packet must be a valid FCoE format packet.
+ * A physical address pointer pointing to a host buffer into which
+ * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200)
- uint16_t hdr_size;
+ uint64_t resp_addr;
 /*
- * When LSO is '1', this field must contain the offset of the
- * TCP payload from the beginning of the packet in as 16b words.
- * In case of encapsulated/tunneling packet, this field contains
- * the offset of the inner TCP payload from beginning of the
- * packet as 16-bit words. This value must be valid on the first
- * BD of a packet.
+ * This field represents the major version of HWRM interface
+ * specification supported by the driver HWRM implementation.
+ * The interface major version is intended to change only when
+ * non backward compatible changes are made to the HWRM
+ * interface specification.
 */
- #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff)
- #define TX_BD_LONG_HDR_SIZE_SFT 0
- uint32_t mss;
+ uint8_t hwrm_intf_maj;
 /*
- * This is the MSS value that will be used to do the LSO
- * processing. The value is the length in bytes of the TCP
- * payload for each segment generated by the LSO operation. This
- * value must be valid on the first BD of a packet.
+ * This field represents the minor version of HWRM interface
+ * specification supported by the driver HWRM implementation.
+ * A change in interface minor version is used to reflect
+ * significant backward compatible modification to HWRM
+ * interface specification.
+ * This can be due to addition or removal of functionality.
+ * HWRM interface specifications with the same major version
+ * but different minor versions are compatible.
 */
- #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff)
- #define TX_BD_LONG_MSS_SFT 0
- uint16_t unused_2;
- uint16_t cfa_action;
+ uint8_t hwrm_intf_min;
 /*
- * This value selects a CFA action to perform on the packet. Set
- * this value to zero if no CFA action is desired. This value
- * must be valid on the first BD of a packet.
+ * This field represents the update version of HWRM interface + * specification supported by the driver HWRM implementation. + * The interface update version is used to reflect minor + * changes or bug fixes to a released HWRM interface + * specification. */ - uint32_t cfa_meta; + uint8_t hwrm_intf_upd; + uint8_t unused_0[5]; +} __attribute__((packed)); + +/* hwrm_ver_get_output (size:1408b/176B) */ +struct hwrm_ver_get_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; /* - * This value is action meta-data that defines CFA edit - * operations that are done in addition to any action editing. + * This field represents the major version of HWRM interface + * specification supported by the HWRM implementation. + * The interface major version is intended to change only when + * non backward compatible changes are made to the HWRM + * interface specification. + * A HWRM implementation that is compliant with this + * specification shall provide value of 1 in this field. */ - /* When key=1, This is the VLAN tag VID value. */ - #define TX_BD_LONG_CFA_META_VLAN_VID_MASK UINT32_C(0xfff) - #define TX_BD_LONG_CFA_META_VLAN_VID_SFT 0 - /* When key=1, This is the VLAN tag DE value. */ - #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000) - /* When key=1, This is the VLAN tag PRI value. */ - #define TX_BD_LONG_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000) - #define TX_BD_LONG_CFA_META_VLAN_PRI_SFT 13 - /* When key=1, This is the VLAN tag TPID select value. */ - #define TX_BD_LONG_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000) - #define TX_BD_LONG_CFA_META_VLAN_TPID_SFT 16 - /* 0x88a8 */ - #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16) - /* 0x8100 */ - #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16) - /* 0x9100 */ - #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16) - /* 0x9200 */ - #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16) - /* 0x9300 */ - #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16) - /* Value programmed in CFA VLANTPID register. */ - #define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16) - #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \ - TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG - /* When key=1, This is the VLAN tag TPID select value. */ - #define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000) - #define TX_BD_LONG_CFA_META_VLAN_RESERVED_SFT 19 + uint8_t hwrm_intf_maj_8b; /* - * This field identifies the type of edit to be performed on the - * packet. This value must be valid on the first BD of a packet. + * This field represents the minor version of HWRM interface + * specification supported by the HWRM implementation. + * A change in interface minor version is used to reflect + * significant backward compatible modification to HWRM + * interface specification. + * This can be due to addition or removal of functionality. + * HWRM interface specifications with the same major version + * but different minor versions are compatible. + * A HWRM implementation that is compliant with this + * specification shall provide value of 2 in this field. 
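Tying the pieces together, a VER_GET exchange might be prepared as in this sketch (illustrative only; the common header fields would be filled as in the struct input example earlier):

static void
hwrm_ver_get_prep(struct hwrm_ver_get_input *req)
{
        req->req_type = HWRM_VER_GET;
        /* Advertise the interface version these definitions target. */
        req->hwrm_intf_maj = HWRM_VERSION_MAJOR;   /* 1 */
        req->hwrm_intf_min = HWRM_VERSION_MINOR;   /* 9 */
        req->hwrm_intf_upd = HWRM_VERSION_UPDATE;  /* 2 */
}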
*/ - #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000) - #define TX_BD_LONG_CFA_META_KEY_SFT 28 - /* No editing */ - #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28) + uint8_t hwrm_intf_min_8b; /* - * - meta[17:16] - TPID select value (0 = - * 0x8100). - meta[15:12] - PRI/DE value. - - * meta[11:0] - VID value. + * This field represents the update version of HWRM interface + * specification supported by the HWRM implementation. + * The interface update version is used to reflect minor + * changes or bug fixes to a released HWRM interface + * specification. + * A HWRM implementation that is compliant with this + * specification shall provide value of 2 in this field. */ - #define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28) - #define TX_BD_LONG_CFA_META_KEY_LAST \ - TX_BD_LONG_CFA_META_KEY_VLAN_TAG -} __attribute__((packed)); - -/* RX Producer Packet BD (16 bytes) */ -struct rx_prod_pkt_bd { - uint16_t flags_type; - /* This value identifies the type of buffer descriptor. */ - #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f) - #define RX_PROD_PKT_BD_TYPE_SFT 0 + uint8_t hwrm_intf_upd_8b; + uint8_t hwrm_intf_rsvd_8b; /* - * Indicates that this BD is 16B long and is an - * RX Producer (ie. empty) buffer descriptor. + * This field represents the major version of HWRM firmware. + * A change in firmware major version represents a major + * firmware release. */ - #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT UINT32_C(0x4) + uint8_t hwrm_fw_maj_8b; /* - * If set to 1, the packet will be placed at the address plus - * 2B. The 2 Bytes of padding will be written as zero. + * This field represents the minor version of HWRM firmware. + * A change in firmware minor version represents significant + * firmware functionality changes. */ + uint8_t hwrm_fw_min_8b; /* - * This is intended to be used when the host buffer is cache- - * line aligned to produce packets that are easy to parse in - * host memory while still allowing writes to be cache line - * aligned. + * This field represents the build version of HWRM firmware. + * A change in firmware build version represents bug fixes + * to a released firmware. */ - #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40) + uint8_t hwrm_fw_bld_8b; /* - * If set to 1, the packet write will be padded out to the - * nearest cache-line with zero value padding. + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version of the + * HWRM firmware. */ + uint8_t hwrm_fw_rsvd_8b; /* - * If receive buffers start/end on cache-line boundaries, this - * feature will ensure that all data writes on the PCI bus - * start/end on cache line boundaries. + * This field represents the major version of mgmt firmware. + * A change in major version represents a major release. */ - #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80) + uint8_t mgmt_fw_maj_8b; /* - * This value is the number of additional buffers in the ring - * that describe the buffer space to be consumed for the this - * packet. If the value is zero, then the packet must fit within - * the space described by this BD. If this value is 1 or more, - * it indicates how many additional "buffer" BDs are in the ring - * immediately following this BD to be used for the same network - * packet. Even if the packet to be placed does not need all the - * additional buffers, they will be consumed anyway. + * This field represents the minor version of mgmt firmware. 
+ * A change in minor version represents significant + * functionality changes. */ - #define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK UINT32_C(0x300) - #define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8 - #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0) - #define RX_PROD_PKT_BD_FLAGS_SFT 6 - uint16_t len; + uint8_t mgmt_fw_min_8b; /* - * This is the length in Bytes of the host physical buffer where - * data for the packet may be placed in host memory. + * This field represents the build version of mgmt firmware. + * A change in update version represents bug fixes. */ + uint8_t mgmt_fw_bld_8b; /* - * While this is a Byte resolution value, it is often - * advantageous to ensure that the buffers provided end on a - * host cache line. + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version */ - uint32_t opaque; + uint8_t mgmt_fw_rsvd_8b; /* - * The opaque data field is pass through to the completion and - * can be used for any data that the driver wants to associate - * with this receive buffer set. + * This field represents the major version of network + * control firmware. + * A change in major version represents a major release. */ - uint64_t addr; + uint8_t netctrl_fw_maj_8b; /* - * This is the host physical address where data for the packet - * may by placed in host memory. + * This field represents the minor version of network + * control firmware. + * A change in minor version represents significant + * functionality changes. */ + uint8_t netctrl_fw_min_8b; /* - * While this is a Byte resolution value, it is often - * advantageous to ensure that the buffers provide start on a - * host cache line. + * This field represents the build version of network + * control firmware. + * A change in update version represents bug fixes. */ -} __attribute__((packed)); - -/* Completion Ring Structures */ -/* Note: This structure is used by the HWRM to communicate HWRM Error. */ -/* Base Completion Record (16 bytes) */ -struct cmpl_base { - uint16_t type; - /* unused is 10 b */ + uint8_t netctrl_fw_bld_8b; /* - * This field indicates the exact type of the completion. By - * convention, the LSB identifies the length of the record in - * 16B units. Even values indicate 16B records. Odd values - * indicate 32B records. + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version */ - #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f) - #define CMPL_BASE_TYPE_SFT 0 - /* TX L2 completion: Completion of TX packet. Length = 16B */ - #define CMPL_BASE_TYPE_TX_L2 UINT32_C(0x0) + uint8_t netctrl_fw_rsvd_8b; /* - * RX L2 completion: Completion of and L2 RX - * packet. Length = 32B + * This field is used to indicate device's capabilities and + * configurations. */ - #define CMPL_BASE_TYPE_RX_L2 UINT32_C(0x11) + uint32_t dev_caps_cfg; /* - * RX Aggregation Buffer completion : Completion - * of an L2 aggregation buffer in support of - * TPA, HDS, or Jumbo packet completion. Length - * = 16B + * If set to 1, then secure firmware update behavior + * is supported. + * If set to 0, then secure firmware update behavior is + * not supported. */ - #define CMPL_BASE_TYPE_RX_AGG UINT32_C(0x12) + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED \ + UINT32_C(0x1) /* - * RX L2 TPA Start Completion: Completion at the - * beginning of a TPA operation. 
Length = 32B + * If set to 1, then firmware based DCBX agent is supported. + * If set to 0, then firmware based DCBX agent capability + * is not supported on this device. */ - #define CMPL_BASE_TYPE_RX_TPA_START UINT32_C(0x13) + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED \ + UINT32_C(0x2) /* - * RX L2 TPA End Completion: Completion at the - * end of a TPA operation. Length = 32B + * If set to 1, then HWRM short command format is supported. + * If set to 0, then HWRM short command format is not supported. */ - #define CMPL_BASE_TYPE_RX_TPA_END UINT32_C(0x15) + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED \ + UINT32_C(0x4) /* - * Statistics Ejection Completion: Completion of - * statistics data ejection buffer. Length = 16B + * If set to 1, then HWRM short command format is required. + * If set to 0, then HWRM short command format is not required. */ - #define CMPL_BASE_TYPE_STAT_EJECT UINT32_C(0x1a) - /* HWRM Command Completion: Completion of an HWRM command. */ - #define CMPL_BASE_TYPE_HWRM_DONE UINT32_C(0x20) - /* Forwarded HWRM Request */ - #define CMPL_BASE_TYPE_HWRM_FWD_REQ UINT32_C(0x22) - /* Forwarded HWRM Response */ - #define CMPL_BASE_TYPE_HWRM_FWD_RESP UINT32_C(0x24) - /* HWRM Asynchronous Event Information */ - #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e) - /* CQ Notification */ - #define CMPL_BASE_TYPE_CQ_NOTIFICATION UINT32_C(0x30) - /* SRQ Threshold Event */ - #define CMPL_BASE_TYPE_SRQ_EVENT UINT32_C(0x32) - /* DBQ Threshold Event */ - #define CMPL_BASE_TYPE_DBQ_EVENT UINT32_C(0x34) - /* QP Async Notification */ - #define CMPL_BASE_TYPE_QP_EVENT UINT32_C(0x38) - /* Function Async Notification */ - #define CMPL_BASE_TYPE_FUNC_EVENT UINT32_C(0x3a) - /* unused is 10 b */ - uint16_t info1; - /* info1 is 16 b */ - uint32_t info2; - /* info2 is 32 b */ - uint32_t info3_v; - /* info3 is 31 b */ - /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. - */ - #define CMPL_BASE_V UINT32_C(0x1) - /* info3 is 31 b */ - #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe) - #define CMPL_BASE_INFO3_SFT 1 - uint32_t info4; - /* info4 is 32 b */ -} __attribute__((packed)); - -/* TX Completion Record (16 bytes) */ -struct tx_cmpl { - uint16_t flags_type; + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED \ + UINT32_C(0x8) /* - * This field indicates the exact type of the completion. By - * convention, the LSB identifies the length of the record in - * 16B units. Even values indicate 16B records. Odd values - * indicate 32B records. + * This field represents the major version of RoCE firmware. + * A change in major version represents a major release. */ - #define TX_CMPL_TYPE_MASK UINT32_C(0x3f) - #define TX_CMPL_TYPE_SFT 0 - /* TX L2 completion: Completion of TX packet. Length = 16B */ - #define TX_CMPL_TYPE_TX_L2 UINT32_C(0x0) + uint8_t roce_fw_maj_8b; /* - * When this bit is '1', it indicates a packet that has an error - * of some type. Type of error is indicated in error_flags. + * This field represents the minor version of RoCE firmware. + * A change in minor version represents significant + * functionality changes. */ - #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40) + uint8_t roce_fw_min_8b; /* - * When this bit is '1', it indicates that the packet completed - * was transmitted using the push acceleration data provided by - * the driver. 
When this bit is '0', it indicates that the - * packet had not push acceleration data written or was executed - * as a normal packet even though push data was provided. + * This field represents the build version of RoCE firmware. + * A change in update version represents bug fixes. */ - #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80) - #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0) - #define TX_CMPL_FLAGS_SFT 6 - uint16_t unused_0; - /* unused1 is 16 b */ - uint32_t opaque; + uint8_t roce_fw_bld_8b; /* - * This is a copy of the opaque field from the first TX BD of - * this transmitted packet. + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version */ - uint16_t errors_v; + uint8_t roce_fw_rsvd_8b; /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. + * This field represents the name of HWRM FW (ASCII chars + * with NULL at the end). */ - #define TX_CMPL_V UINT32_C(0x1) + char hwrm_fw_name[16]; /* - * This error indicates that there was some sort of problem with - * the BDs for the packet. + * This field represents the name of mgmt FW (ASCII chars + * with NULL at the end). */ - #define TX_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe) - #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1 - /* No error */ - #define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1) - /* Bad Format: BDs were not formatted correctly. */ - #define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1) - #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \ - TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT + char mgmt_fw_name[16]; /* - * When this bit is '1', it indicates that the length of the - * packet was zero. No packet was transmitted. + * This field represents the name of network control + * firmware (ASCII chars with NULL at the end). */ - #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10) + char netctrl_fw_name[16]; /* - * When this bit is '1', it indicates that the packet was longer - * than the programmed limit in TDI. No packet was transmitted. + * This field is reserved for future use. + * The responder should set it to 0. + * The requester should ignore this field. */ - #define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH UINT32_C(0x20) + uint8_t reserved2[16]; /* - * When this bit is '1', it indicates that one or more of the - * BDs associated with this packet generated a PCI error. This - * probably means the address was not valid. + * This field represents the name of RoCE FW (ASCII chars + * with NULL at the end). */ - #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40) + char roce_fw_name[16]; + /* This field returns the chip number. */ + uint16_t chip_num; + /* This field returns the revision of chip. */ + uint8_t chip_rev; + /* This field returns the chip metal number. */ + uint8_t chip_metal; + /* This field returns the bond id of the chip. */ + uint8_t chip_bond_id; + /* This value indicates the type of platform used for chip implementation. */ + uint8_t chip_platform_type; + /* ASIC */ + #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC UINT32_C(0x0) + /* FPGA platform of the chip. */ + #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA UINT32_C(0x1) + /* Palladium platform of the chip. 
 */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2)
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_LAST \
+ HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM
 /*
- * When this bit is '1', it indicates that the packet was longer
- * than indicated by the hint. No packet was transmitted.
+ * This field returns the maximum value of the request window that
+ * is supported by the HWRM. The request window is mapped
+ * into device address space using MMIO.
 */
- #define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80)
+ uint16_t max_req_win_len;
 /*
- * When this bit is '1', it indicates that the packet was
- * dropped due to Poison TLP error on one or more of the TLPs in
- * the PXP completion.
+ * This field returns the maximum size of the response buffer in
+ * bytes.
 */
- #define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100)
- #define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe)
- #define TX_CMPL_ERRORS_SFT 1
- uint16_t unused_1;
- /* unused2 is 16 b */
- uint32_t unused_2;
- /* unused3 is 32 b */
-} __attribute__((packed));
-/* RX Packet Completion Record (32 bytes split to 2 16-byte struct) */
-struct rx_pkt_cmpl {
- uint16_t flags_type;
+ uint16_t max_resp_len;
 /*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * This field returns the default request timeout value in
+ * milliseconds.
 */
- #define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define RX_PKT_CMPL_TYPE_SFT 0
+ uint16_t def_req_timeout;
 /*
- * RX L2 completion: Completion of and L2 RX
- * packet. Length = 32B
+ * This field indicates if any subsystem is not fully
+ * initialized.
 */
- #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+ uint8_t flags;
 /*
- * When this bit is '1', it indicates a packet that has an error
- * of some type. Type of error is indicated in error_flags.
+ * If set to 1, device is not ready.
+ * If set to 0, device is ready to accept all HWRM commands.
 */
- #define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40)
- /* This field indicates how the packet was placed in the buffer. */
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7
- /* Normal: Packet was placed using normal algorithm. */
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (UINT32_C(0x0) << 7)
- /* Jumbo: Packet was placed using jumbo algorithm. */
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
- /*
- * Header/Data Separation: Packet was placed
- * using Header/Data separation algorithm. The
- * separation location is indicated by the itype
- * field.
+ #define HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY UINT32_C(0x1)
+ /*
+ * If set to 1, the external version is present.
+ * If set to 0, the external version is not present.
 */
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST \
- RX_PKT_CMPL_FLAGS_PLACEMENT_HDS
- /* This bit is '1' if the RSS field in this completion is valid. */
- #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
- /* unused is 1 b */
- #define RX_PKT_CMPL_FLAGS_UNUSED UINT32_C(0x800)
+ #define HWRM_VER_GET_OUTPUT_FLAGS_EXT_VER_AVAIL UINT32_C(0x2)
+ uint8_t unused_0[2];
 /*
- * This value indicates what the inner packet determined for the
- * packet was.
+ * For backward compatibility this field must be set to 1.
+ * Older drivers might look for this field to be 1 before
+ * processing the message.
*/ - #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) - #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12 - /* Not Known: Indicates that the packet type was not known. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN (UINT32_C(0x0) << 12) + uint8_t always_1; /* - * IP Packet: Indicates that the packet was an - * IP packet, but further classification was not - * possible. + * This field represents the major version of HWRM interface + * specification supported by the HWRM implementation. + * The interface major version is intended to change only when + * non backward compatible changes are made to the HWRM + * interface specification. A HWRM implementation that is + * compliant with this specification shall provide value of 1 + * in this field. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_IP (UINT32_C(0x1) << 12) + uint16_t hwrm_intf_major; /* - * TCP Packet: Indicates that the packet was IP - * and TCP. This indicates that the - * payload_offset field is valid. + * This field represents the minor version of HWRM interface + * specification supported by the HWRM implementation. + * A change in interface minor version is used to reflect + * significant backward compatible modification to HWRM + * interface specification. This can be due to addition or + * removal of functionality. HWRM interface specifications + * with the same major version but different minor versions are + * compatible. A HWRM implementation that is compliant with + * this specification shall provide value of 2 in this field. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12) + uint16_t hwrm_intf_minor; /* - * UDP Packet: Indicates that the packet was IP - * and UDP. This indicates that the - * payload_offset field is valid. + * This field represents the update version of HWRM interface + * specification supported by the HWRM implementation. The + * interface update version is used to reflect minor changes or + * bug fixes to a released HWRM interface specification. + * A HWRM implementation that is compliant with this + * specification shall provide value of 2 in this field. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_UDP (UINT32_C(0x3) << 12) + uint16_t hwrm_intf_build; /* - * FCoE Packet: Indicates that the packet was - * recognized as a FCoE. This also indicates - * that the payload_offset field is valid. + * This field represents the patch version of HWRM interface + * specification supported by the HWRM implementation. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE (UINT32_C(0x4) << 12) + uint16_t hwrm_intf_patch; /* - * RoCE Packet: Indicates that the packet was - * recognized as a RoCE. This also indicates - * that the payload_offset field is valid. + * This field represents the major version of HWRM firmware. + * A change in firmware major version represents a major + * firmware release. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE (UINT32_C(0x5) << 12) + uint16_t hwrm_fw_major; /* - * ICMP Packet: Indicates that the packet was - * recognized as ICMP. This indicates that the - * payload_offset field is valid. + * This field represents the minor version of HWRM firmware. + * A change in firmware minor version represents significant + * firmware functionality changes. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP (UINT32_C(0x7) << 12) + uint16_t hwrm_fw_minor; /* - * PtP packet wo/timestamp: Indicates that the - * packet was recognized as a PtP packet. + * This field represents the build version of HWRM firmware. + * A change in firmware build version represents bug fixes to + * a released firmware. 
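The compatibility rule spelled out in the interface-version comments above (the same major version implies compatibility, whatever the minor, build, or patch numbers) reduces to a one-field check; a sketch:

static int
hwrm_intf_compatible(const struct hwrm_ver_get_output *resp)
{
        /* A different major version is not backward compatible. */
        if (resp->hwrm_intf_major != HWRM_VERSION_MAJOR)
                return 0;
        /* Same major: minor/build/patch revisions remain compatible. */
        return 1;
}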
*/ - #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP (UINT32_C(0x8) << 12) + uint16_t hwrm_fw_build; /* - * PtP packet w/timestamp: Indicates that the - * packet was recognized as a PtP packet and - * that a timestamp was taken for the packet. + * This field is a reserved field. + * This field can be used to represent firmware branches or customer + * specific releases tied to a specific (major,minor,update) version + * of the HWRM firmware. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (UINT32_C(0x9) << 12) - #define RX_PKT_CMPL_FLAGS_ITYPE_LAST \ - RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP - #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0) - #define RX_PKT_CMPL_FLAGS_SFT 6 - uint16_t len; + uint16_t hwrm_fw_patch; /* - * This is the length of the data for the packet stored in the - * buffer(s) identified by the opaque value. This includes the - * packet BD and any associated buffer BDs. This does not - * include the length of any data places in aggregation BDs. + * This field represents the major version of mgmt firmware. + * A change in major version represents a major release. */ - uint32_t opaque; + uint16_t mgmt_fw_major; /* - * This is a copy of the opaque field from the RX BD this - * completion corresponds to. + * This field represents the minor version of mgmt firmware. + * A change in firmware minor version represents significant + * firmware functionality changes. */ - uint8_t agg_bufs_v1; - /* unused1 is 2 b */ + uint16_t mgmt_fw_minor; /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. + * This field represents the build version of mgmt firmware. + * A change in update version represents bug fixes. */ - #define RX_PKT_CMPL_V1 UINT32_C(0x1) + uint16_t mgmt_fw_build; /* - * This value is the number of aggregation buffers that follow - * this entry in the completion ring that are a part of this - * packet. If the value is zero, then the packet is completely - * contained in the buffer space provided for the packet in the - * RX ring. + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version. */ - #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e) - #define RX_PKT_CMPL_AGG_BUFS_SFT 1 - /* unused1 is 2 b */ - uint8_t rss_hash_type; - /* - * This is the RSS hash type for the packet. The value is packed - * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]} - * . The value of tuple_extrac_op provides the information about - * what fields the hash was computed on. * 0: The RSS hash was - * computed over source IP address, destination IP address, - * source port, and destination port of inner IP and TCP or UDP - * headers. Note: For non-tunneled packets, the packet headers - * are considered inner packet headers for the RSS hash - * computation purpose. * 1: The RSS hash was computed over - * source IP address and destination IP address of inner IP - * header. Note: For non-tunneled packets, the packet headers - * are considered inner packet headers for the RSS hash - * computation purpose. * 2: The RSS hash was computed over - * source IP address, destination IP address, source port, and - * destination port of IP and TCP or UDP headers of outer tunnel - * headers. Note: For non-tunneled packets, this value is not - * applicable.
* 3: The RSS hash was computed over source IP - * address and destination IP address of IP header of outer - * tunnel headers. Note: For non-tunneled packets, this value is - * not applicable. Note that 4-tuples values listed above are - * applicable for layer 4 protocols supported and enabled for - * RSS in the hardware, HWRM firmware, and drivers. For example, - * if RSS hash is supported and enabled for TCP traffic only, - * then the values of tuple_extract_op corresponding to 4-tuples - * are only valid for TCP traffic. - */ - uint8_t payload_offset; - /* - * This value indicates the offset in bytes from the beginning - * of the packet where the inner payload starts. This value is - * valid for TCP, UDP, FCoE, and RoCE packets. A value of zero - * indicates that header is 256B into the packet. - */ - uint8_t unused_1; - /* unused2 is 8 b */ - uint32_t rss_hash; + uint16_t mgmt_fw_patch; /* - * This value is the RSS hash value calculated for the packet - * based on the mode bits and key value in the VNIC. + * This field represents the major version of network control + * firmware. A change in major version represents + * a major release. */ -} __attribute__((packed)); - -/* last 16 bytes of RX Packet Completion Record */ -struct rx_pkt_cmpl_hi { - uint32_t flags2; + uint16_t netctrl_fw_major; /* - * This indicates that the ip checksum was calculated for the - * inner packet and that the ip_cs_error field indicates if - * there was an error. + * This field represents the minor version of network control + * firmware. A change in minor version represents significant + * functionality changes. */ - #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1) + uint16_t netctrl_fw_minor; /* - * This indicates that the TCP, UDP or ICMP checksum was - * calculated for the inner packet and that the l4_cs_error - * field indicates if there was an error. + * This field represents the build version of network control + * firmware. A change in update version represents bug fixes. */ - #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2) + uint16_t netctrl_fw_build; /* - * This indicates that the ip checksum was calculated for the - * tunnel header and that the t_ip_cs_error field indicates if - * there was an error. + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version */ - #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4) + uint16_t netctrl_fw_patch; /* - * This indicates that the UDP checksum was calculated for the - * tunnel packet and that the t_l4_cs_error field indicates if - * there was an error. + * This field represents the major version of RoCE firmware. + * A change in major version represents a major release. */ - #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8) - /* This value indicates what format the metadata field is. */ - #define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0) - #define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4 - /* No metadata informtaion. Value is zero. */ - #define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4) - /* - * The metadata field contains the VLAN tag and - * TPID value. - metadata[11:0] contains the - * vlan VID value. - metadata[12] contains the - * vlan DE value. - metadata[15:13] contains the - * vlan PRI value. - metadata[31:16] contains - * the vlan TPID value. 
- */ - #define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4) - #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \ - RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN + uint16_t roce_fw_major; /* - * This field indicates the IP type for the inner-most IP - * header. A value of '0' indicates IPv4. A value of '1' - * indicates IPv6. This value is only valid if itype indicates a - * packet with an IP header. + * This field represents the minor version of RoCE firmware. + * A change in minor version represents significant + * functionality changes. */ - #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100) - uint32_t metadata; + uint16_t roce_fw_minor; /* - * This is data from the CFA block as indicated by the - * meta_format field. + * This field represents the build version of RoCE firmware. + * A change in update version represents bug fixes. */ - /* When meta_format=1, this value is the VLAN VID. */ - #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff) - #define RX_PKT_CMPL_METADATA_VID_SFT 0 - /* When meta_format=1, this value is the VLAN DE. */ - #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000) - /* When meta_format=1, this value is the VLAN PRI. */ - #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000) - #define RX_PKT_CMPL_METADATA_PRI_SFT 13 - /* When meta_format=1, this value is the VLAN TPID. */ - #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000) - #define RX_PKT_CMPL_METADATA_TPID_SFT 16 - uint16_t errors_v2; + uint16_t roce_fw_build; /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version */ - #define RX_PKT_CMPL_V2 UINT32_C(0x1) + uint16_t roce_fw_patch; /* - * This error indicates that there was some sort of problem with - * the BDs for the packet that was found after part of the - * packet was already placed. The packet should be treated as - * invalid. + * This field returns the maximum extended request length acceptable + * by the device which allows requests greater than mailbox size when + * used with the short cmd request format. */ - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe) - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 - /* No buffer error */ - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (UINT32_C(0x0) << 1) + uint16_t max_ext_req_len; + uint8_t unused_1[5]; /* - * Did Not Fit: Packet did not fit into packet - * buffer provided. For regular placement, this - * means the packet did not fit in the buffer - * provided. For HDS and jumbo placement, this - * means that the packet could not be placed - * into 7 physical buffers or less. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \ - (UINT32_C(0x1) << 1) + uint8_t valid; +} __attribute__((packed)); + +/* bd_base (size:64b/8B) */ +struct bd_base { + uint8_t type; + /* This value identifies the type of buffer descriptor. 
*/ + #define BD_BASE_TYPE_MASK UINT32_C(0x3f) + #define BD_BASE_TYPE_SFT 0 /* - * Not On Chip: All BDs needed for the packet - * were not on-chip when the packet arrived. + * Indicates that this BD is 16B long and is used for + * normal L2 packet transmission. */ - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \ - (UINT32_C(0x2) << 1) - /* Bad Format: BDs were not formatted correctly. */ - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \ - (UINT32_C(0x3) << 1) - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \ - RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT - /* This indicates that there was an error in the IP header checksum. */ - #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR UINT32_C(0x10) + #define BD_BASE_TYPE_TX_BD_SHORT UINT32_C(0x0) /* - * This indicates that there was an error in the TCP, UDP or - * ICMP checksum. + * Indicates that this BD is 16B long and is an empty + * TX BD. Not valid for use by the driver. */ - #define RX_PKT_CMPL_ERRORS_L4_CS_ERROR UINT32_C(0x20) + #define BD_BASE_TYPE_TX_BD_EMPTY UINT32_C(0x1) /* - * This indicates that there was an error in the tunnel IP - * header checksum. + * Indicates that this BD is 16B long and is an RX Producer + * (i.e. empty) buffer descriptor. */ - #define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR UINT32_C(0x40) + #define BD_BASE_TYPE_RX_PROD_PKT UINT32_C(0x4) /* - * This indicates that there was an error in the tunnel UDP - * checksum. + * Indicates that this BD is 16B long and is an RX + * Producer Buffer BD. */ - #define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR UINT32_C(0x80) + #define BD_BASE_TYPE_RX_PROD_BFR UINT32_C(0x5) /* - * This indicates that there was a CRC error on either an FCoE - * or RoCE packet. The itype indicates the packet type. + * Indicates that this BD is 16B long and is an + * RX Producer Assembly Buffer Descriptor. */ - #define RX_PKT_CMPL_ERRORS_CRC_ERROR UINT32_C(0x100) + #define BD_BASE_TYPE_RX_PROD_AGG UINT32_C(0x6) /* - * This indicates that there was an error in the tunnel portion - * of the packet when this field is non-zero. + * Indicates that this BD is 32B long and is used for + * normal L2 packet transmission. */ - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK UINT32_C(0xe00) - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9 + #define BD_BASE_TYPE_TX_BD_LONG UINT32_C(0x10) + #define BD_BASE_TYPE_LAST BD_BASE_TYPE_TX_BD_LONG + uint8_t unused_1[7]; +} __attribute__((packed)); + +/* tx_bd_short (size:128b/16B) */ +struct tx_bd_short { /* - * No additional error occurred on the tunnel - * portion of the packet of the packet does not - * have a tunnel. + * All bits in this field must be valid on the first BD of a packet. + * Only the packet_end bit must be valid for the remaining BDs + * of a packet. */ - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 9) + uint16_t flags_type; + /* This value identifies the type of buffer descriptor. */ + #define TX_BD_SHORT_TYPE_MASK UINT32_C(0x3f) + #define TX_BD_SHORT_TYPE_SFT 0 /* - * Indicates that IP header version does not - * match expectation from L2 Ethertype for IPv4 - * and IPv6 in the tunnel header. + * Indicates that this BD is 16B long and is used for + * normal L2 packet transmission. */ - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \ - (UINT32_C(0x1) << 9) + #define TX_BD_SHORT_TYPE_TX_BD_SHORT UINT32_C(0x0) + #define TX_BD_SHORT_TYPE_LAST TX_BD_SHORT_TYPE_TX_BD_SHORT /* - * Indicates that header length is out of range - * in the tunnel header. Valid for IPv4. + * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs + * of a packet. */ - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \ - (UINT32_C(0x2) << 9) + #define TX_BD_SHORT_FLAGS_MASK UINT32_C(0xffc0) + #define TX_BD_SHORT_FLAGS_SFT 6 /* - * Indicates that the physical packet is shorter - * than that claimed by the PPPoE header length - * for a tunnel PPPoE packet. + * If set to 1, the packet ends with the data in the buffer + * pointed to by this descriptor. This flag must be + * valid on every BD. */ - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \ - (UINT32_C(0x3) << 9) + #define TX_BD_SHORT_FLAGS_PACKET_END UINT32_C(0x40) /* - * Indicates that physical packet is shorter - * than that claimed by the tunnel l3 header - * length. Valid for IPv4, or IPv6 tunnel packet - * packets. + * If set to 1, the device will not generate a completion for + * this transmit packet unless there is an error in its + * processing. + * If this bit + * is set to 0, then the packet will be completed normally. + * + * This bit must be valid only on the first BD of a packet. */ - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \ - (UINT32_C(0x4) << 9) + #define TX_BD_SHORT_FLAGS_NO_CMPL UINT32_C(0x80) + /* + * This value indicates how many 16B BD locations are consumed + * in the ring by this packet. + * A value of 1 indicates that this BD is the only BD (and that + * it is a short BD). A value + * of 3 indicates either 3 short BDs or 1 long BD and one short + * BD in the packet. A value of 0 indicates + * that there are 32 BD locations in the packet (the maximum). + * + * This field is valid only on the first BD of a packet. + */ + #define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00) + #define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8 + /* + * This value is a hint for the length of the entire packet. + * It is used by the chip to optimize internal processing. + * + * The packet will be dropped if the hint is too short. + * + * This field is valid only on the first BD of a packet. + */ + #define TX_BD_SHORT_FLAGS_LHINT_MASK UINT32_C(0x6000) + #define TX_BD_SHORT_FLAGS_LHINT_SFT 13 + /* indicates packet length < 512B */ + #define TX_BD_SHORT_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13) + /* indicates 512 <= packet length < 1KB */ + #define TX_BD_SHORT_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13) + /* indicates 1KB <= packet length < 2KB */ + #define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13) + /* indicates packet length >= 2KB */ + #define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13) + #define TX_BD_SHORT_FLAGS_LHINT_LAST \ + TX_BD_SHORT_FLAGS_LHINT_GTE2K /* - * Indicates that the physical packet is shorter - * than that claimed by the tunnel UDP header - * length for a tunnel UDP packet that is not - * fragmented. + * If set to 1, the device immediately updates the Send Consumer + * Index after the buffer associated with this descriptor has + * been transferred via DMA to NIC memory from host memory. An + * interrupt may or may not be generated according to the state + * of the interrupt avoidance mechanisms. If this bit + * is set to 0, then the Consumer Index is only updated as soon + * as one of the host interrupt coalescing conditions has been met. + * + * This bit must be valid on the first BD of a packet. */ - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \ - (UINT32_C(0x5) << 9) + #define TX_BD_SHORT_FLAGS_COAL_NOW UINT32_C(0x8000) /* - * indicates that the IPv4 TTL or IPv6 hop limit - * check have failed (e.g. TTL = 0) in the - * tunnel header.
Valid for IPv4, and IPv6. + * This is the length of the host physical buffer this BD describes + * in bytes. + * + * This field must be valid on all BDs of a packet. */ - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \ - (UINT32_C(0x6) << 9) - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \ - RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL + uint16_t len; /* - * This indicates that there was an error in the inner portion - * of the packet when this field is non-zero. + * The opaque data field is passed through to the completion and can be + * used for any data that the driver wants to associate with the + * transmit BD. + * + * This field must be valid on the first BD of a packet. */ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK UINT32_C(0xf000) - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12 + uint32_t opaque; /* - * No additional error occurred on the tunnel - * portion of the packet of the packet does not - * have a tunnel. + * This is the host physical address for the portion of the packet + * described by this TX BD. + * + * This value must be valid on all BDs of a packet. */ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 12) + uint64_t address; +} __attribute__((packed)); + +/* tx_bd_long (size:128b/16B) */ +struct tx_bd_long { + /* This value identifies the type of buffer descriptor. */ + uint16_t flags_type; /* - * Indicates that IP header version does not - * match expectation from L2 Ethertype for IPv4 - * and IPv6 or that option other than VFT was - * parsed on FCoE packet. + * This value indicates the type of buffer descriptor. */ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION \ - (UINT32_C(0x1) << 12) + #define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f) + #define TX_BD_LONG_TYPE_SFT 0 /* - * indicates that header length is out of range. - * Valid for IPv4 and RoCE + * Indicates that this BD is 32B long and is used for + * normal L2 packet transmission. */ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \ - (UINT32_C(0x2) << 12) + #define TX_BD_LONG_TYPE_TX_BD_LONG UINT32_C(0x10) + #define TX_BD_LONG_TYPE_LAST TX_BD_LONG_TYPE_TX_BD_LONG /* - * indicates that the IPv4 TTL or IPv6 hop limit - * check have failed (e.g. TTL = 0). Valid for - * IPv4, and IPv6 + * All bits in this field must be valid on the first BD of a packet. + * Only the packet_end bit must be valid for the remaining BDs + * of a packet. */ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (UINT32_C(0x3) << 12) + #define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0) + #define TX_BD_LONG_FLAGS_SFT 6 /* - * Indicates that physical packet is shorter - * than that claimed by the l3 header length. - * Valid for IPv4, IPv6 packet or RoCE packets. + * If set to 1, the packet ends with the data in the buffer + * pointed to by this descriptor. This flag must be + * valid on every BD. */ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \ - (UINT32_C(0x4) << 12) + #define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40) /* - * Indicates that the physical packet is shorter - * than that claimed by the UDP header length - * for a UDP packet that is not fragmented. + * If set to 1, the device will not generate a completion for + * this transmit packet unless there is an error in its + * processing. + * If this bit + * is set to 0, then the packet will be completed normally. + * + * This bit must be valid only on the first BD of a packet.
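To make the field rules of tx_bd_short above concrete, here is a minimal sketch that programs a short BD for a packet held in a single host buffer (BD_CNT = 1, packet_end set, length hint derived from the packet size). It is illustrative only: byte-order conversion and the ring bookkeeping a driver layers on top are omitted.

    #include <stdint.h>

    static void bnxt_fill_tx_bd_short(struct tx_bd_short *bd, uint64_t dma_addr,
                                      uint16_t pkt_len, uint32_t cookie)
    {
        uint32_t ft = TX_BD_SHORT_TYPE_TX_BD_SHORT |
                      TX_BD_SHORT_FLAGS_PACKET_END |        /* single-BD packet */
                      (1U << TX_BD_SHORT_FLAGS_BD_CNT_SFT); /* one 16B location */

        /* A hint that is too short causes the packet to be dropped. */
        if (pkt_len < 512)
            ft |= TX_BD_SHORT_FLAGS_LHINT_LT512;
        else if (pkt_len < 1024)
            ft |= TX_BD_SHORT_FLAGS_LHINT_LT1K;
        else if (pkt_len < 2048)
            ft |= TX_BD_SHORT_FLAGS_LHINT_LT2K;
        else
            ft |= TX_BD_SHORT_FLAGS_LHINT_GTE2K;

        bd->flags_type = (uint16_t)ft;
        bd->len = pkt_len;      /* bytes in this host buffer */
        bd->opaque = cookie;    /* echoed back in the TX completion */
        bd->address = dma_addr; /* host physical address */
    }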
*/ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \ - (UINT32_C(0x5) << 12) + #define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80) + /* + * This value indicates how many 16B BD locations are consumed + * in the ring by this packet. + * A value of 1 indicates that this BD is the only BD (and that + * it is a short BD). A value + * of 3 indicates either 3 short BDs or 1 long BD and one short + * BD in the packet. A value of 0 indicates + * that there are 32 BD locations in the packet (the maximum). + * + * This field is valid only on the first BD of a packet. + */ + #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00) + #define TX_BD_LONG_FLAGS_BD_CNT_SFT 8 + /* + * This value is a hint for the length of the entire packet. + * It is used by the chip to optimize internal processing. + * + * The packet will be dropped if the hint is too short. + * + * This field is valid only on the first BD of a packet. + */ + #define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000) + #define TX_BD_LONG_FLAGS_LHINT_SFT 13 + /* indicates packet length < 512B */ + #define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13) + /* indicates 512 <= packet length < 1KB */ + #define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13) + /* indicates 1KB <= packet length < 2KB */ + #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13) + /* indicates packet length >= 2KB */ + #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13) + #define TX_BD_LONG_FLAGS_LHINT_LAST TX_BD_LONG_FLAGS_LHINT_GTE2K /* - * Indicates that TCP header length > IP - * payload. Valid for TCP packets only. + * If set to 1, the device immediately updates the Send Consumer + * Index after the buffer associated with this descriptor has + * been transferred via DMA to NIC memory from host memory. An + * interrupt may or may not be generated according to the state + * of the interrupt avoidance mechanisms. If this bit + * is set to 0, then the Consumer Index is only updated as soon + * as one of the host interrupt coalescing conditions has been met. + * + * This bit must be valid on the first BD of a packet. */ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \ - (UINT32_C(0x6) << 12) - /* Indicates that TCP header length < 5. Valid for TCP. */ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \ - (UINT32_C(0x7) << 12) + #define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000) /* - * Indicates that TCP option headers result in a - * TCP header size that does not match data - * offset in TCP header. Valid for TCP. + * This is the length of the host physical buffer this BD describes + * in bytes. + * + * This field must be valid on all BDs of a packet. */ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \ - (UINT32_C(0x8) << 12) - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \ - RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN - #define RX_PKT_CMPL_ERRORS_MASK UINT32_C(0xfffe) - #define RX_PKT_CMPL_ERRORS_SFT 1 - uint16_t cfa_code; + uint16_t len; /* - * This field identifies the CFA action rule that was used for - * this packet. + * The opaque data field is passed through to the completion and can be + * used for any data that the driver wants to associate with the + * transmit BD. + * + * This field must be valid on the first BD of a packet. */ - uint32_t reorder; + uint32_t opaque; /* - * This value holds the reordering sequence number for the - * packet. If the reordering sequence is not valid, then this - * value is zero. The reordering domain for the packet is in the - * bottom 8 to 10b of the rss_hash value.
The bottom 20b of this - * value contain the ordering domain value for the packet. + * This is the host physical address for the portion of the packet + * described by this TX BD. + * + * This value must be valid on all BDs of a packet. */ - #define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff) - #define RX_PKT_CMPL_REORDER_SFT 0 + uint64_t address; } __attribute__((packed)); -/* RX L2 TPA Start Completion Record (32 bytes split to 2 16-byte struct) */ -struct rx_tpa_start_cmpl { - uint16_t flags_type; +/* tx_bd_long_hi (size:128b/16B) */ +struct tx_bd_long_hi { /* - * This field indicates the exact type of the completion. By - * convention, the LSB identifies the length of the record in - * 16B units. Even values indicate 16B records. Odd values - * indicate 32B records. + * All bits in this field must be valid on the first BD of a packet. + * Their value on other BDs of the packet will be ignored. */ - #define RX_TPA_START_CMPL_TYPE_MASK UINT32_C(0x3f) - #define RX_TPA_START_CMPL_TYPE_SFT 0 + uint16_t lflags; /* - * RX L2 TPA Start Completion: Completion at the - * beginning of a TPA operation. Length = 32B + * If set to 1, the controller replaces the TCP/UDP checksum + * fields of normal TCP/UDP packets, or the inner TCP/UDP + * checksum field of the encapsulated TCP/UDP packets, with the + * hardware calculated TCP/UDP checksum for the packet associated + * with this descriptor. The flag is ignored if the LSO flag is set. + * + * This bit must be valid on the first BD of a packet. */ - #define RX_TPA_START_CMPL_TYPE_RX_TPA_START UINT32_C(0x13) - /* This bit will always be '0' for TPA start completions. */ - #define RX_TPA_START_CMPL_FLAGS_ERROR UINT32_C(0x40) - /* This field indicates how the packet was placed in the buffer. */ - #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380) - #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT 7 + #define TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1) /* - * Jumbo: TPA Packet was placed using jumbo - * algorithm. This means that the first buffer - * will be filled with data before moving to - * aggregation buffers. Each aggregation buffer - * will be filled before moving to the next - * aggregation buffer. + * If set to 1, the controller replaces the IP checksum of the + * normal packets, or the inner IP checksum of the encapsulated + * packets with the hardware calculated IP checksum for the + * packet associated with this descriptor. + * + * This bit must be valid on the first BD of a packet. */ - #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7) + #define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2) /* - * Header/Data Separation: Packet was placed - * using Header/Data separation algorithm. The - * separation location is indicated by the itype - * field. + * If set to 1, the controller will not append an Ethernet CRC + * to the end of the frame. + * + * This bit must be valid on the first BD of a packet. + * + * Packet must be 64B or longer when this flag is set. It is not + * useful to use this bit with any form of TX offload such as + * CSO or LSO. The intent is that the packet from the host already + * has a valid Ethernet CRC on the packet. */ - #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7) + #define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4) /* - * GRO/Jumbo: Packet will be placed using - * GRO/Jumbo where the first packet is filled - * with data.
Subsequent packets will be placed - * such that any one packet does not span two - * aggregation buffers unless it starts at the - * beginning of an aggregation buffer. + * If set to 1, the device will record the time at which the packet + * was actually transmitted at the TX MAC. + * + * This bit must be valid on the first BD of a packet. */ - #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \ - (UINT32_C(0x5) << 7) + #define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8) /* - * GRO/Header-Data Separation: Packet will be - * placed using GRO/HDS where the header is in - * the first packet. Payload of each packet will - * be placed such that any one packet does not - * span two aggregation buffers unless it starts - * at the beginning of an aggregation buffer. + * If set to 1, the controller replaces the tunnel IP checksum + * field with hardware calculated IP checksum for the IP header + * of the packet associated with this descriptor. + * + * For outer UDP checksum, global outer UDP checksum TE_NIC register + * needs to be enabled. If the global outer UDP checksum TE_NIC register + * bit is set, outer UDP checksum will be calculated for the following + * cases: + * 1. Packets with tcp_udp_chksum flag set to offload checksum for inner + * packet AND the inner packet is TCP/UDP. If the inner packet is ICMP for + * example (non-TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP + * checksum will not be calculated. + * 2. Packets with lso flag set which implies inner TCP checksum calculation + * as part of LSO operation. + */ + #define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10) + /* + * If set to 1, the device will treat this packet with LSO (Large + * Send Offload) processing for both normal and encapsulated + * packets, which is a form of TCP segmentation. When this bit + * is 1, the hdr_size and mss fields must be valid. The driver + * doesn't need to set t_ip_chksum, ip_chksum, and tcp_udp_chksum + * flags since the controller will replace the appropriate + * checksum fields for segmented packets. */ - #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7) - #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_LAST \ - RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS - /* This bit is '1' if the RSS field in this completion is valid. */ - #define RX_TPA_START_CMPL_FLAGS_RSS_VALID UINT32_C(0x400) - /* unused is 1 b */ - #define RX_TPA_START_CMPL_FLAGS_UNUSED UINT32_C(0x800) + #define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20) /* - * This value indicates what the inner packet determined for the - * packet was. + * If set to zero when LSO is '1', then the IPID will be treated + * as a 16b number and will be wrapped if it exceeds a value of + * 0xffff. + * + * If set to one when LSO is '1', then the IPID will be treated + * as a 15b number and will be wrapped if it exceeds a value of + * 0x7fff.
+ * If set to zero when LSO is '1', then the IPID of the tunnel + * IP header will not be modified during LSO operations. + * + * If set to one when LSO is '1', then the IPID of the tunnel + * IP header will be incremented for each subsequent segment of an + * LSO operation. + * + * The flag is ignored if the LSO packet is a normal (non-tunneled) + * TCP packet. */ - uint32_t opaque; - /* - * This is a copy of the opaque field from the RX BD this - * completion corresponds to. - */ - uint8_t v1; - /* unused1 is 7 b */ - /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. - */ - #define RX_TPA_START_CMPL_V1 UINT32_C(0x1) - /* unused1 is 7 b */ - uint8_t rss_hash_type; - /* - * This is the RSS hash type for the packet. The value is packed - * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]} - * . The value of tuple_extrac_op provides the information about - * what fields the hash was computed on. * 0: The RSS hash was - * computed over source IP address, destination IP address, - * source port, and destination port of inner IP and TCP or UDP - * headers. Note: For non-tunneled packets, the packet headers - * are considered inner packet headers for the RSS hash - * computation purpose. * 1: The RSS hash was computed over - * source IP address and destination IP address of inner IP - * header. Note: For non-tunneled packets, the packet headers - * are considered inner packet headers for the RSS hash - * computation purpose. * 2: The RSS hash was computed over - * source IP address, destination IP address, source port, and - * destination port of IP and TCP or UDP headers of outer tunnel - * headers. Note: For non-tunneled packets, this value is not - * applicable. * 3: The RSS hash was computed over source IP - * address and destination IP address of IP header of outer - * tunnel headers. Note: For non-tunneled packets, this value is - * not applicable. Note that 4-tuples values listed above are - * applicable for layer 4 protocols supported and enabled for - * RSS in the hardware, HWRM firmware, and drivers. For example, - * if RSS hash is supported and enabled for TCP traffic only, - * then the values of tuple_extract_op corresponding to 4-tuples - * are only valid for TCP traffic. - */ - uint16_t agg_id; + #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80) /* - * This is the aggregation ID that the completion is associated - * with. Use this number to correlate the TPA start completion - * with the TPA end completion. + * If set to '1', then the RoCE ICRC will be appended to the + * packet. Packet must be a valid RoCE format packet. */ - /* unused2 is 9 b */ + #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100) /* - * This is the aggregation ID that the completion is associated - * with. Use this number to correlate the TPA start completion - * with the TPA end completion. + * If set to '1', then the FCoE CRC will be appended to the + * packet. Packet must be a valid FCoE format packet. */ - #define RX_TPA_START_CMPL_AGG_ID_MASK UINT32_C(0xfe00) - #define RX_TPA_START_CMPL_AGG_ID_SFT 9 - uint32_t rss_hash; + #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200) + uint16_t hdr_size; /* - * This value is the RSS hash value calculated for the packet - * based on the mode bits and key value in the VNIC. + * When LSO is '1', this field must contain the offset of the + * TCP payload from the beginning of the packet as + * 16-bit words.
In case of an encapsulated/tunneled packet, this field + * contains the offset of the inner TCP payload from the beginning of the + * packet as 16-bit words. + * + * This value must be valid on the first BD of a packet. + */ + #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff) + #define TX_BD_LONG_HDR_SIZE_SFT 0 + uint32_t mss; + /* + * This is the MSS value that will be used to do the LSO processing. + * The value is the length in bytes of the TCP payload for each + * segment generated by the LSO operation. + * + * This value must be valid on the first BD of a packet. + */ + #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff) + #define TX_BD_LONG_MSS_SFT 0 + uint16_t unused2; + /* + * This value selects a CFA action to perform on the packet. + * Set this value to zero if no CFA action is desired. + * + * This value must be valid on the first BD of a packet. + */ + uint16_t cfa_action; + /* + * This value is action meta-data that defines CFA edit operations + * that are done in addition to any action editing. + */ + uint32_t cfa_meta; + /* When key=1, this is the VLAN tag VID value. */ + #define TX_BD_LONG_CFA_META_VLAN_VID_MASK UINT32_C(0xfff) + #define TX_BD_LONG_CFA_META_VLAN_VID_SFT 0 + /* When key=1, this is the VLAN tag DE value. */ + #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000) + /* When key=1, this is the VLAN tag PRI value. */ + #define TX_BD_LONG_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000) + #define TX_BD_LONG_CFA_META_VLAN_PRI_SFT 13 + /* When key=1, this is the VLAN tag TPID select value. */ + #define TX_BD_LONG_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000) + #define TX_BD_LONG_CFA_META_VLAN_TPID_SFT 16 + /* 0x88a8 */ + #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16) + /* 0x8100 */ + #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16) + /* 0x9100 */ + #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16) + /* 0x9200 */ + #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16) + /* 0x9300 */ + #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16) + /* Value programmed in CFA VLANTPID register. */ + #define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16) + #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \ + TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG + /* When key=1, these bits are reserved. */ + #define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000) + #define TX_BD_LONG_CFA_META_VLAN_RESERVED_SFT 19 + /* + * This field identifies the type of edit to be performed + * on the packet. + * + * This value must be valid on the first BD of a packet. + */ + #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000) + #define TX_BD_LONG_CFA_META_KEY_SFT 28 + /* No editing */ + #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28) + /* + * - meta[17:16] - TPID select value (0 = 0x8100). + * - meta[15:12] - PRI/DE value. + * - meta[11:0] - VID value. */ + #define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28) + #define TX_BD_LONG_CFA_META_KEY_LAST \ + TX_BD_LONG_CFA_META_KEY_VLAN_TAG } __attribute__((packed)); -/* last 16 bytes of RX L2 TPA Start Completion Record */ -struct rx_tpa_start_cmpl_hi { - uint32_t flags2; +/* tx_bd_empty (size:128b/16B) */ +struct tx_bd_empty { + /* This value identifies the type of buffer descriptor. */ + uint8_t type; + #define TX_BD_EMPTY_TYPE_MASK UINT32_C(0x3f) + #define TX_BD_EMPTY_TYPE_SFT 0 + /* + * Indicates that this BD is 16B long and is an empty + * TX BD. Not valid for use by the driver.
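As a worked example of the fields above, the sketch below programs the second 16 bytes of a long BD for LSO with 802.1Q tag insertion via the cfa_meta VLAN key. It is a minimal, illustrative sketch: hdr_bytes is the length of the headers ahead of the TCP payload, hdr_size is expressed in 16-bit words as described above, and byte-order conversion is again omitted.

    #include <stdint.h>

    static void bnxt_fill_tx_bd_long_hi(struct tx_bd_long_hi *hi,
                                        uint16_t hdr_bytes, uint16_t mss,
                                        uint16_t vid, uint8_t pri)
    {
        /* LSO implies the inner checksum lflags, per the note above. */
        hi->lflags = TX_BD_LONG_LFLAGS_LSO;
        hi->hdr_size = (hdr_bytes / 2) & TX_BD_LONG_HDR_SIZE_MASK;
        hi->mss = mss & TX_BD_LONG_MSS_MASK;
        hi->cfa_action = 0; /* no CFA action */
        /* key = VLAN_TAG: insert an 0x8100 tag built from PRI and VID. */
        hi->cfa_meta = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
                       TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 |
                       ((uint32_t)pri << TX_BD_LONG_CFA_META_VLAN_PRI_SFT) |
                       (vid & TX_BD_LONG_CFA_META_VLAN_VID_MASK);
    }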
+ */ + #define TX_BD_EMPTY_TYPE_TX_BD_EMPTY UINT32_C(0x1) + #define TX_BD_EMPTY_TYPE_LAST TX_BD_EMPTY_TYPE_TX_BD_EMPTY + uint8_t unused_1[3]; + uint8_t unused_2; + uint8_t unused_3[3]; + uint8_t unused_4[8]; +} __attribute__((packed)); + +/* rx_prod_pkt_bd (size:128b/16B) */ +struct rx_prod_pkt_bd { + /* This value identifies the type of buffer descriptor. */ + uint16_t flags_type; + /* This value identifies the type of buffer descriptor. */ + #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f) + #define RX_PROD_PKT_BD_TYPE_SFT 0 /* - * This indicates that the ip checksum was calculated for the - * inner packet and that the sum passed for all segments - * included in the aggregation. + * Indicates that this BD is 16B long and is an RX Producer + * (i.e. empty) buffer descriptor. */ - #define RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1) + #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT UINT32_C(0x4) + #define RX_PROD_PKT_BD_TYPE_LAST \ + RX_PROD_PKT_BD_TYPE_RX_PROD_PKT + #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0) + #define RX_PROD_PKT_BD_FLAGS_SFT 6 /* - * This indicates that the TCP, UDP or ICMP checksum was - * calculated for the inner packet and that the sum passed for - * all segments included in the aggregation. + * If set to 1, the packet will be placed at the address plus + * 2B. The 2 bytes of padding will be written as zero. */ - #define RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2) + #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40) /* - * This indicates that the ip checksum was calculated for the - * tunnel header and that the sum passed for all segments - * included in the aggregation. + * If set to 1, the packet write will be padded out to the + * nearest cache-line with zero value padding. */ - #define RX_TPA_START_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4) - /* - * This indicates that the UDP checksum was calculated for the - * tunnel packet and that the sum passed for all segments - * included in the aggregation. + #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80) + /* + * This value is the number of additional buffers in the ring that + * describe the buffer space to be consumed for this packet. + * If the value is zero, then the packet must fit within the + * space described by this BD. If this value is 1 or more, it + * indicates how many additional "buffer" BDs are in the ring + * immediately following this BD to be used for the same + * network packet. + * + * Even if the packet to be placed does not need all the + * additional buffers, they will be consumed anyway. */ - #define RX_TPA_START_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8) - /* This value indicates what format the metadata field is. */ - #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0) - #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_SFT 4 - /* No metadata informtaion. Value is zero. */ - #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4) - /* - * The metadata field contains the VLAN tag and - * TPID value. - metadata[11:0] contains the - * vlan VID value. - metadata[12] contains the - * vlan DE value. - metadata[15:13] contains the - * vlan PRI value. - metadata[31:16] contains - * the vlan TPID value. - */ - #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4) - #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_LAST \ - RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN + #define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK UINT32_C(0x300) + #define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8 /* - * This field indicates the IP type for the inner-most IP - * header.
A value of '0' indicates IPv4. A value of '1' - * indicates IPv6. + * This is the length in bytes of the host physical buffer where + * data for the packet may be placed in host memory. */ - #define RX_TPA_START_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100) - uint32_t metadata; + uint16_t len; /* - * This is data from the CFA block as indicated by the - * meta_format field. + * The opaque data field is passed through to the completion and can be + * used for any data that the driver wants to associate with this + * receive buffer set. */ - /* When meta_format=1, this value is the VLAN VID. */ - #define RX_TPA_START_CMPL_METADATA_VID_MASK UINT32_C(0xfff) - #define RX_TPA_START_CMPL_METADATA_VID_SFT 0 - /* When meta_format=1, this value is the VLAN DE. */ - #define RX_TPA_START_CMPL_METADATA_DE UINT32_C(0x1000) - /* When meta_format=1, this value is the VLAN PRI. */ - #define RX_TPA_START_CMPL_METADATA_PRI_MASK UINT32_C(0xe000) - #define RX_TPA_START_CMPL_METADATA_PRI_SFT 13 - /* When meta_format=1, this value is the VLAN TPID. */ - #define RX_TPA_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000) - #define RX_TPA_START_CMPL_METADATA_TPID_SFT 16 - uint16_t v2; - /* unused4 is 15 b */ + uint32_t opaque; /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. + * This is the host physical address where data for the packet may + * be placed in host memory. */ - #define RX_TPA_START_CMPL_V2 UINT32_C(0x1) - /* unused4 is 15 b */ - uint16_t cfa_code; + uint64_t address; +} __attribute__((packed)); + +/* rx_prod_bfr_bd (size:128b/16B) */ +struct rx_prod_bfr_bd { + /* This value identifies the type of buffer descriptor. */ + uint16_t flags_type; + /* This value identifies the type of buffer descriptor. */ + #define RX_PROD_BFR_BD_TYPE_MASK UINT32_C(0x3f) + #define RX_PROD_BFR_BD_TYPE_SFT 0 /* - * This field identifies the CFA action rule that was used for - * this packet. + * Indicates that this BD is 16B long and is an RX + * Producer Buffer BD. */ - uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset; + #define RX_PROD_BFR_BD_TYPE_RX_PROD_BFR UINT32_C(0x5) + #define RX_PROD_BFR_BD_TYPE_LAST RX_PROD_BFR_BD_TYPE_RX_PROD_BFR + #define RX_PROD_BFR_BD_FLAGS_MASK UINT32_C(0xffc0) + #define RX_PROD_BFR_BD_FLAGS_SFT 6 /* - * This is the size in bytes of the inner most L4 header. This - * can be subtracted from the payload_offset to determine the - * start of the inner most L4 header. + * This is the length in bytes of the host physical buffer where + * data for the packet may be placed in host memory. */ + uint16_t len; + /* This field is not used. */ + uint32_t opaque; /* - * This is the offset from the beginning of the packet in bytes - * for the outer L3 header. If there is no outer L3 header, then - * this value is zero. + * This is the host physical address where data for the packet may + * be placed in host memory. */ - #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff) - #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT 0 + uint64_t address; +} __attribute__((packed)); + +/* rx_prod_agg_bd (size:128b/16B) */ +struct rx_prod_agg_bd { + /* This value identifies the type of buffer descriptor. */ + uint16_t flags_type; + /* This value identifies the type of buffer descriptor. */ + #define RX_PROD_AGG_BD_TYPE_MASK UINT32_C(0x3f) + #define RX_PROD_AGG_BD_TYPE_SFT 0 /* - * This is the offset from the beginning of the packet in bytes - * for the inner most L2 header.
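For completeness, posting an empty receive buffer with the producer packet BD above reduces to four stores; BUFFERS is left at zero, so the packet must fit in this single buffer. A minimal sketch with doorbell writes and ring accounting omitted.

    #include <stdint.h>

    static void bnxt_fill_rx_prod_pkt_bd(struct rx_prod_pkt_bd *bd,
                                         uint64_t dma_addr, uint16_t buf_len,
                                         uint32_t cookie)
    {
        /* Type only: SOP_PAD/EOP_PAD off, BUFFERS = 0 (self-contained). */
        bd->flags_type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
        bd->len = buf_len;    /* capacity of the buffer in bytes */
        bd->opaque = cookie;  /* echoed in the matching RX completion */
        bd->address = dma_addr;
    }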
+ * Indicates that this BD is 16B long and is an + * RX Producer Assembly Buffer Descriptor. */ - #define RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00) - #define RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT 9 + #define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG UINT32_C(0x6) + #define RX_PROD_AGG_BD_TYPE_LAST \ + RX_PROD_AGG_BD_TYPE_RX_PROD_AGG + #define RX_PROD_AGG_BD_FLAGS_MASK UINT32_C(0xffc0) + #define RX_PROD_AGG_BD_FLAGS_SFT 6 /* - * This is the offset from the beginning of the packet in bytes - * for the inner most L3 header. + * If set to 1, the packet write will be padded out to the + * nearest cache-line with zero value padding. */ - #define RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000) - #define RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT 18 + #define RX_PROD_AGG_BD_FLAGS_EOP_PAD UINT32_C(0x40) /* - * This is the size in bytes of the inner most L4 header. This - * can be subtracted from the payload_offset to determine the - * start of the inner most L4 header. + * This is the length in bytes of the host physical buffer where + * data for the packet may be placed in host memory. */ - #define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000) - #define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27 -} __attribute__((packed)); - -/* RX TPA End Completion Record (32 bytes split to 2 16-byte struct) */ -struct rx_tpa_end_cmpl { - uint16_t flags_type; + uint16_t len; /* - * This field indicates the exact type of the completion. By - * convention, the LSB identifies the length of the record in - * 16B units. Even values indicate 16B records. Odd values - * indicate 32B records. + * The opaque data field is passed through to the completion and can be + * used for any data that the driver wants to associate with this + * receive assembly buffer. */ - #define RX_TPA_END_CMPL_TYPE_MASK UINT32_C(0x3f) - #define RX_TPA_END_CMPL_TYPE_SFT 0 + uint32_t opaque; /* - * RX L2 TPA End Completion: Completion at the - * end of a TPA operation. Length = 32B + * This is the host physical address where data for the packet may + * be placed in host memory. */ - #define RX_TPA_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15) + uint64_t address; +} __attribute__((packed)); + +/* cmpl_base (size:128b/16B) */ +struct cmpl_base { + uint16_t type; /* - * When this bit is '1', it indicates a packet that has an error - * of some type. Type of error is indicated in error_flags. + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. */ - #define RX_TPA_END_CMPL_FLAGS_ERROR UINT32_C(0x40) - /* This field indicates how the packet was placed in the buffer. */ - #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380) - #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_SFT 7 + #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f) + #define CMPL_BASE_TYPE_SFT 0 /* - * Jumbo: TPA Packet was placed using jumbo - * algorithm. This means that the first buffer - * will be filled with data before moving to - * aggregation buffers. Each aggregation buffer - * will be filled before moving to the next - * aggregation buffer. + * TX L2 completion: + * Completion of TX packet. Length = 16B */ - #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7) + #define CMPL_BASE_TYPE_TX_L2 UINT32_C(0x0) /* - * Header/Data Separation: Packet was placed - * using Header/Data separation algorithm. The - * separation location is indicated by the itype - * field.
+ * RX L2 completion: + * Completion of an L2 RX packet. Length = 32B */ - #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7) + #define CMPL_BASE_TYPE_RX_L2 UINT32_C(0x11) /* - * GRO/Jumbo: Packet will be placed using - * GRO/Jumbo where the first packet is filled - * with data. Subsequent packets will be placed - * such that any one packet does not span two - * aggregation buffers unless it starts at the - * beginning of an aggregation buffer. + * RX Aggregation Buffer completion: + * Completion of an L2 aggregation buffer in support of + * TPA, HDS, or Jumbo packet completion. Length = 16B */ - #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO (UINT32_C(0x5) << 7) + #define CMPL_BASE_TYPE_RX_AGG UINT32_C(0x12) /* - * GRO/Header-Data Separation: Packet will be - * placed using GRO/HDS where the header is in - * the first packet. Payload of each packet will - * be placed such that any one packet does not - * span two aggregation buffers unless it starts - * at the beginning of an aggregation buffer. + * RX L2 TPA Start Completion: + * Completion at the beginning of a TPA operation. + * Length = 32B */ - #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7) - #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_LAST \ - RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS - /* unused is 2 b */ - #define RX_TPA_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00) - #define RX_TPA_END_CMPL_FLAGS_UNUSED_SFT 10 - /* - * This value indicates what the inner packet determined for the - * packet was. - 2 TCP Packet Indicates that the packet was IP - * and TCP. This indicates that the ip_cs field is valid and - * that the tcp_udp_cs field is valid and contains the TCP - * checksum. This also indicates that the payload_offset field - * is valid. - */ - #define RX_TPA_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) - #define RX_TPA_END_CMPL_FLAGS_ITYPE_SFT 12 - #define RX_TPA_END_CMPL_FLAGS_MASK UINT32_C(0xffc0) - #define RX_TPA_END_CMPL_FLAGS_SFT 6 - uint16_t len; - /* - * This value is zero for TPA End completions. There is no data - * in the buffer that corresponds to the opaque value in this - * completion. - */ - uint32_t opaque; - /* - * This is a copy of the opaque field from the RX BD this - * completion corresponds to. - */ - uint8_t agg_bufs_v1; - /* unused1 is 1 b */ - /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. - */ - #define RX_TPA_END_CMPL_V1 UINT32_C(0x1) - /* - * This value is the number of aggregation buffers that follow - * this entry in the completion ring that are a part of this - * aggregation packet. If the value is zero, then the packet is - * completely contained in the buffer space provided in the - * aggregation start completion. - */ - #define RX_TPA_END_CMPL_AGG_BUFS_MASK UINT32_C(0x7e) - #define RX_TPA_END_CMPL_AGG_BUFS_SFT 1 - /* unused1 is 1 b */ - uint8_t tpa_segs; - /* This value is the number of segments in the TPA operation. */ - uint8_t payload_offset; /* - * This value indicates the offset in bytes from the beginning - * of the packet where the inner payload starts. This value is - * valid for TCP, UDP, FCoE, and RoCE packets. A value of zero - * indicates an offset of 256 bytes. + * RX L2 TPA End Completion: + * Completion at the end of a TPA operation.
+ * Length = 32B */ - uint8_t agg_id; + #define CMPL_BASE_TYPE_RX_TPA_END UINT32_C(0x15) /* - * This is the aggregation ID that the completion is associated - * with. Use this number to correlate the TPA start completion - * with the TPA end completion. + * Statistics Ejection Completion: + * Completion of statistics data ejection buffer. + * Length = 16B */ - /* unused2 is 1 b */ + #define CMPL_BASE_TYPE_STAT_EJECT UINT32_C(0x1a) /* - * This is the aggregation ID that the completion is associated - * with. Use this number to correlate the TPA start completion - * with the TPA end completion. + * HWRM Command Completion: + * Completion of an HWRM command. */ - #define RX_TPA_END_CMPL_AGG_ID_MASK UINT32_C(0xfe) - #define RX_TPA_END_CMPL_AGG_ID_SFT 1 - uint32_t tsdelta; + #define CMPL_BASE_TYPE_HWRM_DONE UINT32_C(0x20) + /* Forwarded HWRM Request */ + #define CMPL_BASE_TYPE_HWRM_FWD_REQ UINT32_C(0x22) + /* Forwarded HWRM Response */ + #define CMPL_BASE_TYPE_HWRM_FWD_RESP UINT32_C(0x24) + /* HWRM Asynchronous Event Information */ + #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e) + /* CQ Notification */ + #define CMPL_BASE_TYPE_CQ_NOTIFICATION UINT32_C(0x30) + /* SRQ Threshold Event */ + #define CMPL_BASE_TYPE_SRQ_EVENT UINT32_C(0x32) + /* DBQ Threshold Event */ + #define CMPL_BASE_TYPE_DBQ_EVENT UINT32_C(0x34) + /* QP Async Notification */ + #define CMPL_BASE_TYPE_QP_EVENT UINT32_C(0x38) + /* Function Async Notification */ + #define CMPL_BASE_TYPE_FUNC_EVENT UINT32_C(0x3a) + #define CMPL_BASE_TYPE_LAST CMPL_BASE_TYPE_FUNC_EVENT + /* info1 is 16 b */ + uint16_t info1; + /* info2 is 32 b */ + uint32_t info2; /* - * For non-GRO packets, this value is the timestamp delta - * between earliest and latest timestamp values for TPA packet. - * If packets were not time stamped, then delta will be zero. - * For GRO packets, this field is zero except for the following - * sub-fields. - tsdelta[31] Timestamp present indication. When - * '0', no Timestamp option is in the packet. When '1', then a - * Timestamp option is present in the packet. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ + uint32_t info3_v; + #define CMPL_BASE_V UINT32_C(0x1) + #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe) + #define CMPL_BASE_INFO3_SFT 1 + /* info4 is 32 b */ + uint32_t info4; } __attribute__((packed)); -/* last 16 bytes of RX TPA End Completion Record */ -struct rx_tpa_end_cmpl_hi { - uint32_t tpa_dup_acks; - /* unused3 is 28 b */ +/* tx_cmpl (size:128b/16B) */ +struct tx_cmpl { + uint16_t flags_type; /* - * This value is the number of duplicate ACKs that have been - * received as part of the TPA operation. + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. */ - #define RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK UINT32_C(0xf) - #define RX_TPA_END_CMPL_TPA_DUP_ACKS_SFT 0 - /* unused3 is 28 b */ - uint16_t tpa_seg_len; - /* - * This value is the valid when TPA completion is active. It - * indicates the length of the longest segment of the TPA - * operation for LRO mode and the length of the first segment in - * GRO mode. This value may be used by GRO software to re- - * construct the original packet stream from the TPA packet. - * This is the length of all but the last segment for GRO. 
In - * LRO mode this value may be used to indicate MSS size to the - * stack. - */ - uint16_t unused_3; - /* unused4 is 16 b */ - uint16_t errors_v2; + #define TX_CMPL_TYPE_MASK UINT32_C(0x3f) + #define TX_CMPL_TYPE_SFT 0 /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. + * TX L2 completion: + * Completion of TX packet. Length = 16B */ - #define RX_TPA_END_CMPL_V2 UINT32_C(0x1) + #define TX_CMPL_TYPE_TX_L2 UINT32_C(0x0) + #define TX_CMPL_TYPE_LAST TX_CMPL_TYPE_TX_L2 + #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define TX_CMPL_FLAGS_SFT 6 /* - * This error indicates that there was some sort of problem with - * the BDs for the packet that was found after part of the - * packet was already placed. The packet should be treated as - * invalid. + * When this bit is '1', it indicates a packet that has an + * error of some type. Type of error is indicated in + * error_flags. */ - #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe) - #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40) /* - * This error occurs when there is a fatal HW - * problem in the chip only. It indicates that - * there were not BDs on chip but that there was - * adequate reservation. provided by the TPA - * block. + * When this bit is '1', it indicates that the completed packet + * was transmitted using the push acceleration data provided + * by the driver. When this bit is '0', it indicates that the + * packet had no push acceleration data written or was executed + * as a normal packet even though push data was provided. */ - #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \ - (UINT32_C(0x2) << 1) + #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80) + /* unused1 is 16 b */ + uint16_t unused_0; /* - * This error occurs when TPA block was not - * configured to reserve adequate BDs for TPA - * operations on this RX ring. All data for the - * TPA operation was not placed. This error can - * also be generated when the number of segments - * is not programmed correctly in TPA and the 33 - * total aggregation buffers allowed for the TPA - * operation has been exceeded. + * This is a copy of the opaque field from the first TX BD of this + * transmitted packet. */ - #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR \ - (UINT32_C(0x4) << 1) - #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_LAST \ - RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR - #define RX_TPA_END_CMPL_ERRORS_MASK UINT32_C(0xfffe) - #define RX_TPA_END_CMPL_ERRORS_SFT 1 - uint16_t unused_4; - /* unused5 is 16 b */ - uint32_t start_opaque; + uint32_t opaque; + uint16_t errors_v; /* - * This is the opaque value that was completed for the TPA start - * completion that corresponds to this TPA end completion. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ -} __attribute__((packed)); - -/* HWRM Forwarded Request (16 bytes) */ -struct hwrm_fwd_req_cmpl { - uint16_t req_len_type; - /* Length of forwarded request in bytes. */ + #define TX_CMPL_V UINT32_C(0x1) + #define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe) + #define TX_CMPL_ERRORS_SFT 1 /* - * This field indicates the exact type of the completion. By - * convention, the LSB identifies the length of the record in - * 16B units. Even values indicate 16B records. Odd values - * indicate 32B records.
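The even/odd valid-bit convention above is what lets a driver detect new entries without reading a hardware index: it keeps an expected phase, flips it on every ring wrap, and treats an entry as host-owned when the V bit matches the phase. A minimal sketch follows; a real driver also issues a read barrier between the valid check and reading the rest of the entry, which is noted but not spelled out here.

    #include <stdbool.h>
    #include <stdint.h>

    /* 'phase' starts at 1 and is toggled by the caller on each ring wrap. */
    static bool bnxt_tx_cmpl_valid(const struct tx_cmpl *c, uint16_t phase)
    {
        /* Real code adds a read barrier after this check before using *c. */
        return (c->errors_v & TX_CMPL_V) == phase;
    }

    static bool bnxt_tx_cmpl_ok(const struct tx_cmpl *c)
    {
        return (c->errors_v & TX_CMPL_ERRORS_MASK) == 0 &&
               !(c->flags_type & TX_CMPL_FLAGS_ERROR);
    }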
+ * This error indicates that there was some sort of problem + * with the BDs for the packet. */ - #define HWRM_FWD_INPUT_CMPL_TYPE_MASK UINT32_C(0x3f) - #define HWRM_FWD_INPUT_CMPL_TYPE_SFT 0 - /* Forwarded HWRM Request */ - #define HWRM_FWD_INPUT_CMPL_TYPE_HWRM_FWD_INPUT UINT32_C(0x22) - /* Length of forwarded request in bytes. */ - #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0) - #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6 - uint16_t source_id; + #define TX_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe) + #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + /* No error */ + #define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1) /* - * Source ID of this request. Typically used in forwarding - * requests and responses. 0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - - * HWRM + * Bad Format: + * BDs were not formatted correctly. */ - uint32_t unused_0; - /* unused1 is 32 b */ - uint32_t req_buf_addr_v[2]; - /* Address of forwarded request. */ + #define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1) + #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \ + TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. + * When this bit is '1', it indicates that the length of + * the packet was zero. No packet was transmitted. */ - #define HWRM_FWD_INPUT_CMPL_V UINT32_C(0x1) - /* Address of forwarded request. */ - #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe) - #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1 -} __attribute__((packed)); - -/* HWRM Asynchronous Event Completion Record (16 bytes) */ -struct hwrm_async_event_cmpl { - uint16_t type; - /* unused1 is 10 b */ + #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10) /* - * This field indicates the exact type of the completion. By - * convention, the LSB identifies the length of the record in - * 16B units. Even values indicate 16B records. Odd values - * indicate 32B records. + * When this bit is '1', it indicates that the packet + * was longer than the programmed limit in TDI. No + * packet was transmitted. */ - #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f) - #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0 - /* HWRM Asynchronous Event Information */ - #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e) - /* unused1 is 10 b */ - uint16_t event_id; - /* Identifiers of events. 
*/ - /* Link status changed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE UINT32_C(0x0) - /* Link MTU changed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE UINT32_C(0x1) - /* Link speed changed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE UINT32_C(0x2) - /* DCB Configuration changed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE UINT32_C(0x3) - /* Port connection not allowed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED UINT32_C(0x4) - /* Link speed configuration was not allowed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \ - UINT32_C(0x5) - /* Link speed configuration change */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE UINT32_C(0x6) - /* Port PHY configuration change */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE UINT32_C(0x7) - /* Function driver unloaded */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD UINT32_C(0x10) - /* Function driver loaded */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD UINT32_C(0x11) - /* Function FLR related processing has completed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT UINT32_C(0x12) - /* PF driver unloaded */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD UINT32_C(0x20) - /* PF driver loaded */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD UINT32_C(0x21) - /* VF Function Level Reset (FLR) */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR UINT32_C(0x30) - /* VF MAC Address Change */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE UINT32_C(0x31) - /* PF-VF communication channel status change. */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \ - UINT32_C(0x32) - /* VF Configuration Change */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE UINT32_C(0x33) - /* LLFC/PFC Configuration Change */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE UINT32_C(0x34) - /* HWRM Error */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR UINT32_C(0xff) - uint32_t event_data2; - /* Event specific data */ - uint8_t opaque_v; - /* opaque is 7 b */ + #define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH UINT32_C(0x20) /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. + * When this bit is '1', it indicates that one or more of the + * BDs associated with this packet generated a PCI error. + * This probably means the address was not valid. */ - #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1) - /* opaque is 7 b */ - #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe) - #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1 - uint8_t timestamp_lo; - /* 8-lsb timestamp from POR (100-msec resolution) */ - uint16_t timestamp_hi; - /* 16-lsb timestamp from POR (100-msec resolution) */ - uint32_t event_data1; - /* Event specific data */ -} __attribute__((packed)); - -/* hwrm_ver_get */ -/* - * Description: This function is called by a driver to determine the HWRM - * interface version supported by the HWRM firmware, the version of HWRM - * firmware implementation, the name of HWRM firmware, the versions of other - * embedded firmwares, and the names of other embedded firmwares, etc. Any - * interface or firmware version with major = 0, minor = 0, and update = 0 shall - * be considered an invalid version. 
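Taken together, TX_CMPL_V and the TX_CMPL_ERRORS_* bits are what a transmit-completion handler inspects once an entry is valid. A hedged sketch of the error decode (the function name and return convention are illustrative; only the TX_CMPL_* macros come from this header):

#include <stdint.h>
#include <rte_byteorder.h>

/* Sketch only: classify a TX completion's error bits.  Returns zero
 * when the packet transmitted cleanly. */
static inline int
tx_cmpl_check_errors(const struct tx_cmpl *txc)
{
	uint16_t errors = rte_le_to_cpu_16(txc->errors_v) &
			  TX_CMPL_ERRORS_MASK;

	if (errors == 0)
		return 0;
	if ((errors & TX_CMPL_ERRORS_BUFFER_ERROR_MASK) ==
	    TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT)
		return -1;	/* BDs were malformed */
	if (errors & TX_CMPL_ERRORS_ZERO_LENGTH_PKT)
		return -2;	/* zero-length packet, nothing was sent */
	return -3;		/* other error (TDI limit, PCI, ...) */
}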
- */ -/* Input (24 bytes) */ -struct hwrm_ver_get_input { - uint16_t req_type; + #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * When this bit is '1', it indicates that the packet was longer + * than indicated by the hint. No packet was transmitted. */ - uint16_t cmpl_ring; + #define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * When this bit is '1', it indicates that the packet was + * dropped due to Poison TLP error on one or more of the + * TLPs in the PXP completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100) + /* unused2 is 16 b */ + uint16_t unused_1; + /* unused3 is 32 b */ + uint32_t unused_2; +} __attribute__((packed)); + +/* rx_pkt_cmpl (size:128b/16B) */ +struct rx_pkt_cmpl { + uint16_t flags_type; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. */ - uint64_t resp_addr; + #define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_PKT_CMPL_TYPE_SFT 0 /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * RX L2 completion: + * Completion of an L2 RX packet. Length = 32B */ - uint8_t hwrm_intf_maj; + #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11) + #define RX_PKT_CMPL_TYPE_LAST RX_PKT_CMPL_TYPE_RX_L2 + #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define RX_PKT_CMPL_FLAGS_SFT 6 /* - * This field represents the major version of HWRM interface - * specification supported by the driver HWRM implementation. - * The interface major version is intended to change only when - * non backward compatible changes are made to the HWRM - * interface specification. + * When this bit is '1', it indicates a packet that has an + * error of some type. Type of error is indicated in + * error_flags. */ - uint8_t hwrm_intf_min; + #define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* This field indicates how the packet was placed in the buffer. */ + #define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380) + #define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7 /* - * This field represents the minor version of HWRM interface - * specification supported by the driver HWRM implementation. A - * change in interface minor version is used to reflect - * significant backward compatible modification to HWRM - * interface specification. This can be due to addition or - * removal of functionality. HWRM interface specifications with - * the same major version but different minor versions are - * compatible. + * Normal: + * Packet was placed using normal algorithm. */ - uint8_t hwrm_intf_upd; + #define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (UINT32_C(0x0) << 7) /* - * This field represents the update version of HWRM interface - * specification supported by the driver HWRM implementation.
- * The interface update version is used to reflect minor changes - * or bug fixes to a released HWRM interface specification. + * Jumbo: + * Packet was placed using jumbo algorithm. */ - uint8_t unused_0[5]; -} __attribute__((packed)); - -/* Output (128 bytes) */ -struct hwrm_ver_get_output { - uint16_t error_code; + #define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Header/Data Separation: + * Packet was placed using Header/Data separation algorithm. + * The separation location is indicated by the itype field. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7) + #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST \ + RX_PKT_CMPL_FLAGS_PLACEMENT_HDS + /* This bit is '1' if the RSS field in this completion is valid. */ + #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400) + /* unused is 1 b */ + #define RX_PKT_CMPL_FLAGS_UNUSED UINT32_C(0x800) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This value indicates what the inner packet was determined + * to be. */ - uint8_t hwrm_intf_maj; + #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) + #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12 /* - * This field represents the major version of HWRM interface - * specification supported by the HWRM implementation. The - * interface major version is intended to change only when non - * backward compatible changes are made to the HWRM interface - * specification. A HWRM implementation that is compliant with - * this specification shall provide value of 1 in this field. + * Not Known: + * Indicates that the packet type was not known. */ - uint8_t hwrm_intf_min; + #define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN \ + (UINT32_C(0x0) << 12) /* - * This field represents the minor version of HWRM interface - * specification supported by the HWRM implementation. A change - * in interface minor version is used to reflect significant - * backward compatible modification to HWRM interface - * specification. This can be due to addition or removal of - * functionality. HWRM interface specifications with the same - * major version but different minor versions are compatible. A - * HWRM implementation that is compliant with this specification - * shall provide value of 2 in this field. + * IP Packet: + * Indicates that the packet was an IP packet, but further + * classification was not possible. */ - uint8_t hwrm_intf_upd; + #define RX_PKT_CMPL_FLAGS_ITYPE_IP \ + (UINT32_C(0x1) << 12) /* - * This field represents the update version of HWRM interface - * specification supported by the HWRM implementation. The - * interface update version is used to reflect minor changes or - * bug fixes to a released HWRM interface specification. A HWRM - * implementation that is compliant with this specification - * shall provide value of 2 in this field. + * TCP Packet: + * Indicates that the packet was IP and TCP. + * This indicates that the payload_offset field is valid. */ - uint8_t hwrm_intf_rsvd; - uint8_t hwrm_fw_maj; + #define RX_PKT_CMPL_FLAGS_ITYPE_TCP \ + (UINT32_C(0x2) << 12) /* - * This field represents the major version of HWRM firmware.
A - * change in firmware major version represents a major firmware - * release. + * UDP Packet: + * Indicates that the packet was IP and UDP. + * This indicates that the payload_offset field is valid. */ - uint8_t hwrm_fw_min; + #define RX_PKT_CMPL_FLAGS_ITYPE_UDP \ + (UINT32_C(0x3) << 12) /* - * This field represents the minor version of HWRM firmware. A - * change in firmware minor version represents significant - * firmware functionality changes. + * FCoE Packet: + * Indicates that the packet was recognized as an FCoE packet. + * This also indicates that the payload_offset field is valid. */ - uint8_t hwrm_fw_bld; + #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE \ + (UINT32_C(0x4) << 12) /* - * This field represents the build version of HWRM firmware. A - * change in firmware build version represents bug fixes to a - * released firmware. + * RoCE Packet: + * Indicates that the packet was recognized as a RoCE packet. + * This also indicates that the payload_offset field is valid. */ - uint8_t hwrm_fw_rsvd; + #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE \ + (UINT32_C(0x5) << 12) /* - * This field is a reserved field. This field can be used to - * represent firmware branches or customer specific releases - * tied to a specific (major,minor,update) version of the HWRM - * firmware. + * ICMP Packet: + * Indicates that the packet was recognized as ICMP. + * This indicates that the payload_offset field is valid. */ - uint8_t mgmt_fw_maj; + #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP \ + (UINT32_C(0x7) << 12) /* - * This field represents the major version of mgmt firmware. A - * change in major version represents a major release. + * PtP packet wo/timestamp: + * Indicates that the packet was recognized as a PtP + * packet. */ - uint8_t mgmt_fw_min; + #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP \ + (UINT32_C(0x8) << 12) /* - * This field represents the minor version of mgmt firmware. A - * change in minor version represents significant functionality - * changes. + * PtP packet w/timestamp: + * Indicates that the packet was recognized as a PtP + * packet and that a timestamp was taken for the packet. */ - uint8_t mgmt_fw_bld; + #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP \ + (UINT32_C(0x9) << 12) + #define RX_PKT_CMPL_FLAGS_ITYPE_LAST \ + RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP /* - * This field represents the build version of mgmt firmware. A - * change in update version represents bug fixes. + * This is the length of the data for the packet stored in the + * buffer(s) identified by the opaque value. This includes + * the packet BD and any associated buffer BDs. This does not include + * the length of any data placed in aggregation BDs. */ - uint8_t mgmt_fw_rsvd; + uint16_t len; /* - * This field is a reserved field. This field can be used to - * represent firmware branches or customer specific releases - * tied to a specific (major,minor,update) version + * This is a copy of the opaque field from the RX BD this completion + * corresponds to. */ - uint8_t netctrl_fw_maj; + uint32_t opaque; + uint8_t agg_bufs_v1; /* - * This field represents the major version of network control - * firmware. A change in major version represents a major - * release. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ - uint8_t netctrl_fw_min; + #define RX_PKT_CMPL_V1 UINT32_C(0x1) /* - * This field represents the minor version of network control - * firmware.
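Since the itype codes above are pre-shifted into bits 15:12 of flags_type, a receive path can mask and switch on them directly. A small illustrative classifier (the enum and helper name are not from this header; only the RX_PKT_CMPL_* macros are):

#include <stdint.h>
#include <rte_byteorder.h>

/* Sketch only: map the itype field of an rx_pkt_cmpl to a rough
 * L4 class. */
enum rx_l4_class { RX_L4_NONE, RX_L4_TCP, RX_L4_UDP, RX_L4_OTHER };

static inline enum rx_l4_class
rx_cmpl_l4_class(const struct rx_pkt_cmpl *rxc)
{
	uint16_t itype = rte_le_to_cpu_16(rxc->flags_type) &
			 RX_PKT_CMPL_FLAGS_ITYPE_MASK;

	switch (itype) {
	case RX_PKT_CMPL_FLAGS_ITYPE_TCP:
		return RX_L4_TCP;
	case RX_PKT_CMPL_FLAGS_ITYPE_UDP:
		return RX_L4_UDP;
	case RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN:
		return RX_L4_NONE;
	default:
		return RX_L4_OTHER;	/* IP, FCoE, RoCE, ICMP, PtP */
	}
}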
A change in minor version represents significant - * functionality changes. + * This value is the number of aggregation buffers that follow this + * entry in the completion ring that are a part of this packet. + * If the value is zero, then the packet is completely contained + * in the buffer space provided for the packet in the RX ring. */ - uint8_t netctrl_fw_bld; + #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e) + #define RX_PKT_CMPL_AGG_BUFS_SFT 1 + /* unused1 is 2 b */ + #define RX_PKT_CMPL_UNUSED1_MASK UINT32_C(0xc0) + #define RX_PKT_CMPL_UNUSED1_SFT 6 + /* + * This is the RSS hash type for the packet. The value is packed + * {tuple_extract_op[1:0],rss_profile_id[4:0],tuple_extract_op[2]}. + * + * The value of tuple_extract_op provides the information about + * what fields the hash was computed on. + * * 0: The RSS hash was computed over source IP address, + * destination IP address, source port, and destination port of inner + * IP and TCP or UDP headers. Note: For non-tunneled packets, + * the packet headers are considered inner packet headers for the RSS + * hash computation purpose. + * * 1: The RSS hash was computed over source IP address and destination + * IP address of inner IP header. Note: For non-tunneled packets, + * the packet headers are considered inner packet headers for the RSS + * hash computation purpose. + * * 2: The RSS hash was computed over source IP address, + * destination IP address, source port, and destination port of + * IP and TCP or UDP headers of outer tunnel headers. + * Note: For non-tunneled packets, this value is not applicable. + * * 3: The RSS hash was computed over source IP address and + * destination IP address of IP header of outer tunnel headers. + * Note: For non-tunneled packets, this value is not applicable. + * + * Note that the 4-tuple values listed above are applicable + * for layer 4 protocols supported and enabled for RSS in the hardware, + * HWRM firmware, and drivers. For example, if RSS hash is supported and + * enabled for TCP traffic only, then the values of tuple_extract_op + * corresponding to 4-tuples are only valid for TCP traffic. + */ + uint8_t rss_hash_type; + /* + * This value indicates the offset in bytes from the beginning of the packet + * where the inner payload starts. This value is valid for TCP, UDP, + * FCoE, and RoCE packets. + * + * A value of zero indicates that the header is 256B into the packet. + */ + uint8_t payload_offset; + /* unused2 is 8 b */ + uint8_t unused1; /* - * This field represents the build version of network control - * firmware. A change in update version represents bug fixes. + * This value is the RSS hash value calculated for the packet + * based on the mode bits and key value in the VNIC. */ - uint8_t netctrl_fw_rsvd; + uint32_t rss_hash; +} __attribute__((packed)); + +/* rx_pkt_cmpl_hi (size:128b/16B) */ +struct rx_pkt_cmpl_hi { + uint32_t flags2; /* - * This field is a reserved field. This field can be used to - * represent firmware branches or customer specific releases - * tied to a specific (major,minor,update) version + * This indicates that the ip checksum was calculated for the + * inner packet and that the ip_cs_error field indicates if there + * was an error. */ - uint32_t dev_caps_cfg; + #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1) /* - * This field is used to indicate device's capabilities and - * configurations. + * This indicates that the TCP, UDP or ICMP checksum was + * calculated for the inner packet and that the l4_cs_error field + * indicates if there was an error.
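Assuming the {...} notation above is an MSB-first bit concatenation, rss_hash_type unpacks with two shifts: bits 7:6 carry tuple_extract_op[1:0], bits 5:1 the RSS profile ID, and bit 0 tuple_extract_op[2]. A sketch under that assumption (helper names are illustrative):

#include <stdint.h>

/* Sketch only: unpack rss_hash_type as documented above. */
static inline uint8_t
rx_cmpl_tuple_extract_op(uint8_t rss_hash_type)
{
	return ((rss_hash_type >> 6) & 0x3) |	/* op bits [1:0] */
	       ((rss_hash_type & 0x1) << 2);	/* op bit  [2]   */
}

static inline uint8_t
rx_cmpl_rss_profile_id(uint8_t rss_hash_type)
{
	return (rss_hash_type >> 1) & 0x1f;	/* profile id [4:0] */
}

A tuple_extract_op of 0 or 1 then means the hash covered inner headers (4-tuple or 2-tuple), while 2 or 3 means outer tunnel headers.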
*/ + #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2) /* - * If set to 1, then secure firmware update behavior is - * supported. If set to 0, then secure firmware update behavior - * is not supported. + * This indicates that the ip checksum was calculated for the + * tunnel header and that the t_ip_cs_error field indicates if there + * was an error. */ - #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED \ - UINT32_C(0x1) + #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4) /* - * If set to 1, then firmware based DCBX agent is supported. If - * set to 0, then firmware based DCBX agent capability is not - * supported on this device. + * This indicates that the UDP checksum was + * calculated for the tunnel packet and that the t_l4_cs_error field + * indicates if there was an error. */ - #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED \ - UINT32_C(0x2) + #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8) + /* This value indicates what format the metadata field is. */ + #define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0) + #define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4 + /* No metadata information. Value is zero. */ + #define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4) + /* + * The metadata field contains the VLAN tag and TPID value. + * - metadata[11:0] contains the vlan VID value. + * - metadata[12] contains the vlan DE value. + * - metadata[15:13] contains the vlan PRI value. + * - metadata[31:16] contains the vlan TPID value. + */ + #define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4) + #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \ + RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN /* - * If set to 1, then HWRM short command format is supported. If - * set to 0, then HWRM short command format is not supported. + * This field indicates the IP type for the inner-most IP header. + * A value of '0' indicates IPv4. A value of '1' indicates IPv6. + * This value is only valid if itype indicates a packet + * with an IP header. */ - #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED \ - UINT32_C(0x4) + #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100) /* - * If set to 1, then HWRM short command format is required. If - * set to 0, then HWRM short command format is not required. + * This is data from the CFA block as indicated by the meta_format + * field. */ - #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED \ - UINT32_C(0x8) - uint8_t roce_fw_maj; + uint32_t metadata; + /* When meta_format=1, this value is the VLAN VID. */ + #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff) + #define RX_PKT_CMPL_METADATA_VID_SFT 0 + /* When meta_format=1, this value is the VLAN DE. */ + #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000) + /* When meta_format=1, this value is the VLAN PRI. */ + #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000) + #define RX_PKT_CMPL_METADATA_PRI_SFT 13 + /* When meta_format=1, this value is the VLAN TPID. */ + #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000) + #define RX_PKT_CMPL_METADATA_TPID_SFT 16 + uint16_t errors_v2; /* - * This value is written by the NIC such that it will be different - * for each pass through the completion queue. The even passes - * will write 1. The odd passes will write 0.
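Because metadata[15:0] mirrors the 802.1Q TCI layout (PRI in 15:13, DE in 12, VID in 11:0) when meta_format=1, the VLAN fields can be recovered as below. Sketch only; the helper name and out-parameters are illustrative, the masks are from the header above:

#include <stdint.h>
#include <rte_byteorder.h>

/* Sketch only: recover the VLAN TCI and TPID from 'metadata' when
 * flags2 reports META_FORMAT_VLAN.  Returns 0 on success, -1 when
 * no VLAN metadata is present. */
static inline int
rx_cmpl_vlan(const struct rx_pkt_cmpl_hi *hi, uint16_t *tci, uint16_t *tpid)
{
	uint32_t flags2 = rte_le_to_cpu_32(hi->flags2);
	uint32_t meta;

	if ((flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) !=
	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
		return -1;

	meta = rte_le_to_cpu_32(hi->metadata);
	*tci = meta & (RX_PKT_CMPL_METADATA_PRI_MASK |
		       RX_PKT_CMPL_METADATA_DE |
		       RX_PKT_CMPL_METADATA_VID_MASK);
	*tpid = meta >> RX_PKT_CMPL_METADATA_TPID_SFT;
	return 0;
}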
*/ - uint8_t roce_fw_min; + #define RX_PKT_CMPL_V2 \ + UINT32_C(0x1) + #define RX_PKT_CMPL_ERRORS_MASK \ + UINT32_C(0xfffe) + #define RX_PKT_CMPL_ERRORS_SFT 1 /* - * This field represents the minor version of RoCE firmware. A - * change in minor version represents significant functionality - * changes. + * This error indicates that there was some sort of problem with + * the BDs for the packet that was found after part of the + * packet was already placed. The packet should be treated as + * invalid. */ - uint8_t roce_fw_bld; + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK \ + UINT32_C(0xe) + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + /* No buffer error */ + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER \ + (UINT32_C(0x0) << 1) /* - * This field represents the build version of RoCE firmware. A - * change in update version represents bug fixes. + * Did Not Fit: + * Packet did not fit into packet buffer provided. + * For regular placement, this means the packet did not fit + * in the buffer provided. For HDS and jumbo placement, this + * means that the packet could not be placed into 7 physical + * buffers or less. */ - uint8_t roce_fw_rsvd; + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \ + (UINT32_C(0x1) << 1) /* - * This field is a reserved field. This field can be used to - * represent firmware branches or customer specific releases - * tied to a specific (major,minor,update) version + * Not On Chip: + * All BDs needed for the packet were not on-chip when + * the packet arrived. */ - char hwrm_fw_name[16]; + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \ + (UINT32_C(0x2) << 1) /* - * This field represents the name of HWRM FW (ASCII chars with - * NULL at the end). + * Bad Format: + * BDs were not formatted correctly. */ - char mgmt_fw_name[16]; + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \ + (UINT32_C(0x3) << 1) + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT /* - * This field represents the name of mgmt FW (ASCII chars with - * NULL at the end). + * This indicates that there was an error in the IP header + * checksum. */ - char netctrl_fw_name[16]; + #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR \ + UINT32_C(0x10) /* - * This field represents the name of network control firmware - * (ASCII chars with NULL at the end). + * This indicates that there was an error in the TCP, UDP + * or ICMP checksum. */ - uint32_t reserved2[4]; + #define RX_PKT_CMPL_ERRORS_L4_CS_ERROR \ + UINT32_C(0x20) /* - * This field is reserved for future use. The responder should - * set it to 0. The requester should ignore this field. + * This indicates that there was an error in the tunnel + * IP header checksum. */ - char roce_fw_name[16]; + #define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR \ + UINT32_C(0x40) /* - * This field represents the name of RoCE FW (ASCII chars with - * NULL at the end). + * This indicates that there was an error in the tunnel + * UDP checksum. */ - uint16_t chip_num; - /* This field returns the chip number. */ - uint8_t chip_rev; - /* This field returns the revision of chip. */ - uint8_t chip_metal; - /* This field returns the chip metal number. */ - uint8_t chip_bond_id; - /* This field returns the bond id of the chip. */ - uint8_t chip_platform_type; + #define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR \ + UINT32_C(0x80) /* - * This value indicates the type of platform used for chip - * implementation. + * This indicates that there was a CRC error on either an FCoE + * or RoCE packet. The itype indicates the packet type. 
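A checksum verdict therefore needs both words: flags2 says whether the hardware computed a checksum at all, and errors_v2 says whether it failed. One possible inner-L4 check (illustrative helper, not part of the patch):

#include <stdint.h>
#include <rte_byteorder.h>

/* Sketch only: inner L4 checksum verdict for a 32B RX completion.
 * Returns 1 = good, 0 = bad, -1 = not checked by hardware. */
static inline int
rx_cmpl_l4_csum_ok(const struct rx_pkt_cmpl_hi *hi)
{
	uint32_t flags2 = rte_le_to_cpu_32(hi->flags2);
	uint16_t errors = rte_le_to_cpu_16(hi->errors_v2) &
			  RX_PKT_CMPL_ERRORS_MASK;

	if (!(flags2 & RX_PKT_CMPL_FLAGS2_L4_CS_CALC))
		return -1;	/* checksum was not calculated */
	return (errors & RX_PKT_CMPL_ERRORS_L4_CS_ERROR) ? 0 : 1;
}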
*/ - /* ASIC */ - #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC UINT32_C(0x0) - /* FPGA platform of the chip. */ - #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA UINT32_C(0x1) - /* Palladium platform of the chip. */ - #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2) - uint16_t max_req_win_len; + #define RX_PKT_CMPL_ERRORS_CRC_ERROR \ + UINT32_C(0x100) /* - * This field returns the maximum value of request window that - * is supported by the HWRM. The request window is mapped into - * device address space using MMIO. + * This indicates that there was an error in the tunnel + * portion of the packet when this + * field is non-zero. */ - uint16_t max_resp_len; - /* This field returns the maximum value of response buffer in bytes. */ - uint16_t def_req_timeout; + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK \ + UINT32_C(0xe00) + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9 /* - * This field returns the default request timeout value in - * milliseconds. + * No additional error occurred on the tunnel portion + * of the packet, or the packet does not have a tunnel. */ - uint8_t init_pending; + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR \ + (UINT32_C(0x0) << 9) /* - * This field will indicate if any subsystems is not fully - * initialized. + * Indicates that IP header version does not match + * expectation from L2 Ethertype for IPv4 and IPv6 + * in the tunnel header. */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \ + (UINT32_C(0x1) << 9) /* - * If set to 1, device is not ready. If set to 0, device is - * ready to accept all HWRM commands. + * Indicates that header length is out of range in the + * tunnel header. Valid for + * IPv4. */ - #define HWRM_VER_GET_OUTPUT_INIT_PENDING_DEV_NOT_RDY UINT32_C(0x1) - uint8_t unused_0; - uint8_t unused_1; - uint8_t valid; + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \ + (UINT32_C(0x2) << 9) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * Indicates that the physical packet is shorter than that + * claimed by the PPPoE header length for a tunnel PPPoE + * packet. */ -} __attribute__((packed)); - -/* hwrm_func_reset */ -/* - * Description: This command resets a hardware function (PCIe function) and - * frees any resources used by the function. This command shall be initiated by - * the driver after an FLR has occurred to prepare the function for re-use. This - * command may also be initiated by a driver prior to doing it's own - * configuration. This command puts the function into the reset state. In the - * reset state, global and port related features of the chip are not available. - */ -/* - * Note: This command will reset a function that has already been disabled or - * idled. The command returns all the resources owned by the function so a new - * driver may allocate and configure resources normally. - */ -/* Input (24 bytes) */ -struct hwrm_func_reset_input { - uint16_t req_type; + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \ + (UINT32_C(0x3) << 9) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * Indicates that the physical packet is shorter than that claimed + * by the tunnel l3 header length.
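The tunnel error codes above are stored pre-shifted by 9, so the masked field compares directly against the constants. Sketch only (the helper name is illustrative):

#include <stdint.h>
#include <rte_byteorder.h>

/* Sketch only: extract the 3-bit tunnel error code from errors_v2.
 * The result compares directly against the pre-shifted
 * RX_PKT_CMPL_ERRORS_T_PKT_ERROR_* values, e.g. T_L3_BAD_VERSION. */
static inline uint16_t
rx_cmpl_t_pkt_error(const struct rx_pkt_cmpl_hi *hi)
{
	return rte_le_to_cpu_16(hi->errors_v2) &
	       RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK;
}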
Valid for IPv4 or IPv6 + * tunnel packets. */ - uint16_t cmpl_ring; + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \ + (UINT32_C(0x4) << 9) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Indicates that the physical packet is shorter than that + * claimed by the tunnel UDP header length for a tunnel + * UDP packet that is not fragmented. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; - /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM - */ - uint64_t resp_addr; + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \ + (UINT32_C(0x5) << 9) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * Indicates that the IPv4 TTL or IPv6 hop limit check + * has failed (e.g. TTL = 0) in the tunnel header. Valid + * for IPv4 and IPv6. */ - uint32_t enables; - /* This bit must be '1' for the vf_id_valid field to be configured. */ - #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1) - uint16_t vf_id; + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \ + (UINT32_C(0x6) << 9) + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL /* - * The ID of the VF that this PF is trying to reset. Only the - * parent PF shall be allowed to reset a child VF. A parent PF - * driver shall use this field only when a specific child VF is - * requested to be reset. + * This indicates that there was an error in the inner + * portion of the packet when this + * field is non-zero. */ - uint8_t func_reset_level; - /* This value indicates the level of a function reset. */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK \ + UINT32_C(0xf000) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12 /* - * Reset the caller function and its children - * VFs (if any). If no children functions exist, - * then reset the caller function only. + * No additional error occurred on the inner portion + * of the packet. */ - #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL UINT32_C(0x0) - /* Reset the caller function only */ - #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME UINT32_C(0x1) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR \ + (UINT32_C(0x0) << 12) /* - * Reset all children VFs of the caller function - * driver if the caller is a PF driver. It is an - * error to specify this level by a VF driver. - * It is an error to specify this level by a PF - * driver with no children VFs. + * Indicates that IP header version does not match + * expectation from L2 Ethertype for IPv4 and IPv6 or that an + * option other than VFT was parsed on an + * FCoE packet. */ - #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \ - UINT32_C(0x2) /* - * Reset a specific VF of the caller function - * driver if the caller is the parent PF driver. - * It is an error to specify this level by a VF - * driver. It is an error to specify this level - * by a PF driver that is not the parent of the - * VF that is being requested to reset. + * Indicates that header length is out of range.
Valid for + * IPv4 and RoCE */ - #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF UINT32_C(0x3) - uint8_t unused_0; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_func_reset_output { - uint16_t error_code; + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \ + (UINT32_C(0x2) << 12) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Indicates that the IPv4 TTL or IPv6 hop limit check + * has failed (e.g. TTL = 0). Valid for IPv4 and IPv6 */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL \ + (UINT32_C(0x3) << 12) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * Indicates that the physical packet is shorter than that + * claimed by the l3 header length. Valid for IPv4, + * IPv6, or RoCE packets. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \ + (UINT32_C(0x4) << 12) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * Indicates that the physical packet is shorter than that + * claimed by the UDP header length for a UDP packet that is + * not fragmented. */ -} __attribute__((packed)); - -/* hwrm_func_vf_cfg */ -/* - * Description: This command allows configuration of a VF by its driver. If this - * function is called by a PF driver, then the HWRM shall fail this command. If - * guest VLAN and/or MAC address are provided in this command, then the HWRM - * shall set up appropriate MAC/VLAN filters for the VF that is being - * configured. A VF driver should set VF MTU/MRU using this command prior to - * allocating RX VNICs or TX rings for the corresponding VF. - */ -/* Input (32 bytes) */ -struct hwrm_func_vf_cfg_input { - uint16_t req_type; + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \ + (UINT32_C(0x5) << 12) /* - * This value indicates what type of request this is. The format for the - * rest of the command is determined by this field. + * Indicates that TCP header length > IP payload. Valid for + * TCP packets only. */ - uint16_t cmpl_ring; + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \ + (UINT32_C(0x6) << 12) + /* Indicates that TCP header length < 5. Valid for TCP. */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \ + (UINT32_C(0x7) << 12) /* - * This value indicates the what completion ring the request will be - * optionally completed on. If the value is -1, then no CR completion - * will be generated. Any other value must be a valid CR ring_id value - * for this function. + * Indicates that TCP option headers result in a TCP header + * size that does not match data offset in TCP header. Valid + * for TCP. */ - uint16_t seq_id; - /* This value indicates the command sequence number.
*/ - uint16_t target_id; + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \ + (UINT32_C(0x8) << 12) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + * This field identifies the CFA action rule that was used for this + * packet. */ - uint64_t resp_addr; + uint16_t cfa_code; + uint32_t reorder; /* - * This is the host address where the response will be written when the - * request is complete. This area must be 16B aligned and must be - * cleared to zero before the request is made. + * This value holds the reordering sequence number for the packet. + * If the reordering sequence is not valid, then this value is zero. + * The reordering domain for the packet is in the bottom 8 to 10b of + * the rss_hash value. The bottom 20b of this value contain the + * ordering domain value for the packet. */ - uint32_t enables; - /* This bit must be '1' for the mtu field to be configured. */ - #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU UINT32_C(0x1) - /* This bit must be '1' for the guest_vlan field to be configured. */ - #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN UINT32_C(0x2) + #define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff) + #define RX_PKT_CMPL_REORDER_SFT 0 +} __attribute__((packed)); + +/* rx_tpa_start_cmpl (size:128b/16B) */ +struct rx_tpa_start_cmpl { + uint16_t flags_type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define RX_TPA_START_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_TPA_START_CMPL_TYPE_SFT 0 + /* + * RX L2 TPA Start Completion: + * Completion at the beginning of a TPA operation. + * Length = 32B + */ + #define RX_TPA_START_CMPL_TYPE_RX_TPA_START UINT32_C(0x13) + #define RX_TPA_START_CMPL_TYPE_LAST \ + RX_TPA_START_CMPL_TYPE_RX_TPA_START + #define RX_TPA_START_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define RX_TPA_START_CMPL_FLAGS_SFT 6 + /* This bit will always be '0' for TPA start completions. */ + #define RX_TPA_START_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* This field indicates how the packet was placed in the buffer. */ + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380) + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT 7 /* - * This bit must be '1' for the async_event_cr field to be configured. + * Jumbo: + * TPA Packet was placed using jumbo algorithm. This means + * that the first buffer will be filled with data before + * moving to aggregation buffers. Each aggregation buffer + * will be filled before moving to the next aggregation + * buffer. */ - #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4) - /* This bit must be '1' for the dflt_mac_addr field to be configured. */ - #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x8) - uint16_t mtu; + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_JUMBO \ + (UINT32_C(0x1) << 7) /* - * The maximum transmission unit requested on the function. The HWRM - * should make sure that the mtu of the function does not exceed the mtu - * of the physical port that this function is associated with. In - * addition to requesting mtu per function, it is possible to configure - * mtu per transmit ring. By default, the mtu of each transmit ring - * associated with a function is equal to the mtu of the function. 
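With TPA start completions (type 0x13) sharing the ring with TX, RX L2 (0x11) and TPA end (0x15) records, a poll loop normally dispatches on the low 6 type bits before casting. An illustrative sketch, assuming the leading 16-bit type field of cmpl_base and the CMPL_BASE_TYPE_TX_L2 / CMPL_BASE_TYPE_RX_L2 / CMPL_BASE_TYPE_RX_TPA_START values defined earlier in this header (the handler names are hypothetical):

#include <stdint.h>
#include <rte_byteorder.h>

/* Sketch only: dispatch one ring entry by completion type. */
static void
dispatch_cmpl(struct cmpl_base *cmp)
{
	uint16_t type = rte_le_to_cpu_16(cmp->type) & CMPL_BASE_TYPE_MASK;

	switch (type) {
	case CMPL_BASE_TYPE_TX_L2:		/* even value: 16B record */
		/* handle_tx((struct tx_cmpl *)cmp); */
		break;
	case CMPL_BASE_TYPE_RX_L2:		/* odd value: 32B record */
		/* handle_rx((struct rx_pkt_cmpl *)cmp); */
		break;
	case CMPL_BASE_TYPE_RX_TPA_START:
	case CMPL_BASE_TYPE_RX_TPA_END:
		/* handle_tpa(cmp); */
		break;
	default:
		break;
	}
}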
The - * HWRM should make sure that the mtu of each transmit ring that is - * assigned to a function has a valid mtu. + * Header/Data Separation: + * Packet was placed using Header/Data separation algorithm. + * The separation location is indicated by the itype field. */ - uint16_t guest_vlan; + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_HDS \ + (UINT32_C(0x2) << 7) /* - * The guest VLAN for the function being configured. This field's format - * is same as 802.1Q Tag's Tag Control Information (TCI) format that - * includes both Priority Code Point (PCP) and VLAN Identifier (VID). + * GRO/Jumbo: + * Packet will be placed using GRO/Jumbo where the first + * packet is filled with data. Subsequent packets will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. */ - uint16_t async_event_cr; + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \ + (UINT32_C(0x5) << 7) /* - * ID of the target completion ring for receiving asynchronous event - * completions. If this field is not valid, then the HWRM shall use the - * default completion ring of the function that is being configured as - * the target completion ring for providing any asynchronous event - * completions for that function. If this field is valid, then the HWRM - * shall use the completion ring identified by this ID as the target - * completion ring for providing any asynchronous event completions for - * the function that is being configured. + * GRO/Header-Data Separation: + * Packet will be placed using GRO/HDS where the header + * is in the first packet. + * Payload of each packet will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. */ - uint8_t dflt_mac_addr[6]; + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS \ + (UINT32_C(0x6) << 7) + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_LAST \ + RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS + /* This bit is '1' if the RSS field in this completion is valid. */ + #define RX_TPA_START_CMPL_FLAGS_RSS_VALID UINT32_C(0x400) + /* unused is 1 b */ + #define RX_TPA_START_CMPL_FLAGS_UNUSED UINT32_C(0x800) /* - * This value is the current MAC address requested by the VF driver to - * be configured on this VF. A value of 00-00-00-00-00-00 indicates no - * MAC address configuration is requested by the VF driver. The parent - * PF driver may reject or overwrite this MAC address. + * This value indicates what the inner packet was determined + * to be. */ -} __attribute__((packed)); - -/* Output (16 bytes) */ - -struct hwrm_func_vf_cfg_output { - uint16_t error_code; + #define RX_TPA_START_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) + #define RX_TPA_START_CMPL_FLAGS_ITYPE_SFT 12 /* - * Pass/Fail or error type Note: receiver to verify the in parameters, - * and fail the call with an error when appropriate + * TCP Packet: + * Indicates that the packet was IP and TCP. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define RX_TPA_START_CMPL_FLAGS_ITYPE_TCP \ + (UINT32_C(0x2) << 12) + #define RX_TPA_START_CMPL_FLAGS_ITYPE_LAST \ + RX_TPA_START_CMPL_FLAGS_ITYPE_TCP /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory.
+ * This value indicates the amount of packet data written to the + * buffer the opaque field in this completion corresponds to. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t len; + /* + * This is a copy of the opaque field from the RX BD this completion + * corresponds to. + */ + uint32_t opaque; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + uint8_t v1; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define RX_TPA_START_CMPL_V1 UINT32_C(0x1) + #define RX_TPA_START_CMPL_LAST RX_TPA_START_CMPL_V1 + /* + * This is the RSS hash type for the packet. The value is packed + * {tuple_extract_op[1:0],rss_profile_id[4:0],tuple_extract_op[2]}. + * + * The value of tuple_extract_op provides the information about + * what fields the hash was computed on. + * * 0: The RSS hash was computed over source IP address, + * destination IP address, source port, and destination port of inner + * IP and TCP or UDP headers. Note: For non-tunneled packets, + * the packet headers are considered inner packet headers for the RSS + * hash computation purpose. + * * 1: The RSS hash was computed over source IP address and destination + * IP address of inner IP header. Note: For non-tunneled packets, + * the packet headers are considered inner packet headers for the RSS + * hash computation purpose. + * * 2: The RSS hash was computed over source IP address, + * destination IP address, source port, and destination port of + * IP and TCP or UDP headers of outer tunnel headers. + * Note: For non-tunneled packets, this value is not applicable. + * * 3: The RSS hash was computed over source IP address and + * destination IP address of IP header of outer tunnel headers. + * Note: For non-tunneled packets, this value is not applicable. + * + * Note that the 4-tuple values listed above are applicable + * for layer 4 protocols supported and enabled for RSS in the hardware, + * HWRM firmware, and drivers. For example, if RSS hash is supported and + * enabled for TCP traffic only, then the values of tuple_extract_op + * corresponding to 4-tuples are only valid for TCP traffic. + */ + uint8_t rss_hash_type; /* - * This field is used in Output records to indicate that the output is - * completely written to RAM. This field should be read as '1' to - * indicate that the output has been completely written. When writing a - * command completion or response to an internal processor, the order of - * writes has to be such that this field is written last. + * This is the aggregation ID that the completion is associated - * with. Use this number to correlate the TPA start completion - * with the TPA end completion. + * with. Use this number to correlate the TPA start completion + * with the TPA end completion. */ -} __attribute__((packed)); - -/* hwrm_func_qcaps */ -/* - * Description: This command returns capabilities of a function. The input FID - * value is used to indicate what function is being queried. This allows a - * physical function driver to query virtual functions that are children of the - * physical function. The output FID value is needed to configure Rings and - * MSI-X vectors so their DMA operations appear correctly on the PCI bus.
- */ -/* Input (24 bytes) */ -struct hwrm_func_qcaps_input { - uint16_t req_type; + uint16_t agg_id; + /* unused2 is 9 b */ + #define RX_TPA_START_CMPL_UNUSED2_MASK UINT32_C(0x1ff) + #define RX_TPA_START_CMPL_UNUSED2_SFT 0 /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This is the aggregation ID that the completion is associated + * with. Use this number to correlate the TPA start completion + * with the TPA end completion. */ - uint16_t cmpl_ring; + #define RX_TPA_START_CMPL_AGG_ID_MASK UINT32_C(0xfe00) + #define RX_TPA_START_CMPL_AGG_ID_SFT 9 /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This value is the RSS hash value calculated for the packet + * based on the mode bits and key value in the VNIC. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint32_t rss_hash; +} __attribute__((packed)); + +/* rx_tpa_start_cmpl_hi (size:128b/16B) */ +struct rx_tpa_start_cmpl_hi { + uint32_t flags2; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This indicates that the ip checksum was calculated for the + * inner packet and that the sum passed for all segments + * included in the aggregation. */ - uint64_t resp_addr; + #define RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This indicates that the TCP, UDP or ICMP checksum was + * calculated for the inner packet and that the sum passed + * for all segments included in the aggregation. */ - uint16_t fid; + #define RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2) /* - * Function ID of the function that is being queried. 0xFF... - * (All Fs) if the query is for the requesting function. + * This indicates that the ip checksum was calculated for the + * tunnel header and that the sum passed for all segments + * included in the aggregation. */ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (80 bytes) */ -struct hwrm_func_qcaps_output { - uint16_t error_code; + #define RX_TPA_START_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This indicates that the UDP checksum was + * calculated for the tunnel packet and that the sum passed for + * all segments included in the aggregation. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define RX_TPA_START_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8) + /* This value indicates what format the metadata field is. */ + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0) + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_SFT 4 + /* No metadata information. Value is zero. */ + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_NONE \ + (UINT32_C(0x0) << 4) + /* + * The metadata field contains the VLAN tag and TPID value. + * - metadata[11:0] contains the vlan VID value. + * - metadata[12] contains the vlan DE value.
+ * - metadata[15:13] contains the vlan PRI value. + * - metadata[31:16] contains the vlan TPID value. + */ + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN \ + (UINT32_C(0x1) << 4) + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_LAST \ + RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This field indicates the IP type for the inner-most IP header. + * A value of '0' indicates IPv4. A value of '1' indicates IPv6. */ - uint16_t fid; + #define RX_TPA_START_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100) /* - * FID value. This value is used to identify operations on the - * PCI bus as belonging to a particular PCI function. + * This is data from the CFA block as indicated by the meta_format + * field. */ - uint16_t port_id; + uint32_t metadata; + /* When meta_format=1, this value is the VLAN VID. */ + #define RX_TPA_START_CMPL_METADATA_VID_MASK UINT32_C(0xfff) + #define RX_TPA_START_CMPL_METADATA_VID_SFT 0 + /* When meta_format=1, this value is the VLAN DE. */ + #define RX_TPA_START_CMPL_METADATA_DE UINT32_C(0x1000) + /* When meta_format=1, this value is the VLAN PRI. */ + #define RX_TPA_START_CMPL_METADATA_PRI_MASK UINT32_C(0xe000) + #define RX_TPA_START_CMPL_METADATA_PRI_SFT 13 + /* When meta_format=1, this value is the VLAN TPID. */ + #define RX_TPA_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000) + #define RX_TPA_START_CMPL_METADATA_TPID_SFT 16 + uint16_t v2; /* - * Port ID of port that this function is associated with. Valid - * only for the PF. 0xFF... (All Fs) if this function is not - * associated with any port. 0xFF... (All Fs) if this function - * is called from a VF. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ - uint32_t flags; - /* If 1, then Push mode is supported on this function. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1) + #define RX_TPA_START_CMPL_V2 UINT32_C(0x1) /* - * If 1, then the global MSI-X auto-masking is enabled for the - * device. + * This field identifies the CFA action rule that was used for this + * packet. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \ - UINT32_C(0x2) + uint16_t cfa_code; /* - * If 1, then the Precision Time Protocol (PTP) processing is - * supported on this function. The HWRM should enable PTP on - * only a single Physical Function (PF) per port. + * This is the size in bytes of the inner most L4 header. + * This can be subtracted from the payload_offset to determine + * the start of the inner most L4 header. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED UINT32_C(0x4) + uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset; /* - * If 1, then RDMA over Converged Ethernet (RoCE) v1 is - * supported on this function. + * This is the offset from the beginning of the packet in bytes for + * the outer L3 header. If there is no outer L3 header, then this + * value is zero. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED UINT32_C(0x8) + #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff) + #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT 0 /* - * If 1, then RDMA over Converged Ethernet (RoCE) v2 is - * supported on this function. + * This is the offset from the beginning of the packet in bytes for + * the inner most L2 header. 
*/ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED UINT32_C(0x10) + #define RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00) + #define RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT 9 /* - * If 1, then control and configuration of WoL magic packet are - * supported on this function. + * This is the offset from the beginning of the packet in bytes for + * the inner most L3 header. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED \ - UINT32_C(0x20) + #define RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000) + #define RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT 18 /* - * If 1, then control and configuration of bitmap pattern packet - * are supported on this function. + * This is the size in bytes of the inner most L4 header. + * This can be subtracted from the payload_offset to determine + * the start of the inner most L4 header. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED UINT32_C(0x40) + #define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000) + #define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27 +} __attribute__((packed)); + +/* rx_tpa_end_cmpl (size:128b/16B) */ +struct rx_tpa_end_cmpl { + uint16_t flags_type; /* - * If set to 1, then the control and configuration of rate limit - * of an allocated TX ring on the queried function is supported. + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED UINT32_C(0x80) + #define RX_TPA_END_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_TPA_END_CMPL_TYPE_SFT 0 /* - * If 1, then control and configuration of minimum and maximum - * bandwidths are supported on the queried function. + * RX L2 TPA End Completion: + * Completion at the end of a TPA operation. + * Length = 32B */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED UINT32_C(0x100) + #define RX_TPA_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15) + #define RX_TPA_END_CMPL_TYPE_LAST \ + RX_TPA_END_CMPL_TYPE_RX_TPA_END + #define RX_TPA_END_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define RX_TPA_END_CMPL_FLAGS_SFT 6 /* - * If the query is for a VF, then this flag shall be ignored. If - * this query is for a PF and this flag is set to 1, then the PF - * has the capability to set the rate limits on the TX rings of - * its children VFs. If this query is for a PF and this flag is - * set to 0, then the PF does not have the capability to set the - * rate limits on the TX rings of its children VFs. + * When this bit is '1', it indicates a packet that has an + * error of some type. Type of error is indicated in + * error_flags. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED \ - UINT32_C(0x200) + #define RX_TPA_END_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* This field indicates how the packet was placed in the buffer. */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380) + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_SFT 7 + /* + * Jumbo: + * TPA Packet was placed using jumbo algorithm. This means + * that the first buffer will be filled with data before + * moving to aggregation buffers. Each aggregation buffer + * will be filled before moving to the next aggregation + * buffer. + */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_JUMBO \ + (UINT32_C(0x1) << 7) + /* + * Header/Data Separation: + * Packet was placed using Header/Data separation algorithm. + * The separation location is indicated by the itype field. 
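The four sub-fields sharing that 32-bit word unpack with the masks and shifts defined above. Sketch only; the helper and out-parameter names are illustrative:

#include <stdint.h>
#include <rte_byteorder.h>

/* Sketch only: split the packed offsets word of a TPA start
 * completion into its four sub-fields. */
static inline void
tpa_start_offsets(const struct rx_tpa_start_cmpl_hi *hi,
		  uint16_t *outer_l3, uint16_t *inner_l2,
		  uint16_t *inner_l3, uint16_t *inner_l4_size)
{
	uint32_t w = rte_le_to_cpu_32(
	    hi->inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset);

	*outer_l3 = (w & RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK) >>
		    RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT;
	*inner_l2 = (w & RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK) >>
		    RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT;
	*inner_l3 = (w & RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK) >>
		    RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT;
	*inner_l4_size = (w & RX_TPA_START_CMPL_INNER_L4_SIZE_MASK) >>
			 RX_TPA_START_CMPL_INNER_L4_SIZE_SFT;
}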
+ */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_HDS \ + (UINT32_C(0x2) << 7) + /* + * GRO/Jumbo: + * Packet will be placed using GRO/Jumbo where the first + * packet is filled with data. Subsequent packets will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. + */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \ + (UINT32_C(0x5) << 7) /* - * If the query is for a VF, then this flag shall be ignored. If - * this query is for a PF and this flag is set to 1, then the PF - * has the capability to set the minimum and/or maximum - * bandwidths for its children VFs. If this query is for a PF - * and this flag is set to 0, then the PF does not have the - * capability to set the minimum or maximum bandwidths for its - * children VFs. + * GRO/Header-Data Separation: + * Packet will be placed using GRO/HDS where the header + * is in the first packet. + * Payload of each packet will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED UINT32_C(0x400) + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS \ + (UINT32_C(0x6) << 7) + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_LAST \ + RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS + /* unused is 2 b */ + #define RX_TPA_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00) + #define RX_TPA_END_CMPL_FLAGS_UNUSED_SFT 10 /* - * Standard TX Ring mode is used for the allocation of TX ring - * and underlying scheduling resources that allow bandwidth - * reservation and limit settings on the queried function. If - * set to 1, then standard TX ring mode is supported on the - * queried function. If set to 0, then standard TX ring mode is - * not available on the queried function. + * This value indicates what the inner packet determined for the + * packet was. + * - 2 TCP Packet + * Indicates that the packet was IP and TCP. This indicates + * that the ip_cs field is valid and that the tcp_udp_cs + * field is valid and contains the TCP checksum. + * This also indicates that the payload_offset field is valid. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_STD_TX_RING_MODE_SUPPORTED \ - UINT32_C(0x800) - uint8_t mac_address[6]; + #define RX_TPA_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) + #define RX_TPA_END_CMPL_FLAGS_ITYPE_SFT 12 /* - * This value is current MAC address configured for this - * function. A value of 00-00-00-00-00-00 indicates no MAC - * address is currently configured. + * This value is zero for TPA End completions. + * There is no data in the buffer that corresponds to the opaque + * value in this completion. */ - uint16_t max_rsscos_ctx; + uint16_t len; /* - * The maximum number of RSS/COS contexts that can be allocated - * to the function. + * This is a copy of the opaque field from the RX BD this completion + * corresponds to. */ - uint16_t max_cmpl_rings; + uint32_t opaque; /* - * The maximum number of completion rings that can be allocated - * to the function. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ - uint16_t max_tx_rings; + uint8_t agg_bufs_v1; /* - * The maximum number of transmit rings that can be allocated to - * the function. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. 
The odd passes will write 0. */ - uint16_t max_rx_rings; + #define RX_TPA_END_CMPL_V1 UINT32_C(0x1) /* - * The maximum number of receive rings that can be allocated to - * the function. + * This value is the number of aggregation buffers that follow this + * entry in the completion ring that are a part of this aggregation + * packet. + * If the value is zero, then the packet is completely contained + * in the buffer space provided in the aggregation start completion. */ - uint16_t max_l2_ctxs; + #define RX_TPA_END_CMPL_AGG_BUFS_MASK UINT32_C(0x7e) + #define RX_TPA_END_CMPL_AGG_BUFS_SFT 1 + /* This value is the number of segments in the TPA operation. */ + uint8_t tpa_segs; /* - * The maximum number of L2 contexts that can be allocated to - * the function. + * This value indicates the offset in bytes from the beginning of the packet + * where the inner payload starts. This value is valid for TCP, UDP, + * FCoE, and RoCE packets. + * + * A value of zero indicates an offset of 256 bytes. */ - uint16_t max_vnics; + uint8_t payload_offset; + uint8_t agg_id; + /* unused2 is 1 b */ + #define RX_TPA_END_CMPL_UNUSED2 UINT32_C(0x1) /* - * The maximum number of VNICs that can be allocated to the - * function. + * This is the aggregation ID that the completion is associated + * with. Use this number to correlate the TPA start completion + * with the TPA end completion. */ - uint16_t first_vf_id; + #define RX_TPA_END_CMPL_AGG_ID_MASK UINT32_C(0xfe) + #define RX_TPA_END_CMPL_AGG_ID_SFT 1 /* - * The identifier for the first VF enabled on a PF. This is - * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if - * this command is called on a PF with SR-IOV disabled or on a - * VF. + * For non-GRO packets, this value is the + * timestamp delta between earliest and latest timestamp values for + * the TPA packet. If packets were not timestamped, then delta will be + * zero. + * + * For GRO packets, this field is zero except for the following + * sub-fields. + * - tsdelta[31] + * Timestamp present indication. When '0', no Timestamp + * option is in the packet. When '1', then a Timestamp + * option is present in the packet. */ - uint16_t max_vfs; + uint32_t tsdelta; +} __attribute__((packed)); + +/* rx_tpa_end_cmpl_hi (size:128b/16B) */ +struct rx_tpa_end_cmpl_hi { /* - * The maximum number of VFs that can be allocated to the - * function. This is valid only on the PF with SR-IOV enabled. - * 0xFF... (All Fs) if this command is called on a PF with SR- - * IOV disabled or on a VF. + * This value is the number of duplicate ACKs that have been + * received as part of the TPA operation. */ - uint16_t max_stat_ctx; + uint32_t tpa_dup_acks; /* - * The maximum number of statistic contexts that can be - * allocated to the function. + * This value is the number of duplicate ACKs that have been + * received as part of the TPA operation. */ - uint32_t max_encap_records; + #define RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK UINT32_C(0xf) + #define RX_TPA_END_CMPL_TPA_DUP_ACKS_SFT 0 /* - * The maximum number of Encapsulation records that can be - * offloaded by this function. + * This value is valid when the TPA completion is active. It + * indicates the length of the longest segment of the TPA operation + * for LRO mode and the length of the first segment in GRO mode. + * + * This value may be used by GRO software to reconstruct the original + * packet stream from the TPA packet. This is the length of all + * but the last segment for GRO. In LRO mode this value may be used + * to indicate MSS size to the stack.
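+ */
+ /*
+ * Editorial usage sketch (illustrative only, not part of the generated
+ * HWRM interface): given a struct rx_tpa_end_cmpl *tpa_end, the
+ * aggregation buffer count and the aggregation ID decode as:
+ *
+ *	uint8_t agg_bufs = (tpa_end->agg_bufs_v1 &
+ *			    RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
+ *			   RX_TPA_END_CMPL_AGG_BUFS_SFT;
+ *	uint8_t agg_id = (tpa_end->agg_id &
+ *			  RX_TPA_END_CMPL_AGG_ID_MASK) >>
+ *			 RX_TPA_END_CMPL_AGG_ID_SFT;
+ *
+ * agg_id is the value used to correlate this TPA end completion with
+ * its TPA start completion; the local variables are hypothetical.
+ */
+ /*
+ * This value is valid when the TPA completion is active; it is the
+ * length of the longest segment (LRO) or of the first segment (GRO),
+ * as described above: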
*/ - uint32_t max_decap_records; + uint16_t tpa_seg_len; + /* unused3 is 16 b */ + uint16_t unused3; + uint16_t errors_v2; /* - * The maximum number of decapsulation records that can be - * offloaded by this function. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ - uint32_t max_tx_em_flows; + #define RX_TPA_END_CMPL_V2 UINT32_C(0x1) + #define RX_TPA_END_CMPL_ERRORS_MASK UINT32_C(0xfffe) + #define RX_TPA_END_CMPL_ERRORS_SFT 1 /* - * The maximum number of Exact Match (EM) flows that can be - * offloaded by this function on the TX side. + * This error indicates that there was some sort of problem with + * the BDs for the packet that was discovered after part of the + * packet had already been placed. The packet should be treated as + * invalid. */ - uint32_t max_tx_wm_flows; + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe) + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_SFT 1 /* - * The maximum number of Wildcard Match (WM) flows that can be - * offloaded by this function on the TX side. + * This error occurs when there is a fatal HW problem in + * the chip only. It indicates that there were no + * BDs on chip even though adequate reservation was + * provided by the TPA block. */ - uint32_t max_rx_em_flows; + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \ + (UINT32_C(0x2) << 1) /* - * The maximum number of Exact Match (EM) flows that can be - * offloaded by this function on the RX side. + * This error occurs when the TPA block was not configured to + * reserve adequate BDs for TPA operations on this RX + * ring. All data for the TPA operation was not placed. + * + * This error can also be generated when the number of + * segments is not programmed correctly in TPA and the + * 33 total aggregation buffers allowed for the TPA + * operation have been exceeded. */ - uint32_t max_rx_wm_flows; + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR \ + (UINT32_C(0x4) << 1) + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_LAST \ + RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR + /* unused5 is 16 b */ + uint16_t unused_4; /* - * The maximum number of Wildcard Match (WM) flows that can be - * offloaded by this function on the RX side. + * This is the opaque value that was completed for the TPA start + * completion that corresponds to this TPA end completion. */ - uint32_t max_mcast_filters; + uint32_t start_opaque; +} __attribute__((packed)); + +/* rx_abuf_cmpl (size:128b/16B) */ +struct rx_abuf_cmpl { + uint16_t type; /* - * The maximum number of multicast filters that can be supported - * by this function on the RX side. + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. */ - uint32_t max_flow_id; + #define RX_ABUF_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_ABUF_CMPL_TYPE_SFT 0 /* - * The maximum value of flow_id that can be supported in - * completion records. + * RX Aggregation Buffer completion: + * Completion of an L2 aggregation buffer in support of + * TPA, HDS, or Jumbo packet completion. Length = 16B */ - uint32_t max_hw_ring_grps; + #define RX_ABUF_CMPL_TYPE_RX_AGG UINT32_C(0x12) + #define RX_ABUF_CMPL_TYPE_LAST RX_ABUF_CMPL_TYPE_RX_AGG /* - * The maximum number of HW ring groups that can be supported on - * this function.
+ * This is the length of the data for the packet stored in this + * aggregation buffer identified by the opaque value. This does not + * include the length of any + * data placed in other aggregation BDs or in the packet or buffer + * BDs. This length does not include any space added due to + * hdr_offset register during HDS placement mode. */ - uint16_t max_sp_tx_rings; + uint16_t len; /* - * The maximum number of strict priority transmit rings that can - * be allocated to the function. This number indicates the - * maximum number of TX rings that can be assigned strict - * priorities out of the maximum number of TX rings that can be - * allocated (max_tx_rings) to the function. + * This is a copy of the opaque field from the RX BD this aggregation + * buffer corresponds to. */ - uint8_t unused_0; - uint8_t valid; + uint32_t opaque; + uint32_t v; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ + #define RX_ABUF_CMPL_V UINT32_C(0x1) + /* unused3 is 32 b */ + uint32_t unused_2; } __attribute__((packed)); -/* hwrm_func_qcfg */ -/* - * Description: This command returns the current configuration of a function. - * The input FID value is used to indicate what function is being queried. This - * allows a physical function driver to query virtual functions that are - * children of the physical function. The output FID value is needed to - * configure Rings and MSI-X vectors so their DMA operations appear correctly on - * the PCI bus. This command should be called by every driver after - * 'hwrm_func_cfg' to get the actual number of resources allocated by the HWRM. - * The values returned by hwrm_func_qcfg are the values the driver shall use. - * These values may be different than what was originally requested in the - * 'hwrm_func_cfg' command. - */ -/* Input (24 bytes) */ -struct hwrm_func_qcfg_input { - uint16_t req_type; +/* eject_cmpl (size:128b/16B) */ +struct eject_cmpl { + uint16_t type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. */ - uint16_t cmpl_ring; + #define EJECT_CMPL_TYPE_MASK UINT32_C(0x3f) + #define EJECT_CMPL_TYPE_SFT 0 /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Statistics Ejection Completion: + * Completion of statistics data ejection buffer. + * Length = 16B */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define EJECT_CMPL_TYPE_STAT_EJECT UINT32_C(0x1a) + #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT /* - * Target ID of this command. 
0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This is the length of the statistics data stored in this + * buffer. */ - uint64_t resp_addr; + uint16_t len; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This is a copy of the opaque field from the RX BD this ejection + * buffer corresponds to. */ - uint16_t fid; + uint32_t opaque; + uint32_t v; /* - * Function ID of the function that is being queried. 0xFF... - * (All Fs) if the query is for the requesting function. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ - uint16_t unused_0[3]; + #define EJECT_CMPL_V UINT32_C(0x1) + /* unused3 is 32 b */ + uint32_t unused_2; } __attribute__((packed)); -/* Output (72 bytes) */ -struct hwrm_func_qcfg_output { - uint16_t error_code; +/* hwrm_cmpl (size:128b/16B) */ +struct hwrm_cmpl { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_CMPL_TYPE_MASK UINT32_C(0x3f) + #define HWRM_CMPL_TYPE_SFT 0 + /* + * HWRM Command Completion: + * Completion of an HWRM command. + */ + #define HWRM_CMPL_TYPE_HWRM_DONE UINT32_C(0x20) + #define HWRM_CMPL_TYPE_LAST HWRM_CMPL_TYPE_HWRM_DONE + /* This is the sequence_id of the HWRM command that has completed. */ + uint16_t sequence_id; + /* unused2 is 32 b */ + uint32_t unused_1; + uint32_t v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_CMPL_V UINT32_C(0x1) + /* unused4 is 32 b */ + uint32_t unused_3; +} __attribute__((packed)); + +/* hwrm_fwd_req_cmpl (size:128b/16B) */ +struct hwrm_fwd_req_cmpl { /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t req_len_type; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. */ - uint16_t fid; + #define HWRM_FWD_REQ_CMPL_TYPE_MASK UINT32_C(0x3f) + #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0 + /* Forwarded HWRM Request */ + #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ UINT32_C(0x22) + #define HWRM_FWD_REQ_CMPL_TYPE_LAST \ + HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ + /* Length of forwarded request in bytes. 
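+ */
+ /*
+ * Editorial usage sketch (illustrative only, not part of the generated
+ * HWRM interface): a consumer typically treats a completion record as
+ * valid when its V bit matches the ring's current phase, which flips
+ * each time the consumer wraps the ring. For the HWRM done completion
+ * above, a sketch of the check is:
+ *
+ *	struct hwrm_cmpl *cmpl = &ring[cons];
+ *	int valid = ((cmpl->v & HWRM_CMPL_V) != 0) == phase;
+ *	if (valid &&
+ *	    (cmpl->type & HWRM_CMPL_TYPE_MASK) == HWRM_CMPL_TYPE_HWRM_DONE)
+ *		complete_hwrm_cmd(cmpl->sequence_id);
+ *
+ * ring, cons, phase, and complete_hwrm_cmd() are hypothetical driver
+ * state and helpers.
+ */
+ /*
+ * Length of the forwarded request in bytes: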
*/ + #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0) + #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6 /* - * FID value. This value is used to identify operations on the - * PCI bus as belonging to a particular PCI function. + * Source ID of this request. + * Typically used in forwarding requests and responses. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM */ - uint16_t port_id; + uint16_t source_id; + /* unused1 is 32 b */ + uint32_t unused0; + /* Address of forwarded request. */ + uint32_t req_buf_addr_v[2]; /* - * Port ID of port that this function is associated with. - * 0xFF... (All Fs) if this function is not associated with any - * port. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ - uint16_t vlan; + #define HWRM_FWD_REQ_CMPL_V UINT32_C(0x1) + /* Address of forwarded request. */ + #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe) + #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1 +} __attribute__((packed)); + +/* hwrm_fwd_resp_cmpl (size:128b/16B) */ +struct hwrm_fwd_resp_cmpl { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_FWD_RESP_CMPL_TYPE_MASK UINT32_C(0x3f) + #define HWRM_FWD_RESP_CMPL_TYPE_SFT 0 + /* Forwarded HWRM Response */ + #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP UINT32_C(0x24) + #define HWRM_FWD_RESP_CMPL_TYPE_LAST \ + HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP + /* + * Source ID of this response. + * Typically used in forwarding requests and responses. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM + */ + uint16_t source_id; + /* Length of forwarded response in bytes. */ + uint16_t resp_len; + /* unused2 is 16 b */ + uint16_t unused_1; + /* Address of the forwarded response. */ + uint32_t resp_buf_addr_v[2]; /* - * This value is the current VLAN setting for this function. The - * value of 0 for this field indicates no priority tagging or - * VLAN is used. This field's format is same as 802.1Q Tag's Tag - * Control Information (TCI) format that includes both Priority - * Code Point (PCP) and VLAN Identifier (VID). + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ - uint16_t flags; + #define HWRM_FWD_RESP_CMPL_V UINT32_C(0x1) + /* Address of the forwarded response. */ + #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK UINT32_C(0xfffffffe) + #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl (size:128b/16B) */ +struct hwrm_async_event_cmpl { + uint16_t type; /* - * If 1, then magic packet based Out-Of-Box WoL is enabled on - * the port associated with this function. + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records.
*/ - #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \ + #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link status changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE \ + UINT32_C(0x0) + /* Link MTU changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE \ UINT32_C(0x1) - /* - * If 1, then bitmap pattern based Out-Of-Box WoL packet is - * enabled on the port associated with this function. - */ - #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_BMP_ENABLED UINT32_C(0x2) - /* - * If set to 1, then FW based DCBX agent is enabled and running - * on the port associated with this function. If set to 0, then - * DCBX agent is not running in the firmware. - */ - #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \ + /* Link speed changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE \ + UINT32_C(0x2) + /* DCB Configuration changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE \ + UINT32_C(0x3) + /* Port connection not allowed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \ UINT32_C(0x4) + /* Link speed configuration was not allowed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \ + UINT32_C(0x5) + /* Link speed configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE \ + UINT32_C(0x6) + /* Port PHY configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE \ + UINT32_C(0x7) + /* Function driver unloaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD \ + UINT32_C(0x10) + /* Function driver loaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD \ + UINT32_C(0x11) + /* Function FLR related processing has completed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT \ + UINT32_C(0x12) + /* PF driver unloaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD \ + UINT32_C(0x20) + /* PF driver loaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD \ + UINT32_C(0x21) + /* VF Function Level Reset (FLR) */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR \ + UINT32_C(0x30) + /* VF MAC Address Change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE \ + UINT32_C(0x31) + /* PF-VF communication channel status change. */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \ + UINT32_C(0x32) + /* VF Configuration Change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE \ + UINT32_C(0x33) + /* LLFC/PFC Configuration Change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE \ + UINT32_C(0x34) + /* Default VNIC Configuration Change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE \ + UINT32_C(0x35) + /* HWRM Error */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; /* - * Standard TX Ring mode is used for the allocation of TX ring - * and underlying scheduling resources that allow bandwidth - * reservation and limit settings on the queried function. If - * set to 1, then standard TX ring mode is enabled on the - * queried function. 
If set to 0, then the standard TX ring mode - is disabled on the queried function. In this extended TX ring - resource mode, the minimum and maximum bandwidth settings are - not supported to allow the allocation of TX rings to span - multiple scheduler nodes. - */ - #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_STD_TX_RING_MODE_ENABLED \ - UINT32_C(0x8) + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_status_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link status changed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE \ + UINT32_C(0x0) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Indicates link status change */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE \ + UINT32_C(0x1) + /* + * If this bit is set to 0, then it indicates that the link + * was up and it went down. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN \ + UINT32_C(0x0) + /* + * If this bit is set to 1, then it indicates that the link + * was down and it went up.
+ */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP \ + UINT32_C(0x1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP + /* Indicates the physical port this link status change occurred on */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK \ + UINT32_C(0xe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT \ + 1 + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff0) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 4 + /* Indicates the physical function this event occurred on. */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK \ + UINT32_C(0xff00000) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT \ + 20 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_link_mtu_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_mtu_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link MTU changed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE \ + UINT32_C(0x1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* The new MTU of the link in bytes. */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_link_speed_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records.
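+ */
+ /*
+ * Editorial usage sketch (illustrative only, not part of the generated
+ * HWRM interface): event_data1 of the link status change event above
+ * decodes as:
+ *
+ *	int link_up = !!(event_data1 &
+ *	    HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE);
+ *	uint16_t port_id = (event_data1 &
+ *	    HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK) >>
+ *	    HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT;
+ *
+ * The local variables are hypothetical; link_up follows the UP/DOWN
+ * encoding of the LINK_CHANGE bit above.
+ */
+ /*
+ * This field indicates the exact type of the completion; see the
+ * 16B/32B record length convention described above: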
+ */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link speed changed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE \ + UINT32_C(0x2) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* + * When this bit is '1', the link was forced to the + * force_link_speed value. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE \ + UINT32_C(0x1) + /* The new link speed in 100 Mbps units. */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK \ + UINT32_C(0xfffe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT \ + 1 + /* 100Mb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB \ + (UINT32_C(0x1) << 1) + /* 1Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB \ + (UINT32_C(0xa) << 1) + /* 2Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB \ + (UINT32_C(0x14) << 1) + /* 2.5Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB \ + (UINT32_C(0x19) << 1) + /* 10Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB \ + (UINT32_C(0x64) << 1) + /* 20Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB \ + (UINT32_C(0xc8) << 1) + /* 25Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB \ + (UINT32_C(0xfa) << 1) + /* 40Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB \ + (UINT32_C(0x190) << 1) + /* 50Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB \ + (UINT32_C(0x1f4) << 1) + /* 100Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB \ + (UINT32_C(0x3e8) << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff0000) +
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 16 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_dcb_config_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_dcb_config_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* DCB Configuration changed */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE \ + UINT32_C(0x3) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE + /* Event specific data */ + uint32_t event_data2; + /* ETS configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS \ + UINT32_C(0x1) + /* PFC configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC \ + UINT32_C(0x2) + /* APP configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP \ + UINT32_C(0x4) + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
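+ */
+ /*
+ * Editorial usage sketch (illustrative only, not part of the generated
+ * HWRM interface): the link speed change event reports the new speed
+ * in 100 Mbps units, so it converts to Mbps as:
+ *
+ *	uint32_t mbps = 100 * ((event_data1 &
+ *	    HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK) >>
+ *	    HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT);
+ *
+ * For example, the 100GB encoding (0x3e8, i.e. 1000 units) yields
+ * 100000 Mbps; the local variable is hypothetical.
+ */
+ /*
+ * Valid bit for this completion record: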
+ */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 0 + /* Priority recommended for RoCE traffic */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK \ + UINT32_C(0xff0000) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT \ + 16 + /* none is 255 */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE \ + (UINT32_C(0xff) << 16) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST \ + HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE + /* Priority recommended for L2 traffic */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK \ + UINT32_C(0xff000000) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT \ + 24 + /* none is 255 */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE \ + (UINT32_C(0xff) << 24) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST \ + HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */ +struct hwrm_async_event_cmpl_port_conn_not_allowed { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Port connection not allowed */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED \ + UINT32_C(0x4) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT \ + 0 + /* + * This value indicates the current port level enforcement policy + * for the optics module when there is an optical module mismatch + * and port is not connected. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK \ + UINT32_C(0xff0000) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT \ + 16 + /* No enforcement */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE \ + (UINT32_C(0x0) << 16) + /* Disable Transmit side Laser. */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX \ + (UINT32_C(0x1) << 16) + /* Raise a warning message. */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG \ + (UINT32_C(0x2) << 16) + /* Power down the module. */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN \ + (UINT32_C(0x3) << 16) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_link_speed_cfg_not_allowed (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link speed configuration was not allowed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \ + UINT32_C(0x5) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
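+ */
+ /*
+ * Editorial usage sketch (illustrative only, not part of the generated
+ * HWRM interface): the enforcement policy reported by the "port
+ * connection not allowed" event above extracts as:
+ *
+ *	uint32_t policy = (event_data1 &
+ *	    HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK) >>
+ *	    HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT;
+ *
+ * giving 0 (no enforcement), 1 (disable TX laser), 2 (warning message)
+ * or 3 (power down), per the definitions above; the local variable is
+ * hypothetical.
+ */
+ /*
+ * Valid bit for this completion record: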
+ */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT \ + 0 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_cfg_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link speed configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE \ + UINT32_C(0x6) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 0 + /* + * If set to 1, it indicates that the supported link speeds + * configuration on the port has changed. + * If set to 0, then there is no change in supported link speeds + * configuration. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE \ + UINT32_C(0x10000) + /* + * If set to 1, it indicates that the link speed configuration + * on the port has become illegal or invalid. + * If set to 0, then the link speed configuration on the port is + * legal or valid. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG \ + UINT32_C(0x20000) +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_port_phy_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_port_phy_cfg_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Port PHY configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE \ + UINT32_C(0x7) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 0 + /* + * If set to 1, it indicates that the FEC + * configuration on the port has changed. + * If set to 0, then there is no change in FEC configuration. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_FEC_CFG_CHANGE \ + UINT32_C(0x10000) + /* + * If set to 1, it indicates that the EEE configuration + * on the port has changed. + * If set to 0, then there is no change in EEE configuration + * on the port. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_EEE_CFG_CHANGE \ + UINT32_C(0x20000) + /* + * If set to 1, it indicates that the pause configuration + * on the PHY has changed. + * If set to 0, then there is no change in the pause + * configuration on the PHY. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PAUSE_CFG_CHANGE \ + UINT32_C(0x40000) +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_func_drvr_unload (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_drvr_unload { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Function driver unloaded */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD \ + UINT32_C(0x10) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Function ID */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT \ + 0 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_func_drvr_load (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_drvr_load { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Function driver loaded */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD \ + UINT32_C(0x11) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Function ID */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_func_flr_proc_cmplt (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_flr_proc_cmplt { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Function FLR related processing has completed */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT \ + UINT32_C(0x12) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Function ID */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT \ + 0 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_drvr_unload { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. 
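+ */
+ /*
+ * Editorial usage sketch (illustrative only, not part of the generated
+ * HWRM interface): drivers usually dispatch async events on the
+ * generic hwrm_async_event_cmpl event_id values defined earlier, e.g.:
+ *
+ *	struct hwrm_async_event_cmpl *async = (void *)cmpl;
+ *	switch (async->event_id) {
+ *	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+ *		handle_link_change(async->event_data1);
+ *		break;
+ *	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+ *		handle_pf_unload(async->event_data1);
+ *		break;
+ *	default:
+ *		break;
+ *	}
+ *
+ * cmpl, handle_link_change(), and handle_pf_unload() are hypothetical.
+ */
+ /*
+ * Identifiers of events: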
*/ + uint16_t event_id; + /* PF driver unloaded */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD \ + UINT32_C(0x20) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PF ID */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 + /* Indicates the physical port this pf belongs to */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK \ + UINT32_C(0x70000) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_pf_drvr_load (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_drvr_load { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* PF driver loaded */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD \ + UINT32_C(0x21) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
+
+/* hwrm_async_event_cmpl_pf_drvr_load (size:128b/16B) */
+struct hwrm_async_event_cmpl_pf_drvr_load {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* PF driver loaded */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD \
+ UINT32_C(0x21)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+ /* Indicates the physical port this PF belongs to */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK \
+ UINT32_C(0x70000)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_vf_flr (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_flr {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* VF Function Level Reset (FLR) */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR UINT32_C(0x30)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* VF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+ /* Indicates the physical function this event occurred on. */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_MASK \
+ UINT32_C(0xff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_SFT 16
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_vf_mac_addr_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_mac_addr_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* VF MAC Address Change */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE \
+ UINT32_C(0x31)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* VF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT \
+ 0
+} __attribute__((packed));
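All of the hwrm_async_event_cmpl_* records above are 16B overlays of one completion layout, with the event discriminator in the second 16-bit word. A hypothetical dispatch sketch, assuming a little-endian host (a portable driver would byte-swap before comparing); the handler body per event is only indicated.

#include <stdint.h>

static void
handle_async_event(const void *cmpl)
{
	const struct hwrm_async_event_cmpl_vf_flr *flr;
	/* event_id sits at offset 2 in every record of this family */
	uint16_t event_id = ((const uint16_t *)cmpl)[1];

	switch (event_id) {
	case HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR:
		flr = cmpl;
		/* e.g. quiesce the VF named by event_data1's VF_ID field */
		(void)((flr->event_data1 &
			HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK) >>
		       HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT);
		break;
	case HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE:
		/* refresh the cached MAC for the VF in event_data1 */
		break;
	default:
		break;
	}
}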
+
+/* hwrm_async_event_cmpl_pf_vf_comm_status_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* PF-VF communication channel status change. */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \
+ UINT32_C(0x32)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /*
+ * If this bit is set to 1, then it indicates that the PF-VF
+ * communication was lost and has now been re-established.
+ * If this bit is set to 0, then it indicates that the PF-VF
+ * communication was established and has now been lost.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED \
+ UINT32_C(0x1)
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* VF Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE \
+ UINT32_C(0x33)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /*
+ * Each flag provided in this field indicates a specific VF
+ * configuration change. At least one of these flags shall be set to 1
+ * when an asynchronous event completion of this type is provided
+ * by the HWRM.
+ */
+ uint32_t event_data1;
+ /*
+ * If this bit is set to 1, then the value of MTU
+ * was changed on this VF.
+ * If set to 0, then this bit should be ignored.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE \
+ UINT32_C(0x1)
+ /*
+ * If this bit is set to 1, then the value of MRU
+ * was changed on this VF.
+ * If set to 0, then this bit should be ignored.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE \
+ UINT32_C(0x2)
+ /*
+ * If this bit is set to 1, then the value of default MAC
+ * address was changed on this VF.
+ * If set to 0, then this bit should be ignored.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE \
+ UINT32_C(0x4)
+ /*
+ * If this bit is set to 1, then the value of default VLAN
+ * was changed on this VF.
+ * If set to 0, then this bit should be ignored.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE \
+ UINT32_C(0x8)
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_llfc_pfc_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_llfc_pfc_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* unused1 is 10 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_MASK \
+ UINT32_C(0xffc0)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_SFT 6
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* LLFC/PFC Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE \
+ UINT32_C(0x34)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Indicates LLFC/PFC status change */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_MASK \
+ UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_SFT \
+ 0
+ /*
+ * If this field is set to 1, then it indicates that LLFC is
+ * enabled.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LLFC \
+ UINT32_C(0x1)
+ /*
+ * If this field is set to 2, then it indicates that PFC
+ * is enabled.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC
+ /* Indicates the physical port on which this LLFC/PFC change occurred */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_MASK \
+ UINT32_C(0x1c)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_SFT \
+ 2
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0x1fffe0)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 5
+} __attribute__((packed));
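In the vf_cfg_change record further above, event_data1 is a cumulative bit set, so one event can report several attribute changes at once and each flag is tested independently. A hypothetical handler sketch; the reactions in the comments (re-querying via other HWRM commands) are illustrative, and a little-endian host is assumed.

#include <stdint.h>

static void
handle_vf_cfg_change(const struct hwrm_async_event_cmpl_vf_cfg_change *ev)
{
	uint32_t chg = ev->event_data1;

	if (chg & HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE) {
		/* re-read the VF MTU, e.g. with a HWRM_FUNC_QCFG query */
	}
	if (chg & HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE) {
		/* re-read the VF MRU */
	}
	if (chg &
	    HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE) {
		/* refresh the cached default MAC address */
	}
	if (chg &
	    HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE) {
		/* refresh the cached default VLAN */
	}
}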
+
+/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_default_vnic_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* unused1 is 10 b */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK \
+ UINT32_C(0xffc0)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT \
+ 6
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Notification of a default VNIC allocation or free */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION \
+ UINT32_C(0x35)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Indicates default vnic configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK \
+ UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT \
+ 0
+ /*
+ * If this field is set to 1, then it indicates that
+ * a default VNIC has been allocated.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC \
+ UINT32_C(0x1)
+ /*
+ * If this field is set to 2, then it indicates that
+ * a default VNIC has been freed.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
+ /* Indicates the physical function this event occurred on. */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK \
+ UINT32_C(0x3fc)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT \
+ 2
+ /* Indicates the virtual function this event occurred on */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK \
+ UINT32_C(0x3fffc00)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT \
+ 10
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* HWRM Error */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR + /* Event specific data */ + uint32_t event_data2; + /* Severity of HWRM Error */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0 + /* Warning */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING \ + UINT32_C(0x0) + /* Non-fatal Error */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL \ + UINT32_C(0x1) + /* Fatal Error */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL \ + UINT32_C(0x2) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST \ + HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Time stamp for error event */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP \ + UINT32_C(0x1) +} __attribute__((packed)); + +/******************* + * hwrm_func_reset * + *******************/ + + +/* hwrm_func_reset_input (size:192b/24B) */ +struct hwrm_func_reset_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the vf_id_valid field to be + * configured. + */ + #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1) + /* + * The ID of the VF that this PF is trying to reset. + * Only the parent PF shall be allowed to reset a child VF. 
+ * + * A parent PF driver shall use this field only when a specific child VF + * is requested to be reset. + */ + uint16_t vf_id; + /* This value indicates the level of a function reset. */ + uint8_t func_reset_level; + /* + * Reset the caller function and its children VFs (if any). If no + * children functions exist, then reset the caller function only. + */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL \ + UINT32_C(0x0) + /* Reset the caller function only */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME \ + UINT32_C(0x1) + /* + * Reset all children VFs of the caller function driver if the + * caller is a PF driver. + * It is an error to specify this level by a VF driver. + * It is an error to specify this level by a PF driver with + * no children VFs. + */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \ + UINT32_C(0x2) + /* + * Reset a specific VF of the caller function driver if the caller + * is the parent PF driver. + * It is an error to specify this level by a VF driver. + * It is an error to specify this level by a PF driver that is not + * the parent of the VF that is being requested to reset. + */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF \ + UINT32_C(0x3) + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_LAST \ + HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF + uint8_t unused_0; +} __attribute__((packed)); + +/* hwrm_func_reset_output (size:128b/16B) */ +struct hwrm_func_reset_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************** + * hwrm_func_getfid * + ********************/ + + +/* hwrm_func_getfid_input (size:192b/24B) */ +struct hwrm_func_getfid_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the pci_id field to be + * configured. + */ + #define HWRM_FUNC_GETFID_INPUT_ENABLES_PCI_ID UINT32_C(0x1) + /* + * This value is the PCI ID of the queried function. + * If ARI is enabled, then it is + * Bus Number (8b):Function Number(8b). 
Otherwise, it is + * Bus Number (8b):Device Number (5b):Function Number(3b). + */ + uint16_t pci_id; + uint8_t unused_0[2]; +} __attribute__((packed)); + +/* hwrm_func_getfid_output (size:128b/16B) */ +struct hwrm_func_getfid_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * FID value. This value is used to identify operations on the PCI + * bus as belonging to a particular PCI function. + */ + uint16_t fid; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_func_vf_alloc * + **********************/ + + +/* hwrm_func_vf_alloc_input (size:192b/24B) */ +struct hwrm_func_vf_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the first_vf_id field to be + * configured. + */ + #define HWRM_FUNC_VF_ALLOC_INPUT_ENABLES_FIRST_VF_ID UINT32_C(0x1) + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t first_vf_id; + /* The number of virtual functions requested. */ + uint16_t num_vfs; +} __attribute__((packed)); + +/* hwrm_func_vf_alloc_output (size:128b/16B) */ +struct hwrm_func_vf_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The ID of the first VF allocated. */ + uint16_t first_vf_id; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
+ */ + uint8_t valid; +} __attribute__((packed)); + +/********************* + * hwrm_func_vf_free * + *********************/ + + +/* hwrm_func_vf_free_input (size:192b/24B) */ +struct hwrm_func_vf_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the first_vf_id field to be + * configured. + */ + #define HWRM_FUNC_VF_FREE_INPUT_ENABLES_FIRST_VF_ID UINT32_C(0x1) + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t first_vf_id; + /* + * The number of virtual functions requested. + * 0xFFFF - Cleanup all children of this PF. + */ + uint16_t num_vfs; +} __attribute__((packed)); + +/* hwrm_func_vf_free_output (size:128b/16B) */ +struct hwrm_func_vf_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************** + * hwrm_func_vf_cfg * + ********************/ + + +/* hwrm_func_vf_cfg_input (size:448b/56B) */ +struct hwrm_func_vf_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the mtu field to be + * configured. 
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the guest_vlan field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the async_event_cr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the dflt_mac_addr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the num_rsscos_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the num_cmpl_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the num_tx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the num_rx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the num_l2_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the num_vnics field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the num_stat_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the num_hw_ring_grps field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS \
+ UINT32_C(0x800)
+ /*
+ * The maximum transmission unit requested on the function.
+ * The HWRM should make sure that the mtu of
+ * the function does not exceed the mtu of the physical
+ * port that this function is associated with.
+ *
+ * In addition to requesting mtu per function, it is
+ * possible to configure mtu per transmit ring.
+ * By default, the mtu of each transmit ring associated
+ * with a function is equal to the mtu of the function.
+ * The HWRM should make sure that the mtu of each transmit
+ * ring that is assigned to a function has a valid mtu.
+ */
+ uint16_t mtu;
+ /*
+ * The guest VLAN for the function being configured.
+ * This field's format is the same as 802.1Q Tag's
+ * Tag Control Information (TCI) format that includes both
+ * Priority Code Point (PCP) and VLAN Identifier (VID).
+ */
+ uint16_t guest_vlan;
+ /*
+ * ID of the target completion ring for receiving asynchronous
+ * event completions. If this field is not valid, then the
+ * HWRM shall use the default completion ring of the function
+ * that is being configured as the target completion ring for
+ * providing any asynchronous event completions for that
+ * function.
+ * If this field is valid, then the HWRM shall use the
+ * completion ring identified by this ID as the target
+ * completion ring for providing any asynchronous event
+ * completions for the function that is being configured.
+ */
+ uint16_t async_event_cr;
+ /*
+ * This value is the current MAC address requested by the VF
+ * driver to be configured on this VF. A value of
+ * 00-00-00-00-00-00 indicates no MAC address configuration
+ * is requested by the VF driver.
+ * The parent PF driver may reject or overwrite this
+ * MAC address.
+ */
+ uint8_t dflt_mac_addr[6];
+ uint32_t flags;
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of TX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST \
+ UINT32_C(0x1)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST \
+ UINT32_C(0x2)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of CMPL rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST \
+ UINT32_C(0x4)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RSS ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RSSCOS_CTX_ASSETS_TEST \
+ UINT32_C(0x8)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of ring groups) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST \
+ UINT32_C(0x10)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of stat ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST \
+ UINT32_C(0x20)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of VNICs) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST \
+ UINT32_C(0x40)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of L2 ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_L2_CTX_ASSETS_TEST \
+ UINT32_C(0x80)
+ /* The number of RSS/COS contexts requested for the VF. */
+ uint16_t num_rsscos_ctxs;
+ /* The number of completion rings requested for the VF. */
+ uint16_t num_cmpl_rings;
+ /* The number of transmit rings requested for the VF. */
+ uint16_t num_tx_rings;
+ /* The number of receive rings requested for the VF. */
+ uint16_t num_rx_rings;
+ /* The number of L2 contexts requested for the VF.
+ */
+ uint16_t num_l2_ctxs;
+ /* The number of vnics requested for the VF. */
+ uint16_t num_vnics;
+ /* The number of statistic contexts requested for the VF. */
+ uint16_t num_stat_ctxs;
+ /* The number of HW ring groups requested for the VF. */
+ uint16_t num_hw_ring_grps;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
+struct hwrm_func_vf_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************
+ * hwrm_func_qcaps *
+ *******************/
+
+
+/* hwrm_func_qcaps_input (size:192b/24B) */
+struct hwrm_func_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __attribute__((packed));
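A hypothetical sketch of the request/response contract that every command in this file shares, using the qcaps request above and the response defined just below: zero the request, fill the common header, point resp_addr at a DMA-visible response buffer, send, and treat the response as complete only once its trailing `valid` byte reads 1 (the firmware writes that byte last). hwrm_send_message() is an assumed transport helper, HWRM_FUNC_QCAPS is the request-type constant defined elsewhere in this file, and real code would bound the poll with a timeout and byte-swap on big-endian hosts.

#include <stdint.h>
#include <string.h>

int hwrm_send_message(void *msg, size_t len); /* assumed transport helper */

static int
query_own_caps(struct hwrm_func_qcaps_input *req,
               volatile struct hwrm_func_qcaps_output *resp,
               uint64_t resp_dma_addr)
{
	memset(req, 0, sizeof(*req));
	req->req_type = HWRM_FUNC_QCAPS; /* request-type constant from this file */
	req->target_id = 0xffff;         /* 0xFFFF addresses the HWRM itself */
	req->resp_addr = resp_dma_addr;  /* physically contiguous buffer */
	req->fid = 0xffff;               /* all-Fs: query the requesting function */

	if (hwrm_send_message(req, sizeof(*req)) != 0)
		return -1;

	while (resp->valid != 1)
		; /* spin until the firmware's final write lands */

	return resp->error_code == 0 ? 0 : -1;
}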
+
+/* hwrm_func_qcaps_output (size:640b/80B) */
+struct hwrm_func_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * FID value. This value is used to identify operations on the PCI
+ * bus as belonging to a particular PCI function.
+ */
+ uint16_t fid;
+ /*
+ * Port ID of port that this function is associated with.
+ * Valid only for the PF.
+ * 0xFF... (All Fs) if this function is not associated with
+ * any port.
+ * 0xFF... (All Fs) if this function is called from a VF.
+ */
+ uint16_t port_id;
+ uint32_t flags;
+ /* If 1, then Push mode is supported on this function. */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED \
+ UINT32_C(0x1)
+ /*
+ * If 1, then the global MSI-X auto-masking is enabled for the
+ * device.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \
+ UINT32_C(0x2)
+ /*
+ * If 1, then the Precision Time Protocol (PTP) processing
+ * is supported on this function.
+ * The HWRM should enable PTP on only a single Physical
+ * Function (PF) per port.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If 1, then RDMA over Converged Ethernet (RoCE) v1
+ * is supported on this function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If 1, then RDMA over Converged Ethernet (RoCE) v2
+ * is supported on this function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED \
+ UINT32_C(0x10)
+ /*
+ * If 1, then control and configuration of WoL magic packet
+ * are supported on this function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED \
+ UINT32_C(0x20)
+ /*
+ * If 1, then control and configuration of bitmap pattern
+ * packet are supported on this function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED \
+ UINT32_C(0x40)
+ /*
+ * If set to 1, then the control and configuration of rate limit
+ * of an allocated TX ring on the queried function is supported.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED \
+ UINT32_C(0x80)
+ /*
+ * If 1, then control and configuration of minimum and
+ * maximum bandwidths are supported on the queried function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED \
+ UINT32_C(0x100)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to set the rate limits
+ * on the TX rings of its children VFs.
+ * If this query is for a PF and this flag is set to 0, then
+ * the PF does not have the capability to set the rate limits
+ * on the TX rings of its children VFs.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED \
+ UINT32_C(0x200)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to set the minimum and/or
+ * maximum bandwidths for its children VFs.
+ * If this query is for a PF and this flag is set to 0, then
+ * the PF does not have the capability to set the minimum or
+ * maximum bandwidths for its children VFs.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED \
+ UINT32_C(0x400)
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function.
+ * If set to 1, then standard TX ring mode is supported
+ * on the queried function.
+ * If set to 0, then standard TX ring mode is not available
+ * on the queried function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_STD_TX_RING_MODE_SUPPORTED \
+ UINT32_C(0x800)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect GENEVE tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x1000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect NVGRE tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x2000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect GRE tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GRE_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x4000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect MPLS tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_MPLS_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x8000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to support PCIe stats.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PCIE_STATS_SUPPORTED \
+ UINT32_C(0x10000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to adopt VFs belonging
+ * to another PF.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADOPTED_PF_SUPPORTED \
+ UINT32_C(0x20000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to administer another PF.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADMIN_PF_SUPPORTED \
+ UINT32_C(0x40000)
+ /*
+ * This value is the current MAC address configured for this
+ * function. A value of 00-00-00-00-00-00 indicates no
+ * MAC address is currently configured.
+ */
+ uint8_t mac_address[6];
+ /*
+ * The maximum number of RSS/COS contexts that can be
+ * allocated to the function.
+ */
+ uint16_t max_rsscos_ctx;
+ /*
+ * The maximum number of completion rings that can be
+ * allocated to the function.
+ */
+ uint16_t max_cmpl_rings;
+ /*
+ * The maximum number of transmit rings that can be
+ * allocated to the function.
+ */
+ uint16_t max_tx_rings;
+ /*
+ * The maximum number of receive rings that can be
+ * allocated to the function.
+ */
+ uint16_t max_rx_rings;
+ /*
+ * The maximum number of L2 contexts that can be
+ * allocated to the function.
+ */
+ uint16_t max_l2_ctxs;
+ /*
+ * The maximum number of VNICs that can be
+ * allocated to the function.
+ */
+ uint16_t max_vnics;
+ /*
+ * The identifier for the first VF enabled on a PF. This
+ * is valid only on the PF with SR-IOV enabled.
+ * 0xFF... (All Fs) if this command is called on a PF with
+ * SR-IOV disabled or on a VF.
+ */
+ uint16_t first_vf_id;
+ /*
+ * The maximum number of VFs that can be
+ * allocated to the function. This is valid only on the
+ * PF with SR-IOV enabled. 0xFF... (All Fs) if this
+ * command is called on a PF with SR-IOV disabled or
+ * on a VF.
+ */
+ uint16_t max_vfs;
+ /*
+ * The maximum number of statistic contexts that can be
+ * allocated to the function.
+ */
+ uint16_t max_stat_ctx;
+ /*
+ * The maximum number of Encapsulation records that can be
+ * offloaded by this function.
+ */
+ uint32_t max_encap_records;
+ /*
+ * The maximum number of decapsulation records that can
+ * be offloaded by this function.
+ */
+ uint32_t max_decap_records;
+ /*
+ * The maximum number of Exact Match (EM) flows that can be
+ * offloaded by this function on the TX side.
+ */
+ uint32_t max_tx_em_flows;
+ /*
+ * The maximum number of Wildcard Match (WM) flows that can
+ * be offloaded by this function on the TX side.
+ */
+ uint32_t max_tx_wm_flows;
+ /*
+ * The maximum number of Exact Match (EM) flows that can be
+ * offloaded by this function on the RX side.
+ */
+ uint32_t max_rx_em_flows;
+ /*
+ * The maximum number of Wildcard Match (WM) flows that can
+ * be offloaded by this function on the RX side.
+ */
+ uint32_t max_rx_wm_flows;
+ /*
+ * The maximum number of multicast filters that can
+ * be supported by this function on the RX side.
+ */
+ uint32_t max_mcast_filters;
+ /*
+ * The maximum value of flow_id that can be supported
+ * in completion records.
+ */
+ uint32_t max_flow_id;
+ /*
+ * The maximum number of HW ring groups that can be
+ * supported on this function.
+ */
+ uint32_t max_hw_ring_grps;
+ /*
+ * The maximum number of strict priority transmit rings
+ * that can be allocated to the function.
+ * This number indicates the maximum number of TX rings
+ * that can be assigned strict priorities out of the
+ * maximum number of TX rings that can be allocated
+ * (max_tx_rings) to the function.
+ */
+ uint16_t max_sp_tx_rings;
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************
+ * hwrm_func_qcfg *
+ ******************/
+
+
+/* hwrm_func_qcfg_input (size:192b/24B) */
+struct hwrm_func_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __attribute__((packed));
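In the qcfg response defined just below, min_bw and max_bw pack a 28-bit magnitude, a scale bit (bits vs. bytes) and a 3-bit unit code into one 32-bit word. A minimal decoding sketch using the MIN_BW defines that follow; mapping the unit code to an actual multiplier is left out for brevity.

#include <stdint.h>

static uint32_t
decode_min_bw(uint32_t bw, int *scale_is_bytes, uint32_t *unit)
{
	/* bit 28: 0 = value scaled in bits, 1 = scaled in bytes */
	*scale_is_bytes = !!(bw & HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE);
	/* bits 31:29: unit code (mega, kilo, base, giga, percent, ...) */
	*unit = (bw & HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK) >>
		HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT;
	/* bits 27:0: the raw bandwidth magnitude */
	return (bw & HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK) >>
	       HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT;
}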
+
+/* hwrm_func_qcfg_output (size:640b/80B) */
+struct hwrm_func_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * FID value. This value is used to identify operations on the PCI
+ * bus as belonging to a particular PCI function.
+ */
+ uint16_t fid;
+ /*
+ * Port ID of port that this function is associated with.
+ * 0xFF... (All Fs) if this function is not associated with
+ * any port.
+ */
+ uint16_t port_id;
+ /*
+ * This value is the current VLAN setting for this
+ * function. The value of 0 for this field indicates
+ * no priority tagging or VLAN is used.
+ * This field's format is the same as 802.1Q Tag's
+ * Tag Control Information (TCI) format that includes both
+ * Priority Code Point (PCP) and VLAN Identifier (VID).
+ */
+ uint16_t vlan;
+ uint16_t flags;
+ /*
+ * If 1, then magic packet based Out-Of-Box WoL is enabled on
+ * the port associated with this function.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \
+ UINT32_C(0x1)
+ /*
+ * If 1, then bitmap pattern based Out-Of-Box WoL packet is enabled
+ * on the port associated with this function.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_BMP_ENABLED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, then FW based DCBX agent is enabled and running on
+ * the port associated with this function.
+ * If set to 0, then DCBX agent is not running in the firmware.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \
+ UINT32_C(0x4)
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function.
+ * If set to 1, then standard TX ring mode is enabled
+ * on the queried function.
+ * If set to 0, then the standard TX ring mode is disabled
+ * on the queried function. In this extended TX ring resource
+ * mode, the minimum and maximum bandwidth settings are not
+ * supported to allow the allocation of TX rings to span multiple
+ * scheduler nodes.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_STD_TX_RING_MODE_ENABLED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1 then FW based LLDP agent is enabled and running on
+ * the port associated with this function.
+ * If set to 0 then the LLDP agent is not running in the firmware.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED \
+ UINT32_C(0x10)
+ /*
+ * If set to 1, then multi-host mode is active for this function.
+ * If set to 0, then multi-host mode is inactive for this function
+ * or not applicable for this device.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST \
+ UINT32_C(0x20)
+ /*
+ * This value is the current MAC address configured for this
+ * function. A value of 00-00-00-00-00-00 indicates no
+ * MAC address is currently configured.
+ */
+ uint8_t mac_address[6];
+ /*
+ * This value is the current PCI ID of this
+ * function. If ARI is enabled, then it is
+ * Bus Number (8b):Function Number(8b). Otherwise, it is
+ * Bus Number (8b):Device Number (4b):Function Number(4b).
+ * If multi-host mode is active, the 4 lsb will indicate
+ * the PF index for this function.
+ */
+ uint16_t pci_id;
+ /*
+ * The number of RSS/COS contexts currently
+ * allocated to the function.
+ */
+ uint16_t alloc_rsscos_ctx;
+ /*
+ * The number of completion rings currently allocated to
+ * the function. This does not include the rings allocated
+ * to any children functions if any.
+ */
+ uint16_t alloc_cmpl_rings;
+ /*
+ * The number of transmit rings currently allocated to
+ * the function. This does not include the rings allocated
+ * to any children functions if any.
+ */
+ uint16_t alloc_tx_rings;
+ /*
+ * The number of receive rings currently allocated to
+ * the function. This does not include the rings allocated
+ * to any children functions if any.
+ */
+ uint16_t alloc_rx_rings;
+ /* The allocated number of L2 contexts to the function. */
+ uint16_t alloc_l2_ctx;
+ /* The allocated number of vnics to the function. */
+ uint16_t alloc_vnics;
+ /*
+ * The maximum transmission unit of the function.
+ * For rings allocated on this function, this default
+ * value is used if ring MTU is not specified.
+ */
+ uint16_t mtu;
+ /*
+ * The maximum receive unit of the function.
+ * For vnics allocated on this function, this default
+ * value is used if vnic MRU is not specified.
+ */
+ uint16_t mru;
+ /* The statistics context assigned to a function. */
+ uint16_t stat_ctx_id;
+ /*
+ * The HWRM shall return Unknown value for this field
+ * when this command is used to query VF's configuration.
+ */
+ uint8_t port_partition_type;
+ /* Single physical function */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_SPF UINT32_C(0x0)
+ /* Multiple physical functions */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_MPFS UINT32_C(0x1)
+ /* Network Partitioning 1.0 */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 UINT32_C(0x2)
+ /* Network Partitioning 1.5 */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5 UINT32_C(0x3)
+ /* Network Partitioning 2.0 */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 UINT32_C(0x4)
+ /* Unknown */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN
+ /*
+ * This field indicates the number of physical functions in this
+ * port partition.
+ * HWRM shall return unavail (i.e. value of 0) for this field
+ * when this command is used to query VF's configuration or
+ * from older firmware that doesn't support this field.
+ */
+ uint8_t port_pf_cnt;
+ /* number of PFs is not available */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL
+ /*
+ * The default VNIC ID assigned to a function that is
+ * being queried.
+ */
+ uint16_t dflt_vnic_id;
+ uint16_t max_mtu_configured;
+ /*
+ * Minimum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
+ * A value of 0 indicates the minimum bandwidth is not
+ * configured.
+ */
+ uint32_t min_bw;
+ /* The bandwidth value. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth.
*/ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated for this function. + * The HWRM will translate this value into byte counter and + * time interval used for the scheduler inside the device. + * A value of 0 indicates that the maximum bandwidth is not + * configured. + */ + uint32_t max_bw; + /* The bandwidth value. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_LAST \ + HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID + /* + * This value indicates the Edge virtual bridge mode for the + * domain that this function belongs to. + */ + uint8_t evb_mode; + /* No Edge Virtual Bridging (EVB) */ + #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_NO_EVB UINT32_C(0x0) + /* Virtual Ethernet Bridge (VEB) */ + #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1) + /* Virtual Ethernet Port Aggregator (VEPA) */ + #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2) + #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_LAST \ + HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA + uint8_t options; + /* + * This value indicates the PCIE device cache line size. + * The cache line size allows the DMA writes to terminate and + * start at the cache boundary. + */ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_MASK \ + UINT32_C(0x3) + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SFT 0 + /* Cache Line Size 64 bytes */ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_64 \ + UINT32_C(0x0) + /* Cache Line Size 128 bytes */ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 \ + UINT32_C(0x1) + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_LAST \ + HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 + /* Reserved for future. 
*/ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_MASK \ + UINT32_C(0xfc) + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_SFT 2 + /* + * The number of VFs that are allocated to the function. + * This is valid only on the PF with SR-IOV enabled. + * 0xFF... (All Fs) if this command is called on a PF with + * SR-IOV disabled or on a VF. + */ + uint16_t alloc_vfs; + /* + * The number of allocated multicast filters for this + * function on the RX side. + */ + uint32_t alloc_mcast_filters; + /* + * The number of allocated HW ring groups for this + * function. + */ + uint32_t alloc_hw_ring_grps; + /* + * The number of strict priority transmit rings out of + * currently allocated TX rings to the function + * (alloc_tx_rings). + */ + uint16_t alloc_sp_tx_rings; + /* + * The number of statistics contexts + * currently reserved for the function. + */ + uint16_t alloc_stat_ctx; + /* + * This field specifies how many NQs are reserved for the PF. + * Remaining NQs that belong to the PF are available for VFs. + * Once a PF has created VFs, it cannot change how many NQs are + * reserved for itself (since the NQs must be contiguous in HW). + */ + uint16_t alloc_msix; + uint8_t unused_2[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_func_vlan_qcfg * + ***********************/ + + +/* hwrm_func_vlan_qcfg_input (size:192b/24B) */ +struct hwrm_func_vlan_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being + * configured. + * If set to 0xFF... (All Fs), then the configuration is + * for the requesting function. + */ + uint16_t fid; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_func_vlan_qcfg_output (size:320b/40B) */ +struct hwrm_func_vlan_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; + /* S-TAG VLAN identifier configured for the function. */ + uint16_t stag_vid; + /* S-TAG PCP value configured for the function. */ + uint8_t stag_pcp; + uint8_t unused_1; + /* + * S-TAG TPID value configured for the function. This field is specified in + * network byte order. + */ + uint16_t stag_tpid; + /* C-TAG VLAN identifier configured for the function. */ + uint16_t ctag_vid; + /* C-TAG PCP value configured for the function. */ + uint8_t ctag_pcp; + uint8_t unused_2; + /* + * C-TAG TPID value configured for the function. This field is specified in + * network byte order. + */ + uint16_t ctag_tpid; + /* Future use. */ + uint32_t rsvd2; + /* Future use. */ + uint32_t rsvd3; + uint32_t unused_3; +} __attribute__((packed)); + +/********************** + * hwrm_func_vlan_cfg * + **********************/ + + +/* hwrm_func_vlan_cfg_input (size:384b/48B) */ +struct hwrm_func_vlan_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being + * configured. + * If set to 0xFF... (All Fs), then the configuration is + * for the requesting function. + */ + uint16_t fid; + uint8_t unused_0[2]; + uint32_t enables; + /* + * This bit must be '1' for the stag_vid field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1) + /* + * This bit must be '1' for the ctag_vid field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2) + /* + * This bit must be '1' for the stag_pcp field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4) + /* + * This bit must be '1' for the ctag_pcp field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8) + /* + * This bit must be '1' for the stag_tpid field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10) + /* + * This bit must be '1' for the ctag_tpid field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20) + /* S-TAG VLAN identifier configured for the function. */ + uint16_t stag_vid; + /* S-TAG PCP value configured for the function. */ + uint8_t stag_pcp; + uint8_t unused_1; + /* + * S-TAG TPID value configured for the function. This field is specified in + * network byte order. + */ + uint16_t stag_tpid; + /* C-TAG VLAN identifier configured for the function. */ + uint16_t ctag_vid; + /* C-TAG PCP value configured for the function. 
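+ * As this is a 3-bit 802.1p priority, the valid range is 0 through 7.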
*/
+ uint8_t ctag_pcp;
+ uint8_t unused_2;
+ /*
+ * C-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
+ */
+ uint16_t ctag_tpid;
+ /* Future use. */
+ uint32_t rsvd1;
+ /* Future use. */
+ uint32_t rsvd2;
+ uint8_t unused_3[4];
+} __attribute__((packed));
+
+/* hwrm_func_vlan_cfg_output (size:128b/16B) */
+struct hwrm_func_vlan_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************
+ * hwrm_func_cfg *
+ *****************/
+
+
+/* hwrm_func_cfg_input (size:704b/88B) */
+struct hwrm_func_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being
+ * configured.
+ * If set to 0xFF... (All Fs), then the configuration is
+ * for the requesting function.
+ */
+ uint16_t fid;
+ /*
+ * This field specifies how many NQs will be reserved for the PF.
+ * Remaining NQs that belong to the PF become available for VFs.
+ * Once a PF has created VFs, it cannot change how many NQs are
+ * reserved for itself (since the NQs must be contiguous in HW).
+ */
+ uint16_t num_msix;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the function is disabled with
+ * source MAC address check.
+ * This is an anti-spoofing check. If this flag is set,
+ * then the function shall be configured to allow
+ * transmission of frames with any source MAC address.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the function is enabled with
+ * source MAC address check.
+ * This is an anti-spoofing check. If this flag is set,
+ * then the function shall be configured to allow
+ * transmission of frames with the source MAC address that
+ * is configured for this function.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE \
+ UINT32_C(0x2)
+ /* Reserved.
*/
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_MASK \
+ UINT32_C(0x1fc)
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_SFT 2
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function.
+ * If set to 1, then standard TX ring mode is requested to be
+ * enabled on the function being configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE \
+ UINT32_C(0x200)
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function.
+ * If set to 1, then the standard TX ring mode is requested to
+ * be disabled on the function being configured. In this extended
+ * TX ring resource mode, the minimum and maximum bandwidth settings
+ * are not supported to allow the allocation of TX rings to
+ * span multiple scheduler nodes.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE \
+ UINT32_C(0x400)
+ /*
+ * If this bit is set, the virtual MAC address configured
+ * in this command will persist across a warm boot.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_VIRT_MAC_PERSIST \
+ UINT32_C(0x800)
+ /*
+ * This bit only applies to the VF. If this bit is set, the statistic
+ * context counters will not be cleared when the statistic context is freed
+ * or a function reset is called on the VF. This bit will be cleared when
+ * the PF is unloaded or a function reset is called on the PF.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \
+ UINT32_C(0x1000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of TX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_TX_ASSETS_TEST \
+ UINT32_C(0x2000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RX_ASSETS_TEST \
+ UINT32_C(0x4000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of CMPL rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST \
+ UINT32_C(0x8000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RSS ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSSCOS_CTX_ASSETS_TEST \
+ UINT32_C(0x10000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of ring groups) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
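+ * As with the other *_ASSETS_TEST flags, this lets a driver probe
+ * whether a proposed resource configuration would succeed without
+ * actually committing any resources.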
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST \
+ UINT32_C(0x20000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of stat ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST \
+ UINT32_C(0x40000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of VNICs) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST \
+ UINT32_C(0x80000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of L2 ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_L2_CTX_ASSETS_TEST \
+ UINT32_C(0x100000)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the mtu field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MTU \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the mru field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MRU \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the num_rsscos_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the num_cmpl_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the num_tx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the num_rx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the num_l2_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the num_vnics field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the num_stat_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the dflt_mac_addr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the dflt_vlan field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the dflt_ip_addr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_IP_ADDR \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the min_bw field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MIN_BW \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the max_bw field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the async_event_cr field to be
+ * configured.
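+ * As with the other enables bits, several may be combined in a
+ * single request; for example, setting
+ * (HWRM_FUNC_CFG_INPUT_ENABLES_MTU | HWRM_FUNC_CFG_INPUT_ENABLES_MRU)
+ * marks both the mtu and mru fields as valid.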
+ */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the vlan_antispoof_mode field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE \ + UINT32_C(0x8000) + /* + * This bit must be '1' for the allowed_vlan_pris field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_ALLOWED_VLAN_PRIS \ + UINT32_C(0x10000) + /* + * This bit must be '1' for the evb_mode field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE \ + UINT32_C(0x20000) + /* + * This bit must be '1' for the num_mcast_filters field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MCAST_FILTERS \ + UINT32_C(0x40000) + /* + * This bit must be '1' for the num_hw_ring_grps field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS \ + UINT32_C(0x80000) + /* + * This bit must be '1' for the cache_linesize field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_CACHE_LINESIZE \ + UINT32_C(0x100000) + /* + * This bit must be '1' for the num_msix field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX \ + UINT32_C(0x200000) + /* + * The maximum transmission unit of the function. + * The HWRM should make sure that the mtu of + * the function does not exceed the mtu of the physical + * port that this function is associated with. + * + * In addition to configuring mtu per function, it is + * possible to configure mtu per transmit ring. + * By default, the mtu of each transmit ring associated + * with a function is equal to the mtu of the function. + * The HWRM should make sure that the mtu of each transmit + * ring that is assigned to a function has a valid mtu. + */ + uint16_t mtu; + /* + * The maximum receive unit of the function. + * The HWRM should make sure that the mru of + * the function does not exceed the mru of the physical + * port that this function is associated with. + * + * In addition to configuring mru per function, it is + * possible to configure mru per vnic. + * By default, the mru of each vnic associated + * with a function is equal to the mru of the function. + * The HWRM should make sure that the mru of each vnic + * that is assigned to a function has a valid mru. + */ + uint16_t mru; + /* + * The number of RSS/COS contexts requested for the + * function. + */ + uint16_t num_rsscos_ctxs; + /* + * The number of completion rings requested for the + * function. This does not include the rings allocated + * to any children functions if any. + */ + uint16_t num_cmpl_rings; + /* + * The number of transmit rings requested for the function. + * This does not include the rings allocated to any + * children functions if any. + */ + uint16_t num_tx_rings; + /* + * The number of receive rings requested for the function. + * This does not include the rings allocated + * to any children functions if any. + */ + uint16_t num_rx_rings; + /* The requested number of L2 contexts for the function. */ + uint16_t num_l2_ctxs; + /* The requested number of vnics for the function. */ + uint16_t num_vnics; + /* The requested number of statistic contexts for the function. */ + uint16_t num_stat_ctxs; + /* + * The number of HW ring groups that should + * be reserved for this function. + */ + uint16_t num_hw_ring_grps; + /* The default MAC address for the function being configured. */ + uint8_t dflt_mac_addr[6]; + /* + * The default VLAN for the function being configured. 
+ * This field's format is the same as the 802.1Q Tag's
+ * Tag Control Information (TCI) format that includes both
+ * Priority Code Point (PCP) and VLAN Identifier (VID).
+ */
+ uint16_t dflt_vlan;
+ /*
+ * The default IP address for the function being configured.
+ * This address is only used when enabling the source property check.
+ */
+ uint32_t dflt_ip_addr[4];
+ /*
+ * Minimum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
+ */
+ uint32_t min_bw;
+ /* The bandwidth value. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_LAST \
+ HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
+ */
+ uint32_t max_bw;
+ /* The bandwidth value. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_LAST \
+ HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10).
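+ * As an illustrative, non-normative sketch derived from the
+ * definitions above, a driver capping a function at 5 Gb could
+ * encode:
+ *   max_bw = 5 | HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS |
+ *           HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA;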
*/
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * ID of the target completion ring for receiving asynchronous
+ * event completions. If this field is not valid, then the
+ * HWRM shall use the default completion ring of the function
+ * that is being configured as the target completion ring for
+ * providing any asynchronous event completions for that
+ * function.
+ * If this field is valid, then the HWRM shall use the
+ * completion ring identified by this ID as the target
+ * completion ring for providing any asynchronous event
+ * completions for the function that is being configured.
+ */
+ uint16_t async_event_cr;
+ /* VLAN Anti-spoofing mode. */
+ uint8_t vlan_antispoof_mode;
+ /* No VLAN anti-spoofing checks are enabled */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK \
+ UINT32_C(0x0)
+ /* Validate VLAN against the configured VLAN(s) */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN \
+ UINT32_C(0x1)
+ /* Insert VLAN if it does not exist, otherwise discard */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE \
+ UINT32_C(0x2)
+ /* Insert VLAN if it does not exist, override VLAN if it exists */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN \
+ UINT32_C(0x3)
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_LAST \
+ HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+ /*
+ * This bit field defines VLAN PRIs that are allowed on
+ * this function.
+ * If the nth bit is set, then VLAN PRI n is allowed on this
+ * function.
+ */
+ uint8_t allowed_vlan_pris;
+ /*
+ * The HWRM shall allow a PF driver to change the EVB mode for the
+ * partition it belongs to.
+ * The HWRM shall not allow a VF driver to change the EVB mode.
+ * The HWRM shall take into account the switching of the EVB mode
+ * from one mode to another and reconfigure hardware resources as
+ * appropriate.
+ * Switching from VEB to VEPA mode requires
+ * disabling loopback traffic. Additionally,
+ * source knock outs are handled differently in VEB and VEPA
+ * modes.
+ */
+ uint8_t evb_mode;
+ /* No Edge Virtual Bridging (EVB) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
+ /* Virtual Ethernet Bridge (VEB) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEB UINT32_C(0x1)
+ /* Virtual Ethernet Port Aggregator (VEPA) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA UINT32_C(0x2)
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_LAST \
+ HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA
+ uint8_t options;
+ /*
+ * This value indicates the PCIE device cache line size.
+ * The cache line size allows the DMA writes to terminate and
+ * start at the cache boundary.
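+ * For example, selecting the CACHE_LINESIZE_SIZE_128 encoding
+ * below requests that DMA writes align to 128-byte boundaries.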
+ */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_MASK \ + UINT32_C(0x3) + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SFT 0 + /* Cache Line Size 64 bytes */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_64 \ + UINT32_C(0x0) + /* Cache Line Size 128 bytes */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 \ + UINT32_C(0x1) + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_LAST \ + HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 + /* Reserved for future. */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_MASK \ + UINT32_C(0xfc) + #define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_SFT 2 + /* + * The number of multicast filters that should + * be reserved for this function on the RX side. + */ + uint16_t num_mcast_filters; +} __attribute__((packed)); + +/* hwrm_func_cfg_output (size:128b/16B) */ +struct hwrm_func_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************** + * hwrm_func_qstats * + ********************/ + + +/* hwrm_func_qstats_input (size:192b/24B) */ +struct hwrm_func_qstats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being queried. + * 0xFF... (All Fs) if the query is for the requesting + * function. + */ + uint16_t fid; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_func_qstats_output (size:1408b/176B) */ +struct hwrm_func_qstats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of transmitted unicast packets on the function. */ + uint64_t tx_ucast_pkts; + /* Number of transmitted multicast packets on the function. */ + uint64_t tx_mcast_pkts; + /* Number of transmitted broadcast packets on the function. 
*/
+ uint64_t tx_bcast_pkts;
+ /*
+ * Number of transmitted packets that were discarded due to
+ * internal NIC resource problems. For transmit, this
+ * can only happen if TMP is configured to allow dropping
+ * in HOL blocking conditions, which is not a normal
+ * configuration.
+ */
+ uint64_t tx_discard_pkts;
+ /*
+ * Number of dropped packets on the transmit path on the function.
+ * These are packets that have been marked for drop by
+ * the TE CFA block or are packets that exceeded the
+ * transmit MTU limit for the function.
+ */
+ uint64_t tx_drop_pkts;
+ /* Number of transmitted bytes for unicast traffic on the function. */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic on the function. */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic on the function. */
+ uint64_t tx_bcast_bytes;
+ /* Number of received unicast packets on the function. */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets on the function. */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets on the function. */
+ uint64_t rx_bcast_pkts;
+ /*
+ * Number of received packets that were discarded on the function
+ * due to resource limitations. This can happen for 3 reasons:
+ * # The BD used for the packet has a bad format.
+ * # There were no BDs available in the ring for the packet.
+ * # There were no BDs available on-chip for the packet.
+ */
+ uint64_t rx_discard_pkts;
+ /*
+ * Number of dropped packets on the receive path on the function.
+ * These are packets that have been marked for drop by the
+ * RE CFA.
+ */
+ uint64_t rx_drop_pkts;
+ /* Number of received bytes for unicast traffic on the function. */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic on the function. */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic on the function. */
+ uint64_t rx_bcast_bytes;
+ /* Number of aggregated unicast packets on the function. */
+ uint64_t rx_agg_pkts;
+ /* Number of aggregated unicast bytes on the function. */
+ uint64_t rx_agg_bytes;
+ /* Number of aggregation events on the function. */
+ uint64_t rx_agg_events;
+ /* Number of aborted aggregations on the function. */
+ uint64_t rx_agg_aborts;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_func_clr_stats *
+ ***********************/
+
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
+struct hwrm_func_clr_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
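+ * A simple implementation can derive it from a monotonically
+ * increasing per-driver counter.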
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function. + * 0xFF... (All Fs) if the query is for the requesting + * function. + */ + uint16_t fid; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_func_clr_stats_output (size:128b/16B) */ +struct hwrm_func_clr_stats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_func_vf_resc_free * + **************************/ + + +/* hwrm_func_vf_resc_free_input (size:192b/24B) */ +struct hwrm_func_vf_resc_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t vf_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_func_vf_resc_free_output (size:128b/16B) */ +struct hwrm_func_vf_resc_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
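+ * A driver polling for completion can therefore wait for this
+ * byte to become non-zero before trusting the preceding fields.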
+ */ + uint8_t valid; +} __attribute__((packed)); + +/******************************* + * hwrm_func_vf_vnic_ids_query * + *******************************/ + + +/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */ +struct hwrm_func_vf_vnic_ids_query_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t vf_id; + uint8_t unused_0[2]; + /* Max number of vnic ids in vnic id table */ + uint32_t max_vnic_id_cnt; + /* This is the address for VF VNIC ID table */ + uint64_t vnic_id_tbl_addr; +} __attribute__((packed)); + +/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */ +struct hwrm_func_vf_vnic_ids_query_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Actual number of vnic ids + * + * Each VNIC ID is written as a 32-bit number. + */ + uint32_t vnic_id_cnt; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_func_drv_rgtr * + **********************/ + + +/* hwrm_func_drv_rgtr_input (size:896b/112B) */ +struct hwrm_func_drv_rgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. 
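+ * (In a DPDK PMD this is typically the IOVA of a DMA-capable
+ * memzone reserved for HWRM responses.)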
+ */ + uint64_t resp_addr; + uint32_t flags; + /* + * When this bit is '1', the function driver is requesting + * all requests from its children VF drivers to be + * forwarded to itself. + * This flag can only be set by the PF driver. + * If a VF driver sets this flag, it should be ignored + * by the HWRM. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1) + /* + * When this bit is '1', the function is requesting none of + * the requests from its children VF drivers to be + * forwarded to itself. + * This flag can only be set by the PF driver. + * If a VF driver sets this flag, it should be ignored + * by the HWRM. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2) + /* + * When this bit is '1', then ver_maj_8b, ver_min_8b, ver_upd_8b + * fields shall be ignored and ver_maj, ver_min, ver_upd + * and ver_patch shall be used for the driver version information. + * When this bit is '0', then ver_maj_8b, ver_min_8b, ver_upd_8b + * fields shall be used for the driver version information and + * ver_maj, ver_min, ver_upd and ver_patch shall be ignored. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_16BIT_VER_MODE UINT32_C(0x4) + uint32_t enables; + /* + * This bit must be '1' for the os_type field to be + * configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE \ + UINT32_C(0x1) + /* + * This bit must be '1' for the ver field to be + * configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER \ + UINT32_C(0x2) + /* + * This bit must be '1' for the timestamp field to be + * configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP \ + UINT32_C(0x4) + /* + * This bit must be '1' for the vf_req_fwd field to be + * configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD \ + UINT32_C(0x8) + /* + * This bit must be '1' for the async_event_fwd field to be + * configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD \ + UINT32_C(0x10) + /* This value indicates the type of OS. The values are based on CIM_OperatingSystem.mof file as published by the DMTF. */ + uint16_t os_type; + /* Unknown */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0) + /* Other OS not listed below. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER UINT32_C(0x1) + /* MSDOS OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS UINT32_C(0xe) + /* Windows OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WINDOWS UINT32_C(0x12) + /* Solaris OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS UINT32_C(0x1d) + /* Linux OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX UINT32_C(0x24) + /* FreeBSD OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD UINT32_C(0x2a) + /* VMware ESXi OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI UINT32_C(0x68) + /* Microsoft Windows 8 64-bit OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 UINT32_C(0x73) + /* Microsoft Windows Server 2012 R2 OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74) + /* UEFI driver. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UEFI UINT32_C(0x8000) + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LAST \ + HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UEFI + /* This is the 8bit major version of the driver. */ + uint8_t ver_maj_8b; + /* This is the 8bit minor version of the driver. */ + uint8_t ver_min_8b; + /* This is the 8bit update version of the driver. */ + uint8_t ver_upd_8b; + uint8_t unused_0[3]; + /* + * This is a 32-bit timestamp provided by the driver for + * keep alive. 
+ * The timestamp is in multiples of 1ms. + */ + uint32_t timestamp; + uint8_t unused_1[4]; + /* + * This is a 256-bit bit mask provided by the PF driver for + * letting the HWRM know what commands issued by the VF driver + * to the HWRM should be forwarded to the PF driver. + * Nth bit refers to the Nth req_type. + * + * Setting Nth bit to 1 indicates that requests from the + * VF driver with req_type equal to N shall be forwarded to + * the parent PF driver. + * + * This field is not valid for the VF driver. + */ + uint32_t vf_req_fwd[8]; + /* + * This is a 256-bit bit mask provided by the function driver + * (PF or VF driver) to indicate the list of asynchronous event + * completions to be forwarded. + * + * Nth bit refers to the Nth event_id. + * + * Setting Nth bit to 1 by the function driver shall result in + * the HWRM forwarding asynchronous event completion with + * event_id equal to N. + * + * If all bits are set to 0 (value of 0), then the HWRM shall + * not forward any asynchronous event completion to this + * function driver. + */ + uint32_t async_event_fwd[8]; + /* This is the 16bit major version of the driver. */ + uint16_t ver_maj; + /* This is the 16bit minor version of the driver. */ + uint16_t ver_min; + /* This is the 16bit update version of the driver. */ + uint16_t ver_upd; + /* This is the 16bit patch version of the driver. */ + uint16_t ver_patch; +} __attribute__((packed)); + +/* hwrm_func_drv_rgtr_output (size:128b/16B) */ +struct hwrm_func_drv_rgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************ + * hwrm_func_drv_unrgtr * + ************************/ + + +/* hwrm_func_drv_unrgtr_input (size:192b/24B) */ +struct hwrm_func_drv_unrgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When this bit is '1', the function driver is notifying + * the HWRM to prepare for the shutdown. 
+ */ + #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \ + UINT32_C(0x1) + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_func_drv_unrgtr_output (size:128b/16B) */ +struct hwrm_func_drv_unrgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_func_buf_rgtr * + **********************/ + + +/* hwrm_func_buf_rgtr_input (size:1024b/128B) */ +struct hwrm_func_buf_rgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the vf_id field to be + * configured. + */ + #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1) + /* + * This bit must be '1' for the err_buf_addr field to be + * configured. + */ + #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR UINT32_C(0x2) + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t vf_id; + /* + * This field represents the number of pages used for request + * buffer(s). + */ + uint16_t req_buf_num_pages; + /* + * This field represents the page size used for request + * buffer(s). + */ + uint16_t req_buf_page_size; + /* 16 bytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_16B UINT32_C(0x4) + /* 4 Kbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4K UINT32_C(0xc) + /* 8 Kbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_8K UINT32_C(0xd) + /* 64 Kbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_64K UINT32_C(0x10) + /* 2 Mbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_2M UINT32_C(0x15) + /* 4 Mbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4M UINT32_C(0x16) + /* 1 Gbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_1G UINT32_C(0x1e) + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_LAST \ + HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_1G + /* The length of the request buffer per VF in bytes. */ + uint16_t req_buf_len; + /* The length of the response buffer in bytes. 
*/ + uint16_t resp_buf_len; + uint8_t unused_0[2]; + /* This field represents the page address of page #0. */ + uint64_t req_buf_page_addr0; + /* This field represents the page address of page #1. */ + uint64_t req_buf_page_addr1; + /* This field represents the page address of page #2. */ + uint64_t req_buf_page_addr2; + /* This field represents the page address of page #3. */ + uint64_t req_buf_page_addr3; + /* This field represents the page address of page #4. */ + uint64_t req_buf_page_addr4; + /* This field represents the page address of page #5. */ + uint64_t req_buf_page_addr5; + /* This field represents the page address of page #6. */ + uint64_t req_buf_page_addr6; + /* This field represents the page address of page #7. */ + uint64_t req_buf_page_addr7; + /* This field represents the page address of page #8. */ + uint64_t req_buf_page_addr8; + /* This field represents the page address of page #9. */ + uint64_t req_buf_page_addr9; + /* + * This field is used to receive the error reporting from + * the chipset. Only applicable for PFs. + */ + uint64_t error_buf_addr; + /* + * This field is used to receive the response forwarded by the + * HWRM. + */ + uint64_t resp_buf_addr; +} __attribute__((packed)); + +/* hwrm_func_buf_rgtr_output (size:128b/16B) */ +struct hwrm_func_buf_rgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************ + * hwrm_func_buf_unrgtr * + ************************/ + + +/* hwrm_func_buf_unrgtr_input (size:192b/24B) */ +struct hwrm_func_buf_unrgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the vf_id field to be + * configured. + */ + #define HWRM_FUNC_BUF_UNRGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1) + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. 
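+ * That is, the same numeric VF ID under two different PFs
+ * refers to two distinct VFs.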
+ */ + uint16_t vf_id; + uint8_t unused_0[2]; +} __attribute__((packed)); + +/* hwrm_func_buf_unrgtr_output (size:128b/16B) */ +struct hwrm_func_buf_unrgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_func_drv_qver * + **********************/ + + +/* hwrm_func_drv_qver_input (size:192b/24B) */ +struct hwrm_func_drv_qver_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Reserved for future use. */ + uint32_t reserved; + /* + * Function ID of the function that is being queried. + * 0xFF... (All Fs) if the query is for the requesting + * function. + */ + uint16_t fid; + uint8_t unused_0[2]; +} __attribute__((packed)); + +/* hwrm_func_drv_qver_output (size:192b/24B) */ +struct hwrm_func_drv_qver_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value indicates the type of OS. The values are based on CIM_OperatingSystem.mof file as published by the DMTF. */ + uint16_t os_type; + /* Unknown */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UNKNOWN UINT32_C(0x0) + /* Other OS not listed below. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_OTHER UINT32_C(0x1) + /* MSDOS OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_MSDOS UINT32_C(0xe) + /* Windows OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WINDOWS UINT32_C(0x12) + /* Solaris OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_SOLARIS UINT32_C(0x1d) + /* Linux OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_LINUX UINT32_C(0x24) + /* FreeBSD OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_FREEBSD UINT32_C(0x2a) + /* VMware ESXi OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_ESXI UINT32_C(0x68) + /* Microsoft Windows 8 64-bit OS. 
*/ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WIN864 UINT32_C(0x73) + /* Microsoft Windows Server 2012 R2 OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74) + /* UEFI driver. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UEFI UINT32_C(0x8000) + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_LAST \ + HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UEFI + /* This is the 8bit major version of the driver. */ + uint8_t ver_maj_8b; + /* This is the 8bit minor version of the driver. */ + uint8_t ver_min_8b; + /* This is the 8bit update version of the driver. */ + uint8_t ver_upd_8b; + uint8_t unused_0[2]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; + /* This is the 16bit major version of the driver. */ + uint16_t ver_maj; + /* This is the 16bit minor version of the driver. */ + uint16_t ver_min; + /* This is the 16bit update version of the driver. */ + uint16_t ver_upd; + /* This is the 16bit patch version of the driver. */ + uint16_t ver_patch; +} __attribute__((packed)); + +/**************************** + * hwrm_func_resource_qcaps * + ****************************/ + + +/* hwrm_func_resource_qcaps_input (size:192b/24B) */ +struct hwrm_func_resource_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being queried. + * 0xFF... (All Fs) if the query is for the requesting + * function. + */ + uint16_t fid; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_func_resource_qcaps_output (size:448b/56B) */ +struct hwrm_func_resource_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Maximum guaranteed number of VFs supported by PF. Not applicable for VFs. */ + uint16_t max_vfs; + /* Maximum guaranteed number of MSI-X vectors supported by function */ + uint16_t max_msix; + /* Hint of strategy to be used by PF driver to reserve resources for its VF */ + uint16_t vf_reservation_strategy; + /* The PF driver should evenly divide its remaining resources among all VFs. 
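+ *
+ * Usage sketch (illustrative only): a PF driver typically branches on
+ * this hint when deciding how much to set aside per VF up front.
+ * `resp` is an assumed local copy of this structure, the strategy
+ * value is defined just below, and the called function is a
+ * placeholder for driver policy:
+ *
+ *     if (rte_le_to_cpu_16(resp.vf_reservation_strategy) ==
+ *         HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC)
+ *             defer_vf_resource_reservation();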
*/ + #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL \ + UINT32_C(0x0) + /* The PF driver should only reserve minimal resources for each VF. */ + #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL \ + UINT32_C(0x1) + /* + * The PF driver should not reserve any resources for each VF until the + * VF interface is brought up. + */ + #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC \ + UINT32_C(0x2) + #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_LAST \ + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC + /* Minimum guaranteed number of RSS/COS contexts */ + uint16_t min_rsscos_ctx; + /* Maximum non-guaranteed number of RSS/COS contexts */ + uint16_t max_rsscos_ctx; + /* Minimum guaranteed number of completion rings */ + uint16_t min_cmpl_rings; + /* Maximum non-guaranteed number of completion rings */ + uint16_t max_cmpl_rings; + /* Minimum guaranteed number of transmit rings */ + uint16_t min_tx_rings; + /* Maximum non-guaranteed number of transmit rings */ + uint16_t max_tx_rings; + /* Minimum guaranteed number of receive rings */ + uint16_t min_rx_rings; + /* Maximum non-guaranteed number of receive rings */ + uint16_t max_rx_rings; + /* Minimum guaranteed number of L2 contexts */ + uint16_t min_l2_ctxs; + /* Maximum non-guaranteed number of L2 contexts */ + uint16_t max_l2_ctxs; + /* Minimum guaranteed number of VNICs */ + uint16_t min_vnics; + /* Maximum non-guaranteed number of VNICs */ + uint16_t max_vnics; + /* Minimum guaranteed number of statistic contexts */ + uint16_t min_stat_ctx; + /* Maximum non-guaranteed number of statistic contexts */ + uint16_t max_stat_ctx; + /* Minimum guaranteed number of ring groups */ + uint16_t min_hw_ring_grps; + /* Maximum non-guaranteed number of ring groups */ + uint16_t max_hw_ring_grps; + /* + * Maximum number of inputs into the transmit scheduler for this function. + * The number of TX rings assigned to the function cannot exceed this value. + */ + uint16_t max_tx_scheduler_inputs; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/***************************** + * hwrm_func_vf_resource_cfg * + *****************************/ + + +/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */ +struct hwrm_func_vf_resource_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written.
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* VF ID that is being configured by PF */ + uint16_t vf_id; + /* Maximum guaranteed number of MSI-X vectors for the function */ + uint16_t max_msix; + /* Minimum guaranteed number of RSS/COS contexts */ + uint16_t min_rsscos_ctx; + /* Maximum non-guaranteed number of RSS/COS contexts */ + uint16_t max_rsscos_ctx; + /* Minimum guaranteed number of completion rings */ + uint16_t min_cmpl_rings; + /* Maximum non-guaranteed number of completion rings */ + uint16_t max_cmpl_rings; + /* Minimum guaranteed number of transmit rings */ + uint16_t min_tx_rings; + /* Maximum non-guaranteed number of transmit rings */ + uint16_t max_tx_rings; + /* Minimum guaranteed number of receive rings */ + uint16_t min_rx_rings; + /* Maximum non-guaranteed number of receive rings */ + uint16_t max_rx_rings; + /* Minimum guaranteed number of L2 contexts */ + uint16_t min_l2_ctxs; + /* Maximum non-guaranteed number of L2 contexts */ + uint16_t max_l2_ctxs; + /* Minimum guaranteed number of VNICs */ + uint16_t min_vnics; + /* Maximum non-guaranteed number of VNICs */ + uint16_t max_vnics; + /* Minimum guaranteed number of statistic contexts */ + uint16_t min_stat_ctx; + /* Maximum non-guaranteed number of statistic contexts */ + uint16_t max_stat_ctx; + /* Minimum guaranteed number of ring groups */ + uint16_t min_hw_ring_grps; + /* Maximum non-guaranteed number of ring groups */ + uint16_t max_hw_ring_grps; + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */ +struct hwrm_func_vf_resource_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Reserved number of RSS/COS contexts */ + uint16_t reserved_rsscos_ctx; + /* Reserved number of completion rings */ + uint16_t reserved_cmpl_rings; + /* Reserved number of transmit rings */ + uint16_t reserved_tx_rings; + /* Reserved number of receive rings */ + uint16_t reserved_rx_rings; + /* Reserved number of L2 contexts */ + uint16_t reserved_l2_ctxs; + /* Reserved number of VNICs */ + uint16_t reserved_vnics; + /* Reserved number of statistic contexts */ + uint16_t reserved_stat_ctx; + /* Reserved number of ring groups */ + uint16_t reserved_hw_ring_grps; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************************* + * hwrm_func_backing_store_qcaps * + *********************************/ + + +/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __attribute__((packed)); + +/* hwrm_func_backing_store_qcaps_output (size:512b/64B) */ +struct hwrm_func_backing_store_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Maximum number of QP context entries supported for this function. */ + uint32_t qp_max_entries; + /* + * Minimum number of QP context entries that are needed to be reserved + * for QP1 for the PF and its VFs. PF drivers must allocate at least + * this many QP context entries, even if RoCE will not be used. + */ + uint16_t qp_min_qp1_entries; + /* Maximum number of QP context entries that can be used for L2. */ + uint16_t qp_max_l2_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t qp_entry_size; + /* Maximum number of SRQ context entries that can be used for L2. */ + uint16_t srq_max_l2_entries; + /* Maximum number of SRQ context entries supported for this function. */ + uint32_t srq_max_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t srq_entry_size; + /* Maximum number of CQ context entries that can be used for L2. */ + uint16_t cq_max_l2_entries; + /* Maximum number of CQ context entries supported for this function. */ + uint32_t cq_max_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t cq_entry_size; + /* Maximum number of VNIC context entries supported for this function. */ + uint16_t vnic_max_vnic_entries; + /* Maximum number of Ring table context entries supported for this function. */ + uint16_t vnic_max_ring_table_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t vnic_entry_size; + /* Maximum number of statistic context entries supported for this function. */ + uint32_t stat_max_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t stat_entry_size; + /* Maximum number of TQM context entries supported per ring. */ + uint16_t tqm_max_entries_per_ring; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t tqm_entry_size; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t mrav_entry_size; + /* Maximum number of MR/AV context entries supported for this function. */ + uint32_t mrav_max_entries; + /* Maximum number of Timer context entries supported for this function. */ + uint32_t tim_max_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t tim_entry_size; + uint8_t unused_0; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************************* + * hwrm_func_backing_store_cfg * + *******************************/ + + +/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */ +struct hwrm_func_backing_store_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When set, the firmware only uses on-chip resources and does not + * expect any backing store to be provided by the host driver. This + * mode provides minimal L2 functionality (e.g. limited L2 resources, + * no RoCE). + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_FLAGS_PREBOOT_MODE \ + UINT32_C(0x1) + uint32_t enables; + /* + * This bit must be '1' for the qp fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP \ + UINT32_C(0x1) + /* + * This bit must be '1' for the srq fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ \ + UINT32_C(0x2) + /* + * This bit must be '1' for the cq fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ \ + UINT32_C(0x4) + /* + * This bit must be '1' for the vnic fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC \ + UINT32_C(0x8) + /* + * This bit must be '1' for the stat fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT \ + UINT32_C(0x10) + /* + * This bit must be '1' for the tqm_sp fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP \ + UINT32_C(0x20) + /* + * This bit must be '1' for the tqm_ring0 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING0 \ + UINT32_C(0x40) + /* + * This bit must be '1' for the tqm_ring1 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING1 \ + UINT32_C(0x80) + /* + * This bit must be '1' for the tqm_ring2 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING2 \ + UINT32_C(0x100) + /* + * This bit must be '1' for the tqm_ring3 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING3 \ + UINT32_C(0x200) + /* + * This bit must be '1' for the tqm_ring4 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING4 \ + UINT32_C(0x400) + /* + * This bit must be '1' for the tqm_ring5 fields to be + * configured. 
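+ *
+ * Usage sketch (illustrative only): the host driver typically sizes
+ * each table from a prior HWRM_FUNC_BACKING_STORE_QCAPS response (for
+ * example, qp_max_l2_entries x qp_entry_size bytes for the QP table),
+ * allocates the backing memory, and then sets one enable bit per table
+ * it provides, e.g.:
+ *
+ *     req.enables = rte_cpu_to_le_32(
+ *             HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |
+ *             HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ);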
+ */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING5 \ + UINT32_C(0x800) + /* + * This bit must be '1' for the tqm_ring6 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING6 \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the tqm_ring7 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING7 \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the mrav fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the tim fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM \ + UINT32_C(0x8000) + /* QPC page size and level. */ + uint8_t qpc_pg_size_qpc_lvl; + /* QPC PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2 + /* QPC page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_1G + /* SRQ page size and level. */ + uint8_t srq_pg_size_srq_lvl; + /* SRQ PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2 + /* SRQ page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_1G + /* CQ page size and level. */ + uint8_t cq_pg_size_cq_lvl; + /* CQ PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2 + /* CQ page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_1G + /* VNIC page size and level. */ + uint8_t vnic_pg_size_vnic_lvl; + /* VNIC PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2 + /* VNIC page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_1G + /* Stat page size and level. */ + uint8_t stat_pg_size_stat_lvl; + /* Stat PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2 + /* Stat page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_1G + /* TQM slow path page size and level. */ + uint8_t tqm_sp_pg_size_tqm_sp_lvl; + /* TQM slow path PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2 + /* TQM slow path page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_1G + /* TQM ring 0 page size and level. */ + uint8_t tqm_ring0_pg_size_tqm_ring0_lvl; + /* TQM ring 0 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2 + /* TQM ring 0 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_1G + /* TQM ring 1 page size and level. */ + uint8_t tqm_ring1_pg_size_tqm_ring1_lvl; + /* TQM ring 1 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2 + /* TQM ring 1 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_1G + /* TQM ring 2 page size and level. */ + uint8_t tqm_ring2_pg_size_tqm_ring2_lvl; + /* TQM ring 2 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2 + /* TQM ring 2 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_1G + /* TQM ring 3 page size and level. */ + uint8_t tqm_ring3_pg_size_tqm_ring3_lvl; + /* TQM ring 3 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2 + /* TQM ring 3 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_1G + /* TQM ring 4 page size and level. */ + uint8_t tqm_ring4_pg_size_tqm_ring4_lvl; + /* TQM ring 4 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2 + /* TQM ring 4 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_1G + /* TQM ring 5 page size and level. */ + uint8_t tqm_ring5_pg_size_tqm_ring5_lvl; + /* TQM ring 5 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2 + /* TQM ring 5 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_1G + /* TQM ring 6 page size and level. */ + uint8_t tqm_ring6_pg_size_tqm_ring6_lvl; + /* TQM ring 6 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2 + /* TQM ring 6 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_1G + /* TQM ring 7 page size and level. */ + uint8_t tqm_ring7_pg_size_tqm_ring7_lvl; + /* TQM ring 7 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2 + /* TQM ring 7 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_1G + /* MR/AV page size and level. */ + uint8_t mrav_pg_size_mrav_lvl; + /* MR/AV PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2 + /* MR/AV page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_1G + /* Timer page size and level. */ + uint8_t tim_pg_size_tim_lvl; + /* Timer PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2 + /* Timer page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_1G + /* QP page directory. 
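+ *
+ * Usage sketch (illustrative only): each *_pg_size_*_lvl byte above
+ * packs the PBL indirection level into bits 3:0 and the page size into
+ * bits 7:4, so a two-level PBL of 4 KB pages for the QP context would
+ * be encoded as:
+ *
+ *     req.qpc_pg_size_qpc_lvl =
+ *             HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_4K |
+ *             HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2;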
*/ + uint64_t qpc_page_dir; + /* SRQ page directory. */ + uint64_t srq_page_dir; + /* CQ page directory. */ + uint64_t cq_page_dir; + /* VNIC page directory. */ + uint64_t vnic_page_dir; + /* Stat page directory. */ + uint64_t stat_page_dir; + /* TQM slowpath page directory. */ + uint64_t tqm_sp_page_dir; + /* TQM ring 0 page directory. */ + uint64_t tqm_ring0_page_dir; + /* TQM ring 1 page directory. */ + uint64_t tqm_ring1_page_dir; + /* TQM ring 2 page directory. */ + uint64_t tqm_ring2_page_dir; + /* TQM ring 3 page directory. */ + uint64_t tqm_ring3_page_dir; + /* TQM ring 4 page directory. */ + uint64_t tqm_ring4_page_dir; + /* TQM ring 5 page directory. */ + uint64_t tqm_ring5_page_dir; + /* TQM ring 6 page directory. */ + uint64_t tqm_ring6_page_dir; + /* TQM ring 7 page directory. */ + uint64_t tqm_ring7_page_dir; + /* MR/AV page directory. */ + uint64_t mrav_page_dir; + /* Timer page directory. */ + uint64_t tim_page_dir; + /* Number of QPs. */ + uint32_t qp_num_entries; + /* Number of SRQs. */ + uint32_t srq_num_entries; + /* Number of CQs. */ + uint32_t cq_num_entries; + /* Number of Stats. */ + uint32_t stat_num_entries; + /* Number of TQM slowpath entries. */ + uint32_t tqm_sp_num_entries; + /* Number of TQM ring 0 entries. */ + uint32_t tqm_ring0_num_entries; + /* Number of TQM ring 1 entries. */ + uint32_t tqm_ring1_num_entries; + /* Number of TQM ring 2 entries. */ + uint32_t tqm_ring2_num_entries; + /* Number of TQM ring 3 entries. */ + uint32_t tqm_ring3_num_entries; + /* Number of TQM ring 4 entries. */ + uint32_t tqm_ring4_num_entries; + /* Number of TQM ring 5 entries. */ + uint32_t tqm_ring5_num_entries; + /* Number of TQM ring 6 entries. */ + uint32_t tqm_ring6_num_entries; + /* Number of TQM ring 7 entries. */ + uint32_t tqm_ring7_num_entries; + /* Number of MR/AV entries. */ + uint32_t mrav_num_entries; + /* Number of Timer entries. */ + uint32_t tim_num_entries; + /* Number of entries to reserve for QP1 */ + uint16_t qp_num_qp1_entries; + /* Number of entries to reserve for L2 */ + uint16_t qp_num_l2_entries; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t qp_entry_size; + /* Number of entries to reserve for L2 */ + uint16_t srq_num_l2_entries; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t srq_entry_size; + /* Number of entries to reserve for L2 */ + uint16_t cq_num_l2_entries; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t cq_entry_size; + /* Number of entries to reserve for VNIC entries */ + uint16_t vnic_num_vnic_entries; + /* Number of entries to reserve for Ring table entries */ + uint16_t vnic_num_ring_table_entries; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t vnic_entry_size; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t stat_entry_size; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t tqm_entry_size; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t mrav_entry_size; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t tim_entry_size; +} __attribute__((packed)); + +/* hwrm_func_backing_store_cfg_output (size:128b/16B) */ +struct hwrm_func_backing_store_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. 
*/ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************************** + * hwrm_func_backing_store_qcfg * + ********************************/ + + +/* hwrm_func_backing_store_qcfg_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __attribute__((packed)); + +/* hwrm_func_backing_store_qcfg_output (size:1920b/240B) */ +struct hwrm_func_backing_store_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* + * When set, the firmware only uses on-chip resources and does not + * expect any backing store to be provided by the host driver. This + * mode provides minimal L2 functionality (e.g. limited L2 resources, + * no RoCE). + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_FLAGS_PREBOOT_MODE \ + UINT32_C(0x1) + uint8_t unused_0[4]; + /* + * This bit must be '1' for the qp fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_QP \ + UINT32_C(0x1) + /* + * This bit must be '1' for the srq fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_SRQ \ + UINT32_C(0x2) + /* + * This bit must be '1' for the cq fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_CQ \ + UINT32_C(0x4) + /* + * This bit must be '1' for the vnic fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_VNIC \ + UINT32_C(0x8) + /* + * This bit must be '1' for the stat fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_STAT \ + UINT32_C(0x10) + /* + * This bit must be '1' for the tqm_sp fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_SP \ + UINT32_C(0x20) + /* + * This bit must be '1' for the tqm_ring0 fields to be + * configured. 
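+ *
+ * Usage sketch (illustrative only, with `resp` an assumed local copy
+ * of this structure): the packed page-size/level bytes below decode
+ * with the corresponding mask and shift macros, e.g. for the QP
+ * context:
+ *
+ *     uint8_t lvl = resp.qpc_pg_size_qpc_lvl &
+ *             HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_MASK;
+ *     uint8_t pg_size = (resp.qpc_pg_size_qpc_lvl &
+ *             HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_MASK) >>
+ *             HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_SFT;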
+ */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING0 \ + UINT32_C(0x40) + /* + * This bit must be '1' for the tqm_ring1 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING1 \ + UINT32_C(0x80) + /* + * This bit must be '1' for the tqm_ring2 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING2 \ + UINT32_C(0x100) + /* + * This bit must be '1' for the tqm_ring3 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING3 \ + UINT32_C(0x200) + /* + * This bit must be '1' for the tqm_ring4 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING4 \ + UINT32_C(0x400) + /* + * This bit must be '1' for the tqm_ring5 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING5 \ + UINT32_C(0x800) + /* + * This bit must be '1' for the tqm_ring6 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING6 \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the tqm_ring7 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING7 \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the mrav fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_MRAV \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the tim fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TIM \ + UINT32_C(0x8000) + /* QPC page size and level. */ + uint8_t qpc_pg_size_qpc_lvl; + /* QPC PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2 + /* QPC page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_1G + /* SRQ page size and level. */ + uint8_t srq_pg_size_srq_lvl; + /* SRQ PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_SFT 0 + /* PBL pointer is physical start address. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2 + /* SRQ page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_1G + /* CQ page size and level. */ + uint8_t cq_pg_size_cq_lvl; + /* CQ PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2 + /* CQ page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_1G + /* VNIC page size and level. */ + uint8_t vnic_pg_size_vnic_lvl; + /* VNIC PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2 + /* VNIC page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_1G + /* Stat page size and level. */ + uint8_t stat_pg_size_stat_lvl; + /* Stat PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2 + /* Stat page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_1G + /* TQM slow path page size and level. */ + uint8_t tqm_sp_pg_size_tqm_sp_lvl; + /* TQM slow path PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2 + /* TQM slow path page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_1G + /* TQM ring 0 page size and level. */ + uint8_t tqm_ring0_pg_size_tqm_ring0_lvl; + /* TQM ring 0 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2 + /* TQM ring 0 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_1G + /* TQM ring 1 page size and level. */ + uint8_t tqm_ring1_pg_size_tqm_ring1_lvl; + /* TQM ring 1 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2 + /* TQM ring 1 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_1G + /* TQM ring 2 page size and level. */ + uint8_t tqm_ring2_pg_size_tqm_ring2_lvl; + /* TQM ring 2 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2 + /* TQM ring 2 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_1G + /* TQM ring 3 page size and level. */ + uint8_t tqm_ring3_pg_size_tqm_ring3_lvl; + /* TQM ring 3 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2 + /* TQM ring 3 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_1G + /* TQM ring 4 page size and level. */ + uint8_t tqm_ring4_pg_size_tqm_ring4_lvl; + /* TQM ring 4 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2 + /* TQM ring 4 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_1G + /* TQM ring 5 page size and level. */ + uint8_t tqm_ring5_pg_size_tqm_ring5_lvl; + /* TQM ring 5 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_SFT 0 + /* PBL pointer is physical start address. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2 + /* TQM ring 5 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_1G + /* TQM ring 6 page size and level. */ + uint8_t tqm_ring6_pg_size_tqm_ring6_lvl; + /* TQM ring 6 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2 + /* TQM ring 6 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_1G + /* TQM ring 7 page size and level. */ + uint8_t tqm_ring7_pg_size_tqm_ring7_lvl; + /* TQM ring 7 PBL indirect levels. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2 + /* TQM ring 7 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_1G + /* MR/AV page size and level. */ + uint8_t mrav_pg_size_mrav_lvl; + /* MR/AV PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2 + /* MR/AV page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_1G + /* Timer page size and level. */ + uint8_t tim_pg_size_tim_lvl; + /* Timer PBL indirect levels. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2 + /* Timer page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_1G + /* QP page directory. */ + uint64_t qpc_page_dir; + /* SRQ page directory. */ + uint64_t srq_page_dir; + /* CQ page directory. */ + uint64_t cq_page_dir; + /* VNIC page directory. */ + uint64_t vnic_page_dir; + /* Stat page directory. */ + uint64_t stat_page_dir; + /* TQM slowpath page directory. */ + uint64_t tqm_sp_page_dir; + /* TQM ring 0 page directory. */ + uint64_t tqm_ring0_page_dir; + /* TQM ring 1 page directory. */ + uint64_t tqm_ring1_page_dir; + /* TQM ring 2 page directory. */ + uint64_t tqm_ring2_page_dir; + /* TQM ring 3 page directory. */ + uint64_t tqm_ring3_page_dir; + /* TQM ring 4 page directory. */ + uint64_t tqm_ring4_page_dir; + /* TQM ring 5 page directory. */ + uint64_t tqm_ring5_page_dir; + /* TQM ring 6 page directory. */ + uint64_t tqm_ring6_page_dir; + /* TQM ring 7 page directory. */ + uint64_t tqm_ring7_page_dir; + /* MR/AV page directory. */ + uint64_t mrav_page_dir; + /* Timer page directory. */ + uint64_t tim_page_dir; + /* Number of entries to reserve for QP1 */ + uint16_t qp_num_qp1_entries; + /* Number of entries to reserve for L2 */ + uint16_t qp_num_l2_entries; + /* Number of QPs. */ + uint32_t qp_num_entries; + /* Number of SRQs. */ + uint32_t srq_num_entries; + /* Number of entries to reserve for L2 */ + uint16_t srq_num_l2_entries; + /* Number of entries to reserve for L2 */ + uint16_t cq_num_l2_entries; + /* Number of CQs. */ + uint32_t cq_num_entries; + /* Number of entries to reserve for VNIC entries */ + uint16_t vnic_num_vnic_entries; + /* Number of entries to reserve for Ring table entries */ + uint16_t vnic_num_ring_table_entries; + /* Number of Stats. */ + uint32_t stat_num_entries; + /* Number of TQM slowpath entries. */ + uint32_t tqm_sp_num_entries; + /* Number of TQM ring 0 entries. */ + uint32_t tqm_ring0_num_entries; + /* Number of TQM ring 1 entries. */ + uint32_t tqm_ring1_num_entries; + /* Number of TQM ring 2 entries. */ + uint32_t tqm_ring2_num_entries; + /* Number of TQM ring 3 entries. 
*/
+	uint32_t tqm_ring3_num_entries;
+	/* Number of TQM ring 4 entries. */
+	uint32_t tqm_ring4_num_entries;
+	/* Number of TQM ring 5 entries. */
+	uint32_t tqm_ring5_num_entries;
+	/* Number of TQM ring 6 entries. */
+	uint32_t tqm_ring6_num_entries;
+	/* Number of TQM ring 7 entries. */
+	uint32_t tqm_ring7_num_entries;
+	/* Number of MR/AV entries. */
+	uint32_t mrav_num_entries;
+	/* Number of Timer entries. */
+	uint32_t tim_num_entries;
+	uint8_t unused_1[7];
+	/*
+	 * This field is used in Output records to indicate that the output
+	 * is completely written to RAM. This field should be read as '1'
+	 * to indicate that the output has been completely written.
+	 * When writing a command completion or response to an internal processor,
+	 * the order of writes has to be such that this field is written last.
+	 */
+	uint8_t valid;
+} __attribute__((packed));
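+
+/*
+ * A minimal decoding sketch (illustrative only; the helper name is not an
+ * upstream symbol): each *_pg_size_*_lvl byte in the response above packs
+ * the PBL indirect level in bits 3:0 and the page size in bits 7:4, so a
+ * consumer splits it with the corresponding _MASK/_SFT constants. Shown
+ * here for the QPC field; the other fields decode the same way.
+ */
+static inline void
+bnxt_parse_qpc_pg_size_lvl(const struct hwrm_func_backing_store_qcfg_output *resp,
+	uint8_t *lvl, uint8_t *pg_size)
+{
+	/* PBL indirect level (LVL_0..LVL_2) from bits 3:0. */
+	*lvl = (resp->qpc_pg_size_qpc_lvl &
+		HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_MASK) >>
+		HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_SFT;
+	/*
+	 * Page size field from bits 7:4; compare against the PG_* values
+	 * directly, as those encodings are already shifted by PG_SIZE_SFT.
+	 */
+	*pg_size = resp->qpc_pg_size_qpc_lvl &
+		HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_MASK;
+}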
+
+/*********************
+ * hwrm_port_phy_cfg *
+ *********************/
+
+
+/* hwrm_port_phy_cfg_input (size:448b/56B) */
+struct hwrm_port_phy_cfg_input {
+	/* The HWRM command request type. */
+	uint16_t req_type;
+	/*
+	 * The completion ring to send the completion event on. This should
+	 * be the NQ ID returned from the `nq_alloc` HWRM command.
+	 */
+	uint16_t cmpl_ring;
+	/*
+	 * The sequence ID is used by the driver for tracking multiple
+	 * commands. This ID is treated as opaque data by the firmware and
+	 * the value is returned in the `hwrm_resp_hdr` upon completion.
+	 */
+	uint16_t seq_id;
+	/*
+	 * The target ID of the command:
+	 * * 0x0-0xFFF8 - The function ID
+	 * * 0xFFF8-0xFFFE - Reserved for internal processors
+	 * * 0xFFFF - HWRM
+	 */
+	uint16_t target_id;
+	/*
+	 * A physical address pointer pointing to a host buffer that the
+	 * command's response data will be written. This can be either a host
+	 * physical address (HPA) or a guest physical address (GPA) and must
+	 * point to a physically contiguous block of memory.
+	 */
+	uint64_t resp_addr;
+	uint32_t flags;
+	/*
+	 * When this bit is set to '1', the PHY for the port shall
+	 * be reset.
+	 *
+	 * # If this bit is set to 1, then the HWRM shall reset the
+	 * PHY after applying PHY configuration changes specified
+	 * in this command.
+	 * # In order to guarantee that PHY configuration changes
+	 * specified in this command take effect, the HWRM
+	 * client should set this flag to 1.
+	 * # If this bit is not set to 1, then the HWRM may reset
+	 * the PHY depending on the current PHY configuration and
+	 * settings specified in this command.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY \
+		UINT32_C(0x1)
+	/* Deprecated bit. Do not use. */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED \
+		UINT32_C(0x2)
+	/*
+	 * When this bit is set to '1', the link shall be forced to
+	 * the force_link_speed value.
+	 *
+	 * When this bit is set to '1', the HWRM client should
+	 * not enable any of the auto-negotiation related
+	 * fields represented by auto_XXX fields in this command.
+	 * When this bit is set to '1' and the HWRM client has
+	 * enabled an auto_XXX field in this command, then the
+	 * HWRM shall ignore the enabled auto_XXX field.
+	 *
+	 * When this bit is set to zero, the link
+	 * shall be allowed to autoneg.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE \
+		UINT32_C(0x4)
+	/*
+	 * When this bit is set to '1', the auto-negotiation process
+	 * shall be restarted on the link.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG \
+		UINT32_C(0x8)
+	/*
+	 * When this bit is set to '1', Energy Efficient Ethernet
+	 * (EEE) is requested to be enabled on this link.
+	 * If EEE is not supported on this port, then this flag
+	 * shall be ignored by the HWRM.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE \
+		UINT32_C(0x10)
+	/*
+	 * When this bit is set to '1', Energy Efficient Ethernet
+	 * (EEE) is requested to be disabled on this link.
+	 * If EEE is not supported on this port, then this flag
+	 * shall be ignored by the HWRM.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE \
+		UINT32_C(0x20)
+	/*
+	 * When this bit is set to '1' and EEE is enabled on this
+	 * link, then TX LPI is requested to be enabled on the link.
+	 * If EEE is not supported on this port, then this flag
+	 * shall be ignored by the HWRM.
+	 * If EEE is disabled on this port, then this flag shall be
+	 * ignored by the HWRM.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_ENABLE \
+		UINT32_C(0x40)
+	/*
+	 * When this bit is set to '1' and EEE is enabled on this
+	 * link, then TX LPI is requested to be disabled on the link.
+	 * If EEE is not supported on this port, then this flag
+	 * shall be ignored by the HWRM.
+	 * If EEE is disabled on this port, then this flag shall be
+	 * ignored by the HWRM.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE \
+		UINT32_C(0x80)
+	/*
+	 * When set to 1, then the HWRM shall enable FEC autonegotiation
+	 * on this port if supported.
+	 * When set to 0, then this flag shall be ignored.
+	 * If FEC autonegotiation is not supported, then the HWRM shall ignore this
+	 * flag.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE \
+		UINT32_C(0x100)
+	/*
+	 * When set to 1, then the HWRM shall disable FEC autonegotiation
+	 * on this port if supported.
+	 * When set to 0, then this flag shall be ignored.
+	 * If FEC autonegotiation is not supported, then the HWRM shall ignore this
+	 * flag.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE \
+		UINT32_C(0x200)
+	/*
+	 * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire Code)
+	 * on this port if supported.
+	 * When set to 0, then this flag shall be ignored.
+	 * If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this
+	 * flag.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE \
+		UINT32_C(0x400)
+	/*
+	 * When set to 1, then the HWRM shall disable FEC CLAUSE 74 (Fire Code)
+	 * on this port if supported.
+	 * When set to 0, then this flag shall be ignored.
+	 * If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this
+	 * flag.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE \
+		UINT32_C(0x800)
+	/*
+	 * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed Solomon)
+	 * on this port if supported.
+	 * When set to 0, then this flag shall be ignored.
+	 * If FEC CLAUSE 91 is not supported, then the HWRM shall ignore this
+	 * flag.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE \
+		UINT32_C(0x1000)
+	/*
+	 * When set to 1, then the HWRM shall disable FEC CLAUSE 91 (Reed Solomon)
+	 * on this port if supported.
+	 * When set to 0, then this flag shall be ignored.
+	 * If FEC CLAUSE 91 is not supported, then the HWRM shall ignore this
+	 * flag.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE \
+		UINT32_C(0x2000)
+	/*
+	 * When this bit is set to '1', the link shall be forced to
+	 * be taken down.
+	 *
+	 * # When this bit is set to '1', all other
+	 * command input settings related to the link speed shall
+	 * be ignored.
+	 * Once the link state is forced down, it can be
+	 * explicitly cleared from that state by setting this flag
+	 * to '0'.
+	 * # If this flag is set to '0', then the link shall be
+	 * cleared from forced down state if the link is in forced
+	 * down state.
+	 * There may be conditions (e.g. out-of-band or sideband
+	 * configuration changes for the link) outside the scope
+	 * of the HWRM implementation that may clear forced down
+	 * link state.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN \
+		UINT32_C(0x4000)
+	uint32_t enables;
+	/*
+	 * This bit must be '1' for the auto_mode field to be
+	 * configured.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE \
+		UINT32_C(0x1)
+	/*
+	 * This bit must be '1' for the auto_duplex field to be
+	 * configured.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX \
+		UINT32_C(0x2)
+	/*
+	 * This bit must be '1' for the auto_pause field to be
+	 * configured.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE \
+		UINT32_C(0x4)
+	/*
+	 * This bit must be '1' for the auto_link_speed field to be
+	 * configured.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED \
+		UINT32_C(0x8)
+	/*
+	 * This bit must be '1' for the auto_link_speed_mask field to be
+	 * configured.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \
+		UINT32_C(0x10)
+	/*
+	 * This bit must be '1' for the wirespeed field to be
+	 * configured.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIRESPEED \
+		UINT32_C(0x20)
+	/*
+	 * This bit must be '1' for the lpbk field to be
+	 * configured.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK \
+		UINT32_C(0x40)
+	/*
+	 * This bit must be '1' for the preemphasis field to be
+	 * configured.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_PREEMPHASIS \
+		UINT32_C(0x80)
+	/*
+	 * This bit must be '1' for the force_pause field to be
+	 * configured.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE \
+		UINT32_C(0x100)
+	/*
+	 * This bit must be '1' for the eee_link_speed_mask field to be
+	 * configured.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \
+		UINT32_C(0x200)
+	/*
+	 * This bit must be '1' for the tx_lpi_timer field to be
+	 * configured.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER \
+		UINT32_C(0x400)
+	/* Port ID of port that is to be configured. */
+	uint16_t port_id;
+	/*
+	 * This is the speed that will be used if the force
+	 * bit is '1'. If an unsupported speed is selected, an error
+	 * will be generated.
+	 */
+	uint16_t force_link_speed;
+	/* 100Mb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1)
+	/* 1Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
+	/* 2Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
+	/* 2.5Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19)
+	/* 10Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
+	/* 20Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
+	/* 25Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
+	/* 40Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
+	/* 50Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
+	/* 100Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8)
+	/* 10Mb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
+	#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_LAST \
+		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB
+	/*
+	 * This value is used to identify what autoneg mode is
+	 * used when the link speed is not being forced.
+	 */
+	uint8_t auto_mode;
+	/* Disable autoneg or autoneg disabled. No speeds are selected. */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE UINT32_C(0x0)
+	/* Select all possible speeds for autoneg mode. */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
+	/*
+	 * Select only the auto_link_speed speed for autoneg mode. This mode has
+	 * been DEPRECATED. An HWRM client should not use this mode.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
+	/*
+	 * Select the auto_link_speed or any speed below that speed for autoneg.
+	 * This mode has been DEPRECATED. An HWRM client should not use this mode.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
+	/*
+	 * Select the speeds based on the corresponding link speed mask value
+	 * that is provided.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_LAST \
+		HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK
+	/*
+	 * This is the duplex setting that will be used if the autoneg_mode
+	 * is "one_speed" or "one_or_below".
+	 */
+	uint8_t auto_duplex;
+	/* Half Duplex will be requested. */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF UINT32_C(0x0)
+	/* Full duplex will be requested. */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL UINT32_C(0x1)
+	/* Both Half and Full duplex will be requested. */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH UINT32_C(0x2)
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_LAST \
+		HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH
+	/*
+	 * This value is used to configure the pause that will be
+	 * used for autonegotiation.
+	 */
+	uint8_t auto_pause;
+	/*
+	 * When this bit is '1', Generation of tx pause messages
+	 * has been requested. Disabled otherwise.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX \
+		UINT32_C(0x1)
+	/*
+	 * When this bit is '1', Reception of rx pause messages
+	 * has been requested. Disabled otherwise.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX \
+		UINT32_C(0x2)
+	/*
+	 * When set to 1, the advertisement of pause is enabled.
+	 *
+	 * # When the auto_mode is not set to none and this flag is
+	 * set to 1, then the auto_pause bits on this port are being
+	 * advertised and autoneg pause results are being interpreted.
+	 * # When the auto_mode is not set to none and this
+	 * flag is set to 0, the pause is forced as indicated in
+	 * force_pause, and also advertised as auto_pause bits, but
+	 * the autoneg results are not interpreted since the pause
+	 * configuration is being forced.
+	 * # When the auto_mode is set to none and this flag is set to
+	 * 1, auto_pause bits should be ignored and should be set to 0.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE \
+		UINT32_C(0x4)
+	uint8_t unused_0;
+	/*
+	 * This is the speed that will be used if the autoneg_mode
+	 * is "one_speed" or "one_or_below". If an unsupported speed
+	 * is selected, an error will be generated.
+	 */
+	uint16_t auto_link_speed;
+	/* 100Mb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1)
+	/* 1Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa)
+	/* 2Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14)
+	/* 2.5Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19)
+	/* 10Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64)
+	/* 20Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8)
+	/* 25Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa)
+	/* 40Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190)
+	/* 50Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
+	/* 100Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
+	/* 10Mb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_LAST \
+		HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB
+	/*
+	 * This is a mask of link speeds that will be used if
+	 * autoneg_mode is "mask". If an unsupported speed is enabled,
+	 * an error will be generated.
+	 */
+	uint16_t auto_link_speed_mask;
+	/* 100Mb link speed (Half-duplex) */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \
+		UINT32_C(0x1)
+	/* 100Mb link speed (Full-duplex) */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB \
+		UINT32_C(0x2)
+	/* 1Gb link speed (Half-duplex) */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD \
+		UINT32_C(0x4)
+	/* 1Gb link speed (Full-duplex) */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \
+		UINT32_C(0x8)
+	/* 2Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \
+		UINT32_C(0x10)
+	/* 2.5Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \
+		UINT32_C(0x20)
+	/* 10Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB \
+		UINT32_C(0x40)
+	/* 20Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB \
+		UINT32_C(0x80)
+	/* 25Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \
+		UINT32_C(0x100)
+	/* 40Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \
+		UINT32_C(0x200)
+	/* 50Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \
+		UINT32_C(0x400)
+	/* 100Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \
+		UINT32_C(0x800)
+	/* 10Mb link speed (Half-duplex) */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \
+		UINT32_C(0x1000)
+	/* 10Mb link speed (Full-duplex) */
+	#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \
+		UINT32_C(0x2000)
+	/* This value controls the wirespeed feature. */
+	uint8_t wirespeed;
+	/* Wirespeed feature is disabled. */
+	#define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_OFF UINT32_C(0x0)
+	/* Wirespeed feature is enabled. */
+	#define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON UINT32_C(0x1)
+	#define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_LAST \
+		HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON
+	/* This value controls the loopback setting for the PHY. */
+	uint8_t lpbk;
+	/* No loopback is selected. Normal operation. */
+	#define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE UINT32_C(0x0)
+	/*
+	 * The HW will be configured with local loopback such that
+	 * host data is sent back to the host without modification.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1)
+	/*
+	 * The HW will be configured with remote loopback such that
+	 * port logic will send packets back out the transmitter that
+	 * are received.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2)
+	/*
+	 * The HW will be configured with external loopback such that
+	 * host data is sent on the transmitter and based on the external
+	 * loopback connection the data will be received without modification.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL UINT32_C(0x3)
+	#define HWRM_PORT_PHY_CFG_INPUT_LPBK_LAST \
+		HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL
+	/*
+	 * This value is used to configure the pause that will be
+	 * used for force mode.
+	 */
+	uint8_t force_pause;
+	/*
+	 * When this bit is '1', Generation of tx pause messages
+	 * is supported. Disabled otherwise.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1)
+	/*
+	 * When this bit is '1', Reception of rx pause messages
+	 * is supported. Disabled otherwise.
+	 */
+	#define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2)
+	uint8_t unused_1;
+	/*
+	 * This value controls the pre-emphasis to be used for the
+	 * link. Driver should not set this value (use
+	 * enable.preemphasis = 0) unless the driver is sure of the setting.
+	 * Normally HWRM FW will determine proper pre-emphasis.
+	 */
+	uint32_t preemphasis;
+	/*
+	 * Setting for link speed mask that is used to
+	 * advertise speeds during autonegotiation when EEE is enabled.
+	 * This field is valid only when EEE is enabled.
+	 * The speeds specified in this field shall be a subset of
+	 * speeds specified in auto_link_speed_mask.
+	 * If EEE is enabled, then at least one speed shall be provided
+	 * in this mask.
+	 */
+	uint16_t eee_link_speed_mask;
+	/* Reserved */
+	#define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 \
+		UINT32_C(0x1)
+	/* 100Mb link speed (Full-duplex) */
+	#define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB \
+		UINT32_C(0x2)
+	/* Reserved */
+	#define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 \
+		UINT32_C(0x4)
+	/* 1Gb link speed (Full-duplex) */
+	#define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB \
+		UINT32_C(0x8)
+	/* Reserved */
+	#define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 \
+		UINT32_C(0x10)
+	/* Reserved */
+	#define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 \
+		UINT32_C(0x20)
+	/* 10Gb link speed */
+	#define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB \
+		UINT32_C(0x40)
+	uint8_t unused_2[2];
+	/*
+	 * Requested setting of TX LPI timer in microseconds.
+	 * This field is valid only when EEE is enabled and TX LPI is
+	 * enabled.
+	 */
+	uint32_t tx_lpi_timer;
+	#define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff)
+	#define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0
+	uint32_t unused_3;
+} __attribute__((packed));
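+
+/*
+ * A minimal request-population sketch (illustrative; the helper name and
+ * calling convention are assumptions, not upstream API): forcing the link
+ * to 10Gb with a PHY reset, per the FLAGS_FORCE/FLAGS_RESET_PHY semantics
+ * documented above. HWRM fields are little-endian, hence the
+ * rte_cpu_to_le_*() conversions (from <rte_byteorder.h>); filling the
+ * request header (req_type, seq_id, resp_addr, ...) and sending the
+ * message are driver-specific and omitted here.
+ */
+static inline void
+bnxt_fill_phy_cfg_force_10g(struct hwrm_port_phy_cfg_input *req,
+	uint16_t port_id)
+{
+	/*
+	 * In force mode the auto_XXX fields are ignored, so no enables
+	 * bits need to be set for them.
+	 */
+	req->flags = rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE |
+				      HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
+	req->enables = 0;
+	req->port_id = rte_cpu_to_le_16(port_id);
+	req->force_link_speed =
+		rte_cpu_to_le_16(HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB);
+}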
+
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
+struct hwrm_port_phy_cfg_output {
+	/* The specific error status for the command. */
+	uint16_t error_code;
+	/* The HWRM command request type. */
+	uint16_t req_type;
+	/* The sequence ID from the original command. */
+	uint16_t seq_id;
+	/* The length of the response data in number of bytes. */
+	uint16_t resp_len;
+	uint8_t unused_0[7];
+	/*
+	 * This field is used in Output records to indicate that the output
+	 * is completely written to RAM. This field should be read as '1'
+	 * to indicate that the output has been completely written.
+	 * When writing a command completion or response to an internal processor,
+	 * the order of writes has to be such that this field is written last.
+	 */
+	uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_cfg_cmd_err {
+	/*
+	 * Command specific error codes that go to
+	 * the cmd_err field in Common HWRM Error Response.
+	 */
+	uint8_t code;
+	/* Unknown error */
+	#define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+	/* Unable to complete operation due to invalid speed */
+	#define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED UINT32_C(0x1)
+	/*
+	 * Retry the command since the PHY is not ready.
+	 * The retry count is returned in opaque_0.
+	 * This is only valid for the first command; the value will not
+	 * change for successive calls, but if a 0 is returned at any
+	 * time then this should be treated as an unrecoverable failure.
+	 *
+	 * The retry interval in milliseconds is returned in opaque_1.
+	 * This specifies the time that the user should wait before
+	 * issuing the next port_phy_cfg command.
+	 */
+	#define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY UINT32_C(0x2)
+	#define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_LAST \
+		HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY
+	uint8_t unused_0[7];
+} __attribute__((packed));
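+
+/*
+ * A minimal completion-polling sketch (illustrative; the helper name,
+ * timeout policy, and use of <rte_cycles.h>/<rte_atomic.h> are
+ * assumptions): per the `valid` contract documented above, firmware
+ * writes that byte last, so a driver can poll it to learn that the rest
+ * of the response is stable in host memory before reading other fields.
+ */
+static inline int
+bnxt_wait_phy_cfg_resp(const struct hwrm_port_phy_cfg_output *resp,
+	int retries)
+{
+	while (retries-- > 0) {
+		/* Volatile read so the byte is re-loaded on every pass. */
+		if (*(const volatile uint8_t *)&resp->valid == 1) {
+			/* Order the valid read before any payload reads. */
+			rte_io_rmb();
+			return 0;
+		}
+		rte_delay_us(1);
+	}
+	return -1; /* response never became valid */
+}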
+
+/**********************
+ * hwrm_port_phy_qcfg *
+ **********************/
+
+
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
+struct hwrm_port_phy_qcfg_input {
+	/* The HWRM command request type. */
+	uint16_t req_type;
+	/*
+	 * The completion ring to send the completion event on. This should
+	 * be the NQ ID returned from the `nq_alloc` HWRM command.
+	 */
+	uint16_t cmpl_ring;
+	/*
+	 * The sequence ID is used by the driver for tracking multiple
+	 * commands. This ID is treated as opaque data by the firmware and
+	 * the value is returned in the `hwrm_resp_hdr` upon completion.
+	 */
+	uint16_t seq_id;
+	/*
+	 * The target ID of the command:
+	 * * 0x0-0xFFF8 - The function ID
+	 * * 0xFFF8-0xFFFE - Reserved for internal processors
+	 * * 0xFFFF - HWRM
+	 */
+	uint16_t target_id;
+	/*
+	 * A physical address pointer pointing to a host buffer that the
+	 * command's response data will be written. This can be either a host
+	 * physical address (HPA) or a guest physical address (GPA) and must
+	 * point to a physically contiguous block of memory.
+	 */
+	uint64_t resp_addr;
+	/* Port ID of port that is to be queried. */
+	uint16_t port_id;
+	uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_port_phy_qcfg_output (size:768b/96B) */
+struct hwrm_port_phy_qcfg_output {
+	/* The specific error status for the command. */
+	uint16_t error_code;
+	/* The HWRM command request type. */
+	uint16_t req_type;
+	/* The sequence ID from the original command. */
+	uint16_t seq_id;
+	/* The length of the response data in number of bytes. */
+	uint16_t resp_len;
+	/* This value indicates the current link status. */
+	uint8_t link;
+	/* There is no link or cable detected. */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK UINT32_C(0x0)
+	/* There is no link, but a cable has been detected. */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL UINT32_C(0x1)
+	/* There is a link. */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK UINT32_C(0x2)
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LAST \
+		HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK
+	uint8_t unused_0;
+	/* This value indicates the current link speed of the connection. */
+	uint16_t link_speed;
+	/* 100Mb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB UINT32_C(0x1)
+	/* 1Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB UINT32_C(0xa)
+	/* 2Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB UINT32_C(0x14)
+	/* 2.5Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB UINT32_C(0x19)
+	/* 10Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB UINT32_C(0x64)
+	/* 20Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB UINT32_C(0xc8)
+	/* 25Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB UINT32_C(0xfa)
+	/* 40Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB UINT32_C(0x190)
+	/* 50Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB UINT32_C(0x1f4)
+	/* 100Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB UINT32_C(0x3e8)
+	/* 10Mb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff)
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_LAST \
+		HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB
+	/*
+	 * This value indicates the duplex of the current
+	 * configuration.
+	 */
+	uint8_t duplex_cfg;
+	/* Half Duplex connection. */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_HALF UINT32_C(0x0)
+	/* Full duplex connection. */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL UINT32_C(0x1)
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_LAST \
+		HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL
+	/*
+	 * This value is used to indicate the current
+	 * pause configuration. When autoneg is enabled, this value
+	 * represents the autoneg results of pause configuration.
+	 */
+	uint8_t pause;
+	/*
+	 * When this bit is '1', Generation of tx pause messages
+	 * is supported. Disabled otherwise.
+	 */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX UINT32_C(0x1)
+	/*
+	 * When this bit is '1', Reception of rx pause messages
+	 * is supported. Disabled otherwise.
+	 */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX UINT32_C(0x2)
+	/*
+	 * The supported speeds for the port. This is a bit mask.
+	 * For each speed that is supported, the corresponding
+	 * bit will be set to '1'.
+	 */
+	uint16_t support_speeds;
+	/* 100Mb link speed (Half-duplex) */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD \
+		UINT32_C(0x1)
+	/* 100Mb link speed (Full-duplex) */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB \
+		UINT32_C(0x2)
+	/* 1Gb link speed (Half-duplex) */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD \
+		UINT32_C(0x4)
+	/* 1Gb link speed (Full-duplex) */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB \
+		UINT32_C(0x8)
+	/* 2Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB \
+		UINT32_C(0x10)
+	/* 2.5Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB \
+		UINT32_C(0x20)
+	/* 10Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB \
+		UINT32_C(0x40)
+	/* 20Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB \
+		UINT32_C(0x80)
+	/* 25Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB \
+		UINT32_C(0x100)
+	/* 40Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB \
+		UINT32_C(0x200)
+	/* 50Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB \
+		UINT32_C(0x400)
+	/* 100Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB \
+		UINT32_C(0x800)
+	/* 10Mb link speed (Half-duplex) */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD \
+		UINT32_C(0x1000)
+	/* 10Mb link speed (Full-duplex) */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB \
+		UINT32_C(0x2000)
+	/*
+	 * Current setting of forced link speed.
+	 * When the link speed is not being forced, this
+	 * value shall be set to 0.
+	 */
+	uint16_t force_link_speed;
+	/* 100Mb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1)
+	/* 1Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
+	/* 2Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
+	/* 2.5Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19)
+	/* 10Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
+	/* 20Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
+	/* 25Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
+	/* 40Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB \
+		UINT32_C(0x190)
+	/* 50Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB \
+		UINT32_C(0x1f4)
+	/* 100Gb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB \
+		UINT32_C(0x3e8)
+	/* 10Mb link speed */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB \
+		UINT32_C(0xffff)
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_LAST \
+		HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB
+	/* Current setting of auto negotiation mode. */
+	uint8_t auto_mode;
+	/* Disable autoneg or autoneg disabled. No speeds are selected. */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE UINT32_C(0x0)
+	/* Select all possible speeds for autoneg mode. */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
+	/*
+	 * Select only the auto_link_speed speed for autoneg mode. This mode has
+	 * been DEPRECATED. An HWRM client should not use this mode.
+	 */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
+	/*
+	 * Select the auto_link_speed or any speed below that speed for autoneg.
+	 * This mode has been DEPRECATED. An HWRM client should not use this mode.
+	 */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
+	/*
+	 * Select the speeds based on the corresponding link speed mask value
+	 * that is provided.
+	 */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_LAST \
+		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK
+	/* Current setting of pause autonegotiation. */
+	uint8_t auto_pause;
+	/*
+	 * When this bit is '1', Generation of tx pause messages
+	 * has been requested. Disabled otherwise.
+	 */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_TX \
+		UINT32_C(0x1)
+	/*
+	 * When this bit is '1', Reception of rx pause messages
+	 * has been requested. Disabled otherwise.
+	 */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_RX \
+		UINT32_C(0x2)
+	/*
+	 * When set to 1, the advertisement of pause is enabled.
+	 *
+	 * # When the auto_mode is not set to none and this flag is
+	 * set to 1, then the auto_pause bits on this port are being
+	 * advertised and autoneg pause results are being interpreted.
+	 * # When the auto_mode is not set to none and this
+	 * flag is set to 0, the pause is forced as indicated in
+	 * force_pause, and also advertised as auto_pause bits, but
+	 * the autoneg results are not interpreted since the pause
+	 * configuration is being forced.
+	 * # When the auto_mode is set to none and this flag is set to
+	 * 1, auto_pause bits should be ignored and should be set to 0.
+	 */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE \
+		UINT32_C(0x4)
+	/*
+	 * Current setting for auto_link_speed.
+ /* + * Current setting for auto_link_speed. This field is only + * valid when auto_mode is set to "one_speed" or "one_or_below". + */ + uint16_t auto_link_speed; + /* 100Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB \ + UINT32_C(0xffff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB + /* + * Current setting for auto_link_speed_mask that is used to + * advertise speeds during autonegotiation. + * This field is only valid when auto_mode is set to "mask". + * The speeds specified in this field shall be a subset of + * supported speeds on this port. + */ + uint16_t auto_link_speed_mask; + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \ + UINT32_C(0x10) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \ + UINT32_C(0x2000) + /* Current setting for wirespeed. */ + uint8_t wirespeed; + /* Wirespeed feature is disabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_OFF UINT32_C(0x0) + /* Wirespeed feature is enabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON UINT32_C(0x1) + #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON
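As the values above suggest (0x1 for 100Mb, 0xa for 1Gb, 0x3e8 for 100Gb), the auto_link_speed and force_link_speed encodings appear to be in units of 100 Mb/s, with 0xffff as a special case meaning 10 Mb. A hedged, illustrative conversion helper under that assumption (the name is invented for illustration):

#include <stdint.h>

/* Illustrative only: convert the auto_link_speed / force_link_speed
 * encoding above to Mb/s, assuming units of 100 Mb/s with 0xffff as
 * the 10 Mb special case. */
static uint32_t hwrm_link_speed_to_mbps(uint16_t encoded)
{
	if (encoded == HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB)
		return 10;
	return (uint32_t)encoded * 100;
}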
+ /* Current setting for loopback. */ + uint8_t lpbk; + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0) + /* + * The HW will be configured with local loopback such that + * host data is sent back to the host without modification. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1) + /* + * The HW will be configured with remote loopback such that + * port logic will send packets back out the transmitter that + * are received. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2) + /* + * The HW will be configured with external loopback such that + * host data is sent on the transmitter and based on the external + * loopback connection the data will be received without modification. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL UINT32_C(0x3) + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL + /* + * Current setting of forced pause. + * When the pause configuration is not being forced, then + * this value shall be set to 0. + */ + uint8_t force_pause; + /* + * When this bit is '1', Generation of tx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX UINT32_C(0x2) + /* + * This value indicates the current status of the optics module on + * this port. + */ + uint8_t module_status; + /* Module is inserted and accepted */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE \ + UINT32_C(0x0) + /* Module is rejected and transmit side Laser is disabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX \ + UINT32_C(0x1) + /* Module mismatch warning. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG \ + UINT32_C(0x2) + /* Module is rejected and powered down. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN \ + UINT32_C(0x3) + /* Module is not inserted. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \ + UINT32_C(0x4) + /* Module status is not applicable. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \ + UINT32_C(0xff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE + /* Current setting for preemphasis. */ + uint32_t preemphasis; + /* This field represents the major version of the PHY. */ + uint8_t phy_maj; + /* This field represents the minor version of the PHY. */ + uint8_t phy_min; + /* This field represents the build version of the PHY. */ + uint8_t phy_bld; + /* This value represents a PHY type.
*/ + uint8_t phy_type; + /* Unknown */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN \ + UINT32_C(0x0) + /* BASE-CR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR \ + UINT32_C(0x1) + /* BASE-KR4 (Deprecated) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 \ + UINT32_C(0x2) + /* BASE-LR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR \ + UINT32_C(0x3) + /* BASE-SR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR \ + UINT32_C(0x4) + /* BASE-KR2 (Deprecated) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 \ + UINT32_C(0x5) + /* BASE-KX */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX \ + UINT32_C(0x6) + /* BASE-KR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR \ + UINT32_C(0x7) + /* BASE-T */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET \ + UINT32_C(0x8) + /* EEE capable BASE-T */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE \ + UINT32_C(0x9) + /* SGMII connected external PHY */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY \ + UINT32_C(0xa) + /* 25G_BASECR_CA_L */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L \ + UINT32_C(0xb) + /* 25G_BASECR_CA_S */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S \ + UINT32_C(0xc) + /* 25G_BASECR_CA_N */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N \ + UINT32_C(0xd) + /* 25G_BASESR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR \ + UINT32_C(0xe) + /* 100G_BASECR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4 \ + UINT32_C(0xf) + /* 100G_BASESR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4 \ + UINT32_C(0x10) + /* 100G_BASELR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4 \ + UINT32_C(0x11) + /* 100G_BASEER4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4 \ + UINT32_C(0x12) + /* 100G_BASESR10 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10 \ + UINT32_C(0x13) + /* 40G_BASECR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4 \ + UINT32_C(0x14) + /* 40G_BASESR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4 \ + UINT32_C(0x15) + /* 40G_BASELR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4 \ + UINT32_C(0x16) + /* 40G_BASEER4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4 \ + UINT32_C(0x17) + /* 40G_ACTIVE_CABLE */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE \ + UINT32_C(0x18) + /* 1G_baseT */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET \ + UINT32_C(0x19) + /* 1G_baseSX */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX \ + UINT32_C(0x1a) + /* 1G_baseCX */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX \ + UINT32_C(0x1b) + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX + /* This value represents a media type. */ + uint8_t media_type; + /* Unknown */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_UNKNOWN UINT32_C(0x0) + /* Twisted Pair */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP UINT32_C(0x1) + /* Direct Attached Copper */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC UINT32_C(0x2) + /* Fiber */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE UINT32_C(0x3) + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE + /* This value represents a transceiver type. 
*/ + uint8_t xcvr_pkg_type; + /* PHY and MAC are in the same package */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \ + UINT32_C(0x1) + /* PHY and MAC are in different packages */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \ + UINT32_C(0x2) + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL + uint8_t eee_config_phy_addr; + /* This field represents the PHY address. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK \ + UINT32_C(0x1f) + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0 + /* + * This field represents flags related to EEE configuration. + * These EEE configuration flags are valid only when the + * auto_mode is not set to none (in other words autonegotiation + * is enabled). + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK \ + UINT32_C(0xe0) + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5 + /* + * When set to 1, Energy Efficient Ethernet (EEE) mode is enabled. + * Speeds for autoneg with EEE mode enabled + * are based on eee_link_speed_mask. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED \ + UINT32_C(0x20) + /* + * This flag is valid only when eee_enabled is set to 1. + * + * # If eee_enabled is set to 0, then EEE mode is disabled + * and this flag shall be ignored. + * # If eee_enabled is set to 1 and this flag is set to 1, + * then Energy Efficient Ethernet (EEE) mode is enabled + * and in use. + * # If eee_enabled is set to 1 and this flag is set to 0, + * then Energy Efficient Ethernet (EEE) mode is enabled + * but is currently not in use. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE \ + UINT32_C(0x40) + /* + * This flag is valid only when eee_enabled is set to 1. + * + * # If eee_enabled is set to 0, then EEE mode is disabled + * and this flag shall be ignored. + * # If eee_enabled is set to 1 and this flag is set to 1, + * then Energy Efficient Ethernet (EEE) mode is enabled + * and TX LPI is enabled. + * # If eee_enabled is set to 1 and this flag is set to 0, + * then Energy Efficient Ethernet (EEE) mode is enabled + * but TX LPI is disabled. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI \ + UINT32_C(0x80) + /* + * When set to 1, the parallel detection is used to determine + * the speed of the link partner. + * + * Parallel detection is used when an autonegotiation capable + * device is connected to a link partner that is not capable + * of autonegotiation. + */ + uint8_t parallel_detect; + /* + * When set to 1, the parallel detection is used to determine + * the speed of the link partner. + * + * Parallel detection is used when an autonegotiation capable + * device is connected to a link partner that is not capable + * of autonegotiation. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PARALLEL_DETECT UINT32_C(0x1)
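Because the PHY address and the EEE configuration flags share the single eee_config_phy_addr byte above, both are recovered with the mask/shift macros just defined. A hedged, illustrative sketch (helper names invented for illustration):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: extract the 5-bit PHY address from the shared byte. */
static uint8_t qcfg_phy_addr(uint8_t eee_config_phy_addr)
{
	return (eee_config_phy_addr &
		HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK) >>
	       HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT;
}

/* Illustrative only: eee_active is meaningful only when eee_enabled is
 * also set, per the flag descriptions above. */
static bool qcfg_eee_active(uint8_t eee_config_phy_addr)
{
	return (eee_config_phy_addr &
		HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED) &&
	       (eee_config_phy_addr &
		HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE);
}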
+ /* + * The speeds advertised for the port by the link partner. + * For each advertised speed, the corresponding bit will be set to '1'. + */ + uint16_t link_partner_adv_speeds; + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB \ + UINT32_C(0x10) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \ + UINT32_C(0x2000) + /* + * The advertised autoneg for the port by the link partner. + * This field is deprecated and should be set to 0. + */ + uint8_t link_partner_adv_auto_mode; + /* Disable autoneg or autoneg disabled. No speeds are selected. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_NONE \ + UINT32_C(0x0) + /* Select all possible speeds for autoneg mode. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \ + UINT32_C(0x1) + /* + * Select only the auto_link_speed speed for autoneg mode. This mode has + * been DEPRECATED. An HWRM client should not use this mode. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \ + UINT32_C(0x2) + /* + * Select the auto_link_speed or any speed below that speed for autoneg. + * This mode has been DEPRECATED. An HWRM client should not use this mode. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \ + UINT32_C(0x3) + /* + * Select the speeds based on the corresponding link speed mask value + * that is provided. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK \ + UINT32_C(0x4) + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK + /* The advertised pause settings on the port by the link partner. */ + uint8_t link_partner_adv_pause; + /* + * When this bit is '1', Generation of tx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX \ + UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX \ + UINT32_C(0x2) + /* + * Current setting for link speed mask that is used to + * advertise speeds during autonegotiation when EEE is enabled.
+ * This field is valid only when the eee_enabled flag is set to 1. + * The speeds specified in this field shall be a subset of + * speeds specified in auto_link_speed_mask. + */ + uint16_t adv_eee_link_speed_mask; + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD3 \ + UINT32_C(0x10) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD4 \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + /* + * Current setting for link speed mask that is advertised by + * the link partner when EEE is enabled. + * This field is valid only when the eee_enabled flag is set to 1. + */ + uint16_t link_partner_adv_eee_link_speed_mask; + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \ + UINT32_C(0x10) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + uint32_t xcvr_identifier_type_tx_lpi_timer; + /* + * Current setting of TX LPI timer in microseconds. + * This field is valid only when the eee_enabled flag is set to 1 + * and tx_lpi_enabled is set to 1. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK \ + UINT32_C(0xffffff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0 + /* This value represents the transceiver identifier type. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \ + UINT32_C(0xff000000) + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT 24 + /* Unknown */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \ + (UINT32_C(0x0) << 24) + /* SFP/SFP+/SFP28 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \ + (UINT32_C(0x3) << 24) + /* QSFP */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \ + (UINT32_C(0xc) << 24) + /* QSFP+ */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \ + (UINT32_C(0xd) << 24) + /* QSFP28 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \ + (UINT32_C(0x11) << 24) + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 + /* + * This value represents the current configuration of + * Forward Error Correction (FEC) on the port. + */ + uint16_t fec_cfg;
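The TX LPI timer and the transceiver identifier type are packed into one 32-bit word above; each is recovered with its mask/shift pair. A hedged, illustrative sketch (helper names invented for illustration):

#include <stdint.h>

/* Illustrative only: TX LPI timer in microseconds, from the low 24 bits. */
static uint32_t qcfg_tx_lpi_timer_us(uint32_t packed)
{
	return (packed & HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK) >>
	       HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT;
}

/* Illustrative only: keep the identifier field shifted in place so the
 * result compares directly against the *_XCVR_IDENTIFIER_TYPE_* values
 * above, which are defined already shifted (e.g. UINT32_C(0x3) << 24). */
static uint32_t qcfg_xcvr_identifier_type(uint32_t packed)
{
	return packed & HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK;
}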
+ /* + * When set to 1, then FEC is not supported on this port. If this flag + * is set to 1, then all other FEC configuration flags shall be ignored. + * When set to 0, then FEC is supported as indicated by other + * configuration flags. + * If no cable is attached and the HWRM does not yet know the FEC + * capability, then the HWRM shall set this flag to 1 when reporting + * FEC capability. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \ + UINT32_C(0x1) + /* + * When set to 1, then FEC autonegotiation is supported on this port. + * When set to 0, then FEC autonegotiation is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_SUPPORTED \ + UINT32_C(0x2) + /* + * When set to 1, then FEC autonegotiation is enabled on this port. + * When set to 0, then FEC autonegotiation is disabled if supported. + * This flag should be ignored if FEC autonegotiation is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \ + UINT32_C(0x4) + /* + * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on this port. + * When set to 0, then FEC CLAUSE 74 (Fire Code) is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \ + UINT32_C(0x8) + /* + * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on this port. + * When set to 0, then FEC CLAUSE 74 (Fire Code) is disabled if supported. + * This flag should be ignored if FEC CLAUSE 74 is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \ + UINT32_C(0x10) + /* + * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported on this port. + * When set to 0, then FEC CLAUSE 91 (Reed Solomon) is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_SUPPORTED \ + UINT32_C(0x20) + /* + * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled on this port. + * When set to 0, then FEC CLAUSE 91 (Reed Solomon) is disabled if supported. + * This flag should be ignored if FEC CLAUSE 91 is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED \ + UINT32_C(0x40) + /* + * This value indicates the duplex of the current + * connection state. + */ + uint8_t duplex_state; + /* Half Duplex connection. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF UINT32_C(0x0) + /* Full duplex connection. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL UINT32_C(0x1) + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL + /* Option flags field. */ + uint8_t option_flags; + /* When this bit is '1', Media auto detect is enabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_MEDIA_AUTO_DETECT \ + UINT32_C(0x1) + /* + * Up to 16 bytes of null padded ASCII string representing + * the PHY vendor. + * If the string is set to null, then the vendor name is not + * available. + */ + char phy_vendor_name[16]; + /* + * Up to 16 bytes of null padded ASCII string that + * identifies the vendor specific part number of the PHY. + * If the string is set to null, then the vendor specific + * part number is not available. + */ + char phy_vendor_partnumber[16]; + uint8_t unused_2[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last.
+ */ + uint8_t valid; +} __attribute__((packed)); + +/********************* + * hwrm_port_mac_cfg * + *********************/ + + +/* hwrm_port_mac_cfg_input (size:320b/40B) */ +struct hwrm_port_mac_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * In this field, there are a number of CoS mappings related flags + * that are used to configure CoS mappings and their corresponding + * priorities in the hardware. + * For the priorities of CoS mappings, the HWRM uses the following + * priority order (high to low) by default: + * # vlan pri + * # ip_dscp + * # tunnel_vlan_pri + * # default cos + * + * A subset of CoS mappings can be enabled. + * If a priority is not specified for an enabled CoS mapping, the + * priority will be assigned in the above order for the enabled CoS + * mappings. For example, if vlan_pri and ip_dscp CoS mappings are + * enabled and their priorities are not specified, the following + * priority order (high to low) will be used by the HWRM: + * # vlan_pri + * # ip_dscp + * # default cos + * + * vlan_pri CoS mapping together with default CoS with lower priority + * are enabled by default by the HWRM. + */ + uint32_t flags; + /* + * When this bit is '1', this command will configure + * the MAC to match the current link state of the PHY. + * If the link is not established on the PHY, then this + * bit has no effect. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_MATCH_LINK \ + UINT32_C(0x1) + /* + * When this bit is set to '1', the inner VLAN PRI to CoS mapping + * is requested to be enabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_VLAN_PRI2COS_ENABLE \ + UINT32_C(0x2) + /* + * When this bit is set to '1', tunnel VLAN PRI field to + * CoS mapping is requested to be enabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_TUNNEL_PRI2COS_ENABLE \ + UINT32_C(0x4) + /* + * When this bit is set to '1', the IP DSCP to CoS mapping is + * requested to be enabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_IP_DSCP2COS_ENABLE \ + UINT32_C(0x8) + /* + * When this bit is '1', the HWRM is requested to + * enable timestamp capture capability on the receive side + * of this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE \ + UINT32_C(0x10) + /* + * When this bit is '1', the HWRM is requested to + * disable timestamp capture capability on the receive side + * of this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE \ + UINT32_C(0x20) + /* + * When this bit is '1', the HWRM is requested to + * enable timestamp capture capability on the transmit side + * of this port. 
+ */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE \ + UINT32_C(0x40) + /* + * When this bit is '1', the HWRM is requested to + * disable timestamp capture capability on the transmit side + * of this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE \ + UINT32_C(0x80) + /* + * When this bit is '1', the Out-Of-Box WoL is requested to + * be enabled on this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_OOB_WOL_ENABLE \ + UINT32_C(0x100) + /* + * When this bit is '1', the Out-Of-Box WoL is requested to + * be disabled on this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_OOB_WOL_DISABLE \ + UINT32_C(0x200) + /* + * When this bit is set to '1', the inner VLAN PRI to CoS mapping + * is requested to be disabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_VLAN_PRI2COS_DISABLE \ + UINT32_C(0x400) + /* + * When this bit is set to '1', tunnel VLAN PRI field to + * CoS mapping is requested to be disabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_TUNNEL_PRI2COS_DISABLE \ + UINT32_C(0x800) + /* + * When this bit is set to '1', the IP DSCP to CoS mapping is + * requested to be disabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_IP_DSCP2COS_DISABLE \ + UINT32_C(0x1000) + uint32_t enables; + /* + * This bit must be '1' for the ipg field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_IPG \ + UINT32_C(0x1) + /* + * This bit must be '1' for the lpbk field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_LPBK \ + UINT32_C(0x2) + /* + * This bit must be '1' for the vlan_pri2cos_map_pri field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_VLAN_PRI2COS_MAP_PRI \ + UINT32_C(0x4) + /* + * This bit must be '1' for the tunnel_pri2cos_map_pri field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_TUNNEL_PRI2COS_MAP_PRI \ + UINT32_C(0x10) + /* + * This bit must be '1' for the dscp2cos_map_pri field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_DSCP2COS_MAP_PRI \ + UINT32_C(0x20) + /* + * This bit must be '1' for the rx_ts_capture_ptp_msg_type field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE \ + UINT32_C(0x40) + /* + * This bit must be '1' for the tx_ts_capture_ptp_msg_type field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE \ + UINT32_C(0x80) + /* + * This bit must be '1' for the cos_field_cfg field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_COS_FIELD_CFG \ + UINT32_C(0x100) + /* Port ID of port that is to be configured. */ + uint16_t port_id; + /* + * This value is used to configure the minimum IPG that will + * be sent between packets by this port. + */ + uint8_t ipg; + /* This value controls the loopback setting for the MAC. */ + uint8_t lpbk; + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_MAC_CFG_INPUT_LPBK_NONE UINT32_C(0x0) + /* + * The HW will be configured with local loopback such that + * host data is sent back to the host without modification. + */ + #define HWRM_PORT_MAC_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1)
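The flags/enables pairing above follows the usual HWRM pattern: a feature is requested via a flag, and each optional field is honored only when its enables bit is set. A hedged, illustrative sketch of filling hwrm_port_mac_cfg_input to enable the inner VLAN PRI to CoS mapping (the helper name is invented; the common header fields such as req_type/seq_id/resp_addr, and any byte-order conversion such as DPDK's rte_cpu_to_le_32, are driver-specific and omitted here):

#include <stdint.h>
#include <string.h>

/* Illustrative only: request inner VLAN PRI to CoS mapping with
 * normalized priority 0 on the given port. */
static void mac_cfg_enable_vlan_pri2cos(struct hwrm_port_mac_cfg_input *req,
					uint16_t port_id)
{
	memset(req, 0, sizeof(*req));
	req->port_id = port_id;
	/* Ask for the mapping to be enabled... */
	req->flags = HWRM_PORT_MAC_CFG_INPUT_FLAGS_VLAN_PRI2COS_ENABLE;
	/* ...and mark the priority field below as configured. */
	req->enables = HWRM_PORT_MAC_CFG_INPUT_ENABLES_VLAN_PRI2COS_MAP_PRI;
	req->vlan_pri2cos_map_pri = 0;
}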
+ /* + * The HW will be configured with remote loopback such that + * port logic will send packets back out the transmitter that + * are received. + */ + #define HWRM_PORT_MAC_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2) + #define HWRM_PORT_MAC_CFG_INPUT_LPBK_LAST \ + HWRM_PORT_MAC_CFG_INPUT_LPBK_REMOTE + /* + * This value controls the priority setting of VLAN PRI to CoS + * mapping based on VLAN Tags of inner packet headers of + * tunneled packets or packet headers of non-tunneled packets. + * + * # Each XXX_pri variable shall have a unique priority value + * when it is being specified. + * # When comparing priorities of mappings, higher value + * indicates higher priority. + * For example, a value of 0-3 is returned where 0 is + * the lowest priority and 3 is the highest priority. + */ + uint8_t vlan_pri2cos_map_pri; + /* Reserved field. */ + uint8_t reserved1; + /* + * This value controls the priority setting of VLAN PRI to CoS + * mapping based on VLAN Tags of tunneled header. + * This mapping only applies when tunneled headers + * are present. + * + * # Each XXX_pri variable shall have a unique priority value + * when it is being specified. + * # When comparing priorities of mappings, higher value + * indicates higher priority. + * For example, a value of 0-3 is returned where 0 is + * the lowest priority and 3 is the highest priority. + */ + uint8_t tunnel_pri2cos_map_pri; + /* + * This value controls the priority setting of IP DSCP to CoS + * mapping based on inner IP header of tunneled packets or + * IP header of non-tunneled packets. + * + * # Each XXX_pri variable shall have a unique priority value + * when it is being specified. + * # When comparing priorities of mappings, higher value + * indicates higher priority. + * For example, a value of 0-3 is returned where 0 is + * the lowest priority and 3 is the highest priority. + */ + uint8_t dscp2pri_map_pri; + /* + * This is a 16-bit bit mask that is used to request a + * specific configuration of time stamp capture of PTP messages + * on the receive side of this port. + * This field shall be ignored if the ptp_rx_ts_capture_enable + * flag is not set in this command. + * Otherwise, if bit 'i' is set, then the HWRM is being + * requested to configure the receive side of the port to + * capture the time stamp of every received PTP message + * with messageType field value set to i. + */ + uint16_t rx_ts_capture_ptp_msg_type; + /* + * This is a 16-bit bit mask that is used to request a + * specific configuration of time stamp capture of PTP messages + * on the transmit side of this port. + * This field shall be ignored if the ptp_tx_ts_capture_enable + * flag is not set in this command. + * Otherwise, if bit 'i' is set, then the HWRM is being + * requested to configure the transmit side of the port to + * capture the time stamp of every transmitted PTP message + * with messageType field value set to i. + */ + uint16_t tx_ts_capture_ptp_msg_type; + /* Configuration of CoS fields. */ + uint8_t cos_field_cfg; + /* Reserved */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_RSVD1 \ + UINT32_C(0x1) + /* + * This field is used to specify selection of VLAN PRI value + * based on whether one or two VLAN Tags are present in + * the inner packet headers of tunneled packets or + * non-tunneled packets. + * This field is valid only if inner VLAN PRI to CoS mapping + * is enabled. + * If VLAN PRI to CoS mapping is not enabled, then this + * field shall be ignored.
+ */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_MASK \ + UINT32_C(0x6) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_SFT \ + 1 + /* + * Select inner VLAN PRI when 1 or 2 VLAN Tags are + * present in the inner packet headers + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \ + (UINT32_C(0x0) << 1) + /* + * Select outer VLAN Tag PRI when 2 VLAN Tags are + * present in the inner packet headers. + * No VLAN PRI shall be selected for this configuration + * if only one VLAN Tag is present in the inner + * packet headers. + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \ + (UINT32_C(0x1) << 1) + /* + * Select outermost VLAN PRI when 1 or 2 VLAN Tags + * are present in the inner packet headers + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \ + (UINT32_C(0x2) << 1) + /* Unspecified */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \ + (UINT32_C(0x3) << 1) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \ + HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED + /* + * This field is used to specify selection of tunnel VLAN + * PRI value based on whether one or two VLAN Tags are + * present in tunnel headers. + * This field is valid only if tunnel VLAN PRI to CoS mapping + * is enabled. + * If tunnel VLAN PRI to CoS mapping is not enabled, then this + * field shall be ignored. + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK \ + UINT32_C(0x18) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT \ + 3 + /* + * Select inner VLAN PRI when 1 or 2 VLAN Tags are + * present in the tunnel packet headers + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \ + (UINT32_C(0x0) << 3) + /* + * Select outer VLAN Tag PRI when 2 VLAN Tags are + * present in the tunnel packet headers. + * No tunnel VLAN PRI shall be selected for this + * configuration if only one VLAN Tag is present in + * the tunnel packet headers. + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \ + (UINT32_C(0x1) << 3) + /* + * Select outermost VLAN PRI when 1 or 2 VLAN Tags + * are present in the tunnel packet headers + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \ + (UINT32_C(0x2) << 3) + /* Unspecified */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \ + (UINT32_C(0x3) << 3) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \ + HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED + /* + * This field shall be used to provide default CoS value + * that has been configured on this port. + * This field is valid only if default CoS mapping + * is enabled. + * If default CoS mapping is not enabled, then this + * field shall be ignored. + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_DEFAULT_COS_MASK \ + UINT32_C(0xe0) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_DEFAULT_COS_SFT \ + 5 + uint8_t unused_0[3]; +} __attribute__((packed)); + +/* hwrm_port_mac_cfg_output (size:128b/16B) */ +struct hwrm_port_mac_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. 
*/ + uint16_t resp_len; + /* + * This is the configured maximum length of Ethernet packet + * payload that is allowed to be received on the port. + * This value does not include the number of bytes used by + * Ethernet header and trailer (CRC). + */ + uint16_t mru; + /* + * This is the configured maximum length of Ethernet packet + * payload that is allowed to be transmitted on the port. + * This value does not include the number of bytes used by + * Ethernet header and trailer (CRC). + */ + uint16_t mtu; + /* Current configuration of the IPG value. */ + uint8_t ipg; + /* Current value of the loopback setting. */ + uint8_t lpbk; + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_NONE UINT32_C(0x0) + /* + * The HW will be configured with local loopback such that + * host data is sent back to the host without modification. + */ + #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1) + /* + * The HW will be configured with remote loopback such that + * port logic will send packets back out the transmitter that + * are received. + */ + #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2) + #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_LAST \ + HWRM_PORT_MAC_CFG_OUTPUT_LPBK_REMOTE + uint8_t unused_0; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_port_mac_qcfg * + **********************/ + + +/* hwrm_port_mac_qcfg_input (size:192b/24B) */ +struct hwrm_port_mac_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_port_mac_qcfg_output (size:192b/24B) */ +struct hwrm_port_mac_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len;
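The 'valid' comments repeated in the output structures above imply a specific read ordering: firmware writes 'valid' last, so a polling consumer must check it before trusting the rest of the response. A hedged, illustrative sketch of that ordering (the helper name is invented; real drivers add a timeout and the memory barrier appropriate for their platform, the GCC-style compiler barrier below is an assumption):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: returns true once the response can be read.
 * Checking 'valid' first, then issuing a barrier, keeps reads of the
 * other response fields from being hoisted above the valid check. */
static bool mac_cfg_resp_ready(const volatile struct hwrm_port_mac_cfg_output *resp)
{
	if (resp->valid != 1)
		return false;
	__asm__ __volatile__("" ::: "memory"); /* compiler barrier */
	return true;
}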
+ /* + * This is the configured maximum length of Ethernet packet + * payload that is allowed to be received on the port. + * This value does not include the number of bytes used by the + * Ethernet header and trailer (CRC). + */ + uint16_t mru; + /* + * This is the configured maximum length of Ethernet packet + * payload that is allowed to be transmitted on the port. + * This value does not include the number of bytes used by the + * Ethernet header and trailer (CRC). + */ + uint16_t mtu; + /* + * The minimum IPG that will + * be sent between packets by this port. + */ + uint8_t ipg; + /* The loopback setting for the MAC. */ + uint8_t lpbk; + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0) + /* + * The HW will be configured with local loopback such that + * host data is sent back to the host without modification. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1) + /* + * The HW will be configured with remote loopback such that + * port logic will send packets back out the transmitter that + * are received. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2) + #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_LAST \ + HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_REMOTE + /* + * Priority setting for VLAN PRI to CoS mapping. + * # Each XXX_pri variable shall have a unique priority value + * when it is being used. + * # When comparing priorities of mappings, higher value + * indicates higher priority. + * For example, a value of 0-3 is returned where 0 is + * the lowest priority and 3 is the highest priority. + * # If the corresponding CoS mapping is not enabled, then this + * field should be ignored. + * # This value indicates the normalized priority value retained + * in the HWRM. + */ + uint8_t vlan_pri2cos_map_pri; + /* + * In this field, a number of CoS mappings related flags + * are used to indicate configured CoS mappings. + */ + uint8_t flags; + /* + * When this bit is set to '1', the inner VLAN PRI to CoS mapping + * is enabled. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_VLAN_PRI2COS_ENABLE \ + UINT32_C(0x1) + /* + * When this bit is set to '1', tunnel VLAN PRI field to + * CoS mapping is enabled. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_TUNNEL_PRI2COS_ENABLE \ + UINT32_C(0x2) + /* + * When this bit is set to '1', the IP DSCP to CoS mapping is + * enabled. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_IP_DSCP2COS_ENABLE \ + UINT32_C(0x4) + /* + * When this bit is '1', the Out-Of-Box WoL is enabled on this + * port. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_OOB_WOL_ENABLE \ + UINT32_C(0x8) + /* When this bit is '1', PTP is enabled for RX on this port. */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE \ + UINT32_C(0x10) + /* When this bit is '1', PTP is enabled for TX on this port. */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE \ + UINT32_C(0x20) + /* + * Priority setting for tunnel VLAN PRI to CoS mapping. + * # Each XXX_pri variable shall have a unique priority value + * when it is being used. + * # When comparing priorities of mappings, higher value + * indicates higher priority. + * For example, a value of 0-3 is returned where 0 is + * the lowest priority and 3 is the highest priority. + * # If the corresponding CoS mapping is not enabled, then this + * field should be ignored. + * # This value indicates the normalized priority value retained + * in the HWRM. + */ + uint8_t tunnel_pri2cos_map_pri;
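In the rx/tx_ts_capture_ptp_msg_type masks used by hwrm_port_mac_cfg and reported back by hwrm_port_mac_qcfg, bit 'i' corresponds to PTP messageType 'i'. A hedged, illustrative check (the helper name is invented; messageType 0 is Sync in IEEE 1588, used here only as an example):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: true when timestamp capture is configured for the
 * given PTP messageType in a 16-bit capture mask. */
static bool ptp_msg_type_captured(uint16_t msg_type_mask,
				  unsigned int msg_type)
{
	return msg_type < 16 &&
	       (msg_type_mask & (UINT32_C(1) << msg_type)) != 0;
}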
+ /* + * Priority setting for DSCP to PRI mapping. + * # Each XXX_pri variable shall have a unique priority value + * when it is being used. + * # When comparing priorities of mappings, higher value + * indicates higher priority. + * For example, a value of 0-3 is returned where 0 is + * the lowest priority and 3 is the highest priority. + * # If the corresponding CoS mapping is not enabled, then this + * field should be ignored. + * # This value indicates the normalized priority value retained + * in the HWRM. + */ + uint8_t dscp2pri_map_pri; + /* + * This is a 16-bit bit mask that represents the + * current configuration of time stamp capture of PTP messages + * on the receive side of this port. + * If bit 'i' is set, then the receive side of the port + * is configured to capture the time stamp of every + * received PTP message with messageType field value set + * to i. + * If all bits are set to 0 (i.e. field value set 0), + * then the receive side of the port is not configured + * to capture timestamp for PTP messages. + * If all bits are set to 1, then the receive side of the + * port is configured to capture timestamp for all PTP + * messages. + */ + uint16_t rx_ts_capture_ptp_msg_type; + /* + * This is a 16-bit bit mask that represents the + * current configuration of time stamp capture of PTP messages + * on the transmit side of this port. + * If bit 'i' is set, then the transmit side of the port + * is configured to capture the time stamp of every + * transmitted PTP message with messageType field value set + * to i. + * If all bits are set to 0 (i.e. field value set 0), + * then the transmit side of the port is not configured + * to capture timestamp for PTP messages. + * If all bits are set to 1, then the transmit side of the + * port is configured to capture timestamp for all PTP + * messages. + */ + uint16_t tx_ts_capture_ptp_msg_type; + /* Configuration of CoS fields. */ + uint8_t cos_field_cfg; + /* Reserved */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_RSVD \ + UINT32_C(0x1) + /* + * This field is used for selecting VLAN PRI value + * based on whether one or two VLAN Tags are present in + * the inner packet headers of tunneled packets or + * non-tunneled packets. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_MASK \ + UINT32_C(0x6) + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_SFT \ + 1 + /* + * Select inner VLAN PRI when 1 or 2 VLAN Tags are + * present in the inner packet headers + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \ + (UINT32_C(0x0) << 1) + /* + * Select outer VLAN Tag PRI when 2 VLAN Tags are + * present in the inner packet headers. + * No VLAN PRI is selected for this configuration + * if only one VLAN Tag is present in the inner + * packet headers. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \ + (UINT32_C(0x1) << 1) + /* + * Select outermost VLAN PRI when 1 or 2 VLAN Tags + * are present in the inner packet headers + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \ + (UINT32_C(0x2) << 1) + /* Unspecified */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \ + (UINT32_C(0x3) << 1) + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \ + HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED + /* + * This field is used for selecting tunnel VLAN PRI value + * based on whether one or two VLAN Tags are present in + * the tunnel headers of tunneled packets. This selection + * does not apply to non-tunneled packets.
+ */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK \ + UINT32_C(0x18) + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT \ + 3 + /* + * Select inner VLAN PRI when 1 or 2 VLAN Tags are + * present in the tunnel packet headers + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \ + (UINT32_C(0x0) << 3) + /* + * Select outer VLAN Tag PRI when 2 VLAN Tags are + * present in the tunnel packet headers. + * No VLAN PRI is selected for this configuration + * if only one VLAN Tag is present in the tunnel + * packet headers. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \ + (UINT32_C(0x1) << 3) + /* + * Select outermost VLAN PRI when 1 or 2 VLAN Tags + * are present in the tunnel packet headers + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \ + (UINT32_C(0x2) << 3) + /* Unspecified */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \ + (UINT32_C(0x3) << 3) + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \ + HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED + /* + * This field is used to provide default CoS value that + * has been configured on this port. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_DEFAULT_COS_MASK \ + UINT32_C(0xe0) + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_DEFAULT_COS_SFT \ + 5 + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_port_mac_ptp_qcfg * + **************************/ + + +/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */ +struct hwrm_port_mac_ptp_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */ +struct hwrm_port_mac_ptp_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * In this field, a number of PTP related flags + * are used to indicate configured PTP capabilities. 
+ */ + uint8_t flags; + /* + * When this bit is set to '1', the PTP related registers are + * directly accessible by the host. + */ + #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS \ + UINT32_C(0x1) + /* + * When this bit is set to '1', the PTP information is accessible + * via HWRM commands. + */ + #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_HWRM_ACCESS \ + UINT32_C(0x2) + uint8_t unused_0[3]; + /* Offset of the PTP register for the lower 32 bits of timestamp for RX. */ + uint32_t rx_ts_reg_off_lower; + /* Offset of the PTP register for the upper 32 bits of timestamp for RX. */ + uint32_t rx_ts_reg_off_upper; + /* Offset of the PTP register for the sequence ID for RX. */ + uint32_t rx_ts_reg_off_seq_id; + /* Offset of the first PTP source ID for RX. */ + uint32_t rx_ts_reg_off_src_id_0; + /* Offset of the second PTP source ID for RX. */ + uint32_t rx_ts_reg_off_src_id_1; + /* Offset of the third PTP source ID for RX. */ + uint32_t rx_ts_reg_off_src_id_2; + /* Offset of the domain ID for RX. */ + uint32_t rx_ts_reg_off_domain_id; + /* Offset of the PTP FIFO register for RX. */ + uint32_t rx_ts_reg_off_fifo; + /* Offset of the PTP advance FIFO register for RX. */ + uint32_t rx_ts_reg_off_fifo_adv; + /* PTP timestamp granularity for RX. */ + uint32_t rx_ts_reg_off_granularity; + /* Offset of the PTP register for the lower 32 bits of timestamp for TX. */ + uint32_t tx_ts_reg_off_lower; + /* Offset of the PTP register for the upper 32 bits of timestamp for TX. */ + uint32_t tx_ts_reg_off_upper; + /* Offset of the PTP register for the sequence ID for TX. */ + uint32_t tx_ts_reg_off_seq_id; + /* Offset of the PTP FIFO register for TX. */ + uint32_t tx_ts_reg_off_fifo; + /* PTP timestamp granularity for TX. */ + uint32_t tx_ts_reg_off_granularity; + uint8_t unused_1[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************** + * hwrm_port_qstats * + ********************/ + + +/* hwrm_port_qstats_input (size:320b/40B) */ +struct hwrm_port_qstats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. 
*/ + uint16_t port_id; + uint8_t unused_0[6]; + /* + * This is the host address where + * Tx port statistics will be stored + */ + uint64_t tx_stat_host_addr; + /* + * This is the host address where + * Rx port statistics will be stored + */ + uint64_t rx_stat_host_addr; +} __attribute__((packed)); + +/* hwrm_port_qstats_output (size:128b/16B) */ +struct hwrm_port_qstats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The size of TX port statistics block in bytes. */ + uint16_t tx_stat_size; + /* The size of RX port statistics block in bytes. */ + uint16_t rx_stat_size; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************ + * hwrm_port_qstats_ext * + ************************/ + + +/* hwrm_port_qstats_ext_input (size:320b/40B) */ +struct hwrm_port_qstats_ext_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + /* + * The size of TX port extended + * statistics block in bytes. + */ + uint16_t tx_stat_size; + /* + * The size of RX port extended + * statistics block in bytes + */ + uint16_t rx_stat_size; + uint8_t unused_0[2]; + /* + * This is the host address where + * Tx port statistics will be stored + */ + uint64_t tx_stat_host_addr; + /* + * This is the host address where + * Rx port statistics will be stored + */ + uint64_t rx_stat_host_addr; +} __attribute__((packed)); + +/* hwrm_port_qstats_ext_output (size:128b/16B) */ +struct hwrm_port_qstats_ext_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The size of TX port statistics block in bytes. */ + uint16_t tx_stat_size; + /* The size of RX port statistics block in bytes. */ + uint16_t rx_stat_size; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************* + * hwrm_port_lpbk_qstats * + *************************/ + + +/* hwrm_port_lpbk_qstats_input (size:128b/16B) */ +struct hwrm_port_lpbk_qstats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __attribute__((packed)); + +/* hwrm_port_lpbk_qstats_output (size:768b/96B) */ +struct hwrm_port_lpbk_qstats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of transmitted unicast frames */ + uint64_t lpbk_ucast_frames; + /* Number of transmitted multicast frames */ + uint64_t lpbk_mcast_frames; + /* Number of transmitted broadcast frames */ + uint64_t lpbk_bcast_frames; + /* Number of transmitted bytes for unicast traffic */ + uint64_t lpbk_ucast_bytes; + /* Number of transmitted bytes for multicast traffic */ + uint64_t lpbk_mcast_bytes; + /* Number of transmitted bytes for broadcast traffic */ + uint64_t lpbk_bcast_bytes; + /* Total Tx Drops for loopback traffic reported by STATS block */ + uint64_t tx_stat_discard; + /* Total Tx Error Drops for loopback traffic reported by STATS block */ + uint64_t tx_stat_error; + /* Total Rx Drops for loopback traffic reported by STATS block */ + uint64_t rx_stat_discard; + /* Total Rx Error Drops for loopback traffic reported by STATS block */ + uint64_t rx_stat_error; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_port_clr_stats * + ***********************/ + + +/* hwrm_port_clr_stats_input (size:192b/24B) */ +struct hwrm_port_clr_stats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. 
+ */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_port_clr_stats_output (size:128b/16B) */ +struct hwrm_port_clr_stats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/**************************** + * hwrm_port_lpbk_clr_stats * + ****************************/ + + +/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */ +struct hwrm_port_lpbk_clr_stats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __attribute__((packed)); + +/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */ +struct hwrm_port_lpbk_clr_stats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
+ */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_port_ts_query * + **********************/ + + +/* hwrm_port_ts_query_input (size:192b/24B) */ +struct hwrm_port_ts_query_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_LAST \ + HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX + /* Port ID of port that is being queried. */ + uint16_t port_id; + uint8_t unused_0[2]; +} __attribute__((packed)); + +/* hwrm_port_ts_query_output (size:192b/24B) */ +struct hwrm_port_ts_query_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Timestamp value of PTP message captured. */ + uint64_t ptp_msg_ts; + /* Sequence ID of the PTP message captured. */ + uint16_t ptp_msg_seqid; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_port_phy_qcaps * + ***********************/ + + +/* hwrm_port_phy_qcaps_input (size:192b/24B) */ +struct hwrm_port_phy_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
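+ * (Illustrative sketch, not upstream text: every HWRM request starts
+ * with this same header, so a hypothetical helper could populate it as
+ *
+ *   req->req_type = rte_cpu_to_le_16(HWRM_PORT_PHY_QCAPS);
+ *   req->cmpl_ring = rte_cpu_to_le_16(0xffff); // no completion ring
+ *   req->seq_id = rte_cpu_to_le_16(seq++);     // caller-managed counter
+ *   req->target_id = rte_cpu_to_le_16(0xffff); // target the HWRM itself
+ *   req->resp_addr = rte_cpu_to_le_64(resp_iova);
+ *
+ * with seq and resp_iova kept by the caller.)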
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_port_phy_qcaps_output (size:192b/24B) */ +struct hwrm_port_phy_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* PHY capability flags */ + uint8_t flags; + /* + * If set to 1, then this field indicates that the + * link is capable of supporting EEE. + */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EEE_SUPPORTED \ + UINT32_C(0x1) + /* + * If set to 1, then this field indicates that the + * PHY is capable of supporting external loopback. + */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EXTERNAL_LPBK_SUPPORTED \ + UINT32_C(0x2) + /* + * Reserved field. The HWRM shall set this field to 0. + * An HWRM client shall ignore this field. + */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_RSVD1_MASK \ + UINT32_C(0xfc) + #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_RSVD1_SFT 2 + /* Number of front panel ports for this device. */ + uint8_t port_cnt; + /* Not supported or unknown */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_UNKNOWN UINT32_C(0x0) + /* single port device */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_1 UINT32_C(0x1) + /* 2-port device */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_2 UINT32_C(0x2) + /* 3-port device */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_3 UINT32_C(0x3) + /* 4-port device */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_4 UINT32_C(0x4) + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_LAST \ + HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_4 + /* + * This is a bit mask to indicate what speeds are supported + * as forced speeds on this link. + * For each speed that can be forced on this link, the + * corresponding mask bit shall be set to '1'. 
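+ * (Illustrative sketch, not upstream text: a caller can test a bit
+ * with the defines below, e.g.
+ *
+ *   uint16_t speeds =
+ *       rte_le_to_cpu_16(resp->supported_speeds_force_mode);
+ *   if (speeds & HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_25GB)
+ *       ;   // 25Gb can be forced on this link
+ * )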
+ */
+ uint16_t supported_speeds_force_mode;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_2GB \
+ UINT32_C(0x10)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10MB \
+ UINT32_C(0x2000)
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * for autonegotiation on this link.
+ * For each speed that can be autonegotiated on this link, the
+ * corresponding mask bit shall be set to '1'.
+ */
+ uint16_t supported_speeds_auto_mode;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_2GB \
+ UINT32_C(0x10)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10MB \
+ UINT32_C(0x2000)
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * for EEE on this link.
+ * For each speed that can be autonegotiated when EEE is enabled
+ * on this link, the corresponding mask bit shall be set to '1'.
+ * This field is only valid when eee_supported is set to '1'.
+ */
+ uint16_t supported_speeds_eee_mode;
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_100MB \
+ UINT32_C(0x2)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_1GB \
+ UINT32_C(0x8)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 \
+ UINT32_C(0x10)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_10GB \
+ UINT32_C(0x40)
+ uint32_t tx_lpi_timer_low;
+ /*
+ * The lowest value of TX LPI timer that can be set on this link
+ * when EEE is enabled. This value is in microseconds.
+ * This field is valid only when eee_supported is set to '1'.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_SFT 0
+ /*
+ * Reserved field. The HWRM shall set this field to 0.
+ * An HWRM client shall ignore this field.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_RSVD2_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_RSVD2_SFT 24
+ uint32_t valid_tx_lpi_timer_high;
+ /*
+ * The highest value of TX LPI timer that can be set on this link
+ * when EEE is enabled. This value is in microseconds.
+ * This field is valid only when eee_supported is set to '1'.
+ */
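+ /*
+ * (Illustrative sketch, not upstream text: the timer value shares this
+ * dword with the valid byte, so extract it with the mask/shift pair
+ * defined below, e.g.
+ *
+ *   uint32_t v = rte_le_to_cpu_32(resp->valid_tx_lpi_timer_high);
+ *   uint32_t lpi_high =
+ *       (v & HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_MASK) >>
+ *       HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_SFT;
+ * )
+ */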
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_SFT 0
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_SFT 24
+} __attribute__((packed));
+
+/***************************
+ * hwrm_port_phy_i2c_write *
+ ***************************/
+
+
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the page_offset field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_I2C_WRITE_INPUT_ENABLES_PAGE_OFFSET \
+ UINT32_C(0x1)
+ /* Port ID of port. */
+ uint16_t port_id;
+ /* 8-bit I2C slave address. */
+ uint8_t i2c_slave_addr;
+ uint8_t unused_0;
+ /* The page number that is being accessed over I2C. */
+ uint16_t page_number;
+ /* Offset within the page that is being accessed over I2C. */
+ uint16_t page_offset;
+ /*
+ * Length of data to write, in bytes starting at the offset
+ * specified above. If the offset is not specified, then
+ * the data shall be written from the beginning of the page.
+ */
+ uint8_t data_length;
+ uint8_t unused_1[7];
+ /* Up to 64B of data. */
+ uint32_t data[16];
+} __attribute__((packed));
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */ + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_port_phy_i2c_read * + **************************/ + + +/* hwrm_port_phy_i2c_read_input (size:320b/40B) */ +struct hwrm_port_phy_i2c_read_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + uint32_t enables; + /* + * This bit must be '1' for the page_offset field to be + * configured. + */ + #define HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET \ + UINT32_C(0x1) + /* Port ID of port. */ + uint16_t port_id; + /* 8-bit I2C slave address. */ + uint8_t i2c_slave_addr; + uint8_t unused_0; + /* The page number that is being accessed over I2C. */ + uint16_t page_number; + /* Offset within the page that is being accessed over I2C. */ + uint16_t page_offset; + /* + * Length of data to read, in bytes starting at the offset + * specified above. If the offset is not specified, then + * the data shall be read from the beginning of the page. + */ + uint8_t data_length; + uint8_t unused_1[7]; +} __attribute__((packed)); + +/* hwrm_port_phy_i2c_read_output (size:640b/80B) */ +struct hwrm_port_phy_i2c_read_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Up to 64B of data. */ + uint32_t data[16]; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************* + * hwrm_port_led_cfg * + *********************/ + + +/* hwrm_port_led_cfg_input (size:512b/64B) */ +struct hwrm_port_led_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the led0_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the led0_state field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE \ + UINT32_C(0x2) + /* + * This bit must be '1' for the led0_color field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR \ + UINT32_C(0x4) + /* + * This bit must be '1' for the led0_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON \ + UINT32_C(0x8) + /* + * This bit must be '1' for the led0_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF \ + UINT32_C(0x10) + /* + * This bit must be '1' for the led0_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID \ + UINT32_C(0x20) + /* + * This bit must be '1' for the led1_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_ID \ + UINT32_C(0x40) + /* + * This bit must be '1' for the led1_state field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_STATE \ + UINT32_C(0x80) + /* + * This bit must be '1' for the led1_color field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_COLOR \ + UINT32_C(0x100) + /* + * This bit must be '1' for the led1_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_ON \ + UINT32_C(0x200) + /* + * This bit must be '1' for the led1_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_OFF \ + UINT32_C(0x400) + /* + * This bit must be '1' for the led1_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_GROUP_ID \ + UINT32_C(0x800) + /* + * This bit must be '1' for the led2_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_ID \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the led2_state field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_STATE \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the led2_color field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_COLOR \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the led2_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_ON \ + UINT32_C(0x8000) + /* + * This bit must be '1' for the led2_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_OFF \ + UINT32_C(0x10000) + /* + * This bit must be '1' for the led2_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_GROUP_ID \ + UINT32_C(0x20000) + /* + * This bit must be '1' for the led3_id field to be + * configured. 
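+ * (Illustrative sketch, not upstream text: to blink LED #0, a caller
+ * would set the matching enables bits together with the fields they
+ * guard, e.g.
+ *
+ *   req.enables = rte_cpu_to_le_32(
+ *       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID |
+ *       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE |
+ *       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON |
+ *       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF);
+ *   req.num_leds = 1;
+ *   req.led0_id = 0;
+ *   req.led0_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK;
+ *   req.led0_blink_on = rte_cpu_to_le_16(500);   // ms on per cycle
+ *   req.led0_blink_off = rte_cpu_to_le_16(500);  // ms off per cycle
+ * )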
+ */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_ID \ + UINT32_C(0x40000) + /* + * This bit must be '1' for the led3_state field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_STATE \ + UINT32_C(0x80000) + /* + * This bit must be '1' for the led3_color field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_COLOR \ + UINT32_C(0x100000) + /* + * This bit must be '1' for the led3_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_ON \ + UINT32_C(0x200000) + /* + * This bit must be '1' for the led3_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_OFF \ + UINT32_C(0x400000) + /* + * This bit must be '1' for the led3_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_GROUP_ID \ + UINT32_C(0x800000) + /* Port ID of port whose LEDs are configured. */ + uint16_t port_id; + /* + * The number of LEDs that are being configured. + * Up to 4 LEDs can be configured with this command. + */ + uint8_t num_leds; + /* Reserved field. */ + uint8_t rsvd; + /* An identifier for the LED #0. */ + uint8_t led0_id; + /* The requested state of the LED #0. */ + uint8_t led0_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT + /* The requested color of LED #0. */ + uint8_t led0_color; + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER + uint8_t unused_0; + /* + * If the LED #0 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led0_blink_on; + /* + * If the LED #0 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led0_blink_off; + /* + * An identifier for the group of LEDs that LED #0 belongs + * to. + * If set to 0, then the LED #0 shall not be grouped and + * shall be treated as an individual resource. + * For all other non-zero values of this field, LED #0 shall + * be grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led0_group_id; + /* Reserved field. */ + uint8_t rsvd0; + /* An identifier for the LED #1. */ + uint8_t led1_id; + /* The requested state of the LED #1. 
*/ + uint8_t led1_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT + /* The requested color of LED #1. */ + uint8_t led1_color; + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER + uint8_t unused_1; + /* + * If the LED #1 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led1_blink_on; + /* + * If the LED #1 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led1_blink_off; + /* + * An identifier for the group of LEDs that LED #1 belongs + * to. + * If set to 0, then the LED #1 shall not be grouped and + * shall be treated as an individual resource. + * For all other non-zero values of this field, LED #1 shall + * be grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led1_group_id; + /* Reserved field. */ + uint8_t rsvd1; + /* An identifier for the LED #2. */ + uint8_t led2_id; + /* The requested state of the LED #2. */ + uint8_t led2_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT + /* The requested color of LED #2. */ + uint8_t led2_color; + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER + uint8_t unused_2; + /* + * If the LED #2 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led2_blink_on; + /* + * If the LED #2 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led2_blink_off; + /* + * An identifier for the group of LEDs that LED #2 belongs + * to. + * If set to 0, then the LED #2 shall not be grouped and + * shall be treated as an individual resource. 
+ * For all other non-zero values of this field, LED #2 shall + * be grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led2_group_id; + /* Reserved field. */ + uint8_t rsvd2; + /* An identifier for the LED #3. */ + uint8_t led3_id; + /* The requested state of the LED #3. */ + uint8_t led3_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT + /* The requested color of LED #3. */ + uint8_t led3_color; + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER + uint8_t unused_3; + /* + * If the LED #3 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led3_blink_on; + /* + * If the LED #3 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led3_blink_off; + /* + * An identifier for the group of LEDs that LED #3 belongs + * to. + * If set to 0, then the LED #3 shall not be grouped and + * shall be treated as an individual resource. + * For all other non-zero values of this field, LED #3 shall + * be grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led3_group_id; + /* Reserved field. */ + uint8_t rsvd3; +} __attribute__((packed)); + +/* hwrm_port_led_cfg_output (size:128b/16B) */ +struct hwrm_port_led_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_port_led_qcfg * + **********************/ + + +/* hwrm_port_led_qcfg_input (size:192b/24B) */ +struct hwrm_port_led_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port whose LED configuration is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_port_led_qcfg_output (size:448b/56B) */ +struct hwrm_port_led_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * The number of LEDs that are configured on this port. + * Up to 4 LEDs can be returned in the response. + */ + uint8_t num_leds; + /* An identifier for the LED #0. */ + uint8_t led0_id; + /* The type of LED #0. */ + uint8_t led0_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID + /* The current state of the LED #0. */ + uint8_t led0_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT + /* The color of LED #0. */ + uint8_t led0_color; + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER + uint8_t unused_0; + /* + * If the LED #0 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led0_blink_on; + /* + * If the LED #0 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led0_blink_off; + /* + * An identifier for the group of LEDs that LED #0 belongs + * to. + * If set to 0, then the LED #0 is not grouped. + * For all other non-zero values of this field, LED #0 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led0_group_id; + /* An identifier for the LED #1. */ + uint8_t led1_id; + /* The type of LED #1. 
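+ * (Illustrative sketch, not upstream text: an unpopulated LED slot can
+ * be expected to report the INVALID type, so a caller might guard its
+ * reads along the lines of
+ *
+ *   if (resp->num_leds > 1 &&
+ *       resp->led1_type != HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID)
+ *       handle_led(resp->led1_id, resp->led1_state, resp->led1_color);
+ *
+ * where handle_led() is a placeholder for driver logic.)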
*/ + uint8_t led1_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID + /* The current state of the LED #1. */ + uint8_t led1_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT + /* The color of LED #1. */ + uint8_t led1_color; + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER + uint8_t unused_1; + /* + * If the LED #1 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led1_blink_on; + /* + * If the LED #1 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led1_blink_off; + /* + * An identifier for the group of LEDs that LED #1 belongs + * to. + * If set to 0, then the LED #1 is not grouped. + * For all other non-zero values of this field, LED #1 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led1_group_id; + /* An identifier for the LED #2. */ + uint8_t led2_id; + /* The type of LED #2. */ + uint8_t led2_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID + /* The current state of the LED #2. */ + uint8_t led2_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT + /* The color of LED #2. 
*/ + uint8_t led2_color; + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER + uint8_t unused_2; + /* + * If the LED #2 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led2_blink_on; + /* + * If the LED #2 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led2_blink_off; + /* + * An identifier for the group of LEDs that LED #2 belongs + * to. + * If set to 0, then the LED #2 is not grouped. + * For all other non-zero values of this field, LED #2 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led2_group_id; + /* An identifier for the LED #3. */ + uint8_t led3_id; + /* The type of LED #3. */ + uint8_t led3_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID + /* The current state of the LED #3. */ + uint8_t led3_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT + /* The color of LED #3. */ + uint8_t led3_color; + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER + uint8_t unused_3; + /* + * If the LED #3 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led3_blink_on; + /* + * If the LED #3 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led3_blink_off; + /* + * An identifier for the group of LEDs that LED #3 belongs + * to. + * If set to 0, then the LED #3 is not grouped. + * For all other non-zero values of this field, LED #3 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led3_group_id; + uint8_t unused_4[6]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_port_led_qcaps * + ***********************/ + + +/* hwrm_port_led_qcaps_input (size:192b/24B) */ +struct hwrm_port_led_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port whose LED configuration is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_port_led_qcaps_output (size:384b/48B) */ +struct hwrm_port_led_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * The number of LEDs that are configured on this port. + * Up to 4 LEDs can be returned in the response. + */ + uint8_t num_leds; + /* Reserved for future use. */ + uint8_t unused[3]; + /* An identifier for the LED #0. */ + uint8_t led0_id; + /* The type of LED #0. */ + uint8_t led0_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_LAST \ + HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID + /* + * An identifier for the group of LEDs that LED #0 belongs + * to. + * If set to 0, then the LED #0 cannot be grouped. + * For all other non-zero values of this field, LED #0 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led0_group_id; + uint8_t unused_0; + /* The states supported by LED #0. */ + uint16_t led0_state_caps; + /* + * If set to 1, this LED is enabled. + * If set to 0, this LED is disabled. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED \ + UINT32_C(0x1) + /* + * If set to 1, off state is supported on this LED. + * If set to 0, off state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_OFF_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, on state is supported on this LED. + * If set to 0, on state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ON_SUPPORTED \ + UINT32_C(0x4) + /* + * If set to 1, blink state is supported on this LED. 
+ * If set to 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ /* The colors supported by LED #0. */
+ uint16_t led0_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ /* An identifier for the LED #1. */
+ uint8_t led1_id;
+ /* The type of LED #1. */
+ uint8_t led1_type;
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #1 belongs
+ * to.
+ * If set to 0, then the LED #1 cannot be grouped.
+ * For all other non-zero values of this field, LED #1 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led1_group_id;
+ uint8_t unused_1;
+ /* The states supported by LED #1. */
+ uint16_t led1_state_caps;
+ /*
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ /* The colors supported by LED #1. */
+ uint16_t led1_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ /* An identifier for the LED #2. */
+ uint8_t led2_id;
+ /* The type of LED #2. */
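+ /*
+ * (Illustrative sketch, not upstream text: before requesting a blinking
+ * amber LED #1 via hwrm_port_led_cfg, a caller can check the capability
+ * bits above, e.g.
+ *
+ *   uint16_t st = rte_le_to_cpu_16(resp->led1_state_caps);
+ *   uint16_t co = rte_le_to_cpu_16(resp->led1_color_caps);
+ *   int ok =
+ *       (st & HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_SUPPORTED) &&
+ *       (co & HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_AMBER_SUPPORTED);
+ * )
+ */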
+ uint8_t led2_type;
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #2 belongs
+ * to.
+ * If set to 0, then the LED #2 cannot be grouped.
+ * For all other non-zero values of this field, LED #2 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led2_group_id;
+ uint8_t unused_2;
+ /* The states supported by LED #2. */
+ uint16_t led2_state_caps;
+ /*
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ /* The colors supported by LED #2. */
+ uint16_t led2_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ /* An identifier for the LED #3. */
+ uint8_t led3_id;
+ /* The type of LED #3. */
+ uint8_t led3_type;
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #3 belongs
+ * to.
+ * If set to 0, then the LED #3 cannot be grouped.
+ * For all other non-zero values of this field, LED #3 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led3_group_id;
+ uint8_t unused_3;
+ /* The states supported by LED #3. */
+ uint16_t led3_state_caps;
+ /*
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
+ */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_OFF_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, on state is supported on this LED. + * If set to 0, on state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ON_SUPPORTED \ + UINT32_C(0x4) + /* + * If set to 1, blink state is supported on this LED. + * If set to 0, blink state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_SUPPORTED \ + UINT32_C(0x8) + /* + * If set to 1, blink_alt state is supported on this LED. + * If set to 0, blink_alt state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED \ + UINT32_C(0x10) + /* The colors supported by LED #3. */ + uint16_t led3_color_caps; + /* reserved. */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_RSVD \ + UINT32_C(0x1) + /* + * If set to 1, Amber color is supported on this LED. + * If set to 0, Amber color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_AMBER_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, Green color is supported on this LED. + * If set to 0, Green color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_GREEN_SUPPORTED \ + UINT32_C(0x4) + uint8_t unused_4[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_queue_qportcfg * + ***********************/ + + +/* hwrm_queue_qportcfg_input (size:192b/24B) */ +struct hwrm_queue_qportcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \ + HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX + /* + * Port ID of port for which the queue configuration is being + * queried. This field is only required when sent by IPC. 
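+ *
+ * A minimal sketch of issuing this query from a driver, assuming a
+ * hypothetical bnxt_hwrm_send() helper and context pointer bp
+ * (illustrative only, not part of the generated interface):
+ *
+ *   struct hwrm_queue_qportcfg_input req = { 0 };
+ *   req.flags = rte_cpu_to_le_32(
+ *       HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
+ *   rc = bnxt_hwrm_send(bp, &req, sizeof(req));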
+ */
+ uint16_t port_id;
+ /*
+ * A driver sets this capability when it can use
+ * queue_idx_service_profile to map the queues to applications.
+ */
+ uint8_t drv_qmap_cap;
+ /* disabled */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_DISABLED UINT32_C(0x0)
+ /* enabled */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED UINT32_C(0x1)
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_LAST \
+ HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED
+ uint8_t unused_0;
+} __attribute__((packed));
+
+/* hwrm_queue_qportcfg_output (size:256b/32B) */
+struct hwrm_queue_qportcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * The maximum number of queues that can be configured on this
+ * port.
+ * Valid values range from 1 through 8.
+ */
+ uint8_t max_configurable_queues;
+ /*
+ * The maximum number of lossless queues that can be configured
+ * on this port.
+ * Valid values range from 0 through 8.
+ */
+ uint8_t max_configurable_lossless_queues;
+ /*
+ * Bitmask indicating which queues can be configured by the
+ * hwrm_queue_cfg command.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * # A value of 0 indicates that the queue is not configurable
+ * by the hwrm_queue_cfg command.
+ * # A value of 1 indicates that the queue is configurable.
+ * # A hwrm_queue_cfg command shall return an error when trying
+ * to configure a queue that is not configurable.
+ */
+ uint8_t queue_cfg_allowed;
+ /* Information about queue configuration. */
+ uint8_t queue_cfg_info;
+ /*
+ * If this flag is set to '1', then the queues are
+ * configured asymmetrically on TX and RX sides.
+ * If this flag is set to '0', then the queues are
+ * configured symmetrically on TX and RX sides. For
+ * symmetric configuration, the queue configuration
+ * including queue ids and service profiles on the
+ * TX side is the same as the corresponding queue
+ * configuration on the RX side.
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
+ UINT32_C(0x1)
+ /*
+ * Bitmask indicating which priorities can be configured by the
+ * hwrm_queue_pfcenable_cfg command.
+ *
+ * Each bit represents a specific priority where bit 0 represents
+ * priority 0 and bit 7 represents priority 7.
+ * # A value of 0 indicates that the priority is not configurable by
+ * the hwrm_queue_pfcenable_cfg command.
+ * # A value of 1 indicates that the priority is configurable.
+ * # A hwrm_queue_pfcenable_cfg command shall return an error when
+ * trying to configure a priority that is not configurable.
+ */
+ uint8_t queue_pfcenable_cfg_allowed;
+ /*
+ * Bitmask indicating which queues can be configured by the
+ * hwrm_queue_pri2cos_cfg command.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * # A value of 0 indicates that the queue is not configurable
+ * by the hwrm_queue_pri2cos_cfg command.
+ * # A value of 1 indicates that the queue is configurable.
+ * # A hwrm_queue_pri2cos_cfg command shall return an error when
+ * trying to configure a queue that is not configurable.
+ */
+ uint8_t queue_pri2cos_cfg_allowed;
+ /*
+ * Bitmask indicating which queues can be configured by the
+ * hwrm_queue_cos2bw_cfg command.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * # A value of 0 indicates that the queue is not configurable
+ * by the hwrm_queue_cos2bw_cfg command.
+ * # A value of 1 indicates that the queue is configurable.
+ * # A hwrm_queue_cos2bw_cfg command shall return an error when
+ * trying to configure a queue that is not configurable.
+ */
+ uint8_t queue_cos2bw_cfg_allowed;
+ /*
+ * ID of CoS Queue 0.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
+ */
+ uint8_t queue_id0;
+ /* This value is applicable to CoS queues only. */
+ uint8_t queue_id0_service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless (legacy) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 1.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
+ */
+ uint8_t queue_id1;
+ /* This value is applicable to CoS queues only. */
+ uint8_t queue_id1_service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless (legacy) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 2.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id2; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id2_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 3. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id3; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id3_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 4. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id4; + /* This value is applicable to CoS queues only. 
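+ *
+ * Illustrative sketch only: a driver deciding whether a queried
+ * queue needs lossless buffer provisioning might test, e.g.:
+ *
+ *   lossless = (profile ==
+ *       HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS);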
*/ + uint8_t queue_id4_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 5. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id5; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id5_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 6. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id6; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id6_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... 
(All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 7. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id7; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id7_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************* + * hwrm_queue_qcfg * + *******************/ + + +/* hwrm_queue_qcfg_input (size:192b/24B) */ +struct hwrm_queue_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. 
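+ *
+ * Illustrative sketch only: to query the RX side of a queue, a
+ * caller filling a hwrm_queue_qcfg_input named req might set:
+ *
+ *   req.flags = rte_cpu_to_le_32(HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_RX);
+ *   req.queue_id = rte_cpu_to_le_32(queue_id);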
+ */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_RX
+ /* Queue ID of the queue. */
+ uint32_t queue_id;
+} __attribute__((packed));
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This value is the estimated packet length used in the
+ * TX arbiter.
+ */
+ uint32_t queue_len;
+ /* This value is applicable to CoS queues only. */
+ uint8_t service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LOSSY UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LOSSLESS UINT32_C(0x1)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_UNKNOWN UINT32_C(0xff)
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_UNKNOWN
+ /* Information about queue configuration. */
+ uint8_t queue_cfg_info;
+ /*
+ * If this flag is set to '1', then the queue is
+ * configured asymmetrically on TX and RX sides.
+ * If this flag is set to '0', then this queue is
+ * configured symmetrically on TX and RX sides.
+ */
+ #define HWRM_QUEUE_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
+ UINT32_C(0x1)
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************
+ * hwrm_queue_cfg *
+ ******************/
+
+
+/* hwrm_queue_cfg_input (size:320b/40B) */
+struct hwrm_queue_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX, or both directions applicable to the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
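+ *
+ * Illustrative sketch only: to apply a lossless service profile to
+ * both directions of a queue, a caller might set, e.g.:
+ *
+ *   req.flags = rte_cpu_to_le_32(HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR);
+ *   req.enables =
+ *       rte_cpu_to_le_32(HWRM_QUEUE_CFG_INPUT_ENABLES_SERVICE_PROFILE);
+ *   req.service_profile = HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSLESS;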
+ */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3)
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_SFT 0
+ /* tx path */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ /* Bi-directional (Symmetrically applicable to TX and RX paths) */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR UINT32_C(0x2)
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the dflt_len field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_CFG_INPUT_ENABLES_DFLT_LEN UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the service_profile field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_CFG_INPUT_ENABLES_SERVICE_PROFILE UINT32_C(0x2)
+ /* Queue ID of queue that is to be configured by this function. */
+ uint32_t queue_id;
+ /*
+ * This value is the estimated packet length used in the
+ * TX arbiter.
+ * Set to 0xFF... (All Fs) to not adjust this value.
+ */
+ uint32_t dflt_len;
+ /* This value is applicable to CoS queues only. */
+ uint8_t service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSY UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSLESS UINT32_C(0x1)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_UNKNOWN UINT32_C(0xff)
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_UNKNOWN
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
+struct hwrm_queue_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************************
+ * hwrm_queue_pfcenable_qcfg *
+ *****************************/
+
+
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure PFC on this port.
+ */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
+ /* If set to 1, then PFC is enabled on PRI 0. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI0_PFC_ENABLED \
+ UINT32_C(0x1)
+ /* If set to 1, then PFC is enabled on PRI 1. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI1_PFC_ENABLED \
+ UINT32_C(0x2)
+ /* If set to 1, then PFC is enabled on PRI 2. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI2_PFC_ENABLED \
+ UINT32_C(0x4)
+ /* If set to 1, then PFC is enabled on PRI 3. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI3_PFC_ENABLED \
+ UINT32_C(0x8)
+ /* If set to 1, then PFC is enabled on PRI 4. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI4_PFC_ENABLED \
+ UINT32_C(0x10)
+ /* If set to 1, then PFC is enabled on PRI 5. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI5_PFC_ENABLED \
+ UINT32_C(0x20)
+ /* If set to 1, then PFC is enabled on PRI 6. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI6_PFC_ENABLED \
+ UINT32_C(0x40)
+ /* If set to 1, then PFC is enabled on PRI 7. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI7_PFC_ENABLED \
+ UINT32_C(0x80)
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_queue_pfcenable_cfg *
+ ****************************/
+
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* If set to 1, then PFC is requested to be enabled on PRI 0. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI0_PFC_ENABLED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, then PFC is requested to be enabled on PRI 1.
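+ *
+ * Illustrative sketch only: these flags are one bit per priority,
+ * so a caller holding a PFC priority bitmap can build the word
+ * directly, e.g.:
+ *
+ *   uint32_t flags = 0;
+ *   for (pri = 0; pri < 8; pri++)
+ *       if (pfc_en_map & (1u << pri))
+ *           flags |= UINT32_C(1) << pri;
+ *   req.flags = rte_cpu_to_le_32(flags);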
+ */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI1_PFC_ENABLED \
+ UINT32_C(0x2)
+ /* If set to 1, then PFC is requested to be enabled on PRI 2. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI2_PFC_ENABLED \
+ UINT32_C(0x4)
+ /* If set to 1, then PFC is requested to be enabled on PRI 3. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI3_PFC_ENABLED \
+ UINT32_C(0x8)
+ /* If set to 1, then PFC is requested to be enabled on PRI 4. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI4_PFC_ENABLED \
+ UINT32_C(0x10)
+ /* If set to 1, then PFC is requested to be enabled on PRI 5. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI5_PFC_ENABLED \
+ UINT32_C(0x20)
+ /* If set to 1, then PFC is requested to be enabled on PRI 6. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI6_PFC_ENABLED \
+ UINT32_C(0x40)
+ /* If set to 1, then PFC is requested to be enabled on PRI 7. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI7_PFC_ENABLED \
+ UINT32_C(0x80)
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure PFC on this port.
+ */
+ uint16_t port_id;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_queue_pri2cos_qcfg *
+ ***************************/
+
+
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
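+ *
+ * Illustrative sketch only: to query the TX-side mapping for the
+ * VLAN PRI field in inner packet headers, a caller might set:
+ *
+ *   req.flags = rte_cpu_to_le_32(
+ *       HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX |
+ *       HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN);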
+ */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX
+ /*
+ * When this bit is set to '0', the query is
+ * for VLAN PRI field in tunnel headers.
+ * When this bit is set to '1', the query is
+ * for VLAN PRI field in inner packet headers.
+ */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN UINT32_C(0x2)
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint8_t port_id;
+ uint8_t unused_0[3];
+} __attribute__((packed));
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * CoS Queue assigned to priority 0. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri0_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 1. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri1_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 2. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri2_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 3. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri3_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 4. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri4_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 5. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri5_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 6. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri6_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 7. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri7_cos_queue_id;
+ /* Information about queue configuration. */
+ uint8_t queue_cfg_info;
+ /*
+ * If this flag is set to '1', then the PRI to CoS
+ * configuration is asymmetric on TX and RX sides.
+ * If this flag is set to '0', then PRI to CoS configuration
+ * is symmetric on TX and RX sides.
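+ *
+ * Illustrative sketch only: the eight pri*_cos_queue_id fields are
+ * consecutive bytes, so a driver might collect the map as:
+ *
+ *   const uint8_t *ids = &resp->pri0_cos_queue_id;
+ *   for (pri = 0; pri < 8; pri++)
+ *       if (ids[pri] != 0xff)
+ *           pri2cos[pri] = ids[pri];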
+ */ + #define HWRM_QUEUE_PRI2COS_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \ + UINT32_C(0x1) + uint8_t unused_0[6]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_queue_pri2cos_cfg * + **************************/ + + +/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */ +struct hwrm_queue_pri2cos_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX, or both directions applicable to the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3) + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_SFT 0 + /* tx path */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + /* Bi-directional (Symmetrically applicable to TX and RX paths) */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR UINT32_C(0x2) + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_LAST \ + HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR + /* + * When this bit is set to '0', the mapping is requested + * for VLAN PRI field in tunnel headers. + * When this bit is set to '1', the mapping is requested + * for VLAN PRI field in inner packet headers. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_IVLAN UINT32_C(0x4) + uint32_t enables; + /* + * This bit must be '1' for the pri0_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI0_COS_QUEUE_ID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the pri1_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI1_COS_QUEUE_ID \ + UINT32_C(0x2) + /* + * This bit must be '1' for the pri2_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI2_COS_QUEUE_ID \ + UINT32_C(0x4) + /* + * This bit must be '1' for the pri3_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI3_COS_QUEUE_ID \ + UINT32_C(0x8) + /* + * This bit must be '1' for the pri4_cos_queue_id field to be + * configured. 
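+ *
+ * Illustrative sketch only: each priority being programmed needs
+ * both its enables bit and its queue ID field set, e.g.:
+ *
+ *   req.enables |= rte_cpu_to_le_32(
+ *       HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI4_COS_QUEUE_ID);
+ *   req.pri4_cos_queue_id = cos_queue_id;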
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI4_COS_QUEUE_ID \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the pri5_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI5_COS_QUEUE_ID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the pri6_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI6_COS_QUEUE_ID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the pri7_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI7_COS_QUEUE_ID \
+ UINT32_C(0x80)
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint8_t port_id;
+ /*
+ * CoS Queue assigned to priority 0. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri0_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 1. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri1_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 2. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri2_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 3. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri3_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 4. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri4_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 5. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri5_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 6. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri6_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 7. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri7_cos_queue_id;
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
+struct hwrm_queue_pri2cos_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_queue_cos2bw_qcfg *
+ **************************/
+
+
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
+struct hwrm_queue_cos2bw_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure TC BW assignment on this port. + */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */ +struct hwrm_queue_cos2bw_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* ID of CoS Queue 0. */ + uint8_t queue_id0; + uint8_t unused_0; + uint16_t unused_1; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id0_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. 
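+ *
+ * Illustrative sketch only: the min/max BW words share one layout
+ * (28-bit value, scale bit, 3-bit unit), which a reader might
+ * decode as:
+ *
+ *   uint32_t bw = rte_le_to_cpu_32(resp->queue_id0_max_bw);
+ *   uint32_t val = bw &
+ *       HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK;
+ *   uint32_t unit = (bw &
+ *       HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK)
+ *       >> HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT;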
+ */ + uint32_t queue_id0_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id0_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id0_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id0_bw_weight; + /* ID of CoS Queue 1. */ + uint8_t queue_id1; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id1_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id1_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id1_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id1_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id1_bw_weight; + /* ID of CoS Queue 2. */ + uint8_t queue_id2; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id2_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id2_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id2_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id2_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id2_bw_weight; + /* ID of CoS Queue 3. */ + uint8_t queue_id3; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id3_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id3_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id3_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id3_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id3_bw_weight; + /* ID of CoS Queue 4. */ + uint8_t queue_id4; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id4_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id4_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id4_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id4_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id4_bw_weight; + /* ID of CoS Queue 5. */ + uint8_t queue_id5; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id5_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id5_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id5_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id5_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id5_bw_weight; + /* ID of CoS Queue 6. */ + uint8_t queue_id6; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id6_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id6_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id6_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id6_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id6_bw_weight; + /* ID of CoS Queue 7. */ + uint8_t queue_id7; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id7_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id7_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id7_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id7_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id7_bw_weight; + uint8_t unused_2[4]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************* + * hwrm_queue_cos2bw_cfg * + *************************/ + + +/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */ +struct hwrm_queue_cos2bw_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + uint32_t enables; + /* + * If this bit is set to 1, then all queue_id0 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID0_VALID \ + UINT32_C(0x1) + /* + * If this bit is set to 1, then all queue_id1 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID1_VALID \ + UINT32_C(0x2) + /* + * If this bit is set to 1, then all queue_id2 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID2_VALID \ + UINT32_C(0x4) + /* + * If this bit is set to 1, then all queue_id3 related + * parameters in this command are valid. 
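(Illustrative aside, not part of the patch.) The valid byte closes every HWRM output record: firmware writes it last, so software must treat the rest of the response as undefined until that byte reads 1. A simplified polling sketch under that rule; the driver's real completion path, timeouts and barriers are more involved, and rte_delay_us()/rte_io_rmb() are standard DPDK calls:

    #include <rte_atomic.h>
    #include <rte_cycles.h>

    static int
    wait_cos2bw_resp(volatile struct hwrm_queue_cos2bw_qcfg_output *resp,
                     unsigned int tries)
    {
        while (tries--) {
            if (resp->valid) {
                /* order the flag read before reads of the payload */
                rte_io_rmb();
                return 0;
            }
            rte_delay_us(1);
        }
        return -1; /* timed out */
    }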
+ */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID3_VALID \ + UINT32_C(0x8) + /* + * If this bit is set to 1, then all queue_id4 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID4_VALID \ + UINT32_C(0x10) + /* + * If this bit is set to 1, then all queue_id5 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID5_VALID \ + UINT32_C(0x20) + /* + * If this bit is set to 1, then all queue_id6 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID6_VALID \ + UINT32_C(0x40) + /* + * If this bit is set to 1, then all queue_id7 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID7_VALID \ + UINT32_C(0x80) + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure TC BW assignment on this port. + */ + uint16_t port_id; + /* ID of CoS Queue 0. */ + uint8_t queue_id0; + uint8_t unused_0; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id0_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id0_max_bw; + /* The bandwidth value. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id0_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id0_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id0_bw_weight; + /* ID of CoS Queue 1. */ + uint8_t queue_id1; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id1_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. 
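(Illustrative aside, not part of the patch.) On the request side, each per-queue group of fields is honored only when its COS_QUEUE_IDn_VALID bit is set in enables, and all multi-byte fields are little-endian on the wire. A hypothetical fill helper for queue 0 only, guaranteeing a 25.00% minimum (2500 in PERCENT1_100 units) and scheduling the queue with ETS weight 50; rte_cpu_to_le_16()/rte_cpu_to_le_32() come from rte_byteorder.h:

    #include <string.h>
    #include <rte_byteorder.h>

    static void
    fill_cos2bw_q0(struct hwrm_queue_cos2bw_cfg_input *req,
                   uint16_t port_id, uint8_t cos_queue_id)
    {
        memset(req, 0, sizeof(*req));
        req->port_id = rte_cpu_to_le_16(port_id);
        /* only the queue_id0 parameters below are to be applied */
        req->enables = rte_cpu_to_le_32(
            HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID0_VALID);
        req->queue_id0 = cos_queue_id;
        /* value in bits 27:0, unit code in bits 31:29 */
        req->queue_id0_min_bw = rte_cpu_to_le_32(
            (2500 << HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT) |
            HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100);
        req->queue_id0_tsa_assign =
            HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_ETS;
        req->queue_id0_bw_weight = 50;
        /* req_type, seq_id, resp_addr etc. are assumed to be filled by the
         * driver's common HWRM send path (not shown here) */
    }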
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id1_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id1_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id1_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id1_bw_weight; + /* ID of CoS Queue 2. */ + uint8_t queue_id2; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id2_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id2_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id2_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) /* - * If set to 1 then FW based LLDP agent is enabled and running - * on the port associated with this function. If set to 0 then - * the LLDP agent is not running in the firmware. + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. */ - #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED UINT32_C(0x10) + uint8_t queue_id2_pri_lvl; /* - * If set to 1, then multi-host mode is active for this - * function. If set to 0, then multi-host mode is inactive for - * this function or not applicable for this device. + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. */ - #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST UINT32_C(0x20) - uint8_t mac_address[6]; + uint8_t queue_id2_bw_weight; + /* ID of CoS Queue 3. 
*/ + uint8_t queue_id3; /* - * This value is current MAC address configured for this - * function. A value of 00-00-00-00-00-00 indicates no MAC - * address is currently configured. + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ - uint16_t pci_id; + uint32_t queue_id3_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID /* - * This value is current PCI ID of this function. If ARI is - * enabled, then it is Bus Number (8b):Function Number(8b). - * Otherwise, it is Bus Number (8b):Device Number (4b):Function - * Number(4b). + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ - uint16_t alloc_rsscos_ctx; + uint32_t queue_id3_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id3_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) /* - * The number of RSS/COS contexts currently allocated to the - * function. + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. */ - uint16_t alloc_cmpl_rings; + uint8_t queue_id3_pri_lvl; /* - * The number of completion rings currently allocated to the - * function. This does not include the rings allocated to any - * children functions if any. + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. */ - uint16_t alloc_tx_rings; + uint8_t queue_id3_bw_weight; + /* ID of CoS Queue 4. */ + uint8_t queue_id4; /* - * The number of transmit rings currently allocated to the - * function. This does not include the rings allocated to any - * children functions if any. + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ - uint16_t alloc_rx_rings; + uint32_t queue_id4_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID /* - * The number of receive rings currently allocated to the - * function. This does not include the rings allocated to any - * children functions if any. + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ - uint16_t alloc_l2_ctx; - /* The allocated number of L2 contexts to the function. */ - uint16_t alloc_vnics; - /* The allocated number of vnics to the function. */ - uint16_t mtu; + uint32_t queue_id4_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id4_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) /* - * The maximum transmission unit of the function. For rings - * allocated on this function, this default value is used if - * ring MTU is not specified. + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. */ - uint16_t mru; + uint8_t queue_id4_pri_lvl; /* - * The maximum receive unit of the function. For vnics allocated - * on this function, this default value is used if vnic MRU is - * not specified. + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. */ - uint16_t stat_ctx_id; - /* The statistics context assigned to a function. */ - uint8_t port_partition_type; + uint8_t queue_id4_bw_weight; + /* ID of CoS Queue 5. */ + uint8_t queue_id5; /* - * The HWRM shall return Unknown value for this field when this - * command is used to query VF's configuration. + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ - /* Single physical function */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_SPF UINT32_C(0x0) - /* Multiple physical functions */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_MPFS UINT32_C(0x1) - /* Network Partitioning 1.0 */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 UINT32_C(0x2) - /* Network Partitioning 1.5 */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5 UINT32_C(0x3) - /* Network Partitioning 2.0 */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 UINT32_C(0x4) - /* Unknown */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN UINT32_C(0xff) - uint8_t port_pf_cnt; + uint32_t queue_id5_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. 
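/*
 * Editorial sketch, not part of the generated header: per the
 * tsa_assign / pri_lvl / bw_weight comments above, a CoS queue is made
 * strict-priority by writing TSA_ASSIGN_SP plus a priority level in the
 * valid 0..7 range, or weighted by writing TSA_ASSIGN_ETS plus a
 * bw_weight. Queue 4 is used for illustration; the request struct tag
 * is assumed from the HWRM_QUEUE_COS2BW_CFG_INPUT_* macro prefix.
 */
static inline void
bnxt_cos_q4_strict_pri(struct hwrm_queue_cos2bw_cfg_input *req, uint8_t lvl)
{
	req->queue_id4_tsa_assign =
		HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_SP;
	req->queue_id4_pri_lvl = lvl;	/* 0..7 valid, 8..255 reserved */
}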
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID /* - * This field will indicate number of physical functions on this - * port_partition. HWRM shall return unavail (i.e. value of 0) - * for this field when this command is used to query VF's - * configuration or from older firmware that doesn't support - * this field. + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ - /* number of PFs is not available */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0) - uint16_t dflt_vnic_id; - /* The default VNIC ID assigned to a function that is being queried. */ - uint16_t max_mtu_configured; - /* - * This value specifies the MAX MTU that can be configured by - * host drivers. This 'max_mtu_configure' can be HW max MTU or - * OEM applications specified value. Host drivers can't - * configure the MTU greater than this value. Host drivers - * should read this value prior to configuring the MTU. FW will - * fail the host request with MTU greater than - * 'max_mtu_configured'. - */ - uint32_t min_bw; - /* - * Minimum BW allocated for this function. The HWRM will - * translate this value into byte counter and time interval used - * for the scheduler inside the device. A value of 0 indicates - * the minimum bandwidth is not configured. + uint32_t queue_id5_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id5_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id5_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id5_bw_weight; + /* ID of CoS Queue 6. */ + uint8_t queue_id6; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ + uint32_t queue_id6_min_bw; /* The bandwidth value. */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK UINT32_C(0xfffffff) - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT 0 - /* The granularity of the value (bits or bytes). */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE UINT32_C(0x10000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE \ + UINT32_C(0x10000000) /* Value is in bits. */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BITS (UINT32_C(0x0) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) /* Value is in bytes. 
*/ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES \ (UINT32_C(0x1) << 28) - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_LAST \ - FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES /* bw_value_unit is 3 b */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK \ UINT32_C(0xe0000000) - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT 29 - /* Value is in Mb or MB (base 10). */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MEGA \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA \ (UINT32_C(0x0) << 29) - /* Value is in Kb or KB (base 10). */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_KILO \ + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO \ (UINT32_C(0x2) << 29) /* Value is in bits or bytes. */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_BASE \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE \ (UINT32_C(0x4) << 29) - /* Value is in Gb or GB (base 10). */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_GIGA \ + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA \ (UINT32_C(0x6) << 29) /* Value is in 1/100th of a percentage of total bandwidth. */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ (UINT32_C(0x1) << 29) /* Invalid unit */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID \ (UINT32_C(0x7) << 29) - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \ - FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID - uint32_t max_bw; + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID /* - * Maximum BW allocated for this function. The HWRM will - * translate this value into byte counter and time interval used - * for the scheduler inside the device. A value of 0 indicates - * that the maximum bandwidth is not configured. + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ + uint32_t queue_id6_max_bw; /* The bandwidth value. */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK UINT32_C(0xfffffff) - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT 0 - /* The granularity of the value (bits or bytes). */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE UINT32_C(0x10000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE \ + UINT32_C(0x10000000) /* Value is in bits. */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) /* Value is in bytes. 
*/ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES \ (UINT32_C(0x1) << 28) - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_LAST \ - FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES /* bw_value_unit is 3 b */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK \ UINT32_C(0xe0000000) - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 - /* Value is in Mb or MB (base 10). */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA \ (UINT32_C(0x0) << 29) - /* Value is in Kb or KB (base 10). */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO \ (UINT32_C(0x2) << 29) /* Value is in bits or bytes. */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE \ (UINT32_C(0x4) << 29) - /* Value is in Gb or GB (base 10). */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA \ (UINT32_C(0x6) << 29) /* Value is in 1/100th of a percentage of total bandwidth. */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ (UINT32_C(0x1) << 29) /* Invalid unit */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID \ (UINT32_C(0x7) << 29) - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \ - FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID - uint8_t evb_mode; + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id6_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) /* - * This value indicates the Edge virtual bridge mode for the - * domain that this function belongs to. + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id6_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id6_bw_weight; + /* ID of CoS Queue 7. */ + uint8_t queue_id7; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id7_min_bw; + /* The bandwidth value. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id7_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id7_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id7_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id7_bw_weight; + uint8_t unused_1[5]; +} __attribute__((packed)); + +/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */ +struct hwrm_queue_cos2bw_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************* + * hwrm_queue_dscp_qcaps * + *************************/ + + +/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */ +struct hwrm_queue_dscp_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. 
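/*
 * Editorial sketch, not part of the generated header: every *_min_bw /
 * *_max_bw field above packs a 28-bit bandwidth value, a one-bit scale
 * (bits vs. bytes) and a 3-bit unit into one uint32_t. Composing the
 * queue 7 maximum-bandwidth word in megabits could look like this; the
 * function name and the megabit input are assumptions for illustration.
 */
static inline uint32_t
bnxt_cos_q7_max_bw_mbps(uint32_t mbps)
{
	uint32_t bw = 0;

	/* Bits 0..27: the bandwidth value itself. */
	bw |= (mbps << HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_SFT) &
	      HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_MASK;
	/* Bit 28 clear: the value counts bits rather than bytes. */
	bw |= HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BITS;
	/* Bits 29..31: base-10 mega units, i.e. Mb given the bits scale. */
	bw |= HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA;
	return bw;
}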
+ */ + uint8_t port_id; + uint8_t unused_0[7]; +} __attribute__((packed)); + +/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */ +struct hwrm_queue_dscp_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The number of bits provided by the hardware for the DSCP value. */ + uint8_t num_dscp_bits; + uint8_t unused_0; + /* Max number of DSCP-MASK-PRI entries supported. */ + uint16_t max_entries; + uint8_t unused_1[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/**************************** + * hwrm_queue_dscp2pri_qcfg * + ****************************/ + + +/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */ +struct hwrm_queue_dscp2pri_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This is the host address where the 24-bits DSCP-MASK-PRI + * tuple(s) will be copied to. */ - /* No Edge Virtual Bridging (EVB) */ - #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_NO_EVB UINT32_C(0x0) - /* Virtual Ethernet Bridge (VEB) */ - #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1) - /* Virtual Ethernet Port Aggregator (VEPA) */ - #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2) - uint8_t unused_0; - uint16_t alloc_vfs; + uint64_t dest_data_addr; /* - * The number of VFs that are allocated to the function. This is - * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if - * this command is called on a PF with SR-IOV disabled or on a - * VF. + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. */ - uint32_t alloc_mcast_filters; + uint8_t port_id; + uint8_t unused_0; + /* Size of the buffer pointed to by dest_data_addr. */ + uint16_t dest_data_buffer_size; + uint8_t unused_1[4]; +} __attribute__((packed)); + +/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */ +struct hwrm_queue_dscp2pri_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. 
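/*
 * Editorial sketch, not part of the generated header: as the 'valid'
 * field comment above says, a response is only complete once its last
 * byte reads '1', so callers poll it before touching the payload.
 * rte_io_rmb() and rte_delay_us() are DPDK helpers (rte_atomic.h /
 * rte_cycles.h); the retry budget is an assumption.
 */
static inline int
bnxt_resp_ready(volatile struct hwrm_queue_dscp_qcaps_output *resp,
		unsigned int tries)
{
	while (tries--) {
		if (resp->valid == 1) {
			/* Order payload reads after the 'valid' check. */
			rte_io_rmb();
			return 0;
		}
		rte_delay_us(1);
	}
	return -1;	/* firmware never marked the response complete */
}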
*/ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; /* - * The number of allocated multicast filters for this function - * on the RX side. + * A count of the number of DSCP-MASK-PRI tuple(s) pointed to + * by the dest_data_addr. */ - uint32_t alloc_hw_ring_grps; - /* The number of allocated HW ring groups for this function. */ - uint16_t alloc_sp_tx_rings; + uint16_t entry_cnt; /* - * The number of strict priority transmit rings out of currently - * allocated TX rings to the function (alloc_tx_rings). + * This is the default PRI which un-initialized DSCP values are + * mapped to. */ - uint8_t unused_1; - uint8_t valid; + uint8_t default_pri; + uint8_t unused_0[4]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_func_vlan_qcfg */ -/* - * Description: This command should be called by PF driver to get the current - * C-TAG, S-TAG and correcponsing PCP and TPID values configured for the - * function. - */ -/* Input (24 bytes) */ -struct hwrm_func_vlan_qcfg_input { - uint16_t req_type; +/*************************** + * hwrm_queue_dscp2pri_cfg * + ***************************/ + + +/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */ +struct hwrm_queue_dscp2pri_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t fid; + uint64_t resp_addr; /* - * Function ID of the function that is being configured. If set - * to 0xFF... (All Fs), then the configuration is for the - * requesting function. + * This is the host address where the 24-bits DSCP-MASK-PRI tuple + * will be copied from. */ - uint16_t unused_0[3]; -}; - -/* Output (40 bytes) */ -struct hwrm_func_vlan_qcfg_output { - uint16_t error_code; + uint64_t src_data_addr; + uint32_t flags; + /* use_hw_default_pri is 1 b */ + #define HWRM_QUEUE_DSCP2PRI_CFG_INPUT_FLAGS_USE_HW_DEFAULT_PRI \ + UINT32_C(0x1) + uint32_t enables; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This bit must be '1' for the default_pri field to be + * configured. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; - /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. - */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; - /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. - */ - uint16_t stag_vid; - /* S-TAG VLAN identifier configured for the function. */ - uint8_t stag_pcp; - /* S-TAG PCP value configured for the function. */ - uint8_t unused_4; - uint16_t stag_tpid; + #define HWRM_QUEUE_DSCP2PRI_CFG_INPUT_ENABLES_DEFAULT_PRI \ + UINT32_C(0x1) /* - * S-TAG TPID value configured for the function. This field is - * specified in network byte order. + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. */ - uint16_t ctag_vid; - /* C-TAG VLAN identifier configured for the function. */ - uint8_t ctag_pcp; - /* C-TAG PCP value configured for the function. */ - uint8_t unused_5; - uint16_t ctag_tpid; + uint8_t port_id; /* - * C-TAG TPID value configured for the function. This field is - * specified in network byte order. + * This is the default PRI which un-initialized DSCP values will be + * mapped to. */ - uint32_t rsvd2; - /* Future use. */ - uint32_t rsvd3; - /* Future use. */ - uint32_t unused_6; -}; + uint8_t default_pri; + /* + * A count of the number of DSCP-MASK-PRI tuple(s) in the data pointed + * to by src_data_addr. + */ + uint16_t entry_cnt; + uint8_t unused_0[4]; +} __attribute__((packed)); -/* hwrm_func_vlan_cfg */ -/* - * Description: This command allows PF driver to configure C-TAG, S-TAG and - * corresponding PCP and TPID values for a function. - */ -/* Input (48 bytes) */ -struct hwrm_func_vlan_cfg_input { - uint16_t req_type; +/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */ +struct hwrm_queue_dscp2pri_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. 
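/*
 * Editorial sketch, not part of the generated header: populating a
 * dscp2pri_cfg request. 'tuple_pa', the IOVA of a buffer holding 'n'
 * DSCP-MASK-PRI tuples, and the choice of default priority 0 are
 * assumptions; rte_cpu_to_le_*() come from DPDK's rte_byteorder.h.
 */
static inline void
bnxt_fill_dscp2pri_req(struct hwrm_queue_dscp2pri_cfg_input *req,
		       uint64_t tuple_pa, uint16_t n, uint8_t port_id)
{
	req->src_data_addr = rte_cpu_to_le_64(tuple_pa);
	req->entry_cnt = rte_cpu_to_le_16(n);
	req->port_id = port_id;
	/* Map DSCP values not covered by any tuple to priority 0. */
	req->default_pri = 0;
	req->enables =
		rte_cpu_to_le_32(HWRM_QUEUE_DSCP2PRI_CFG_INPUT_ENABLES_DEFAULT_PRI);
}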
*/ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t cmpl_ring; + uint8_t valid; +} __attribute__((packed)); + +/******************* + * hwrm_vnic_alloc * + *******************/ + + +/* hwrm_vnic_alloc_input (size:192b/24B) */ +struct hwrm_vnic_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t cmpl_ring; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t resp_addr; + uint16_t seq_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t fid; - /* - * Function ID of the function that is being configured. If set - * to 0xFF... (All Fs), then the configuration is for the - * requesting function. - */ - uint8_t unused_0; - uint8_t unused_1; - uint32_t enables; - /* This bit must be '1' for the stag_vid field to be configured. */ - #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1) - /* This bit must be '1' for the ctag_vid field to be configured. */ - #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2) - /* This bit must be '1' for the stag_pcp field to be configured. */ - #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4) - /* This bit must be '1' for the ctag_pcp field to be configured. */ - #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8) - /* This bit must be '1' for the stag_tpid field to be configured. */ - #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10) - /* This bit must be '1' for the ctag_tpid field to be configured. */ - #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20) - uint16_t stag_vid; - /* S-TAG VLAN identifier configured for the function. */ - uint8_t stag_pcp; - /* S-TAG PCP value configured for the function. */ - uint8_t unused_2; - uint16_t stag_tpid; + uint16_t target_id; /* - * S-TAG TPID value configured for the function. This field is - * specified in network byte order. 
+ * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t ctag_vid; - /* C-TAG VLAN identifier configured for the function. */ - uint8_t ctag_pcp; - /* C-TAG PCP value configured for the function. */ - uint8_t unused_3; - uint16_t ctag_tpid; + uint64_t resp_addr; + uint32_t flags; /* - * C-TAG TPID value configured for the function. This field is - * specified in network byte order. + * When this bit is '1', this VNIC is requested to + * be the default VNIC for this function. */ - uint32_t rsvd1; - /* Future use. */ - uint32_t rsvd2; - /* Future use. */ - uint32_t unused_4; -}; + #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1) + uint8_t unused_0[4]; +} __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_func_vlan_cfg_output { - uint16_t error_code; +/* hwrm_vnic_alloc_output (size:128b/16B) */ +struct hwrm_vnic_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Logical vnic ID */ + uint32_t vnic_id; + uint8_t unused_0[3]; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint8_t valid; +} __attribute__((packed)); + +/****************** + * hwrm_vnic_free * + ******************/ + + +/* hwrm_vnic_free_input (size:192b/24B) */ +struct hwrm_vnic_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. 
+ * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ -}; + uint64_t resp_addr; + /* Logical vnic ID */ + uint32_t vnic_id; + uint8_t unused_0[4]; +} __attribute__((packed)); -/* hwrm_func_cfg */ -/* - * Description: This command allows configuration of a PF by the corresponding - * PF driver. This command also allows configuration of a child VF by its parent - * PF driver. The input FID value is used to indicate what function is being - * configured. This allows a PF driver to configure the PF owned by itself or a - * virtual function that is a child of the PF. This command allows to reserve - * resources for a VF by its parent PF. To reverse the process, the command - * should be called with all enables flags cleared for resources. This will free - * allocated resources for the VF and return them to the resource pool. If this - * command is requested by a VF driver to configure or reserve resources, then - * the HWRM shall fail this command. If default MAC address and/or VLAN are - * provided in this command, then the HWRM shall set up appropriate MAC/VLAN - * filters for the function that is being configured. If source properties - * checks are enabled and default MAC address and/or IP address are provided in - * this command, then the HWRM shall set appropriate source property checks - * based on provided MAC and/or IP addresses. The parent PF driver should not - * set MTU/MRU for a VF using this command. This is to allow MTU/MRU setting by - * the VF driver. If the MTU or MRU for a VF is set by the PF driver, then the - * HWRM should ignore it. A function's MTU/MRU should be set prior to allocating - * RX VNICs or TX rings. A PF driver calls hwrm_func_cfg to allocate resources - * for itself or its children VFs. All function drivers shall call hwrm_func_cfg - * to reserve resources. A request to hwrm_func_cfg may not be fully granted; - * that is, a request for resources may be larger than what can be supported by - * the device and the HWRM will allocate the best set of resources available, - * but that may be less than requested. If all the amounts requested could not - * be fulfilled, the HWRM shall allocate what it could and return a status code - * of success. A function driver should call hwrm_func_qcfg immediately after - * hwrm_func_cfg to determine what resources were assigned to the configured - * function. A call by a PF driver to hwrm_func_cfg to allocate resources for - * itself shall only allocate resources for the PF driver to use, not for its - * children VFs. Likewise, a call to hwrm_func_qcfg shall return the resources - * available for the PF driver to use, not what is available to its children - * VFs. - */ -/* Input (88 bytes) */ -struct hwrm_func_cfg_input { - uint16_t req_type; +/* hwrm_vnic_free_output (size:128b/16B) */ +struct hwrm_vnic_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. 
+ * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t cmpl_ring; + uint8_t valid; +} __attribute__((packed)); + +/***************** + * hwrm_vnic_cfg * + *****************/ + + +/* hwrm_vnic_cfg_input (size:320b/40B) */ +struct hwrm_vnic_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t cmpl_ring; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t resp_addr; + uint16_t seq_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t fid; + uint16_t target_id; /* - * Function ID of the function that is being configured. If set - * to 0xFF... (All Fs), then the configuration is for the - * requesting function. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint8_t unused_0; - uint8_t unused_1; - uint32_t flags; + uint64_t resp_addr; + uint32_t flags; /* - * When this bit is '1', the function is disabled with source - * MAC address check. This is an anti-spoofing check. If this - * flag is set, then the function shall be configured to - * disallow transmission of frames with the source MAC address - * that is configured for this function. + * When this bit is '1', the VNIC is requested to + * be the default VNIC for the function. */ - #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE \ + #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT \ UINT32_C(0x1) /* - * When this bit is '1', the function is enabled with source MAC - * address check. This is an anti-spoofing check. If this flag - * is set, then the function shall be configured to allow - * transmission of frames with the source MAC address that is - * configured for this function. + * When this bit is '1', the VNIC is being configured to + * strip VLAN in the RX path. + * If set to '0', then VLAN stripping is disabled on + * this VNIC. 
*/ - #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE \ + #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE \ UINT32_C(0x2) - /* reserved */ - #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_MASK UINT32_C(0x1fc) - #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_SFT 2 /* - * Standard TX Ring mode is used for the allocation of TX ring - * and underlying scheduling resources that allow bandwidth - * reservation and limit settings on the queried function. If - * set to 1, then standard TX ring mode is requested to be - * enabled on the function being configured. + * When this bit is '1', the VNIC is being configured to + * buffer receive packets in the hardware until the host + * posts new receive buffers. + * If set to '0', then bd_stall is being configured to be + * disabled on this VNIC. */ - #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE \ - UINT32_C(0x200) - /* - * Standard TX Ring mode is used for the allocation of TX ring - * and underlying scheduling resources that allow bandwidth - * reservation and limit settings on the queried function. If - * set to 1, then the standard TX ring mode is requested to be - * disabled on the function being configured. In this extended - * TX ring resource mode, the minimum and maximum bandwidth - * settings are not supported to allow the allocation of TX - * rings to span multiple scheduler nodes. - */ - #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE \ - UINT32_C(0x400) + #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE \ + UINT32_C(0x4) /* - * If this bit is set, virtual mac address configured in this - * command will be persistent over warm boot. + * When this bit is '1', the VNIC is being configured to + * receive both RoCE and non-RoCE traffic. + * If set to '0', then this VNIC is not configured to be + * operating in dual VNIC mode. */ - #define HWRM_FUNC_CFG_INPUT_FLAGS_VIRT_MAC_PERSIST UINT32_C(0x800) + #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE \ + UINT32_C(0x8) /* - * This bit only applies to the VF. If this bit is set, the - * statistic context counters will not be cleared when the - * statistic context is freed or a function reset is called on - * VF. This bit will be cleared when the PF is unloaded or a - * function reset is called on the PF. + * When this flag is set to '1', the VNIC is requested to + * be configured to receive only RoCE traffic. + * If this flag is set to '0', then this flag shall be + * ignored by the HWRM. + * If roce_dual_vnic_mode flag is set to '1' + * or roce_mirroring_capable_vnic_mode flag to 1, + * then the HWRM client shall not set this flag to '1'. */ - #define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \ - UINT32_C(0x1000) + #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE \ + UINT32_C(0x10) + /* + * When a VNIC uses one destination ring group for certain + * application (e.g. Receive Flow Steering) where + * exact match is used to direct packets to a VNIC with one + * destination ring group only, there is no need to configure + * RSS indirection table for that VNIC as only one destination + * ring group is used. + * + * This flag is used to enable a mode where + * RSS is enabled in the VNIC using a RSS context + * for computing RSS hash but the RSS indirection table is + * not configured using hwrm_vnic_rss_cfg. + * + * If this mode is enabled, then the driver should not program + * RSS indirection table for the RSS context that is used for + * computing RSS hash only. 
+ */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE \ + UINT32_C(0x20) /* - * This bit requests that the firmware test to see if all the - * assets requested in this command (i.e. number of TX rings) - * are available. The firmware will return an error if the - * requested assets are not available. The firwmare will NOT - * reserve the assets if they are available. + * When this bit is '1', the VNIC is being configured to + * receive both RoCE and non-RoCE traffic, but forward only the + * RoCE traffic further. Also, RoCE traffic can be mirrored to + * L2 driver. */ - #define HWRM_FUNC_CFG_INPUT_FLAGS_TX_ASSETS_TEST UINT32_C(0x2000) - uint32_t enables; - /* This bit must be '1' for the mtu field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_MTU UINT32_C(0x1) - /* This bit must be '1' for the mru field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_MRU UINT32_C(0x2) + #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \ + UINT32_C(0x40) + uint32_t enables; /* - * This bit must be '1' for the num_rsscos_ctxs field to be + * This bit must be '1' for the dflt_ring_grp field to be * configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS UINT32_C(0x4) + #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP \ + UINT32_C(0x1) /* - * This bit must be '1' for the num_cmpl_rings field to be + * This bit must be '1' for the rss_rule field to be * configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS UINT32_C(0x8) - /* This bit must be '1' for the num_tx_rings field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS UINT32_C(0x10) - /* This bit must be '1' for the num_rx_rings field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS UINT32_C(0x20) - /* This bit must be '1' for the num_l2_ctxs field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS UINT32_C(0x40) - /* This bit must be '1' for the num_vnics field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS UINT32_C(0x80) + #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE \ + UINT32_C(0x2) /* - * This bit must be '1' for the num_stat_ctxs field to be + * This bit must be '1' for the cos_rule field to be * configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS UINT32_C(0x100) + #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE \ + UINT32_C(0x4) /* - * This bit must be '1' for the dflt_mac_addr field to be + * This bit must be '1' for the lb_rule field to be * configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x200) - /* This bit must be '1' for the dflt_vlan field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN UINT32_C(0x400) - /* This bit must be '1' for the dflt_ip_addr field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_IP_ADDR UINT32_C(0x800) - /* This bit must be '1' for the min_bw field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_MIN_BW UINT32_C(0x1000) - /* This bit must be '1' for the max_bw field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW UINT32_C(0x2000) + #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE \ + UINT32_C(0x8) /* - * This bit must be '1' for the async_event_cr field to be + * This bit must be '1' for the mru field to be * configured. 
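/*
 * Editorial sketch, not part of the generated header: turning on the
 * "RSS context without an indirection table" mode described above,
 * together with RX VLAN stripping. The flag names are taken from this
 * file; the rest of the request setup is assumed to happen elsewhere.
 */
static inline void
bnxt_set_vnic_flags(struct hwrm_vnic_cfg_input *cfg)
{
	cfg->flags = rte_cpu_to_le_32(
		HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE |
		HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
}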
*/ - #define HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4000) + #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU \ + UINT32_C(0x10) /* - * This bit must be '1' for the vlan_antispoof_mode field to be + * This bit must be '1' for the default_rx_ring_id field to be * configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE UINT32_C(0x8000) + #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID \ + UINT32_C(0x20) /* - * This bit must be '1' for the allowed_vlan_pris field to be + * This bit must be '1' for the default_cmpl_ring_id field to be * configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_ALLOWED_VLAN_PRIS UINT32_C(0x10000) - /* This bit must be '1' for the evb_mode field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE UINT32_C(0x20000) + #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID \ + UINT32_C(0x40) + /* Logical vnic ID */ + uint16_t vnic_id; /* - * This bit must be '1' for the num_mcast_filters field to be - * configured. + * Default Completion ring for the VNIC. This ring will + * be chosen if packet does not match any RSS rules and if + * there is no COS rule. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MCAST_FILTERS UINT32_C(0x40000) + uint16_t dflt_ring_grp; /* - * This bit must be '1' for the num_hw_ring_grps field to be - * configured. + * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if + * there is no RSS rule. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS UINT32_C(0x80000) - uint16_t mtu; + uint16_t rss_rule; /* - * The maximum transmission unit of the function. The HWRM - * should make sure that the mtu of the function does not exceed - * the mtu of the physical port that this function is associated - * with. In addition to configuring mtu per function, it is - * possible to configure mtu per transmit ring. By default, the - * mtu of each transmit ring associated with a function is equal - * to the mtu of the function. The HWRM should make sure that - * the mtu of each transmit ring that is assigned to a function - * has a valid mtu. + * RSS ID for COS rule/table structure. 0xFF... (All Fs) if + * there is no COS rule. */ - uint16_t mru; + uint16_t cos_rule; /* - * The maximum receive unit of the function. The HWRM should - * make sure that the mru of the function does not exceed the - * mru of the physical port that this function is associated - * with. In addition to configuring mru per function, it is - * possible to configure mru per vnic. By default, the mru of - * each vnic associated with a function is equal to the mru of - * the function. The HWRM should make sure that the mru of each - * vnic that is assigned to a function has a valid mru. + * RSS ID for load balancing rule/table structure. + * 0xFF... (All Fs) if there is no LB rule. */ - uint16_t num_rsscos_ctxs; - /* The number of RSS/COS contexts requested for the function. */ - uint16_t num_cmpl_rings; + uint16_t lb_rule; /* - * The number of completion rings requested for the function. - * This does not include the rings allocated to any children - * functions if any. + * The maximum receive unit of the vnic. + * Each vnic is associated with a function. + * The vnic mru value overwrites the mru setting of the + * associated function. + * The HWRM shall make sure that vnic mru does not exceed + * the mru of the port the function is associated with. */ - uint16_t num_tx_rings; + uint16_t mru; /* - * The number of transmit rings requested for the function. This - * does not include the rings allocated to any children - * functions if any. 
+ * Default Rx ring for the VNIC. This ring will + * be chosen if packet does not match any RSS rules. + * The aggregation ring associated with the Rx ring is + * implied based on the Rx ring specified when the + * aggregation ring was allocated. */ - uint16_t num_rx_rings; + uint16_t default_rx_ring_id; /* - * The number of receive rings requested for the function. This - * does not include the rings allocated to any children - * functions if any. + * Default completion ring for the VNIC. This ring will + * be chosen if packet does not match any RSS rules. */ - uint16_t num_l2_ctxs; - /* The requested number of L2 contexts for the function. */ - uint16_t num_vnics; - /* The requested number of vnics for the function. */ - uint16_t num_stat_ctxs; - /* The requested number of statistic contexts for the function. */ - uint16_t num_hw_ring_grps; + uint16_t default_cmpl_ring_id; +} __attribute__((packed)); + +/* hwrm_vnic_cfg_output (size:128b/16B) */ +struct hwrm_vnic_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * The number of HW ring groups that should be reserved for this - * function. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint8_t dflt_mac_addr[6]; - /* The default MAC address for the function being configured. */ - uint16_t dflt_vlan; + uint8_t valid; +} __attribute__((packed)); + +/****************** + * hwrm_vnic_qcfg * + ******************/ + + +/* hwrm_vnic_qcfg_input (size:256b/32B) */ +struct hwrm_vnic_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * The default VLAN for the function being configured. This - * field's format is same as 802.1Q Tag's Tag Control - * Information (TCI) format that includes both Priority Code - * Point (PCP) and VLAN Identifier (VID). + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint32_t dflt_ip_addr[4]; + uint16_t cmpl_ring; /* - * The default IP address for the function being configured. - * This address is only used in enabling source property check. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t min_bw; + uint16_t seq_id; /* - * Minimum BW allocated for this function. The HWRM will - * translate this value into byte counter and time interval used - * for the scheduler inside the device. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - /* The bandwidth value. */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_MASK UINT32_C(0xfffffff) - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_SFT 0 - /* The granularity of the value (bits or bytes). */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE UINT32_C(0x10000000) - /* Value is in bits. 
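[Reviewer note, not part of the patch] As a reading aid for the regenerated hwrm_vnic_cfg pair above: a minimal sketch of how a caller might fill the new input layout, where each optional field is honored only when its enables bit is set. The HWRM_VNIC_CFG request-type constant and the hsi_struct_def_dpdk.h include are assumptions based on the surrounding header.

#include <string.h>
#include "hsi_struct_def_dpdk.h"	/* assumed: the header this patch regenerates */

/* Illustrative only: program the VNIC's MRU and default rings. */
static void
fill_vnic_cfg(struct hwrm_vnic_cfg_input *req, uint16_t vnic_id,
	      uint16_t rx_ring_id, uint16_t cmpl_ring_id, uint16_t mru)
{
	memset(req, 0, sizeof(*req));
	req->req_type = HWRM_VNIC_CFG;	/* assumed request-type constant */
	req->vnic_id = vnic_id;
	/* Firmware reads a field only when its enables bit is '1'. */
	req->enables = HWRM_VNIC_CFG_INPUT_ENABLES_MRU |
		       HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
		       HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
	req->mru = mru;				/* must not exceed the port MRU */
	req->default_rx_ring_id = rx_ring_id;	/* used when no RSS rule matches */
	req->default_cmpl_ring_id = cmpl_ring_id;
}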
*/ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BITS (UINT32_C(0x0) << 28) - /* Value is in bytes. */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES (UINT32_C(0x1) << 28) - #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_LAST \ - FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES - /* bw_value_unit is 3 b */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MASK \ - UINT32_C(0xe0000000) - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_SFT 29 - /* Value is in Mb or MB (base 10). */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MEGA \ - (UINT32_C(0x0) << 29) - /* Value is in Kb or KB (base 10). */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_KILO \ - (UINT32_C(0x2) << 29) - /* Value is in bits or bytes. */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_BASE \ - (UINT32_C(0x4) << 29) - /* Value is in Gb or GB (base 10). */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_GIGA \ - (UINT32_C(0x6) << 29) - /* Value is in 1/100th of a percentage of total bandwidth. */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ - (UINT32_C(0x1) << 29) - /* Invalid unit */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID \ - (UINT32_C(0x7) << 29) - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_LAST \ - FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID - uint32_t max_bw; + uint16_t target_id; /* - * Maximum BW allocated for this function. The HWRM will - * translate this value into byte counter and time interval used - * for the scheduler inside the device. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - /* The bandwidth value. */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_MASK \ - UINT32_C(0xfffffff) - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_SFT 0 - /* The granularity of the value (bits or bytes). */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE UINT32_C(0x10000000) - /* Value is in bits. */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28) - /* Value is in bytes. */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES (UINT32_C(0x1) << 28) - #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_LAST \ - FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES - /* bw_value_unit is 3 b */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \ - UINT32_C(0xe0000000) - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 - /* Value is in Mb or MB (base 10). */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ - (UINT32_C(0x0) << 29) - /* Value is in Kb or KB (base 10). */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \ - (UINT32_C(0x2) << 29) - /* Value is in bits or bytes. */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \ - (UINT32_C(0x4) << 29) - /* Value is in Gb or GB (base 10). */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ - (UINT32_C(0x6) << 29) - /* Value is in 1/100th of a percentage of total bandwidth. */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ - (UINT32_C(0x1) << 29) - /* Invalid unit */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ - (UINT32_C(0x7) << 29) - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \ - FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID - uint16_t async_event_cr; + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the vf_id_valid field to be + * configured. 
+ */ + #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1) + /* Logical vnic ID */ + uint32_t vnic_id; + /* ID of Virtual Function whose VNIC resource is being queried. */ + uint16_t vf_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_vnic_qcfg_output (size:256b/32B) */ +struct hwrm_vnic_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Default Completion ring for the VNIC. */ + uint16_t dflt_ring_grp; + /* + * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if + * there is no RSS rule. + */ + uint16_t rss_rule; + /* + * RSS ID for COS rule/table structure. 0xFF... (All Fs) if + * there is no COS rule. + */ + uint16_t cos_rule; + /* + * RSS ID for load balancing rule/table structure. + * 0xFF... (All Fs) if there is no LB rule. + */ + uint16_t lb_rule; + /* The maximum receive unit of the vnic. */ + uint16_t mru; + uint8_t unused_0[2]; + uint32_t flags; /* - * ID of the target completion ring for receiving asynchronous - * event completions. If this field is not valid, then the HWRM - * shall use the default completion ring of the function that is - * being configured as the target completion ring for providing - * any asynchronous event completions for that function. If this - * field is valid, then the HWRM shall use the completion ring - * identified by this ID as the target completion ring for - * providing any asynchronous event completions for the function - * that is being configured. - */ - uint8_t vlan_antispoof_mode; - /* VLAN Anti-spoofing mode. */ - /* No VLAN anti-spoofing checks are enabled */ - #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK UINT32_C(0x0) - /* Validate VLAN against the configured VLAN(s) */ - #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN \ + * When this bit is '1', the VNIC is the default VNIC for + * the function. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT \ UINT32_C(0x1) - /* Insert VLAN if it does not exist, otherwise discard */ - #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE \ + /* + * When this bit is '1', the VNIC is configured to + * strip VLAN in the RX path. + * If set to '0', then VLAN stripping is disabled on + * this VNIC. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE \ UINT32_C(0x2) /* - * Insert VLAN if it does not exist, override - * VLAN if it exists + * When this bit is '1', the VNIC is configured to + * buffer receive packets in the hardware until the host + * posts new receive buffers. + * If set to '0', then bd_stall is disabled on + * this VNIC. */ - #define \ - HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN \ - UINT32_C(0x3) - uint8_t allowed_vlan_pris; + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE \ + UINT32_C(0x4) /* - * This bit field defines VLAN PRIs that are allowed on this - * function. If nth bit is set, then VLAN PRI n is allowed on - * this function. + * When this bit is '1', the VNIC is configured to + * receive both RoCE and non-RoCE traffic. + * If set to '0', then this VNIC is not configured to + * operate in dual VNIC mode. */ - uint8_t evb_mode; + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE \ + UINT32_C(0x8) /* - * The HWRM shall allow a PF driver to change EVB mode for the - * partition it belongs to. 
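[Reviewer note, not part of the patch] The vf_id_valid gate above works the same way as the enables bits elsewhere in this file: a hedged sketch of querying a child VF's VNIC through hwrm_vnic_qcfg_input. HWRM_VNIC_QCFG is assumed to be the matching request-type constant defined elsewhere in this header.

#include <string.h>

/* Illustrative only: query the VNIC configuration of a given VF. */
static void
fill_vnic_qcfg(struct hwrm_vnic_qcfg_input *req, uint32_t vnic_id,
	       uint16_t vf_id)
{
	memset(req, 0, sizeof(*req));
	req->req_type = HWRM_VNIC_QCFG;	/* assumed request-type constant */
	req->vnic_id = vnic_id;
	/* vf_id is only honored when VF_ID_VALID is set in enables. */
	req->enables = HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID;
	req->vf_id = vf_id;
}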
The HWRM shall not allow a VF driver - * to change the EVB mode. The HWRM shall take into account the - * switching of EVB mode from one to another and reconfigure - * hardware resources as appropriately. The switching from VEB - * to VEPA mode requires the disabling of the loopback traffic. - * Additionally, source knock outs are handled differently in - * VEB and VEPA modes. + * When this flag is set to '1', the VNIC is configured to + * receive only RoCE traffic. + * When this flag is set to '0', the VNIC is not configured + * to receive only RoCE traffic. + * If roce_dual_vnic_mode flag and this flag both are set + * to '1', then it is an invalid configuration of the + * VNIC. The HWRM should not allow that type of + * mis-configuration by HWRM clients. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE \ + UINT32_C(0x10) + /* + * When a VNIC uses one destination ring group for certain + * application (e.g. Receive Flow Steering) where + * exact match is used to direct packets to a VNIC with one + * destination ring group only, there is no need to configure + * RSS indirection table for that VNIC as only one destination + * ring group is used. + * + * When this bit is set to '1', then the VNIC is enabled in a + * mode where RSS is enabled in the VNIC using a RSS context + * for computing RSS hash but the RSS indirection table is + * not configured. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE \ + UINT32_C(0x20) + /* + * When this bit is '1', the VNIC is configured to + * receive both RoCE and non-RoCE traffic, but forward only + * RoCE traffic further. Also RoCE traffic can be mirrored to + * L2 driver. */ - /* No Edge Virtual Bridging (EVB) */ - #define HWRM_FUNC_CFG_INPUT_EVB_MODE_NO_EVB UINT32_C(0x0) - /* Virtual Ethernet Bridge (VEB) */ - #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEB UINT32_C(0x1) - /* Virtual Ethernet Port Aggregator (VEPA) */ - #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA UINT32_C(0x2) - uint8_t unused_2; - uint16_t num_mcast_filters; + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \ + UINT32_C(0x40) + uint8_t unused_1[7]; /* - * The number of multicast filters that should be reserved for - * this function on the RX side. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_func_cfg_output { - uint16_t error_code; +/******************* + * hwrm_vnic_qcaps * + *******************/ + + +/* hwrm_vnic_qcaps_input (size:192b/24B) */ +struct hwrm_vnic_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ + uint64_t resp_addr; + uint32_t enables; + uint8_t unused_0[4]; } __attribute__((packed)); -/* hwrm_func_qstats */ -/* - * Description: This command returns statistics of a function. The input FID - * value is used to indicate what function is being queried. This allows a - * physical function driver to query virtual functions that are children of the - * physical function. The HWRM shall return any unsupported counter with a value - * of 0xFFFFFFFF for 32-bit counters and 0xFFFFFFFFFFFFFFFF for 64-bit counters. - */ -/* Input (24 bytes) */ -struct hwrm_func_qstats_input { - uint16_t req_type; +/* hwrm_vnic_qcaps_output (size:192b/24B) */ +struct hwrm_vnic_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The maximum receive unit that is settable on a vnic. */ + uint16_t mru; + uint8_t unused_0[2]; + uint32_t flags; + /* Unused. */ + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_UNUSED \ + UINT32_C(0x1) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * When this bit is '1', the capability of stripping VLAN in + * the RX path is supported on VNIC(s). + * If set to '0', then VLAN stripping capability is + * not supported on VNIC(s). */ - uint16_t cmpl_ring; + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP \ + UINT32_C(0x2) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * When this bit is '1', the capability to buffer receive + * packets in the hardware until the host posts new receive buffers + * is supported on VNIC(s). + * If set to '0', then bd_stall capability is not supported + * on VNIC(s). */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_BD_STALL_CAP \ + UINT32_C(0x4) /* - * Target ID of this command. 
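[Reviewer note, not part of the patch] Every *_input in this regenerated file opens with the same five fields described here (req_type, cmpl_ring, seq_id, target_id, resp_addr). A sketch of filling them generically, using a local mirror struct so the example stays self-contained; the 0xFFFF target and the NQ-ID completion ring follow the comments above, everything else is illustrative.

#include <stdint.h>

/* Local mirror of the common request header; matches the field order and
 * packing of the inputs in this file (illustrative, not part of the patch). */
struct hwrm_req_hdr {
	uint16_t req_type;
	uint16_t cmpl_ring;
	uint16_t seq_id;
	uint16_t target_id;
	uint64_t resp_addr;
} __attribute__((packed));

static void
hwrm_req_hdr_init(struct hwrm_req_hdr *hdr, uint16_t req_type,
		  uint16_t nq_id, uint16_t seq_id, uint64_t resp_dma)
{
	hdr->req_type = req_type;
	hdr->cmpl_ring = nq_id;		/* NQ ID from nq_alloc, per the comments above */
	hdr->seq_id = seq_id;		/* opaque to firmware, echoed in hwrm_resp_hdr */
	hdr->target_id = 0xFFFF;	/* 0xFFFF addresses the HWRM itself */
	hdr->resp_addr = resp_dma;	/* physically contiguous response buffer */
}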
0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * When this bit is '1', the capability to + * receive both RoCE and non-RoCE traffic on VNIC(s) is + * supported. + * If set to '0', then the capability to receive + * both RoCE and non-RoCE traffic on VNIC(s) is + * not supported. */ - uint64_t resp_addr; + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_DUAL_VNIC_CAP \ + UINT32_C(0x8) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * When this bit is set to '1', the capability to configure + * a VNIC to receive only RoCE traffic is supported. + * When this flag is set to '0', the VNIC capability to + * configure to receive only RoCE traffic is not supported. */ - uint16_t fid; + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_ONLY_VNIC_CAP \ + UINT32_C(0x10) /* - * Function ID of the function that is being queried. 0xFF... - * (All Fs) if the query is for the requesting function. + * When this bit is set to '1', then the capability to enable + * a VNIC in a mode where RSS context without configuring + * RSS indirection table is supported (for RSS hash computation). + * When this bit is set to '0', then a VNIC can not be configured + * with a mode to enable RSS context without configuring RSS + * indirection table. */ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (176 bytes) */ -struct hwrm_func_qstats_output { - uint16_t error_code; + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_DFLT_CR_CAP \ + UINT32_C(0x20) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * When this bit is '1', the capability to + * mirror the RoCE traffic is supported. + * If set to '0', then the capability to mirror the + * RoCE traffic is not supported. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP \ + UINT32_C(0x40) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * When this bit is '1', the outermost RSS hashing capability + * is supported. If set to '0', then the outermost RSS hashing + * capability is not supported. + */ - uint64_t tx_ucast_pkts; - /* Number of transmitted unicast packets on the function. */ - uint64_t tx_mcast_pkts; - /* Number of transmitted multicast packets on the function. */ - uint64_t tx_bcast_pkts; - /* Number of transmitted broadcast packets on the function. */ - uint64_t tx_err_pkts; + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP \ + UINT32_C(0x80) + uint8_t unused_1[7]; /* - * Number of transmitted packets that were discarded due to - * internal NIC resource problems. For transmit, this can only - * happen if TMP is configured to allow dropping in HOL blocking - * conditions, which is not a normal configuration. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written.
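[Reviewer note, not part of the patch] Since hwrm_vnic_qcaps_output packs all of these capabilities into one flags word, a short sketch of the intended decoding, testing individual *_CAP bits from the response; purely illustrative.

#include <stdbool.h>

/* Illustrative only: decode capability bits from hwrm_vnic_qcaps_output. */
static bool
vnic_caps_vlan_strip(const struct hwrm_vnic_qcaps_output *resp)
{
	return (resp->flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP) != 0;
}

static bool
vnic_caps_outermost_rss(const struct hwrm_vnic_qcaps_output *resp)
{
	return (resp->flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP) != 0;
}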
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t tx_drop_pkts; + uint8_t valid; +} __attribute__((packed)); + +/********************* + * hwrm_vnic_tpa_cfg * + *********************/ + + +/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */ +struct hwrm_vnic_tpa_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Number of dropped packets on transmit path on the function. - * These are packets that have been marked for drop by the TE - * CFA block or are packets that exceeded the transmit MTU limit - * for the function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t tx_ucast_bytes; - /* Number of transmitted bytes for unicast traffic on the function. */ - uint64_t tx_mcast_bytes; + uint16_t cmpl_ring; /* - * Number of transmitted bytes for multicast traffic on the - * function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t tx_bcast_bytes; + uint16_t seq_id; /* - * Number of transmitted bytes for broadcast traffic on the - * function. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t rx_ucast_pkts; - /* Number of received unicast packets on the function. */ - uint64_t rx_mcast_pkts; - /* Number of received multicast packets on the function. */ - uint64_t rx_bcast_pkts; - /* Number of received broadcast packets on the function. */ - uint64_t rx_err_pkts; + uint16_t target_id; /* - * Number of received packets that were discarded on the - * function due to resource limitations. This can happen for 3 - * reasons. # The BD used for the packet has a bad format. # - * There were no BDs available in the ring for the packet. # - * There were no BDs available on-chip for the packet. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t rx_drop_pkts; + uint64_t resp_addr; + uint32_t flags; /* - * Number of dropped packets on received path on the function. - * These are packets that have been marked for drop by the RE - * CFA. + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) of + * non-tunneled TCP packets. */ - uint64_t rx_ucast_bytes; - /* Number of received bytes for unicast traffic on the function. */ - uint64_t rx_mcast_bytes; - /* Number of received bytes for multicast traffic on the function. */ - uint64_t rx_bcast_bytes; - /* Number of received bytes for broadcast traffic on the function. */ - uint64_t rx_agg_pkts; - /* Number of aggregated unicast packets on the function. */ - uint64_t rx_agg_bytes; - /* Number of aggregated unicast bytes on the function. */ - uint64_t rx_agg_events; - /* Number of aggregation events on the function. */ - uint64_t rx_agg_aborts; - /* Number of aborted aggregations on the function. 
*/ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA \ + UINT32_C(0x1) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) of + * tunneled TCP packets. */ -} __attribute__((packed)); - -/* hwrm_func_clr_stats */ -/* - * Description: This command clears statistics of a function. The input FID - * value is used to indicate what function's statistics is being cleared. This - * allows a physical function driver to clear statistics of virtual functions - * that are children of the physical function. - */ -/* Input (24 bytes) */ -struct hwrm_func_clr_stats_input { - uint16_t req_type; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA \ + UINT32_C(0x2) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) according + * to Windows Receive Segment Coalescing (RSC) rules. */ - uint16_t cmpl_ring; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE \ + UINT32_C(0x4) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) according + * to Linux Generic Receive Offload (GRO) rules. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO \ + UINT32_C(0x8) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) for TCP + * packets with IP ECN set to non-zero. */ - uint64_t resp_addr; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN \ + UINT32_C(0x10) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) for + * GRE tunneled TCP packets only if all packets have the + * same GRE sequence. */ - uint16_t fid; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \ + UINT32_C(0x20) /* - * Function ID of the function. 0xFF... (All Fs) if the query is - * for the requesting function. + * When this bit is '1' and the GRO mode is enabled, + * the VNIC shall be configured to + * perform transparent packet aggregation (TPA) for + * TCP/IPv4 packets with consecutively increasing IPIDs. + * In other words, the last packet that is being + * aggregated to an already existing aggregation context + * shall have IPID 1 more than the IPID of the last packet + * that was aggregated in that aggregation context. 
*/ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_func_clr_stats_output { - uint16_t error_code; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK \ + UINT32_C(0x40) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * When this bit is '1' and the GRO mode is enabled, + * the VNIC shall be configured to + * perform transparent packet aggregation (TPA) for + * TCP packets with the same TTL (IPv4) or Hop limit (IPv6) + * value. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK \ + UINT32_C(0x80) + uint32_t enables; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This bit must be '1' for the max_agg_segs field to be + * configured. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This bit must be '1' for the max_aggs field to be + * configured. */ -} __attribute__((packed)); - -/* hwrm_func_vf_vnic_ids_query */ -/* Description: This command is used to query vf vnic ids. */ -/* Input (32 bytes) */ -struct hwrm_func_vf_vnic_ids_query_input { - uint16_t req_type; + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This bit must be '1' for the max_agg_timer field to be + * configured. */ - uint16_t cmpl_ring; + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the min_agg_len field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8) + /* Logical vnic ID */ + uint16_t vnic_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This is the maximum number of TCP segments that can + * be aggregated (unit is Log2). Max value is 31. 
*/ - uint64_t resp_addr; + uint16_t max_agg_segs; + /* 1 segment */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0) + /* 2 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1) + /* 4 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2) + /* 8 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3) + /* Any segment size larger than this is not valid */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f) + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_LAST \ + HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This is the maximum number of aggregations this VNIC is + * allowed (unit is Log2). Max value is 7 + */ + uint16_t max_aggs; + /* 1 aggregation */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0) + /* 2 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1) + /* 4 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2) + /* 8 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3) + /* 16 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4) + /* Any aggregation size larger than this is not valid */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7) + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_LAST \ + HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX + uint8_t unused_0[2]; + /* + * This is the maximum amount of time allowed for + * an aggregation context to complete after it was initiated. */ - uint16_t vf_id; + uint32_t max_agg_timer; /* - * This value is used to identify a Virtual Function (VF). The - * scope of VF ID is local within a PF. + * This is the minimum amount of payload length required to + * start an aggregation context. */ - uint8_t unused_0; - uint8_t unused_1; - uint32_t max_vnic_id_cnt; - /* Max number of vnic ids in vnic id table */ - uint64_t vnic_id_tbl_addr; - /* This is the address for VF VNIC ID table */ + uint32_t min_agg_len; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_func_vf_vnic_ids_query_output { - uint16_t error_code; +/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */ +struct hwrm_vnic_tpa_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. 
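[Reviewer note, not part of the patch] The max_agg_segs and max_aggs fields above are log2-encoded: MAX_AGG_SEGS_8 is the raw value 0x3, meaning 1 << 3 = 8 segments. A hedged sketch of a hwrm_vnic_tpa_cfg_input fill that enables plain and tunneled TPA with those limits; HWRM_VNIC_TPA_CFG as the request type is an assumption.

#include <string.h>

/* Illustrative only: enable TPA with log2-encoded aggregation limits. */
static void
fill_tpa_cfg(struct hwrm_vnic_tpa_cfg_input *req, uint16_t vnic_id)
{
	memset(req, 0, sizeof(*req));
	req->req_type = HWRM_VNIC_TPA_CFG;	/* assumed request-type constant */
	req->vnic_id = vnic_id;
	req->flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
		     HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA;
	req->enables = HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
		       HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS;
	req->max_agg_segs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8;	/* 1 << 3 */
	req->max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16;		/* 1 << 4 */
}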
*/ - uint16_t resp_len; + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_vnic_tpa_qcfg * + **********************/ + + +/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_tpa_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t vnic_id_cnt; + uint16_t seq_id; /* - * Actual number of vnic ids Each VNIC ID is written as a 32-bit - * number. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ + uint64_t resp_addr; + /* Logical vnic ID */ + uint16_t vnic_id; + uint8_t unused_0[6]; } __attribute__((packed)); -/* hwrm_func_drv_rgtr */ -/* - * Description: This command is used by the function driver to register its - * information with the HWRM. A function driver shall implement this command. A - * function driver shall use this command during the driver initialization right - * after the HWRM version discovery and default ring resources allocation. - */ -/* Input (80 bytes) */ -struct hwrm_func_drv_rgtr_input { - uint16_t req_type; +/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */ +struct hwrm_vnic_tpa_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * When this bit is '1', the VNIC is configured to + * perform transparent packet aggregation (TPA) of + * non-tunneled TCP packets. */ - uint16_t cmpl_ring; + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_TPA \ + UINT32_C(0x1) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * When this bit is '1', the VNIC is configured to + * perform transparent packet aggregation (TPA) of + * tunneled TCP packets. 
*/ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_ENCAP_TPA \ + UINT32_C(0x2) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * When this bit is '1', the VNIC is configured to + * perform transparent packet aggregation (TPA) according + * to Windows Receive Segment Coalescing (RSC) rules. */ - uint64_t resp_addr; + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_RSC_WND_UPDATE \ + UINT32_C(0x4) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * When this bit is '1', the VNIC is configured to + * perform transparent packet aggregation (TPA) according + * to Linux Generic Receive Offload (GRO) rules. */ - uint32_t flags; + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO \ + UINT32_C(0x8) /* - * When this bit is '1', the function driver is requesting all - * requests from its children VF drivers to be forwarded to - * itself. This flag can only be set by the PF driver. If a VF - * driver sets this flag, it should be ignored by the HWRM. + * When this bit is '1', the VNIC is configured to + * perform transparent packet aggregation (TPA) for TCP + * packets with IP ECN set to non-zero. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1) + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_ECN \ + UINT32_C(0x10) /* - * When this bit is '1', the function is requesting none of the - * requests from its children VF drivers to be forwarded to - * itself. This flag can only be set by the PF driver. If a VF - * driver sets this flag, it should be ignored by the HWRM. + * When this bit is '1', the VNIC is configured to + * perform transparent packet aggregation (TPA) for + * GRE tunneled TCP packets only if all packets have the + * same GRE sequence. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2) - uint32_t enables; - /* This bit must be '1' for the os_type field to be configured. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE UINT32_C(0x1) - /* This bit must be '1' for the ver field to be configured. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER UINT32_C(0x2) - /* This bit must be '1' for the timestamp field to be configured. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP UINT32_C(0x4) - /* This bit must be '1' for the vf_req_fwd field to be configured. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD UINT32_C(0x8) + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \ + UINT32_C(0x20) /* - * This bit must be '1' for the async_event_fwd field to be - * configured. + * When this bit is '1' and the GRO mode is enabled, + * the VNIC is configured to + * perform transparent packet aggregation (TPA) for + * TCP/IPv4 packets with consecutively increasing IPIDs. + * In other words, the last packet that is being + * aggregated to an already existing aggregation context + * shall have IPID 1 more than the IPID of the last packet + * that was aggregated in that aggregation context. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD UINT32_C(0x10) - uint16_t os_type; + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_IPID_CHECK \ + UINT32_C(0x40) /* - * This value indicates the type of OS. The values are based on - * CIM_OperatingSystem.mof file as published by the DMTF. 
+ * When this bit is '1' and the GRO mode is enabled, + * the VNIC is configured to + * perform transparent packet aggregation (TPA) for + * TCP packets with the same TTL (IPv4) or Hop limit (IPv6) + * value. */ - /* Unknown */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0) - /* Other OS not listed below. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER UINT32_C(0x1) - /* MSDOS OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS UINT32_C(0xe) - /* Windows OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WINDOWS UINT32_C(0x12) - /* Solaris OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS UINT32_C(0x1d) - /* Linux OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX UINT32_C(0x24) - /* FreeBSD OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD UINT32_C(0x2a) - /* VMware ESXi OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI UINT32_C(0x68) - /* Microsoft Windows 8 64-bit OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 UINT32_C(0x73) - /* Microsoft Windows Server 2012 R2 OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74) - uint8_t ver_maj; - /* This is the major version of the driver. */ - uint8_t ver_min; - /* This is the minor version of the driver. */ - uint8_t ver_upd; - /* This is the update version of the driver. */ - uint8_t unused_0; - uint16_t unused_1; - uint32_t timestamp; - /* - * This is a 32-bit timestamp provided by the driver for keep - * alive. The timestamp is in multiples of 1ms. - */ - uint32_t unused_2; - uint32_t vf_req_fwd[8]; + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_TTL_CHECK \ + UINT32_C(0x80) /* - * This is a 256-bit bit mask provided by the PF driver for - * letting the HWRM know what commands issued by the VF driver - * to the HWRM should be forwarded to the PF driver. Nth bit - * refers to the Nth req_type. Setting Nth bit to 1 indicates - * that requests from the VF driver with req_type equal to N - * shall be forwarded to the parent PF driver. This field is not - * valid for the VF driver. + * This is the maximum number of TCP segments that can + * be aggregated (unit is Log2). Max value is 31. */ - uint32_t async_event_fwd[8]; + uint16_t max_agg_segs; + /* 1 segment */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_1 UINT32_C(0x0) + /* 2 segments */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_2 UINT32_C(0x1) + /* 4 segments */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_4 UINT32_C(0x2) + /* 8 segments */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_8 UINT32_C(0x3) + /* Any segment size larger than this is not valid */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f) + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_LAST \ + HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX /* - * This is a 256-bit bit mask provided by the function driver - * (PF or VF driver) to indicate the list of asynchronous event - * completions to be forwarded. Nth bit refers to the Nth - * event_id. Setting Nth bit to 1 by the function driver shall - * result in the HWRM forwarding asynchronous event completion - * with event_id equal to N. If all bits are set to 0 (value of - * 0), then the HWRM shall not forward any asynchronous event - * completion to this function driver. + * This is the maximum number of aggregations this VNIC is + * allowed (unit is Log2). 
Max value is 7 */ -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_func_drv_rgtr_output { - uint16_t error_code; + uint16_t max_aggs; + /* 1 aggregation */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_1 UINT32_C(0x0) + /* 2 aggregations */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_2 UINT32_C(0x1) + /* 4 aggregations */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_4 UINT32_C(0x2) + /* 8 aggregations */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_8 UINT32_C(0x3) + /* 16 aggregations */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_16 UINT32_C(0x4) + /* Any aggregation size larger than this is not valid */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX UINT32_C(0x7) + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_LAST \ + HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This is the maximum amount of time allowed for + * an aggregation context to complete after it was initiated. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint32_t max_agg_timer; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This is the minimum amount of payload length required to + * start an aggregation context. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint32_t min_agg_len; + uint8_t unused_0[7]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_func_drv_unrgtr */ -/* - * Description: This command is used by the function driver to un register with - * the HWRM. A function driver shall implement this command. A function driver - * shall use this command during the driver unloading. - */ -/* Input (24 bytes) */ -struct hwrm_func_drv_unrgtr_input { - uint16_t req_type; +/********************* + * hwrm_vnic_rss_cfg * + *********************/ + + +/* hwrm_vnic_rss_cfg_input (size:384b/48B) */ +struct hwrm_vnic_rss_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
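[Reviewer note, not part of the patch] Before moving on to the RSS commands, one decoding note: the query side returns the same log2 encodings, so converting hwrm_vnic_tpa_qcfg_output back to plain counts is a shift, as in this illustrative sketch.

/* Illustrative only: log2-encoded limits back to counts. */
static unsigned int
tpa_qcfg_max_segs(const struct hwrm_vnic_tpa_qcfg_output *resp)
{
	return 1u << resp->max_agg_segs;	/* MAX_AGG_SEGS_8 (0x3) -> 8 */
}

static unsigned int
tpa_qcfg_max_aggs(const struct hwrm_vnic_tpa_qcfg_output *resp)
{
	return 1u << resp->max_aggs;		/* MAX_AGGS_16 (0x4) -> 16 */
}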
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t hash_type; + /* + * When this bit is '1', the RSS hash shall be computed + * over source and destination IPv4 addresses of IPv4 + * packets. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv4 addresses and + * source/destination ports of TCP/IPv4 packets. */ - uint16_t cmpl_ring; + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv4 addresses and + * source/destination ports of UDP/IPv4 packets. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * When this bit is '1', the RSS hash shall be computed + * over source and destination IPv6 addresses of IPv6 + * packets. */ - uint64_t resp_addr; + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv6 addresses and + * source/destination ports of TCP/IPv6 packets. */ - uint32_t flags; + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10) /* - * When this bit is '1', the function driver is notifying the - * HWRM to prepare for the shutdown. + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv6 addresses and + * source/destination ports of UDP/IPv6 packets. */ - #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \ - UINT32_C(0x1) - uint32_t unused_0; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_func_drv_unrgtr_output { - uint16_t error_code; + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20) + /* VNIC ID of VNIC associated with RSS table being configured. */ + uint16_t vnic_id; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Specifies which VNIC ring table pair to configure. + * Valid values range from 0 to 7. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command.
*/ - uint16_t resp_len; + uint8_t ring_table_pair_index; + /* Flags to specify different RSS hash modes. */ + uint8_t hash_mode_flags; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * When this bit is '1', it indicates using current RSS + * hash mode setting configured in the device. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT \ + UINT32_C(0x1) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * When this bit is '1', it indicates requesting support of + * RSS hashing over innermost 4 tuples {l3.src, l3.dest, + * l4.src, l4.dest} for tunnel packets. For non-tunnel + * packets, the RSS hash is computed over the normal + * src/dest l3 and src/dest l4 headers. */ -} __attribute__((packed)); - -/* hwrm_func_buf_rgtr */ -/* - * Description: This command is used by the PF driver to register buffers used - * in the PF-VF communication with the HWRM. The PF driver uses this command to - * register buffers for each PF-VF channel. A parent PF may issue this command - * per child VF. If VF ID is not valid, then this command is used to register - * buffers for all children VFs of the PF. - */ -/* Input (128 bytes) */ -struct hwrm_func_buf_rgtr_input { - uint16_t req_type; + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4 \ + UINT32_C(0x2) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * When this bit is '1', it indicates requesting support of + * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for + * tunnel packets. For non-tunnel packets, the RSS hash is + * computed over the normal src/dest l3 headers. */ - uint16_t cmpl_ring; + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 \ + UINT32_C(0x4) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * When this bit is '1', it indicates requesting support of + * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest, + * t_l4.src, t_l4.dest} for tunnel packets. For non-tunnel + * packets, the RSS hash is computed over the normal + * src/dest l3 and src/dest l4 headers. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4 \ + UINT32_C(0x8) + /* + * When this bit is '1', it indicates requesting support of + * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for + * tunnel packets. For non-tunnel packets, the RSS hash is + * computed over the normal src/dest l3 headers. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 \ + UINT32_C(0x10) + /* This is the address for rss ring group table */ + uint64_t ring_grp_tbl_addr; + /* This is the address for rss hash key table */ + uint64_t hash_key_tbl_addr; + /* Index to the rss indirection table.
*/ + uint16_t rss_ctx_idx; + uint8_t unused_1[6]; +} __attribute__((packed)); + +/* hwrm_vnic_rss_cfg_output (size:128b/16B) */ +struct hwrm_vnic_rss_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t resp_addr; + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_vnic_rss_qcfg * + **********************/ + + +/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_rss_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint32_t enables; - /* This bit must be '1' for the vf_id field to be configured. */ - #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1) - /* This bit must be '1' for the err_buf_addr field to be configured. */ - #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR UINT32_C(0x2) - uint16_t vf_id; + uint16_t cmpl_ring; /* - * This value is used to identify a Virtual Function (VF). The - * scope of VF ID is local within a PF. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t req_buf_num_pages; + uint16_t seq_id; /* - * This field represents the number of pages used for request - * buffer(s). + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t req_buf_page_size; - /* This field represents the page size used for request buffer(s). */ - /* 16 bytes */ - #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_16B UINT32_C(0x4) - /* 4 Kbytes */ - #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_4K UINT32_C(0xc) - /* 8 Kbytes */ - #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_8K UINT32_C(0xd) - /* 64 Kbytes */ - #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_64K UINT32_C(0x10) - /* 2 Mbytes */ - #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_2M UINT32_C(0x15) - /* 4 Mbytes */ - #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_4M UINT32_C(0x16) - /* 1 Gbytes */ - #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_1G UINT32_C(0x1e) - uint16_t req_buf_len; - /* The length of the request buffer per VF in bytes. */ - uint16_t resp_buf_len; - /* The length of the response buffer in bytes. */ - uint8_t unused_0; - uint8_t unused_1; - uint64_t req_buf_page_addr[10]; - /* This field represents the page address of req buffer. 
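[Reviewer note, not part of the patch] Tying the hash_type bits and the two DMA table pointers together, a hedged sketch of a full hwrm_vnic_rss_cfg_input fill for 2- and 4-tuple hashing over IPv4 and IPv6. HWRM_VNIC_RSS_CFG is assumed to be the request-type constant, and the 40-byte key size is inferred from the hash_key[10] array in the qcfg output below.

#include <string.h>

/* Illustrative only: enable TCP/UDP 4-tuple and IP 2-tuple RSS hashing. */
static void
fill_rss_cfg(struct hwrm_vnic_rss_cfg_input *req, uint16_t vnic_id,
	     uint16_t rss_ctx_idx, uint64_t ring_grp_tbl_dma,
	     uint64_t hash_key_dma)
{
	memset(req, 0, sizeof(*req));
	req->req_type = HWRM_VNIC_RSS_CFG;	/* assumed request-type constant */
	req->vnic_id = vnic_id;
	req->rss_ctx_idx = rss_ctx_idx;		/* indirection table index */
	req->hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
			 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
			 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 |
			 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
			 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 |
			 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
	req->ring_grp_tbl_addr = ring_grp_tbl_dma;	/* ring group table IOVA */
	req->hash_key_tbl_addr = hash_key_dma;		/* 40-byte hash key IOVA */
}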
*/ - uint64_t error_buf_addr; + uint16_t target_id; /* - * This field is used to receive the error reporting from the - * chipset. Only applicable for PFs. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t resp_buf_addr; - /* This field is used to receive the response forwarded by the HWRM. */ + uint64_t resp_addr; + /* Index to the rss indirection table. */ + uint16_t rss_ctx_idx; + uint8_t unused_0[6]; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_func_buf_rgtr_output { - uint16_t error_code; +/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */ +struct hwrm_vnic_rss_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t hash_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * When this bit is '1', the RSS hash shall be computed + * over source and destination IPv4 addresses of IPv4 + * packets. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV4 UINT32_C(0x1) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv4 addresses and + * source/destination ports of TCP/IPv4 packets. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv4 addresses and + * source/destination ports of UDP/IPv4 packets. */ -} __attribute__((packed)); - -/* hwrm_func_buf_unrgtr */ -/* - * Description: This command is used by the PF driver to unregister buffers used - * in the PF-VF communication with the HWRM. The PF driver uses this command to - * unregister buffers for PF-VF communication. A parent PF may issue this - * command to unregister buffers for communication between the PF and a specific - * VF. If the VF ID is not valid, then this command is used to unregister - * buffers used for communications with all children VFs of the PF. - */ -/* Input (24 bytes) */ -struct hwrm_func_buf_unrgtr_input { - uint16_t req_type; + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. 
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv6 addresses of IPv6
+ * packets.
 */
- uint16_t cmpl_ring;
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
 /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of TCP/IPv6 packets.
 */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
 /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of UDP/IPv6 packets.
 */
- uint64_t resp_addr;
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
+ uint8_t unused_0[4];
+ /* This is the value of the rss hash key */
+ uint32_t hash_key[10];
+ /* Flags to specify different RSS hash modes. */
+ uint8_t hash_mode_flags;
 /*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * When this bit is '1', it indicates using current RSS
+ * hash mode setting configured in the device.
 */
- uint32_t enables;
- /* This bit must be '1' for the vf_id field to be configured. */
- #define HWRM_FUNC_BUF_UNRGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
- uint16_t vf_id;
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_DEFAULT \
+	UINT32_C(0x1)
 /*
- * This value is used to identify a Virtual Function (VF). The
- * scope of VF ID is local within a PF.
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 4 tuples {l3.src, l3.dest,
+ * l4.src, l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
 */
- uint16_t unused_0;
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_func_buf_unrgtr_output {
- uint16_t error_code;
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_4 \
+	UINT32_C(0x2)
 /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for
+ * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
 */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_2 \
+	UINT32_C(0x4)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest,
+ * t_l4.src, t_l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_4 \
+	UINT32_C(0x8)
 /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for
+ * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
 */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_2 \
+	UINT32_C(0x10)
+ uint8_t unused_1[6];
 /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
+ uint8_t valid;
 } __attribute__((packed));

-/* hwrm_port_phy_cfg */
-/*
- * Description: This command configures the PHY device for the port. It allows
- * setting of the most generic settings for the PHY. The HWRM shall complete
- * this command as soon as PHY settings are configured. They may not be applied
- * when the command response is provided. A VF driver shall not be allowed to
- * configure PHY using this command. In a network partition mode, a PF driver
- * shall not be allowed to configure PHY using this command.
- */
-/* Input (56 bytes) */
-struct hwrm_port_phy_cfg_input {
- uint16_t req_type;
+/**************************
+ * hwrm_vnic_plcmodes_cfg *
+ **************************/
+
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
+struct hwrm_vnic_plcmodes_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
 */
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
 /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
 */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
 /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- uint64_t resp_addr;
+ uint16_t target_id;
 /*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
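To make the hash_type bit-mask in the query response concrete, here is a small sketch that decodes a completed HWRM_VNIC_RSS_QCFG response using only the macros defined above; printf stands in for whatever logging the driver uses.

#include <stdio.h>
#include <stdint.h>
#include "hsi_struct_def_dpdk.h"

/* Sketch: report which tuples the device currently hashes on. */
static void
print_rss_hash_type(const struct hwrm_vnic_rss_qcfg_output *resp)
{
	uint32_t ht = resp->hash_type;

	if (ht & HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV4)
		printf("ipv4 ");
	if (ht & HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV4)
		printf("tcp/ipv4 ");
	if (ht & HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV4)
		printf("udp/ipv4 ");
	if (ht & HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV6)
		printf("ipv6 ");
	if (ht & HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV6)
		printf("tcp/ipv6 ");
	if (ht & HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV6)
		printf("udp/ipv6 ");
	printf("\n");
}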
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
 /*
- * When this bit is set to '1', the PHY for the port shall be
- * reset. # If this bit is set to 1, then the HWRM shall reset
- * the PHY after applying PHY configuration changes specified in
- * this command. # In order to guarantee that PHY configuration
- * changes specified in this command take effect, the HWRM
- * client should set this flag to 1. # If this bit is not set to
- * 1, then the HWRM may reset the PHY depending on the current
- * PHY configuration and settings specified in this command.
+ * When this bit is '1', the VNIC shall be configured to
+ * use the regular placement algorithm.
+ * By default, the regular placement algorithm shall be
+ * enabled on the VNIC.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY UINT32_C(0x1)
- /* deprecated bit. Do not use!!! */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED UINT32_C(0x2)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT \
+	UINT32_C(0x1)
 /*
- * When this bit is set to '1', the link shall be forced to the
- * force_link_speed value. When this bit is set to '1', the HWRM
- * client should not enable any of the auto negotiation related
- * fields represented by auto_XXX fields in this command. When
- * this bit is set to '1' and the HWRM client has enabled a
- * auto_XXX field in this command, then the HWRM shall ignore
- * the enabled auto_XXX field. When this bit is set to zero, the
- * link shall be allowed to autoneg.
+ * When this bit is '1', the VNIC shall be configured to
+ * use the jumbo placement algorithm.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE UINT32_C(0x4)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT \
+	UINT32_C(0x2)
 /*
- * When this bit is set to '1', the auto-negotiation process
- * shall be restarted on the link.
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for IPv4 packets according
+ * to the following rules:
+ * # If the packet is identified as TCP/IPv4, then the
+ * packet is split at the beginning of the TCP payload.
+ * # If the packet is identified as UDP/IPv4, then the
+ * packet is split at the beginning of UDP payload.
+ * # If the packet is identified as non-TCP and non-UDP
+ * IPv4 packet, then the packet is split at the beginning
+ * of the upper layer protocol header carried in the IPv4
+ * packet.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG UINT32_C(0x8)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 \
+	UINT32_C(0x4)
 /*
- * When this bit is set to '1', Energy Efficient Ethernet (EEE)
- * is requested to be enabled on this link. If EEE is not
- * supported on this port, then this flag shall be ignored by
- * the HWRM.
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for IPv6 packets according
+ * to the following rules:
+ * # If the packet is identified as TCP/IPv6, then the
+ * packet is split at the beginning of the TCP payload.
+ * # If the packet is identified as UDP/IPv6, then the
+ * packet is split at the beginning of UDP payload.
+ * # If the packet is identified as non-TCP and non-UDP
+ * IPv6 packet, then the packet is split at the beginning
+ * of the upper layer protocol header carried in the IPv6
+ * packet.
*/ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE UINT32_C(0x10) + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 \ + UINT32_C(0x8) /* - * When this bit is set to '1', Energy Efficient Ethernet (EEE) - * is requested to be disabled on this link. If EEE is not - * supported on this port, then this flag shall be ignored by - * the HWRM. + * When this bit is '1', the VNIC shall be configured + * to enable Header-Data split for FCoE packets at the + * beginning of FC payload. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE UINT32_C(0x20) + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE \ + UINT32_C(0x10) /* - * When this bit is set to '1' and EEE is enabled on this link, - * then TX LPI is requested to be enabled on the link. If EEE is - * not supported on this port, then this flag shall be ignored - * by the HWRM. If EEE is disabled on this port, then this flag - * shall be ignored by the HWRM. + * When this bit is '1', the VNIC shall be configured + * to enable Header-Data split for RoCE packets at the + * beginning of RoCE payload (after BTH/GRH headers). */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_ENABLE UINT32_C(0x40) + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE \ + UINT32_C(0x20) + uint32_t enables; /* - * When this bit is set to '1' and EEE is enabled on this link, - * then TX LPI is requested to be disabled on the link. If EEE - * is not supported on this port, then this flag shall be - * ignored by the HWRM. If EEE is disabled on this port, then - * this flag shall be ignored by the HWRM. + * This bit must be '1' for the jumbo_thresh_valid field to be + * configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE UINT32_C(0x80) + #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID \ + UINT32_C(0x1) /* - * When set to 1, then the HWRM shall enable FEC - * autonegotitation on this port if supported. When set to 0, - * then this flag shall be ignored. If FEC autonegotiation is - * not supported, then the HWRM shall ignore this flag. + * This bit must be '1' for the hds_offset_valid field to be + * configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE UINT32_C(0x100) + #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID \ + UINT32_C(0x2) /* - * When set to 1, then the HWRM shall disable FEC - * autonegotiation on this port if supported. When set to 0, - * then this flag shall be ignored. If FEC autonegotiation is - * not supported, then the HWRM shall ignore this flag. + * This bit must be '1' for the hds_threshold_valid field to be + * configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE \ - UINT32_C(0x200) + #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID \ + UINT32_C(0x4) + /* Logical vnic ID */ + uint32_t vnic_id; /* - * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire - * Code) on this port if supported. When set to 0, then this - * flag shall be ignored. If FEC CLAUSE 74 is not supported, - * then the HWRM shall ignore this flag. + * When jumbo placement algorithm is enabled, this value + * is used to determine the threshold for jumbo placement. + * Packets with length larger than this value will be + * placed according to the jumbo placement algorithm. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE \ - UINT32_C(0x400) + uint16_t jumbo_thresh; /* - * When set to 1, then the HWRM shall disable FEC CLAUSE 74 - * (Fire Code) on this port if supported. When set to 0, then - * this flag shall be ignored. 
If FEC CLAUSE 74 is not
- * supported, then the HWRM shall ignore this flag.
+ * This value is used to determine the offset into
+ * the packet buffer where the split data (payload) will be
+ * placed according to one of the HDS placement algorithms.
+ *
+ * The lengths of packet buffers provided for split data
+ * shall be larger than this value.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE \
-	UINT32_C(0x800)
+ uint16_t hds_offset;
 /*
- * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed
- * Solomon) on this port if supported. When set to 0, then this
- * flag shall be ignored. If FEC CLAUSE 91 is not supported,
- * then the HWRM shall ignore this flag.
+ * When one of the HDS placement algorithms is enabled, this
+ * value is used to determine the threshold for HDS
+ * placement.
+ * Packets with length larger than this value will be
+ * placed according to the HDS placement algorithm.
+ * This value shall be a multiple of 4 bytes.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE \
-	UINT32_C(0x1000)
+ uint16_t hds_threshold;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
+struct hwrm_vnic_plcmodes_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
 /*
- * When set to 1, then the HWRM shall disable FEC CLAUSE 91
- * (Reed Solomon) on this port if supported. When set to 0, then
- * this flag shall be ignored. If FEC CLAUSE 91 is not
- * supported, then the HWRM shall ignore this flag.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE \
-	UINT32_C(0x2000)
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_vnic_plcmodes_qcfg *
+ ***************************/
+
+
+/* hwrm_vnic_plcmodes_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_plcmodes_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * When this bit is set to '1', the link shall be forced to be
- * taken down. # When this bit is set to '1", all other command
- * input settings related to the link speed shall be ignored.
- * Once the link state is forced down, it can be explicitly
- * cleared from that state by setting this flag to '0'. # If
- * this flag is set to '0', then the link shall be cleared from
- * forced down state if the link is in forced down state. There
- * may be conditions (e.g. out-of-band or sideband configuration
- * changes for the link) outside the scope of the HWRM
- * implementation that may clear forced down link state.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN UINT32_C(0x4000)
- uint32_t enables;
- /* This bit must be '1' for the auto_mode field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE UINT32_C(0x1)
- /* This bit must be '1' for the auto_duplex field to be configured.
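Tying the placement-mode flags, enables, and thresholds together, a hedged sketch of an HWRM_VNIC_PLCMODES_CFG request that enables jumbo placement plus IPv4/IPv6 header-data split; the threshold values are illustrative only.

#include <string.h>
#include <stdint.h>
#include "hsi_struct_def_dpdk.h"

/* Sketch, assuming the plcmodes definitions above. */
static void
fill_vnic_plcmodes_cfg(struct hwrm_vnic_plcmodes_cfg_input *req,
		       uint32_t vnic_id)
{
	memset(req, 0, sizeof(*req));
	req->vnic_id = vnic_id;
	req->flags =
		HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT |
		HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 |
		HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6;
	req->enables =
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID |
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID;
	req->jumbo_thresh = 9000;	/* jumbo-place frames longer than this */
	req->hds_threshold = 256;	/* must be a multiple of 4 bytes */
}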
*/ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX UINT32_C(0x2) - /* This bit must be '1' for the auto_pause field to be configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE UINT32_C(0x4) + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; /* - * This bit must be '1' for the auto_link_speed field to be - * configured. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED UINT32_C(0x8) + uint16_t seq_id; /* - * This bit must be '1' for the auto_link_speed_mask field to be - * configured. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \ - UINT32_C(0x10) - /* This bit must be '1' for the wirespeed field to be configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIOUTPUTEED UINT32_C(0x20) - /* This bit must be '1' for the lpbk field to be configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK UINT32_C(0x40) - /* This bit must be '1' for the preemphasis field to be configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_PREEMPHASIS UINT32_C(0x80) - /* This bit must be '1' for the force_pause field to be configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE UINT32_C(0x100) + uint16_t target_id; /* - * This bit must be '1' for the eee_link_speed_mask field to be - * configured. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \ - UINT32_C(0x200) - /* This bit must be '1' for the tx_lpi_timer field to be configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER UINT32_C(0x400) - uint16_t port_id; - /* Port ID of port that is to be configured. */ - uint16_t force_link_speed; + uint64_t resp_addr; + /* Logical vnic ID */ + uint32_t vnic_id; + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_vnic_plcmodes_qcfg_output (size:192b/24B) */ +struct hwrm_vnic_plcmodes_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; /* - * This is the speed that will be used if the force bit is '1'. - * If unsupported speed is selected, an error will be generated. + * When this bit is '1', the VNIC is configured to + * use regular placement algorithm. 
*/ - /* 100Mb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1) - /* 1Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64) - /* 20Mb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8) - /* 10Mb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff) - uint8_t auto_mode; + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_REGULAR_PLACEMENT \ + UINT32_C(0x1) /* - * This value is used to identify what autoneg mode is used when - * the link speed is not being forced. + * When this bit is '1', the VNIC is configured to + * use the jumbo placement algorithm. */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_JUMBO_PLACEMENT \ + UINT32_C(0x2) /* - * Disable autoneg or autoneg disabled. No - * speeds are selected. + * When this bit is '1', the VNIC is configured + * to enable Header-Data split for IPv4 packets. */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE UINT32_C(0x0) - /* Select all possible speeds for autoneg mode. */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1) + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 \ + UINT32_C(0x4) /* - * Select only the auto_link_speed speed for - * autoneg mode. This mode has been DEPRECATED. - * An HWRM client should not use this mode. + * When this bit is '1', the VNIC is configured + * to enable Header-Data split for IPv6 packets. */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2) + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 \ + UINT32_C(0x8) /* - * Select the auto_link_speed or any speed below - * that speed for autoneg. This mode has been - * DEPRECATED. An HWRM client should not use - * this mode. + * When this bit is '1', the VNIC is configured + * to enable Header-Data split for FCoE packets. */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3) + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE \ + UINT32_C(0x10) /* - * Select the speeds based on the corresponding - * link speed mask value that is provided. + * When this bit is '1', the VNIC is configured + * to enable Header-Data split for RoCE packets. */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4) - uint8_t auto_duplex; + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE \ + UINT32_C(0x20) /* - * This is the duplex setting that will be used if the - * autoneg_mode is "one_speed" or "one_or_below". + * When this bit is '1', the VNIC is configured + * to be the default VNIC of the requesting function. */ - /* Half Duplex will be requested. */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF UINT32_C(0x0) - /* Full duplex will be requested. */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL UINT32_C(0x1) - /* Both Half and Full dupex will be requested. 
 */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH UINT32_C(0x2)
- uint8_t auto_pause;
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC \
+	UINT32_C(0x40)
 /*
- * This value is used to configure the pause that will be used
- * for autonegotiation. Add text on the usage of auto_pause and
- * force_pause.
+ * When jumbo placement algorithm is enabled, this value
+ * is used to determine the threshold for jumbo placement.
+ * Packets with length larger than this value will be
+ * placed according to the jumbo placement algorithm.
 */
+ uint16_t jumbo_thresh;
 /*
- * When this bit is '1', Generation of tx pause messages has
- * been requested. Disabled otherwise.
+ * This value is used to determine the offset into
+ * the packet buffer where the split data (payload) will be
+ * placed according to one of the HDS placement algorithms.
+ *
+ * The lengths of packet buffers provided for split data
+ * shall be larger than this value.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX UINT32_C(0x1)
+ uint16_t hds_offset;
 /*
- * When this bit is '1', Reception of rx pause messages has been
- * requested. Disabled otherwise.
+ * When one of the HDS placement algorithms is enabled, this
+ * value is used to determine the threshold for HDS
+ * placement.
+ * Packets with length larger than this value will be
+ * placed according to the HDS placement algorithm.
+ * This value shall be a multiple of 4 bytes.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX UINT32_C(0x2)
+ uint16_t hds_threshold;
+ uint8_t unused_0[5];
 /*
- * When set to 1, the advertisement of pause is enabled. # When
- * the auto_mode is not set to none and this flag is set to 1,
- * then the auto_pause bits on this port are being advertised
- * and autoneg pause results are being interpreted. # When the
- * auto_mode is not set to none and this flag is set to 0, the
- * pause is forced as indicated in force_pause, and also
- * advertised as auto_pause bits, but the autoneg results are
- * not interpreted since the pause configuration is being
- * forced. # When the auto_mode is set to none and this flag is
- * set to 1, auto_pause bits should be ignored and should be set
- * to 0.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
- uint8_t unused_0;
- uint16_t auto_link_speed;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************************
+ * hwrm_vnic_rss_cos_lb_ctx_alloc *
+ **********************************/
+
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * This is the speed that will be used if the autoneg_mode is
- * "one_speed" or "one_or_below". If an unsupported speed is
- * selected, an error will be generated.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/ - /* 100Mb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1) - /* 1Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64) - /* 20Mb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8) - /* 10Mb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff) - uint16_t auto_link_speed_mask; + uint16_t cmpl_ring; /* - * This is a mask of link speeds that will be used if - * autoneg_mode is "mask". If unsupported speed is enabled an - * error will be generated. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - /* 100Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \ - UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB \ - UINT32_C(0x2) - /* 1Gb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD \ - UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \ - UINT32_C(0x8) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \ - UINT32_C(0x10) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \ - UINT32_C(0x20) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB UINT32_C(0x40) - /* 20Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB UINT32_C(0x80) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \ - UINT32_C(0x100) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \ - UINT32_C(0x200) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \ - UINT32_C(0x400) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \ - UINT32_C(0x800) - /* 10Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \ - UINT32_C(0x1000) - /* 10Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \ - UINT32_C(0x2000) - uint8_t wirespeed; - /* This value controls the wirespeed feature. */ - /* Wirespeed feature is disabled. */ - #define HWRM_PORT_PHY_CFG_INPUT_WIOUTPUTEED_OFF UINT32_C(0x0) - /* Wirespeed feature is enabled. */ - #define HWRM_PORT_PHY_CFG_INPUT_WIOUTPUTEED_ON UINT32_C(0x1) - uint8_t lpbk; - /* This value controls the loopback setting for the PHY. */ - /* No loopback is selected. Normal operation. 
*/ - #define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE UINT32_C(0x0) + uint16_t seq_id; /* - * The HW will be configured with local loopback - * such that host data is sent back to the host - * without modification. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1) + uint16_t target_id; /* - * The HW will be configured with remote - * loopback such that port logic will send - * packets back out the transmitter that are - * received. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2) - uint8_t force_pause; + uint64_t resp_addr; +} __attribute__((packed)); + +/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* rss_cos_lb_ctx_id is 16 b */ + uint16_t rss_cos_lb_ctx_id; + uint8_t unused_0[5]; /* - * This value is used to configure the pause that will be used - * for force mode. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; +} __attribute__((packed)); + +/********************************* + * hwrm_vnic_rss_cos_lb_ctx_free * + *********************************/ + + +/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * When this bit is '1', Generation of tx pause messages is - * supported. Disabled otherwise. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1) + uint16_t cmpl_ring; /* - * When this bit is '1', Reception of rx pause messages is - * supported. Disabled otherwise. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2) - uint8_t unused_1; - uint32_t preemphasis; + uint16_t seq_id; /* - * This value controls the pre-emphasis to be used for the link. - * Driver should not set this value (use enable.preemphasis = 0) - * unless driver is sure of setting. Normally HWRM FW will - * determine proper pre-emphasis. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t eee_link_speed_mask; + uint16_t target_id; /* - * Setting for link speed mask that is used to advertise speeds - * during autonegotiation when EEE is enabled. 
This field is - * valid only when EEE is enabled. The speeds specified in this - * field shall be a subset of speeds specified in - * auto_link_speed_mask. If EEE is enabled,then at least one - * speed shall be provided in this mask. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - /* Reserved */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB UINT32_C(0x2) - /* Reserved */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB UINT32_C(0x8) - /* Reserved */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 UINT32_C(0x10) - /* Reserved */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 UINT32_C(0x20) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB UINT32_C(0x40) - uint8_t unused_2; - uint8_t unused_3; - uint32_t tx_lpi_timer; - uint32_t unused_4; + uint64_t resp_addr; + /* rss_cos_lb_ctx_id is 16 b */ + uint16_t rss_cos_lb_ctx_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Reuested setting of TX LPI timer in microseconds. This field - * is valid only when EEE is enabled and TX LPI is enabled. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff) - #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0 + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_port_phy_cfg_output { - uint16_t error_code; +/******************* + * hwrm_ring_alloc * + *******************/ + + +/* hwrm_ring_alloc_input (size:640b/80B) */ +struct hwrm_ring_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. 
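The alloc/free pair defined just above is symmetric: the 16-bit context ID returned in the alloc response is handed back verbatim in the free request. A hedged sketch of the free side; hwrm_send_message() is a hypothetical transport helper, not part of this header.

#include <string.h>
#include <stdint.h>
#include "hsi_struct_def_dpdk.h"

/*
 * Sketch: release an RSS CoS/LB context previously obtained from
 * HWRM_VNIC_RSS_COS_LB_CTX_ALLOC.
 */
static void
free_rss_cos_lb_ctx(uint16_t ctx_id)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req;

	memset(&req, 0, sizeof(req));
	req.rss_cos_lb_ctx_id = ctx_id;	/* ID from the alloc response */
	/* hwrm_send_message(&req, sizeof(req)); -- driver transport */
}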
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* hwrm_port_phy_qcfg */ -/* Description: This command queries the PHY configuration for the port. */ -/* Input (24 bytes) */ -struct hwrm_port_phy_qcfg_input { - uint16_t req_type; + uint16_t target_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cmpl_ring; + uint64_t resp_addr; + uint32_t enables; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the ring_arb_cfg field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG \ + UINT32_C(0x2) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This bit must be '1' for the stat_ctx_id_valid field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID \ + UINT32_C(0x8) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This bit must be '1' for the max_bw_valid field to be + * configured. */ - uint16_t port_id; - /* Port ID of port that is to be queried. */ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (96 bytes) */ -struct hwrm_port_phy_qcfg_output { - uint16_t error_code; + #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID \ + UINT32_C(0x20) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This bit must be '1' for the rx_ring_id field to be + * configured. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID \ + UINT32_C(0x40) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This bit must be '1' for the nq_ring_id field to be + * configured. 
*/ - uint8_t link; - /* This value indicates the current link status. */ - /* There is no link or cable detected. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK UINT32_C(0x0) - /* There is no link, but a cable has been detected. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL UINT32_C(0x1) - /* There is a link. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK UINT32_C(0x2) - uint8_t unused_0; - uint16_t link_speed; - /* This value indicates the current link speed of the connection. */ - /* 100Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB UINT32_C(0x1) - /* 1Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB UINT32_C(0xa) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB UINT32_C(0x14) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB UINT32_C(0x19) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB UINT32_C(0x64) - /* 20Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB UINT32_C(0xc8) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB UINT32_C(0xfa) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB UINT32_C(0x190) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB UINT32_C(0x1f4) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB UINT32_C(0x3e8) - /* 10Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff) - uint8_t duplex_cfg; - /* This value is indicates the duplex of the current connection. */ - /* Half Duplex connection. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_HALF UINT32_C(0x0) - /* Full duplex connection. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL UINT32_C(0x1) - uint8_t pause; + #define HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID \ + UINT32_C(0x80) /* - * This value is used to indicate the current pause - * configuration. When autoneg is enabled, this value represents - * the autoneg results of pause configuration. + * This bit must be '1' for the rx_buf_size field to be + * configured. */ - /* - * When this bit is '1', Generation of tx pause messages is - * supported. Disabled otherwise. + #define HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID \ + UINT32_C(0x100) + /* Ring Type. */ + uint8_t ring_type; + /* L2 Completion Ring (CR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0) + /* TX Ring (TR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX UINT32_C(0x1) + /* RX Ring (RR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX UINT32_C(0x2) + /* RoCE Notification Completion Ring (ROCE_CR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3) + /* RX Aggregation Ring */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG UINT32_C(0x4) + /* Notification Queue */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ UINT32_C(0x5) + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_LAST \ + HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ + uint8_t unused_0[3]; + /* + * This value is a pointer to the page table for the + * Ring. + */ + uint64_t page_tbl_addr; + /* First Byte Offset of the first entry in the first page. */ + uint32_t fbo; + /* + * Actual page size in 2^page_size. The supported range is increments + * in powers of 2 from 16 bytes to 1GB. + * - 4 = 16 B + * Page size is 16 B. + * - 12 = 4 KB + * Page size is 4 KB. + * - 13 = 8 KB + * Page size is 8 KB. + * - 16 = 64 KB + * Page size is 64 KB. + * - 21 = 2 MB + * Page size is 2 MB. + * - 22 = 4 MB + * Page size is 4 MB. 
+ * - 30 = 1 GB + * Page size is 1 GB. + */ + uint8_t page_size; + /* + * This value indicates the depth of page table. + * For this version of the specification, value other than 0 or + * 1 shall be considered as an invalid value. + * When the page_tbl_depth = 0, then it is treated as a + * special case with the following. + * 1. FBO and page size fields are not valid. + * 2. page_tbl_addr is the physical address of the first + * element of the ring. + */ + uint8_t page_tbl_depth; + uint8_t unused_1[2]; + /* + * Number of 16B units in the ring. Minimum size for + * a ring is 16 16B entries. + */ + uint32_t length; + /* + * Logical ring number for the ring to be allocated. + * This value determines the position in the doorbell + * area where the update to the ring will be made. + * + * For completion rings, this value is also the MSI-X + * vector number for the function the completion ring is + * associated with. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX UINT32_C(0x1) + uint16_t logical_id; /* - * When this bit is '1', Reception of rx pause messages is - * supported. Disabled otherwise. + * This field is used only when ring_type is a TX ring. + * This value indicates what completion ring the TX ring + * is associated with. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX UINT32_C(0x2) - uint16_t support_speeds; + uint16_t cmpl_ring_id; /* - * The supported speeds for the port. This is a bit mask. For - * each speed that is supported, the corrresponding bit will be - * set to '1'. + * This field is used only when ring_type is a TX ring. + * This value indicates what CoS queue the TX ring + * is associated with. */ - /* 100Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB UINT32_C(0x2) - /* 1Gb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB UINT32_C(0x8) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB UINT32_C(0x10) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB UINT32_C(0x20) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB UINT32_C(0x40) - /* 20Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB UINT32_C(0x80) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB UINT32_C(0x100) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB UINT32_C(0x200) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB UINT32_C(0x400) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB UINT32_C(0x800) - /* 10Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD UINT32_C(0x1000) - /* 10Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB UINT32_C(0x2000) - uint16_t force_link_speed; + uint16_t queue_id; /* - * Current setting of forced link speed. When the link speed is - * not being forced, this value shall be set to 0. + * When allocating a Rx ring or Rx aggregation ring, this field + * specifies the size of the buffer descriptors posted to the ring. 
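Putting the ring-allocation fields together, here is a minimal sketch of a TX ring request for the common physically contiguous case; ring_iova is a hypothetical DMA address, and per the page_tbl_depth text above, depth 0 means page_tbl_addr points directly at the ring so fbo and page_size are ignored.

#include <string.h>
#include <stdint.h>
#include "hsi_struct_def_dpdk.h"

/* Sketch only: fill the basics of an HWRM_RING_ALLOC TX request. */
static void
fill_tx_ring_alloc(struct hwrm_ring_alloc_input *req,
		   uint64_t ring_iova, uint32_t num_16b_units,
		   uint16_t logical_id, uint16_t cmpl_ring_id)
{
	memset(req, 0, sizeof(*req));
	req->ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
	req->page_tbl_addr = ring_iova;	/* ring itself, not a page table */
	req->page_tbl_depth = 0;	/* direct address, see text above */
	req->length = num_16b_units;	/* at least 16 entries of 16B */
	req->logical_id = logical_id;	/* selects the doorbell position */
	req->cmpl_ring_id = cmpl_ring_id; /* TX rings need a CR */
}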
*/ - /* 100Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1) - /* 1Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64) - /* 20Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8) - /* 10Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff) - uint8_t auto_mode; - /* Current setting of auto negotiation mode. */ + uint16_t rx_buf_size; /* - * Disable autoneg or autoneg disabled. No - * speeds are selected. + * When allocating an Rx aggregation ring, this field + * specifies the associated Rx ring ID. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE UINT32_C(0x0) - /* Select all possible speeds for autoneg mode. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1) + uint16_t rx_ring_id; /* - * Select only the auto_link_speed speed for - * autoneg mode. This mode has been DEPRECATED. - * An HWRM client should not use this mode. + * When allocating a completion ring, this field + * specifies the associated NQ ring ID. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2) + uint16_t nq_ring_id; /* - * Select the auto_link_speed or any speed below - * that speed for autoneg. This mode has been - * DEPRECATED. An HWRM client should not use - * this mode. + * This field is used only when ring_type is a TX ring. + * This field is used to configure arbitration related + * parameters for a TX ring. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3) + uint16_t ring_arb_cfg; + /* Arbitration policy used for the ring. */ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK \ + UINT32_C(0xf) + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SFT 0 /* - * Select the speeds based on the corresponding - * link speed mask value that is provided. + * Use strict priority for the TX ring. + * Priority value is specified in arb_policy_param */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4) - uint8_t auto_pause; + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \ + UINT32_C(0x1) /* - * Current setting of pause autonegotiation. Move autoneg_pause - * flag here. + * Use weighted fair queue arbitration for the TX ring. + * Weight is specified in arb_policy_param */ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \ + UINT32_C(0x2) + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \ + HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ + /* Reserved field. */ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_MASK \ + UINT32_C(0xf0) + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_SFT 4 + /* + * Arbitration policy specific parameter. + * # For strict priority arbitration policy, this field + * represents a priority value. 
If set to 0, then the priority
+ * is not specified and the HWRM is allowed to select
+ * any priority for this TX ring.
+ * # For weighted fair queue arbitration policy, this field
+ * represents a weight value. If set to 0, then the weight
+ * is not specified and the HWRM is allowed to select
+ * any weight for this TX ring.
+ */
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK \
+	UINT32_C(0xff00)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ uint16_t unused_3;
 /*
- * When this bit is '1', Generation of tx pause messages has
- * been requested. Disabled otherwise.
+ * This field is reserved for future use.
+ * It shall be set to 0.
 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_TX UINT32_C(0x1)
+ uint32_t reserved3;
 /*
- * When this bit is '1', Reception of rx pause messages has been
- * requested. Disabled otherwise.
+ * This field is used only when ring_type is a TX ring.
+ * This input indicates what statistics context this ring
+ * should be associated with.
 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_RX UINT32_C(0x2)
+ uint32_t stat_ctx_id;
 /*
- * When set to 1, the advertisement of pause is enabled. # When
- * the auto_mode is not set to none and this flag is set to 1,
- * then the auto_pause bits on this port are being advertised
- * and autoneg pause results are being interpreted. # When the
- * auto_mode is not set to none and this flag is set to 0, the
- * pause is forced as indicated in force_pause, and also
- * advertised as auto_pause bits, but the autoneg results are
- * not interpreted since the pause configuration is being
- * forced. # When the auto_mode is set to none and this flag is
- * set to 1, auto_pause bits should be ignored and should be set
- * to 0.
+ * This field is reserved for future use.
+ * It shall be set to 0.
 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
- uint16_t auto_link_speed;
+ uint32_t reserved4;
 /*
- * Current setting for auto_link_speed. This field is only valid
- * when auto_mode is set to "one_speed" or "one_or_below".
+ * This field is used only when ring_type is a TX ring
+ * to specify the maximum BW allocated to the TX ring.
+ * The HWRM will translate this value into a byte counter and
+ * time interval used for this ring inside the device.
 */
- /* 100Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1)
- /* 1Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa)
- /* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19)
- /* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64)
- /* 20Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8)
- /* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa)
- /* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190)
- /* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
- /* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
- /* 10Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
- uint16_t auto_link_speed_mask;
+ uint32_t max_bw;
+ /* The bandwidth value.
*/ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_SFT 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_LAST \ + HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID /* - * Current setting for auto_link_speed_mask that is used to - * advertise speeds during autonegotiation. This field is only - * valid when auto_mode is set to "mask". The speeds specified - * in this field shall be a subset of supported speeds on this - * port. + * This field is used only when ring_type is a Completion ring. + * This value indicates what interrupt mode should be used + * on this completion ring. + * Note: In the legacy interrupt mode, no more than 16 + * completion rings are allowed. 
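/*
 * [Editor's illustration, not part of this patch] The max_bw word packs
 * the numeric value, the bit/byte scale and the decimal unit into one
 * 32-bit field. A minimal sketch of composing it from the masks defined
 * above; the helper name is hypothetical:
 */
static inline uint32_t
bnxt_encode_max_bw(uint32_t value)
{
	uint32_t bw = value & HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK;

	bw |= HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS;		/* count bits, not bytes */
	bw |= HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA;	/* base-10 mega */
	return bw;	/* e.g. value == 2500 caps the TX ring at 2.5 Gb */
}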
*/ - /* 100Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \ - UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \ - UINT32_C(0x2) - /* 1Gb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \ - UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB UINT32_C(0x8) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \ - UINT32_C(0x10) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \ - UINT32_C(0x20) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \ - UINT32_C(0x40) - /* 20Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB \ - UINT32_C(0x80) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB \ - UINT32_C(0x100) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB \ - UINT32_C(0x200) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \ - UINT32_C(0x400) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \ - UINT32_C(0x800) - /* 10Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \ - UINT32_C(0x1000) - /* 10Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \ - UINT32_C(0x2000) - uint8_t wirespeed; - /* Current setting for wirespeed. */ - /* Wirespeed feature is disabled. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_WIOUTPUTEED_OFF UINT32_C(0x0) - /* Wirespeed feature is enabled. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_WIOUTPUTEED_ON UINT32_C(0x1) - uint8_t lpbk; - /* Current setting for loopback. */ - /* No loopback is selected. Normal operation. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0) + uint8_t int_mode; + /* Legacy INTA */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY UINT32_C(0x0) + /* Reserved */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD UINT32_C(0x1) + /* MSI-X */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX UINT32_C(0x2) + /* No Interrupt - Polled mode */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL UINT32_C(0x3) + #define HWRM_RING_ALLOC_INPUT_INT_MODE_LAST \ + HWRM_RING_ALLOC_INPUT_INT_MODE_POLL + uint8_t unused_4[3]; +} __attribute__((packed)); + +/* hwrm_ring_alloc_output (size:128b/16B) */ +struct hwrm_ring_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; /* - * The HW will be configured with local loopback - * such that host data is sent back to the host - * without modification. + * Physical number of ring allocated. + * This value shall be unique for a ring type. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1) + uint16_t ring_id; + /* Logical number of ring allocated. */ + uint16_t logical_ring_id; + uint8_t unused_0[3]; /* - * The HW will be configured with remote - * loopback such that port logic will send - * packets back out the transmitter that are - * received. + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
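/*
 * [Editor's sketch, not from this patch] The 'valid' byte is the
 * response-coherency flag: firmware writes it last, so a driver polls
 * it before trusting any other response field. The buffer pointer,
 * timeout constant and use of rte_delay_us() below are illustrative:
 */
volatile struct hwrm_ring_alloc_output *resp = hwrm_resp_buf;
int timeout = HWRM_CMD_TIMEOUT_US;	/* hypothetical budget, in usec */

while (!resp->valid && timeout--)
	rte_delay_us(1);
if (!resp->valid)
	return -ETIMEDOUT;	/* firmware never completed the command */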
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2) - uint8_t force_pause; + uint8_t valid; +} __attribute__((packed)); + +/****************** + * hwrm_ring_free * + ******************/ + + +/* hwrm_ring_free_input (size:192b/24B) */ +struct hwrm_ring_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Current setting of forced pause. When the pause configuration - * is not being forced, then this value shall be set to 0. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ + uint16_t cmpl_ring; /* - * When this bit is '1', Generation of tx pause messages is - * supported. Disabled otherwise. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX UINT32_C(0x1) + uint16_t seq_id; /* - * When this bit is '1', Reception of rx pause messages is - * supported. Disabled otherwise. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX UINT32_C(0x2) - uint8_t module_status; + uint16_t target_id; /* - * This value indicates the current status of the optics module - * on this port. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - /* Module is inserted and accepted */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE UINT32_C(0x0) - /* Module is rejected and transmit side Laser is disabled. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX UINT32_C(0x1) - /* Module mismatch warning. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG UINT32_C(0x2) - /* Module is rejected and powered down. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN UINT32_C(0x3) - /* Module is not inserted. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \ - UINT32_C(0x4) - /* Module status is not applicable. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \ - UINT32_C(0xff) - uint32_t preemphasis; - /* Current setting for preemphasis. */ - uint8_t phy_maj; - /* This field represents the major version of the PHY. */ - uint8_t phy_min; - /* This field represents the minor version of the PHY. */ - uint8_t phy_bld; - /* This field represents the build version of the PHY. */ - uint8_t phy_type; - /* This value represents a PHY type. 
*/ - /* Unknown */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN UINT32_C(0x0) - /* BASE-CR */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR UINT32_C(0x1) - /* BASE-KR4 (Deprecated) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 UINT32_C(0x2) - /* BASE-LR */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR UINT32_C(0x3) - /* BASE-SR */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR UINT32_C(0x4) - /* BASE-KR2 (Deprecated) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 UINT32_C(0x5) - /* BASE-KX */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX UINT32_C(0x6) - /* BASE-KR */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR UINT32_C(0x7) - /* BASE-T */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET UINT32_C(0x8) - /* EEE capable BASE-T */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE UINT32_C(0x9) - /* SGMII connected external PHY */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY UINT32_C(0xa) - /* 25G_BASECR_CA_L */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L UINT32_C(0xb) - /* 25G_BASECR_CA_S */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S UINT32_C(0xc) - /* 25G_BASECR_CA_N */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N UINT32_C(0xd) - /* 25G_BASESR */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR UINT32_C(0xe) - /* 100G_BASECR4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4 UINT32_C(0xf) - /* 100G_BASESR4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4 UINT32_C(0x10) - /* 100G_BASELR4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4 UINT32_C(0x11) - /* 100G_BASEER4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4 UINT32_C(0x12) - /* 100G_BASESR10 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10 UINT32_C(0x13) - /* 40G_BASECR4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4 UINT32_C(0x14) - /* 40G_BASESR4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4 UINT32_C(0x15) - /* 40G_BASELR4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4 UINT32_C(0x16) - /* 40G_BASEER4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4 UINT32_C(0x17) - /* 40G_ACTIVE_CABLE */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE \ - UINT32_C(0x18) - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET UINT32_C(0x19) - /* 1G_baseSX */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX UINT32_C(0x1a) - /* 1G_baseCX */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX UINT32_C(0x1b) - uint8_t media_type; - /* This value represents a media type. */ - /* Unknown */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_UNKNOWN UINT32_C(0x0) - /* Twisted Pair */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP UINT32_C(0x1) - /* Direct Attached Copper */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC UINT32_C(0x2) - /* Fiber */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE UINT32_C(0x3) - uint8_t xcvr_pkg_type; - /* This value represents a transceiver type. */ - /* PHY and MAC are in the same package */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \ - UINT32_C(0x1) - /* PHY and MAC are in different packages */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \ - UINT32_C(0x2) - uint8_t eee_config_phy_addr; + uint64_t resp_addr; + /* Ring Type. 
*/ + uint8_t ring_type; + /* L2 Completion Ring (CR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0) + /* TX Ring (TR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_TX UINT32_C(0x1) + /* RX Ring (RR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_RX UINT32_C(0x2) + /* RoCE Notification Completion Ring (ROCE_CR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3) + /* RX Aggregation Ring */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG UINT32_C(0x4) + /* Notification Queue */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_NQ UINT32_C(0x5) + #define HWRM_RING_FREE_INPUT_RING_TYPE_LAST \ + HWRM_RING_FREE_INPUT_RING_TYPE_NQ + uint8_t unused_0; + /* Physical number of ring allocated. */ + uint16_t ring_id; + uint8_t unused_1[4]; +} __attribute__((packed)); + +/* hwrm_ring_free_output (size:128b/16B) */ +struct hwrm_ring_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field represents flags related to EEE configuration. - * These EEE configuration flags are valid only when the - * auto_mode is not set to none (in other words autonegotiation - * is enabled). + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - /* This field represents PHY address. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK UINT32_C(0x1f) - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0 + uint8_t valid; +} __attribute__((packed)); + +/************************************** + * hwrm_ring_cmpl_ring_qaggint_params * + **************************************/ + + +/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */ +struct hwrm_ring_cmpl_ring_qaggint_params_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * When set to 1, Energy Efficient Ethernet (EEE) mode is - * enabled. Speeds for autoneg with EEE mode enabled are based - * on eee_link_speed_mask. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED UINT32_C(0x20) + uint16_t cmpl_ring; /* - * This flag is valid only when eee_enabled is set to 1. # If - * eee_enabled is set to 0, then EEE mode is disabled and this - * flag shall be ignored. # If eee_enabled is set to 1 and this - * flag is set to 1, then Energy Efficient Ethernet (EEE) mode - * is enabled and in use. # If eee_enabled is set to 1 and this - * flag is set to 0, then Energy Efficient Ethernet (EEE) mode - * is enabled but is currently not in use. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE UINT32_C(0x40) + uint16_t seq_id; /* - * This flag is valid only when eee_enabled is set to 1. # If - * eee_enabled is set to 0, then EEE mode is disabled and this - * flag shall be ignored. 
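/*
 * [Editor's sketch, not part of this patch] Freeing a ring is symmetric
 * to allocation: pass back the ring type plus the physical ring_id that
 * hwrm_ring_alloc_output returned. Variable names are hypothetical and
 * the DPDK byte-order helper from <rte_byteorder.h> is assumed:
 */
struct hwrm_ring_free_input req = { 0 };

req.ring_type = HWRM_RING_FREE_INPUT_RING_TYPE_TX;
req.ring_id = rte_cpu_to_le_16(tx_ring_id);	/* id from hwrm_ring_alloc_output */
/* ...followed by the driver's usual HWRM send/wait sequence. */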
# If eee_enabled is set to 1 and this - * flag is set to 1, then Energy Efficient Ethernet (EEE) mode - * is enabled and TX LPI is enabled. # If eee_enabled is set to - * 1 and this flag is set to 0, then Energy Efficient Ethernet - * (EEE) mode is enabled but TX LPI is disabled. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI UINT32_C(0x80) + uint16_t target_id; /* - * This field represents flags related to EEE configuration. - * These EEE configuration flags are valid only when the - * auto_mode is not set to none (in other words autonegotiation - * is enabled). + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK UINT32_C(0xe0) - #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5 - uint8_t parallel_detect; - /* Reserved field, set to 0 */ + uint64_t resp_addr; + /* Physical number of completion ring. */ + uint16_t ring_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */ +struct hwrm_ring_cmpl_ring_qaggint_params_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint16_t flags; /* - * When set to 1, the parallel detection is used to determine - * the speed of the link partner. Parallel detection is used - * when a autonegotiation capable device is connected to a link - * parter that is not capable of autonegotiation. + * When this bit is set to '1', interrupt max + * timer is reset whenever a completion is received. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PARALLEL_DETECT UINT32_C(0x1) - /* Reserved field, set to 0 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_RESERVED_MASK UINT32_C(0xfe) - #define HWRM_PORT_PHY_QCFG_OUTPUT_RESERVED_SFT 1 - uint16_t link_partner_adv_speeds; + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS_OUTPUT_FLAGS_TIMER_RESET \ + UINT32_C(0x1) /* - * The advertised speeds for the port by the link partner. Each - * advertised speed will be set to '1'. + * When this bit is set to '1', ring idle mode + * aggregation will be enabled. 
*/ - /* 100Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD \ - UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \ + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS_OUTPUT_FLAGS_RING_IDLE \ UINT32_C(0x2) - /* 1Gb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \ - UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \ - UINT32_C(0x8) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB \ - UINT32_C(0x10) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB \ - UINT32_C(0x20) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB \ - UINT32_C(0x40) - /* 20Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB \ - UINT32_C(0x80) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB \ - UINT32_C(0x100) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB \ - UINT32_C(0x200) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB \ - UINT32_C(0x400) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \ - UINT32_C(0x800) - /* 10Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \ - UINT32_C(0x1000) - /* 10Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \ - UINT32_C(0x2000) - uint8_t link_partner_adv_auto_mode; /* - * The advertised autoneg for the port by the link partner. This - * field is deprecated and should be set to 0. + * Number of completions to aggregate before DMA + * during the normal mode. */ + uint16_t num_cmpl_dma_aggr; /* - * Disable autoneg or autoneg disabled. No - * speeds are selected. + * Number of completions to aggregate before DMA + * during the interrupt mode. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_NONE \ - UINT32_C(0x0) - /* Select all possible speeds for autoneg mode. */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \ - UINT32_C(0x1) + uint16_t num_cmpl_dma_aggr_during_int; /* - * Select only the auto_link_speed speed for - * autoneg mode. This mode has been DEPRECATED. - * An HWRM client should not use this mode. + * Timer in unit of 80-nsec used to aggregate completions before + * DMA during the normal mode (not in interrupt mode). */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \ - UINT32_C(0x2) + uint16_t cmpl_aggr_dma_tmr; /* - * Select the auto_link_speed or any speed below - * that speed for autoneg. This mode has been - * DEPRECATED. An HWRM client should not use - * this mode. + * Timer in unit of 80-nsec used to aggregate completions before + * DMA during the interrupt mode. */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \ - UINT32_C(0x3) + uint16_t cmpl_aggr_dma_tmr_during_int; + /* Minimum time (in unit of 80-nsec) between two interrupts. */ + uint16_t int_lat_tmr_min; /* - * Select the speeds based on the corresponding - * link speed mask value that is provided. + * Maximum wait time (in unit of 80-nsec) spent aggregating + * completions before signaling the interrupt after the + * interrupt is enabled. 
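/*
 * [Editor's note, not part of this patch] All *_tmr fields in this
 * response are expressed in 80-nsec ticks, so converting a familiar
 * microsecond value is a one-liner (macro name hypothetical):
 */
#define BNXT_USEC_TO_COAL_TIMER(us)	((us) * 1000 / 80)
/* e.g. BNXT_USEC_TO_COAL_TIMER(100) == 1250 ticks for a 100 usec cap */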
*/ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK \ - UINT32_C(0x4) - uint8_t link_partner_adv_pause; - /* The advertised pause settings on the port by the link partner. */ + uint16_t int_lat_tmr_max; /* - * When this bit is '1', Generation of tx pause messages is - * supported. Disabled otherwise. + * Minimum number of completions aggregated before signaling + * an interrupt. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX \ - UINT32_C(0x1) + uint16_t num_cmpl_aggr_int; + uint8_t unused_0[7]; /* - * When this bit is '1', Reception of rx pause messages is - * supported. Disabled otherwise. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX \ - UINT32_C(0x2) - uint16_t adv_eee_link_speed_mask; + uint8_t valid; +} __attribute__((packed)); + +/***************************************** + * hwrm_ring_cmpl_ring_cfg_aggint_params * + *****************************************/ + + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Current setting for link speed mask that is used to advertise - * speeds during autonegotiation when EEE is enabled. This field - * is valid only when eee_enabled flags is set to 1. The speeds - * specified in this field shall be a subset of speeds specified - * in auto_link_speed_mask. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - /* Reserved */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \ - UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \ - UINT32_C(0x2) - /* Reserved */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \ - UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \ - UINT32_C(0x8) - /* Reserved */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD3 \ - UINT32_C(0x10) - /* Reserved */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD4 \ - UINT32_C(0x20) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_10GB \ - UINT32_C(0x40) - uint16_t link_partner_adv_eee_link_speed_mask; + uint16_t cmpl_ring; /* - * Current setting for link speed mask that is advertised by the - * link partner when EEE is enabled. This field is valid only - * when eee_enabled flags is set to 1. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - /* Reserved */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Physical number of completion ring. */ + uint16_t ring_id; + uint16_t flags; + /* + * When this bit is set to '1', interrupt latency max + * timer is reset whenever a completion is received. + */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET \ UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \ + /* + * When this bit is set to '1', ring idle mode + * aggregation will be enabled. + */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE \ UINT32_C(0x2) - /* Reserved */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \ + /* + * Set this flag to 1 when configuring parameters on a + * notification queue. Set this flag to 0 when configuring + * parameters on a completion queue. + */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_IS_NQ \ UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \ - UINT32_C(0x8) - /* Reserved */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \ - UINT32_C(0x10) - /* Reserved */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \ - UINT32_C(0x20) - /* 10Gb link speed */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \ - UINT32_C(0x40) - uint32_t xcvr_identifier_type_tx_lpi_timer; - /* This value represents transceiver identifier type. */ /* - * Current setting of TX LPI timer in microseconds. This field - * is valid only when_eee_enabled flag is set to 1 and - * tx_lpi_enabled is set to 1. + * Number of completions to aggregate before DMA + * during the normal mode. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff) - #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0 - /* This value represents transceiver identifier type. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \ - UINT32_C(0xff000000) - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT 24 - /* Unknown */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \ - (UINT32_C(0x0) << 24) - /* SFP/SFP+/SFP28 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \ - (UINT32_C(0x3) << 24) - /* QSFP */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \ - (UINT32_C(0xc) << 24) - /* QSFP+ */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \ - (UINT32_C(0xd) << 24) - /* QSFP28 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \ - (UINT32_C(0x11) << 24) - uint16_t fec_cfg; + uint16_t num_cmpl_dma_aggr; + /* + * Number of completions to aggregate before DMA + * during the interrupt mode. + */ + uint16_t num_cmpl_dma_aggr_during_int; + /* + * Timer in unit of 80-nsec used to aggregate completions before + * DMA during the normal mode (not in interrupt mode). + */ + uint16_t cmpl_aggr_dma_tmr; + /* + * Timer in unit of 80-nsec used to aggregate completions before + * DMA during the interrupt mode. + */ + uint16_t cmpl_aggr_dma_tmr_during_int; + /* Minimum time (in unit of 80-nsec) between two interrupts. 
*/ + uint16_t int_lat_tmr_min; + /* + * Maximum wait time (in unit of 80-nsec) spent aggregating + * cmpls before signaling the interrupt after the + * interrupt is enabled. + */ + uint16_t int_lat_tmr_max; + /* + * Minimum number of completions aggregated before signaling + * an interrupt. + */ + uint16_t num_cmpl_aggr_int; /* - * This value represents the current configuration of Forward - * Error Correction (FEC) on the port. + * Bitfield that indicates which parameters are to be applied. Only + * required when configuring devices with notification queues, and + * used in that case to set certain parameters on completion queues + * and others on notification queues. */ + uint16_t enables; /* - * When set to 1, then FEC is not supported on this port. If - * this flag is set to 1, then all other FEC configuration flags - * shall be ignored. When set to 0, then FEC is supported as - * indicated by other configuration flags. If no cable is - * attached and the HWRM does not yet know the FEC capability, - * then the HWRM shall set this flag to 1 when reporting FEC - * capability. + * This bit must be '1' for the num_cmpl_dma_aggr field to be + * configured. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR \ UINT32_C(0x1) /* - * When set to 1, then FEC autonegotiation is supported on this - * port. When set to 0, then FEC autonegotiation is not - * supported on this port. + * This bit must be '1' for the num_cmpl_dma_aggr_during_int field to be + * configured. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_SUPPORTED \ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT \ UINT32_C(0x2) /* - * When set to 1, then FEC autonegotiation is enabled on this - * port. When set to 0, then FEC autonegotiation is disabled if - * supported. This flag should be ignored if FEC autonegotiation - * is not supported on this port. + * This bit must be '1' for the cmpl_aggr_dma_tmr field to be + * configured. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR \ UINT32_C(0x4) /* - * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on - * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is - * not supported on this port. + * This bit must be '1' for the int_lat_tmr_min field to be + * configured. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MIN \ UINT32_C(0x8) /* - * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on - * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is - * disabled if supported. This flag should be ignored if FEC - * CLAUSE 74 is not supported on this port. + * This bit must be '1' for the int_lat_tmr_max field to be + * configured. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MAX \ UINT32_C(0x10) /* - * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported - * on this port. When set to 0, then FEC CLAUSE 91 (Reed - * Solomon) is not supported on this port. + * This bit must be '1' for the num_cmpl_aggr_int field to be + * configured. 
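/*
 * [Editor's sketch, not part of this patch] Typical use of this command:
 * set only the fields you mean to change and flag exactly those bits in
 * 'enables'. Ring id and tick values below are illustrative, and the
 * byte-order helper from <rte_byteorder.h> is assumed:
 */
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = { 0 };

req.ring_id = rte_cpu_to_le_16(cpr_ring_id);	/* hypothetical cmpl ring id */
req.int_lat_tmr_min = rte_cpu_to_le_16(125);	/* 10 usec in 80 ns ticks */
req.int_lat_tmr_max = rte_cpu_to_le_16(1250);	/* 100 usec in 80 ns ticks */
req.enables = rte_cpu_to_le_16(
	HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MIN |
	HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MAX);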
*/ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_SUPPORTED \ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_AGGR_INT \ UINT32_C(0x20) + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled - * on this port. When set to 0, then FEC CLAUSE 91 (Reed - * Solomon) is disabled if supported. This flag should be - * ignored if FEC CLAUSE 91 is not supported on this port. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED \ - UINT32_C(0x40) - uint8_t duplex_state; + uint8_t valid; +} __attribute__((packed)); + +/******************* + * hwrm_ring_reset * + *******************/ + + +/* hwrm_ring_reset_input (size:192b/24B) */ +struct hwrm_ring_reset_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value is indicates the duplex of the current connection - * state. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - /* Half Duplex connection. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF UINT32_C(0x0) - /* Full duplex connection. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL UINT32_C(0x1) - uint8_t unused_1; - char phy_vendor_name[16]; + uint16_t cmpl_ring; /* - * Up to 16 bytes of null padded ASCII string representing PHY - * vendor. If the string is set to null, then the vendor name is - * not available. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - char phy_vendor_partnumber[16]; + uint16_t seq_id; /* - * Up to 16 bytes of null padded ASCII string that identifies - * vendor specific part number of the PHY. If the string is set - * to null, then the vendor specific part number is not - * available. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint32_t unused_2; - uint8_t unused_3; - uint8_t unused_4; - uint8_t unused_5; - uint8_t valid; + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ + uint64_t resp_addr; + /* Ring Type. */ + uint8_t ring_type; + /* L2 Completion Ring (CR) */ + #define HWRM_RING_RESET_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0) + /* TX Ring (TR) */ + #define HWRM_RING_RESET_INPUT_RING_TYPE_TX UINT32_C(0x1) + /* RX Ring (RR) */ + #define HWRM_RING_RESET_INPUT_RING_TYPE_RX UINT32_C(0x2) + /* RoCE Notification Completion Ring (ROCE_CR) */ + #define HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3) + #define HWRM_RING_RESET_INPUT_RING_TYPE_LAST \ + HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL + uint8_t unused_0; + /* Physical number of the ring. */ + uint16_t ring_id; + uint8_t unused_1[4]; } __attribute__((packed)); -/* hwrm_port_qstats */ -/* Description: This function returns per port Ethernet statistics. */ -/* Input (40 bytes) */ -struct hwrm_port_qstats_input { - uint16_t req_type; +/* hwrm_ring_reset_output (size:128b/16B) */ +struct hwrm_ring_reset_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t cmpl_ring; + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_ring_grp_alloc * + ***********************/ + + +/* hwrm_ring_grp_alloc_input (size:192b/24B) */ +struct hwrm_ring_grp_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t cmpl_ring; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t resp_addr; + uint16_t seq_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t port_id; - /* Port ID of port that is being queried. 
*/ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2[3]; - uint8_t unused_3; - uint64_t tx_stat_host_addr; - /* This is the host address where Tx port statistics will be stored */ - uint64_t rx_stat_host_addr; - /* This is the host address where Rx port statistics will be stored */ -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_port_qstats_output { - uint16_t error_code; + uint16_t target_id; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t resp_addr; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This value identifies the CR associated with the ring + * group. */ - uint16_t tx_stat_size; - /* The size of TX port statistics block in bytes. */ - uint16_t rx_stat_size; - /* The size of RX port statistics block in bytes. */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint16_t cr; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This value identifies the main RR associated with the ring + * group. */ -} __attribute__((packed)); - -/* hwrm_port_clr_stats */ -/* - * Description: This function clears per port statistics. The HWRM shall not - * allow a VF driver to clear port statistics. The HWRM shall not allow a PF - * driver to clear port statistics in a partitioning mode. The HWRM may allow a - * PF driver to clear port statistics in the non-partitioning mode. - */ -/* Input (24 bytes) */ -struct hwrm_port_clr_stats_input { - uint16_t req_type; + uint16_t rr; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This value identifies the aggregation RR associated with + * the ring group. If this value is 0xFF... (All Fs), then no + * Aggregation ring will be set. */ - uint16_t cmpl_ring; + uint16_t ar; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This value identifies the statistics context associated + * with the ring group. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t sc; +} __attribute__((packed)); + +/* hwrm_ring_grp_alloc_output (size:128b/16B) */ +struct hwrm_ring_grp_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. 
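/*
 * [Editor's sketch, not part of this patch] A ring group ties together
 * rings allocated earlier; per the 'ar' description above, all-Fs means
 * no aggregation ring is attached. The ids below are hypothetical,
 * obtained from prior hwrm_ring_alloc and stat-context commands:
 */
struct hwrm_ring_grp_alloc_input req = { 0 };

req.cr = rte_cpu_to_le_16(cmpl_ring_id);	/* completion ring */
req.rr = rte_cpu_to_le_16(rx_ring_id);		/* main RX ring */
req.ar = rte_cpu_to_le_16(0xffff);		/* no aggregation ring */
req.sc = rte_cpu_to_le_16(stat_ctx_id);		/* statistics context */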
*/ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This is the ring group ID value. Use this value to program + * the default ring group for the VNIC or as table entries + * in an RSS/COS context. */ - uint64_t resp_addr; + uint32_t ring_group_id; + uint8_t unused_0[3]; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t port_id; - /* Port ID of port that is being queried. */ - uint16_t unused_0[3]; + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_port_clr_stats_output { - uint16_t error_code; +/********************** + * hwrm_ring_grp_free * + **********************/ + + +/* hwrm_ring_grp_free_input (size:192b/24B) */ +struct hwrm_ring_grp_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ + uint64_t resp_addr; + /* This is the ring group ID value. */ + uint32_t ring_group_id; + uint8_t unused_0[4]; } __attribute__((packed)); -/* hwrm_port_led_cfg */ -/* - * Description: This function is used to configure LEDs on a given port. Each - * port has individual set of LEDs associated with it. 
These LEDs are used for - * speed/link configuration as well as activity indicator configuration. Up to - * three LEDs can be configured, one for activity and two for speeds. - */ -/* Input (64 bytes) */ -struct hwrm_port_led_cfg_input { - uint16_t req_type; +/* hwrm_ring_grp_free_output (size:128b/16B) */ +struct hwrm_ring_grp_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/**************************** + * hwrm_cfa_l2_filter_alloc * + ****************************/ + + +/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */ +struct hwrm_cfa_l2_filter_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH \ + UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX + /* Setting of this flag indicates the applicability to the loopback path. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \ + UINT32_C(0x2) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. */ - uint16_t cmpl_ring; + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP \ + UINT32_C(0x4) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * If this flag is set, all t_l2_* fields are invalid + * and they should not be specified. 
+ * If this flag is set, then l2_* fields refer to + * fields of outermost L2 header. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST \ + UINT32_C(0x8) + uint32_t enables; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This bit must be '1' for the l2_addr field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \ + UINT32_C(0x1) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This bit must be '1' for the l2_addr_mask field to be + * configured. */ - uint32_t enables; - /* This bit must be '1' for the led0_id field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID UINT32_C(0x1) - /* This bit must be '1' for the led0_state field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE UINT32_C(0x2) - /* This bit must be '1' for the led0_color field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR UINT32_C(0x4) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \ + UINT32_C(0x2) /* - * This bit must be '1' for the led0_blink_on field to be + * This bit must be '1' for the l2_ovlan field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON UINT32_C(0x8) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN \ + UINT32_C(0x4) /* - * This bit must be '1' for the led0_blink_off field to be + * This bit must be '1' for the l2_ovlan_mask field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF UINT32_C(0x10) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \ + UINT32_C(0x8) /* - * This bit must be '1' for the led0_group_id field to be + * This bit must be '1' for the l2_ivlan field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID UINT32_C(0x20) - /* This bit must be '1' for the led1_id field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_ID UINT32_C(0x40) - /* This bit must be '1' for the led1_state field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_STATE UINT32_C(0x80) - /* This bit must be '1' for the led1_color field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_COLOR UINT32_C(0x100) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \ + UINT32_C(0x10) /* - * This bit must be '1' for the led1_blink_on field to be + * This bit must be '1' for the l2_ivlan_mask field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_ON UINT32_C(0x200) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \ + UINT32_C(0x20) /* - * This bit must be '1' for the led1_blink_off field to be + * This bit must be '1' for the t_l2_addr field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_OFF UINT32_C(0x400) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR \ + UINT32_C(0x40) /* - * This bit must be '1' for the led1_group_id field to be + * This bit must be '1' for the t_l2_addr_mask field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_GROUP_ID UINT32_C(0x800) - /* This bit must be '1' for the led2_id field to be configured. 
*/ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_ID UINT32_C(0x1000) - /* This bit must be '1' for the led2_state field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_STATE UINT32_C(0x2000) - /* This bit must be '1' for the led2_color field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_COLOR UINT32_C(0x4000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \ + UINT32_C(0x80) /* - * This bit must be '1' for the led2_blink_on field to be + * This bit must be '1' for the t_l2_ovlan field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_ON UINT32_C(0x8000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \ + UINT32_C(0x100) /* - * This bit must be '1' for the led2_blink_off field to be + * This bit must be '1' for the t_l2_ovlan_mask field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_OFF UINT32_C(0x10000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \ + UINT32_C(0x200) /* - * This bit must be '1' for the led2_group_id field to be + * This bit must be '1' for the t_l2_ivlan field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_GROUP_ID UINT32_C(0x20000) - /* This bit must be '1' for the led3_id field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_ID UINT32_C(0x40000) - /* This bit must be '1' for the led3_state field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_STATE UINT32_C(0x80000) - /* This bit must be '1' for the led3_color field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_COLOR UINT32_C(0x100000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \ + UINT32_C(0x400) /* - * This bit must be '1' for the led3_blink_on field to be + * This bit must be '1' for the t_l2_ivlan_mask field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_ON UINT32_C(0x200000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \ + UINT32_C(0x800) /* - * This bit must be '1' for the led3_blink_off field to be + * This bit must be '1' for the src_type field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_OFF \ - UINT32_C(0x400000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE \ + UINT32_C(0x1000) /* - * This bit must be '1' for the led3_group_id field to be + * This bit must be '1' for the src_id field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_GROUP_ID UINT32_C(0x800000) - uint16_t port_id; - /* Port ID of port whose LEDs are configured. */ - uint8_t num_leds; + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID \ + UINT32_C(0x2000) /* - * The number of LEDs that are being configured. Up to 4 LEDs - * can be configured with this command. + * This bit must be '1' for the tunnel_type field to be + * configured. */ - uint8_t rsvd; - /* Reserved field. */ - uint8_t led0_id; - /* An identifier for the LED #0. */ - uint8_t led0_state; - /* The requested state of the LED #0. */ - /* Default state of the LED */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT UINT32_C(0x4) - uint8_t led0_color; - /* The requested color of LED #0. 
*/ - /* Default */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_0; - uint16_t led0_blink_on; + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the dst_id field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID \ + UINT32_C(0x8000) /* - * If the LED #0 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * This bit must be '1' for the mirror_vnic_id field to be + * configured. */ - uint16_t led0_blink_off; + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x10000) /* - * If the LED #0 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * This value sets the match value for the L2 MAC address. + * Destination MAC address for RX path. + * Source MAC address for TX path. */ - uint8_t led0_group_id; + uint8_t l2_addr[6]; + uint8_t unused_0[2]; /* - * An identifier for the group of LEDs that LED #0 belongs to. - * If set to 0, then the LED #0 shall not be grouped and shall - * be treated as an individual resource. For all other non-zero - * values of this field, LED #0 shall be grouped together with - * the LEDs with the same group ID value. + * This value sets the mask value for the L2 address. + * A value of 0 will mask the corresponding bit from + * compare. */ - uint8_t rsvd0; - /* Reserved field. */ - uint8_t led1_id; - /* An identifier for the LED #1. */ - uint8_t led1_state; - /* The requested state of the LED #1. */ - /* Default state of the LED */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT UINT32_C(0x4) - uint8_t led1_color; - /* The requested color of LED #1. */ - /* Default */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_1; - uint16_t led1_blink_on; + uint8_t l2_addr_mask[6]; + /* This value sets VLAN ID value for outer VLAN. */ + uint16_t l2_ovlan; /* - * If the LED #1 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * This value sets the mask value for the ovlan id. + * A value of 0 will mask the corresponding bit from + * compare. */ - uint16_t led1_blink_off; + uint16_t l2_ovlan_mask; + /* This value sets VLAN ID value for inner VLAN. */ + uint16_t l2_ivlan; /* - * If the LED #1 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * This value sets the mask value for the ivlan id. 
+ * A value of 0 will mask the corresponding bit from + * compare. */ - uint8_t led1_group_id; + uint16_t l2_ivlan_mask; + uint8_t unused_1[2]; /* - * An identifier for the group of LEDs that LED #1 belongs to. - * If set to 0, then the LED #1 shall not be grouped and shall - * be treated as an individual resource. For all other non-zero - * values of this field, LED #1 shall be grouped together with - * the LEDs with the same group ID value. + * This value sets the match value for the tunnel + * L2 MAC address. + * Destination MAC address for RX path. + * Source MAC address for TX path. */ - uint8_t rsvd1; - /* Reserved field. */ - uint8_t led2_id; - /* An identifier for the LED #2. */ - uint8_t led2_state; - /* The requested state of the LED #2. */ - /* Default state of the LED */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT UINT32_C(0x4) - uint8_t led2_color; - /* The requested color of LED #2. */ - /* Default */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_2; - uint16_t led2_blink_on; + uint8_t t_l2_addr[6]; + uint8_t unused_2[2]; /* - * If the LED #2 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * This value sets the mask value for the tunnel L2 + * address. + * A value of 0 will mask the corresponding bit from + * compare. */ - uint16_t led2_blink_off; + uint8_t t_l2_addr_mask[6]; + /* This value sets VLAN ID value for tunnel outer VLAN. */ + uint16_t t_l2_ovlan; /* - * If the LED #2 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * This value sets the mask value for the tunnel ovlan id. + * A value of 0 will mask the corresponding bit from + * compare. */ - uint8_t led2_group_id; + uint16_t t_l2_ovlan_mask; + /* This value sets VLAN ID value for tunnel inner VLAN. */ + uint16_t t_l2_ivlan; /* - * An identifier for the group of LEDs that LED #2 belongs to. - * If set to 0, then the LED #2 shall not be grouped and shall - * be treated as an individual resource. For all other non-zero - * values of this field, LED #2 shall be grouped together with - * the LEDs with the same group ID value. + * This value sets the mask value for the tunnel ivlan id. + * A value of 0 will mask the corresponding bit from + * compare. */ - uint8_t rsvd2; - /* Reserved field. */ - uint8_t led3_id; - /* An identifier for the LED #3. */ - uint8_t led3_state; - /* The requested state of the LED #3. 
*/ - /* Default state of the LED */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT UINT32_C(0x4) - uint8_t led3_color; - /* The requested color of LED #3. */ - /* Default */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_3; - uint16_t led3_blink_on; + uint16_t t_l2_ivlan_mask; + /* This value identifies the type of source of the packet. */ + uint8_t src_type; + /* Network port */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT UINT32_C(0x0) + /* Physical function */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF UINT32_C(0x1) + /* Virtual function */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF UINT32_C(0x2) + /* Virtual NIC of a function */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC UINT32_C(0x3) + /* Embedded processor for CFA management */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG UINT32_C(0x4) + /* Embedded processor for OOB management */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE UINT32_C(0x5) + /* Embedded processor for RoCE */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO UINT32_C(0x6) + /* Embedded processor for network proxy functions */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG UINT32_C(0x7) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG + uint8_t unused_3; + /* + * This value is the id of the source. + * For a network port, it represents port_id. + * For a physical function, it represents fid. + * For a virtual function, it represents vf_id. + * For a vnic, it represents vnic_id. + * For embedded processors, this id is not valid. + * + * Notes: + * 1. The function ID is implied if src_id is + * not provided for a src_type that is either + */ + uint32_t src_id; + /* Tunnel Type. 
*/ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Any tunneled traffic */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL + uint8_t unused_4; /* - * If the LED #3 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * If set, this value shall represent the + * Logical VNIC ID of the destination VNIC for the RX + * path and network port id of the destination port for + * the TX path. */ - uint16_t led3_blink_off; + uint16_t dst_id; /* - * If the LED #3 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * Logical VNIC ID of the VNIC where traffic is + * mirrored. */ - uint8_t led3_group_id; + uint16_t mirror_vnic_id; /* - * An identifier for the group of LEDs that LED #3 belongs to. - * If set to 0, then the LED #3 shall not be grouped and shall - * be treated as an individual resource. For all other non-zero - * values of this field, LED #3 shall be grouped together with - * the LEDs with the same group ID value. + * This hint is provided to help in placing + * the filter in the filter table. */ - uint8_t rsvd3; - /* Reserved field. */ + uint8_t pri_hint; + /* No preference */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \ + UINT32_C(0x0) + /* Above the given filter */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER \ + UINT32_C(0x1) + /* Below the given filter */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER \ + UINT32_C(0x2) + /* As high as possible */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX \ + UINT32_C(0x3) + /* As low as possible */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN \ + UINT32_C(0x4) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN + uint8_t unused_5; + uint32_t unused_6; + /* + * This is the ID of the filter that goes along with + * the pri_hint. + * + * This field is valid only for the following values. 
+ * 1 - Above the given filter + * 2 - Below the given filter + */ + uint64_t l2_filter_id_hint; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_port_led_cfg_output { - uint16_t error_code; +/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_l2_filter_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This value identifies a set of CFA data structures used for an L2 + * context. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t l2_filter_id; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This is the ID of the flow associated with this + * filter. + * This value shall be used to match and associate the + * flow identifier returned in completion records. + * A value of 0xFFFFFFFF shall indicate no flow id. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint32_t flow_id; + uint8_t unused_0[3]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_port_led_qcfg */ -/* - * Description: This function is used to query configuration of LEDs on a given - * port. Each port has individual set of LEDs associated with it. These LEDs are - * used for speed/link configuration as well as activity indicator - * configuration. Up to three LEDs can be configured, one for activity and two - * for speeds. - */ -/* Input (24 bytes) */ -struct hwrm_port_led_qcfg_input { - uint16_t req_type; - /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. - */ - uint16_t cmpl_ring; - /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. - */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; +/*************************** + * hwrm_cfa_l2_filter_free * + ***************************/ + + +/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_l2_filter_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Target ID of this command. 
0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t resp_addr; + uint16_t cmpl_ring; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t port_id; - /* Port ID of port whose LED configuration is being queried. */ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (56 bytes) */ -struct hwrm_port_led_qcfg_output { - uint16_t error_code; + uint16_t seq_id; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t target_id; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * A physical address pointer pointing to a host buffer to which the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint8_t num_leds; + uint64_t resp_addr; /* - * The number of LEDs that are configured on this port. Up to 4 - * LEDs can be returned in the response. + * This value identifies a set of CFA data structures used for an L2 + * context. */ - uint8_t led0_id; - /* An identifier for the LED #0. */ - uint8_t led0_type; - /* The type of LED #0. */ - /* Speed LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0) - /* Activity LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1) - /* Invalid */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff) - uint8_t led0_state; - /* The current state of the LED #0. */ - /* Default state of the LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT UINT32_C(0x4) - uint8_t led0_color; - /* The color of LED #0. 
*/ - /* Default */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_0; - uint16_t led0_blink_on; + uint64_t l2_filter_id; +} __attribute__((packed)); + +/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_l2_filter_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * If the LED #0 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t led0_blink_off; + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_cfa_l2_filter_cfg * + **************************/ + + +/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */ +struct hwrm_cfa_l2_filter_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * If the LED #0 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint8_t led0_group_id; + uint16_t cmpl_ring; /* - * An identifier for the group of LEDs that LED #0 belongs to. - * If set to 0, then the LED #0 is not grouped. For all other - * non-zero values of this field, LED #0 is grouped together - * with the LEDs with the same group ID value. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint8_t led1_id; - /* An identifier for the LED #1. */ - uint8_t led1_type; - /* The type of LED #1. */ - /* Speed LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0) - /* Activity LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1) - /* Invalid */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff) - uint8_t led1_state; - /* The current state of the LED #1. */ - /* Default state of the LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT UINT32_C(0x4) - uint8_t led1_color; - /* The color of LED #1. 
*/ - /* Default */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_1; - uint16_t led1_blink_on; + uint16_t seq_id; /* - * If the LED #1 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t led1_blink_off; + uint16_t target_id; /* - * If the LED #1 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * A physical address pointer pointing to a host buffer to which the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint8_t led1_group_id; + uint64_t resp_addr; + uint32_t flags; /* - * An identifier for the group of LEDs that LED #1 belongs to. - * If set to 0, then the LED #1 is not grouped. For all other - * non-zero values of this field, LED #1 is grouped together - * with the LEDs with the same group ID value. + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. */ - uint8_t led2_id; - /* An identifier for the LED #2. */ - uint8_t led2_type; - /* The type of LED #2. */ - /* Speed LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0) - /* Activity LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1) - /* Invalid */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff) - uint8_t led2_state; - /* The current state of the LED #2. */ - /* Default state of the LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT UINT32_C(0x4) - uint8_t led2_color; - /* The color of LED #2. */ - /* Default */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_2; - uint16_t led2_blink_on; + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX /* - * If the LED #2 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered an accept action. 
*/ - uint16_t led2_blink_off; + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_DROP UINT32_C(0x2) + uint32_t enables; /* - * If the LED #2 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * This bit must be '1' for the dst_id field to be + * configured. */ - uint8_t led2_group_id; + #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID \ + UINT32_C(0x1) /* - * An identifier for the group of LEDs that LED #2 belongs to. - * If set to 0, then the LED #2 is not grouped. For all other - * non-zero values of this field, LED #2 is grouped together - * with the LEDs with the same group ID value. + * This bit must be '1' for the new_mirror_vnic_id field to be + * configured. */ - uint8_t led3_id; - /* An identifier for the LED #3. */ - uint8_t led3_type; - /* The type of LED #3. */ - /* Speed LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0) - /* Activity LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1) - /* Invalid */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff) - uint8_t led3_state; - /* The current state of the LED #3. */ - /* Default state of the LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT UINT32_C(0x4) - uint8_t led3_color; - /* The color of LED #3. */ - /* Default */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_3; - uint16_t led3_blink_on; + #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ + UINT32_C(0x2) /* - * If the LED #3 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * This value identifies a set of CFA data structures used for an L2 + * context. */ - uint16_t led3_blink_off; + uint64_t l2_filter_id; /* - * If the LED #3 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * If set, this value shall represent the + * Logical VNIC ID of the destination VNIC for the RX + * path and network port id of the destination port for + * the TX path. */ - uint8_t led3_group_id; + uint32_t dst_id; /* - * An identifier for the group of LEDs that LED #3 belongs to. - * If set to 0, then the LED #3 is not grouped. For all other - * non-zero values of this field, LED #3 is grouped together - * with the LEDs with the same group ID value. + * New Logical VNIC ID of the VNIC where traffic is + * mirrored. */ - uint8_t unused_4; - uint16_t unused_5; - uint8_t unused_6; - uint8_t unused_7; - uint8_t unused_8; - uint8_t valid; + uint32_t new_mirror_vnic_id; +} __attribute__((packed)); + +/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_l2_filter_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. 
*/ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_port_led_qcaps */ -/* - * Description: This function is used to query capabilities of LEDs on a given - * port. Each port has individual set of LEDs associated with it. These LEDs are - * used for speed/link configuration as well as activity indicator - * configuration. - */ -/* Input (24 bytes) */ -struct hwrm_port_led_qcaps_input { - uint16_t req_type; +/*************************** + * hwrm_cfa_l2_set_rx_mask * + ***************************/ + + +/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */ +struct hwrm_cfa_l2_set_rx_mask_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer to which the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t port_id; - /* Port ID of port whose LED configuration is being queried. 
*/ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (48 bytes) */ -struct hwrm_port_led_qcaps_output { - uint16_t error_code; + uint64_t resp_addr; + /* VNIC ID */ + uint32_t vnic_id; + uint32_t mask; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * When this bit is '1', the function is requested to accept + * multi-cast packets specified by the multicast addr table. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST \ + UINT32_C(0x2) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * When this bit is '1', the function is requested to accept + * all multi-cast packets. */ - uint8_t num_leds; + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST \ + UINT32_C(0x4) /* - * The number of LEDs that are configured on this port. Up to 4 - * LEDs can be returned in the response. + * When this bit is '1', the function is requested to accept + * broadcast packets. */ - uint8_t unused_0[3]; - /* Reserved for future use. */ - uint8_t led0_id; - /* An identifier for the LED #0. */ - uint8_t led0_type; - /* The type of LED #0. */ - /* Speed LED */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0) - /* Activity LED */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1) - /* Invalid */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff) - uint8_t led0_group_id; + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST \ + UINT32_C(0x8) /* - * An identifier for the group of LEDs that LED #0 belongs to. - * If set to 0, then the LED #0 cannot be grouped. For all other - * non-zero values of this field, LED #0 is grouped together - * with the LEDs with the same group ID value. + * When this bit is '1', the function is requested to be + * put in the promiscuous mode. + * + * The HWRM should accept any function to set up + * promiscuous mode. + * + * The HWRM shall follow the semantics below for the + * promiscuous mode support. + * # When partitioning is not enabled on a port + * (i.e. single PF on the port), then the PF shall + * be allowed to be in the promiscuous mode. When the + * PF is in the promiscuous mode, then it shall + * receive all host bound traffic on that port. + * # When partitioning is enabled on a port + * (i.e. multiple PFs per port) and a PF on that + * port is in the promiscuous mode, then the PF + * receives all traffic within that partition as + * identified by a unique identifier for the + * PF (e.g. S-Tag). If a unique outer VLAN + * for the PF is specified, then the setting of + * promiscuous mode on that PF shall result in the + * PF receiving all host bound traffic with matching + * outer VLAN. + * # A VF may be set in the promiscuous mode. + * In the promiscuous mode, the VF does not receive any + * traffic unless a unique outer VLAN for the + * VF is specified. If a unique outer VLAN + * for the VF is specified, then the setting of + * promiscuous mode on that VF shall result in the + * VF receiving all host bound traffic with the + * matching outer VLAN. + * # The HWRM shall allow the setting of promiscuous + * mode on a function independently from the + * promiscuous mode settings on other functions. 
+ */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS \ + UINT32_C(0x10) + /* + * If this flag is set, the corresponding RX + * filters shall be set up to cover multicast/broadcast + * filters for the outermost Layer 2 destination MAC + * address field. */ - uint8_t unused_1; - uint16_t led0_state_caps; - /* The states supported by LED #0. */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST \ + UINT32_C(0x20) /* - * If set to 1, this LED is enabled. If set to 0, this LED is - * disabled. + * If this flag is set, the corresponding RX + * filters shall be set up to cover multicast/broadcast + * filters for the VLAN-tagged packets that match the + * TPID and VID fields of VLAN tags in the VLAN tag + * table specified in this command. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED UINT32_C(0x1) + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY \ + UINT32_C(0x40) /* - * If set to 1, off state is supported on this LED. If set to 0, - * off state is not supported on this LED. + * If this flag is set, the corresponding RX + * filters shall be set up to cover multicast/broadcast + * filters for non-VLAN tagged packets and VLAN-tagged + * packets that match the TPID and VID fields of VLAN + * tags in the VLAN tag table specified in this command. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_OFF_SUPPORTED \ - UINT32_C(0x2) + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN \ + UINT32_C(0x80) /* - * If set to 1, on state is supported on this LED. If set to 0, - * on state is not supported on this LED. + * If this flag is set, the corresponding RX + * filters shall be set up to cover multicast/broadcast + * filters for non-VLAN tagged packets and VLAN-tagged + * packets matching any VLAN tag. + * + * If this flag is set, then the HWRM shall ignore + * VLAN tags specified in vlan_tag_tbl. + * + * If none of vlanonly, vlan_nonvlan, and anyvlan_nonvlan + * flags is set, then the HWRM shall ignore + * VLAN tags specified in vlan_tag_tbl. + * + * The HWRM client shall set at most one flag out of + * vlanonly, vlan_nonvlan, and anyvlan_nonvlan. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN \ + UINT32_C(0x100) + /* This is the address for mcast address tbl. */ + uint64_t mc_tbl_addr; + /* + * This value indicates how many entries in mc_tbl are valid. + * Each entry is 6 bytes. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ON_SUPPORTED \ - UINT32_C(0x4) + uint32_t num_mc_entries; + uint8_t unused_0[4]; /* - * If set to 1, blink state is supported on this LED. If set to - * 0, blink state is not supported on this LED. + * This is the address for VLAN tag table. + * Each VLAN entry in the table is 4 bytes of a VLAN tag + * including TPID, PCP, DEI, and VID fields in network byte + * order. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED \ - UINT32_C(0x8) + uint64_t vlan_tag_tbl_addr; /* - * If set to 1, blink_alt state is supported on this LED. If set - * to 0, blink_alt state is not supported on this LED. + * This value indicates how many entries in vlan_tag_tbl are + * valid. Each entry is 4 bytes. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED \ - UINT32_C(0x10) - uint16_t led0_color_caps; - /* The colors supported by LED #0. 
*/ - /* reserved */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_RSVD UINT32_C(0x1) + uint32_t num_vlan_tags; + uint8_t unused_1[4]; +} __attribute__((packed)); + +/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */ +struct hwrm_cfa_l2_set_rx_mask_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * If set to 1, Amber color is supported on this LED. If set to - * 0, Amber color is not supported on this LED. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_AMBER_SUPPORTED \ - UINT32_C(0x2) + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */ +struct hwrm_cfa_l2_set_rx_mask_cmd_err { /* - * If set to 1, Green color is supported on this LED. If set to - * 0, Green color is not supported on this LED. + * command specific error codes that go to + * the cmd_err field in Common HWRM Error Response. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_GREEN_SUPPORTED \ - UINT32_C(0x4) - uint8_t led1_id; - /* An identifier for the LED #1. */ - uint8_t led1_type; - /* The type of LED #1. */ - /* Speed LED */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0) - /* Activity LED */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1) - /* Invalid */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff) - uint8_t led1_group_id; + uint8_t code; + /* Unknown error */ + #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN \ + UINT32_C(0x0) + /* Unable to complete operation due to conflict with Ntuple Filter */ + #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR \ + UINT32_C(0x1) + #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST \ + HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR + uint8_t unused_0[7]; +} __attribute__((packed)); + +/******************************* + * hwrm_cfa_vlan_antispoof_cfg * + *******************************/ + + +/* hwrm_cfa_vlan_antispoof_cfg_input (size:256b/32B) */ +struct hwrm_cfa_vlan_antispoof_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * An identifier for the group of LEDs that LED #1 belongs to. - * If set to 0, then the LED #0 cannot be grouped. For all other - * non-zero values of this field, LED #0 is grouped together - * with the LEDs with the same group ID value. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint8_t unused_2; - uint16_t led1_state_caps; - /* The states supported by LED #1. */ + uint16_t cmpl_ring; /* - * If set to 1, this LED is enabled. If set to 0, this LED is - * disabled. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
*/ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ENABLED UINT32_C(0x1) + uint16_t seq_id; /* - * If set to 1, off state is supported on this LED. If set to 0, - * off state is not supported on this LED. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_OFF_SUPPORTED \ - UINT32_C(0x2) + uint16_t target_id; /* - * If set to 1, on state is supported on this LED. If set to 0, - * on state is not supported on this LED. + * A physical address pointer pointing to a host buffer to which the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ON_SUPPORTED \ - UINT32_C(0x4) + uint64_t resp_addr; /* - * If set to 1, blink state is supported on this LED. If set to - * 0, blink state is not supported on this LED. + * Function ID of the function that is being configured. + * Only valid for a VF FID configured by the PF. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_SUPPORTED \ - UINT32_C(0x8) + uint16_t fid; + uint8_t unused_0[2]; + /* Number of VLAN entries in the vlan_tag_mask_tbl. */ + uint32_t num_vlan_entries; /* - * If set to 1, blink_alt state is supported on this LED. If set - * to 0, blink_alt state is not supported on this LED. + * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN + * antispoof table. Each table entry contains the 16-bit TPID + * (0x8100 or 0x88a8 only), 16-bit VLAN ID, and a 16-bit mask, + * all in network order to match hwrm_cfa_l2_set_rx_mask. + * For an individual VLAN entry, the mask value should be 0xfff + * for the 12-bit VLAN ID. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED \ - UINT32_C(0x10) - uint16_t led1_color_caps; - /* The colors supported by LED #1. */ - /* reserved */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_RSVD UINT32_C(0x1) + uint64_t vlan_tag_mask_tbl_addr; +} __attribute__((packed)); + +/* hwrm_cfa_vlan_antispoof_cfg_output (size:128b/16B) */ +struct hwrm_cfa_vlan_antispoof_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * If set to 1, Amber color is supported on this LED. If set to - * 0, Amber color is not supported on this LED. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_AMBER_SUPPORTED \ - UINT32_C(0x2) + uint8_t valid; +} __attribute__((packed)); + +/******************************** + * hwrm_cfa_vlan_antispoof_qcfg * + ********************************/ + + +/* hwrm_cfa_vlan_antispoof_qcfg_input (size:256b/32B) */ +struct hwrm_cfa_vlan_antispoof_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * If set to 1, Green color is supported on this LED. 
If set to - * 0, Green color is not supported on this LED. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_GREEN_SUPPORTED \ - UINT32_C(0x4) - uint8_t led2_id; - /* An identifier for the LED #2. */ - uint8_t led2_type; - /* The type of LED #2. */ - /* Speed LED */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0) - /* Activity LED */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1) - /* Invalid */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff) - uint8_t led2_group_id; + uint16_t cmpl_ring; /* - * An identifier for the group of LEDs that LED #0 belongs to. - * If set to 0, then the LED #0 cannot be grouped. For all other - * non-zero values of this field, LED #0 is grouped together - * with the LEDs with the same group ID value. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint8_t unused_3; - uint16_t led2_state_caps; - /* The states supported by LED #2. */ + uint16_t seq_id; /* - * If set to 1, this LED is enabled. If set to 0, this LED is - * disabled. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ENABLED UINT32_C(0x1) + uint16_t target_id; /* - * If set to 1, off state is supported on this LED. If set to 0, - * off state is not supported on this LED. + * A physical address pointer pointing to a host buffer to which the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_OFF_SUPPORTED \ - UINT32_C(0x2) + uint64_t resp_addr; /* - * If set to 1, on state is supported on this LED. If set to 0, - * on state is not supported on this LED. + * Function ID of the function that is being queried. + * Only valid for a VF FID queried by the PF. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ON_SUPPORTED \ - UINT32_C(0x4) + uint16_t fid; + uint8_t unused_0[2]; /* - * If set to 1, blink state is supported on this LED. If set to - * 0, blink state is not supported on this LED. + * Maximum number of VLAN entries the firmware is allowed to DMA + * to vlan_tag_mask_tbl. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_SUPPORTED \ - UINT32_C(0x8) + uint32_t max_vlan_entries; /* - * If set to 1, blink_alt state is supported on this LED. If set - * to 0, blink_alt state is not supported on this LED. + * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN + * antispoof table to which firmware will DMA. Each table + * entry will contain the 16-bit TPID (0x8100 or 0x88a8 only), + * 16-bit VLAN ID, and a 16-bit mask, all in network order to + * match hwrm_cfa_l2_set_rx_mask. For an individual VLAN entry, + * the mask value should be 0xfff for the 12-bit VLAN ID. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED \ - UINT32_C(0x10) - uint16_t led2_color_caps; - /* The colors supported by LED #2. 
*/ - /* reserved */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_RSVD UINT32_C(0x1) + uint64_t vlan_tag_mask_tbl_addr; +} __attribute__((packed)); + +/* hwrm_cfa_vlan_antispoof_qcfg_output (size:128b/16B) */ +struct hwrm_cfa_vlan_antispoof_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of valid entries DMAd by firmware to vlan_tag_mask_tbl. */ + uint32_t num_vlan_entries; + uint8_t unused_0[3]; /* - * If set to 1, Amber color is supported on this LED. If set to - * 0, Amber color is not supported on this LED. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_AMBER_SUPPORTED \ - UINT32_C(0x2) + uint8_t valid; +} __attribute__((packed)); + +/******************************** + * hwrm_cfa_tunnel_filter_alloc * + ********************************/ + + +/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */ +struct hwrm_cfa_tunnel_filter_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * If set to 1, Green color is supported on this LED. If set to - * 0, Green color is not supported on this LED. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_GREEN_SUPPORTED \ - UINT32_C(0x4) - uint8_t led3_id; - /* An identifier for the LED #3. */ - uint8_t led3_type; - /* The type of LED #3. */ - /* Speed LED */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0) - /* Activity LED */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1) - /* Invalid */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff) - uint8_t led3_group_id; + uint16_t cmpl_ring; /* - * An identifier for the group of LEDs that LED #3 belongs to. - * If set to 0, then the LED #0 cannot be grouped. For all other - * non-zero values of this field, LED #0 is grouped together - * with the LEDs with the same group ID value. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint8_t unused_4; - uint16_t led3_state_caps; - /* The states supported by LED #3. */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer to which the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* Setting of this flag indicates the applicability to the loopback path. */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \ + UINT32_C(0x1) + uint32_t enables; /* - * If set to 1, this LED is enabled. 
If set to 0, this LED is - * disabled. + * This bit must be '1' for the l2_filter_id field to be + * configured. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ENABLED UINT32_C(0x1) + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \ + UINT32_C(0x1) /* - * If set to 1, off state is supported on this LED. If set to 0, - * off state is not supported on this LED. + * This bit must be '1' for the l2_addr field to be + * configured. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_OFF_SUPPORTED \ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \ UINT32_C(0x2) /* - * If set to 1, on state is supported on this LED. If set to 0, - * on state is not supported on this LED. + * This bit must be '1' for the l2_ivlan field to be + * configured. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ON_SUPPORTED \ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \ UINT32_C(0x4) /* - * If set to 1, blink state is supported on this LED. If set to - * 0, blink state is not supported on this LED. + * This bit must be '1' for the l3_addr field to be + * configured. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_SUPPORTED \ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L3_ADDR \ UINT32_C(0x8) /* - * If set to 1, blink_alt state is supported on this LED. If set - * to 0, blink_alt state is not supported on this LED. + * This bit must be '1' for the l3_addr_type field to be + * configured. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED \ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L3_ADDR_TYPE \ UINT32_C(0x10) - uint16_t led3_color_caps; - /* The colors supported by LED #3. */ - /* reserved */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_RSVD UINT32_C(0x1) /* - * If set to 1, Amber color is supported on this LED. If set to - * 0, Amber color is not supported on this LED. + * This bit must be '1' for the t_l3_addr_type field to be + * configured. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_AMBER_SUPPORTED \ - UINT32_C(0x2) + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_T_L3_ADDR_TYPE \ + UINT32_C(0x20) /* - * If set to 1, Green color is supported on this LED. If set to - * 0, Green color is not supported on this LED. + * This bit must be '1' for the t_l3_addr field to be + * configured. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_GREEN_SUPPORTED \ - UINT32_C(0x4) - uint8_t unused_5; - uint8_t unused_6; - uint8_t unused_7; - uint8_t valid; + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_T_L3_ADDR \ + UINT32_C(0x40) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This bit must be '1' for the tunnel_type field to be + * configured. */ -} __attribute__((packed)); - -/* hwrm_queue_qportcfg */ -/* - * Description: This function is called by a driver to query queue configuration - * of a port. # The HWRM shall at least advertise one queue with lossy service - * profile. # The driver shall use this command to query queue ids before - * configuring or using any queues. # If a service profile is not set for a - * queue, then the driver shall not use that queue without configuring a service - * profile for it. 
# If the driver is not allowed to configure service profiles, - * then the driver shall only use queues for which service profiles are pre- - * configured. - */ -/* Input (24 bytes) */ -struct hwrm_queue_qportcfg_input { - uint16_t req_type; + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x80) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This bit must be '1' for the vni field to be + * configured. */ - uint16_t cmpl_ring; + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_VNI \ + UINT32_C(0x100) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the dst_vnic_id field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_DST_VNIC_ID \ + UINT32_C(0x200) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This bit must be '1' for the mirror_vnic_id field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x400) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This value identifies a set of CFA data structures used for an L2 + * context. */ - uint32_t flags; + uint64_t l2_filter_id; /* - * Enumeration denoting the RX, TX type of the resource. This - * enumeration is used for resources that are similar for both - * TX and RX paths of the chip. + * This value sets the match value for the inner L2 + * MAC address. + * Destination MAC address for RX path. + * Source MAC address for TX path. */ - #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH UINT32_C(0x1) - /* tx path */ - #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) - /* rx path */ - #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) - #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \ - QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX - uint16_t port_id; + uint8_t l2_addr[6]; /* - * Port ID of port for which the queue configuration is being - * queried. This field is only required when sent by IPC. + * This value sets VLAN ID value for inner VLAN. + * Only 12-bits of VLAN ID are used in setting the filter. */ - uint16_t unused_0; -} __attribute__((packed)); - -/* Output (32 bytes) */ -struct hwrm_queue_qportcfg_output { - uint16_t error_code; + uint16_t l2_ivlan; + /* + * The value of inner destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. + */ + uint32_t l3_addr[4]; + /* + * The value of tunnel destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. + */ + uint32_t t_l3_addr[4]; + /* + * This value indicates the type of inner IP address. + * 4 - IPv4 + * 6 - IPv6 + * All others are invalid. + */ + uint8_t l3_addr_type; + /* + * This value indicates the type of tunnel IP address. + * 4 - IPv4 + * 6 - IPv6 + * All others are invalid. + */ + uint8_t t_l3_addr_type; + /* Tunnel Type. 
 */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
 /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * tunnel_flags allows the user to indicate the tunnel tag detection
+ * for the tunnel type specified in tunnel_type.
 */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t tunnel_flags;
 /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * If the tunnel_type is geneve, then this bit indicates if we
+ * need to match the geneve OAM packet.
+ * If the tunnel_type is nvgre or gre, then this bit indicates if
+ * we need to detect checksum present bit in gre header.
+ * If the tunnel_type is mpls, then this bit indicates if we need
+ * to match mpls packet with explicit IPV4/IPV6 null header.
 */
- uint8_t max_configurable_queues;
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR \
+ UINT32_C(0x1)
 /*
- * The maximum number of queues that can be configured on this
- * port. Valid values range from 1 through 8.
+ * If the tunnel_type is geneve, then this bit indicates if we
+ * need to detect the critical option bit set in the oam packet.
+ * If the tunnel_type is nvgre or gre, then this bit indicates
+ * if we need to match nvgre packets with key present bit set in
+ * gre header.
+ * If the tunnel_type is mpls, then this bit indicates if we
+ * need to match mpls packet with S bit from inner/second label.
 */
- uint8_t max_configurable_lossless_queues;
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 \
+ UINT32_C(0x2)
 /*
- * The maximum number of lossless queues that can be configured
- * on this port.
+ * If the tunnel_type is geneve, then this bit indicates if we + * need to match geneve packet with extended header bit set in + * geneve header. + * If the tunnel_type is nvgre or gre, then this bit indicates + * if we need to match nvgre packets with sequence number + * present bit set in gre header. + * If the tunnel_type is mpls, then this bit indicates if we + * need to match mpls packet with S bit from out/first label. */ - uint8_t queue_cfg_allowed; + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 \ + UINT32_C(0x4) /* - * Bitmask indicating which queues can be configured by the - * hwrm_queue_cfg command. Each bit represents a specific queue - * where bit 0 represents queue 0 and bit 7 represents queue 7. - * # A value of 0 indicates that the queue is not configurable - * by the hwrm_queue_cfg command. # A value of 1 indicates that - * the queue is configurable. # A hwrm_queue_cfg command shall - * return error when trying to configure a queue not - * configurable. + * Virtual Network Identifier (VNI). Only valid with + * tunnel_types VXLAN, NVGRE, and Geneve. + * Only lower 24-bits of VNI field are used + * in setting up the filter. */ - uint8_t queue_cfg_info; - /* Information about queue configuration. */ + uint32_t vni; + /* Logical VNIC ID of the destination VNIC. */ + uint32_t dst_vnic_id; /* - * If this flag is set to '1', then the queues are configured - * asymmetrically on TX and RX sides. If this flag is set to - * '0', then the queues are configured symmetrically on TX and - * RX sides. For symmetric configuration, the queue - * configuration including queue ids and service profiles on the - * TX side is the same as the corresponding queue configuration - * on the RX side. + * Logical VNIC ID of the VNIC where traffic is + * mirrored. */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG UINT32_C(0x1) - uint8_t queue_pfcenable_cfg_allowed; + uint32_t mirror_vnic_id; +} __attribute__((packed)); + +/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_tunnel_filter_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value is an opaque id into CFA data structures. */ + uint64_t tunnel_filter_id; /* - * Bitmask indicating which queues can be configured by the - * hwrm_queue_pfcenable_cfg command. Each bit represents a - * specific priority where bit 0 represents priority 0 and bit 7 - * represents priority 7. # A value of 0 indicates that the - * priority is not configurable by the hwrm_queue_pfcenable_cfg - * command. # A value of 1 indicates that the priority is - * configurable. # A hwrm_queue_pfcenable_cfg command shall - * return error when trying to configure a priority that is not - * configurable. + * This is the ID of the flow associated with this + * filter. + * This value shall be used to match and associate the + * flow identifier returned in completion records. + * A value of 0xFFFFFFFF shall indicate no flow id. */ - uint8_t queue_pri2cos_cfg_allowed; + uint32_t flow_id; + uint8_t unused_0[3]; /* - * Bitmask indicating which queues can be configured by the - * hwrm_queue_pri2cos_cfg command. Each bit represents a - * specific queue where bit 0 represents queue 0 and bit 7 - * represents queue 7. 
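/*
 * Illustrative sketch (not part of the patch) of the enables contract in
 * hwrm_cfa_tunnel_filter_alloc_input above: firmware honors a field only
 * when its ENABLES bit is set. bnxt_hwrm_send() is an assumed stand-in for
 * the driver's request/response plumbing, and HWRM_CFA_TUNNEL_FILTER_ALLOC
 * is the request-type constant defined elsewhere in this header; HWRM
 * fields are little-endian on the wire.
 */
#include <rte_byteorder.h>

extern int bnxt_hwrm_send(void *req, uint32_t req_len); /* assumed helper */

static int example_vxlan_tunnel_filter(uint64_t l2_filter_id, uint32_t vni)
{
	struct hwrm_cfa_tunnel_filter_alloc_input req = { 0 };

	req.req_type = rte_cpu_to_le_16(HWRM_CFA_TUNNEL_FILTER_ALLOC);
	req.l2_filter_id = rte_cpu_to_le_64(l2_filter_id);
	req.tunnel_type = HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	req.vni = rte_cpu_to_le_32(vni & 0xffffff); /* lower 24 bits only */
	req.enables = rte_cpu_to_le_32(
		HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
		HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE |
		HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_VNI);
	return bnxt_hwrm_send(&req, sizeof(req));
}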
# A value of 0 indicates that the queue - * is not configurable by the hwrm_queue_pri2cos_cfg command. # - * A value of 1 indicates that the queue is configurable. # A - * hwrm_queue_pri2cos_cfg command shall return error when trying - * to configure a queue that is not configurable. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint8_t queue_cos2bw_cfg_allowed; + uint8_t valid; +} __attribute__((packed)); + +/******************************* + * hwrm_cfa_tunnel_filter_free * + *******************************/ + + +/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_tunnel_filter_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Bitmask indicating which queues can be configured by the - * hwrm_queue_pri2cos_cfg command. Each bit represents a - * specific queue where bit 0 represents queue 0 and bit 7 - * represents queue 7. # A value of 0 indicates that the queue - * is not configurable by the hwrm_queue_pri2cos_cfg command. # - * A value of 1 indicates that the queue is configurable. # A - * hwrm_queue_pri2cos_cfg command shall return error when trying - * to configure a queue not configurable. - */ - uint8_t queue_id0; - /* - * ID of CoS Queue 0. FF - Invalid id # This ID can be used on - * any subsequent call to an hwrm command that takes a queue id. - * # IDs must always be queried by this command before any use - * by the driver or software. # Any driver or software should - * not make any assumptions about queue IDs. # A value of 0xff - * indicates that the queue is not available. # Available queues - * may not be in sequential order. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint8_t queue_id0_service_profile; - /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \ - UINT32_C(0x0) - /* Lossless */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \ - UINT32_C(0x1) + uint16_t cmpl_ring; /* - * Set to 0xFF... (All Fs) if there is no - * service profile specified + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \ - UINT32_C(0xff) - uint8_t queue_id1; + uint16_t seq_id; /* - * ID of CoS Queue 1. FF - Invalid id # This ID can be used on - * any subsequent call to an hwrm command that takes a queue id. - * # IDs must always be queried by this command before any use - * by the driver or software. # Any driver or software should - * not make any assumptions about queue IDs. # A value of 0xff - * indicates that the queue is not available. # Available queues - * may not be in sequential order. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint8_t queue_id1_service_profile; - /* This value is applicable to CoS queues only. 
*/ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \ - UINT32_C(0x0) - /* Lossless */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \ - UINT32_C(0x1) + uint16_t target_id; /* - * Set to 0xFF... (All Fs) if there is no - * service profile specified + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \ - UINT32_C(0xff) - uint8_t queue_id2; + uint64_t resp_addr; + /* This value is an opaque id into CFA data structures. */ + uint64_t tunnel_filter_id; +} __attribute__((packed)); + +/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_tunnel_filter_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * ID of CoS Queue 2. FF - Invalid id # This ID can be used on - * any subsequent call to an hwrm command that takes a queue id. - * # IDs must always be queried by this command before any use - * by the driver or software. # Any driver or software should - * not make any assumptions about queue IDs. # A value of 0xff - * indicates that the queue is not available. # Available queues - * may not be in sequential order. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint8_t queue_id2_service_profile; - /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \ - UINT32_C(0x0) - /* Lossless */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \ - UINT32_C(0x1) + uint8_t valid; +} __attribute__((packed)); + +/*************************************** + * hwrm_cfa_redirect_tunnel_type_alloc * + ***************************************/ + + +/* hwrm_cfa_redirect_tunnel_type_alloc_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Set to 0xFF... (All Fs) if there is no - * service profile specified + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \ - UINT32_C(0xff) - uint8_t queue_id3; + uint16_t cmpl_ring; /* - * ID of CoS Queue 3. FF - Invalid id # This ID can be used on - * any subsequent call to an hwrm command that takes a queue id. - * # IDs must always be queried by this command before any use - * by the driver or software. # Any driver or software should - * not make any assumptions about queue IDs. # A value of 0xff - * indicates that the queue is not available. # Available queues - * may not be in sequential order. + * The sequence ID is used by the driver for tracking multiple + * commands. 
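/*
 * Illustrative sketch (not part of the patch) of the consumer side of the
 * `valid` contract documented above: because firmware writes `valid` last,
 * the host must see valid == 1 and issue a read barrier before trusting any
 * other response field. The poll bound and delay are arbitrary here, not
 * the driver's real timeout policy.
 */
#include <errno.h>
#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_cycles.h>

static int
example_wait_valid(volatile struct hwrm_cfa_tunnel_filter_free_output *resp)
{
	int i;

	for (i = 0; i < 10000; i++) {
		if (resp->valid == 1) {
			rte_io_rmb(); /* read fields only after valid is seen */
			return rte_le_to_cpu_16(resp->error_code);
		}
		rte_delay_us(1);
	}
	return -ETIMEDOUT; /* firmware never completed the response */
}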
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint8_t queue_id3_service_profile; - /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \ - UINT32_C(0x0) - /* Lossless */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \ - UINT32_C(0x1) + uint16_t seq_id; /* - * Set to 0xFF... (All Fs) if there is no - * service profile specified + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \ - UINT32_C(0xff) - uint8_t queue_id4; + uint16_t target_id; /* - * ID of CoS Queue 4. FF - Invalid id # This ID can be used on - * any subsequent call to an hwrm command that takes a queue id. - * # IDs must always be queried by this command before any use - * by the driver or software. # Any driver or software should - * not make any assumptions about queue IDs. # A value of 0xff - * indicates that the queue is not available. # Available queues - * may not be in sequential order. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint8_t queue_id4_service_profile; - /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \ + uint64_t resp_addr; + /* The destination function id, to whom the traffic is redirected. */ + uint16_t dest_fid; + /* Tunnel Type. */ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ UINT32_C(0x0) - /* Lossless */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \ + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ UINT32_C(0x1) - /* - * Set to 0xFF... 
(All Fs) if there is no
- * service profile specified
- */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
 UINT32_C(0xff)
- uint8_t queue_id5;
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ /* Tunnel alloc flags. */
+ uint8_t flags;
+ /* Setting of this flag indicates modify existing redirect tunnel to new destination function ID. */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_FLAGS_MODIFY_DST \
+ UINT32_C(0x1)
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_redirect_tunnel_type_alloc_output (size:128b/16B) */
+struct hwrm_cfa_redirect_tunnel_type_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
 /*
- * ID of CoS Queue 5. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
- * # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
- uint8_t queue_id5_service_profile;
- /* This value is applicable to CoS queues only.
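/*
 * Illustrative sketch (not part of the patch) of the MODIFY_DST flag
 * described above: reissuing the alloc for a tunnel_type that already has
 * a redirect rule, with MODIFY_DST set, re-points the existing rule at a
 * new destination function instead of allocating a duplicate.
 * bnxt_hwrm_send() is the same assumed helper as in the earlier sketch.
 */
static int example_move_vxlan_redirect(uint16_t new_dest_fid)
{
	struct hwrm_cfa_redirect_tunnel_type_alloc_input req = { 0 };

	req.req_type = rte_cpu_to_le_16(HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC);
	req.dest_fid = rte_cpu_to_le_16(new_dest_fid);
	req.tunnel_type =
		HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	req.flags = HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_FLAGS_MODIFY_DST;
	return bnxt_hwrm_send(&req, sizeof(req));
}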
*/ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \ - UINT32_C(0x0) - /* Lossless */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \ - UINT32_C(0x1) + uint8_t valid; +} __attribute__((packed)); + +/************************************** + * hwrm_cfa_redirect_tunnel_type_free * + **************************************/ + + +/* hwrm_cfa_redirect_tunnel_type_free_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Set to 0xFF... (All Fs) if there is no - * service profile specified + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \ - UINT32_C(0xff) - uint8_t queue_id6; + uint16_t cmpl_ring; /* - * ID of CoS Queue 6. FF - Invalid id # This ID can be used on - * any subsequent call to an hwrm command that takes a queue id. - * # IDs must always be queried by this command before any use - * by the driver or software. # Any driver or software should - * not make any assumptions about queue IDs. # A value of 0xff - * indicates that the queue is not available. # Available queues - * may not be in sequential order. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint8_t queue_id6_service_profile; - /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \ - UINT32_C(0x0) - /* Lossless */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \ - UINT32_C(0x1) + uint16_t seq_id; /* - * Set to 0xFF... (All Fs) if there is no - * service profile specified + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \ - UINT32_C(0xff) - uint8_t queue_id7; + uint16_t target_id; /* - * ID of CoS Queue 7. FF - Invalid id # This ID can be used on - * any subsequent call to an hwrm command that takes a queue id. - * # IDs must always be queried by this command before any use - * by the driver or software. # Any driver or software should - * not make any assumptions about queue IDs. # A value of 0xff - * indicates that the queue is not available. # Available queues - * may not be in sequential order. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint8_t queue_id7_service_profile; - /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \ + uint64_t resp_addr; + /* The destination function id, to whom the traffic is redirected. */ + uint16_t dest_fid; + /* Tunnel Type. 
 */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_NONTUNNEL \
 UINT32_C(0x0)
- /* Lossless */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_VXLAN \
 UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_ANYTUNNEL \
 UINT32_C(0xff)
- uint8_t valid;
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0[5];
+} __attribute__((packed));
+
+/* hwrm_cfa_redirect_tunnel_type_free_output (size:128b/16B) */
+struct hwrm_cfa_redirect_tunnel_type_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
 /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/ + uint8_t valid; } __attribute__((packed)); -/********************* - * hwrm_port_mac_cfg * - *********************/ +/************************************** + * hwrm_cfa_redirect_tunnel_type_info * + **************************************/ -/* hwrm_port_mac_cfg_input (size:320b/40B) */ -struct hwrm_port_mac_cfg_input { +/* hwrm_cfa_redirect_tunnel_type_info_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_info_input { + /* The HWRM command request type. */ uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ uint64_t resp_addr; - uint32_t flags; - #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL - #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL - #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL - #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL - #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL - #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL - #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL - #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL - #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL - #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL - #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL - #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL - #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL - uint32_t enables; - #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL - #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL - #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL - #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL - #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL - #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL - #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL - #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL - uint16_t port_id; - uint8_t ipg; - uint8_t lpbk; - #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL - #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL - #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL - #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE - uint8_t vlan_pri2cos_map_pri; - uint8_t reserved1; - uint8_t tunnel_pri2cos_map_pri; - uint8_t dscp2pri_map_pri; - uint16_t rx_ts_capture_ptp_msg_type; - uint16_t tx_ts_capture_ptp_msg_type; - uint8_t cos_field_cfg; - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1 - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \ - (0x0UL << 1) - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \ - (0x1UL << 1) - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \ - (0x2UL << 1) - #define 
PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
- (0x3UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
- PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
- (0x0UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
- (0x1UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
- (0x2UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
- (0x3UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
- PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
- uint8_t unused_0[3];
-};
-
+ /* The source function id. */
+ uint16_t src_fid;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0[5];
+} __attribute__((packed));
-/* hwrm_port_mac_cfg_output (size:128b/16B) */
-struct hwrm_port_mac_cfg_output {
+/* hwrm_cfa_redirect_tunnel_type_info_output (size:128b/16B) */
+struct hwrm_cfa_redirect_tunnel_type_info_output {
+ /* The specific error status for the command. */
 uint16_t error_code;
+ /* The HWRM command request type. */
 uint16_t req_type;
+ /* The sequence ID from the original command. */
 uint16_t seq_id;
+ /* The length of the response data in number of bytes.
*/ uint16_t resp_len; - uint16_t mru; - uint16_t mtu; - uint8_t ipg; - uint8_t lpbk; - #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL - #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL - #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL - #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE - uint8_t unused_0; - uint8_t valid; -}; + /* The destination function id, to whom the traffic is redirected. */ + uint16_t dest_fid; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */ +struct hwrm_vxlan_ipv4_hdr { + /* IPv4 version and header length. */ + uint8_t ver_hlen; + /* IPv4 header length */ + #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK UINT32_C(0xf) + #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0 + /* Version */ + #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK UINT32_C(0xf0) + #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4 + /* IPv4 type of service. */ + uint8_t tos; + /* IPv4 identification. */ + uint16_t ip_id; + /* IPv4 flags and offset. */ + uint16_t flags_frag_offset; + /* IPv4 TTL. */ + uint8_t ttl; + /* IPv4 protocol. */ + uint8_t protocol; + /* IPv4 source address. */ + uint32_t src_ip_addr; + /* IPv4 destination address. */ + uint32_t dest_ip_addr; +} __attribute__((packed)); +/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */ +struct hwrm_vxlan_ipv6_hdr { + /* IPv6 version, traffic class and flow label. */ + uint32_t ver_tc_flow_label; + /* IPv6 version shift */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT \ + UINT32_C(0x1c) + /* IPv6 version mask */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK \ + UINT32_C(0xf0000000) + /* IPv6 TC shift */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT \ + UINT32_C(0x14) + /* IPv6 TC mask */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK \ + UINT32_C(0xff00000) + /* IPv6 flow label shift */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT \ + UINT32_C(0x0) + /* IPv6 flow label mask */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK \ + UINT32_C(0xfffff) + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST \ + HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK + /* IPv6 payload length. */ + uint16_t payload_len; + /* IPv6 next header. */ + uint8_t next_hdr; + /* IPv6 TTL. */ + uint8_t ttl; + /* IPv6 source address. */ + uint32_t src_ip_addr[4]; + /* IPv6 destination address. */ + uint32_t dest_ip_addr[4]; +} __attribute__((packed)); -/********************** - * hwrm_port_mac_qcfg * - **********************/ +/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */ +struct hwrm_cfa_encap_data_vxlan { + /* Source MAC address. */ + uint8_t src_mac_addr[6]; + /* reserved. */ + uint16_t unused_0; + /* Destination MAC address. */ + uint8_t dst_mac_addr[6]; + /* Number of VLAN tags. */ + uint8_t num_vlan_tags; + /* reserved. */ + uint8_t unused_1; + /* Outer VLAN TPID. */ + uint16_t ovlan_tpid; + /* Outer VLAN TCI. */ + uint16_t ovlan_tci; + /* Inner VLAN TPID. */ + uint16_t ivlan_tpid; + /* Inner VLAN TCI. */ + uint16_t ivlan_tci; + /* L3 header fields. */ + uint32_t l3[10]; + /* IP version mask. 
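/*
 * Illustrative sketch (not part of the patch) of the bit layout implied by
 * the mask/shift defines of the two outer-IP header templates above.
 * Values are arbitrary: an IPv4 header of 5 32-bit words, and an IPv6
 * header with zero traffic class and flow label.
 */
static void example_fill_outer_ip(struct hwrm_vxlan_ipv4_hdr *v4,
				  struct hwrm_vxlan_ipv6_hdr *v6)
{
	/* version in the high nibble, header length (in words) in the low */
	v4->ver_hlen =
		(uint8_t)(4 << HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT) |
		(uint8_t)(5 << HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT);

	/* version 6 lands in the top nibble of ver_tc_flow_label */
	v6->ver_tc_flow_label = rte_cpu_to_le_32(
		(UINT32_C(6) << HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT) &
		HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK);
}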
 */
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_MASK UINT32_C(0xf)
+ /* IP version 4. */
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 UINT32_C(0x4)
+ /* IP version 6. */
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 UINT32_C(0x6)
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_LAST \
+ HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+ /* UDP source port. */
+ uint16_t src_port;
+ /* UDP destination port. */
+ uint16_t dst_port;
+ /* VXLAN Network Identifier. */
+ uint32_t vni;
+} __attribute__((packed));
+/*******************************
+ * hwrm_cfa_encap_record_alloc *
+ *******************************/
-/* hwrm_port_mac_qcfg_input (size:192b/24B) */
-struct hwrm_port_mac_qcfg_input {
+
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
+struct hwrm_cfa_encap_record_alloc_input {
+ /* The HWRM command request type. */
 uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
 uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
 uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
 uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
 uint64_t resp_addr;
- uint16_t port_id;
- uint8_t unused_0[6];
-};
-
+ uint32_t flags;
+ /* Setting of this flag indicates the applicability to the loopback path. */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_FLAGS_LOOPBACK \
+ UINT32_C(0x1)
+ /* Encapsulation Type. */
+ uint8_t encap_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* VLAN */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VLAN \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPGRE \
+ UINT32_C(0x8)
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_LAST \
+ HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPGRE
+ uint8_t unused_0[3];
+ /* This value is encap data used for the given encap type. */
+ uint32_t encap_data[20];
+} __attribute__((packed));
-/* hwrm_port_mac_qcfg_output (size:192b/24B) */
-struct hwrm_port_mac_qcfg_output {
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_alloc_output {
+ /* The specific error status for the command. */
 uint16_t error_code;
+ /* The HWRM command request type.
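/*
 * Illustrative sketch (not part of the patch) of how the opaque
 * encap_data[] area above carries the typed template: for encap_type
 * VXLAN, a hwrm_cfa_encap_data_vxlan image (72 bytes) is copied into the
 * 80-byte encap_data region. bnxt_hwrm_send() is the same assumed helper
 * as in the earlier sketches; HWRM_CFA_ENCAP_RECORD_ALLOC is the
 * request-type constant defined elsewhere in this header.
 */
#include <string.h>

static int
example_alloc_vxlan_encap(const struct hwrm_cfa_encap_data_vxlan *tmpl)
{
	struct hwrm_cfa_encap_record_alloc_input req = { 0 };

	req.req_type = rte_cpu_to_le_16(HWRM_CFA_ENCAP_RECORD_ALLOC);
	req.encap_type = HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN;
	memcpy(req.encap_data, tmpl, sizeof(*tmpl)); /* 72B into 80B area */
	return bnxt_hwrm_send(&req, sizeof(req));
}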
*/ uint16_t req_type; + /* The sequence ID from the original command. */ uint16_t seq_id; + /* The length of the response data in number of bytes. */ uint16_t resp_len; - uint16_t mru; - uint16_t mtu; - uint8_t ipg; - uint8_t lpbk; - #define PORT_MAC_QCFG_RESP_LPBK_NONE 0x0UL - #define PORT_MAC_QCFG_RESP_LPBK_LOCAL 0x1UL - #define PORT_MAC_QCFG_RESP_LPBK_REMOTE 0x2UL - #define PORT_MAC_QCFG_RESP_LPBK_LAST PORT_MAC_QCFG_RESP_LPBK_REMOTE - uint8_t vlan_pri2cos_map_pri; - uint8_t flags; - #define PORT_MAC_QCFG_RESP_FLAGS_VLAN_PRI2COS_ENABLE 0x1UL - #define PORT_MAC_QCFG_RESP_FLAGS_TUNNEL_PRI2COS_ENABLE 0x2UL - #define PORT_MAC_QCFG_RESP_FLAGS_IP_DSCP2COS_ENABLE 0x4UL - #define PORT_MAC_QCFG_RESP_FLAGS_OOB_WOL_ENABLE 0x8UL - #define PORT_MAC_QCFG_RESP_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL - #define PORT_MAC_QCFG_RESP_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x20UL - uint8_t tunnel_pri2cos_map_pri; - uint8_t dscp2pri_map_pri; - uint16_t rx_ts_capture_ptp_msg_type; - uint16_t tx_ts_capture_ptp_msg_type; - uint8_t cos_field_cfg; - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_RSVD 0x1UL - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1 - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \ - (0x0UL << 1) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \ - (0x1UL << 1) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \ - (0x2UL << 1) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \ - (0x3UL << 1) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \ - PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3 - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \ - (0x0UL << 3) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \ - (0x1UL << 3) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \ - (0x2UL << 3) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \ - (0x3UL << 3) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \ - PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_SFT 5 - uint8_t valid; -}; - - -/************************** - * hwrm_port_mac_ptp_qcfg * - **************************/ + /* This value is an opaque id into CFA data structures. */ + uint32_t encap_record_id; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); +/****************************** + * hwrm_cfa_encap_record_free * + ******************************/ -/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */ -struct hwrm_port_mac_ptp_qcfg_input { + +/* hwrm_cfa_encap_record_free_input (size:192b/24B) */ +struct hwrm_cfa_encap_record_free_input { + /* The HWRM command request type. */ uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. 
+ */ uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ uint64_t resp_addr; - uint16_t port_id; - uint8_t unused_0[6]; -}; - + /* This value is an opaque id into CFA data structures. */ + uint32_t encap_record_id; + uint8_t unused_0[4]; +} __attribute__((packed)); -/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */ -struct hwrm_port_mac_ptp_qcfg_output { +/* hwrm_cfa_encap_record_free_output (size:128b/16B) */ +struct hwrm_cfa_encap_record_free_output { + /* The specific error status for the command. */ uint16_t error_code; + /* The HWRM command request type. */ uint16_t req_type; + /* The sequence ID from the original command. */ uint16_t seq_id; + /* The length of the response data in number of bytes. */ uint16_t resp_len; - uint8_t flags; - #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL - #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL - uint8_t unused_0[3]; - uint32_t rx_ts_reg_off_lower; - uint32_t rx_ts_reg_off_upper; - uint32_t rx_ts_reg_off_seq_id; - uint32_t rx_ts_reg_off_src_id_0; - uint32_t rx_ts_reg_off_src_id_1; - uint32_t rx_ts_reg_off_src_id_2; - uint32_t rx_ts_reg_off_domain_id; - uint32_t rx_ts_reg_off_fifo; - uint32_t rx_ts_reg_off_fifo_adv; - uint32_t rx_ts_reg_off_granularity; - uint32_t tx_ts_reg_off_lower; - uint32_t tx_ts_reg_off_upper; - uint32_t tx_ts_reg_off_seq_id; - uint32_t tx_ts_reg_off_fifo; - uint32_t tx_ts_reg_off_granularity; - uint8_t unused_1[7]; - uint8_t valid; -}; - - -/* hwrm_vnic_alloc */ -/* - * Description: This VNIC is a resource in the RX side of the chip that is used - * to represent a virtual host "interface". # At the time of VNIC allocation or - * configuration, the function can specify whether it wants the requested VNIC - * to be the default VNIC for the function or not. # If a function requests - * allocation of a VNIC for the first time and a VNIC is successfully allocated - * by the HWRM, then the HWRM shall make the allocated VNIC as the default VNIC - * for that function. # The default VNIC shall be used for the default action - * for a partition or function. # For each VNIC allocated on a function, a - * mapping on the RX side to map the allocated VNIC to source virtual interface - * shall be performed by the HWRM. This should be hidden to the function driver - * requesting the VNIC allocation. This enables broadcast/multicast replication - * with source knockout. # If multicast replication with source knockout is - * enabled, then the internal VNIC to SVIF mapping data structures shall be - * programmed at the time of VNIC allocation. - */ -/* Input (24 bytes) */ -struct hwrm_vnic_alloc_input { - uint16_t req_type; + uint8_t unused_0[7]; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
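/*
 * Illustrative sketch (not part of the patch) of the resp_addr contract
 * repeated in the request structures above: the response area must be
 * physically contiguous, 16-byte aligned, and zeroed before the request,
 * so a DPDK caller would hand firmware the IOVA of a memzone rather than
 * a virtual address. All names and sizes here are illustrative.
 */
#include <string.h>
#include <rte_memzone.h>

#define EXAMPLE_RESP_LEN 4096

static uint64_t example_resp_addr(void)
{
	static const struct rte_memzone *mz;

	if (mz == NULL)
		mz = rte_memzone_reserve_aligned("example_hwrm_resp",
						 EXAMPLE_RESP_LEN,
						 SOCKET_ID_ANY, 0,
						 16 /* 16B alignment */);
	if (mz == NULL)
		return 0; /* allocation failed */
	memset(mz->addr, 0, EXAMPLE_RESP_LEN); /* cleared before each request */
	return rte_cpu_to_le_64(mz->iova);
}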
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t cmpl_ring; + uint8_t valid; +} __attribute__((packed)); + +/******************************** + * hwrm_cfa_ntuple_filter_alloc * + ********************************/ + + +/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */ +struct hwrm_cfa_ntuple_filter_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t cmpl_ring; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t resp_addr; + uint16_t seq_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint32_t flags; + uint16_t target_id; /* - * When this bit is '1', this VNIC is requested to be the - * default VNIC for this function. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1) - uint32_t unused_0; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_vnic_alloc_output { - uint16_t error_code; + uint64_t resp_addr; + uint32_t flags; + /* Setting of this flag indicates the applicability to the loopback path. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \ + UINT32_C(0x1) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP \ + UINT32_C(0x2) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * Setting of this flag indicates that a meter is expected to be attached + * to this flow. This hint can be used when choosing the action record + * format required for the flow. 
*/ - uint32_t vnic_id; - /* Logical vnic ID */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER \ + UINT32_C(0x4) + uint32_t enables; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This bit must be '1' for the l2_filter_id field to be + * configured. */ -} __attribute__((packed)); - -/* hwrm_vnic_free */ -/* - * Description: Free a VNIC resource. Idle any resources associated with the - * VNIC as well as the VNIC. Reset and release all resources associated with the - * VNIC. - */ -/* Input (24 bytes) */ -struct hwrm_vnic_free_input { - uint16_t req_type; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \ + UINT32_C(0x1) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This bit must be '1' for the ethertype field to be + * configured. */ - uint16_t cmpl_ring; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \ + UINT32_C(0x2) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the tunnel_type field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x4) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This bit must be '1' for the src_macaddr field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \ + UINT32_C(0x8) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This bit must be '1' for the ipaddr_type field to be + * configured. */ - uint32_t vnic_id; - /* Logical vnic ID */ - uint32_t unused_0; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_vnic_free_output { - uint16_t error_code; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \ + UINT32_C(0x10) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This bit must be '1' for the src_ipaddr field to be + * configured. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \ + UINT32_C(0x20) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This bit must be '1' for the src_ipaddr_mask field to be + * configured. 
*/ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK \ + UINT32_C(0x40) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This bit must be '1' for the dst_ipaddr field to be + * configured. */ -} __attribute__((packed)); - -/* hwrm_vnic_cfg */ -/* Description: Configure the RX VNIC structure. */ -/* Input (40 bytes) */ -struct hwrm_vnic_cfg_input { - uint16_t req_type; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \ + UINT32_C(0x80) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This bit must be '1' for the dst_ipaddr_mask field to be + * configured. */ - uint16_t cmpl_ring; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK \ + UINT32_C(0x100) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the ip_protocol field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \ + UINT32_C(0x200) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This bit must be '1' for the src_port field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \ + UINT32_C(0x400) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This bit must be '1' for the src_port_mask field to be + * configured. */ - uint32_t flags; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK \ + UINT32_C(0x800) /* - * When this bit is '1', the VNIC is requested to be the default - * VNIC for the function. + * This bit must be '1' for the dst_port field to be + * configured. */ - #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT UINT32_C(0x1) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \ + UINT32_C(0x1000) /* - * When this bit is '1', the VNIC is being configured to strip - * VLAN in the RX path. If set to '0', then VLAN stripping is - * disabled on this VNIC. + * This bit must be '1' for the dst_port_mask field to be + * configured. */ - #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK \ + UINT32_C(0x2000) /* - * When this bit is '1', the VNIC is being configured to buffer - * receive packets in the hardware until the host posts new - * receive buffers. If set to '0', then bd_stall is being - * configured to be disabled on this VNIC. + * This bit must be '1' for the pri_hint field to be + * configured. 
*/ - #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_PRI_HINT \ + UINT32_C(0x4000) /* - * When this bit is '1', the VNIC is being configured to receive - * both RoCE and non-RoCE traffic. If set to '0', then this VNIC - * is not configured to be operating in dual VNIC mode. + * This bit must be '1' for the ntuple_filter_id field to be + * configured. */ - #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_NTUPLE_FILTER_ID \ + UINT32_C(0x8000) /* - * When this flag is set to '1', the VNIC is requested to be - * configured to receive only RoCE traffic. If this flag is set - * to '0', then this flag shall be ignored by the HWRM. If - * roce_dual_vnic_mode flag is set to '1', then the HWRM client - * shall not set this flag to '1'. + * This bit must be '1' for the dst_id field to be + * configured. */ - #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID \ + UINT32_C(0x10000) /* - * When a VNIC uses one destination ring group for certain - * application (e.g. Receive Flow Steering) where exact match is - * used to direct packets to a VNIC with one destination ring - * group only, there is no need to configure RSS indirection - * table for that VNIC as only one destination ring group is - * used. This flag is used to enable a mode where RSS is enabled - * in the VNIC using a RSS context for computing RSS hash but - * the RSS indirection table is not configured using - * hwrm_vnic_rss_cfg. If this mode is enabled, then the driver - * should not program RSS indirection table for the RSS context - * that is used for computing RSS hash only. - */ - #define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20) - /* - * When this bit is '1', the VNIC is being configured to receive - * both RoCE and non-RoCE traffic, but forward only the RoCE - * traffic further. Also, RoCE traffic can be mirrored to L2 - * driver. + * This bit must be '1' for the mirror_vnic_id field to be + * configured. */ - #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \ - UINT32_C(0x40) - uint32_t enables; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x20000) /* - * This bit must be '1' for the dflt_ring_grp field to be + * This bit must be '1' for the dst_macaddr field to be * configured. */ - #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP UINT32_C(0x1) - /* This bit must be '1' for the rss_rule field to be configured. */ - #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE UINT32_C(0x2) - /* This bit must be '1' for the cos_rule field to be configured. */ - #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE UINT32_C(0x4) - /* This bit must be '1' for the lb_rule field to be configured. */ - #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE UINT32_C(0x8) - /* This bit must be '1' for the mru field to be configured. */ - #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU UINT32_C(0x10) - uint16_t vnic_id; - /* Logical vnic ID */ - uint16_t dflt_ring_grp; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \ + UINT32_C(0x40000) /* - * Default Completion ring for the VNIC. This ring will be - * chosen if packet does not match any RSS rules and if there is - * no COS rule. + * This value identifies a set of CFA data structures used for an L2 + * context. */ - uint16_t rss_rule; + uint64_t l2_filter_id; /* - * RSS ID for RSS rule/table structure. 0xFF... 
(All Fs) if - * there is no RSS rule. + * This value indicates the source MAC address in + * the Ethernet header. */ - uint16_t cos_rule; + uint8_t src_macaddr[6]; + /* This value indicates the ethertype in the Ethernet header. */ + uint16_t ethertype; /* - * RSS ID for COS rule/table structure. 0xFF... (All Fs) if - * there is no COS rule. + * This value indicates the type of IP address. + * 4 - IPv4 + * 6 - IPv6 + * All others are invalid. */ - uint16_t lb_rule; + uint8_t ip_addr_type; + /* invalid */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \ + UINT32_C(0x0) + /* IPv4 */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \ + UINT32_C(0x4) + /* IPv6 */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \ + UINT32_C(0x6) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 /* - * RSS ID for load balancing rule/table structure. 0xFF... (All - * Fs) if there is no LB rule. + * The value of protocol field in IP header. + * Applies to UDP and TCP traffic. + * 6 - TCP + * 17 - UDP */ - uint16_t mru; + uint8_t ip_protocol; + /* invalid */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \ + UINT32_C(0x0) + /* TCP */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \ + UINT32_C(0x6) + /* UDP */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \ + UINT32_C(0x11) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP /* - * The maximum receive unit of the vnic. Each vnic is associated - * with a function. The vnic mru value overwrites the mru - * setting of the associated function. The HWRM shall make sure - * that vnic mru does not exceed the mru of the port the - * function is associated with. + * If set, this value shall represent the + * Logical VNIC ID of the destination VNIC for the RX + * path and network port id of the destination port for + * the TX path. */ - uint32_t unused_0; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_vnic_cfg_output { - uint16_t error_code; + uint16_t dst_id; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Logical VNIC ID of the VNIC where traffic is + * mirrored. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t mirror_vnic_id; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This value indicates the tunnel type for this filter. + * If this field is not specified, then the filter shall + * apply to both non-tunneled and tunneled packets. + * If this field conflicts with the tunnel_type specified + * in the l2_filter_id, then the HWRM shall return an + * error for this command.
*/ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Any tunneled traffic */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This hint is provided to help in placing + * the filter in the filter table. */ -} __attribute__((packed)); - -/* hwrm_vnic_qcfg */ -/* - * Description: Query the RX VNIC structure. This function can be used by a PF - * driver to query its own VNIC resource or VNIC resource of its child VF. This - * function can also be used by a VF driver to query its own VNIC resource. - */ -/* Input (32 bytes) */ -struct hwrm_vnic_qcfg_input { - uint16_t req_type; + uint8_t pri_hint; + /* No preference */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \ + UINT32_C(0x0) + /* Above the given filter */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE \ + UINT32_C(0x1) + /* Below the given filter */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_BELOW \ + UINT32_C(0x2) + /* As high as possible */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_HIGHEST \ + UINT32_C(0x3) + /* As low as possible */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST \ + UINT32_C(0x4) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The value of source IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address.
*/ - uint16_t cmpl_ring; + uint32_t src_ipaddr[4]; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The value of source IP address mask to be used in + * filtering. + * For IPv4, first four bytes represent the IP address mask. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint32_t src_ipaddr_mask[4]; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The value of destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. */ - uint64_t resp_addr; + uint32_t dst_ipaddr[4]; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The value of destination IP address mask to be used in + * filtering. + * For IPv4, first four bytes represent the IP address mask. */ - uint32_t enables; - /* This bit must be '1' for the vf_id_valid field to be configured. */ - #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1) - uint32_t vnic_id; - /* Logical vnic ID */ - uint16_t vf_id; - /* ID of Virtual Function whose VNIC resource is being queried. */ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (32 bytes) */ -struct hwrm_vnic_qcfg_output { - uint16_t error_code; + uint32_t dst_ipaddr_mask[4]; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The value of source port to be used in filtering. + * Applies to UDP and TCP traffic. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t src_port; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The value of source port mask to be used in filtering. + * Applies to UDP and TCP traffic. */ - uint16_t dflt_ring_grp; - /* Default Completion ring for the VNIC. */ - uint16_t rss_rule; + uint16_t src_port_mask; /* - * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if - * there is no RSS rule. + * The value of destination port to be used in filtering. + * Applies to UDP and TCP traffic. */ - uint16_t cos_rule; + uint16_t dst_port; /* - * RSS ID for COS rule/table structure. 0xFF... (All Fs) if - * there is no COS rule. + * The value of destination port mask to be used in + * filtering. + * Applies to UDP and TCP traffic. */ - uint16_t lb_rule; + uint16_t dst_port_mask; /* - * RSS ID for load balancing rule/table structure. 0xFF... (All - * Fs) if there is no LB rule. + * This is the ID of the filter that goes along with + * the pri_hint. */ - uint16_t mru; - /* The maximum receive unit of the vnic. */ - uint8_t unused_0; - uint8_t unused_1; - uint32_t flags; + uint64_t ntuple_filter_id_hint; +} __attribute__((packed)); + +/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_ntuple_filter_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. 
*/ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value is an opaque id into CFA data structures. */ + uint64_t ntuple_filter_id; /* - * When this bit is '1', the VNIC is the default VNIC for the - * function. + * This is the ID of the flow associated with this + * filter. + * This value shall be used to match and associate the + * flow identifier returned in completion records. + * A value of 0xFFFFFFFF shall indicate no flow id. */ - #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT UINT32_C(0x1) + uint32_t flow_id; + uint8_t unused_0[3]; /* - * When this bit is '1', the VNIC is configured to strip VLAN in - * the RX path. If set to '0', then VLAN stripping is disabled - * on this VNIC. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2) + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */ +struct hwrm_cfa_ntuple_filter_alloc_cmd_err { /* - * When this bit is '1', the VNIC is configured to buffer - * receive packets in the hardware until the host posts new - * receive buffers. If set to '0', then bd_stall is disabled on - * this VNIC. + * command specific error codes that go to + * the cmd_err field in Common HWRM Error Response. */ - #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4) + uint8_t code; + /* Unknown error */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN \ + UINT32_C(0x0) + /* Unable to complete operation due to conflict with Rx Mask VLAN */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR \ + UINT32_C(0x1) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR + uint8_t unused_0[7]; +} __attribute__((packed)); + +/******************************* + * hwrm_cfa_ntuple_filter_free * + *******************************/ + + +/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_ntuple_filter_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * When this bit is '1', the VNIC is configured to receive both - * RoCE and non-RoCE traffic. If set to '0', then this VNIC is - * not configured to operate in dual VNIC mode. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8) + uint16_t cmpl_ring; /* - * When this flag is set to '1', the VNIC is configured to - * receive only RoCE traffic. When this flag is set to '0', the - * VNIC is not configured to receive only RoCE traffic. If - * roce_dual_vnic_mode flag and this flag both are set to '1', - * then it is an invalid configuration of the VNIC. The HWRM - * should not allow that type of mis-configuration by HWRM - * clients. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion.
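/*
 * Editor's illustration (not part of the upstream patch): a minimal sketch
 * of how a driver might fill hwrm_cfa_ntuple_filter_alloc_input for an
 * IPv4/TCP 5-tuple steered to a VNIC. hwrm_send_message() is a hypothetical
 * transport helper assumed to stamp the common req_type/cmpl_ring/seq_id/
 * target_id/resp_addr header fields, send the request and wait for the
 * response; byte-order conversion of the match fields is omitted for
 * brevity. Note that every field written must be advertised in `enables`.
 */
extern int hwrm_send_message(void *req, uint32_t req_len,
			     void *resp, uint32_t resp_len);

static int example_ntuple_alloc(uint64_t l2_filter_id, uint16_t dst_id,
				uint32_t saddr, uint32_t daddr,
				uint16_t sport, uint16_t dport,
				uint64_t *ntuple_filter_id)
{
	struct hwrm_cfa_ntuple_filter_alloc_input req = { 0 };
	struct hwrm_cfa_ntuple_filter_alloc_output resp = { 0 };
	int rc;

	req.l2_filter_id = l2_filter_id; /* from an earlier L2 filter alloc */
	req.ip_addr_type = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
	req.ip_protocol = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP;
	req.src_ipaddr[0] = saddr;	/* IPv4 uses only the first word */
	req.dst_ipaddr[0] = daddr;
	req.src_port = sport;
	req.dst_port = dport;
	req.dst_id = dst_id;		/* RX path: logical VNIC ID */
	req.enables =
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;

	rc = hwrm_send_message(&req, sizeof(req), &resp, sizeof(resp));
	if (rc == 0)
		*ntuple_filter_id = resp.ntuple_filter_id; /* opaque CFA id */
	return rc;
}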
*/ - #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10) + uint16_t seq_id; /* - * When a VNIC uses one destination ring group for certain - * application (e.g. Receive Flow Steering) where exact match is - * used to direct packets to a VNIC with one destination ring - * group only, there is no need to configure RSS indirection - * table for that VNIC as only one destination ring group is - * used. When this bit is set to '1', then the VNIC is enabled - * in a mode where RSS is enabled in the VNIC using a RSS - * context for computing RSS hash but the RSS indirection table - * is not configured. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20) + uint16_t target_id; /* - * When this bit is '1', the VNIC is configured to receive both - * RoCE and non-RoCE traffic, but forward only RoCE traffic - * further. Also RoCE traffic can be mirrored to L2 driver. + * A physical address pointer pointing to a host buffer to which the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \ - UINT32_C(0x40) - uint32_t unused_2; - uint8_t unused_3; - uint8_t unused_4; - uint8_t unused_5; - uint8_t valid; + uint64_t resp_addr; + /* This value is an opaque id into CFA data structures. */ + uint64_t ntuple_filter_id; +} __attribute__((packed)); + +/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_ntuple_filter_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); +/****************************** + * hwrm_cfa_ntuple_filter_cfg * + ******************************/ -/* hwrm_vnic_tpa_cfg */ -/* Description: This function is used to enable/configure TPA on the VNIC. */ -/* Input (40 bytes) */ -struct hwrm_vnic_tpa_cfg_input { - uint16_t req_type; + +/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */ +struct hwrm_cfa_ntuple_filter_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command.
*/ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer to which the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t flags; + uint64_t resp_addr; + uint32_t enables; /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) of non-tunneled TCP - * packets. + * This bit must be '1' for the new_dst_id field to be + * configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA UINT32_C(0x1) + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID \ + UINT32_C(0x1) /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) of tunneled TCP packets. + * This bit must be '1' for the new_mirror_vnic_id field to be + * configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA UINT32_C(0x2) + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ + UINT32_C(0x2) /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) according to Windows - * Receive Segment Coalescing (RSC) rules. + * This bit must be '1' for the new_meter_instance_id field to be + * configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE UINT32_C(0x4) + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \ + UINT32_C(0x4) + uint8_t unused_0[4]; + /* This value is an opaque id into CFA data structures. */ + uint64_t ntuple_filter_id; /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) according to Linux - * Generic Receive Offload (GRO) rules. + * If set, this value shall represent the new + * Logical VNIC ID of the destination VNIC for the RX + * path and new network port id of the destination port for + * the TX path. */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO UINT32_C(0x8) + uint32_t new_dst_id; /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) for TCP packets with IP - * ECN set to non-zero. + * New Logical VNIC ID of the VNIC where traffic is + * mirrored.
*/ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN UINT32_C(0x10) + uint32_t new_mirror_vnic_id; /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) for GRE tunneled TCP - * packets only if all packets have the same GRE sequence. + * New meter to attach to the flow. Specifying the + * invalid instance ID is used to remove any existing + * meter from the flow. */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \ - UINT32_C(0x20) + uint16_t new_meter_instance_id; /* - * When this bit is '1' and the GRO mode is enabled, the VNIC - * shall be configured to perform transparent packet aggregation - * (TPA) for TCP/IPv4 packets with consecutively increasing - * IPIDs. In other words, the last packet that is being - * aggregated to an already existing aggregation context shall - * have IPID 1 more than the IPID of the last packet that was - * aggregated in that aggregation context. + * A value of 0xffff is considered invalid and implies the + * instance is not configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK UINT32_C(0x40) + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_LAST \ + HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID + uint8_t unused_1[6]; +} __attribute__((packed)); + +/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_ntuple_filter_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * When this bit is '1' and the GRO mode is enabled, the VNIC - * shall be configured to perform transparent packet aggregation - * (TPA) for TCP packets with the same TTL (IPv4) or Hop limit - * (IPv6) value. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK UINT32_C(0x80) - uint32_t enables; - /* This bit must be '1' for the max_agg_segs field to be configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1) - /* This bit must be '1' for the max_aggs field to be configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2) + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_cfa_em_flow_alloc * + **************************/ + + +/* hwrm_cfa_em_flow_alloc_input (size:896b/112B) */ +struct hwrm_cfa_em_flow_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This bit must be '1' for the max_agg_timer field to be - * configured. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4) - /* This bit must be '1' for the min_agg_len field to be configured.
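/*
 * Editor's illustration (not part of the upstream patch): a sketch of
 * hwrm_cfa_ntuple_filter_cfg re-pointing an existing filter at a new
 * destination and detaching any meter by writing the documented invalid
 * meter instance ID. hwrm_send_message() is the same hypothetical helper
 * as in the previous sketch.
 */
static int example_ntuple_retarget(uint64_t ntuple_filter_id,
				   uint32_t new_dst_id)
{
	struct hwrm_cfa_ntuple_filter_cfg_input req = { 0 };
	struct hwrm_cfa_ntuple_filter_cfg_output resp = { 0 };

	req.ntuple_filter_id = ntuple_filter_id; /* from the alloc response */
	req.new_dst_id = new_dst_id;
	/* The documented invalid instance ID removes any existing meter. */
	req.new_meter_instance_id =
		HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID;
	req.enables =
		HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID |
		HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID;

	return hwrm_send_message(&req, sizeof(req), &resp, sizeof(resp));
}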
*/ - #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8) - uint16_t vnic_id; - /* Logical vnic ID */ - uint16_t max_agg_segs; + uint16_t cmpl_ring; /* - * This is the maximum number of TCP segments that can be - * aggregated (unit is Log2). Max value is 31. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - /* 1 segment */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0) - /* 2 segments */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1) - /* 4 segments */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2) - /* 8 segments */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3) - /* Any segment size larger than this is not valid */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f) - uint16_t max_aggs; + uint16_t seq_id; /* - * This is the maximum number of aggregations this VNIC is - * allowed (unit is Log2). Max value is 7 + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - /* 1 aggregation */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0) - /* 2 aggregations */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1) - /* 4 aggregations */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2) - /* 8 aggregations */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3) - /* 16 aggregations */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4) - /* Any aggregation size larger than this is not valid */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7) - uint8_t unused_0; - uint8_t unused_1; - uint32_t max_agg_timer; + uint16_t target_id; /* - * This is the maximum amount of time allowed for an aggregation - * context to complete after it was initiated. + * A physical address pointer pointing to a host buffer to which the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t min_agg_len; + uint64_t resp_addr; + uint32_t flags; /* - * This is the minimum amount of payload length required to - * start an aggregation context. + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. */ -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_vnic_tpa_cfg_output { - uint16_t error_code; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Setting of this flag indicates enabling of a byte counter for a given + * flow. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_BYTE_CTR UINT32_C(0x2) /* - * This field is the length of the response in bytes.
The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * Setting of this flag indicates enabling of a packet counter for a given + * flow. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR UINT32_C(0x4) + /* Setting of this flag indicates de-capsulation action for the given flow. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DECAP UINT32_C(0x8) + /* Setting of this flag indicates encapsulation action for the given flow. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_ENCAP UINT32_C(0x10) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered an accept action. */ -} __attribute__((packed)); - -/* hwrm_vnic_rss_cfg */ -/* Description: This function is used to enable RSS configuration. */ -/* Input (48 bytes) */ -struct hwrm_vnic_rss_cfg_input { - uint16_t req_type; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x20) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * Setting of this flag indicates that a meter is expected to be attached + * to this flow. This hint can be used when choosing the action record + * format required for the flow. */ - uint16_t cmpl_ring; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_METER UINT32_C(0x40) + uint32_t enables; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the l2_filter_id field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID \ + UINT32_C(0x1) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This bit must be '1' for the tunnel_type field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x2) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This bit must be '1' for the tunnel_id field to be + * configured. */ - uint32_t hash_type; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID \ + UINT32_C(0x4) /* - * When this bit is '1', the RSS hash shall be computed over - * source and destination IPv4 addresses of IPv4 packets. + * This bit must be '1' for the src_macaddr field to be + * configured. */ - #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR \ + UINT32_C(0x8) /* - * When this bit is '1', the RSS hash shall be computed over - * source/destination IPv4 addresses and source/destination - * ports of TCP/IPv4 packets.
+ * This bit must be '1' for the dst_macaddr field to be + * configured. */ - #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR \ + UINT32_C(0x10) /* - * When this bit is '1', the RSS hash shall be computed over - * source/destination IPv4 addresses and source/destination - * ports of UDP/IPv4 packets. + * This bit must be '1' for the ovlan_vid field to be + * configured. */ - #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID \ + UINT32_C(0x20) /* - * When this bit is '1', the RSS hash shall be computed over - * source and destination IPv4 addresses of IPv6 packets. + * This bit must be '1' for the ivlan_vid field to be + * configured. */ - #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID \ + UINT32_C(0x40) /* - * When this bit is '1', the RSS hash shall be computed over - * source/destination IPv6 addresses and source/destination - * ports of TCP/IPv6 packets. + * This bit must be '1' for the ethertype field to be + * configured. */ - #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE \ + UINT32_C(0x80) /* - * When this bit is '1', the RSS hash shall be computed over - * source/destination IPv6 addresses and source/destination - * ports of UDP/IPv6 packets. + * This bit must be '1' for the src_ipaddr field to be + * configured. */ - #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20) - uint32_t unused_0; - uint64_t ring_grp_tbl_addr; - /* This is the address for rss ring group table */ - uint64_t hash_key_tbl_addr; - /* This is the address for rss hash key table */ - uint16_t rss_ctx_idx; - /* Index to the rss indirection table. */ - uint16_t unused_1[3]; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_vnic_rss_cfg_output { - uint16_t error_code; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR \ + UINT32_C(0x100) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This bit must be '1' for the dst_ipaddr field to be + * configured. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR \ + UINT32_C(0x200) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This bit must be '1' for the ipaddr_type field to be + * configured. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE \ + UINT32_C(0x400) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This bit must be '1' for the ip_protocol field to be + * configured. 
*/ -} __attribute__((packed)); - -/* hwrm_vnic_plcmodes_cfg */ -/* - * Description: This function can be used to set placement mode configuration of - * the VNIC. - */ -/* Input (40 bytes) */ -struct hwrm_vnic_plcmodes_cfg_input { - uint16_t req_type; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL \ + UINT32_C(0x800) /* - * This value indicates what type of request this is. The format for the - * rest of the command is determined by this field. + * This bit must be '1' for the src_port field to be + * configured. */ - uint16_t cmpl_ring; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT \ + UINT32_C(0x1000) /* - * This value indicates the what completion ring the request will be - * optionally completed on. If the value is -1, then no CR completion - * will be generated. Any other value must be a valid CR ring_id value - * for this function. + * This bit must be '1' for the dst_port field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT \ + UINT32_C(0x2000) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + * This bit must be '1' for the dst_id field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID \ + UINT32_C(0x4000) /* - * This is the host address where the response will be written when the - * request is complete. This area must be 16B aligned and must be - * cleared to zero before the request is made. + * This bit must be '1' for the mirror_vnic_id field to be + * configured. */ - uint32_t flags; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x8000) /* - * When this bit is '1', the VNIC shall be configured to use regular - * placement algorithm. By default, the regular placement algorithm - * shall be enabled on the VNIC. + * This bit must be '1' for the encap_record_id field to be + * configured. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT \ - UINT32_C(0x1) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ENCAP_RECORD_ID \ + UINT32_C(0x10000) /* - * When this bit is '1', the VNIC shall be configured use the jumbo - * placement algorithm. + * This bit must be '1' for the meter_instance_id field to be + * configured. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT \ - UINT32_C(0x2) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_METER_INSTANCE_ID \ + UINT32_C(0x20000) /* - * When this bit is '1', the VNIC shall be configured to enable Header- - * Data split for IPv4 packets according to the following rules: # If - * the packet is identified as TCP/IPv4, then the packet is split at the - * beginning of the TCP payload. # If the packet is identified as - * UDP/IPv4, then the packet is split at the beginning of UDP payload. # - * If the packet is identified as non-TCP and non-UDP IPv4 packet, then - * the packet is split at the beginning of the upper layer protocol - * header carried in the IPv4 packet. + * This value identifies a set of CFA data structures used for an L2 + * context. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 UINT32_C(0x4) + uint64_t l2_filter_id; + /* Tunnel Type. 
*/ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Any tunneled traffic */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL + uint8_t unused_0[3]; /* - * When this bit is '1', the VNIC shall be configured to enable Header- - * Data split for IPv6 packets according to the following rules: # If - * the packet is identified as TCP/IPv6, then the packet is split at the - * beginning of the TCP payload. # If the packet is identified as - * UDP/IPv6, then the packet is split at the beginning of UDP payload. # - * If the packet is identified as non-TCP and non-UDP IPv6 packet, then - * the packet is split at the beginning of the upper layer protocol - * header carried in the IPv6 packet. + * Tunnel identifier. + * Virtual Network Identifier (VNI). Only valid with + * tunnel_types VXLAN, NVGRE, and Geneve. + * Only lower 24-bits of VNI field are used + * in setting up the filter. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 UINT32_C(0x8) + uint32_t tunnel_id; /* - * When this bit is '1', the VNIC shall be configured to enable Header- - * Data split for FCoE packets at the beginning of FC payload. + * This value indicates the source MAC address in + * the Ethernet header. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE UINT32_C(0x10) + uint8_t src_macaddr[6]; + /* The meter instance to attach to the flow. */ + uint16_t meter_instance_id; /* - * When this bit is '1', the VNIC shall be configured to enable Header- - * Data split for RoCE packets at the beginning of RoCE payload (after - * BTH/GRH headers). + * A value of 0xffff is considered invalid and implies the + * instance is not configured. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE UINT32_C(0x20) - uint32_t enables; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID /* - * This bit must be '1' for the jumbo_thresh_valid field to be - * configured. + * This value indicates the destination MAC address in + * the Ethernet header.
*/ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID \ - UINT32_C(0x1) + uint8_t dst_macaddr[6]; /* - * This bit must be '1' for the hds_offset_valid field to be configured. + * This value indicates the VLAN ID of the outer VLAN tag + * in the Ethernet header. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID \ - UINT32_C(0x2) + uint16_t ovlan_vid; /* - * This bit must be '1' for the hds_threshold_valid field to be - * configured. + * This value indicates the VLAN ID of the inner VLAN tag + * in the Ethernet header. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID \ - UINT32_C(0x4) - uint32_t vnic_id; - /* Logical vnic ID */ - uint16_t jumbo_thresh; + uint16_t ivlan_vid; + /* This value indicates the ethertype in the Ethernet header. */ + uint16_t ethertype; /* - * When jumbo placement algorithm is enabled, this value is used to - * determine the threshold for jumbo placement. Packets with length - * larger than this value will be placed according to the jumbo - * placement algorithm. + * This value indicates the type of IP address. + * 4 - IPv4 + * 6 - IPv6 + * All others are invalid. */ - uint16_t hds_offset; + uint8_t ip_addr_type; + /* invalid */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN UINT32_C(0x0) + /* IPv4 */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 UINT32_C(0x4) + /* IPv6 */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 /* - * This value is used to determine the offset into packet buffer where - * the split data (payload) will be placed according to one of of HDS - * placement algorithm. The lengths of packet buffers provided for split - * data shall be larger than this value. + * The value of protocol field in IP header. + * Applies to UDP and TCP traffic. + * 6 - TCP + * 17 - UDP */ - uint16_t hds_threshold; + uint8_t ip_protocol; + /* invalid */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN UINT32_C(0x0) + /* TCP */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_TCP UINT32_C(0x6) + /* UDP */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP UINT32_C(0x11) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP + uint8_t unused_1[2]; /* - * When one of the HDS placement algorithm is enabled, this value is - * used to determine the threshold for HDS placement. Packets with - * length larger than this value will be placed according to the HDS - * placement algorithm. This value shall be in multiple of 4 bytes. + * The value of source IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. */ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_vnic_plcmodes_cfg_output { - uint16_t error_code; + uint32_t src_ipaddr[4]; /* - * Pass/Fail or error type Note: receiver to verify the in parameters, - * and fail the call with an error when appropriate + * big_endian = True + * The value of destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint32_t dst_ipaddr[4]; /* - * This field is the length of the response in bytes.
The last byte of - * the response is a valid flag that will read as '1' when the command - * has been completely written to memory. + * The value of source port to be used in filtering. + * Applies to UDP and TCP traffic. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t src_port; /* - * This field is used in Output records to indicate that the output is - * completely written to RAM. This field should be read as '1' to - * indicate that the output has been completely written. When writing a - * command completion or response to an internal processor, the order of - * writes has to be such that this field is written last. + * The value of destination port to be used in filtering. + * Applies to UDP and TCP traffic. */ -} __attribute__((packed)); - -/* hwrm_vnic_plcmodes_qcfg */ -/* - * Description: This function can be used to query placement mode configuration - * of the VNIC. - */ -/* Input (24 bytes) */ -struct hwrm_vnic_plcmodes_qcfg_input { - uint16_t req_type; + uint16_t dst_port; /* - * This value indicates what type of request this is. The format for the - * rest of the command is determined by this field. + * If set, this value shall represent the + * Logical VNIC ID of the destination VNIC for the RX + * path and network port id of the destination port for + * the TX path. */ - uint16_t cmpl_ring; + uint16_t dst_id; /* - * This value indicates the what completion ring the request will be - * optionally completed on. If the value is -1, then no CR completion - * will be generated. Any other value must be a valid CR ring_id value - * for this function. + * Logical VNIC ID of the VNIC where traffic is + * mirrored. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t mirror_vnic_id; + /* Logical ID of the encapsulation record. */ + uint32_t encap_record_id; + uint8_t unused_2[4]; +} __attribute__((packed)); + +/* hwrm_cfa_em_flow_alloc_output (size:192b/24B) */ +struct hwrm_cfa_em_flow_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value is an opaque id into CFA data structures. */ + uint64_t em_filter_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + * This is the ID of the flow associated with this + * filter. + * This value shall be used to match and associate the + * flow identifier returned in completion records. + * A value of 0xFFFFFFFF shall indicate no flow id. */ - uint64_t resp_addr; + uint32_t flow_id; + uint8_t unused_0[3]; /* - * This is the host address where the response will be written when the - * request is complete. This area must be 16B aligned and must be - * cleared to zero before the request is made. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
*/ - uint32_t vnic_id; - /* Logical vnic ID */ - uint32_t unused_0; + uint8_t valid; } __attribute__((packed)); -/* Output (24 bytes) */ -struct hwrm_vnic_plcmodes_qcfg_output { - uint16_t error_code; +/************************* + * hwrm_cfa_em_flow_free * + *************************/ + + +/* hwrm_cfa_em_flow_free_input (size:192b/24B) */ +struct hwrm_cfa_em_flow_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in parameters, - * and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last byte of - * the response is a valid flag that will read as '1' when the command - * has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t flags; + uint16_t seq_id; /* - * When this bit is '1', the VNIC is configured to use regular placement - * algorithm. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_REGULAR_PLACEMENT \ - UINT32_C(0x1) + uint16_t target_id; /* - * When this bit is '1', the VNIC is configured to use the jumbo - * placement algorithm. + * A physical address pointer pointing to a host buffer to which the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_JUMBO_PLACEMENT \ - UINT32_C(0x2) + uint64_t resp_addr; + /* This value is an opaque id into CFA data structures. */ + uint64_t em_filter_id; +} __attribute__((packed)); + +/* hwrm_cfa_em_flow_free_output (size:128b/16B) */ +struct hwrm_cfa_em_flow_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * When this bit is '1', the VNIC is configured to enable Header-Data - * split for IPv4 packets. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 UINT32_C(0x4) + uint8_t valid; +} __attribute__((packed)); + +/************************ + * hwrm_cfa_em_flow_cfg * + ************************/ + + +/* hwrm_cfa_em_flow_cfg_input (size:384b/48B) */ +struct hwrm_cfa_em_flow_cfg_input { + /* The HWRM command request type.
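/*
 * Editor's illustration (not part of the upstream patch): a sketch of an
 * RX exact-match flow for VXLAN VNI 100 with a packet counter, again via
 * the hypothetical hwrm_send_message() helper introduced earlier. Per the
 * tunnel_id field description above, only the lower 24 bits carry the VNI.
 */
static int example_em_flow_alloc(uint64_t l2_filter_id, uint16_t dst_id,
				 uint64_t *em_filter_id)
{
	struct hwrm_cfa_em_flow_alloc_input req = { 0 };
	struct hwrm_cfa_em_flow_alloc_output resp = { 0 };
	int rc;

	req.flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX |
		    HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR;
	req.l2_filter_id = l2_filter_id;
	req.tunnel_type = HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	req.tunnel_id = 100;	/* VNI; only the low 24 bits are used */
	req.dst_id = dst_id;	/* RX path: logical VNIC ID */
	req.enables =
		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE |
		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID |
		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;

	rc = hwrm_send_message(&req, sizeof(req), &resp, sizeof(resp));
	if (rc == 0)
		*em_filter_id = resp.em_filter_id;
	return rc;
}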
*/ + uint16_t req_type; /* - * When this bit is '1', the VNIC is configured to enable Header-Data - * split for IPv6 packets. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 UINT32_C(0x8) + uint16_t cmpl_ring; /* - * When this bit is '1', the VNIC is configured to enable Header-Data - * split for FCoE packets. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE UINT32_C(0x10) + uint16_t seq_id; /* - * When this bit is '1', the VNIC is configured to enable Header-Data - * split for RoCE packets. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE UINT32_C(0x20) + uint16_t target_id; /* - * When this bit is '1', the VNIC is configured to be the default VNIC - * of the requesting function. + * A physical address pointer pointing to a host buffer to which the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC UINT32_C(0x40) - uint16_t jumbo_thresh; + uint64_t resp_addr; + uint32_t enables; /* - * When jumbo placement algorithm is enabled, this value is used to - * determine the threshold for jumbo placement. Packets with length - * larger than this value will be placed according to the jumbo - * placement algorithm. + * This bit must be '1' for the new_dst_id field to be + * configured. */ - uint16_t hds_offset; + #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID \ + UINT32_C(0x1) /* - * This value is used to determine the offset into packet buffer where - * the split data (payload) will be placed according to one of of HDS - * placement algorithm. The lengths of packet buffers provided for split - * data shall be larger than this value. + * This bit must be '1' for the new_mirror_vnic_id field to be + * configured. */ - uint16_t hds_threshold; + #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ + UINT32_C(0x2) /* - * When one of the HDS placement algorithm is enabled, this value is - * used to determine the threshold for HDS placement. Packets with - * length larger than this value will be placed according to the HDS - * placement algorithm. This value shall be in multiple of 4 bytes. + * This bit must be '1' for the new_meter_instance_id field to be + * configured. */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t unused_4; - uint8_t valid; + #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \ + UINT32_C(0x4) + uint8_t unused_0[4]; + /* This value is an opaque id into CFA data structures. */ + uint64_t em_filter_id; /* - * This field is used in Output records to indicate that the output is - * completely written to RAM. This field should be read as '1' to - * indicate that the output has been completely written. When writing a - * command completion or response to an internal processor, the order of - * writes has to be such that this field is written last.
+ * If set, this value shall represent the new + * Logical VNIC ID of the destination VNIC for the RX + * path and network port id of the destination port for + * the TX path. */ -} __attribute__((packed)); - -/* hwrm_vnic_rss_cos_lb_ctx_alloc */ -/* Description: This function is used to allocate COS/Load Balance context. */ -/* Input (16 bytes) */ -struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { - uint16_t req_type; + uint32_t new_dst_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * New Logical VNIC ID of the VNIC where traffic is + * mirrored. */ - uint16_t cmpl_ring; + uint32_t new_mirror_vnic_id; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * New meter to attach to the flow. Specifying the + * invalid instance ID is used to remove any existing + * meter from the flow. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t new_meter_instance_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * A value of 0xffff is considered invalid and implies the + * instance is not configured. */ - uint64_t resp_addr; + #define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_LAST \ + HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID + uint8_t unused_1[6]; +} __attribute__((packed)); + +/* hwrm_cfa_em_flow_cfg_output (size:128b/16B) */ +struct hwrm_cfa_em_flow_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { - uint16_t error_code; +/******************************** + * hwrm_cfa_meter_profile_alloc * + ********************************/ + + +/* hwrm_cfa_meter_profile_alloc_input (size:320b/40B) */ +struct hwrm_cfa_meter_profile_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command.
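Only the fields whose enables bit is set are applied, so one cfg request can update a single attribute and leave the rest untouched. A sketch that redirects a flow and detaches any attached meter in the same call, reusing the hypothetical hwrm_send() from the earlier sketch:

static int cfa_em_flow_set_dst(uint64_t em_filter_id, uint32_t dst_id)
{
	struct hwrm_cfa_em_flow_cfg_input req;

	memset(&req, 0, sizeof(req));
	req.em_filter_id = em_filter_id;
	req.enables = HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID |
		      HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID;
	req.new_dst_id = dst_id;	/* RX: destination VNIC; TX: destination port */
	/* The INVALID sentinel removes any meter currently on the flow. */
	req.new_meter_instance_id =
		HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID;
	return hwrm_send(&req, sizeof(req));	/* hypothetical transport */
}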
*/ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t rss_cos_lb_ctx_id; - /* rss_cos_lb_ctx_id is 16 b */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t unused_4; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* hwrm_vnic_rss_cos_lb_ctx_free */ -/* Description: This function can be used to free COS/Load Balance context. */ -/* Input (24 bytes) */ -struct hwrm_vnic_rss_cos_lb_ctx_free_input { - uint16_t req_type; + uint16_t target_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cmpl_ring; + uint64_t resp_addr; + uint8_t flags; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX + /* The meter algorithm type. */ + uint8_t meter_type; + /* RFC 2697 (srTCM) */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2697 \ + UINT32_C(0x0) + /* RFC 2698 (trTCM) */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2698 \ + UINT32_C(0x1) + /* RFC 4115 (trTCM) */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC4115 \ + UINT32_C(0x2) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC4115 /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This field is reserved for the future use. + * It shall be set to 0. 
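The field names suggest the usual color-marker parameter mapping: commit_rate/commit_burst as CIR/CBS, and excess_peak_rate/excess_peak_burst as EIR/EBS for RFC 2697 or PIR/PBS for RFC 2698 and RFC 4115. That mapping is a reading of the names, not something this header states. Selecting direction and algorithm is a pair of assignments; the rate words are filled separately (see the packing sketch after the rate definitions below):

static void meter_profile_srtcm_rx(struct hwrm_cfa_meter_profile_alloc_input *req)
{
	req->flags = HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX;	/* meter RX traffic */
	req->meter_type = HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2697; /* srTCM */
}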
*/ - uint64_t resp_addr; + uint16_t reserved1; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This field is reserved for the future use. + * It shall be set to 0. */ - uint16_t rss_cos_lb_ctx_id; - /* rss_cos_lb_ctx_id is 16 b */ - uint16_t unused_0[3]; + uint32_t reserved2; + /* A meter rate specified in bytes-per-second. */ + uint32_t commit_rate; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID + /* A meter burst size specified in bytes. */ + uint32_t commit_burst; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). 
*/ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID + /* A meter rate specified in bytes-per-second. */ + uint32_t excess_peak_rate; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID + /* A meter burst size specified in bytes. */ + uint32_t excess_peak_burst; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. 
*/ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_vnic_rss_cos_lb_ctx_free_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; +/* hwrm_cfa_meter_profile_alloc_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value identifies a meter profile in CFA. */ + uint16_t meter_profile_id; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * A value of 0xffff is considered invalid and implies the + * profile is not configured. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID + uint8_t unused_0[5]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written.
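All the rate and burst words in this command share one layout: a 28-bit magnitude in bits 0-27, a bits/bytes scale flag in bit 28, and a 3-bit unit in bits 29-31 (the unit macros are pre-shifted). A sketch of a complete allocation request built with one packing helper; the commit_rate mask and scale macros are reused for the other words since the encodings are identical, and hwrm_send() stays hypothetical:

#include <stdint.h>
#include <string.h>

/* Pack magnitude + scale + unit into one bandwidth word (sketch). */
static uint32_t bw_word(uint32_t value, int in_bytes, uint32_t unit)
{
	uint32_t w = value &
	    HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_MASK;

	if (in_bytes)
		w |= HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES;
	return w | unit;	/* unit macros are already shifted to bits 29-31 */
}

static int cfa_meter_profile_alloc_100mbs(void)
{
	struct hwrm_cfa_meter_profile_alloc_input req;

	memset(&req, 0, sizeof(req));
	req.flags = HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX;
	req.meter_type = HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2697;
	/* 100 MB/s committed rate, 1 MB committed burst. */
	req.commit_rate = bw_word(100, 1,
	    HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA);
	req.commit_burst = bw_word(1, 1,
	    HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA);
	return hwrm_send(&req, sizeof(req));	/* hypothetical transport */
}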
When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_ring_alloc */ -/* - * Description: This command allocates and does basic preparation for a ring. - */ -/* Input (80 bytes) */ -struct hwrm_ring_alloc_input { - uint16_t req_type; - /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. - */ - uint16_t cmpl_ring; +/******************************* + * hwrm_cfa_meter_profile_free * + *******************************/ + + +/* hwrm_cfa_meter_profile_free_input (size:192b/24B) */ +struct hwrm_cfa_meter_profile_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t cmpl_ring; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t resp_addr; + uint16_t seq_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint32_t enables; - /* This bit must be '1' for the Reserved1 field to be configured. */ - #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED1 UINT32_C(0x1) - /* This bit must be '1' for the ring_arb_cfg field to be configured. */ - #define HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG UINT32_C(0x2) - /* This bit must be '1' for the Reserved3 field to be configured. */ - #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED3 UINT32_C(0x4) + uint16_t target_id; /* - * This bit must be '1' for the stat_ctx_id_valid field to be - * configured. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID UINT32_C(0x8) - /* This bit must be '1' for the Reserved4 field to be configured. */ - #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED4 UINT32_C(0x10) - /* This bit must be '1' for the max_bw_valid field to be configured. 
*/ - #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID UINT32_C(0x20) - uint8_t ring_type; - /* Ring Type. */ - /* L2 Completion Ring (CR) */ - #define HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0) - /* TX Ring (TR) */ - #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX UINT32_C(0x1) - /* RX Ring (RR) */ - #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX UINT32_C(0x2) - /* RoCE Notification Completion Ring (ROCE_CR) */ - #define HWRM_RING_ALLOC_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3) - uint8_t unused_0; - uint16_t unused_1; - uint64_t page_tbl_addr; - /* This value is a pointer to the page table for the Ring. */ - uint32_t fbo; - /* First Byte Offset of the first entry in the first page. */ - uint8_t page_size; + uint64_t resp_addr; + uint8_t flags; /* - * Actual page size in 2^page_size. The supported range is - * increments in powers of 2 from 16 bytes to 1GB. - 4 = 16 B - * Page size is 16 B. - 12 = 4 KB Page size is 4 KB. - 13 = 8 KB - * Page size is 8 KB. - 16 = 64 KB Page size is 64 KB. - 21 = 2 - * MB Page size is 2 MB. - 22 = 4 MB Page size is 4 MB. - 30 = 1 - * GB Page size is 1 GB. + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. */ - uint8_t page_tbl_depth; + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX + uint8_t unused_0; + /* This value identifies a meter profile in CFA. */ + uint16_t meter_profile_id; /* - * This value indicates the depth of page table. For this - * version of the specification, value other than 0 or 1 shall - * be considered as an invalid value. When the page_tbl_depth = - * 0, then it is treated as a special case with the following. - * 1. FBO and page size fields are not valid. 2. page_tbl_addr - * is the physical address of the first element of the ring. + * A value of 0xffff is considered invalid and implies the + * profile is not configured. */ - uint8_t unused_2; - uint8_t unused_3; - uint32_t length; + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_LAST \ + HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID + uint8_t unused_1[4]; +} __attribute__((packed)); + +/* hwrm_cfa_meter_profile_free_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Number of 16B units in the ring. Minimum size for a ring is - * 16 16B entries. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last.
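Since INVALID doubles as a not-configured sentinel, a teardown path can use it to make freeing idempotent. A sketch, again over the hypothetical hwrm_send():

static void cfa_meter_profile_free(uint16_t *profile_id)
{
	struct hwrm_cfa_meter_profile_free_input req;

	if (*profile_id ==
	    (uint16_t)HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID)
		return;		/* nothing allocated, nothing to free */
	memset(&req, 0, sizeof(req));
	req.flags = HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX;
	req.meter_profile_id = *profile_id;
	(void)hwrm_send(&req, sizeof(req));	/* hypothetical transport */
	*profile_id =
	    (uint16_t)HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID;
}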
*/ - uint16_t logical_id; + uint8_t valid; +} __attribute__((packed)); + +/****************************** + * hwrm_cfa_meter_profile_cfg * + ******************************/ + + +/* hwrm_cfa_meter_profile_cfg_input (size:320b/40B) */ +struct hwrm_cfa_meter_profile_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Logical ring number for the ring to be allocated. This value - * determines the position in the doorbell area where the update - * to the ring will be made. For completion rings, this value is - * also the MSI-X vector number for the function the completion - * ring is associated with. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring_id; + uint16_t cmpl_ring; /* - * This field is used only when ring_type is a TX ring. This - * value indicates what completion ring the TX ring is - * associated with. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t queue_id; + uint16_t seq_id; /* - * This field is used only when ring_type is a TX ring. This - * value indicates what CoS queue the TX ring is associated - * with. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint8_t unused_4; - uint8_t unused_5; - uint32_t reserved1; - /* This field is reserved for the future use. It shall be set to 0. */ - uint16_t ring_arb_cfg; + uint16_t target_id; /* - * This field is used only when ring_type is a TX ring. This - * field is used to configure arbitration related parameters for - * a TX ring. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - /* Arbitration policy used for the ring. */ - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK UINT32_C(0xf) - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SFT 0 + uint64_t resp_addr; + uint8_t flags; /* - * Use strict priority for the TX ring. Priority - * value is specified in arb_policy_param + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. */ - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \ - (UINT32_C(0x1) << 0) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX + /* The meter algorithm type. */ + uint8_t meter_type; + /* RFC 2697 (srTCM) */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2697 \ + UINT32_C(0x0) + /* RFC 2698 (trTCM) */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2698 \ + UINT32_C(0x1) + /* RFC 4115 (trTCM) */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC4115 \ + UINT32_C(0x2) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC4115 + /* This value identifies a meter profile in CFA. 
*/ + uint16_t meter_profile_id; /* - * Use weighted fair queue arbitration for the - * TX ring. Weight is specified in - * arb_policy_param - */ - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \ - (UINT32_C(0x2) << 0) - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \ - RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ - /* Reserved field. */ - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_MASK UINT32_C(0xf0) - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_SFT 4 - /* - * Arbitration policy specific parameter. # For strict priority - * arbitration policy, this field represents a priority value. - * If set to 0, then the priority is not specified and the HWRM - * is allowed to select any priority for this TX ring. # For - * weighted fair queue arbitration policy, this field represents - * a weight value. If set to 0, then the weight is not specified - * and the HWRM is allowed to select any weight for this TX - * ring. - */ - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK \ - UINT32_C(0xff00) - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8 - uint8_t unused_6; - uint8_t unused_7; - uint32_t reserved3; - /* This field is reserved for the future use. It shall be set to 0. */ - uint32_t stat_ctx_id; - /* - * This field is used only when ring_type is a TX ring. This - * input indicates what statistics context this ring should be - * associated with. + * A value of 0xffff is considered invalid and implies the + * profile is not configured. */ - uint32_t reserved4; - /* This field is reserved for the future use. It shall be set to 0. */ - uint32_t max_bw; + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_INVALID /* - * This field is used only when ring_type is a TX ring to - * specify maximum BW allocated to the TX ring. The HWRM will - * translate this value into byte counter and time interval used - * for this ring inside the device. + * This field is reserved for the future use. + * It shall be set to 0. */ + uint32_t reserved; + /* A meter rate specified in bytes-per-second. */ + uint32_t commit_rate; /* The bandwidth value. */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK UINT32_C(0xfffffff) - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_SFT 0 - /* The granularity of the value (bits or bytes). */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE UINT32_C(0x10000000) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE \ + UINT32_C(0x10000000) /* Value is in bits. */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BITS \ + (UINT32_C(0x0) << 28) /* Value is in bytes.
*/ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES (UINT32_C(0x1) << 28) - #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_LAST \ - RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES /* bw_value_unit is 3 b */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK \ UINT32_C(0xe0000000) - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 - /* Value is in Mb or MB (base 10). */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA \ (UINT32_C(0x0) << 29) - /* Value is in Kb or KB (base 10). */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO \ (UINT32_C(0x2) << 29) /* Value is in bits or bytes. */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_BASE \ (UINT32_C(0x4) << 29) - /* Value is in Gb or GB (base 10). */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA \ (UINT32_C(0x6) << 29) /* Value is in 1/100th of a percentage of total bandwidth. */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 \ (UINT32_C(0x1) << 29) /* Invalid unit */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID \ (UINT32_C(0x7) << 29) - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \ - RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID - uint8_t int_mode; - /* - * This field is used only when ring_type is a Completion ring. - * This value indicates what interrupt mode should be used on - * this completion ring. Note: In the legacy interrupt mode, no - * more than 16 completion rings are allowed. - */ - /* Legacy INTA */ - #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY UINT32_C(0x0) - /* Reserved */ - #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD UINT32_C(0x1) - /* MSI-X */ - #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX UINT32_C(0x2) - /* No Interrupt - Polled mode */ - #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL UINT32_C(0x3) - uint8_t unused_8[3]; + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID + /* A meter burst size specified in bytes. */ + uint32_t commit_burst; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. 
*/ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID + /* A meter rate specified in bytes-per-second. */ + uint32_t excess_peak_rate; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID + /* A meter burst size specified in bytes. 
*/ + uint32_t excess_peak_burst; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_ring_alloc_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; +/* hwrm_cfa_meter_profile_cfg_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
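Unlike the alloc command, cfg addresses an existing profile through meter_profile_id, so the same bandwidth encoding can retune a live profile. A sketch that rewrites only the committed rate; the remaining rate words are left at zero purely for brevity, and a real caller would presumably resend all four:

static int cfa_meter_profile_set_cir(uint16_t profile_id, uint32_t mb_per_s)
{
	struct hwrm_cfa_meter_profile_cfg_input req;

	memset(&req, 0, sizeof(req));
	req.flags = HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX;
	req.meter_type = HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2697;
	req.meter_profile_id = profile_id;
	req.commit_rate = (mb_per_s &
	    HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_MASK) |
	    HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES |
	    HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA;
	return hwrm_send(&req, sizeof(req));	/* hypothetical transport */
}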
*/ - uint16_t ring_id; + uint8_t valid; +} __attribute__((packed)); + +/********************************* + * hwrm_cfa_meter_instance_alloc * + *********************************/ + + +/* hwrm_cfa_meter_instance_alloc_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Physical number of ring allocated. This value shall be unique - * for a ring type. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t logical_ring_id; - /* Logical number of ring allocated. */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint16_t cmpl_ring; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ -} __attribute__((packed)); - -/* hwrm_ring_free */ -/* - * Description: This command is used to free a ring and associated resources. - * With QoS and DCBx agents, it is possible the traffic classes will be moved - * from one CoS queue to another. When this occurs, the driver shall call - * 'hwrm_ring_free' to free the allocated rings and then call 'hwrm_ring_alloc' - * to re-allocate each ring and assign it to a new CoS queue. hwrm_ring_free - * shall be called on a ring only after it has been idle for 500ms or more and - * no frames have been posted to the ring during this time. All frames queued - * for transmission shall be completed and at least 500ms time elapsed from the - * last completion before calling this command. - */ -/* Input (24 bytes) */ -struct hwrm_ring_free_input { - uint16_t req_type; + uint16_t seq_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t cmpl_ring; + uint16_t target_id; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint64_t resp_addr; + uint8_t flags; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. 
*/ - uint64_t resp_addr; + #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH \ + UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX + uint8_t unused_0; + /* This value identifies a meter profile in CFA. */ + uint16_t meter_profile_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A value of 0xffff is considered invalid and implies the + * profile is not configured. */ - uint8_t ring_type; - /* Ring Type. */ - /* L2 Completion Ring (CR) */ - #define HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0) - /* TX Ring (TR) */ - #define HWRM_RING_FREE_INPUT_RING_TYPE_TX UINT32_C(0x1) - /* RX Ring (RR) */ - #define HWRM_RING_FREE_INPUT_RING_TYPE_RX UINT32_C(0x2) - /* RoCE Notification Completion Ring (ROCE_CR) */ - #define HWRM_RING_FREE_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3) - uint8_t unused_0; - uint16_t ring_id; - /* Physical number of ring allocated. */ - uint32_t unused_1; + #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_LAST \ + HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID + uint8_t unused_1[4]; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_ring_free_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; +/* hwrm_cfa_meter_instance_alloc_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value identifies a meter instance in CFA. */ + uint16_t meter_instance_id; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * A value of 0xffff is considered invalid and implies the + * instance is not configured. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_LAST \ + HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID + uint8_t unused_0[5]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM.
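An instance is what actually gets attached to flows (see new_meter_instance_id in em_flow_cfg above); it is created against a previously allocated profile, and the id worth keeping is in the response. A sketch; hwrm_send_sync() is a hypothetical call that sends the request and blocks until the response buffer's valid byte is set:

extern int hwrm_send_sync(void *req, uint32_t req_len,
			  void *resp, uint32_t resp_len);	/* hypothetical */

static int cfa_meter_instance_alloc(uint16_t profile_id, uint16_t *instance_id)
{
	struct hwrm_cfa_meter_instance_alloc_input req;
	struct hwrm_cfa_meter_instance_alloc_output resp;

	memset(&req, 0, sizeof(req));
	req.flags = HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX;
	req.meter_profile_id = profile_id;
	if (hwrm_send_sync(&req, sizeof(req), &resp, sizeof(resp)) != 0)
		return -1;
	if (resp.meter_instance_id ==
	    (uint16_t)HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID)
		return -1;	/* firmware reported no instance */
	*instance_id = resp.meter_instance_id;
	return 0;
}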
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_ring_grp_alloc */ -/* - * Description: This API allocates and does basic preparation for a ring group. - */ -/* Input (24 bytes) */ -struct hwrm_ring_grp_alloc_input { - uint16_t req_type; +/******************************** + * hwrm_cfa_meter_instance_free * + ********************************/ + + +/* hwrm_cfa_meter_instance_free_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cr; - /* This value identifies the CR associated with the ring group. */ - uint16_t rr; - /* This value identifies the main RR associated with the ring group. */ - uint16_t ar; + uint64_t resp_addr; + uint8_t flags; /* - * This value identifies the aggregation RR associated with the - * ring group. If this value is 0xFF... (All Fs), then no - * Aggregation ring will be set. + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. */ - uint16_t sc; + #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX + uint8_t unused_0; + /* This value identifies a meter instance in CFA. */ + uint16_t meter_instance_id; /* - * This value identifies the statistics context associated with - * the ring group. 
+ * A value of 0xffff is considered invalid and implies the + * instance is not configured. */ + #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_LAST \ + HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID + uint8_t unused_1[4]; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_ring_grp_alloc_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; - /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. - */ - uint32_t ring_group_id; - /* - * This is the ring group ID value. Use this value to program - * the default ring group for the VNIC or as table entries in an - * RSS/COS context. - */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; +/* hwrm_cfa_meter_instance_free_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_ring_grp_free */ -/* - * Description: This API frees a ring group and associated resources. # If a - * ring in the ring group is reset or free, then the associated rings in the - * ring group shall also be reset/free using hwrm_ring_free. # A function driver - * shall always use hwrm_ring_grp_free after freeing all rings in a group. # As - * a part of executing this command, the HWRM shall reset all associated ring - * group resources. - */ -/* Input (24 bytes) */ -struct hwrm_ring_grp_free_input { - uint16_t req_type; +/******************************* + * hwrm_cfa_decap_filter_alloc * + *******************************/ + + +/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */ +struct hwrm_cfa_decap_filter_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command.
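Nothing in these structures enforces an ordering between the two free commands, but since an instance is created against a profile id, the natural teardown order (an assumption, not something the header states) is the reverse of allocation:

static void cfa_meters_teardown(uint16_t instance_id, uint16_t *profile_id)
{
	/* Instance first: it is the object that references the profile. */
	cfa_meter_instance_free(instance_id);	/* hypothetical wrapper over the _free input above */
	cfa_meter_profile_free(profile_id);	/* the earlier sketch */
}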
*/ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t ring_group_id; - /* This is the ring group ID value. */ - uint32_t unused_0; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_ring_grp_free_output { - uint16_t error_code; + uint64_t resp_addr; + uint32_t flags; + /* ovs_tunnel is 1 b */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_FLAGS_OVS_TUNNEL \ + UINT32_C(0x1) + uint32_t enables; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This bit must be '1' for the tunnel_type field to be + * configured. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x1) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This bit must be '1' for the tunnel_id field to be + * configured. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_ID \ + UINT32_C(0x2) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This bit must be '1' for the src_macaddr field to be + * configured. */ -} __attribute__((packed)); - -/* hwrm_cfa_l2_filter_alloc */ -/* - * Description: An L2 filter is a filter resource that is used to identify a - * vnic or ring for a packet based on layer 2 fields. Layer 2 fields for - * encapsulated packets include both outer L2 header and/or inner l2 header of - * encapsulated packet. The L2 filter resource covers the following OS specific - * L2 filters. 
Linux/FreeBSD (per function): # Broadcast enable/disable # List - * of individual multicast filters # All multicast enable/disable filter # - * Unicast filters # Promiscuous mode VMware: # Broadcast enable/disable (per - * physical function) # All multicast enable/disable (per function) # Unicast - * filters per ring or vnic # Promiscuous mode per PF Windows: # Broadcast - * enable/disable (per physical function) # List of individual multicast filters - * (Driver needs to advertise the maximum number of filters supported) # All - * multicast enable/disable per physical function # Unicast filters per vnic # - * Promiscuous mode per PF Implementation notes on the use of VNIC in this - * command: # By default, these filters belong to default vnic for the function. - * # Once these filters are set up, only destination VNIC can be modified. # If - * the destination VNIC is not specified in this command, then the HWRM shall - * only create an l2 context id. HWRM Implementation notes for multicast - * filters: # The hwrm_filter_alloc command can be used to set up multicast - * filters (perfect match or partial match). Each individual function driver can - * set up multicast filters independently. # The HWRM needs to keep track of - * multicast filters set up by function drivers and maintain multicast group - * replication records to enable a subset of functions to receive traffic for a - * specific multicast address. # When a specific multicast filter cannot be set, - * the HWRM shall return an error. In this error case, the driver should fall - * back to using one general filter (rather than specific) for all multicast - * traffic. # When the SR-IOV is enabled, the HWRM needs to additionally track - * source knockout per multicast group record. Examples of setting unicast - * filters: For a unicast MAC based filter, one can use a combination of the - * fields and masks provided in this command to set up the filter. Below are - * some examples: # MAC + no VLAN filter: This filter is used to identify - * traffic that does not contain any VLAN tags and matches destination (or - * source) MAC address. This filter can be set up by setting only l2_addr field - * to be a valid field. All other fields are not valid. The following value is - * set for l2_addr. l2_addr = MAC # MAC + Any VLAN filter: This filter is used - * to identify traffic that carries single VLAN tag and matches (destination or - * source) MAC address. This filter can be set up by setting only l2_addr and - * l2_ovlan_mask fields to be valid fields. All other fields are not valid. The - * following values are set for those two valid fields. l2_addr = MAC, - * l2_ovlan_mask = 0xFFFF # MAC + no VLAN or VLAN ID=0: This filter is used to - * identify untagged traffic that does not contain any VLAN tags or a VLAN tag - * with VLAN ID = 0 and matches destination (or source) MAC address. This filter - * can be set up by setting only l2_addr and l2_ovlan fields to be valid fields. - * All other fields are not valid. The following value are set for l2_addr and - * l2_ovlan. l2_addr = MAC, l2_ovlan = 0x0 # MAC + no VLAN or any VLAN: This - * filter is used to identify traffic that contains zero or 1 VLAN tag and - * matches destination (or source) MAC address. This filter can be set up by - * setting only l2_addr, l2_ovlan, and l2_mask fields to be valid fields. All - * other fields are not valid. The following value are set for l2_addr, - * l2_ovlan, and l2_mask fields. 
l2_addr = MAC, l2_ovlan = 0x0, l2_ovlan_mask = - * 0xFFFF # MAC + VLAN ID filter: This filter can be set up by setting only - * l2_addr, l2_ovlan, and l2_ovlan_mask fields to be valid fields. All other - * fields are not valid. The following values are set for those three valid - * fields. l2_addr = MAC, l2_ovlan = VLAN ID, l2_ovlan_mask = 0xF000 - */ -/* Input (96 bytes) */ -struct hwrm_cfa_l2_filter_alloc_input { - uint16_t req_type; + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \ + UINT32_C(0x4) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This bit must be '1' for the dst_macaddr field to be + * configured. */ - uint16_t cmpl_ring; + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \ + UINT32_C(0x8) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the ovlan_vid field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_OVLAN_VID \ + UINT32_C(0x10) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This bit must be '1' for the ivlan_vid field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IVLAN_VID \ + UINT32_C(0x20) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This bit must be '1' for the t_ovlan_vid field to be + * configured. */ - uint32_t flags; + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_T_OVLAN_VID \ + UINT32_C(0x40) /* - * Enumeration denoting the RX, TX type of the resource. This - * enumeration is used for resources that are similar for both - * TX and RX paths of the chip. + * This bit must be '1' for the t_ivlan_vid field to be + * configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1) - /* tx path */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \ - (UINT32_C(0x0) << 0) - /* rx path */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \ - (UINT32_C(0x1) << 0) - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \ - CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_T_IVLAN_VID \ + UINT32_C(0x80) /* - * Setting of this flag indicates the applicability to the - * loopback path. + * This bit must be '1' for the ethertype field to be + * configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK UINT32_C(0x2) + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \ + UINT32_C(0x100) /* - * Setting of this flag indicates drop action. If this flag is - * not set, then it should be considered accept action. + * This bit must be '1' for the src_ipaddr field to be + * configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x4) + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \ + UINT32_C(0x200) /* - * If this flag is set, all t_l2_* fields are invalid and they - * should not be specified. If this flag is set, then l2_* - * fields refer to fields of outermost L2 header. 
+ * This bit must be '1' for the dst_ipaddr field to be + * configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST UINT32_C(0x8) - uint32_t enables; - /* This bit must be '1' for the l2_addr field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR UINT32_C(0x1) - /* This bit must be '1' for the l2_addr_mask field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \ - UINT32_C(0x2) - /* This bit must be '1' for the l2_ovlan field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN UINT32_C(0x4) + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \ + UINT32_C(0x400) /* - * This bit must be '1' for the l2_ovlan_mask field to be + * This bit must be '1' for the ipaddr_type field to be * configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \ - UINT32_C(0x8) - /* This bit must be '1' for the l2_ivlan field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN UINT32_C(0x10) + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \ + UINT32_C(0x800) /* - * This bit must be '1' for the l2_ivlan_mask field to be + * This bit must be '1' for the ip_protocol field to be * configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \ - UINT32_C(0x20) - /* This bit must be '1' for the t_l2_addr field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR UINT32_C(0x40) + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \ + UINT32_C(0x1000) /* - * This bit must be '1' for the t_l2_addr_mask field to be + * This bit must be '1' for the src_port field to be * configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \ - UINT32_C(0x80) - /* This bit must be '1' for the t_l2_ovlan field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \ - UINT32_C(0x100) + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \ + UINT32_C(0x2000) /* - * This bit must be '1' for the t_l2_ovlan_mask field to be + * This bit must be '1' for the dst_port field to be * configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \ - UINT32_C(0x200) - /* This bit must be '1' for the t_l2_ivlan field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \ - UINT32_C(0x400) + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \ + UINT32_C(0x4000) /* - * This bit must be '1' for the t_l2_ivlan_mask field to be + * This bit must be '1' for the dst_id field to be * configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \ - UINT32_C(0x800) - /* This bit must be '1' for the src_type field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE UINT32_C(0x1000) - /* This bit must be '1' for the src_id field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID UINT32_C(0x2000) - /* This bit must be '1' for the tunnel_type field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ - UINT32_C(0x4000) - /* This bit must be '1' for the dst_id field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x8000) + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_ID \ + UINT32_C(0x8000) /* * This bit must be '1' for the mirror_vnic_id field to be * configured. 
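The `enables` word acts as a field-validity mask: the firmware only matches on the fields whose enable bit is set. A minimal sketch of selecting a destination-MAC-plus-destination-port match, assuming the usual little-endian conversions (the helper name is illustrative):

#include <string.h>
#include <rte_byteorder.h>

static void
decap_match_dmac_dport(struct hwrm_cfa_decap_filter_alloc_input *req,
		       const uint8_t *dmac, uint16_t dport)
{
	memcpy(req->dst_macaddr, dmac, sizeof(req->dst_macaddr));
	req->dst_port = rte_cpu_to_le_16(dport);
	/* only fields flagged in 'enables' are considered by the firmware */
	req->enables = rte_cpu_to_le_32(
		HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR |
		HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_PORT);
}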
*/ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ UINT32_C(0x10000) - uint8_t l2_addr[6]; /* - * This value sets the match value for the L2 MAC address. - * Destination MAC address for RX path. Source MAC address for - * TX path. + * Tunnel identifier. + * Virtual Network Identifier (VNI). Only valid with + * tunnel_types VXLAN, NVGRE, and Geneve. + * Only lower 24-bits of VNI field are used + * in setting up the filter. */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t l2_addr_mask[6]; + uint32_t tunnel_id; + /* Tunnel Type. */ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Any tunneled traffic */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL + uint8_t unused_0; + uint16_t unused_1; /* - * This value sets the mask value for the L2 address. A value of - * 0 will mask the corresponding bit from compare. + * This value indicates the source MAC address in + * the Ethernet header. */ - uint16_t l2_ovlan; - /* This value sets VLAN ID value for outer VLAN. */ - uint16_t l2_ovlan_mask; + uint8_t src_macaddr[6]; + uint8_t unused_2[2]; /* - * This value sets the mask value for the ovlan id. A value of 0 - * will mask the corresponding bit from compare. + * This value indicates the destination MAC address in + * the Ethernet header. */ - uint16_t l2_ivlan; - /* This value sets VLAN ID value for inner VLAN. */ - uint16_t l2_ivlan_mask; + uint8_t dst_macaddr[6]; /* - * This value sets the mask value for the ivlan id. A value of 0 - * will mask the corresponding bit from compare. + * This value indicates the VLAN ID of the outer VLAN tag + * in the Ethernet header. */ - uint8_t unused_2; - uint8_t unused_3; - uint8_t t_l2_addr[6]; + uint16_t ovlan_vid; /* - * This value sets the match value for the tunnel L2 MAC - * address. Destination MAC address for RX path. Source MAC - * address for TX path. + * This value indicates the VLAN ID of the inner VLAN tag + * in the Ethernet header.
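Tying tunnel_type and tunnel_id together, a VXLAN decap match might be set up as below; that the 24-bit VNI occupies the low bits of tunnel_id is an assumption consistent with the comment above, and the helper name is illustrative:

static void
decap_match_vxlan_vni(struct hwrm_cfa_decap_filter_alloc_input *req,
		      uint32_t vni)
{
	req->tunnel_type = HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	/* per the comment above, only the low 24 bits of the VNI matter */
	req->tunnel_id = rte_cpu_to_le_32(vni & 0xffffff);
	req->enables |= rte_cpu_to_le_32(
		HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE |
		HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_ID);
}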
*/ - uint8_t unused_4; - uint8_t unused_5; - uint8_t t_l2_addr_mask[6]; + uint16_t ivlan_vid; /* - * This value sets the mask value for the tunnel L2 address. A - * value of 0 will mask the corresponding bit from compare. + * This value indicates the VLAN ID of the outer VLAN tag + * in the tunnel Ethernet header. */ - uint16_t t_l2_ovlan; - /* This value sets VLAN ID value for tunnel outer VLAN. */ - uint16_t t_l2_ovlan_mask; + uint16_t t_ovlan_vid; /* - * This value sets the mask value for the tunnel ovlan id. A - * value of 0 will mask the corresponding bit from compare. + * This value indicates the VLAN ID of the inner VLAN tag + * in the tunnel Ethernet header. */ - uint16_t t_l2_ivlan; - /* This value sets VLAN ID value for tunnel inner VLAN. */ - uint16_t t_l2_ivlan_mask; + uint16_t t_ivlan_vid; + /* This value indicates the ethertype in the Ethernet header. */ + uint16_t ethertype; /* - * This value sets the mask value for the tunnel ivlan id. A - * value of 0 will mask the corresponding bit from compare. + * This value indicates the type of IP address. + * 4 - IPv4 + * 6 - IPv6 + * All others are invalid. */ - uint8_t src_type; - /* This value identifies the type of source of the packet. */ - /* Network port */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT UINT32_C(0x0) - /* Physical function */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF UINT32_C(0x1) - /* Virtual function */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF UINT32_C(0x2) - /* Virtual NIC of a function */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC UINT32_C(0x3) - /* Embedded processor for CFA management */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG UINT32_C(0x4) - /* Embedded processor for OOB management */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE UINT32_C(0x5) - /* Embedded processor for RoCE */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO UINT32_C(0x6) - /* Embedded processor for network proxy functions */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG UINT32_C(0x7) - uint8_t unused_6; - uint32_t src_id; - /* - * This value is the id of the source. For a network port, it - * represents port_id. For a physical function, it represents - * fid. For a virtual function, it represents vf_id. For a vnic, - * it represents vnic_id. For embedded processors, this id is - * not valid. Notes: 1. The function ID is implied if it src_id - * is not provided for a src_type that is either - */ - uint8_t tunnel_type; - /* Tunnel Type. */ - /* Non-tunnel */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + uint8_t ip_addr_type; + /* invalid */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \ UINT32_C(0x0) - /* Virtual eXtensible Local Area Network (VXLAN) */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ - UINT32_C(0x1) + /* IPv4 */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \ + UINT32_C(0x4) + /* IPv6 */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \ + UINT32_C(0x6) + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \ + HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 /* - * Network Virtualization Generic Routing - * Encapsulation (NVGRE) + * The value of protocol field in IP header. + * Applies to UDP and TCP traffic.
+ * 6 - TCP + * 17 - UDP */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ - UINT32_C(0x2) + uint8_t ip_protocol; + /* invalid */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \ + UINT32_C(0x0) + /* TCP */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \ + UINT32_C(0x6) + /* UDP */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \ + UINT32_C(0x11) + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_LAST \ + HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP + uint16_t unused_3; + uint32_t unused_4; /* - * Generic Routing Encapsulation (GRE) inside - * Ethernet payload + * The value of source IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3) - /* IP in IP */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP UINT32_C(0x4) - /* Generic Network Virtualization Encapsulation (Geneve) */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5) - /* Multi-Protocol Lable Switching (MPLS) */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS UINT32_C(0x6) - /* Stateless Transport Tunnel (STT) */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7) + uint32_t src_ipaddr[4]; /* - * Generic Routing Encapsulation (GRE) inside IP - * datagram payload + * The value of destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8) + uint32_t dst_ipaddr[4]; /* - * IPV4 over virtual eXtensible Local Area - * Network (IPV4oVXLAN) + * The value of source port to be used in filtering. + * Applies to UDP and TCP traffic. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ - UINT32_C(0x9) - /* Any tunneled traffic */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ - UINT32_C(0xff) - uint8_t unused_7; - uint16_t dst_id; + uint16_t src_port; /* - * If set, this value shall represent the Logical VNIC ID of the - * destination VNIC for the RX path and network port id of the - * destination port for the TX path. + * The value of destination port to be used in filtering. + * Applies to UDP and TCP traffic. */ - uint16_t mirror_vnic_id; - /* Logical VNIC ID of the VNIC where traffic is mirrored. */ - uint8_t pri_hint; + uint16_t dst_port; /* - * This hint is provided to help in placing the filter in the - * filter table. + * If set, this value shall represent the + * Logical VNIC ID of the destination VNIC for the RX + * path. */ - /* No preference */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \ - UINT32_C(0x0) - /* Above the given filter */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER \ - UINT32_C(0x1) - /* Below the given filter */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER \ - UINT32_C(0x2) - /* As high as possible */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX UINT32_C(0x3) - /* As low as possible */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN UINT32_C(0x4) - uint8_t unused_8; - uint32_t unused_9; - uint64_t l2_filter_id_hint; + uint16_t dst_id; /* - * This is the ID of the filter that goes along with the - * pri_hint. This field is valid only for the following values. - * 1 - Above the given filter 2 - Below the given filter + * If set, this value shall represent the L2 context that matches the L2 + * information of the decap filter. 
*/ + uint16_t l2_ctxt_ref_id; } __attribute__((packed)); -/* Output (24 bytes) */ -struct hwrm_cfa_l2_filter_alloc_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; - /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. - */ - uint64_t l2_filter_id; - /* - * This value identifies a set of CFA data structures used for - * an L2 context. - */ - uint32_t flow_id; - /* - * This is the ID of the flow associated with this filter. This - * value shall be used to match and associate the flow - * identifier returned in completion records. A value of - * 0xFFFFFFFF shall indicate no flow id. - */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; +/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */ +struct hwrm_cfa_decap_filter_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value is an opaque id into CFA data structures. */ + uint32_t decap_filter_id; + uint8_t unused_0[3]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_cfa_l2_filter_free */ -/* - * Description: Free a L2 filter. The HWRM shall free all associated filter - * resources with the L2 filter. - */ -/* Input (24 bytes) */ -struct hwrm_cfa_l2_filter_free_input { - uint16_t req_type; +/****************************** + * hwrm_cfa_decap_filter_free * + ******************************/ + + +/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_decap_filter_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. 
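Allocation and free are paired through the opaque id: the value returned in hwrm_cfa_decap_filter_alloc_output is echoed back verbatim in the free request. A sketch, with a hypothetical send_hwrm_cmd() standing in for the driver's real transport and request-header setup:

#include <stddef.h>

/* hypothetical transport helper; the real driver fills the request
 * header (req_type, seq_id, resp_addr, ...) and hands it to firmware */
static int send_hwrm_cmd(void *req, size_t req_len);

static int
decap_filter_destroy(uint32_t id_from_alloc_resp)
{
	struct hwrm_cfa_decap_filter_free_input req = { 0 };

	/* the id is opaque: echo back exactly what _alloc returned */
	req.decap_filter_id = id_from_alloc_resp;
	return send_hwrm_cmd(&req, sizeof(req));
}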
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t l2_filter_id; + uint64_t resp_addr; + /* This value is an opaque id into CFA data structures. */ + uint32_t decap_filter_id; + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_decap_filter_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This value identifies a set of CFA data structures used for - * an L2 context. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_cfa_l2_filter_free_output { - uint16_t error_code; +/*********************** + * hwrm_cfa_flow_alloc * + ***********************/ + + +/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */ +struct hwrm_cfa_flow_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. 
This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* hwrm_cfa_l2_filter_cfg */ -/* Description: Change the configuration of an existing L2 filter */ -/* Input (40 bytes) */ -struct hwrm_cfa_l2_filter_cfg_input { - uint16_t req_type; + uint16_t target_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cmpl_ring; + uint64_t resp_addr; + uint16_t flags; + /* tunnel is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_TUNNEL UINT32_C(0x1) + /* num_vlan is 2 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_MASK UINT32_C(0x6) + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_SFT 1 + /* no tags */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_NONE \ + (UINT32_C(0x0) << 1) + /* 1 tag */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_ONE \ + (UINT32_C(0x1) << 1) + /* 2 tags */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_TWO \ + (UINT32_C(0x2) << 1) + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_LAST \ + HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_TWO + /* Enumeration denoting the Flow Type. */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_MASK UINT32_C(0x38) + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_SFT 3 + /* L2 flow */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_L2 \ + (UINT32_C(0x0) << 3) + /* IPV4 flow */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV4 \ + (UINT32_C(0x1) << 3) + /* IPV6 flow */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV6 \ + (UINT32_C(0x2) << 3) + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_LAST \ + HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV6 + /* + * Tx Flow: vf fid. + * Rx Flow: pf fid. + */ + uint16_t src_fid; + /* Tunnel handle valid when tunnel flag is set. */ + uint32_t tunnel_handle; + uint16_t action_flags; + /* + * Setting of this flag indicates forward action. + */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_FWD \ + UINT32_C(0x1) + /* recycle is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_RECYCLE \ + UINT32_C(0x2) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. */ - uint16_t seq_id; - /* This value indicates the command sequence number.
*/ - uint16_t target_id; + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_DROP \ + UINT32_C(0x4) + /* meter is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_METER \ + UINT32_C(0x8) + /* tunnel is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TUNNEL \ + UINT32_C(0x10) + /* nat_src is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_SRC \ + UINT32_C(0x20) + /* nat_dest is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_DEST \ + UINT32_C(0x40) + /* nat_ipv4_address is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_IPV4_ADDRESS \ + UINT32_C(0x80) + /* l2_header_rewrite is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_L2_HEADER_REWRITE \ + UINT32_C(0x100) + /* ttl_decrement is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TTL_DECREMENT \ + UINT32_C(0x200) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * Tx Flow: pf or vf fid. + * Rx Flow: vf fid. + */ + uint16_t dst_fid; + /* VLAN tpid, valid when push_vlan flag is set. */ + uint16_t l2_rewrite_vlan_tpid; + /* VLAN tci, valid when push_vlan flag is set. */ + uint16_t l2_rewrite_vlan_tci; + /* Meter id, valid when meter flag is set. */ + uint16_t act_meter_id; + /* Flow with the same l2 context tcam key. */ + uint16_t ref_flow_handle; + /* This value sets the match value for the ethertype. */ + uint16_t ethertype; + /* valid when num tags is 1 or 2. */ + uint16_t outer_vlan_tci; + /* This value sets the match value for the Destination MAC address. */ + uint16_t dmac[3]; + /* valid when num tags is 2. */ + uint16_t inner_vlan_tci; + /* This value sets the match value for the Source MAC address. */ + uint16_t smac[3]; + /* The bit length of destination IP address mask. */ + uint8_t ip_dst_mask_len; + /* The bit length of source IP address mask. */ + uint8_t ip_src_mask_len; + /* The value of destination IPv4/IPv6 address. */ + uint32_t ip_dst[4]; + /* The source IPv4/IPv6 address. */ + uint32_t ip_src[4]; + /* + * The value of source port. + * Applies to UDP and TCP traffic. */ - uint64_t resp_addr; + uint16_t l4_src_port; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The value of source port mask. + * Applies to UDP and TCP traffic. */ - uint32_t flags; + uint16_t l4_src_port_mask; /* - * Enumeration denoting the RX, TX type of the resource. This - * enumeration is used for resources that are similar for both - * TX and RX paths of the chip. + * The value of destination port. + * Applies to UDP and TCP traffic. */ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH UINT32_C(0x1) - /* tx path */ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX \ - (UINT32_C(0x0) << 0) - /* rx path */ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX \ - (UINT32_C(0x1) << 0) - #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \ - CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX + uint16_t l4_dst_port; /* - * Setting of this flag indicates drop action. If this flag is - * not set, then it should be considered accept action. + * The value of destination port mask. + * Applies to UDP and TCP traffic. */ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_DROP UINT32_C(0x2) - uint32_t enables; - /* This bit must be '1' for the dst_id field to be configured. 
*/ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID UINT32_C(0x1) + uint16_t l4_dst_port_mask; /* - * This bit must be '1' for the new_mirror_vnic_id field to be - * configured. + * NAT IPv4/6 address based on address type flag. + * 0 values are ignored. */ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ - UINT32_C(0x2) - uint64_t l2_filter_id; + uint32_t nat_ip_address[4]; + /* L2 header re-write Destination MAC address. */ + uint16_t l2_rewrite_dmac[3]; /* - * This value identifies a set of CFA data structures used for - * an L2 context. - */ - uint32_t dst_id; + * The NAT source/destination port based on direction flag. + * Applies to UDP and TCP traffic. + * 0 values are ignored. + */ + uint16_t nat_port; + /* L2 header re-write Source MAC address. */ + uint16_t l2_rewrite_smac[3]; + /* The value of ip protocol. */ + uint8_t ip_proto; + uint8_t unused_0; +} __attribute__((packed)); + +/* hwrm_cfa_flow_alloc_output (size:128b/16B) */ +struct hwrm_cfa_flow_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Flow record index. */ + uint16_t flow_handle; + uint8_t unused_0[5]; /* - * If set, this value shall represent the Logical VNIC ID of the - * destination VNIC for the RX path and network port id of the - * destination port for the TX path. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint32_t new_mirror_vnic_id; - /* New Logical VNIC ID of the VNIC where traffic is mirrored. */ + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_cfa_l2_filter_cfg_output { - uint16_t error_code; +/********************** + * hwrm_cfa_flow_free * + **********************/ + + +/* hwrm_cfa_flow_free_input (size:192b/24B) */ +struct hwrm_cfa_flow_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. 
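The action_flags bits compose: for example, forwarding a flow while rewriting its IPv4 source address sets FWD together with the NAT bits. A sketch, assuming (by analogy with the src_ipaddr/dst_ipaddr comments above) that IPv4 uses only the first word of nat_ip_address; on success the caller keeps the flow_handle from the response for later free/stats calls:

static void
flow_action_fwd_snat(struct hwrm_cfa_flow_alloc_input *req, uint32_t new_sip)
{
	/* forward the flow and rewrite its IPv4 source address */
	req->action_flags = rte_cpu_to_le_16(
		HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_FWD |
		HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_SRC |
		HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_IPV4_ADDRESS);
	/* assumed: for IPv4 only the first word of nat_ip_address is used */
	req->nat_ip_address[0] = rte_cpu_to_le_32(new_sip);
}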
+ * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ + uint64_t resp_addr; + /* Flow record index. */ + uint16_t flow_handle; + uint8_t unused_0[6]; } __attribute__((packed)); -/* hwrm_cfa_l2_set_rx_mask */ -/* Description: This command will set rx mask of the function. */ -/* Input (56 bytes) */ -struct hwrm_cfa_l2_set_rx_mask_input { - uint16_t req_type; +/* hwrm_cfa_flow_free_output (size:256b/32B) */ +struct hwrm_cfa_flow_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* packet is 64 b */ + uint64_t packet; + /* byte is 64 b */ + uint64_t byte; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_cfa_flow_info * + **********************/ + + +/* hwrm_cfa_flow_info_input (size:192b/24B) */ +struct hwrm_cfa_flow_info_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 
0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t vnic_id; - /* VNIC ID */ - uint32_t mask; - /* Reserved for future use. */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_RESERVED UINT32_C(0x1) + uint64_t resp_addr; + /* Flow record index. */ + uint16_t flow_handle; + /* Max flow handle */ + #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_MASK \ + UINT32_C(0xfff) + #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_SFT 0 + /* CNP flow handle */ + #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT \ + UINT32_C(0x1000) + /* Direction rx = 1 */ + #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX \ + UINT32_C(0x8000) + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_cfa_flow_info_output (size:448b/56B) */ +struct hwrm_cfa_flow_info_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* flags is 8 b */ + uint8_t flags; + /* profile is 8 b */ + uint8_t profile; + /* src_fid is 16 b */ + uint16_t src_fid; + /* dst_fid is 16 b */ + uint16_t dst_fid; + /* l2_ctxt_id is 16 b */ + uint16_t l2_ctxt_id; + /* em_info is 64 b */ + uint64_t em_info; + /* tcam_info is 64 b */ + uint64_t tcam_info; + /* vfp_tcam_info is 64 b */ + uint64_t vfp_tcam_info; + /* ar_id is 16 b */ + uint16_t ar_id; + /* flow_handle is 16 b */ + uint16_t flow_handle; + /* tunnel_handle is 32 b */ + uint32_t tunnel_handle; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_cfa_flow_flush * + ***********************/ + + +/* hwrm_cfa_flow_flush_input (size:192b/24B) */ +struct hwrm_cfa_flow_flush_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * When this bit is '1', the function is requested to accept - * multi-cast packets specified by the multicast addr table. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST UINT32_C(0x2) + uint16_t cmpl_ring; /* - * When this bit is '1', the function is requested to accept all - * multi-cast packets. + * The sequence ID is used by the driver for tracking multiple + * commands. 
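The flow_handle passed to hwrm_cfa_flow_info is not a plain index: bits 0-11 carry the index, bit 12 selects the CNP counters, and bit 15 the RX direction. A minimal sketch of composing it (helper name illustrative):

#include <stdbool.h>

static void
flow_info_set_handle(struct hwrm_cfa_flow_info_input *req,
		     uint16_t flow_idx, bool rx)
{
	uint16_t handle = flow_idx & HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_MASK;

	if (rx)
		handle |= HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX;
	req->flow_handle = rte_cpu_to_le_16(handle);
}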
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST UINT32_C(0x4) + uint16_t seq_id; /* - * When this bit is '1', the function is requested to accept - * broadcast packets. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST UINT32_C(0x8) - /* - * When this bit is '1', the function is requested to be put in - * the promiscuous mode. The HWRM should accept any function to - * set up promiscuous mode. The HWRM shall follow the semantics - * below for the promiscuous mode support. # When partitioning - * is not enabled on a port (i.e. single PF on the port), then - * the PF shall be allowed to be in the promiscuous mode. When - * the PF is in the promiscuous mode, then it shall receive all - * host bound traffic on that port. # When partitioning is - * enabled on a port (i.e. multiple PFs per port) and a PF on - * that port is in the promiscuous mode, then the PF receives - * all traffic within that partition as identified by a unique - * identifier for the PF (e.g. S-Tag). If a unique outer VLAN - * for the PF is specified, then the setting of promiscuous mode - * on that PF shall result in the PF receiving all host bound - * traffic with matching outer VLAN. # A VF shall can be set in - * the promiscuous mode. In the promiscuous mode, the VF does - * not receive any traffic unless a unique outer VLAN for the VF - * is specified. If a unique outer VLAN for the VF is specified, - * then the setting of promiscuous mode on that VF shall result - * in the VF receiving all host bound traffic with the matching - * outer VLAN. # The HWRM shall allow the setting of promiscuous - * mode on a function independently from the promiscuous mode - * settings on other functions. - */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10) - /* - * If this flag is set, the corresponding RX filters shall be - * set up to cover multicast/broadcast filters for the outermost - * Layer 2 destination MAC address field. - */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST UINT32_C(0x20) - /* - * If this flag is set, the corresponding RX filters shall be - * set up to cover multicast/broadcast filters for the VLAN- - * tagged packets that match the TPID and VID fields of VLAN - * tags in the VLAN tag table specified in this command. + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY UINT32_C(0x40) + uint64_t resp_addr; + uint32_t flags; + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_cfa_flow_flush_output (size:128b/16B) */ +struct hwrm_cfa_flow_flush_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. 
*/ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * If this flag is set, the corresponding RX filters shall be - * set up to cover multicast/broadcast filters for non-VLAN - * tagged packets and VLAN-tagged packets that match the TPID - * and VID fields of VLAN tags in the VLAN tag table specified - * in this command. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN UINT32_C(0x80) + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_cfa_flow_stats * + ***********************/ + + +/* hwrm_cfa_flow_stats_input (size:320b/40B) */ +struct hwrm_cfa_flow_stats_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * If this flag is set, the corresponding RX filters shall be - * set up to cover multicast/broadcast filters for non-VLAN - * tagged packets and VLAN-tagged packets matching any VLAN tag. - * If this flag is set, then the HWRM shall ignore VLAN tags - * specified in vlan_tag_tbl. If none of vlanonly, vlan_nonvlan, - * and anyvlan_nonvlan flags is set, then the HWRM shall ignore - * VLAN tags specified in vlan_tag_tbl. The HWRM client shall - * set at most one flag out of vlanonly, vlan_nonvlan, and - * anyvlan_nonvlan. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN \ - UINT32_C(0x100) - uint64_t mc_tbl_addr; - /* This is the address for mcast address tbl. */ - uint32_t num_mc_entries; + uint16_t cmpl_ring; /* - * This value indicates how many entries in mc_tbl are valid. - * Each entry is 6 bytes. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t unused_0; - uint64_t vlan_tag_tbl_addr; + uint16_t seq_id; /* - * This is the address for VLAN tag table. Each VLAN entry in - * the table is 4 bytes of a VLAN tag including TPID, PCP, DEI, - * and VID fields in network byte order. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint32_t num_vlan_tags; + uint16_t target_id; /* - * This value indicates how many entries in vlan_tag_tbl are - * valid. Each entry is 4 bytes. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t unused_1; + uint64_t resp_addr; + /* Number of valid flow handles in this command. */ + uint16_t num_flows; + /* Flow handle. */ + uint16_t flow_handle_0; + /* Flow handle. */ + uint16_t flow_handle_1; + /* Flow handle. */ + uint16_t flow_handle_2; + /* Flow handle. */ + uint16_t flow_handle_3; + /* Flow handle. */ + uint16_t flow_handle_4; + /* Flow handle. */ + uint16_t flow_handle_5; + /* Flow handle. */ + uint16_t flow_handle_6; + /* Flow handle. */ + uint16_t flow_handle_7; + /* Flow handle. */ + uint16_t flow_handle_8; + /* Flow handle.
*/ + uint16_t flow_handle_9; + uint8_t unused_0[2]; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_cfa_l2_set_rx_mask_output { - uint16_t error_code; +/* hwrm_cfa_flow_stats_output (size:1408b/176B) */ +struct hwrm_cfa_flow_stats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* packet_0 is 64 b */ + uint64_t packet_0; + /* packet_1 is 64 b */ + uint64_t packet_1; + /* packet_2 is 64 b */ + uint64_t packet_2; + /* packet_3 is 64 b */ + uint64_t packet_3; + /* packet_4 is 64 b */ + uint64_t packet_4; + /* packet_5 is 64 b */ + uint64_t packet_5; + /* packet_6 is 64 b */ + uint64_t packet_6; + /* packet_7 is 64 b */ + uint64_t packet_7; + /* packet_8 is 64 b */ + uint64_t packet_8; + /* packet_9 is 64 b */ + uint64_t packet_9; + /* byte_0 is 64 b */ + uint64_t byte_0; + /* byte_1 is 64 b */ + uint64_t byte_1; + /* byte_2 is 64 b */ + uint64_t byte_2; + /* byte_3 is 64 b */ + uint64_t byte_3; + /* byte_4 is 64 b */ + uint64_t byte_4; + /* byte_5 is 64 b */ + uint64_t byte_5; + /* byte_6 is 64 b */ + uint64_t byte_6; + /* byte_7 is 64 b */ + uint64_t byte_7; + /* byte_8 is 64 b */ + uint64_t byte_8; + /* byte_9 is 64 b */ + uint64_t byte_9; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_cfa_vf_pair_alloc * + **************************/ + + +/* hwrm_cfa_vf_pair_alloc_input (size:448b/56B) */ +struct hwrm_cfa_vf_pair_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. 
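hwrm_cfa_flow_stats batches up to ten flow handles per command, and the response returns the counters positionally in packet_0..packet_9 and byte_0..byte_9. A sketch of filling the request (only the first two slots shown; that unused slots may be left at zero is an assumption):

static void
flow_stats_fill(struct hwrm_cfa_flow_stats_input *req,
		const uint16_t *handles, uint16_t n)
{
	/* up to 10 handles per command; num_flows says how many are valid */
	req->num_flows = rte_cpu_to_le_16(n);
	req->flow_handle_0 = n > 0 ? rte_cpu_to_le_16(handles[0]) : 0;
	req->flow_handle_1 = n > 1 ? rte_cpu_to_le_16(handles[1]) : 0;
	/* ...continue through flow_handle_9; read the results back with
	 * rte_le_to_cpu_64() from packet_N/byte_N in the same slot order */
}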
+ * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* Command specific Error Codes (8 bytes) */ -struct hwrm_cfa_l2_set_rx_mask_cmd_err { - uint8_t code; + uint16_t target_id; /* - * command specific error codes that goes to the cmd_err field - * in Common HWRM Error Response. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - /* Unknown error */ - #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + uint64_t resp_addr; + /* Logical VF number (range: 0 -> MAX_VFS -1). */ + uint16_t vf_a_id; + /* Logical VF number (range: 0 -> MAX_VFS -1). */ + uint16_t vf_b_id; + uint8_t unused_0[4]; + /* VF Pair name (32 byte string). */ + char pair_name[32]; +} __attribute__((packed)); + +/* hwrm_cfa_vf_pair_alloc_output (size:128b/16B) */ +struct hwrm_cfa_vf_pair_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Unable to complete operation due to conflict - * with Ntuple Filter + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define \ - HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR \ - UINT32_C(0x1) - uint8_t unused_0[7]; + uint8_t valid; } __attribute__((packed)); -/* hwrm_cfa_vlan_antispoof_cfg */ -/* Description: Configures vlan anti-spoof filters for VF. */ -/* Input (32 bytes) */ -struct hwrm_cfa_vlan_antispoof_cfg_input { - uint16_t req_type; +/************************* + * hwrm_cfa_vf_pair_free * + *************************/ + + +/* hwrm_cfa_vf_pair_free_input (size:384b/48B) */ +struct hwrm_cfa_vf_pair_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format for the - * rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request will be - * optionally completed on. If the value is -1, then no CR completion - * will be generated. Any other value must be a valid CR ring_id value - * for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 
0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written when the - * request is complete. This area must be 16B aligned and must be - * cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t fid; + uint64_t resp_addr; + /* VF Pair name (32 byte string). */ + char pair_name[32]; +} __attribute__((packed)); + +/* hwrm_cfa_vf_pair_free_output (size:128b/16B) */ +struct hwrm_cfa_vf_pair_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Function ID of the function that is being configured. Only valid for - * a VF FID configured by the PF. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint8_t unused_0; - uint8_t unused_1; - uint32_t num_vlan_entries; - /* Number of VLAN entries in the vlan_tag_mask_tbl. */ - uint64_t vlan_tag_mask_tbl_addr; + uint8_t valid; +} __attribute__((packed)); + +/************************* + * hwrm_cfa_vf_pair_info * + *************************/ + + +/* hwrm_cfa_vf_pair_info_input (size:448b/56B) */ +struct hwrm_cfa_vf_pair_info_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN antispoof - * table. Each table entry contains the 16-bit TPID (0x8100 or 0x88a8 - * only), 16-bit VLAN ID, and a 16-bit mask, all in network order to - * match hwrm_cfa_l2_set_rx_mask. For an individual VLAN entry, the mask - * value should be 0xfff for the 12-bit VLAN ID. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ -}; - -/* Output (16 bytes) */ -struct hwrm_cfa_vlan_antispoof_cfg_output { - uint16_t error_code; + uint16_t cmpl_ring; /* - * Pass/Fail or error type Note: receiver to verify the in parameters, - * and fail the call with an error when appropriate + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t seq_id; /* - * This field is the length of the response in bytes. The last byte of - * the response is a valid flag that will read as '1' when the command - * has been completely written to memory. 
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t target_id;
 /*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
-};
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* If this flag is set, lookup by name else lookup by index. */
+ #define HWRM_CFA_VF_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
+ /* vf pair table index. */
+ uint16_t vf_pair_index;
+ uint8_t unused_0[2];
+ /* VF Pair name (32 byte string). */
+ char vf_pair_name[32];
+} __attribute__((packed));
-/* hwrm_cfa_ntuple_filter_alloc */
-/*
- * Description: This is a ntuple filter that uses fields from L4/L3 header and
- * optionally fields from L2. The ntuple filters apply to receive traffic only.
- * All L2/L3/L4 header fields are specified in network byte order. These filters
- * can be used for Receive Flow Steering (RFS). # For ethertype value, only
- * 0x0800 (IPv4) and 0x86dd (IPv6) shall be supported for ntuple filters. # If a
- * field specified in this command is not enabled as a valid field, then that
- * field shall not be used in matching packet header fields against this filter.
- */
-/* Input (128 bytes) */
-struct hwrm_cfa_ntuple_filter_alloc_input {
- uint16_t req_type;
+/* hwrm_cfa_vf_pair_info_output (size:512b/64B) */
+struct hwrm_cfa_vf_pair_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* vf pair table index. */
+ uint16_t next_vf_pair_index;
+ /* vf pair member a's vf_fid. */
+ uint16_t vf_a_fid;
+ /* vf pair member a's Linux logical VF number. */
+ uint16_t vf_a_index;
+ /* vf pair member b's vf_fid. */
+ uint16_t vf_b_fid;
+ /* vf pair member b's Linux logical VF number. */
+ uint16_t vf_b_index;
+ /* vf pair state. */
+ uint8_t pair_state;
+ /* Pair has been allocated */
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1)
+ /* Both pair members are active */
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2)
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \
+ HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE
+ uint8_t unused_0[5];
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
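+ *
+ * For example, a minimal polling sketch. The hwrm_send_nowait()
+ * helper here is hypothetical (shown only to illustrate the
+ * ordering guarantee): it posts the request and returns once the
+ * firmware has been told, via resp_addr, to DMA the response into
+ * 'resp':
+ *
+ *   struct hwrm_cfa_vf_pair_info_output resp = { 0 };
+ *   hwrm_send_nowait(&req, sizeof(req), &resp);
+ *   // 'valid' is the last byte of the response and is written last
+ *   while (((volatile uint8_t *)&resp)[sizeof(resp) - 1] != 1)
+ *       rte_pause();
+ *   // every other response field is now safe to read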
 */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_cfa_pair_alloc *
+ ***********************/
+
+
+/* hwrm_cfa_pair_alloc_input (size:576b/72B) */
+struct hwrm_cfa_pair_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
 */
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
 /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
 */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
 /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- uint64_t resp_addr;
+ uint16_t target_id;
 /*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- uint32_t flags;
+ uint64_t resp_addr;
+ /* Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair, 5-rep2fn_mod, 6-rep2fn_modall). */
+ uint8_t pair_mode;
+ /* Pair between VF on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
+ /* Pair between REP on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
+ /* Pair between REP on local host with REP on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2REP UINT32_C(0x2)
+ /* Pair for the proxy interface. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PROXY UINT32_C(0x3)
+ /* Pair for the PF interface. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PFPAIR UINT32_C(0x4)
+ /* Modify existing rep2fn pair and move pair to new PF. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MOD UINT32_C(0x5)
+ /* Modify existing rep2fn pairs paired with same PF and move pairs to new PF. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MODALL UINT32_C(0x6)
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_LAST \
+ HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MODALL
+ uint8_t unused_0;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_a_id;
+ /* Logical Host (0xff-local host). */
+ uint8_t host_b_id;
+ /* Logical PF (0xff-PF for command channel). */
+ uint8_t pf_b_id;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_b_id;
+ /* Loopback port (0xff-internal loopback), valid for mode-3. */
+ uint8_t port_id;
+ /* Priority used for encap of loopback packets, valid for mode-3.
*/ + uint8_t pri; + /* New PF for rep2fn modify, valid for mode 5. */ + uint16_t new_pf_fid; + uint32_t enables; /* - * Setting of this flag indicates the applicability to the - * loopback path. + * This bit must be '1' for the q_ab field to be + * configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \ - UINT32_C(0x1) + #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID UINT32_C(0x1) /* - * Setting of this flag indicates drop action. If this flag is - * not set, then it should be considered accept action. + * This bit must be '1' for the q_ba field to be + * configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x2) + #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID UINT32_C(0x2) /* - * Setting of this flag indicates that a meter is expected to be - * attached to this flow. This hint can be used when choosing - * the action record format required for the flow. + * This bit must be '1' for the fc_ab field to be + * configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER UINT32_C(0x4) - uint32_t enables; - /* This bit must be '1' for the l2_filter_id field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \ - UINT32_C(0x1) - /* This bit must be '1' for the ethertype field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \ - UINT32_C(0x2) - /* This bit must be '1' for the tunnel_type field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ - UINT32_C(0x4) - /* This bit must be '1' for the src_macaddr field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \ - UINT32_C(0x8) - /* This bit must be '1' for the ipaddr_type field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \ - UINT32_C(0x10) - /* This bit must be '1' for the src_ipaddr field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \ - UINT32_C(0x20) + #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID UINT32_C(0x4) /* - * This bit must be '1' for the src_ipaddr_mask field to be + * This bit must be '1' for the fc_ba field to be * configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK \ - UINT32_C(0x40) - /* This bit must be '1' for the dst_ipaddr field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \ - UINT32_C(0x80) + #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID UINT32_C(0x8) + /* VF Pair name (32 byte string). */ + char pair_name[32]; /* - * This bit must be '1' for the dst_ipaddr_mask field to be - * configured. + * The q_ab value specifies the logical index of the TX/RX CoS + * queue to be assigned for traffic in the A to B direction of + * the interface pair. The default value is 0. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK \ - UINT32_C(0x100) - /* This bit must be '1' for the ip_protocol field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \ - UINT32_C(0x200) - /* This bit must be '1' for the src_port field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \ - UINT32_C(0x400) + uint8_t q_ab; /* - * This bit must be '1' for the src_port_mask field to be - * configured. + * The q_ba value specifies the logical index of the TX/RX CoS + * queue to be assigned for traffic in the B to A direction of + * the interface pair. The default value is 1. 
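+ *
+ * For example, a minimal sketch overriding both queue defaults. The
+ * little-endian conversion follows the driver's wire convention; the
+ * header setup and send step are assumed to happen as sketched for
+ * the other commands:
+ *
+ *   struct hwrm_cfa_pair_alloc_input req = { 0 };
+ *   req.q_ab = 2;
+ *   req.q_ba = 3;
+ *   req.enables = rte_cpu_to_le_32(
+ *       HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID |
+ *       HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID);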
*/ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK \ - UINT32_C(0x800) - /* This bit must be '1' for the dst_port field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \ - UINT32_C(0x1000) + uint8_t q_ba; /* - * This bit must be '1' for the dst_port_mask field to be - * configured. + * Specifies whether RX ring flow control is disabled (0) or enabled + * (1) in the A to B direction. The default value is 0, meaning that + * packets will be dropped when the B-side RX rings are full. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK \ - UINT32_C(0x2000) - /* This bit must be '1' for the pri_hint field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_PRI_HINT \ - UINT32_C(0x4000) + uint8_t fc_ab; /* - * This bit must be '1' for the ntuple_filter_id field to be - * configured. + * Specifies whether RX ring flow control is disabled (0) or enabled + * (1) in the B to A direction. The default value is 1, meaning that + * the RX CoS queue will be flow controlled when the A-side RX rings + * are full. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_NTUPLE_FILTER_ID \ - UINT32_C(0x8000) - /* This bit must be '1' for the dst_id field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID \ - UINT32_C(0x10000) + uint8_t fc_ba; + uint8_t unused_1[4]; +} __attribute__((packed)); + +/* hwrm_cfa_pair_alloc_output (size:192b/24B) */ +struct hwrm_cfa_pair_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Only valid for modes 1 and 2. */ + uint16_t rx_cfa_code_a; + /* Only valid for modes 1 and 2. */ + uint16_t tx_cfa_action_a; + /* Only valid for mode 2. */ + uint16_t rx_cfa_code_b; + /* Only valid for mode 2. */ + uint16_t tx_cfa_action_b; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_cfa_pair_free * + **********************/ + + +/* hwrm_cfa_pair_free_input (size:384b/48B) */ +struct hwrm_cfa_pair_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This bit must be '1' for the mirror_vnic_id field to be - * configured. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ - UINT32_C(0x20000) - /* This bit must be '1' for the dst_macaddr field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \ - UINT32_C(0x40000) - uint64_t l2_filter_id; + uint16_t cmpl_ring; /* - * This value identifies a set of CFA data structures used for - * an L2 context. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
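+ *
+ * For example, a minimal sketch of the header fields carried by every
+ * HWRM request. The sequence counter 'seq' and the response-buffer
+ * IOVA are illustrative placeholders, and HWRM_CFA_PAIR_FREE is
+ * assumed to be the request-type constant defined alongside the other
+ * command IDs in this file; HWRM fields are little-endian on the wire:
+ *
+ *   struct hwrm_cfa_pair_free_input req = { 0 };
+ *   req.req_type = rte_cpu_to_le_16(HWRM_CFA_PAIR_FREE);
+ *   req.cmpl_ring = rte_cpu_to_le_16(0xffff); // no completion ring
+ *   req.seq_id = rte_cpu_to_le_16(seq++);     // opaque to the firmware
+ *   req.target_id = rte_cpu_to_le_16(0xffff); // 0xFFFF addresses the HWRM
+ *   req.resp_addr = rte_cpu_to_le_64(resp_buf_iova);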
*/ - uint8_t src_macaddr[6]; + uint16_t seq_id; /* - * This value indicates the source MAC address in the Ethernet - * header. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t ethertype; - /* This value indicates the ethertype in the Ethernet header. */ - uint8_t ip_addr_type; + uint16_t target_id; /* - * This value indicates the type of IP address. 4 - IPv4 6 - - * IPv6 All others are invalid. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - /* invalid */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \ - UINT32_C(0x0) - /* IPv4 */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \ - UINT32_C(0x4) - /* IPv6 */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \ - UINT32_C(0x6) - uint8_t ip_protocol; + uint64_t resp_addr; + /* VF Pair name (32 byte string). */ + char pair_name[32]; +} __attribute__((packed)); + +/* hwrm_cfa_pair_free_output (size:128b/16B) */ +struct hwrm_cfa_pair_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * The value of protocol filed in IP header. Applies to UDP and - * TCP traffic. 6 - TCP 17 - UDP + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - /* invalid */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \ - UINT32_C(0x0) - /* TCP */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \ - UINT32_C(0x6) - /* UDP */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \ - UINT32_C(0x11) - uint16_t dst_id; + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_cfa_pair_info * + **********************/ + + +/* hwrm_cfa_pair_info_input (size:448b/56B) */ +struct hwrm_cfa_pair_info_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * If set, this value shall represent the Logical VNIC ID of the - * destination VNIC for the RX path and network port id of the - * destination port for the TX path. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t mirror_vnic_id; - /* Logical VNIC ID of the VNIC where traffic is mirrored. */ - uint8_t tunnel_type; + uint16_t cmpl_ring; /* - * This value indicates the tunnel type for this filter. If this - * field is not specified, then the filter shall apply to both - * non-tunneled and tunneled packets. If this field conflicts - * with the tunnel_type specified in the l2_filter_id, then the - * HWRM shall return an error for this command. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
 */
- /* Non-tunnel */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
- UINT32_C(0x0)
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
- UINT32_C(0x1)
+ uint16_t seq_id;
 /*
- * Network Virtualization Generic Routing
- * Encapsulation (NVGRE)
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
- UINT32_C(0x2)
+ uint16_t target_id;
 /*
- * Generic Routing Encapsulation (GRE) inside
- * Ethernet payload
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
- UINT32_C(0x3)
- /* IP in IP */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
- UINT32_C(0x4)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
- UINT32_C(0x5)
- /* Multi-Protocol Lable Switching (MPLS) */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
- UINT32_C(0x6)
- /* Stateless Transport Tunnel (STT) */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* If this flag is set, lookup by name else lookup by index. */
+ #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
+ /* If this flag is set, lookup by PF id and VF id. */
+ #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_REPRE UINT32_C(0x2)
+ /* Pair table index. */
+ uint16_t pair_index;
+ /* Pair pf index. */
+ uint8_t pair_pfid;
+ /* Pair vf index. */
+ uint8_t pair_vfid;
+ /* Pair name (32 byte string). */
+ char pair_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_pair_info_output (size:576b/72B) */
+struct hwrm_cfa_pair_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Pair table index. */
+ uint16_t next_pair_index;
+ /* Pair member a's fid. */
+ uint16_t a_fid;
+ /* Logical host number. */
+ uint8_t host_a_index;
+ /* Logical PF number. */
+ uint8_t pf_a_index;
+ /* Pair member a's Linux logical VF number. */
+ uint16_t vf_a_index;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code_a;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action_a;
+ /* Pair member b's fid. */
+ uint16_t b_fid;
+ /* Logical host number. */
+ uint8_t host_b_index;
+ /* Logical PF number. */
+ uint8_t pf_b_index;
+ /* Pair member b's Linux logical VF number. */
+ uint16_t vf_b_index;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code_b;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action_b;
+ /* Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair). */
+ uint8_t pair_mode;
+ /* Pair between VF on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
+ /* Pair between REP on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
+ /* Pair between REP on local host with REP on specified host.
*/ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2REP UINT32_C(0x2) + /* Pair for the proxy interface. */ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PROXY UINT32_C(0x3) + /* Pair for the PF interface. */ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR UINT32_C(0x4) + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_LAST \ + HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR + /* Pair state. */ + uint8_t pair_state; + /* Pair has been allocated */ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1) + /* Both pair members are active */ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2) + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \ + HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE + /* Pair name (32 byte string). */ + char pair_name[32]; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_cfa_vfr_alloc * + **********************/ + + +/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */ +struct hwrm_cfa_vfr_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Generic Routing Encapsulation (GRE) inside IP - * datagram payload + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ - UINT32_C(0x8) - /* Any tunneled traffic */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ - UINT32_C(0xff) - uint8_t pri_hint; + uint16_t cmpl_ring; /* - * This hint is provided to help in placing the filter in the - * filter table. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - /* No preference */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \ - UINT32_C(0x0) - /* Above the given filter */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE UINT32_C(0x1) - /* Below the given filter */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_BELOW UINT32_C(0x2) - /* As high as possible */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_HIGHEST \ - UINT32_C(0x3) - /* As low as possible */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST UINT32_C(0x4) - uint32_t src_ipaddr[4]; + uint16_t seq_id; /* - * The value of source IP address to be used in filtering. For - * IPv4, first four bytes represent the IP address. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint32_t src_ipaddr_mask[4]; + uint16_t target_id; /* - * The value of source IP address mask to be used in filtering. - * For IPv4, first four bytes represent the IP address mask. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t dst_ipaddr[4]; + uint64_t resp_addr; + /* Logical VF number (range: 0 -> MAX_VFS -1). 
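+ *
+ * For example, a minimal sketch allocating a representor for VF 0.
+ * hwrm_send() stands in for a real request/response transport and is
+ * hypothetical; the representor name is arbitrary:
+ *
+ *   struct hwrm_cfa_vfr_alloc_input req = { 0 };
+ *   struct hwrm_cfa_vfr_alloc_output resp = { 0 };
+ *   req.vf_id = rte_cpu_to_le_16(0);
+ *   snprintf(req.vfr_name, sizeof(req.vfr_name), "vfr0");
+ *   if (hwrm_send(&req, sizeof(req), &resp, sizeof(resp)) == 0) {
+ *       uint16_t rx_cfa_code = rte_le_to_cpu_16(resp.rx_cfa_code);
+ *       uint16_t tx_cfa_action = rte_le_to_cpu_16(resp.tx_cfa_action);
+ *   }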
 */
+ uint16_t vf_id;
 /*
- * The value of destination IP address to be used in filtering.
- * For IPv4, first four bytes represent the IP address.
+ * This field is reserved for future use.
+ * It shall be set to 0.
 */
- uint32_t dst_ipaddr_mask[4];
+ uint16_t reserved;
+ uint8_t unused_0[4];
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vfr_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************
+ * hwrm_cfa_vfr_free *
+ *********************/
+
+
+/* hwrm_cfa_vfr_free_input (size:384b/48B) */
+struct hwrm_cfa_vfr_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * The value of destination IP address mask to be used in
- * filtering. For IPv4, first four bytes represent the IP
- * address mask.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
 */
- uint16_t src_port;
+ uint16_t cmpl_ring;
 /*
- * The value of source port to be used in filtering. Applies to
- * UDP and TCP traffic.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
 */
- uint16_t src_port_mask;
+ uint16_t seq_id;
 /*
- * The value of source port mask to be used in filtering.
- * Applies to UDP and TCP traffic.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- uint16_t dst_port;
+ uint16_t target_id;
 /*
- * The value of destination port to be used in filtering.
- * Applies to UDP and TCP traffic.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- uint16_t dst_port_mask;
+ uint64_t resp_addr;
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+struct hwrm_cfa_vfr_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
 /*
- * The value of destination port mask to be used in filtering.
- * Applies to UDP and TCP traffic.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM.
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t ntuple_filter_id_hint; - /* This is the ID of the filter that goes along with the pri_hint. */ + uint8_t valid; } __attribute__((packed)); -/* Output (24 bytes) */ -struct hwrm_cfa_ntuple_filter_alloc_output { - uint16_t error_code; +/****************************** + * hwrm_tunnel_dst_port_query * + ******************************/ + + +/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_query_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t ntuple_filter_id; - /* This value is an opaque id into CFA data structures. */ - uint32_t flow_id; + uint16_t seq_id; /* - * This is the ID of the flow associated with this filter. This - * value shall be used to match and associate the flow - * identifier returned in completion records. A value of - * 0xFFFFFFFF shall indicate no flow id. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ + uint64_t resp_addr; + /* Tunnel Type. 
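+ *
+ * For example, a minimal sketch asking which UDP port is currently
+ * bound to VXLAN parsing (hwrm_send() is the hypothetical transport
+ * helper used in the other sketches in this file):
+ *
+ *   struct hwrm_tunnel_dst_port_query_input req = { 0 };
+ *   struct hwrm_tunnel_dst_port_query_output resp = { 0 };
+ *   req.tunnel_type = HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN;
+ *   if (hwrm_send(&req, sizeof(req), &resp, sizeof(resp)) == 0) {
+ *       // tunnel_dst_port_val is big-endian; 0 means not configured
+ *       uint16_t vxlan_port = rte_be_to_cpu_16(resp.tunnel_dst_port_val);
+ *   }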
 */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_IPGRE_V1
+ uint8_t unused_0[7];
 } __attribute__((packed));
-/* Command specific Error Codes (8 bytes) */
-struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
- uint8_t code;
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
 /*
- * command specific error codes that goes to the cmd_err field
- * in Common HWRM Error Response.
+ * This field represents the identifier of the L4 destination port
+ * used for the given tunnel type. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
 */
- /* Unknown error */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ uint16_t tunnel_dst_port_id;
+ /*
+ * This field represents the value of the L4 destination port
+ * identified by tunnel_dst_port_id. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
+ * This field is in network byte order.
+ *
+ * A value of 0 means that the destination port is not
+ * configured.
+ */
+ uint16_t tunnel_dst_port_val;
+ uint8_t unused_0[3];
 /*
- * Unable to complete operation due to conflict
- * with Rx Mask VLAN
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
- #define \
- HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR \
- UINT32_C(0x1)
- uint8_t unused_0[7];
+ uint8_t valid;
 } __attribute__((packed));
-/* hwrm_cfa_ntuple_filter_free */
-/* Description: Free an ntuple filter */
-/* Input (24 bytes) */
-struct hwrm_cfa_ntuple_filter_free_input {
- uint16_t req_type;
+/******************************
+ * hwrm_tunnel_dst_port_alloc *
+ ******************************/
+
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
 */
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
 /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
 */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
 /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- uint64_t resp_addr;
+ uint16_t target_id;
 /*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- uint64_t ntuple_filter_id;
- /* This value is an opaque id into CFA data structures. */
+ uint64_t resp_addr;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1
+ uint8_t unused_0;
+ /*
+ * This field represents the value of the L4 destination port used
+ * for the given tunnel type. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
+ *
+ * This field is in network byte order.
+ *
+ * A value of 0 shall fail the command.
+ */
+ uint16_t tunnel_dst_port_val;
+ uint8_t unused_1[4];
 } __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_cfa_ntuple_filter_free_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
 /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * Identifier of a tunnel L4 destination port value. Only applies to tunnel
+ * types that have L4 destination port parameters.
 */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t tunnel_dst_port_id;
+ uint8_t unused_0[5];
 /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
+ uint8_t valid;
 } __attribute__((packed));
-/* hwrm_cfa_ntuple_filter_cfg */
-/*
- * Description: Configure an ntuple filter with a new destination VNIC and/or
- * meter.
- */
-/* Input (48 bytes) */
-struct hwrm_cfa_ntuple_filter_cfg_input {
- uint16_t req_type;
+/*****************************
+ * hwrm_tunnel_dst_port_free *
+ *****************************/
+
+
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
 */
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
 /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
 */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
 /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- uint64_t resp_addr;
+ uint16_t target_id;
 /*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- uint32_t enables;
- /* This bit must be '1' for the new_dst_id field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID \
+ uint64_t resp_addr;
+ /* Tunnel Type.
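+ *
+ * For example, a minimal register-then-release sketch using the alloc
+ * and free commands above. hwrm_send() is the hypothetical helper used
+ * in the other sketches, and 4789 is the IANA-assigned VXLAN UDP port:
+ *
+ *   struct hwrm_tunnel_dst_port_alloc_input areq = { 0 };
+ *   struct hwrm_tunnel_dst_port_alloc_output aresp = { 0 };
+ *   areq.tunnel_type = HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
+ *   areq.tunnel_dst_port_val = rte_cpu_to_be_16(4789); // 0 would fail
+ *   hwrm_send(&areq, sizeof(areq), &aresp, sizeof(aresp));
+ *
+ *   struct hwrm_tunnel_dst_port_free_input freq = { 0 };
+ *   struct hwrm_tunnel_dst_port_free_output fresp = { 0 };
+ *   freq.tunnel_type = HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
+ *   freq.tunnel_dst_port_id = aresp.tunnel_dst_port_id; // opaque id
+ *   hwrm_send(&freq, sizeof(freq), &fresp, sizeof(fresp));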
 */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN \
 UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1
+ uint8_t unused_0;
+ /*
+ * Identifier of a tunnel L4 destination port value. Only applies to tunnel
+ * types that have L4 destination port parameters.
+ */
+ uint16_t tunnel_dst_port_id;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_1[7];
 /*
- * This bit must be '1' for the new_mirror_vnic_id field to be
- * configured.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ /* Number of received unicast packets */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of discarded packets on received path */
+ uint64_t rx_discard_pkts;
+ /* Number of dropped packets on received path */
+ uint64_t rx_drop_pkts;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of transmitted unicast packets */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of discarded packets on transmit path */
+ uint64_t tx_discard_pkts;
+ /* Number of dropped packets on transmit path */
+ uint64_t tx_drop_pkts;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of TPA packets */
+ uint64_t tpa_pkts;
+ /* Number of TPA bytes */
+ uint64_t tpa_bytes;
+ /* Number of TPA events */
+ uint64_t tpa_events;
+ /* Number of TPA aborts */
+ uint64_t tpa_aborts;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_stat_ctx_alloc *
+ ***********************/
+
+
+/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
+struct hwrm_stat_ctx_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
- UINT32_C(0x2)
+ uint64_t resp_addr;
+ /* This is the address for the statistics block. */
+ uint64_t stats_dma_addr;
 /*
- * This bit must be '1' for the new_meter_instance_id field to
- * be configured.
+ * The statistics block update period in ms,
+ * e.g. 250ms, 500ms, 750ms, 1000ms.
+ * If update_period_ms is 0, then the stats update
+ * shall never be done and the DMA address shall not be used.
+ * In this case, the stat block can only be read by
+ * the hwrm_stat_ctx_query command.
 */
- #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \
- UINT32_C(0x4)
- uint32_t unused_0;
- uint64_t ntuple_filter_id;
- /* This value is an opaque id into CFA data structures.
*/ - uint32_t new_dst_id; + uint32_t update_period_ms; /* - * If set, this value shall represent the new Logical VNIC ID of - * the destination VNIC for the RX path and new network port id - * of the destination port for the TX path. + * This field is used to specify statistics context specific + * configuration flags. */ - uint32_t new_mirror_vnic_id; - /* New Logical VNIC ID of the VNIC where traffic is mirrored. */ - uint16_t new_meter_instance_id; + uint8_t stat_ctx_flags; /* - * New meter to attach to the flow. Specifying the invalid - * instance ID is used to remove any existing meter from the - * flow. + * When this bit is set to '1', the statistics context shall be + * allocated for RoCE traffic only. In this case, traffic other + * than offloaded RoCE traffic shall not be included in this + * statistic context. + * When this bit is set to '0', the statistics context shall be + * used for the network traffic other than offloaded RoCE traffic. */ + #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1) + uint8_t unused_0[3]; +} __attribute__((packed)); + +/* hwrm_stat_ctx_alloc_output (size:128b/16B) */ +struct hwrm_stat_ctx_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This is the statistics context ID value. */ + uint32_t stat_ctx_id; + uint8_t unused_0[3]; /* - * A value of 0xfff is considered invalid and - * implies the instance is not configured. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \ - UINT32_C(0xffff) - uint16_t unused_1[3]; + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_cfa_ntuple_filter_cfg_output { - uint16_t error_code; +/********************** + * hwrm_stat_ctx_free * + **********************/ + + +/* hwrm_stat_ctx_free_input (size:192b/24B) */ +struct hwrm_stat_ctx_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
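+ *
+ * For example, a minimal allocate-then-free sketch for a statistics
+ * context. hwrm_send() is the hypothetical helper used in the other
+ * sketches, and stats_iova names the IOVA of a host ctx_hw_stats
+ * buffer (an assumption for illustration):
+ *
+ *   struct hwrm_stat_ctx_alloc_input areq = { 0 };
+ *   struct hwrm_stat_ctx_alloc_output aresp = { 0 };
+ *   areq.stats_dma_addr = rte_cpu_to_le_64(stats_iova);
+ *   areq.update_period_ms = rte_cpu_to_le_32(1000); // DMA refresh each second
+ *   hwrm_send(&areq, sizeof(areq), &aresp, sizeof(aresp));
+ *
+ *   struct hwrm_stat_ctx_free_input freq = { 0 };
+ *   struct hwrm_stat_ctx_free_output fresp = { 0 };
+ *   freq.stat_ctx_id = aresp.stat_ctx_id; // both little-endian on the wire
+ *   hwrm_send(&freq, sizeof(freq), &fresp, sizeof(fresp));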
 */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
 /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
-} __attribute__((packed));
-
-/* hwrm_cfa_em_flow_alloc */
-/*
- * Description: This is a generic Exact Match (EM) flow that uses fields from
- * L4/L3/L2 headers. The EM flows apply to transmit and receive traffic. All
- * L2/L3/L4 header fields are specified in network byte order. For each EM flow,
- * there is an associated set of actions specified. For tunneled packets, all
- * L2/L3/L4 fields specified are fields of inner headers unless otherwise
- * specified. # If a field specified in this command is not enabled as a valid
- * field, then that field shall not be used in matching packet header fields
- * against this EM flow entry.
- */
-/* Input (112 bytes) */
-struct hwrm_cfa_em_flow_alloc_input {
- uint16_t req_type;
+ uint16_t target_id;
 /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- uint16_t cmpl_ring;
+ uint64_t resp_addr;
+ /* ID of the statistics context that is being freed. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
+struct hwrm_stat_ctx_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This is the statistics context ID value. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[3];
 /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_stat_ctx_query *
+ ***********************/
+
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * Target ID of this command.
0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t resp_addr; + uint16_t cmpl_ring; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t flags; + uint16_t seq_id; /* - * Enumeration denoting the RX, TX type of the resource. This - * enumeration is used for resources that are similar for both - * TX and RX paths of the chip. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1) - /* tx path */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_TX \ - (UINT32_C(0x0) << 0) - /* rx path */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX \ - (UINT32_C(0x1) << 0) - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_LAST \ - CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX - /* - * Setting of this flag indicates enabling of a byte counter for - * a given flow. - */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_BYTE_CTR UINT32_C(0x2) - /* - * Setting of this flag indicates enabling of a packet counter - * for a given flow. - */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR UINT32_C(0x4) - /* - * Setting of this flag indicates de-capsulation action for the - * given flow. - */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DECAP UINT32_C(0x8) - /* - * Setting of this flag indicates encapsulation action for the - * given flow. - */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_ENCAP UINT32_C(0x10) - /* - * Setting of this flag indicates drop action. If this flag is - * not set, then it should be considered accept action. - */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x20) - /* - * Setting of this flag indicates that a meter is expected to be - * attached to this flow. This hint can be used when choosing - * the action record format required for the flow. - */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_METER UINT32_C(0x40) - uint32_t enables; - /* This bit must be '1' for the l2_filter_id field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID UINT32_C(0x1) - /* This bit must be '1' for the tunnel_type field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE UINT32_C(0x2) - /* This bit must be '1' for the tunnel_id field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID UINT32_C(0x4) - /* This bit must be '1' for the src_macaddr field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR UINT32_C(0x8) - /* This bit must be '1' for the dst_macaddr field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR UINT32_C(0x10) - /* This bit must be '1' for the ovlan_vid field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID UINT32_C(0x20) - /* This bit must be '1' for the ivlan_vid field to be configured. 
*/ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID UINT32_C(0x40) - /* This bit must be '1' for the ethertype field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE UINT32_C(0x80) - /* This bit must be '1' for the src_ipaddr field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR UINT32_C(0x100) - /* This bit must be '1' for the dst_ipaddr field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR UINT32_C(0x200) - /* This bit must be '1' for the ipaddr_type field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE UINT32_C(0x400) - /* This bit must be '1' for the ip_protocol field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL UINT32_C(0x800) - /* This bit must be '1' for the src_port field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT UINT32_C(0x1000) - /* This bit must be '1' for the dst_port field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT UINT32_C(0x2000) - /* This bit must be '1' for the dst_id field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x4000) + uint16_t target_id; /* - * This bit must be '1' for the mirror_vnic_id field to be - * configured. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ - UINT32_C(0x8000) + uint64_t resp_addr; + /* ID of the statistics context that is being queried. */ + uint32_t stat_ctx_id; + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_stat_ctx_query_output (size:1408b/176B) */ +struct hwrm_stat_ctx_query_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. 
*/ + uint16_t resp_len; + /* Number of transmitted unicast packets */ + uint64_t tx_ucast_pkts; + /* Number of transmitted multicast packets */ + uint64_t tx_mcast_pkts; + /* Number of transmitted broadcast packets */ + uint64_t tx_bcast_pkts; + /* Number of transmitted packets with error */ + uint64_t tx_err_pkts; + /* Number of dropped packets on transmit path */ + uint64_t tx_drop_pkts; + /* Number of transmitted bytes for unicast traffic */ + uint64_t tx_ucast_bytes; + /* Number of transmitted bytes for multicast traffic */ + uint64_t tx_mcast_bytes; + /* Number of transmitted bytes for broadcast traffic */ + uint64_t tx_bcast_bytes; + /* Number of received unicast packets */ + uint64_t rx_ucast_pkts; + /* Number of received multicast packets */ + uint64_t rx_mcast_pkts; + /* Number of received broadcast packets */ + uint64_t rx_bcast_pkts; + /* Number of received packets with error */ + uint64_t rx_err_pkts; + /* Number of dropped packets on received path */ + uint64_t rx_drop_pkts; + /* Number of received bytes for unicast traffic */ + uint64_t rx_ucast_bytes; + /* Number of received bytes for multicast traffic */ + uint64_t rx_mcast_bytes; + /* Number of received bytes for broadcast traffic */ + uint64_t rx_bcast_bytes; + /* Number of aggregated unicast packets */ + uint64_t rx_agg_pkts; + /* Number of aggregated unicast bytes */ + uint64_t rx_agg_bytes; + /* Number of aggregation events */ + uint64_t rx_agg_events; + /* Number of aborted aggregations */ + uint64_t rx_agg_aborts; + uint8_t unused_0[7]; /* - * This bit must be '1' for the encap_record_id field to be - * configured. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ENCAP_RECORD_ID \ - UINT32_C(0x10000) + uint8_t valid; +} __attribute__((packed)); + +/*************************** + * hwrm_stat_ctx_clr_stats * + ***************************/ + + +/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */ +struct hwrm_stat_ctx_clr_stats_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This bit must be '1' for the meter_instance_id field to be - * configured. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_METER_INSTANCE_ID \ - UINT32_C(0x20000) - uint64_t l2_filter_id; + uint16_t cmpl_ring; /* - * This value identifies a set of CFA data structures used for - * an L2 context. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint8_t tunnel_type; - /* Tunnel Type. 
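As a rough illustration of how a driver might drive the hwrm_stat_ctx_query exchange above: the caller fills the common header plus stat_ctx_id, and firmware returns the counter block. In this sketch hwrm_send() is a hypothetical transport helper (it would fill cmpl_ring/seq_id/target_id/resp_addr and do the doorbell/DMA handshake), endianness conversion of the little-endian HWRM fields is elided, and HWRM_STAT_CTX_QUERY is assumed to be the request-type constant this header defines for the command.

/*
 * Hedged sketch, not upstream code: query one statistics context and
 * pull out the Rx drop counter from the response.
 */
static int stat_ctx_query_example(uint32_t stat_ctx_id, uint64_t *rx_drops)
{
	struct hwrm_stat_ctx_query_input req = { 0 };
	struct hwrm_stat_ctx_query_output resp = { 0 };

	req.req_type = HWRM_STAT_CTX_QUERY;	/* request-type constant from this header */
	req.stat_ctx_id = stat_ctx_id;		/* context from hwrm_stat_ctx_alloc */
	if (hwrm_send(&req, sizeof(req), &resp, sizeof(resp)) != 0)	/* hypothetical */
		return -1;
	if (resp.error_code != 0)		/* firmware rejected the command */
		return -1;
	*rx_drops = resp.rx_drop_pkts;		/* one of the 64-bit counters above */
	return 0;
}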
*/ - /* Non-tunnel */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ - UINT32_C(0x0) - /* Virtual eXtensible Local Area Network (VXLAN) */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1) + uint16_t seq_id; /* - * Network Virtualization Generic Routing - * Encapsulation (NVGRE) + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NVGRE UINT32_C(0x2) + uint16_t target_id; /* - * Generic Routing Encapsulation (GRE) inside - * Ethernet payload + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3) - /* IP in IP */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPIP UINT32_C(0x4) - /* Generic Network Virtualization Encapsulation (Geneve) */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5) - /* Multi-Protocol Lable Switching (MPLS) */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_MPLS UINT32_C(0x6) - /* Stateless Transport Tunnel (STT) */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7) + uint64_t resp_addr; + /* ID of the statistics context that is being queried. */ + uint32_t stat_ctx_id; + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */ +struct hwrm_stat_ctx_clr_stats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Generic Routing Encapsulation (GRE) inside IP - * datagram payload + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8) + uint8_t valid; +} __attribute__((packed)); + +/******************** + * hwrm_pcie_qstats * + ********************/ + + +/* hwrm_pcie_qstats_input (size:256b/32B) */ +struct hwrm_pcie_qstats_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * IPV4 over virtual eXtensible Local Area - * Network (IPV4oVXLAN) + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 UINT32_C(0x9) - /* Any tunneled traffic */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ - UINT32_C(0xff) - uint8_t unused_0; - uint16_t unused_1; - uint32_t tunnel_id; + uint16_t cmpl_ring; /* - * Tunnel identifier. Virtual Network Identifier (VNI). Only - * valid with tunnel_types VXLAN, NVGRE, and Geneve. Only lower - * 24-bits of VNI field are used in setting up the filter. + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
 */
- uint8_t src_macaddr[6];
+ uint16_t seq_id;
 /*
- * This value indicates the source MAC address in the Ethernet
- * header.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- uint16_t meter_instance_id;
- /* The meter instance to attach to the flow. */
+ uint16_t target_id;
 /*
- * A value of 0xfff is considered invalid and
- * implies the instance is not configured.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID \
- UINT32_C(0xffff)
- uint8_t dst_macaddr[6];
+ uint64_t resp_addr;
 /*
- * This value indicates the destination MAC address in the
- * Ethernet header.
+ * The size of PCIe statistics block in bytes.
+ * Firmware will DMA the PCIe statistics to
+ * the host with this field size in the response.
 */
- uint16_t ovlan_vid;
+ uint16_t pcie_stat_size;
+ uint8_t unused_0[6];
 /*
- * This value indicates the VLAN ID of the outer VLAN tag in the
- * Ethernet header.
+ * This is the host address where
+ * PCIe statistics will be stored
 */
- uint16_t ivlan_vid;
+ uint64_t pcie_stat_host_addr;
+} __attribute__((packed));
+
+/* hwrm_pcie_qstats_output (size:128b/16B) */
+struct hwrm_pcie_qstats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The size of PCIe statistics block in bytes. */
+ uint16_t pcie_stat_size;
+ uint8_t unused_0[5];
 /*
- * This value indicates the VLAN ID of the inner VLAN tag in the
- * Ethernet header.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
- uint16_t ethertype;
- /* This value indicates the ethertype in the Ethernet header. */
- uint8_t ip_addr_type;
+ uint8_t valid;
+} __attribute__((packed));
+
+/* Port Tx Statistics Formats */
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ /* Total Number of 64 Bytes frames transmitted */
+ uint64_t tx_64b_frames;
+ /* Total Number of 65-127 Bytes frames transmitted */
+ uint64_t tx_65b_127b_frames;
+ /* Total Number of 128-255 Bytes frames transmitted */
+ uint64_t tx_128b_255b_frames;
+ /* Total Number of 256-511 Bytes frames transmitted */
+ uint64_t tx_256b_511b_frames;
+ /* Total Number of 512-1023 Bytes frames transmitted */
+ uint64_t tx_512b_1023b_frames;
+ /* Total Number of 1024-1518 Bytes frames transmitted */
+ uint64_t tx_1024b_1518_frames;
 /*
- * This value indicates the type of IP address. 4 - IPv4 6 -
- * IPv6 All others are invalid.
+ * Total Number of each good VLAN (excludes FCS errors)
+ * frame transmitted which is 1519 to 1522 bytes in length
+ * inclusive (excluding framing bits but including FCS bytes).
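Unlike most query commands, hwrm_pcie_qstats above returns its payload by DMA rather than inline: the caller supplies an IOVA-addressable buffer shaped like the pcie_ctx_hw_stats block defined further down. A hedged sketch follows, with the same hypothetical hwrm_send() transport assumption; rte_zmalloc(), rte_malloc_virt2iova() and rte_free() are DPDK helpers from rte_malloc.h.

/*
 * Hedged sketch, not upstream code: ask firmware to DMA the PCIe
 * counter block into a freshly allocated host buffer.
 */
static int pcie_qstats_example(struct pcie_ctx_hw_stats **stats_out)
{
	struct hwrm_pcie_qstats_input req = { 0 };
	struct hwrm_pcie_qstats_output resp = { 0 };
	struct pcie_ctx_hw_stats *buf;

	buf = rte_zmalloc("pcie_qstats", sizeof(*buf), 8);
	if (buf == NULL)
		return -1;
	req.req_type = HWRM_PCIE_QSTATS;		/* assumed request-type constant */
	req.pcie_stat_size = sizeof(*buf);		/* firmware DMAs at most this much */
	req.pcie_stat_host_addr = rte_malloc_virt2iova(buf);
	if (hwrm_send(&req, sizeof(req), &resp, sizeof(resp)) != 0 ||
	    resp.error_code != 0) {
		rte_free(buf);
		return -1;
	}
	/* resp.pcie_stat_size reports how many bytes were actually written. */
	*stats_out = buf;
	return 0;
}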
*/ - /* invalid */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN UINT32_C(0x0) - /* IPv4 */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 UINT32_C(0x4) - /* IPv6 */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6) - uint8_t ip_protocol; + uint64_t tx_good_vlan_frames; + /* Total Number of 1519-2047 Bytes frames transmitted */ + uint64_t tx_1519b_2047_frames; + /* Total Number of 2048-4095 Bytes frames transmitted */ + uint64_t tx_2048b_4095b_frames; + /* Total Number of 4096-9216 Bytes frames transmitted */ + uint64_t tx_4096b_9216b_frames; + /* Total Number of 9217-16383 Bytes frames transmitted */ + uint64_t tx_9217b_16383b_frames; + /* Total Number of good frames transmitted */ + uint64_t tx_good_frames; + /* Total Number of frames transmitted */ + uint64_t tx_total_frames; + /* Total number of unicast frames transmitted */ + uint64_t tx_ucast_frames; + /* Total number of multicast frames transmitted */ + uint64_t tx_mcast_frames; + /* Total number of broadcast frames transmitted */ + uint64_t tx_bcast_frames; + /* Total number of PAUSE control frames transmitted */ + uint64_t tx_pause_frames; /* - * The value of protocol filed in IP header. Applies to UDP and - * TCP traffic. 6 - TCP 17 - UDP + * Total number of PFC/per-priority PAUSE + * control frames transmitted */ - /* invalid */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN UINT32_C(0x0) - /* TCP */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_TCP UINT32_C(0x6) - /* UDP */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP UINT32_C(0x11) - uint8_t unused_2; - uint8_t unused_3; - uint32_t src_ipaddr[4]; + uint64_t tx_pfc_frames; + /* Total number of jabber frames transmitted */ + uint64_t tx_jabber_frames; + /* Total number of frames transmitted with FCS error */ + uint64_t tx_fcs_err_frames; + /* Total number of control frames transmitted */ + uint64_t tx_control_frames; + /* Total number of over-sized frames transmitted */ + uint64_t tx_oversz_frames; + /* Total number of frames with single deferral */ + uint64_t tx_single_dfrl_frames; + /* Total number of frames with multiple deferrals */ + uint64_t tx_multi_dfrl_frames; + /* Total number of frames with single collision */ + uint64_t tx_single_coll_frames; + /* Total number of frames with multiple collisions */ + uint64_t tx_multi_coll_frames; + /* Total number of frames with late collisions */ + uint64_t tx_late_coll_frames; + /* Total number of frames with excessive collisions */ + uint64_t tx_excessive_coll_frames; + /* Total number of fragmented frames transmitted */ + uint64_t tx_frag_frames; + /* Total number of transmit errors */ + uint64_t tx_err; + /* Total number of single VLAN tagged frames transmitted */ + uint64_t tx_tagged_frames; + /* Total number of double VLAN tagged frames transmitted */ + uint64_t tx_dbl_tagged_frames; + /* Total number of runt frames transmitted */ + uint64_t tx_runt_frames; + /* Total number of TX FIFO under runs */ + uint64_t tx_fifo_underruns; /* - * The value of source IP address to be used in filtering. For - * IPv4, first four bytes represent the IP address. + * Total number of PFC frames with PFC enabled bit for + * Pri 0 transmitted */ - uint32_t dst_ipaddr[4]; + uint64_t tx_pfc_ena_frames_pri0; /* - * big_endian = True The value of destination IP address to be - * used in filtering. For IPv4, first four bytes represent the - * IP address. 
+ * Total number of PFC frames with PFC enabled bit for + * Pri 1 transmitted */ - uint16_t src_port; + uint64_t tx_pfc_ena_frames_pri1; /* - * The value of source port to be used in filtering. Applies to - * UDP and TCP traffic. + * Total number of PFC frames with PFC enabled bit for + * Pri 2 transmitted */ - uint16_t dst_port; + uint64_t tx_pfc_ena_frames_pri2; /* - * The value of destination port to be used in filtering. - * Applies to UDP and TCP traffic. + * Total number of PFC frames with PFC enabled bit for + * Pri 3 transmitted */ - uint16_t dst_id; + uint64_t tx_pfc_ena_frames_pri3; /* - * If set, this value shall represent the Logical VNIC ID of the - * destination VNIC for the RX path and network port id of the - * destination port for the TX path. + * Total number of PFC frames with PFC enabled bit for + * Pri 4 transmitted */ - uint16_t mirror_vnic_id; - /* Logical VNIC ID of the VNIC where traffic is mirrored. */ - uint32_t encap_record_id; - /* Logical ID of the encapsulation record. */ - uint32_t unused_4; -} __attribute__((packed)); - -/* Output (24 bytes) */ -struct hwrm_cfa_em_flow_alloc_output { - uint16_t error_code; + uint64_t tx_pfc_ena_frames_pri4; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Total number of PFC frames with PFC enabled bit for + * Pri 5 transmitted */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t tx_pfc_ena_frames_pri5; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * Total number of PFC frames with PFC enabled bit for + * Pri 6 transmitted */ - uint64_t em_filter_id; - /* This value is an opaque id into CFA data structures. */ - uint32_t flow_id; + uint64_t tx_pfc_ena_frames_pri6; /* - * This is the ID of the flow associated with this filter. This - * value shall be used to match and associate the flow - * identifier returned in completion records. A value of - * 0xFFFFFFFF shall indicate no flow id. + * Total number of PFC frames with PFC enabled bit for + * Pri 7 transmitted */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint64_t tx_pfc_ena_frames_pri7; + /* Total number of EEE LPI Events on TX */ + uint64_t tx_eee_lpi_events; + /* EEE LPI Duration Counter on TX */ + uint64_t tx_eee_lpi_duration; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. 
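The 'valid' comment repeated through these output records is the ordering contract that makes polled completion safe: firmware writes the payload first and the valid byte last, so a poller that observes valid == 1 and then issues a read barrier can trust every earlier field. A hedged sketch of that pattern, using DPDK's rte_io_rmb() (rte_atomic.h) and rte_delay_us() (rte_cycles.h); a real driver would bound the wait by the negotiated command timeout.

/*
 * Hedged sketch, not upstream code: poll the 'valid' byte of a
 * response, then fence before consuming the payload.
 */
static int wait_resp_valid_example(volatile struct hwrm_stat_ctx_query_output *resp,
				   int timeout_us)
{
	while (timeout_us-- > 0) {
		if (resp->valid == 1) {
			/* No payload field may be read before 'valid' is seen. */
			rte_io_rmb();
			return 0;
		}
		rte_delay_us(1);
	}
	return -1;	/* firmware never finished writing the response */
}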
+ * Total number of Link Level Flow Control (LLFC) messages
+ * transmitted
 */
+ uint64_t tx_llfc_logical_msgs;
+ /* Total number of HCFC messages transmitted */
+ uint64_t tx_hcfc_msgs;
+ /* Total number of TX collisions */
+ uint64_t tx_total_collisions;
+ /* Total number of transmitted bytes */
+ uint64_t tx_bytes;
+ /* Total number of end-to-end HOL frames */
+ uint64_t tx_xthol_frames;
+ /* Total Tx Drops per Port reported by STATS block */
+ uint64_t tx_stat_discard;
+ /* Total Tx Error Drops per Port reported by STATS block */
+ uint64_t tx_stat_error;
 } __attribute__((packed));
-/* hwrm_cfa_em_flow_free */
-/* Description: Free an EM flow table entry */
-/* Input (24 bytes) */
-struct hwrm_cfa_em_flow_free_input {
- uint16_t req_type;
+/* Port Rx Statistics Formats */
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ /* Total Number of 64 Bytes frames received */
+ uint64_t rx_64b_frames;
+ /* Total Number of 65-127 Bytes frames received */
+ uint64_t rx_65b_127b_frames;
+ /* Total Number of 128-255 Bytes frames received */
+ uint64_t rx_128b_255b_frames;
+ /* Total Number of 256-511 Bytes frames received */
+ uint64_t rx_256b_511b_frames;
+ /* Total Number of 512-1023 Bytes frames received */
+ uint64_t rx_512b_1023b_frames;
+ /* Total Number of 1024-1518 Bytes frames received */
+ uint64_t rx_1024b_1518_frames;
 /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * Total Number of each good VLAN (excludes FCS errors)
+ * frame received which is 1519 to 1522 bytes in length
+ * inclusive (excluding framing bits but including FCS bytes).
 */
- uint16_t cmpl_ring;
+ uint64_t rx_good_vlan_frames;
+ /* Total Number of 1519-2047 Bytes frames received */
+ uint64_t rx_1519b_2047b_frames;
+ /* Total Number of 2048-4095 Bytes frames received */
+ uint64_t rx_2048b_4095b_frames;
+ /* Total Number of 4096-9216 Bytes frames received */
+ uint64_t rx_4096b_9216b_frames;
+ /* Total Number of 9217-16383 Bytes frames received */
+ uint64_t rx_9217b_16383b_frames;
+ /* Total number of frames received */
+ uint64_t rx_total_frames;
+ /* Total number of unicast frames received */
+ uint64_t rx_ucast_frames;
+ /* Total number of multicast frames received */
+ uint64_t rx_mcast_frames;
+ /* Total number of broadcast frames received */
+ uint64_t rx_bcast_frames;
+ /* Total number of received frames with FCS error */
+ uint64_t rx_fcs_err_frames;
+ /* Total number of control frames received */
+ uint64_t rx_ctrl_frames;
+ /* Total number of PAUSE frames received */
+ uint64_t rx_pause_frames;
+ /* Total number of PFC frames received */
+ uint64_t rx_pfc_frames;
 /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * Total number of frames received with an unsupported
+ * opcode
 */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint64_t rx_unsupported_opcode_frames;
 /*
- * Target ID of this command.
0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * Total number of frames received with an unsupported + * DA for pause and PFC */ - uint64_t resp_addr; + uint64_t rx_unsupported_da_pausepfc_frames; + /* Total number of frames received with an unsupported SA */ + uint64_t rx_wrong_sa_frames; + /* Total number of received packets with alignment error */ + uint64_t rx_align_err_frames; + /* Total number of received frames with out-of-range length */ + uint64_t rx_oor_len_frames; + /* Total number of received frames with error termination */ + uint64_t rx_code_err_frames; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. - */ - uint64_t em_filter_id; - /* This value is an opaque id into CFA data structures. */ -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_cfa_em_flow_free_output { - uint16_t error_code; + * Total number of received frames with a false carrier is + * detected during idle, as defined by RX_ER samples active + * and RXD is 0xE. The event is reported along with the + * statistics generated on the next received frame. Only + * one false carrier condition can be detected and logged + * between frames. + * + * Carrier event, valid for 10M/100M speed modes only. + */ + uint64_t rx_false_carrier_frames; + /* Total number of over-sized frames received */ + uint64_t rx_ovrsz_frames; + /* Total number of jabber packets received */ + uint64_t rx_jbr_frames; + /* Total number of received frames with MTU error */ + uint64_t rx_mtu_err_frames; + /* Total number of received frames with CRC match */ + uint64_t rx_match_crc_frames; + /* Total number of frames received promiscuously */ + uint64_t rx_promiscuous_frames; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Total number of received frames with one or two VLAN + * tags */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t rx_tagged_frames; + /* Total number of received frames with two VLAN tags */ + uint64_t rx_double_tagged_frames; + /* Total number of truncated frames received */ + uint64_t rx_trunc_frames; + /* Total number of good frames (without errors) received */ + uint64_t rx_good_frames; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 0 */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint64_t rx_pfc_xon2xoff_frames_pri0; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 1 */ -} __attribute__((packed)); - -/* hwrm_cfa_em_flow_cfg */ -/* - * Description: Configure an EM flow with a new destination VNIC and/or meter. 
- */ -/* Input (48 bytes) */ -struct hwrm_cfa_em_flow_cfg_input { - uint16_t req_type; + uint64_t rx_pfc_xon2xoff_frames_pri1; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 2 */ - uint16_t cmpl_ring; + uint64_t rx_pfc_xon2xoff_frames_pri2; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 3 */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint64_t rx_pfc_xon2xoff_frames_pri3; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 4 */ - uint64_t resp_addr; + uint64_t rx_pfc_xon2xoff_frames_pri4; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 5 */ - uint32_t enables; - /* This bit must be '1' for the new_dst_id field to be configured. */ - #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID UINT32_C(0x1) + uint64_t rx_pfc_xon2xoff_frames_pri5; /* - * This bit must be '1' for the new_mirror_vnic_id field to be - * configured. + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 6 */ - #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ - UINT32_C(0x2) + uint64_t rx_pfc_xon2xoff_frames_pri6; /* - * This bit must be '1' for the new_meter_instance_id field to - * be configured. + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 7 */ - #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \ - UINT32_C(0x4) - uint32_t unused_0; - uint64_t em_filter_id; - /* This value is an opaque id into CFA data structures. */ - uint32_t new_dst_id; + uint64_t rx_pfc_xon2xoff_frames_pri7; /* - * If set, this value shall represent the new Logical VNIC ID of - * the destination VNIC for the RX path and network port id of - * the destination port for the TX path. + * Total number of received PFC frames with PFC enabled + * bit for Pri 0 */ - uint32_t new_mirror_vnic_id; - /* New Logical VNIC ID of the VNIC where traffic is mirrored. */ - uint16_t new_meter_instance_id; + uint64_t rx_pfc_ena_frames_pri0; /* - * New meter to attach to the flow. Specifying the invalid - * instance ID is used to remove any existing meter from the - * flow. + * Total number of received PFC frames with PFC enabled + * bit for Pri 1 */ + uint64_t rx_pfc_ena_frames_pri1; /* - * A value of 0xfff is considered invalid and - * implies the instance is not configured. 
+ * Total number of received PFC frames with PFC enabled + * bit for Pri 2 */ - #define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \ - UINT32_C(0xffff) - uint16_t unused_1[3]; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_cfa_em_flow_cfg_output { - uint16_t error_code; + uint64_t rx_pfc_ena_frames_pri2; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Total number of received PFC frames with PFC enabled + * bit for Pri 3 */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t rx_pfc_ena_frames_pri3; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * Total number of received PFC frames with PFC enabled + * bit for Pri 4 */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint64_t rx_pfc_ena_frames_pri4; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * Total number of received PFC frames with PFC enabled + * bit for Pri 5 */ -} __attribute__((packed)); - -/* hwrm_tunnel_dst_port_query */ -/* - * Description: This function is called by a driver to query tunnel type - * specific destination port configuration. - */ -/* Input (24 bytes) */ -struct hwrm_tunnel_dst_port_query_input { - uint16_t req_type; + uint64_t rx_pfc_ena_frames_pri5; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * Total number of received PFC frames with PFC enabled + * bit for Pri 6 */ - uint16_t cmpl_ring; + uint64_t rx_pfc_ena_frames_pri6; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Total number of received PFC frames with PFC enabled + * bit for Pri 7 */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint64_t rx_pfc_ena_frames_pri7; + /* Total Number of frames received with SCH CRC error */ + uint64_t rx_sch_crc_err_frames; + /* Total Number of under-sized frames received */ + uint64_t rx_undrsz_frames; + /* Total Number of fragmented frames received */ + uint64_t rx_frag_frames; + /* Total number of RX EEE LPI Events */ + uint64_t rx_eee_lpi_events; + /* EEE LPI Duration Counter on RX */ + uint64_t rx_eee_lpi_duration; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * Total number of physical type Link Level Flow Control + * (LLFC) messages received */ - uint64_t resp_addr; + uint64_t rx_llfc_physical_msgs; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. 
+ * Total number of logical type Link Level Flow Control
+ * (LLFC) messages received
 */
- uint8_t tunnel_type;
- /* Tunnel Type. */
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \
- UINT32_C(0x1)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \
- UINT32_C(0x5)
+ uint64_t rx_llfc_logical_msgs;
 /*
- * IPV4 over virtual eXtensible Local Area
- * Network (IPV4oVXLAN)
+ * Total number of logical type Link Level Flow Control
+ * (LLFC) messages received with CRC error
 */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 \
- UINT32_C(0x9)
- uint8_t unused_0[7];
+ uint64_t rx_llfc_msgs_with_crc_err;
+ /* Total number of HCFC messages received */
+ uint64_t rx_hcfc_msgs;
+ /* Total number of HCFC messages received with CRC error */
+ uint64_t rx_hcfc_msgs_with_crc_err;
+ /* Total number of received bytes */
+ uint64_t rx_bytes;
+ /* Total number of bytes received in runt frames */
+ uint64_t rx_runt_bytes;
+ /* Total number of runt frames received */
+ uint64_t rx_runt_frames;
+ /* Total Rx Discards per Port reported by STATS block */
+ uint64_t rx_stat_discard;
+ uint64_t rx_stat_err;
 } __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_tunnel_dst_port_query_output {
- uint16_t error_code;
+/* Port Rx Statistics extended Formats */
+/* rx_port_stats_ext (size:320b/40B) */
+struct rx_port_stats_ext {
+ /* Number of times link state changed to down */
+ uint64_t link_down_events;
+ /* Number of times the idle rings with pause bit are found */
+ uint64_t continuous_pause_events;
+ /* Number of times the active rings pause bit resumed back */
+ uint64_t resume_pause_events;
+ /* Number of times, the ROCE cos queue PFC is disabled to avoid pause flood/burst */
+ uint64_t continuous_roce_pause_events;
+ /* Number of times, the ROCE cos queue PFC is enabled back */
+ uint64_t resume_roce_pause_events;
+} __attribute__((packed));
+
+/* PCIe Statistics Formats */
+/* pcie_ctx_hw_stats (size:768b/96B) */
+struct pcie_ctx_hw_stats {
+ /* Number of physical layer receiver errors */
+ uint64_t pcie_pl_signal_integrity;
+ /* Number of DLLP CRC errors detected by Data Link Layer */
+ uint64_t pcie_dl_signal_integrity;
+ /*
+ * Number of TLP LCRC and sequence number errors detected
+ * by Data Link Layer
+ */
+ uint64_t pcie_tl_signal_integrity;
+ /* Number of times LTSSM entered Recovery state */
+ uint64_t pcie_link_integrity;
+ /* Number of TLP bytes that have been transmitted */
+ uint64_t pcie_tx_traffic_rate;
+ /* Number of TLP bytes that have been received */
+ uint64_t pcie_rx_traffic_rate;
+ /* Number of DLLP bytes that have been transmitted */
+ uint64_t pcie_tx_dllp_statistics;
+ /* Number of DLLP bytes that have been received */
+ uint64_t pcie_rx_dllp_statistics;
+ /*
+ * Number of times spent in each phase of gen3
+ * equalization
+ */
+ uint64_t pcie_equalization_time;
+ /* Records the last 16 transitions of the LTSSM */
+ uint32_t pcie_ltssm_histogram[4];
+ /*
+ * Record the last 8 reasons on why LTSSM transitioned
+ * to Recovery
+ */
+ uint64_t pcie_recovery_histogram;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_exec_fwd_resp *
+ **********************/
+
+
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
+struct hwrm_exec_fwd_resp_input {
+ /* The HWRM command request type.
*/ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t tunnel_dst_port_id; + uint16_t seq_id; /* - * This field represents the identifier of L4 destination port - * used for the given tunnel type. This field is valid for - * specific tunnel types that use layer 4 (e.g. UDP) transports - * for tunneling. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t tunnel_dst_port_val; + uint16_t target_id; /* - * This field represents the value of L4 destination port - * identified by tunnel_dst_port_id. This field is valid for - * specific tunnel types that use layer 4 (e.g. UDP) transports - * for tunneling. This field is in network byte order. A value - * of 0 means that the destination port is not configured. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint64_t resp_addr; + /* + * This is an encapsulated request. This request should + * be executed by the HWRM and the response should be + * provided in the response buffer inside the encapsulated + * request. + */ + uint32_t encap_request[26]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This value indicates the target id of the response to + * the encapsulated request. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM */ + uint16_t encap_resp_target_id; + uint8_t unused_0[6]; } __attribute__((packed)); -/* hwrm_tunnel_dst_port_alloc */ -/* - * Description: This function is called by a driver to allocate l4 destination - * port for a specific tunnel type. The destination port value is provided in - * the input. If the HWRM supports only one global destination port for a tunnel - * type, then the HWRM shall keep track of its usage as described below. # The - * first caller that allocates a destination port shall always succeed and the - * HWRM shall save the destination port configuration for that tunnel type and - * increment the usage count to 1. 
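hwrm_exec_fwd_resp above is the PF-side relay for requests trapped from a VF: the original request is copied verbatim into encap_request[] (104 bytes here) and encap_resp_target_id names the function that should receive the response. A hedged sketch, with vf_fid taken from the trap context, memcpy from string.h, and the same hypothetical hwrm_send() wrapper:

/*
 * Hedged sketch, not upstream code: forward a VF's trapped HWRM
 * request to firmware for execution on the VF's behalf.
 */
static int exec_fwd_resp_example(uint16_t vf_fid,
				 const void *vf_req, size_t vf_req_len)
{
	struct hwrm_exec_fwd_resp_input req = { 0 };
	struct hwrm_exec_fwd_resp_output resp = { 0 };

	if (vf_req_len > sizeof(req.encap_request))
		return -1;			/* too large to embed */
	req.req_type = HWRM_EXEC_FWD_RESP;	/* assumed request-type constant */
	memcpy(req.encap_request, vf_req, vf_req_len);
	req.encap_resp_target_id = vf_fid;	/* response is delivered to the VF */
	if (hwrm_send(&req, sizeof(req), &resp, sizeof(resp)) != 0)
		return -1;
	return resp.error_code == 0 ? 0 : -1;
}

The hwrm_reject_fwd_resp command that follows uses the identical layout; the only difference is that firmware generates an error response to the encapsulated request instead of executing it.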
# Subsequent callers allocating the same - * destination port for that tunnel type shall succeed and the HWRM shall - * increment the usage count for that port for each subsequent caller that - * succeeds. # Any subsequent caller trying to allocate a different destination - * port for that tunnel type shall fail until the usage count for the original - * destination port goes to zero. # A caller that frees a port will cause the - * usage count for that port to decrement. - */ -/* Input (24 bytes) */ -struct hwrm_tunnel_dst_port_alloc_input { - uint16_t req_type; +/* hwrm_exec_fwd_resp_output (size:128b/16B) */ +struct hwrm_exec_fwd_resp_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t cmpl_ring; + uint8_t valid; +} __attribute__((packed)); + +/************************ + * hwrm_reject_fwd_resp * + ************************/ + + +/* hwrm_reject_fwd_resp_input (size:1024b/128B) */ +struct hwrm_reject_fwd_resp_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint8_t tunnel_type; - /* Tunnel Type. 
*/ - /* Virtual eXtensible Local Area Network (VXLAN) */ - #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1) - /* Generic Network Virtualization Encapsulation (Geneve) */ - #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ - UINT32_C(0x5) + uint64_t resp_addr; /* - * IPV4 over virtual eXtensible Local Area - * Network (IPV4oVXLAN) + * This is an encapsulated request. This request should + * be rejected by the HWRM and the error response should be + * provided in the response buffer inside the encapsulated + * request. */ - #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ - UINT32_C(0x9) - uint8_t unused_0; - uint16_t tunnel_dst_port_val; + uint32_t encap_request[26]; /* - * This field represents the value of L4 destination port used - * for the given tunnel type. This field is valid for specific - * tunnel types that use layer 4 (e.g. UDP) transports for - * tunneling. This field is in network byte order. A value of 0 - * shall fail the command. + * This value indicates the target id of the response to + * the encapsulated request. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM */ - uint32_t unused_1; + uint16_t encap_resp_target_id; + uint8_t unused_0[6]; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_tunnel_dst_port_alloc_output { - uint16_t error_code; +/* hwrm_reject_fwd_resp_output (size:128b/16B) */ +struct hwrm_reject_fwd_resp_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint8_t valid; +} __attribute__((packed)); + +/***************** + * hwrm_fwd_resp * + *****************/ + + +/* hwrm_fwd_resp_input (size:1024b/128B) */ +struct hwrm_fwd_resp_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t tunnel_dst_port_id; + uint16_t seq_id; /* - * Identifier of a tunnel L4 destination port value. Only - * applies to tunnel types that has l4 destination port - * parameters. 
+ * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t unused_4; - uint8_t valid; + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ + uint64_t resp_addr; + /* + * This value indicates the target id of the encapsulated + * response. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM + */ + uint16_t encap_resp_target_id; + /* + * This value indicates the completion ring the encapsulated + * response will be optionally completed on. If the value is + * -1, then no CR completion shall be generated for the + * encapsulated response. Any other value must be a + * valid CR ring_id value. If a valid encap_resp_cmpl_ring + * is provided, then a CR completion shall be generated for + * the encapsulated response. + */ + uint16_t encap_resp_cmpl_ring; + /* This field indicates the length of encapsulated response. */ + uint16_t encap_resp_len; + uint8_t unused_0; + uint8_t unused_1; + /* + * This is the host address where the encapsulated response + * will be written. + * This area must be 16B aligned and must be cleared to zero + * before the original request is made. + */ + uint64_t encap_resp_addr; + /* This is an encapsulated response. */ + uint32_t encap_resp[24]; } __attribute__((packed)); -/* hwrm_tunnel_dst_port_free */ -/* - * Description: This function is called by a driver to free l4 destination port - * for a specific tunnel type. - */ -/* Input (24 bytes) */ -struct hwrm_tunnel_dst_port_free_input { - uint16_t req_type; +/* hwrm_fwd_resp_output (size:128b/16B) */ +struct hwrm_fwd_resp_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t cmpl_ring; + uint8_t valid; +} __attribute__((packed)); + +/***************************** + * hwrm_fwd_async_event_cmpl * + *****************************/ + + +/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */ +struct hwrm_fwd_async_event_cmpl_input { + /* The HWRM command request type. 
*/ + uint16_t req_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t cmpl_ring; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t resp_addr; + uint16_t seq_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint8_t tunnel_type; - /* Tunnel Type. */ - /* Virtual eXtensible Local Area Network (VXLAN) */ - #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1) - /* Generic Network Virtualization Encapsulation (Geneve) */ - #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5) + uint16_t target_id; /* - * IPV4 over virtual eXtensible Local Area - * Network (IPV4oVXLAN) + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \ - UINT32_C(0x9) - uint8_t unused_0; - uint16_t tunnel_dst_port_id; + uint64_t resp_addr; /* - * Identifier of a tunnel L4 destination port value. Only - * applies to tunnel types that has l4 destination port - * parameters. + * This value indicates the target id of the encapsulated + * asynchronous event. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - Broadcast to all children VFs (only applicable when + * a PF is the requester) + */ + uint16_t encap_async_event_target_id; + uint8_t unused_0[6]; + /* This is an encapsulated asynchronous event completion. */ + uint32_t encap_async_event_cmpl[4]; +} __attribute__((packed)); + +/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */ +struct hwrm_fwd_async_event_cmpl_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
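The companion hwrm_fwd_async_event_cmpl lets a PF inject an asynchronous event completion toward its VFs; per the comment above, target id 0xFFFF broadcasts to all child VFs when the requester is a PF. A hedged sketch under the same hwrm_send() assumption:

/*
 * Hedged sketch, not upstream code: broadcast a prebuilt 16-byte async
 * event completion record to every child VF.
 */
static int fwd_async_event_example(const uint32_t event[4])
{
	struct hwrm_fwd_async_event_cmpl_input req = { 0 };
	struct hwrm_fwd_async_event_cmpl_output resp = { 0 };

	req.req_type = HWRM_FWD_ASYNC_EVENT_CMPL;	/* assumed constant */
	req.encap_async_event_target_id = 0xFFFF;	/* broadcast; PF requesters only */
	memcpy(req.encap_async_event_cmpl, event,
	       sizeof(req.encap_async_event_cmpl));
	if (hwrm_send(&req, sizeof(req), &resp, sizeof(resp)) != 0)
		return -1;
	return resp.error_code == 0 ? 0 : -1;
}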
 */
- uint32_t unused_1;
+ uint8_t valid;
 } __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_tunnel_dst_port_free_output {
- uint16_t error_code;
+/**************************
+ * hwrm_nvm_raw_write_blk *
+ **************************/
+
+
+/* hwrm_nvm_raw_write_blk_input (size:256b/32B) */
+struct hwrm_nvm_raw_write_blk_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
 */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
 /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
 */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
 /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
-} __attribute__((packed));
-
-/* hwrm_stat_ctx_alloc */
-/*
- * Description: This command allocates and does basic preparation for a stat
- * context.
- */
-/* Input (32 bytes) */
-struct hwrm_stat_ctx_alloc_input {
- uint16_t req_type;
+ uint16_t target_id;
 /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- uint16_t cmpl_ring;
+ uint64_t resp_addr;
 /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * 64-bit Host Source Address.
+ * This is the location of the source data to be written.
 */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint64_t host_src_addr;
 /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * 32-bit Destination Address.
+ * This is the NVRAM byte-offset where the source data will be written to.
 */
- uint64_t resp_addr;
+ uint32_t dest_addr;
+ /* Length of data to be written, in bytes.
*/ + uint32_t len; +} __attribute__((packed)); + +/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */ +struct hwrm_nvm_raw_write_blk_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t stats_dma_addr; - /* This is the address for statistic block. */ - uint32_t update_period_ms; + uint8_t valid; +} __attribute__((packed)); + +/***************** + * hwrm_nvm_read * + *****************/ + + +/* hwrm_nvm_read_input (size:320b/40B) */ +struct hwrm_nvm_read_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * The statistic block update period in ms. e.g. 250ms, 500ms, - * 750ms, 1000ms. If update_period_ms is 0, then the stats - * update shall be never done and the DMA address shall not be - * used. In this case, the stat block can only be read by - * hwrm_stat_ctx_query command. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint8_t stat_ctx_flags; + uint16_t cmpl_ring; /* - * This field is used to specify statistics context specific - * configuration flags. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ + uint16_t seq_id; /* - * When this bit is set to '1', the statistics context shall be - * allocated for RoCE traffic only. In this case, traffic other - * than offloaded RoCE traffic shall not be included in this - * statistic context. When this bit is set to '0', the - * statistics context shall be used for the network traffic - * other than offloaded RoCE traffic. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1) - uint8_t unused_0[3]; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_stat_ctx_alloc_output { - uint16_t error_code; + uint16_t target_id; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t resp_addr; /* - * This field is the length of the response in bytes. 
The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * 64-bit Host Destination Address. + * This is the host address where the data will be written to. */ - uint32_t stat_ctx_id; - /* This is the statistics context ID value. */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint64_t host_dest_addr; + /* The 0-based index of the directory entry. */ + uint16_t dir_idx; + uint8_t unused_0[2]; + /* The NVRAM byte-offset to read from. */ + uint32_t offset; + /* The length of the data to be read, in bytes. */ + uint32_t len; + uint8_t unused_1[4]; +} __attribute__((packed)); + +/* hwrm_nvm_read_output (size:128b/16B) */ +struct hwrm_nvm_read_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_stat_ctx_free */ -/* Description: This command is used to free a stat context. */ -/* Input (24 bytes) */ -struct hwrm_stat_ctx_free_input { - uint16_t req_type; +/********************* + * hwrm_nvm_raw_dump * + *********************/ + + +/* hwrm_nvm_raw_dump_input (size:256b/32B) */ +struct hwrm_nvm_raw_dump_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. 
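+ *
+ * Illustrative usage sketch (not part of the generated interface), under
+ * the same assumptions as the hwrm_nvm_raw_write_blk sketch above:
+ * reading read_len bytes of directory entry idx into a host buffer at
+ * data_iova:
+ *
+ *     struct hwrm_nvm_read_input req = { 0 };
+ *
+ *     req.req_type = rte_cpu_to_le_16(HWRM_NVM_READ);
+ *     req.resp_addr = rte_cpu_to_le_64(resp_iova);
+ *     req.host_dest_addr = rte_cpu_to_le_64(data_iova);
+ *     req.dir_idx = rte_cpu_to_le_16(idx);     // 0-based directory index
+ *     req.offset = rte_cpu_to_le_32(0);        // start of the item
+ *     req.len = rte_cpu_to_le_32(read_len);
+ *     rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ *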
This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t stat_ctx_id; - /* ID of the statistics context that is being queried. */ - uint32_t unused_0; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_stat_ctx_free_output { - uint16_t error_code; + uint64_t resp_addr; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * 64-bit Host Destination Address. + * This is the host address where the data will be written to. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t host_dest_addr; + /* 32-bit NVRAM byte-offset to read from. */ + uint32_t offset; + /* Total length of NVRAM contents to be read, in bytes. */ + uint32_t len; +} __attribute__((packed)); + +/* hwrm_nvm_raw_dump_output (size:128b/16B) */ +struct hwrm_nvm_raw_dump_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint32_t stat_ctx_id; - /* This is the statistics context ID value. */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint8_t valid; +} __attribute__((packed)); + +/**************************** + * hwrm_nvm_get_dir_entries * + ****************************/ + + +/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */ +struct hwrm_nvm_get_dir_entries_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ -} __attribute__((packed)); - -/* hwrm_stat_ctx_query */ -/* Description: This command returns statistics of a context. */ -/* Input (24 bytes) */ -struct hwrm_stat_ctx_query_input { - uint16_t req_type; + uint16_t cmpl_ring; /* - * This value indicates what type of request this is. The format for the - * rest of the command is determined by this field. + * The sequence ID is used by the driver for tracking multiple + * commands. 
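+ *
+ * Illustrative usage sketch (not part of the generated interface), same
+ * assumptions as the sketches above. Unlike hwrm_nvm_read, which is keyed
+ * by a directory index, hwrm_nvm_raw_dump reads from a raw NVRAM
+ * byte-offset, independent of the directory layout:
+ *
+ *     struct hwrm_nvm_raw_dump_input req = { 0 };
+ *
+ *     req.req_type = rte_cpu_to_le_16(HWRM_NVM_RAW_DUMP);
+ *     req.resp_addr = rte_cpu_to_le_64(resp_iova);
+ *     req.host_dest_addr = rte_cpu_to_le_64(dump_iova);
+ *     req.offset = rte_cpu_to_le_32(0);        // start of NVRAM
+ *     req.len = rte_cpu_to_le_32(dump_len);
+ *     rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ *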
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t cmpl_ring; + uint16_t seq_id; /* - * This value indicates the what completion ring the request will be - * optionally completed on. If the value is -1, then no CR completion - * will be generated. Any other value must be a valid CR ring_id value - * for this function. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t target_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t resp_addr; + uint64_t resp_addr; /* - * This is the host address where the response will be written when the - * request is complete. This area must be 16B aligned and must be - * cleared to zero before the request is made. + * 64-bit Host Destination Address. + * This is the host address where the directory will be written. */ - uint32_t stat_ctx_id; - /* ID of the statistics context that is being queried. */ - uint32_t unused_0; + uint64_t host_dest_addr; } __attribute__((packed)); -/* Output (176 bytes) */ -struct hwrm_stat_ctx_query_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in parameters, - * and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; - /* - * This field is the length of the response in bytes. The last byte of - * the response is a valid flag that will read as '1' when the command - * has been completely written to memory. 
- */ - uint64_t tx_ucast_pkts; - /* Number of transmitted unicast packets */ - uint64_t tx_mcast_pkts; - /* Number of transmitted multicast packets */ - uint64_t tx_bcast_pkts; - /* Number of transmitted broadcast packets */ - uint64_t tx_err_pkts; - /* Number of transmitted packets with error */ - uint64_t tx_drop_pkts; - /* Number of dropped packets on transmit path */ - uint64_t tx_ucast_bytes; - /* Number of transmitted bytes for unicast traffic */ - uint64_t tx_mcast_bytes; - /* Number of transmitted bytes for multicast traffic */ - uint64_t tx_bcast_bytes; - /* Number of transmitted bytes for broadcast traffic */ - uint64_t rx_ucast_pkts; - /* Number of received unicast packets */ - uint64_t rx_mcast_pkts; - /* Number of received multicast packets */ - uint64_t rx_bcast_pkts; - /* Number of received broadcast packets */ - uint64_t rx_err_pkts; - /* Number of received packets with error */ - uint64_t rx_drop_pkts; - /* Number of dropped packets on received path */ - uint64_t rx_ucast_bytes; - /* Number of received bytes for unicast traffic */ - uint64_t rx_mcast_bytes; - /* Number of received bytes for multicast traffic */ - uint64_t rx_bcast_bytes; - /* Number of received bytes for broadcast traffic */ - uint64_t rx_agg_pkts; - /* Number of aggregated unicast packets */ - uint64_t rx_agg_bytes; - /* Number of aggregated unicast bytes */ - uint64_t rx_agg_events; - /* Number of aggregation events */ - uint64_t rx_agg_aborts; - /* Number of aborted aggregations */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; +/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */ +struct hwrm_nvm_get_dir_entries_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field is used in Output records to indicate that the output is - * completely written to RAM. This field should be read as '1' to - * indicate that the output has been completely written. When writing a - * command completion or response to an internal processor, the order of - * writes has to be such that this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_stat_ctx_clr_stats */ -/* Description: This command clears statistics of a context. */ -/* Input (24 bytes) */ -struct hwrm_stat_ctx_clr_stats_input { - uint16_t req_type; +/************************* + * hwrm_nvm_get_dir_info * + *************************/ + + +/* hwrm_nvm_get_dir_info_input (size:128b/16B) */ +struct hwrm_nvm_get_dir_info_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. 
*/ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t stat_ctx_id; - /* ID of the statistics context that is being queried. */ - uint32_t unused_0; + uint64_t resp_addr; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_stat_ctx_clr_stats_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; - /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. - */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; +/* hwrm_nvm_get_dir_info_output (size:192b/24B) */ +struct hwrm_nvm_get_dir_info_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of directory entries in the directory. */ + uint32_t entries; + /* Size of each directory entry, in bytes. */ + uint32_t entry_length; + uint8_t unused_0[7]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
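+ *
+ * Illustrative usage sketch (not part of the generated interface), same
+ * assumptions as the sketches above; rte_zmalloc(), rte_le_to_cpu_32()
+ * and rte_malloc_virt2iova() are standard DPDK. The entries and
+ * entry_length fields returned here size the host buffer for a following
+ * hwrm_nvm_get_dir_entries request:
+ *
+ *     struct hwrm_nvm_get_dir_info_output *info = (void *)resp_buf;
+ *     struct hwrm_nvm_get_dir_entries_input req = { 0 };
+ *     uint32_t dir_len;
+ *     void *dir_buf;
+ *
+ *     // ... issue HWRM_NVM_GET_DIR_INFO, poll info->valid ...
+ *     dir_len = rte_le_to_cpu_32(info->entries) *
+ *               rte_le_to_cpu_32(info->entry_length);
+ *     dir_buf = rte_zmalloc("nvm_dir", dir_len, 0);
+ *     req.req_type = rte_cpu_to_le_16(HWRM_NVM_GET_DIR_ENTRIES);
+ *     req.resp_addr = rte_cpu_to_le_64(resp_iova);
+ *     req.host_dest_addr =
+ *             rte_cpu_to_le_64(rte_malloc_virt2iova(dir_buf));
+ *     rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ *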
*/ + uint8_t valid; } __attribute__((packed)); -/* hwrm_exec_fwd_resp */ -/* - * Description: This command is used to send an encapsulated request to the - * HWRM. This command instructs the HWRM to execute the request and forward the - * response of the encapsulated request to the location specified in the - * original request that is encapsulated. The target id of this command shall be - * set to 0xFFFF (HWRM). The response location in this command shall be used to - * acknowledge the receipt of the encapsulated request and forwarding of the - * response. - */ -/* Input (128 bytes) */ -struct hwrm_exec_fwd_resp_input { - uint16_t req_type; +/****************** + * hwrm_nvm_write * + ******************/ + + +/* hwrm_nvm_write_input (size:384b/48B) */ +struct hwrm_nvm_write_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t encap_request[26]; + uint64_t resp_addr; /* - * This is an encapsulated request. This request should be - * executed by the HWRM and the response should be provided in - * the response buffer inside the encapsulated request. + * 64-bit Host Source Address. + * This is where the source data is. */ - uint16_t encap_resp_target_id; + uint64_t host_src_addr; + /* The Directory Entry Type (valid values are defined in the bnxnvm_directory_type enum defined in the file bnxnvm_defs.h). */ + uint16_t dir_type; /* - * This value indicates the target id of the response to the - * encapsulated request. 0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - - * HWRM + * Directory ordinal. + * The 0-based instance of the combined Directory Entry Type and Extension. 
*/ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_exec_fwd_resp_output { - uint16_t error_code; + uint16_t dir_ordinal; + /* The Directory Entry Extension flags (see BNX_DIR_EXT_* in the file bnxnvm_defs.h). */ + uint16_t dir_ext; + /* Directory Entry Attribute flags (see BNX_DIR_ATTR_* in the file bnxnvm_defs.h). */ + uint16_t dir_attr; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Length of data to write, in bytes. May be less than or equal to the allocated size for the directory entry. + * The data length stored in the directory entry will be updated to reflect this value once the write is complete. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint32_t dir_data_length; + /* Option. */ + uint16_t option; + uint16_t flags; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * When this bit is '1', the original active image + * will not be removed. TBD: what purpose is this? */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG \ + UINT32_C(0x1) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The requested length of the allocated NVM for the item, in bytes. This value may be greater than or equal to the specified data length (dir_data_length). + * If this value is less than the specified data length, it will be ignored. + * The response will contain the actual allocated item length, which may be greater than the requested item length. + * The purpose for allocating more than the required number of bytes for an item's data is to pre-allocate extra storage (padding) to accommodate + * the potential future growth of an item (e.g. upgraded firmware with a size increase, log growth, expanded configuration data). */ + uint32_t dir_item_length; + uint32_t unused_0; } __attribute__((packed)); -/* hwrm_reject_fwd_resp */ -/* - * Description: This command is used to send an encapsulated request to the - * HWRM. This command instructs the HWRM to reject the request and forward the - * error response of the encapsulated request to the location specified in the - * original request that is encapsulated. The target id of this command shall be - * set to 0xFFFF (HWRM). The response location in this command shall be used to - * acknowledge the receipt of the encapsulated request and forwarding of the - * response. - */ -/* Input (128 bytes) */ -struct hwrm_reject_fwd_resp_input { - uint16_t req_type; +/* hwrm_nvm_write_output (size:128b/16B) */ +struct hwrm_nvm_write_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. 
*/ + uint16_t resp_len; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * Length of the allocated NVM for the item, in bytes. The value may be greater than or equal to the specified data length or the requested item length. + * The actual item length used when creating a new directory entry will be a multiple of an NVM block size. */ - uint16_t cmpl_ring; + uint32_t dir_item_length; + /* The directory index of the created or modified item. */ + uint16_t dir_idx; + uint8_t unused_0; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_nvm_write_cmd_err (size:64b/8B) */ +struct hwrm_nvm_write_cmd_err { /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * command specific error codes that go to + * the cmd_err field in Common HWRM Error Response. */ - uint64_t resp_addr; + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_WRITE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* Unable to complete operation due to fragmentation */ + #define HWRM_NVM_WRITE_CMD_ERR_CODE_FRAG_ERR UINT32_C(0x1) + /* NVM is completely full. */ + #define HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE UINT32_C(0x2) + #define HWRM_NVM_WRITE_CMD_ERR_CODE_LAST \ + HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE + uint8_t unused_0[7]; +} __attribute__((packed)); + +/******************* + * hwrm_nvm_modify * + *******************/ + + +/* hwrm_nvm_modify_input (size:320b/40B) */ +struct hwrm_nvm_modify_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint32_t encap_request[26]; + uint16_t cmpl_ring; /* - * This is an encapsulated request. This request should be - * rejected by the HWRM and the error response should be - * provided in the response buffer inside the encapsulated - * request. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t encap_resp_target_id; + uint16_t seq_id; /* - * This value indicates the target id of the response to the - * encapsulated request. 
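+ *
+ * Illustrative usage sketch (not part of the generated interface), same
+ * assumptions as the sketches above; dir_type/dir_ext/dir_attr values
+ * come from bnxnvm_defs.h as noted in the field descriptions. Writing
+ * data_len bytes as a directory item:
+ *
+ *     struct hwrm_nvm_write_input req = { 0 };
+ *
+ *     req.req_type = rte_cpu_to_le_16(HWRM_NVM_WRITE);
+ *     req.resp_addr = rte_cpu_to_le_64(resp_iova);
+ *     req.host_src_addr = rte_cpu_to_le_64(src_iova);
+ *     req.dir_type = rte_cpu_to_le_16(type);
+ *     req.dir_ordinal = rte_cpu_to_le_16(ordinal);
+ *     req.dir_ext = rte_cpu_to_le_16(ext);
+ *     req.dir_attr = rte_cpu_to_le_16(attr);
+ *     req.dir_data_length = rte_cpu_to_le_32(data_len);
+ *     req.dir_item_length = 0;  // below data_len, so ignored; the
+ *                               // firmware sizes the allocation itself
+ *     rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ *
+ * On failure, the code field above distinguishes fragmentation
+ * (..._FRAG_ERR) from an exhausted device (..._NO_SPACE).
+ *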
0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - - * HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_reject_fwd_resp_output { - uint16_t error_code; + uint16_t target_id; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t resp_addr; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * 64-bit Host Source Address. + * This is where the modified data is. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint64_t host_src_addr; + /* 16-bit directory entry index. */ + uint16_t dir_idx; + uint8_t unused_0[2]; + /* 32-bit NVRAM byte-offset to modify content from. */ + uint32_t offset; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * Length of data to be modified, in bytes. The length shall + * be non-zero. */ + uint32_t len; + uint8_t unused_1[4]; } __attribute__((packed)); -/* hwrm_nvm_get_dir_entries */ -/* Input (24 bytes) */ -struct hwrm_nvm_get_dir_entries_input { - uint16_t req_type; - uint16_t cmpl_ring; - uint16_t seq_id; - uint16_t target_id; - uint64_t resp_addr; - uint64_t host_dest_addr; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_nvm_get_dir_entries_output { - uint16_t error_code; - uint16_t req_type; - uint16_t seq_id; - uint16_t resp_len; - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; +/* hwrm_nvm_modify_output (size:128b/16B) */ +struct hwrm_nvm_modify_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
+ */ + uint8_t valid; } __attribute__((packed)); +/*************************** + * hwrm_nvm_find_dir_entry * + ***************************/ -/* hwrm_nvm_erase_dir_entry */ -/* Input (24 bytes) */ -struct hwrm_nvm_erase_dir_entry_input { - uint16_t req_type; - uint16_t cmpl_ring; - uint16_t seq_id; - uint16_t target_id; - uint64_t resp_addr; - uint16_t dir_idx; - uint16_t unused_0[3]; -}; - -/* Output (16 bytes) */ -struct hwrm_nvm_erase_dir_entry_output { - uint16_t error_code; - uint16_t req_type; - uint16_t seq_id; - uint16_t resp_len; - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; -}; - -/* hwrm_nvm_get_dir_info */ -/* Input (16 bytes) */ -struct hwrm_nvm_get_dir_info_input { - uint16_t req_type; - uint16_t cmpl_ring; - uint16_t seq_id; - uint16_t target_id; - uint64_t resp_addr; -} __attribute__((packed)); -/* Output (24 bytes) */ -struct hwrm_nvm_get_dir_info_output { - uint16_t error_code; +/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */ +struct hwrm_nvm_find_dir_entry_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t entries; - /* Number of directory entries in the directory. */ - uint32_t entry_length; - /* Size of each directory entry, in bytes. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* hwrm_nvm_write */ -/* - * Note: Write to the allocated NVRAM of an item referenced by an existing - * directory entry. - */ -/* Input (48 bytes) */ -struct hwrm_nvm_write_input { - uint16_t req_type; + uint16_t target_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cmpl_ring; + uint64_t resp_addr; + uint32_t enables; /* - * This value indicates the what completion ring the request - * will be optionally completed on. 
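+ *
+ * Illustrative usage sketch (not part of the generated interface), same
+ * assumptions as the sketches above: patching patch_len bytes of an
+ * existing item in place:
+ *
+ *     struct hwrm_nvm_modify_input req = { 0 };
+ *
+ *     req.req_type = rte_cpu_to_le_16(HWRM_NVM_MODIFY);
+ *     req.resp_addr = rte_cpu_to_le_64(resp_iova);
+ *     req.host_src_addr = rte_cpu_to_le_64(patch_iova); // replacement bytes
+ *     req.dir_idx = rte_cpu_to_le_16(idx);
+ *     req.offset = rte_cpu_to_le_32(mod_offset);  // NVRAM byte-offset, per
+ *                                                 // the field description
+ *     req.len = rte_cpu_to_le_32(patch_len);      // must be non-zero
+ *     rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ *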
If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the dir_idx_valid field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID \ + UINT32_C(0x1) + /* Directory Entry Index */ + uint16_t dir_idx; + /* Directory Entry (Image) Type */ + uint16_t dir_type; + /* + * Directory ordinal. + * The instance of this Directory Type + */ + uint16_t dir_ordinal; + /* The Directory Entry Extension flags. */ + uint16_t dir_ext; + /* This value indicates the search option using dir_ordinal. */ + uint8_t opt_ordinal; + /* This value indicates the search option using dir_ordinal. */ + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_MASK UINT32_C(0x3) + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_SFT 0 + /* Equal to specified ordinal value. */ + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_EQ UINT32_C(0x0) + /* Greater than or equal to specified ordinal value */ + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GE UINT32_C(0x1) + /* Greater than specified ordinal value */ + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT UINT32_C(0x2) + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_LAST \ + HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT + uint8_t unused_0[3]; +} __attribute__((packed)); + +/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */ +struct hwrm_nvm_find_dir_entry_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Allocated NVRAM for this directory entry, in bytes. */ + uint32_t dir_item_length; + /* Size of the stored data for this directory entry, in bytes. */ + uint32_t dir_data_length; + /* + * Firmware version. + * Only valid if the directory entry is for embedded firmware stored in APE_BIN Format. + */ + uint32_t fw_ver; + /* Directory ordinal. */ + uint16_t dir_ordinal; + /* Directory Entry Index */ + uint16_t dir_idx; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/**************************** + * hwrm_nvm_erase_dir_entry * + ****************************/ + + +/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */ +struct hwrm_nvm_erase_dir_entry_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t resp_addr; + uint16_t cmpl_ring; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The sequence ID is used by the driver for tracking multiple + * commands. 
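+ *
+ * Illustrative usage sketch (not part of the generated interface), same
+ * assumptions as the sketches above: finding the first instance of a type
+ * at or above ordinal 1. enables is left at zero, so the dir_idx key is
+ * not used and the type/ordinal/ext fields drive the search:
+ *
+ *     struct hwrm_nvm_find_dir_entry_input req = { 0 };
+ *
+ *     req.req_type = rte_cpu_to_le_16(HWRM_NVM_FIND_DIR_ENTRY);
+ *     req.resp_addr = rte_cpu_to_le_64(resp_iova);
+ *     req.dir_type = rte_cpu_to_le_16(type);
+ *     req.dir_ordinal = rte_cpu_to_le_16(1);
+ *     req.opt_ordinal = HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GE;
+ *     rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ *
+ * The output then reports the matching dir_idx along with the item's
+ * allocated and stored data lengths.
+ *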
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t host_src_addr; - /* 64-bit Host Source Address. This is where the source data is. */ - uint16_t dir_type; + uint16_t seq_id; /* - * The Directory Entry Type (valid values are defined in the - * bnxnvm_directory_type enum defined in the file - * bnxnvm_defs.h). + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t dir_ordinal; + uint16_t target_id; /* - * Directory ordinal. The 0-based instance of the combined - * Directory Entry Type and Extension. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t dir_ext; + uint64_t resp_addr; + /* Directory Entry Index */ + uint16_t dir_idx; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */ +struct hwrm_nvm_erase_dir_entry_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * The Directory Entry Extension flags (see BNX_DIR_EXT_* in the - * file bnxnvm_defs.h). + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t dir_attr; + uint8_t valid; +} __attribute__((packed)); + +/************************* + * hwrm_nvm_get_dev_info * + *************************/ + + +/* hwrm_nvm_get_dev_info_input (size:128b/16B) */ +struct hwrm_nvm_get_dev_info_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Directory Entry Attribute flags (see BNX_DIR_ATTR_* in the - * file bnxnvm_defs.h). + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint32_t dir_data_length; + uint16_t cmpl_ring; /* - * Length of data to write, in bytes. May be less than or equal - * to the allocated size for the directory entry. The data - * length stored in the directory entry will be updated to - * reflect this value once the write is complete. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t option; - /* Option. */ - uint16_t flags; + uint16_t seq_id; /* - * When this bit is '1', the original active image will not be - * removed. TBD: what purpose is this? + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG UINT32_C(0x1) - uint32_t dir_item_length; + uint16_t target_id; /* - * The requested length of the allocated NVM for the item, in - * bytes. 
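+ *
+ * Illustrative usage sketch (not part of the generated interface), same
+ * assumptions as the sketches above: erasing the entry at directory
+ * index idx:
+ *
+ *     struct hwrm_nvm_erase_dir_entry_input req = { 0 };
+ *
+ *     req.req_type = rte_cpu_to_le_16(HWRM_NVM_ERASE_DIR_ENTRY);
+ *     req.resp_addr = rte_cpu_to_le_64(resp_iova);
+ *     req.dir_idx = rte_cpu_to_le_16(idx);
+ *     rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ *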
This value may be greater than or equal to the - * specified data length (dir_data_length). If this value is - * less than the specified data length, it will be ignored. The - * response will contain the actual allocated item length, which - * may be greater than the requested item length. The purpose - * for allocating more than the required number of bytes for an - * item's data is to pre-allocate extra storage (padding) to - * accommodate the potential future growth of an item (e.g. - * upgraded firmware with a size increase, log growth, expanded - * configuration data). + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t unused_0; + uint64_t resp_addr; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_nvm_write_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; +/* hwrm_nvm_get_dev_info_output (size:256b/32B) */ +struct hwrm_nvm_get_dev_info_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Manufacturer ID. */ + uint16_t manufacturer_id; + /* Device ID. */ + uint16_t device_id; + /* Sector size of the NVRAM device. */ + uint32_t sector_size; + /* Total size, in bytes, of the NVRAM device. */ + uint32_t nvram_size; + uint32_t reserved_size; + /* Available size that can be used, in bytes. Available size is the NVRAM size minus the used size and reserved size. */ + uint32_t available_size; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_nvm_mod_dir_entry * + **************************/ + + +/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */ +struct hwrm_nvm_mod_dir_entry_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. 
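+ *
+ * Illustrative usage sketch (not part of the generated interface), same
+ * assumptions as the sketches above. The request carries no
+ * command-specific fields; the device geometry is read from the response:
+ *
+ *     struct hwrm_nvm_get_dev_info_output *dev = (void *)resp_buf;
+ *     uint32_t sector, total, avail;
+ *
+ *     // ... issue HWRM_NVM_GET_DEV_INFO, poll dev->valid ...
+ *     sector = rte_le_to_cpu_32(dev->sector_size);
+ *     total = rte_le_to_cpu_32(dev->nvram_size);
+ *     avail = rte_le_to_cpu_32(dev->available_size);
+ *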
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t dir_idx; - /* The directory index of the created or modified item. */ - uint8_t unused_0; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* hwrm_nvm_read */ -/* - * Note: Read the contents of an NVRAM item as referenced (indexed) by an - * existing directory entry. - */ -/* Input (40 bytes) */ -struct hwrm_nvm_read_input { - uint16_t req_type; + uint16_t target_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cmpl_ring; + uint64_t resp_addr; + uint32_t enables; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the checksum field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_NVM_MOD_DIR_ENTRY_INPUT_ENABLES_CHECKSUM UINT32_C(0x1) + /* Directory Entry Index */ + uint16_t dir_idx; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * Directory ordinal. + * The (0-based) instance of this Directory Type. */ - uint64_t resp_addr; + uint16_t dir_ordinal; + /* The Directory Entry Extension flags (see BNX_DIR_EXT_* for extension flag definitions). */ + uint16_t dir_ext; + /* Directory Entry Attribute flags (see BNX_DIR_ATTR_* for attribute flag definitions). */ + uint16_t dir_attr; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * If valid, then this field updates the checksum + * value of the content in the directory entry. */ - uint64_t host_dest_addr; + uint32_t checksum; +} __attribute__((packed)); + +/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */ +struct hwrm_nvm_mod_dir_entry_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * 64-bit Host Destination Address. This is the host address - * where the data will be written to. + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t dir_idx; - /* The 0-based index of the directory entry. */ - uint8_t unused_0; - uint8_t unused_1; - uint32_t offset; - /* The NVRAM byte-offset to read from. */ - uint32_t len; - /* The length of the data to be read, in bytes. */ - uint32_t unused_2; + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_nvm_read_output { - uint16_t error_code; +/************************** + * hwrm_nvm_verify_update * + **************************/ + + +/* hwrm_nvm_verify_update_input (size:192b/24B) */ +struct hwrm_nvm_verify_update_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* Hardware Resource Manager Specification */ -/* Description: This structure is used to specify port description. */ -/* - * Note: The Hardware Resource Manager (HWRM) manages various hardware resources - * inside the chip. The HWRM is implemented in firmware, and runs on embedded - * processors inside the chip. This firmware service is vital part of the chip. - * The chip can not be used by a driver or HWRM client without the HWRM. - */ -/* Input (16 bytes) */ -struct input { - uint16_t req_type; + uint16_t target_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cmpl_ring; + uint64_t resp_addr; + /* Directory Entry Type, to be verified. */ + uint16_t dir_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. 
If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Directory ordinal. + * The instance of the Directory Type to be verified. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t dir_ordinal; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The Directory Entry Extension flags. + * The "UPDATE" extension flag must be set in this value. + * A corresponding directory entry with the same type and ordinal values but *without* + * the "UPDATE" extension flag must also exist. The other flags of the extension must + * be identical between the active and update entries. */ - uint64_t resp_addr; + uint16_t dir_ext; + uint8_t unused_0[2]; +} __attribute__((packed)); + +/* hwrm_nvm_verify_update_output (size:128b/16B) */ +struct hwrm_nvm_verify_update_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* Output (8 bytes) */ -struct output { - uint16_t error_code; +/*************************** + * hwrm_nvm_install_update * + ***************************/ + + +/* hwrm_nvm_install_update_input (size:192b/24B) */ +struct hwrm_nvm_install_update_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ -} __attribute__((packed)); - -/* Short Command Structure (16 bytes) */ -struct hwrm_short_input { - uint16_t req_type; + uint16_t seq_id; /* - * This field indicates the type of request in the request - * buffer. The format for the rest of the command (request) is - * determined by this field. 
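+ *
+ * Illustrative usage sketch (not part of the generated interface), same
+ * assumptions as the sketches above; ext_update stands for the entry's
+ * extension flags with the "UPDATE" bit set (a BNX_DIR_EXT_* value from
+ * bnxnvm_defs.h, per the description above), while type and ordinal name
+ * the active entry being replaced:
+ *
+ *     struct hwrm_nvm_verify_update_input req = { 0 };
+ *
+ *     req.req_type = rte_cpu_to_le_16(HWRM_NVM_VERIFY_UPDATE);
+ *     req.resp_addr = rte_cpu_to_le_64(resp_iova);
+ *     req.dir_type = rte_cpu_to_le_16(type);
+ *     req.dir_ordinal = rte_cpu_to_le_16(ordinal);
+ *     req.dir_ext = rte_cpu_to_le_16(ext_update);
+ *     rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ *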
+ * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t signature; + uint16_t target_id; /* - * This field indicates a signature that is used to identify - * short form of the command listed here. This field shall be - * set to 17185 (0x4321). + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - /* Signature indicating this is a short form of HWRM command */ - #define HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD UINT32_C(0x4321) - uint16_t unused_0; - /* Reserved for future use. */ - uint16_t size; - /* This value indicates the length of the request. */ - uint64_t req_addr; - /* - * This is the host address where the request was written. This - * area must be 16B aligned. - */ -} __attribute__((packed)); - -#define HWRM_GET_HWRM_ERROR_CODE(arg) \ - { \ - typeof(arg) x = (arg); \ - ((x) == 0xf ? "HWRM_ERROR" : \ - ((x) == 0xffff ? "CMD_NOT_SUPPORTED" : \ - ((x) == 0xfffe ? "UNKNOWN_ERR" : \ - ((x) == 0x4 ? "RESOURCE_ALLOC_ERROR" : \ - ((x) == 0x5 ? "INVALID_FLAGS" : \ - ((x) == 0x6 ? "INVALID_ENABLES" : \ - ((x) == 0x0 ? "SUCCESS" : \ - ((x) == 0x1 ? "FAIL" : \ - ((x) == 0x2 ? "INVALID_PARAMS" : \ - ((x) == 0x3 ? "RESOURCE_ACCESS_DENIED" : \ - "Unknown error_code")))))))))) \ - } - -/* Return Codes (8 bytes) */ -struct ret_codes { - uint16_t error_code; - /* These are numbers assigned to return/error codes. */ - /* Request was successfully executed by the HWRM. */ - #define HWRM_ERR_CODE_SUCCESS (UINT32_C(0x0)) - /* THe HWRM failed to execute the request. */ - #define HWRM_ERR_CODE_FAIL (UINT32_C(0x1)) + uint64_t resp_addr; /* - * The request contains invalid argument(s) or - * input parameters. + * Installation type. If the value 3 through 0xffff is used, + * only packaged items with that type value will be installed and + * conditional installation directives for those packaged items + * will be over-ridden (i.e. 'create' or 'replace' will be treated + * as 'install'). */ - #define HWRM_ERR_CODE_INVALID_PARAMS (UINT32_C(0x2)) + uint32_t install_type; /* - * The requester is not allowed to access the - * requested resource. This error code shall be - * provided in a response to a request to query - * or modify an existing resource that is not - * accessible by the requester. + * Perform a normal package installation. Conditional installation + * directives (e.g. 'create' and 'replace') of packaged items + * will be followed. */ - #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (UINT32_C(0x3)) + #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_NORMAL UINT32_C(0x0) /* - * The HWRM is unable to allocate the requested - * resource. This code only applies to requests - * for HWRM resource allocations. + * Install all packaged items regardless of installation directive + * (i.e. treat all packaged items as though they have an installation + * directive of 'install'). */ - #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (UINT32_C(0x4)) - /* Invalid combination of flags is specified in the request. 
*/ - #define HWRM_ERR_CODE_INVALID_FLAGS (UINT32_C(0x5)) + #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL \ + UINT32_C(0xffffffff) + #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_LAST \ + HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL + uint16_t flags; + /* If set to 1, then securely erase all unused locations in persistent storage. */ + #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ERASE_UNUSED_SPACE \ + UINT32_C(0x1) /* - * Invalid combination of enables fields is - * specified in the request. + * If set to 1, then unspecifed images, images not in the package file, will be safely deleted. + * When combined with erase_unused_space then unspecified images will be securely erased. */ - #define HWRM_ERR_CODE_INVALID_ENABLES (UINT32_C(0x6)) + #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_REMOVE_UNUSED_PKG \ + UINT32_C(0x2) /* - * Generic HWRM execution error that represents - * an internal error. + * If set to 1, FW will defragment the NVM if defragmentation is required for the update. + * Allow additional time for this command to complete if this bit is set to 1. */ - #define HWRM_ERR_CODE_HWRM_ERROR (UINT32_C(0xf)) - /* Unknown error */ - #define HWRM_ERR_CODE_UNKNOWN_ERR (UINT32_C(0xfffe)) - /* Unsupported or invalid command */ - #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (UINT32_C(0xffff)) - uint16_t unused_0[3]; + #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ALLOWED_TO_DEFRAG \ + UINT32_C(0x4) + uint8_t unused_0[2]; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_err_output { - uint16_t error_code; +/* hwrm_nvm_install_update_output (size:192b/24B) */ +struct hwrm_nvm_install_update_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Bit-mask of successfully installed items. + * Bit-0 corresponding to the first packaged item, Bit-1 for the second item, etc. + * A value of 0 indicates that no items were successfully installed. + */ + uint64_t installed_items; + /* result is 8 b */ + uint8_t result; + /* There was no problem with the package installation. */ + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS UINT32_C(0x0) + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_LAST \ + HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS + /* problem_item is 8 b */ + uint8_t problem_item; + /* There was no problem with any packaged items. */ + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_NONE \ + UINT32_C(0x0) + /* There was a problem with the NVM package itself. */ + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE \ + UINT32_C(0xff) + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_LAST \ + HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE + /* reset_required is 8 b */ + uint8_t reset_required; + /* + * No reset is required for installed/updated firmware or + * microcode to take effect. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_NONE \ + UINT32_C(0x0) /* - * This field is the length of the response in bytes. 
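As a worked example of combining the install-type and flag values above (a sketch only; field widths follow the structure definition, and the defrag flag means the command may take longer to complete):

	static void
	nvm_install_all_example(struct hwrm_nvm_install_update_input *req)
	{
		/* Treat every packaged item as 'install', securely erase
		 * unused space, and let firmware defragment if needed. */
		req->install_type = rte_cpu_to_le_32(
			HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL);
		req->flags = rte_cpu_to_le_16(
			HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ERASE_UNUSED_SPACE |
			HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ALLOWED_TO_DEFRAG);
	}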
The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * A PCIe reset (e.g. system reboot) is + * required for newly installed/updated firmware or + * microcode to take effect. */ - uint32_t opaque_0; - /* debug info for this error response. */ - uint16_t opaque_1; - /* debug info for this error response. */ - uint8_t cmd_err; + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_PCI \ + UINT32_C(0x1) /* - * In the case of an error response, command specific error code - * is returned in this field. + * A controller power reset (e.g. system power-cycle) is + * required for newly installed/updated firmware or + * microcode to take effect. Some newly installed/updated + * firmware or microcode may still take effect upon the + * next PCIe reset. */ - uint8_t valid; + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER \ + UINT32_C(0x2) + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_LAST \ + HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER + uint8_t unused_0[4]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* Port Tx Statistics Formats (408 bytes) */ -struct tx_port_stats { - uint64_t tx_64b_frames; - /* Total Number of 64 Bytes frames transmitted */ - uint64_t tx_65b_127b_frames; - /* Total Number of 65-127 Bytes frames transmitted */ - uint64_t tx_128b_255b_frames; - /* Total Number of 128-255 Bytes frames transmitted */ - uint64_t tx_256b_511b_frames; - /* Total Number of 256-511 Bytes frames transmitted */ - uint64_t tx_512b_1023b_frames; - /* Total Number of 512-1023 Bytes frames transmitted */ - uint64_t tx_1024b_1518_frames; - /* Total Number of 1024-1518 Bytes frames transmitted */ - uint64_t tx_good_vlan_frames; +/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */ +struct hwrm_nvm_install_update_cmd_err { /* - * Total Number of each good VLAN (exludes FCS errors) frame - * transmitted which is 1519 to 1522 bytes in length inclusive - * (excluding framing bits but including FCS bytes). + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. 
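The reset_required values above tell the caller what, if anything, must happen before newly installed firmware takes effect; a small decoding sketch:

	static const char *
	nvm_reset_required_str(uint8_t reset_required)
	{
		switch (reset_required) {
		case HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_NONE:
			return "no reset required";
		case HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_PCI:
			return "PCIe reset (e.g. system reboot) required";
		case HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER:
			return "power-cycle required";
		default:
			return "unknown reset requirement";
		}
	}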
*/ - uint64_t tx_1519b_2047_frames; - /* Total Number of 1519-2047 Bytes frames transmitted */ - uint64_t tx_2048b_4095b_frames; - /* Total Number of 2048-4095 Bytes frames transmitted */ - uint64_t tx_4096b_9216b_frames; - /* Total Number of 4096-9216 Bytes frames transmitted */ - uint64_t tx_9217b_16383b_frames; - /* Total Number of 9217-16383 Bytes frames transmitted */ - uint64_t tx_good_frames; - /* Total Number of good frames transmitted */ - uint64_t tx_total_frames; - /* Total Number of frames transmitted */ - uint64_t tx_ucast_frames; - /* Total number of unicast frames transmitted */ - uint64_t tx_mcast_frames; - /* Total number of multicast frames transmitted */ - uint64_t tx_bcast_frames; - /* Total number of broadcast frames transmitted */ - uint64_t tx_pause_frames; - /* Total number of PAUSE control frames transmitted */ - uint64_t tx_pfc_frames; - /* Total number of PFC/per-priority PAUSE control frames transmitted */ - uint64_t tx_jabber_frames; - /* Total number of jabber frames transmitted */ - uint64_t tx_fcs_err_frames; - /* Total number of frames transmitted with FCS error */ - uint64_t tx_control_frames; - /* Total number of control frames transmitted */ - uint64_t tx_oversz_frames; - /* Total number of over-sized frames transmitted */ - uint64_t tx_single_dfrl_frames; - /* Total number of frames with single deferral */ - uint64_t tx_multi_dfrl_frames; - /* Total number of frames with multiple deferrals */ - uint64_t tx_single_coll_frames; - /* Total number of frames with single collision */ - uint64_t tx_multi_coll_frames; - /* Total number of frames with multiple collisions */ - uint64_t tx_late_coll_frames; - /* Total number of frames with late collisions */ - uint64_t tx_excessive_coll_frames; - /* Total number of frames with excessive collisions */ - uint64_t tx_frag_frames; - /* Total number of fragmented frames transmitted */ - uint64_t tx_err; - /* Total number of transmit errors */ - uint64_t tx_tagged_frames; - /* Total number of single VLAN tagged frames transmitted */ - uint64_t tx_dbl_tagged_frames; - /* Total number of double VLAN tagged frames transmitted */ - uint64_t tx_runt_frames; - /* Total number of runt frames transmitted */ - uint64_t tx_fifo_underruns; - /* Total number of TX FIFO under runs */ - uint64_t tx_pfc_ena_frames_pri0; + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* Unable to complete operation due to fragmentation */ + #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR UINT32_C(0x1) + /* nvm is completely full. */ + #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE UINT32_C(0x2) + #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST \ + HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE + uint8_t unused_0[7]; +} __attribute__((packed)); + +/****************** + * hwrm_nvm_flush * + ******************/ + + +/* hwrm_nvm_flush_input (size:128b/16B) */ +struct hwrm_nvm_flush_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Total number of PFC frames with PFC enabled bit for Pri 0 - * transmitted + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t tx_pfc_ena_frames_pri1; + uint16_t cmpl_ring; /* - * Total number of PFC frames with PFC enabled bit for Pri 1 - * transmitted + * The sequence ID is used by the driver for tracking multiple + * commands. 
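Command-specific errors land in the cmd_err byte of the common error response; mapping the codes above to text is one case per value (illustrative only):

	static const char *
	nvm_install_cmd_err_str(uint8_t code)
	{
		switch (code) {
		case HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
			return "NVM too fragmented to complete the update";
		case HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE:
			return "NVM is completely full";
		case HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN:
		default:
			return "unknown NVM install error";
		}
	}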
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t tx_pfc_ena_frames_pri2; + uint16_t seq_id; /* - * Total number of PFC frames with PFC enabled bit for Pri 2 - * transmitted + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t tx_pfc_ena_frames_pri3; + uint16_t target_id; /* - * Total number of PFC frames with PFC enabled bit for Pri 3 - * transmitted + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t tx_pfc_ena_frames_pri4; + uint64_t resp_addr; +} __attribute__((packed)); + +/* hwrm_nvm_flush_output (size:128b/16B) */ +struct hwrm_nvm_flush_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Total number of PFC frames with PFC enabled bit for Pri 4 - * transmitted + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t tx_pfc_ena_frames_pri5; + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_nvm_flush_cmd_err (size:64b/8B) */ +struct hwrm_nvm_flush_cmd_err { /* - * Total number of PFC frames with PFC enabled bit for Pri 5 - * transmitted + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. */ - uint64_t tx_pfc_ena_frames_pri6; + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_FLUSH_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* flush could not be performed */ + #define HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL UINT32_C(0x1) + #define HWRM_NVM_FLUSH_CMD_ERR_CODE_LAST \ + HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL + uint8_t unused_0[7]; +} __attribute__((packed)); + +/************************* + * hwrm_nvm_get_variable * + *************************/ + + +/* hwrm_nvm_get_variable_input (size:320b/40B) */ +struct hwrm_nvm_get_variable_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Total number of PFC frames with PFC enabled bit for Pri 6 - * transmitted + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t tx_pfc_ena_frames_pri7; + uint16_t cmpl_ring; /* - * Total number of PFC frames with PFC enabled bit for Pri 7 - * transmitted + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
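hwrm_nvm_flush carries no command-specific input beyond the common header, so issuing it reduces to the following sketch (same hypothetical hwrm_send() helper as above):

	static int
	nvm_flush_example(void)
	{
		struct hwrm_nvm_flush_input req = { 0 };
		struct hwrm_nvm_flush_output resp = { 0 };

		/* Nothing to fill in: the common header is the request. */
		return hwrm_send(HWRM_NVM_FLUSH, &req, sizeof(req),
				 &resp, sizeof(resp));
	}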
*/ - uint64_t tx_eee_lpi_events; - /* Total number of EEE LPI Events on TX */ - uint64_t tx_eee_lpi_duration; - /* EEE LPI Duration Counter on TX */ - uint64_t tx_llfc_logical_msgs; + uint16_t seq_id; /* - * Total number of Link Level Flow Control (LLFC) messages - * transmitted + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t tx_hcfc_msgs; - /* Total number of HCFC messages transmitted */ - uint64_t tx_total_collisions; - /* Total number of TX collisions */ - uint64_t tx_bytes; - /* Total number of transmitted bytes */ - uint64_t tx_xthol_frames; - /* Total number of end-to-end HOL frames */ - uint64_t tx_stat_discard; - /* Total Tx Drops per Port reported by STATS block */ - uint64_t tx_stat_error; - /* Total Tx Error Drops per Port reported by STATS block */ -} __attribute__((packed)); - -/* Port Rx Statistics Formats (528 bytes) */ -struct rx_port_stats { - uint64_t rx_64b_frames; - /* Total Number of 64 Bytes frames received */ - uint64_t rx_65b_127b_frames; - /* Total Number of 65-127 Bytes frames received */ - uint64_t rx_128b_255b_frames; - /* Total Number of 128-255 Bytes frames received */ - uint64_t rx_256b_511b_frames; - /* Total Number of 256-511 Bytes frames received */ - uint64_t rx_512b_1023b_frames; - /* Total Number of 512-1023 Bytes frames received */ - uint64_t rx_1024b_1518_frames; - /* Total Number of 1024-1518 Bytes frames received */ - uint64_t rx_good_vlan_frames; + uint16_t target_id; /* - * Total Number of each good VLAN (exludes FCS errors) frame - * received which is 1519 to 1522 bytes in length inclusive - * (excluding framing bits but including FCS bytes). + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. 
*/ - uint64_t rx_1519b_2047b_frames; - /* Total Number of 1519-2047 Bytes frames received */ - uint64_t rx_2048b_4095b_frames; - /* Total Number of 2048-4095 Bytes frames received */ - uint64_t rx_4096b_9216b_frames; - /* Total Number of 4096-9216 Bytes frames received */ - uint64_t rx_9217b_16383b_frames; - /* Total Number of 9217-16383 Bytes frames received */ - uint64_t rx_total_frames; - /* Total number of frames received */ - uint64_t rx_ucast_frames; - /* Total number of unicast frames received */ - uint64_t rx_mcast_frames; - /* Total number of multicast frames received */ - uint64_t rx_bcast_frames; - /* Total number of broadcast frames received */ - uint64_t rx_fcs_err_frames; - /* Total number of received frames with FCS error */ - uint64_t rx_ctrl_frames; - /* Total number of control frames received */ - uint64_t rx_pause_frames; - /* Total number of PAUSE frames received */ - uint64_t rx_pfc_frames; - /* Total number of PFC frames received */ - uint64_t rx_unsupported_opcode_frames; - /* Total number of frames received with an unsupported opcode */ - uint64_t rx_unsupported_da_pausepfc_frames; + uint64_t resp_addr; /* - * Total number of frames received with an unsupported DA for - * pause and PFC - */ - uint64_t rx_wrong_sa_frames; - /* Total number of frames received with an unsupported SA */ - uint64_t rx_align_err_frames; - /* Total number of received packets with alignment error */ - uint64_t rx_oor_len_frames; - /* Total number of received frames with out-of-range length */ - uint64_t rx_code_err_frames; - /* Total number of received frames with error termination */ - uint64_t rx_false_carrier_frames; + * This is the host address where + * nvm variable will be stored + */ + uint64_t dest_data_addr; + /* size of data in bits */ + uint16_t data_len; + /* nvm cfg option number */ + uint16_t option_num; + /* reserved. */ + #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0) + /* reserved. */ + #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \ + UINT32_C(0xffff) + #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_LAST \ + HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF + /* + * Number of dimensions for this nvm configuration variable. + * This value indicates how many of the indexN values to use. + * A value of 0 means that none of the indexN values are valid. + * A value of 1 requires at index0 is valued, a value of 2 + * requires that index0 and index1 are valid, and so forth + */ + uint16_t dimensions; + /* index for the 1st dimensions */ + uint16_t index_0; + /* index for the 2nd dimensions */ + uint16_t index_1; + /* index for the 3rd dimensions */ + uint16_t index_2; + /* index for the 4th dimensions */ + uint16_t index_3; + uint8_t flags; + /* + * When this bit is set to 1, the factory default value will be returned, + * 0 returns the operational value. + */ + #define HWRM_NVM_GET_VARIABLE_INPUT_FLAGS_FACTORY_DFLT \ + UINT32_C(0x1) + uint8_t unused_0; +} __attribute__((packed)); + +/* hwrm_nvm_get_variable_output (size:128b/16B) */ +struct hwrm_nvm_get_variable_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* size of data of the actual variable retrieved in bits */ + uint16_t data_len; + /* + * option_num is the option number for the data retrieved. 
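The dimensions/index_N convention above covers both scalar and array variables; a sketch of a two-dimensional lookup follows, with the destination buffer's IOVA supplied by the caller (index names here are placeholders):

	static void
	nvm_get_var_2d_example(struct hwrm_nvm_get_variable_input *req,
			       rte_iova_t dst_iova, uint16_t len_bits,
			       uint16_t opt, uint16_t idx0, uint16_t idx1)
	{
		req->dest_data_addr = rte_cpu_to_le_64(dst_iova);
		req->data_len = rte_cpu_to_le_16(len_bits); /* bits, not bytes */
		req->option_num = rte_cpu_to_le_16(opt);
		/* Two dimensions: firmware consumes index_0 and index_1
		 * and ignores index_2/index_3. */
		req->dimensions = rte_cpu_to_le_16(2);
		req->index_0 = rte_cpu_to_le_16(idx0);
		req->index_1 = rte_cpu_to_le_16(idx1);
	}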
It is possible in the + * future that the option number returned would be different than requested. This + * condition could occur if an option is deprecated and a new option id is defined + * with similar characteristics, but has a slightly different definition. This + * also makes it convenient for the caller to identify the variable result with + * the option id from the response. + */ + uint16_t option_num; + /* reserved. */ + #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0) + /* reserved. */ + #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF \ + UINT32_C(0xffff) + #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_LAST \ + HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF + uint8_t unused_0[3]; /* - * Total number of received frames with a false carrier is - * detected during idle, as defined by RX_ER samples active and - * RXD is 0xE. The event is reported along with the statistics - * generated on the next received frame. Only one false carrier - * condition can be detected and logged between frames. Carrier - * event, valid for 10M/100M speed modes only. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t rx_ovrsz_frames; - /* Total number of over-sized frames received */ - uint64_t rx_jbr_frames; - /* Total number of jabber packets received */ - uint64_t rx_mtu_err_frames; - /* Total number of received frames with MTU error */ - uint64_t rx_match_crc_frames; - /* Total number of received frames with CRC match */ - uint64_t rx_promiscuous_frames; - /* Total number of frames received promiscuously */ - uint64_t rx_tagged_frames; - /* Total number of received frames with one or two VLAN tags */ - uint64_t rx_double_tagged_frames; - /* Total number of received frames with two VLAN tags */ - uint64_t rx_trunc_frames; - /* Total number of truncated frames received */ - uint64_t rx_good_frames; - /* Total number of good frames (without errors) received */ - uint64_t rx_pfc_xon2xoff_frames_pri0; + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */ +struct hwrm_nvm_get_variable_cmd_err { /* - * Total number of received PFC frames with transition from XON - * to XOFF on Pri 0 + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. */ - uint64_t rx_pfc_xon2xoff_frames_pri1; + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* variable does not exist */ + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1) + /* configuration is corrupted and the variable cannot be saved */ + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2) + /* length specified is too small */ + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT UINT32_C(0x3) + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LAST \ + HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT + uint8_t unused_0[7]; +} __attribute__((packed)); + +/************************* + * hwrm_nvm_set_variable * + *************************/ + + +/* hwrm_nvm_set_variable_input (size:320b/40B) */ +struct hwrm_nvm_set_variable_input { + /* The HWRM command request type. 
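Note that both data_len fields are expressed in bits, so host buffer sizing needs a round-up to whole bytes, e.g.:

	static inline uint16_t
	nvm_var_len_bytes(uint16_t len_bits)
	{
		/* data_len is in bits; round up when sizing the buffer
		 * behind dest_data_addr. */
		return (len_bits + 7) / 8;
	}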
*/ + uint16_t req_type; /* - * Total number of received PFC frames with transition from XON - * to XOFF on Pri 1 + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t rx_pfc_xon2xoff_frames_pri2; + uint16_t cmpl_ring; /* - * Total number of received PFC frames with transition from XON - * to XOFF on Pri 2 + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t rx_pfc_xon2xoff_frames_pri3; + uint16_t seq_id; /* - * Total number of received PFC frames with transition from XON - * to XOFF on Pri 3 + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t rx_pfc_xon2xoff_frames_pri4; + uint16_t target_id; /* - * Total number of received PFC frames with transition from XON - * to XOFF on Pri 4 + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t rx_pfc_xon2xoff_frames_pri5; + uint64_t resp_addr; /* - * Total number of received PFC frames with transition from XON - * to XOFF on Pri 5 - */ - uint64_t rx_pfc_xon2xoff_frames_pri6; + * This is the host address where + * nvm variable will be copied from + */ + uint64_t src_data_addr; + /* size of data in bits */ + uint16_t data_len; + /* nvm cfg option number */ + uint16_t option_num; + /* reserved. */ + #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0) + /* reserved. */ + #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \ + UINT32_C(0xffff) + #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_LAST \ + HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF + /* + * Number of dimensions for this nvm configuration variable. + * This value indicates how many of the indexN values to use. + * A value of 0 means that none of the indexN values are valid. + * A value of 1 requires at index0 is valued, a value of 2 + * requires that index0 and index1 are valid, and so forth + */ + uint16_t dimensions; + /* index for the 1st dimensions */ + uint16_t index_0; + /* index for the 2nd dimensions */ + uint16_t index_1; + /* index for the 3rd dimensions */ + uint16_t index_2; + /* index for the 4th dimensions */ + uint16_t index_3; + uint8_t flags; + /* When this bit is 1, flush internal cache after this write operation (see hwrm_nvm_flush command.) */ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FORCE_FLUSH \ + UINT32_C(0x1) + /* encryption method */ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_MASK \ + UINT32_C(0xe) + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_SFT 1 + /* No encryption. */ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_NONE \ + (UINT32_C(0x0) << 1) + /* one-way encryption. */ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1 \ + (UINT32_C(0x1) << 1) + /* symmetric AES256 encryption. 
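The encryption mode occupies bits 1-3 of the flags byte (mask 0xe, shift 1), and the mode values above are defined pre-shifted, so composing flags is a plain OR; a sketch:

	static uint8_t
	nvm_set_var_flags_example(void)
	{
		/* AES256-encrypt the variable and flush NVM after the
		 * write (see the hwrm_nvm_flush command). */
		return (uint8_t)
			(HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FORCE_FLUSH |
			 HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_AES256);
	}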
*/ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_AES256 \ + (UINT32_C(0x2) << 1) + /* SHA1 digest appended to plaintext contents, for authentication */ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH \ + (UINT32_C(0x3) << 1) + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_LAST \ + HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH + uint8_t unused_0; +} __attribute__((packed)); + +/* hwrm_nvm_set_variable_output (size:128b/16B) */ +struct hwrm_nvm_set_variable_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Total number of received PFC frames with transition from XON - * to XOFF on Pri 6 + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t rx_pfc_xon2xoff_frames_pri7; + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */ +struct hwrm_nvm_set_variable_cmd_err { /* - * Total number of received PFC frames with transition from XON - * to XOFF on Pri 7 + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. */ - uint64_t rx_pfc_ena_frames_pri0; + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* variable does not exist */ + #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1) + /* configuration is corrupted and the variable cannot be saved */ + #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2) + #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_LAST \ + HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR + uint8_t unused_0[7]; +} __attribute__((packed)); + +/**************************** + * hwrm_nvm_validate_option * + ****************************/ + + +/* hwrm_nvm_validate_option_input (size:320b/40B) */ +struct hwrm_nvm_validate_option_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 0 + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t rx_pfc_ena_frames_pri1; + uint16_t cmpl_ring; /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 1 + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t rx_pfc_ena_frames_pri2; + uint16_t seq_id; /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 2 + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t rx_pfc_ena_frames_pri3; + uint16_t target_id; /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 3 + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t rx_pfc_ena_frames_pri4; + uint64_t resp_addr; /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 4 + * This is the host address where + * nvm variable will be copied from */ - uint64_t rx_pfc_ena_frames_pri5; + uint64_t src_data_addr; + /* size of data in bits */ + uint16_t data_len; + /* nvm cfg option number */ + uint16_t option_num; + /* reserved. */ + #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_0 \ + UINT32_C(0x0) + /* reserved. */ + #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF \ + UINT32_C(0xffff) + #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_LAST \ + HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF + /* + * Number of dimensions for this nvm configuration variable. + * This value indicates how many of the indexN values to use. + * A value of 0 means that none of the indexN values are valid. + * A value of 1 requires at index0 is valued, a value of 2 + * requires that index0 and index1 are valid, and so forth + */ + uint16_t dimensions; + /* index for the 1st dimensions */ + uint16_t index_0; + /* index for the 2nd dimensions */ + uint16_t index_1; + /* index for the 3rd dimensions */ + uint16_t index_2; + /* index for the 4th dimensions */ + uint16_t index_3; + uint8_t unused_0[2]; +} __attribute__((packed)); + +/* hwrm_nvm_validate_option_output (size:128b/16B) */ +struct hwrm_nvm_validate_option_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t result; + /* indicates that the value provided for the option is not matching with the saved data. */ + #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_NOT_MATCH UINT32_C(0x0) + /* indicates that the value provided for the option is matching the saved data. */ + #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH UINT32_C(0x1) + #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_LAST \ + HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH + uint8_t unused_0[6]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_nvm_validate_option_cmd_err (size:64b/8B) */ +struct hwrm_nvm_validate_option_cmd_err { /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 5 + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. */ - uint64_t rx_pfc_ena_frames_pri6; + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_LAST \ + HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN + uint8_t unused_0[7]; +} __attribute__((packed)); + +/***************************** + * hwrm_nvm_factory_defaults * + *****************************/ + + +/* hwrm_nvm_factory_defaults_input (size:192b/24B) */ +struct hwrm_nvm_factory_defaults_input { + /* The HWRM command request type. 
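The validate-option output reduces to a match/no-match verdict, so a caller would typically just compare against the MATCH value (sketch):

	static int
	nvm_option_matches(const struct hwrm_nvm_validate_option_output *resp)
	{
		return resp->result ==
		       HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH;
	}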
*/ + uint16_t req_type; /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 6 + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t rx_pfc_ena_frames_pri7; + uint16_t cmpl_ring; /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 7 + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t rx_sch_crc_err_frames; - /* Total Number of frames received with SCH CRC error */ - uint64_t rx_undrsz_frames; - /* Total Number of under-sized frames received */ - uint64_t rx_frag_frames; - /* Total Number of fragmented frames received */ - uint64_t rx_eee_lpi_events; - /* Total number of RX EEE LPI Events */ - uint64_t rx_eee_lpi_duration; - /* EEE LPI Duration Counter on RX */ - uint64_t rx_llfc_physical_msgs; + uint16_t seq_id; /* - * Total number of physical type Link Level Flow Control (LLFC) - * messages received + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t rx_llfc_logical_msgs; + uint16_t target_id; /* - * Total number of logical type Link Level Flow Control (LLFC) - * messages received + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t rx_llfc_msgs_with_crc_err; + uint64_t resp_addr; + /* mode is 8 b */ + uint8_t mode; + /* If set to 1, it will trigger restoration of factory default settings */ + #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_RESTORE UINT32_C(0x0) + /* If set to 1, it will trigger creation of factory default settings */ + #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_CREATE UINT32_C(0x1) + #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_LAST \ + HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_CREATE + uint8_t unused_0[7]; +} __attribute__((packed)); + +/* hwrm_nvm_factory_defaults_output (size:128b/16B) */ +struct hwrm_nvm_factory_defaults_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t result; + /* factory defaults created successfully. */ + #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_OK \ + UINT32_C(0x0) + /* factory defaults restored successfully. */ + #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_RESTORE_OK \ + UINT32_C(0x1) + /* factory defaults already created. */ + #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_ALREADY \ + UINT32_C(0x2) + #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_LAST \ + HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_ALREADY + uint8_t unused_0[6]; /* - * Total number of logical type Link Level Flow Control (LLFC) - * messages received with CRC error + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
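Decoding the factory-defaults result byte above follows the same switch pattern as the other NVM responses (illustrative):

	static const char *
	nvm_factory_defaults_result_str(uint8_t result)
	{
		switch (result) {
		case HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_OK:
			return "factory defaults created";
		case HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_RESTORE_OK:
			return "factory defaults restored";
		case HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_ALREADY:
			return "factory defaults already created";
		default:
			return "unexpected result";
		}
	}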
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t rx_hcfc_msgs; - /* Total number of HCFC messages received */ - uint64_t rx_hcfc_msgs_with_crc_err; - /* Total number of HCFC messages received with CRC error */ - uint64_t rx_bytes; - /* Total number of received bytes */ - uint64_t rx_runt_bytes; - /* Total number of bytes received in runt frames */ - uint64_t rx_runt_frames; - /* Total number of runt frames received */ - uint64_t rx_stat_discard; - /* Total Rx Discards per Port reported by STATS block */ - uint64_t rx_stat_err; - /* Total Rx Error Drops per Port reported by STATS block */ + uint8_t valid; } __attribute__((packed)); -/* Periodic Statistics Context DMA to host (160 bytes) */ -/* - * per-context HW statistics -- chip view - */ +/* hwrm_nvm_factory_defaults_cmd_err (size:64b/8B) */ +struct hwrm_nvm_factory_defaults_cmd_err { + /* + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. + */ + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_UNKNOWN \ + UINT32_C(0x0) + /* valid configuration not present to create defaults */ + #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_VALID_CFG \ + UINT32_C(0x1) + /* No saved configuration present to restore, restore failed */ + #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG \ + UINT32_C(0x2) + #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_LAST \ + HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG + uint8_t unused_0[7]; +} __attribute__((packed)); -struct ctx_hw_stats64 { - uint64_t rx_ucast_pkts; - uint64_t rx_mcast_pkts; - uint64_t rx_bcast_pkts; - uint64_t rx_drop_pkts; - uint64_t rx_discard_pkts; - uint64_t rx_ucast_bytes; - uint64_t rx_mcast_bytes; - uint64_t rx_bcast_bytes; - - uint64_t tx_ucast_pkts; - uint64_t tx_mcast_pkts; - uint64_t tx_bcast_pkts; - uint64_t tx_drop_pkts; - uint64_t tx_discard_pkts; - uint64_t tx_ucast_bytes; - uint64_t tx_mcast_bytes; - uint64_t tx_bcast_bytes; - - uint64_t tpa_pkts; - uint64_t tpa_bytes; - uint64_t tpa_events; - uint64_t tpa_aborts; -} __attribute__((packed)); - -#endif /* _HSI_STRUCT_DEF_DPDK_ */ +#endif /* _HSI_STRUCT_DEF_DPDK_H_ */ diff --git a/drivers/net/bnxt/meson.build b/drivers/net/bnxt/meson.build new file mode 100644 index 00000000..e130f271 --- /dev/null +++ b/drivers/net/bnxt/meson.build @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +install_headers('rte_pmd_bnxt.h') +version = 2 +sources = files('bnxt_cpr.c', + 'bnxt_ethdev.c', + 'bnxt_filter.c', + 'bnxt_flow.c', + 'bnxt_hwrm.c', + 'bnxt_irq.c', + 'bnxt_ring.c', + 'bnxt_rxq.c', + 'bnxt_rxr.c', + 'bnxt_stats.c', + 'bnxt_txq.c', + 'bnxt_txr.c', + 'bnxt_util.c', + 'bnxt_vnic.c', + 'rte_pmd_bnxt.c') diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c index cae95f8f..c298de83 100644 --- a/drivers/net/bnxt/rte_pmd_bnxt.c +++ b/drivers/net/bnxt/rte_pmd_bnxt.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017-2018 Broadcom + * All rights reserved. */ #include diff --git a/drivers/net/bnxt/rte_pmd_bnxt.h b/drivers/net/bnxt/rte_pmd_bnxt.h index cd7227ac..68fbe34d 100644 --- a/drivers/net/bnxt/rte_pmd_bnxt.h +++ b/drivers/net/bnxt/rte_pmd_bnxt.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017-2018 Broadcom + * All rights reserved. 
*/ #ifndef _PMD_BNXT_H_ diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile index 4a6633ed..acad16a1 100644 --- a/drivers/net/bonding/Makefile +++ b/drivers/net/bonding/Makefile @@ -27,6 +27,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_pmd.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_args.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_8023ad.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_alb.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_flow.c # # Export include files diff --git a/drivers/net/bonding/meson.build b/drivers/net/bonding/meson.build index b90abc6d..602d2880 100644 --- a/drivers/net/bonding/meson.build +++ b/drivers/net/bonding/meson.build @@ -2,7 +2,8 @@ # Copyright(c) 2017 Intel Corporation name = 'bond' #, james bond :-) -sources = files('rte_eth_bond_api.c', 'rte_eth_bond_pmd.c', +version = 2 +sources = files('rte_eth_bond_api.c', 'rte_eth_bond_pmd.c', 'rte_eth_bond_flow.c', 'rte_eth_bond_args.c', 'rte_eth_bond_8023ad.c', 'rte_eth_bond_alb.c') deps += 'sched' # needed for rte_bitmap.h diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c index c452318f..f8cea4b6 100644 --- a/drivers/net/bonding/rte_eth_bond_8023ad.c +++ b/drivers/net/bonding/rte_eth_bond_8023ad.c @@ -16,9 +16,12 @@ static void bond_mode_8023ad_ext_periodic_cb(void *arg); #ifdef RTE_LIBRTE_BOND_DEBUG_8023AD -#define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \ - bond_dbg_get_time_diff_ms(), slave_id, \ - __func__, ##__VA_ARGS__) + +#define MODE4_DEBUG(fmt, ...) \ + rte_log(RTE_LOG_DEBUG, bond_logtype, \ + "%6u [Port %u: %s] " fmt, \ + bond_dbg_get_time_diff_ms(), slave_id, \ + __func__, ##__VA_ARGS__) static uint64_t start_time; @@ -77,44 +80,46 @@ bond_print_lacp(struct lacpdu *l) if (p_len && p_state[p_len-1] == ' ') p_state[p_len-1] = '\0'; - RTE_LOG(DEBUG, PMD, "LACP: {\n"\ - " subtype= %02X\n"\ - " ver_num=%02X\n"\ - " actor={ tlv=%02X, len=%02X\n"\ - " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\ - " state={ %s }\n"\ - " }\n"\ - " partner={ tlv=%02X, len=%02X\n"\ - " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\ - " state={ %s }\n"\ - " }\n"\ - " collector={info=%02X, length=%02X, max_delay=%04X\n, " \ - "type_term=%02X, terminator_length = %02X}\n",\ - l->subtype,\ - l->version_number,\ - l->actor.tlv_type_info,\ - l->actor.info_length,\ - l->actor.port_params.system_priority,\ - a_address,\ - l->actor.port_params.key,\ - l->actor.port_params.port_priority,\ - l->actor.port_params.port_number,\ - a_state,\ - l->partner.tlv_type_info,\ - l->partner.info_length,\ - l->partner.port_params.system_priority,\ - p_address,\ - l->partner.port_params.key,\ - l->partner.port_params.port_priority,\ - l->partner.port_params.port_number,\ - p_state,\ - l->tlv_type_collector_info,\ - l->collector_info_length,\ - l->collector_max_delay,\ - l->tlv_type_terminator,\ - l->terminator_length); + RTE_BOND_LOG(DEBUG, + "LACP: {\n" + " subtype= %02X\n" + " ver_num=%02X\n" + " actor={ tlv=%02X, len=%02X\n" + " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n" + " state={ %s }\n" + " }\n" + " partner={ tlv=%02X, len=%02X\n" + " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n" + " state={ %s }\n" + " }\n" + " collector={info=%02X, length=%02X, max_delay=%04X\n, " + "type_term=%02X, terminator_length = %02X }", + l->subtype, + l->version_number, + l->actor.tlv_type_info, + l->actor.info_length, + l->actor.port_params.system_priority, + a_address, + 
l->actor.port_params.key, + l->actor.port_params.port_priority, + l->actor.port_params.port_number, + a_state, + l->partner.tlv_type_info, + l->partner.info_length, + l->partner.port_params.system_priority, + p_address, + l->partner.port_params.key, + l->partner.port_params.port_priority, + l->partner.port_params.port_number, + p_state, + l->tlv_type_collector_info, + l->collector_info_length, + l->collector_max_delay, + l->tlv_type_terminator, + l->terminator_length); } + #define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu) #else #define BOND_PRINT_LACP(lacpdu) do { } while (0) @@ -200,31 +205,34 @@ show_warnings(uint16_t slave_id) rte_get_tsc_hz() / 1000); if (warnings & WRN_RX_QUEUE_FULL) { - RTE_LOG(DEBUG, PMD, - "Slave %u: failed to enqueue LACP packet into RX ring.\n" - "Receive and transmit functions must be invoked on bonded\n" - "interface at least 10 times per second or LACP will not\n" - "work correctly\n", slave_id); + RTE_BOND_LOG(DEBUG, + "Slave %u: failed to enqueue LACP packet into RX ring.\n" + "Receive and transmit functions must be invoked on bonded" + "interface at least 10 times per second or LACP will notwork correctly", + slave_id); } if (warnings & WRN_TX_QUEUE_FULL) { - RTE_LOG(DEBUG, PMD, - "Slave %u: failed to enqueue LACP packet into TX ring.\n" - "Receive and transmit functions must be invoked on bonded\n" - "interface at least 10 times per second or LACP will not\n" - "work correctly\n", slave_id); + RTE_BOND_LOG(DEBUG, + "Slave %u: failed to enqueue LACP packet into TX ring.\n" + "Receive and transmit functions must be invoked on bonded" + "interface at least 10 times per second or LACP will not work correctly", + slave_id); } if (warnings & WRN_RX_MARKER_TO_FAST) - RTE_LOG(INFO, PMD, "Slave %u: marker to early - ignoring.\n", slave_id); + RTE_BOND_LOG(INFO, "Slave %u: marker to early - ignoring.", + slave_id); if (warnings & WRN_UNKNOWN_SLOW_TYPE) { - RTE_LOG(INFO, PMD, - "Slave %u: ignoring unknown slow protocol frame type", slave_id); + RTE_BOND_LOG(INFO, + "Slave %u: ignoring unknown slow protocol frame type", + slave_id); } if (warnings & WRN_UNKNOWN_MARKER_TYPE) - RTE_LOG(INFO, PMD, "Slave %u: ignoring unknown marker type", slave_id); + RTE_BOND_LOG(INFO, "Slave %u: ignoring unknown marker type", + slave_id); if (warnings & WRN_NOT_LACP_CAPABLE) MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id); @@ -507,8 +515,8 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id) ACTOR_STATE_SET(port, DISTRIBUTING); SM_FLAG_SET(port, NTT); MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n"); - RTE_LOG(INFO, PMD, - "Bond %u: slave id %u distributing started.\n", + RTE_BOND_LOG(INFO, + "Bond %u: slave id %u distributing started.", internals->port_id, slave_id); } } else { @@ -518,8 +526,8 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id) ACTOR_STATE_CLR(port, DISTRIBUTING); SM_FLAG_SET(port, NTT); MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n"); - RTE_LOG(INFO, PMD, - "Bond %u: slave id %u distributing stopped.\n", + RTE_BOND_LOG(INFO, + "Bond %u: slave id %u distributing stopped.", internals->port_id, slave_id); } } @@ -557,7 +565,7 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id) lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool); if (lacp_pkt == NULL) { - RTE_LOG(ERR, PMD, "Failed to allocate LACP packet from pool\n"); + RTE_BOND_LOG(ERR, "Failed to allocate LACP packet from pool"); return; } @@ -1337,7 +1345,7 @@ bond_8023ad_setup_validate(uint16_t port_id, conf->tx_period_ms == 0 || conf->rx_marker_period_ms == 0 || 
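The RTE_BOND_LOG conversions in these hunks route messages through a dynamic log type (bond_logtype) instead of the static PMD type. The registration side lives elsewhere in the PMD and is shown here only as a hedged sketch of the standard dynamic-logging pattern; the log name string is an assumption:

	int bond_logtype;

	RTE_INIT(bond_init_log)
	{
		/* "pmd.net.bond" is an assumed name for this sketch. */
		bond_logtype = rte_log_register("pmd.net.bond");
		if (bond_logtype >= 0)
			rte_log_set_level(bond_logtype, RTE_LOG_NOTICE);
	}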
conf->update_timeout_ms == 0) { - RTE_LOG(ERR, PMD, "given mode 4 configuration is invalid\n"); + RTE_BOND_LOG(ERR, "given mode 4 configuration is invalid"); return -EINVAL; } } diff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c index 3f9945b3..c3891c7e 100644 --- a/drivers/net/bonding/rte_eth_bond_alb.c +++ b/drivers/net/bonding/rte_eth_bond_alb.c @@ -60,8 +60,8 @@ bond_mode_alb_enable(struct rte_eth_dev *bond_dev) 0, data_size, socket_id); if (internals->mode6.mempool == NULL) { - RTE_LOG(ERR, PMD, "%s: Failed to initialize ALB mempool.\n", - bond_dev->device->name); + RTE_BOND_LOG(ERR, "%s: Failed to initialize ALB mempool.\n", + bond_dev->device->name); goto mempool_alloc_error; } } diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c index f854b737..8bc04cfd 100644 --- a/drivers/net/bonding/rte_eth_bond_api.c +++ b/drivers/net/bonding/rte_eth_bond_api.c @@ -194,7 +194,8 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id) uint16_t first; bonded_eth_dev = &rte_eth_devices[bonded_port_id]; - if (bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter == 0) + if ((bonded_eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) == 0) return 0; internals = bonded_eth_dev->data->dev_private; @@ -211,9 +212,12 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id) for (i = 0, mask = 1; i < RTE_BITMAP_SLAB_BIT_SIZE; i ++, mask <<= 1) { - if (unlikely(slab & mask)) + if (unlikely(slab & mask)) { + uint16_t vlan_id = pos + i; + res = rte_eth_dev_vlan_filter(slave_port_id, - (uint16_t)pos, 1); + vlan_id, 1); + } } found = rte_bitmap_scan(internals->vlan_filter_bmp, &pos, &slab); @@ -222,6 +226,49 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id) return res; } +static int +slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals) +{ + struct rte_flow *flow; + struct rte_flow_error ferror; + uint16_t slave_port_id = internals->slaves[slave_id].port_id; + + if (internals->flow_isolated_valid != 0) { + rte_eth_dev_stop(slave_port_id); + if (rte_flow_isolate(slave_port_id, internals->flow_isolated, + &ferror)) { + RTE_BOND_LOG(ERR, "rte_flow_isolate failed for slave" + " %d: %s", slave_id, ferror.message ? + ferror.message : "(no stated reason)"); + return -1; + } + } + TAILQ_FOREACH(flow, &internals->flow_list, next) { + flow->flows[slave_id] = rte_flow_create(slave_port_id, + &flow->fd->attr, + flow->fd->items, + flow->fd->actions, + &ferror); + if (flow->flows[slave_id] == NULL) { + RTE_BOND_LOG(ERR, "Cannot create flow for slave" + " %d: %s", slave_id, + ferror.message ? 
ferror.message : + "(no stated reason)"); + /* Destroy successful bond flows from the slave */ + TAILQ_FOREACH(flow, &internals->flow_list, next) { + if (flow->flows[slave_id] != NULL) { + rte_flow_destroy(slave_port_id, + flow->flows[slave_id], + &ferror); + flow->flows[slave_id] = NULL; + } + } + return -1; + } + } + return 0; +} + static int __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) { @@ -284,6 +331,8 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) /* Take the first dev's offload capabilities */ internals->rx_offload_capa = dev_info.rx_offload_capa; internals->tx_offload_capa = dev_info.tx_offload_capa; + internals->rx_queue_offload_capa = dev_info.rx_queue_offload_capa; + internals->tx_queue_offload_capa = dev_info.tx_queue_offload_capa; internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads; /* Inherit first slave's max rx packet size */ @@ -292,16 +341,10 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) } else { internals->rx_offload_capa &= dev_info.rx_offload_capa; internals->tx_offload_capa &= dev_info.tx_offload_capa; + internals->rx_queue_offload_capa &= dev_info.rx_queue_offload_capa; + internals->tx_queue_offload_capa &= dev_info.tx_queue_offload_capa; internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads; - if (link_properties_valid(bonded_eth_dev, - &slave_eth_dev->data->dev_link) != 0) { - RTE_BOND_LOG(ERR, "Invalid link properties for slave %d" - " in bonding mode %d", slave_port_id, - internals->mode); - return -1; - } - /* RETA size is GCD of all slaves RETA sizes, so, if all sizes will be * the power of 2, the lower one is GCD */ @@ -316,6 +359,19 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &= internals->flow_type_rss_offloads; + if (slave_rte_flow_prepare(internals->slave_count, internals) != 0) { + RTE_BOND_LOG(ERR, "Failed to prepare new slave flows: port=%d", + slave_port_id); + return -1; + } + + /* Add additional MAC addresses to the slave */ + if (slave_add_mac_addresses(bonded_eth_dev, slave_port_id) != 0) { + RTE_BOND_LOG(ERR, "Failed to add mac address(es) to slave %hu", + slave_port_id); + return -1; + } + internals->slave_count++; if (bonded_eth_dev->data->dev_started) { @@ -330,7 +386,7 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) /* Add slave details to bonded device */ slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE; - /* Update all slave devices MACs*/ + /* Update all slave devices MACs */ mac_address_slaves_update(bonded_eth_dev); /* Register link status change callback with bonded device pointer as @@ -348,11 +404,6 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) !internals->user_defined_primary_port) bond_ethdev_primary_set(internals, slave_port_id); - - if (find_slave_by_id(internals->active_slaves, - internals->active_slave_count, - slave_port_id) == internals->active_slave_count) - activate_slave(bonded_eth_dev, slave_port_id); } } @@ -393,6 +444,8 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id, struct rte_eth_dev *bonded_eth_dev; struct bond_dev_private *internals; struct rte_eth_dev *slave_eth_dev; + struct rte_flow_error flow_error; + struct rte_flow *flow; int i, slave_idx; bonded_eth_dev = &rte_eth_devices[bonded_port_id]; @@ -432,6 +485,21 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id, 
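With slave_rte_flow_prepare() wired into the slave-add path above, flows created on the bond port are replayed onto any slave added later; hypothetical usage, with the port ids and rule arrays supplied by the caller:

	static int
	bond_flow_then_slave_example(uint16_t bond_port, uint16_t new_slave,
				     const struct rte_flow_attr *attr,
				     const struct rte_flow_item pattern[],
				     const struct rte_flow_action actions[])
	{
		struct rte_flow_error err;

		if (rte_flow_create(bond_port, attr, pattern,
				    actions, &err) == NULL)
			return -1;
		/* The rule above is re-created on new_slave as part of
		 * the add, via slave_rte_flow_prepare(). */
		return rte_eth_bond_slave_add(bond_port, new_slave);
	}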
rte_eth_dev_default_mac_addr_set(slave_port_id, &(internals->slaves[slave_idx].persisted_mac_addr)); + /* remove additional MAC addresses from the slave */ + slave_remove_mac_addresses(bonded_eth_dev, slave_port_id); + + /* + * Remove bond device flows from slave device. + * Note: don't restore flow isolate mode. + */ + TAILQ_FOREACH(flow, &internals->flow_list, next) { + if (flow->flows[slave_idx] != NULL) { + rte_flow_destroy(slave_port_id, flow->flows[slave_idx], + &flow_error); + flow->flows[slave_idx] = NULL; + } + } + slave_eth_dev = &rte_eth_devices[slave_port_id]; slave_remove(internals, slave_eth_dev); slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE); @@ -458,6 +526,8 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id, if (internals->slave_count == 0) { internals->rx_offload_capa = 0; internals->tx_offload_capa = 0; + internals->rx_queue_offload_capa = 0; + internals->tx_queue_offload_capa = 0; internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK; internals->reta_size = 0; internals->candidate_max_rx_pktlen = 0; @@ -643,9 +713,21 @@ rte_eth_bond_mac_address_reset(uint16_t bonded_port_id) internals->user_defined_mac = 0; if (internals->slave_count > 0) { + int slave_port; + /* Get the primary slave location based on the primary port + * number as, while slave_add(), we will keep the primary + * slave based on slave_count,but not based on the primary port. + */ + for (slave_port = 0; slave_port < internals->slave_count; + slave_port++) { + if (internals->slaves[slave_port].port_id == + internals->primary_port) + break; + } + /* Set MAC Address of Bonded Device */ if (mac_address_set(bonded_eth_dev, - &internals->slaves[internals->primary_port].persisted_mac_addr) + &internals->slaves[slave_port].persisted_mac_addr) != 0) { RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device"); return -1; diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c index 27d3101b..b60fde6a 100644 --- a/drivers/net/bonding/rte_eth_bond_args.c +++ b/drivers/net/bonding/rte_eth_bond_args.c @@ -32,7 +32,7 @@ find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr) struct rte_pci_addr *eth_pci_addr; unsigned i; - for (i = 0; i < rte_eth_dev_count(); i++) { + RTE_ETH_FOREACH_DEV(i) { pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[i]); eth_pci_addr = &pci_dev->addr; @@ -50,7 +50,7 @@ find_port_id_by_dev_name(const char *name) { unsigned i; - for (i = 0; i < rte_eth_dev_count(); i++) { + RTE_ETH_FOREACH_DEV(i) { if (rte_eth_devices[i].data == NULL) continue; @@ -92,7 +92,7 @@ parse_port_id(const char *port_str) if (pci_bus->parse(port_str, &dev_addr) == 0) { dev = pci_bus->find_device(NULL, bond_pci_addr_cmp, &dev_addr); if (dev == NULL) { - RTE_LOG(ERR, PMD, "unable to find PCI device\n"); + RTE_BOND_LOG(ERR, "unable to find PCI device"); return -1; } port_id = find_port_id_by_pci_addr(&dev_addr); @@ -134,7 +134,8 @@ bond_ethdev_parse_slave_port_kvarg(const char *key, if (strcmp(key, PMD_BOND_SLAVE_PORT_KVARG) == 0) { int port_id = parse_port_id(value); if (port_id < 0) { - RTE_BOND_LOG(ERR, "Invalid slave port value (%s) specified", value); + RTE_BOND_LOG(ERR, "Invalid slave port value (%s) specified", + value); return -1; } else slave_ports->slaves[slave_ports->slave_count++] = @@ -244,7 +245,7 @@ bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused, if (primary_slave_port_id < 0) return -1; - *(uint8_t *)extra_args = (uint8_t)primary_slave_port_id; + *(uint16_t *)extra_args = (uint16_t)primary_slave_port_id; 
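The last hunk above fixes a real truncation bug: port ids are 16-bit, so storing the parsed primary-slave id through a uint8_t pointer silently wrapped ids above 255. A small demonstration of the failure mode:

	static void
	primary_kvarg_truncation_demo(void)
	{
		uint16_t port_id = 300;
		uint8_t old_store = (uint8_t)port_id;   /* 44: wrong slave */
		uint16_t new_store = (uint16_t)port_id; /* 300: correct */

		(void)old_store;
		(void)new_store;
	}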
return 0; } diff --git a/drivers/net/bonding/rte_eth_bond_flow.c b/drivers/net/bonding/rte_eth_bond_flow.c new file mode 100644 index 00000000..31e4bcae --- /dev/null +++ b/drivers/net/bonding/rte_eth_bond_flow.c @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 Mellanox Technologies, Ltd + */ + +#include + +#include +#include +#include + +#include "rte_eth_bond_private.h" + +static struct rte_flow * +bond_flow_alloc(int numa_node, const struct rte_flow_attr *attr, + const struct rte_flow_item *items, + const struct rte_flow_action *actions) +{ + struct rte_flow *flow; + size_t fdsz; + + fdsz = rte_flow_copy(NULL, 0, attr, items, actions); + flow = rte_zmalloc_socket(NULL, sizeof(struct rte_flow) + fdsz, + RTE_CACHE_LINE_SIZE, numa_node); + if (unlikely(flow == NULL)) { + RTE_BOND_LOG(ERR, "Could not allocate new flow"); + return NULL; + } + flow->fd = (void *)((uintptr_t)flow + sizeof(*flow)); + if (unlikely(rte_flow_copy(flow->fd, fdsz, attr, items, actions) != + fdsz)) { + RTE_BOND_LOG(ERR, "Failed to copy flow description"); + rte_free(flow); + return NULL; + } + return flow; +} + +static void +bond_flow_release(struct rte_flow **flow) +{ + rte_free(*flow); + *flow = NULL; +} + +static int +bond_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + int i; + int ret; + + for (i = 0; i < internals->slave_count; i++) { + ret = rte_flow_validate(internals->slaves[i].port_id, attr, + patterns, actions, err); + if (ret) { + RTE_BOND_LOG(ERR, "Operation rte_flow_validate failed" + " for slave %d with error %d", i, ret); + return ret; + } + } + return 0; +} + +static struct rte_flow * +bond_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_flow *flow; + int i; + + flow = bond_flow_alloc(dev->data->numa_node, attr, patterns, actions); + if (unlikely(flow == NULL)) { + rte_flow_error_set(err, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, rte_strerror(ENOMEM)); + return NULL; + } + for (i = 0; i < internals->slave_count; i++) { + flow->flows[i] = rte_flow_create(internals->slaves[i].port_id, + attr, patterns, actions, err); + if (unlikely(flow->flows[i] == NULL)) { + RTE_BOND_LOG(ERR, "Failed to create flow on slave %d", + i); + goto err; + } + } + TAILQ_INSERT_TAIL(&internals->flow_list, flow, next); + return flow; +err: + /* Destroy all slaves flows. 
*/ + for (i = 0; i < internals->slave_count; i++) { + if (flow->flows[i] != NULL) + rte_flow_destroy(internals->slaves[i].port_id, + flow->flows[i], err); + } + bond_flow_release(&flow); + return NULL; +} + +static int +bond_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + int i; + int ret = 0; + + for (i = 0; i < internals->slave_count; i++) { + int lret; + + if (unlikely(flow->flows[i] == NULL)) + continue; + lret = rte_flow_destroy(internals->slaves[i].port_id, + flow->flows[i], err); + if (unlikely(lret != 0)) { + RTE_BOND_LOG(ERR, "Failed to destroy flow on slave %d:" + " %d", i, lret); + ret = lret; + } + } + TAILQ_REMOVE(&internals->flow_list, flow, next); + bond_flow_release(&flow); + return ret; +} + +static int +bond_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_flow *flow; + void *tmp; + int ret = 0; + int lret; + + /* Destroy all bond flows from its slaves instead of flushing them to + * keep the LACP flow or any other external flows. + */ + TAILQ_FOREACH_SAFE(flow, &internals->flow_list, next, tmp) { + lret = bond_flow_destroy(dev, flow, err); + if (unlikely(lret != 0)) + ret = lret; + } + if (unlikely(ret != 0)) + RTE_BOND_LOG(ERR, "Failed to flush flow in all slaves"); + return ret; +} + +static int +bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, + const struct rte_flow_action *action, + struct rte_flow_query_count *count, + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_flow_query_count slave_count; + int i; + int ret; + + count->bytes = 0; + count->hits = 0; + rte_memcpy(&slave_count, count, sizeof(slave_count)); + for (i = 0; i < internals->slave_count; i++) { + ret = rte_flow_query(internals->slaves[i].port_id, + flow->flows[i], action, + &slave_count, err); + if (unlikely(ret != 0)) { + RTE_BOND_LOG(ERR, "Failed to query flow on" + " slave %d: %d", i, ret); + return ret; + } + count->bytes += slave_count.bytes; + count->hits += slave_count.hits; + slave_count.bytes = 0; + slave_count.hits = 0; + } + return 0; +} + +static int +bond_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, + const struct rte_flow_action *action, void *arg, + struct rte_flow_error *err) +{ + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_COUNT: + return bond_flow_query_count(dev, flow, action, arg, err); + default: + return rte_flow_error_set(err, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, arg, + rte_strerror(ENOTSUP)); + } +} + +static int +bond_flow_isolate(struct rte_eth_dev *dev, int set, + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + int i; + int ret; + + for (i = 0; i < internals->slave_count; i++) { + ret = rte_flow_isolate(internals->slaves[i].port_id, set, err); + if (unlikely(ret != 0)) { + RTE_BOND_LOG(ERR, "Operation rte_flow_isolate failed" + " for slave %d with error %d", i, ret); + internals->flow_isolated_valid = 0; + return ret; + } + } + internals->flow_isolated = set; + internals->flow_isolated_valid = 1; + return 0; +} + +const struct rte_flow_ops bond_flow_ops = { + .validate = bond_flow_validate, + .create = bond_flow_create, + .destroy = bond_flow_destroy, + .flush = bond_flow_flush, + .query = bond_flow_query, + .isolate = bond_flow_isolate, +}; diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c 
b/drivers/net/bonding/rte_eth_bond_pmd.c index c34c3251..58f7377c 100644 --- a/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "rte_eth_bond.h" #include "rte_eth_bond_private.h" @@ -24,6 +25,7 @@ #define REORDER_PERIOD_MS 10 #define DEFAULT_POLLING_INTERVAL_10_MS (10) +#define BOND_MAX_MAC_ADDRS 16 #define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port) @@ -570,34 +572,21 @@ update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator) } #ifdef RTE_LIBRTE_BOND_DEBUG_ALB -#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \ - RTE_LOG(DEBUG, PMD, \ - "%s " \ - "port:%d " \ - "SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \ - "SrcIP:%s " \ - "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \ - "DstIP:%s " \ - "%s " \ - "%d\n", \ - info, \ - port, \ - eth_h->s_addr.addr_bytes[0], \ - eth_h->s_addr.addr_bytes[1], \ - eth_h->s_addr.addr_bytes[2], \ - eth_h->s_addr.addr_bytes[3], \ - eth_h->s_addr.addr_bytes[4], \ - eth_h->s_addr.addr_bytes[5], \ - src_ip, \ - eth_h->d_addr.addr_bytes[0], \ - eth_h->d_addr.addr_bytes[1], \ - eth_h->d_addr.addr_bytes[2], \ - eth_h->d_addr.addr_bytes[3], \ - eth_h->d_addr.addr_bytes[4], \ - eth_h->d_addr.addr_bytes[5], \ - dst_ip, \ - arp_op, \ - ++burstnumber) +#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \ + rte_log(RTE_LOG_DEBUG, bond_logtype, \ + "%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \ + "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \ + info, \ + port, \ + eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \ + eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \ + eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \ + src_ip, \ + eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \ + eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \ + eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \ + dst_ip, \ + arp_op, ++burstnumber) #endif static void @@ -617,7 +606,7 @@ mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h, uint16_t offset = get_vlan_offset(eth_h, ðer_type); #ifdef RTE_LIBRTE_BOND_DEBUG_ALB - snprintf(buf, 16, "%s", info); + strlcpy(buf, info, 16); #endif if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) { @@ -1138,7 +1127,8 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) /* Allocate new packet to send ARP update on current slave */ upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool); if (upd_pkt == NULL) { - RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n"); + RTE_BOND_LOG(ERR, + "Failed to allocate ARP packet from pool"); continue; } pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr) @@ -1560,12 +1550,12 @@ mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr) struct ether_addr *mac_addr; if (eth_dev == NULL) { - RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__); + RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified"); return -1; } if (dst_mac_addr == NULL) { - RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__); + RTE_BOND_LOG(ERR, "NULL pointer MAC specified"); return -1; } @@ -1599,6 +1589,61 @@ mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr) return 0; } +static const struct ether_addr null_mac_addr; + +/* + * Add additional MAC addresses to the slave + */ +int +slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev, + uint16_t slave_port_id) +{ + 
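	/*
	 * Slot 0 of mac_addrs holds the bonded device's primary MAC,
	 * which is programmed separately through mac_address_set(), so
	 * secondary addresses start at index 1. A zeroed entry marks
	 * the end of the configured addresses; on any failure the
	 * addresses added so far are removed again, leaving the slave
	 * unchanged.
	 */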
int i, ret; + struct ether_addr *mac_addr; + + for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) { + mac_addr = &bonded_eth_dev->data->mac_addrs[i]; + if (is_same_ether_addr(mac_addr, &null_mac_addr)) + break; + + ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0); + if (ret < 0) { + /* rollback */ + for (i--; i > 0; i--) + rte_eth_dev_mac_addr_remove(slave_port_id, + &bonded_eth_dev->data->mac_addrs[i]); + return ret; + } + } + + return 0; +} + +/* + * Remove additional MAC addresses from the slave + */ +int +slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev, + uint16_t slave_port_id) +{ + int i, rc, ret; + struct ether_addr *mac_addr; + + rc = 0; + for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) { + mac_addr = &bonded_eth_dev->data->mac_addrs[i]; + if (is_same_ether_addr(mac_addr, &null_mac_addr)) + break; + + ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr); + /* save only the first error */ + if (ret < 0 && rc == 0) + rc = ret; + } + + return rc; +} + int mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev) { @@ -1686,9 +1731,9 @@ bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode) if (internals->mode4.dedicated_queues.enabled == 0) { eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad; eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad; - RTE_LOG(WARNING, PMD, + RTE_BOND_LOG(WARNING, "Using mode 4, it is necessary to do TX burst " - "and RX burst at least every 100ms.\n"); + "and RX burst at least every 100ms."); } else { /* Use flow director's optimization */ eth_dev->rx_pkt_burst = @@ -1818,8 +1863,13 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, bonded_eth_dev->data->dev_conf.rxmode.mq_mode; } - slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter = - bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter; + if (bonded_eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) + slave_eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_VLAN_FILTER; + else + slave_eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_VLAN_FILTER; nb_rx_queues = bonded_eth_dev->data->nb_rx_queues; nb_tx_queues = bonded_eth_dev->data->nb_tx_queues; @@ -1831,12 +1881,20 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, } } + errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id, + bonded_eth_dev->data->mtu); + if (errval != 0 && errval != -ENOTSUP) { + RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)", + slave_eth_dev->data->port_id, errval); + return errval; + } + /* Configure device */ errval = rte_eth_dev_configure(slave_eth_dev->data->port_id, nb_rx_queues, nb_tx_queues, &(slave_eth_dev->data->dev_conf)); if (errval != 0) { - RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err (%d)", + RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)", slave_eth_dev->data->port_id, errval); return errval; } @@ -1918,10 +1976,10 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, &internals->reta_conf[0], internals->slaves[i].reta_size); if (errval != 0) { - RTE_LOG(WARNING, PMD, - "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)." - " RSS Configuration for bonding may be inconsistent.\n", - slave_eth_dev->data->port_id, errval); + RTE_BOND_LOG(WARNING, + "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)." 
+ " RSS Configuration for bonding may be inconsistent.", + slave_eth_dev->data->port_id, errval); } break; } @@ -1950,10 +2008,19 @@ slave_remove(struct bond_dev_private *internals, slave_eth_dev->data->port_id) break; - if (i < (internals->slave_count - 1)) + if (i < (internals->slave_count - 1)) { + struct rte_flow *flow; + memmove(&internals->slaves[i], &internals->slaves[i + 1], sizeof(internals->slaves[0]) * (internals->slave_count - i - 1)); + TAILQ_FOREACH(flow, &internals->flow_list, next) { + memmove(&flow->flows[i], &flow->flows[i + 1], + sizeof(flow->flows[0]) * + (internals->slave_count - i - 1)); + flow->flows[internals->slave_count - 1] = NULL; + } + } internals->slave_count--; @@ -2026,7 +2093,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) if (internals->slave_count == 0) { RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices"); - return -1; + goto out_err; } if (internals->user_defined_mac == 0) { @@ -2037,19 +2104,15 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) new_mac_addr = &internals->slaves[i].persisted_mac_addr; if (new_mac_addr == NULL) - return -1; + goto out_err; if (mac_address_set(eth_dev, new_mac_addr) != 0) { RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address", eth_dev->data->port_id); - return -1; + goto out_err; } } - /* Update all slave devices MACs*/ - if (mac_address_slaves_update(eth_dev) != 0) - return -1; - /* If bonded device is configure in promiscuous mode then re-apply config */ if (internals->promiscuous_en) bond_ethdev_promiscuous_enable(eth_dev); @@ -2073,7 +2136,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) "bonded port (%d) failed to reconfigure slave device (%d)", eth_dev->data->port_id, internals->slaves[i].port_id); - return -1; + goto out_err; } /* We will need to poll for link status if any slave doesn't * support interrupts @@ -2081,6 +2144,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) if (internals->slaves[i].link_status_poll_enabled) internals->link_status_polling_enabled = 1; } + /* start polling if needed */ if (internals->link_status_polling_enabled) { rte_eal_alarm_set( @@ -2089,6 +2153,10 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) (void *)&rte_eth_devices[internals->port_id]); } + /* Update all slave devices MACs*/ + if (mac_address_slaves_update(eth_dev) != 0) + goto out_err; + if (internals->user_defined_primary_port) bond_ethdev_primary_set(internals, internals->primary_port); @@ -2100,6 +2168,10 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) bond_tlb_enable(internals); return 0; + +out_err: + eth_dev->data->dev_started = 0; + return -1; } static void @@ -2157,7 +2229,6 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev) tlb_last_obytets[internals->active_slaves[i]] = 0; } - internals->active_slave_count = 0; internals->link_status_polling_enabled = 0; for (i = 0; i < internals->slave_count; i++) internals->slaves[i].last_link_status = 0; @@ -2172,20 +2243,22 @@ bond_ethdev_close(struct rte_eth_dev *dev) struct bond_dev_private *internals = dev->data->dev_private; uint8_t bond_port_id = internals->port_id; int skipped = 0; + struct rte_flow_error ferror; - RTE_LOG(INFO, EAL, "Closing bonded device %s\n", dev->device->name); + RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name); while (internals->slave_count != skipped) { uint16_t port_id = internals->slaves[skipped].port_id; rte_eth_dev_stop(port_id); if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) { - RTE_LOG(ERR, EAL, - "Failed to remove port %d from bonded device " - "%s\n", port_id, 
dev->device->name); + RTE_BOND_LOG(ERR, + "Failed to remove port %d from bonded device %s", + port_id, dev->device->name); skipped++; } } + bond_flow_ops.flush(dev, &ferror); bond_ethdev_free_queues(dev); rte_bitmap_reset(internals->vlan_filter_bmp); } @@ -2201,7 +2274,7 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) uint16_t max_nb_rx_queues = UINT16_MAX; uint16_t max_nb_tx_queues = UINT16_MAX; - dev_info->max_mac_addrs = 1; + dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS; dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ? internals->candidate_max_rx_pktlen : @@ -2244,6 +2317,8 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->rx_offload_capa = internals->rx_offload_capa; dev_info->tx_offload_capa = internals->tx_offload_capa; + dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa; dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads; dev_info->reta_size = internals->reta_size; @@ -2269,9 +2344,9 @@ bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) res = rte_eth_dev_vlan_filter(port_id, vlan_id, on); if (res == ENOTSUP) - RTE_LOG(WARNING, PMD, - "Setting VLAN filter on slave port %u not supported.\n", - port_id); + RTE_BOND_LOG(WARNING, + "Setting VLAN filter on slave port %u not supported.", + port_id); } rte_spinlock_unlock(&internals->lock); @@ -2633,6 +2708,11 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, if (!valid_slave) return rc; + /* Synchronize lsc callback parallel calls either by real link event + * from the slaves PMDs or by the bonding PMD itself. + */ + rte_spinlock_lock(&internals->lsc_lock); + /* Search for port in active port list */ active_pos = find_slave_by_id(internals->active_slaves, internals->active_slave_count, port_id); @@ -2640,7 +2720,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, rte_eth_link_get_nowait(port_id, &link); if (link.link_status) { if (active_pos < internals->active_slave_count) - return rc; + goto link_update; /* if no active slave ports then set this port to be primary port */ if (internals->active_slave_count < 1) { @@ -2652,6 +2732,17 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, mac_address_slaves_update(bonded_eth_dev); } + /* check link state properties if bonded link is up*/ + if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) { + if (link_properties_valid(bonded_eth_dev, &link) != 0) + RTE_BOND_LOG(ERR, "Invalid link properties " + "for slave %d in bonding mode %d", + port_id, internals->mode); + } else { + /* inherit slave link properties */ + link_properties_set(bonded_eth_dev, &link); + } + activate_slave(bonded_eth_dev, port_id); /* If user has defined the primary port then default to using it */ @@ -2660,7 +2751,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, bond_ethdev_primary_set(internals, port_id); } else { if (active_pos == internals->active_slave_count) - return rc; + goto link_update; /* Remove from active slave list */ deactivate_slave(bonded_eth_dev, port_id); @@ -2679,6 +2770,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, } } +link_update: /** * Update bonded device link properties after any change to active * slaves @@ -2713,7 +2805,10 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, NULL); 
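For context on the flow ops wired up earlier: an application programs flows through the bonded port exactly as it would on a plain port, and the bonding PMD replicates each rule onto every slave (and tears all replicas down on destroy). A minimal usage sketch, assuming a configured bonded port and flow-capable slave PMDs; the helper name is invented:

#include <stddef.h>
#include <stdint.h>

#include <rte_flow.h>

/* Hypothetical helper: steer all ingress traffic on the bonded port
 * to one Rx queue; the bonding PMD validates and then creates the
 * rule on every slave behind the bond.
 */
static struct rte_flow *
bond_steer_to_queue(uint16_t bond_port_id, uint16_t queue_idx)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_idx };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	if (rte_flow_validate(bond_port_id, &attr, pattern, actions,
			      &error) != 0)
		return NULL;
	return rte_flow_create(bond_port_id, &attr, pattern, actions,
			       &error);
}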
} } - return 0; + + rte_spinlock_unlock(&internals->lsc_lock); + + return rc; } static int @@ -2851,11 +2946,88 @@ bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return 0; } -static void +static int bond_ethdev_mac_address_set(struct rte_eth_dev *dev, struct ether_addr *addr) { - if (mac_address_set(dev, addr)) + if (mac_address_set(dev, addr)) { RTE_BOND_LOG(ERR, "Failed to update MAC address"); + return -EINVAL; + } + + return 0; +} + +static int +bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused, + enum rte_filter_type type, enum rte_filter_op op, void *arg) +{ + if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) { + *(const void **)arg = &bond_flow_ops; + return 0; + } + return -ENOTSUP; +} + +static int +bond_ethdev_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + __rte_unused uint32_t index, uint32_t vmdq) +{ + struct rte_eth_dev *slave_eth_dev; + struct bond_dev_private *internals = dev->data->dev_private; + int ret, i; + + rte_spinlock_lock(&internals->lock); + + for (i = 0; i < internals->slave_count; i++) { + slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id]; + if (*slave_eth_dev->dev_ops->mac_addr_add == NULL || + *slave_eth_dev->dev_ops->mac_addr_remove == NULL) { + ret = -ENOTSUP; + goto end; + } + } + + for (i = 0; i < internals->slave_count; i++) { + ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id, + mac_addr, vmdq); + if (ret < 0) { + /* rollback */ + for (i--; i >= 0; i--) + rte_eth_dev_mac_addr_remove( + internals->slaves[i].port_id, mac_addr); + goto end; + } + } + + ret = 0; +end: + rte_spinlock_unlock(&internals->lock); + return ret; +} + +static void +bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct rte_eth_dev *slave_eth_dev; + struct bond_dev_private *internals = dev->data->dev_private; + int i; + + rte_spinlock_lock(&internals->lock); + + for (i = 0; i < internals->slave_count; i++) { + slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id]; + if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL) + goto end; + } + + struct ether_addr *mac_addr = &dev->data->mac_addrs[index]; + + for (i = 0; i < internals->slave_count; i++) + rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id, + mac_addr); + +end: + rte_spinlock_unlock(&internals->lock); } const struct eth_dev_ops default_dev_ops = { @@ -2879,7 +3051,10 @@ const struct eth_dev_ops default_dev_ops = { .rss_hash_update = bond_ethdev_rss_hash_update, .rss_hash_conf_get = bond_ethdev_rss_hash_conf_get, .mtu_set = bond_ethdev_mtu_set, - .mac_addr_set = bond_ethdev_mac_address_set + .mac_addr_set = bond_ethdev_mac_address_set, + .mac_addr_add = bond_ethdev_mac_addr_add, + .mac_addr_remove = bond_ethdev_mac_addr_remove, + .filter_ctrl = bond_filter_ctrl }; static int @@ -2906,10 +3081,13 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) eth_dev->data->nb_rx_queues = (uint16_t)1; eth_dev->data->nb_tx_queues = (uint16_t)1; - eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0, - socket_id); + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN * + BOND_MAX_MAC_ADDRS, 0, socket_id); if (eth_dev->data->mac_addrs == NULL) { - RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs"); + RTE_BOND_LOG(ERR, + "Failed to allocate %u bytes needed to store MAC addresses", + ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS); goto err; } @@ -2917,6 +3095,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) eth_dev->data->dev_flags = 
RTE_ETH_DEV_INTR_LSC; rte_spinlock_init(&internals->lock); + rte_spinlock_init(&internals->lsc_lock); internals->port_id = eth_dev->data->port_id; internals->mode = BONDING_MODE_INVALID; @@ -2936,6 +3115,8 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) internals->active_slave_count = 0; internals->rx_offload_capa = 0; internals->tx_offload_capa = 0; + internals->rx_queue_offload_capa = 0; + internals->tx_queue_offload_capa = 0; internals->candidate_max_rx_pktlen = 0; internals->max_rx_pktlen = 0; @@ -2945,10 +3126,13 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) memset(internals->active_slaves, 0, sizeof(internals->active_slaves)); memset(internals->slaves, 0, sizeof(internals->slaves)); + TAILQ_INIT(&internals->flow_list); + internals->flow_isolated_valid = 0; + /* Set mode 4 default configuration */ bond_mode_8023ad_setup(eth_dev, NULL); if (bond_ethdev_mode_set(eth_dev, mode)) { - RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d", + RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d", eth_dev->data->port_id, mode); goto err; } @@ -2959,7 +3143,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) RTE_CACHE_LINE_SIZE); if (internals->vlan_filter_bmpmem == NULL) { RTE_BOND_LOG(ERR, - "Failed to allocate vlan bitmap for bonded device %u\n", + "Failed to allocate vlan bitmap for bonded device %u", eth_dev->data->port_id); goto err; } @@ -2968,7 +3152,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) internals->vlan_filter_bmpmem, vlan_filter_bmp_size); if (internals->vlan_filter_bmp == NULL) { RTE_BOND_LOG(ERR, - "Failed to init vlan bitmap for bonded device %u\n", + "Failed to init vlan bitmap for bonded device %u", eth_dev->data->port_id); rte_free(internals->vlan_filter_bmpmem); goto err; @@ -2994,12 +3178,27 @@ bond_probe(struct rte_vdev_device *dev) uint8_t bonding_mode, socket_id/*, agg_mode*/; int arg_count, port_id; uint8_t agg_mode; + struct rte_eth_dev *eth_dev; if (!dev) return -EINVAL; name = rte_vdev_device_name(dev); - RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name); + RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(dev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + RTE_BOND_LOG(ERR, "Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &default_dev_ops; + eth_dev->device = &dev->device; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), pmd_bond_init_valid_arguments); @@ -3011,13 +3210,13 @@ bond_probe(struct rte_vdev_device *dev) if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG, &bond_ethdev_parse_slave_mode_kvarg, &bonding_mode) != 0) { - RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n", + RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s", name); goto parse_error; } } else { - RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded " - "device %s\n", name); + RTE_BOND_LOG(ERR, "Mode must be specified only once for bonded " + "device %s", name); goto parse_error; } @@ -3027,13 +3226,13 @@ bond_probe(struct rte_vdev_device *dev) if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG, &bond_ethdev_parse_socket_id_kvarg, &socket_id) != 0) { - RTE_LOG(ERR, EAL, "Invalid socket Id specified for " - "bonded device %s\n", name); + RTE_BOND_LOG(ERR, "Invalid socket Id specified for " + "bonded device %s", name); goto parse_error; } } else if 
(arg_count > 1) { - RTE_LOG(ERR, EAL, "Socket Id can be specified only once for " - "bonded device %s\n", name); + RTE_BOND_LOG(ERR, "Socket Id can be specified only once for " + "bonded device %s", name); goto parse_error; } else { socket_id = rte_socket_id(); @@ -3044,21 +3243,22 @@ bond_probe(struct rte_vdev_device *dev) /* Create link bonding eth device */ port_id = bond_alloc(dev, bonding_mode); if (port_id < 0) { - RTE_LOG(ERR, EAL, "Failed to create socket %s in mode %u on " - "socket %u.\n", name, bonding_mode, socket_id); + RTE_BOND_LOG(ERR, "Failed to create socket %s in mode %u on " + "socket %u.", name, bonding_mode, socket_id); goto parse_error; } internals = rte_eth_devices[port_id].data->dev_private; internals->kvlist = kvlist; + rte_eth_dev_probing_finish(&rte_eth_devices[port_id]); if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) { if (rte_kvargs_process(kvlist, PMD_BOND_AGG_MODE_KVARG, &bond_ethdev_parse_slave_agg_mode_kvarg, &agg_mode) != 0) { - RTE_LOG(ERR, EAL, - "Failed to parse agg selection mode for bonded device %s\n", + RTE_BOND_LOG(ERR, + "Failed to parse agg selection mode for bonded device %s", name); goto parse_error; } @@ -3070,8 +3270,8 @@ bond_probe(struct rte_vdev_device *dev) rte_eth_bond_8023ad_agg_selection_set(port_id, AGG_STABLE); } - RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on " - "socket %u.\n", name, port_id, bonding_mode, socket_id); + RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on " + "socket %u.", name, port_id, bonding_mode, socket_id); return 0; parse_error: @@ -3091,7 +3291,7 @@ bond_remove(struct rte_vdev_device *dev) return -EINVAL; name = rte_vdev_device_name(dev); - RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name); + RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name); /* now free all data allocation - for eth_dev structure, * dummy pci driver and internal (private) data @@ -3118,6 +3318,10 @@ bond_remove(struct rte_vdev_device *dev) eth_dev->tx_pkt_burst = NULL; internals = eth_dev->data->dev_private; + /* Try to release mempool used in mode6. If the bond + * device is not mode6, free the NULL is not problem. 
+ */ + rte_mempool_free(internals->mode6.mempool); rte_bitmap_free(internals->vlan_filter_bmp); rte_free(internals->vlan_filter_bmpmem); rte_free(eth_dev->data->dev_private); @@ -3178,23 +3382,23 @@ bond_ethdev_configure(struct rte_eth_dev *dev) struct ether_addr bond_mac; if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG, - &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) { - RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n", - name); + &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) { + RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s", + name); return -1; } /* Set MAC address */ if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) { - RTE_LOG(ERR, EAL, - "Failed to set mac address on bonded device %s\n", - name); + RTE_BOND_LOG(ERR, + "Failed to set mac address on bonded device %s", + name); return -1; } } else if (arg_count > 1) { - RTE_LOG(ERR, EAL, - "MAC address can be specified only once for bonded device %s\n", - name); + RTE_BOND_LOG(ERR, + "MAC address can be specified only once for bonded device %s", + name); return -1; } @@ -3204,40 +3408,40 @@ bond_ethdev_configure(struct rte_eth_dev *dev) uint8_t xmit_policy; if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG, - &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) != - 0) { - RTE_LOG(INFO, EAL, - "Invalid xmit policy specified for bonded device %s\n", - name); + &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) != + 0) { + RTE_BOND_LOG(INFO, + "Invalid xmit policy specified for bonded device %s", + name); return -1; } /* Set balance mode transmit policy*/ if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) { - RTE_LOG(ERR, EAL, - "Failed to set balance xmit policy on bonded device %s\n", - name); + RTE_BOND_LOG(ERR, + "Failed to set balance xmit policy on bonded device %s", + name); return -1; } } else if (arg_count > 1) { - RTE_LOG(ERR, EAL, - "Transmit policy can be specified only once for bonded device" - " %s\n", name); + RTE_BOND_LOG(ERR, + "Transmit policy can be specified only once for bonded device %s", + name); return -1; } if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) { if (rte_kvargs_process(kvlist, - PMD_BOND_AGG_MODE_KVARG, - &bond_ethdev_parse_slave_agg_mode_kvarg, - &agg_mode) != 0) { - RTE_LOG(ERR, EAL, - "Failed to parse agg selection mode for bonded device %s\n", - name); + PMD_BOND_AGG_MODE_KVARG, + &bond_ethdev_parse_slave_agg_mode_kvarg, + &agg_mode) != 0) { + RTE_BOND_LOG(ERR, + "Failed to parse agg selection mode for bonded device %s", + name); } if (internals->mode == BONDING_MODE_8023AD) - rte_eth_bond_8023ad_agg_selection_set(port_id, - agg_mode); + rte_eth_bond_8023ad_agg_selection_set(port_id, + agg_mode); } /* Parse/add slave ports to bonded device */ @@ -3248,23 +3452,23 @@ bond_ethdev_configure(struct rte_eth_dev *dev) memset(&slave_ports, 0, sizeof(slave_ports)); if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG, - &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) { - RTE_LOG(ERR, EAL, - "Failed to parse slave ports for bonded device %s\n", - name); + &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) { + RTE_BOND_LOG(ERR, + "Failed to parse slave ports for bonded device %s", + name); return -1; } for (i = 0; i < slave_ports.slave_count; i++) { if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) { - RTE_LOG(ERR, EAL, - "Failed to add port %d as slave to bonded device %s\n", - slave_ports.slaves[i], name); + RTE_BOND_LOG(ERR, + "Failed to add port %d as slave to 
bonded device %s", + slave_ports.slaves[i], name); } } } else { - RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name); + RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name); return -1; } @@ -3274,27 +3478,27 @@ bond_ethdev_configure(struct rte_eth_dev *dev) uint16_t primary_slave_port_id; if (rte_kvargs_process(kvlist, - PMD_BOND_PRIMARY_SLAVE_KVARG, - &bond_ethdev_parse_primary_slave_port_id_kvarg, - &primary_slave_port_id) < 0) { - RTE_LOG(INFO, EAL, - "Invalid primary slave port id specified for bonded device" - " %s\n", name); + PMD_BOND_PRIMARY_SLAVE_KVARG, + &bond_ethdev_parse_primary_slave_port_id_kvarg, + &primary_slave_port_id) < 0) { + RTE_BOND_LOG(INFO, + "Invalid primary slave port id specified for bonded device %s", + name); return -1; } /* Set balance mode transmit policy*/ if (rte_eth_bond_primary_set(port_id, primary_slave_port_id) - != 0) { - RTE_LOG(ERR, EAL, - "Failed to set primary slave port %d on bonded device %s\n", - primary_slave_port_id, name); + != 0) { + RTE_BOND_LOG(ERR, + "Failed to set primary slave port %d on bonded device %s", + primary_slave_port_id, name); return -1; } } else if (arg_count > 1) { - RTE_LOG(INFO, EAL, - "Primary slave can be specified only once for bonded device" - " %s\n", name); + RTE_BOND_LOG(INFO, + "Primary slave can be specified only once for bonded device %s", + name); return -1; } @@ -3304,26 +3508,26 @@ bond_ethdev_configure(struct rte_eth_dev *dev) uint32_t lsc_poll_interval_ms; if (rte_kvargs_process(kvlist, - PMD_BOND_LSC_POLL_PERIOD_KVARG, - &bond_ethdev_parse_time_ms_kvarg, - &lsc_poll_interval_ms) < 0) { - RTE_LOG(INFO, EAL, - "Invalid lsc polling interval value specified for bonded" - " device %s\n", name); + PMD_BOND_LSC_POLL_PERIOD_KVARG, + &bond_ethdev_parse_time_ms_kvarg, + &lsc_poll_interval_ms) < 0) { + RTE_BOND_LOG(INFO, + "Invalid lsc polling interval value specified for bonded" + " device %s", name); return -1; } if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms) - != 0) { - RTE_LOG(ERR, EAL, - "Failed to set lsc monitor polling interval (%u ms) on" - " bonded device %s\n", lsc_poll_interval_ms, name); + != 0) { + RTE_BOND_LOG(ERR, + "Failed to set lsc monitor polling interval (%u ms) on bonded device %s", + lsc_poll_interval_ms, name); return -1; } } else if (arg_count > 1) { - RTE_LOG(INFO, EAL, - "LSC polling interval can be specified only once for bonded" - " device %s\n", name); + RTE_BOND_LOG(INFO, + "LSC polling interval can be specified only once for bonded" + " device %s", name); return -1; } @@ -3333,27 +3537,27 @@ bond_ethdev_configure(struct rte_eth_dev *dev) uint32_t link_up_delay_ms; if (rte_kvargs_process(kvlist, - PMD_BOND_LINK_UP_PROP_DELAY_KVARG, - &bond_ethdev_parse_time_ms_kvarg, - &link_up_delay_ms) < 0) { - RTE_LOG(INFO, EAL, - "Invalid link up propagation delay value specified for" - " bonded device %s\n", name); + PMD_BOND_LINK_UP_PROP_DELAY_KVARG, + &bond_ethdev_parse_time_ms_kvarg, + &link_up_delay_ms) < 0) { + RTE_BOND_LOG(INFO, + "Invalid link up propagation delay value specified for" + " bonded device %s", name); return -1; } /* Set balance mode transmit policy*/ if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms) - != 0) { - RTE_LOG(ERR, EAL, - "Failed to set link up propagation delay (%u ms) on bonded" - " device %s\n", link_up_delay_ms, name); + != 0) { + RTE_BOND_LOG(ERR, + "Failed to set link up propagation delay (%u ms) on bonded" + " device %s", link_up_delay_ms, name); return -1; } } else if (arg_count > 1) { - 
RTE_LOG(INFO, EAL, - "Link up propagation delay can be specified only once for" - " bonded device %s\n", name); + RTE_BOND_LOG(INFO, + "Link up propagation delay can be specified only once for" + " bonded device %s", name); return -1; } @@ -3363,27 +3567,27 @@ bond_ethdev_configure(struct rte_eth_dev *dev) uint32_t link_down_delay_ms; if (rte_kvargs_process(kvlist, - PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG, - &bond_ethdev_parse_time_ms_kvarg, - &link_down_delay_ms) < 0) { - RTE_LOG(INFO, EAL, - "Invalid link down propagation delay value specified for" - " bonded device %s\n", name); + PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG, + &bond_ethdev_parse_time_ms_kvarg, + &link_down_delay_ms) < 0) { + RTE_BOND_LOG(INFO, + "Invalid link down propagation delay value specified for" + " bonded device %s", name); return -1; } /* Set balance mode transmit policy*/ if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms) - != 0) { - RTE_LOG(ERR, EAL, - "Failed to set link down propagation delay (%u ms) on" - " bonded device %s\n", link_down_delay_ms, name); + != 0) { + RTE_BOND_LOG(ERR, + "Failed to set link down propagation delay (%u ms) on bonded device %s", + link_down_delay_ms, name); return -1; } } else if (arg_count > 1) { - RTE_LOG(INFO, EAL, - "Link down propagation delay can be specified only once for" - " bonded device %s\n", name); + RTE_BOND_LOG(INFO, + "Link down propagation delay can be specified only once for bonded device %s", + name); return -1; } @@ -3409,3 +3613,12 @@ RTE_PMD_REGISTER_PARAM_STRING(net_bonding, "lsc_poll_period_ms= " "up_delay= " "down_delay="); + +int bond_logtype; + +RTE_INIT(bond_init_log) +{ + bond_logtype = rte_log_register("pmd.net.bon"); + if (bond_logtype >= 0) + rte_log_set_level(bond_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h index 92e15f8c..43e0e448 100644 --- a/drivers/net/bonding/rte_eth_bond_private.h +++ b/drivers/net/bonding/rte_eth_bond_private.h @@ -5,9 +5,12 @@ #ifndef _RTE_ETH_BOND_PRIVATE_H_ #define _RTE_ETH_BOND_PRIVATE_H_ +#include + #include #include #include +#include #include "rte_eth_bond.h" #include "rte_eth_bond_8023ad_private.h" @@ -28,8 +31,11 @@ #define PMD_BOND_XMIT_POLICY_LAYER23_KVARG ("l23") #define PMD_BOND_XMIT_POLICY_LAYER34_KVARG ("l34") +extern int bond_logtype; + #define RTE_BOND_LOG(lvl, msg, ...) 
\ - RTE_LOG(lvl, PMD, "%s(%d) - " msg "\n", __func__, __LINE__, ##__VA_ARGS__) + rte_log(RTE_LOG_ ## lvl, bond_logtype, \ + "%s(%d) - " msg "\n", __func__, __LINE__, ##__VA_ARGS__) #define BONDING_MODE_INVALID 0xFF @@ -37,6 +43,8 @@ extern const char *pmd_bond_init_valid_arguments[]; extern struct rte_vdev_driver pmd_bond_drv; +extern const struct rte_flow_ops bond_flow_ops; + /** Port Queue Mapping Structure */ struct bond_rx_queue { uint16_t queue_id; @@ -80,6 +88,14 @@ struct bond_slave_details { uint16_t reta_size; }; +struct rte_flow { + TAILQ_ENTRY(rte_flow) next; + /* Slaves flows */ + struct rte_flow *flows[RTE_MAX_ETHPORTS]; + /* Flow description for synchronization */ + struct rte_flow_desc *fd; +}; + typedef void (*burst_xmit_hash_t)(struct rte_mbuf **buf, uint16_t nb_pkts, uint8_t slave_count, uint16_t *slaves); @@ -89,6 +105,7 @@ struct bond_dev_private { uint8_t mode; /**< Link Bonding Mode */ rte_spinlock_t lock; + rte_spinlock_t lsc_lock; uint16_t primary_port; /**< Primary Slave Port */ uint16_t current_primary_port; /**< Primary Slave Port */ @@ -128,8 +145,17 @@ struct bond_dev_private { /**< TLB active slaves send order */ struct mode_alb_private mode6; - uint32_t rx_offload_capa; /** Rx offload capability */ - uint32_t tx_offload_capa; /** Tx offload capability */ + uint64_t rx_offload_capa; /** Rx offload capability */ + uint64_t tx_offload_capa; /** Tx offload capability */ + uint64_t rx_queue_offload_capa; /** per queue Rx offload capability */ + uint64_t tx_queue_offload_capa; /** per queue Tx offload capability */ + + /**< List of the configured flows */ + TAILQ_HEAD(sub_flows, rte_flow) flow_list; + + /**< Flow isolation state */ + int flow_isolated; + int flow_isolated_valid; /** Bit mask of RSS offloads, the bit offset also means flow type */ uint64_t flow_type_rss_offloads; @@ -204,6 +230,14 @@ mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr); int mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev); +int +slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev, + uint16_t slave_port_id); + +int +slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev, + uint16_t slave_port_id); + int bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode); diff --git a/drivers/net/bonding/rte_pmd_bond_version.map b/drivers/net/bonding/rte_pmd_bond_version.map index ec3374b0..03ddb44e 100644 --- a/drivers/net/bonding/rte_pmd_bond_version.map +++ b/drivers/net/bonding/rte_pmd_bond_version.map @@ -1,6 +1,7 @@ DPDK_2.0 { global: + rte_eth_bond_8023ad_slave_info; rte_eth_bond_active_slaves_get; rte_eth_bond_create; rte_eth_bond_link_monitoring_set; diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile index 65df1425..5d66c4b3 100644 --- a/drivers/net/cxgbe/Makefile +++ b/drivers/net/cxgbe/Makefile @@ -1,33 +1,6 @@ -# BSD LICENSE -# -# Copyright(c) 2014-2015 Chelsio Communications. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. 
-# * Neither the name of Chelsio Communications nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2014-2018 Chelsio Communications. +# All rights reserved. include $(RTE_SDK)/mk/rte.vars.mk @@ -45,12 +18,6 @@ EXPORT_MAP := rte_pmd_cxgbe_version.map LIBABIVER := 1 -ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) -# -# CFLAGS for icc -# -CFLAGS_BASE_DRIVER = -wd188 -else # # CFLAGS for gcc/clang # @@ -59,9 +26,7 @@ ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1) CFLAGS += -Wno-deprecated endif endif -CFLAGS_BASE_DRIVER = -endif LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs LDLIBS += -lrte_bus_pci @@ -80,8 +45,14 @@ VPATH += $(SRCDIR)/base # all source are stored in SRCS-y # SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_ethdev.c SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_main.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_main.c SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += sge.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_flow.c SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += clip_tbl.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4vf_hw.c include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h index f2057af1..e98dd218 100644 --- a/drivers/net/cxgbe/base/adapter.h +++ b/drivers/net/cxgbe/base/adapter.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ /* This file should not be included directly. Include common.h instead. */ @@ -39,12 +11,16 @@ #include #include #include +#include +#include #include "cxgbe_compat.h" #include "t4_regs_values.h" +#include "cxgbe_ofld.h" enum { MAX_ETH_QSETS = 64, /* # of Ethernet Tx/Rx queue sets */ + MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ }; struct adapter; @@ -68,6 +44,7 @@ struct port_info { u8 port_type; /* firmware port type */ u8 mod_type; /* firmware module type */ u8 port_id; /* physical port ID */ + u8 pidx; /* port index for this PF */ u8 tx_chan; /* associated channel */ u8 n_rx_qsets; /* # of rx qsets */ @@ -77,6 +54,7 @@ struct port_info { u16 *rss; /* rss table */ u8 rss_mode; /* rss mode */ u16 rss_size; /* size of VI's RSS table slice */ + u64 rss_hf; /* RSS Hash Function */ }; /* Enable or disable autonegotiation. If this is set to enable, @@ -196,6 +174,7 @@ struct sge_eth_rxq { /* a SW Ethernet Rx queue */ * scenario where a packet needs 32 bytes. */ #define ETH_COALESCE_PKT_NUM 15 +#define ETH_COALESCE_VF_PKT_NUM 7 #define ETH_COALESCE_PKT_PER_DESC 2 struct tx_eth_coal_desc { @@ -225,6 +204,10 @@ struct eth_coalesce { unsigned int len; unsigned int flits; unsigned int max; + __u8 ethmacdst[ETHER_ADDR_LEN]; + __u8 ethmacsrc[ETHER_ADDR_LEN]; + __be16 ethtype; + __be16 vlantci; }; struct sge_txq { @@ -247,6 +230,7 @@ struct sge_txq { unsigned int equeidx; /* last sent credit request */ unsigned int last_pidx; /* last pidx recorded by tx monitor */ unsigned int last_coal_idx;/* last coal-idx recorded by tx monitor */ + unsigned int abs_id; int db_disabled; /* doorbell state */ unsigned short db_pidx; /* doorbell producer index */ @@ -267,16 +251,27 @@ struct sge_eth_tx_stats { /* Ethernet tx queue statistics */ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ struct sge_txq q; struct rte_eth_dev *eth_dev; /* port that this queue belongs to */ + struct rte_eth_dev_data *data; struct sge_eth_tx_stats stats; /* queue statistics */ rte_spinlock_t txq_lock; unsigned int flags; /* flags for state of the queue */ } __rte_cache_aligned; +struct sge_ctrl_txq { /* State for an SGE control Tx queue */ + struct sge_txq q; /* txq */ + struct adapter *adapter; /* adapter associated with this queue */ + rte_spinlock_t ctrlq_lock; /* control queue lock */ + u8 full; /* the Tx ring is full */ + u64 txp; /* number of transmits */ + struct rte_mempool *mb_pool; /* mempool to generate ctrl pkts */ +} __rte_cache_aligned; + struct sge { struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; struct sge_rspq fw_evtq __rte_cache_aligned; + struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES]; u16 max_ethqsets; /* # of available Ethernet queue sets */ u32 stat_len; /* length of status page at ring end */ @@ -308,7 +303,7 @@ struct adapter { struct rte_pci_device *pdev; /* associated rte pci device */ struct 
rte_eth_dev *eth_dev; /* first port's rte eth device */ struct adapter_params params; /* adapter parameters */ - struct port_info port[MAX_NPORTS]; /* ports belonging to this adapter */ + struct port_info *port[MAX_NPORTS];/* ports belonging to this adapter */ struct sge sge; /* associated SGE */ /* support for single-threading access to adapter mailbox registers */ @@ -325,8 +320,76 @@ struct adapter { unsigned int vpd_flag; int use_unpacked_mode; /* unpacked rx mode state */ + rte_spinlock_t win0_lock; + + unsigned int clipt_start; /* CLIP table start */ + unsigned int clipt_end; /* CLIP table end */ + struct clip_tbl *clipt; /* CLIP table */ + + struct tid_info tids; /* Info used to access TID related tables */ }; +/** + * t4_os_rwlock_init - initialize rwlock + * @lock: the rwlock + */ +static inline void t4_os_rwlock_init(rte_rwlock_t *lock) +{ + rte_rwlock_init(lock); +} + +/** + * t4_os_write_lock - get a write lock + * @lock: the rwlock + */ +static inline void t4_os_write_lock(rte_rwlock_t *lock) +{ + rte_rwlock_write_lock(lock); +} + +/** + * t4_os_write_unlock - unlock a write lock + * @lock: the rwlock + */ +static inline void t4_os_write_unlock(rte_rwlock_t *lock) +{ + rte_rwlock_write_unlock(lock); +} + +/** + * ethdev2pinfo - return the port_info structure associated with a rte_eth_dev + * @dev: the rte_eth_dev + * + * Return the struct port_info associated with a rte_eth_dev + */ +static inline struct port_info *ethdev2pinfo(const struct rte_eth_dev *dev) +{ + return (struct port_info *)dev->data->dev_private; +} + +/** + * adap2pinfo - return the port_info of a port + * @adap: the adapter + * @idx: the port index + * + * Return the port_info structure for the port of the given index. + */ +static inline struct port_info *adap2pinfo(const struct adapter *adap, int idx) +{ + return adap->port[idx]; +} + +/** + * ethdev2adap - return the adapter structure associated with a rte_eth_dev + * @dev: the rte_eth_dev + * + * Return the struct adapter associated with a rte_eth_dev + */ +static inline struct adapter *ethdev2adap(const struct rte_eth_dev *dev) +{ + return ethdev2pinfo(dev)->adapter; +} + #define CXGBE_PCI_REG(reg) rte_read32(reg) static inline uint64_t cxgbe_read_addr64(volatile void *addr) @@ -602,7 +665,7 @@ static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap) static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx, u8 hw_addr[]) { - struct port_info *pi = &adapter->port[port_idx]; + struct port_info *pi = adap2pinfo(adapter, port_idx); ether_addr_copy((struct ether_addr *)hw_addr, &pi->eth_dev->data->mac_addrs[0]); @@ -688,15 +751,35 @@ static inline void t4_os_atomic_list_del(struct mbox_entry *entry, } /** - * adap2pinfo - return the port_info of a port - * @adap: the adapter - * @idx: the port index + * t4_init_completion - initialize completion + * @c: the completion context + */ +static inline void t4_init_completion(struct t4_completion *c) +{ + c->done = 0; + t4_os_lock_init(&c->lock); +} + +/** + * t4_complete - set completion as done + * @c: the completion context + */ +static inline void t4_complete(struct t4_completion *c) +{ + t4_os_lock(&c->lock); + c->done = 1; + t4_os_unlock(&c->lock); +} + +/** + * cxgbe_port_viid - get the VI id of a port + * @dev: the device for the port * - * Return the port_info structure for the port of the given index. + * Return the VI id of the given port. 
*/ -static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) +static inline unsigned int cxgbe_port_viid(const struct rte_eth_dev *dev) { - return &adap->port[idx]; + return ethdev2pinfo(dev)->viid; } void *t4_alloc_mem(size_t size); @@ -713,12 +796,17 @@ void t4_sge_tx_monitor_start(struct adapter *adap); void t4_sge_tx_monitor_stop(struct adapter *adap); int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf, uint16_t nb_pkts); +int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf); int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, const struct pkt_gl *gl); int t4_sge_init(struct adapter *adap); +int t4vf_sge_init(struct adapter *adap); int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, struct rte_eth_dev *eth_dev, uint16_t queue_id, unsigned int iqid, int socket_id); +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct rte_eth_dev *eth_dev, uint16_t queue_id, + unsigned int iqid, int socket_id); int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq, struct rte_eth_dev *eth_dev, int intr_idx, struct sge_fl *fl, rspq_handler_t handler, @@ -735,6 +823,7 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, unsigned int cnt); int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts, unsigned int budget, unsigned int *work_done); -int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); +int cxgbe_write_rss(const struct port_info *pi, const u16 *queues); +int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t flags); #endif /* __T4_ADAPTER_H__ */ diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h index 1eda57d0..157201da 100644 --- a/drivers/net/cxgbe/base/common.h +++ b/drivers/net/cxgbe/base/common.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. 
+ * All rights reserved. */ #ifndef __CHELSIO_COMMON_H @@ -36,6 +8,7 @@ #include "cxgbe_compat.h" #include "t4_hw.h" +#include "t4vf_hw.h" #include "t4_chip_type.h" #include "t4fw_interface.h" @@ -45,6 +18,9 @@ extern "C" { #define CXGBE_PAGE_SIZE RTE_PGSIZE_4K +#define T4_MEMORY_WRITE 0 +#define T4_MEMORY_READ 1 + enum { MAX_NPORTS = 4, /* max # of ports */ }; @@ -62,18 +38,20 @@ enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST }; enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR }; -enum { +enum cc_pause { PAUSE_RX = 1 << 0, PAUSE_TX = 1 << 1, PAUSE_AUTONEG = 1 << 2 }; -enum { - FEC_RS = 1 << 0, - FEC_BASER_RS = 1 << 1, - FEC_RESERVED = 1 << 2, +enum cc_fec { + FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */ + FEC_RS = 1 << 1, /* Reed-Solomon */ + FEC_BASER_RS = 1 << 2, /* BaseR/Reed-Solomon */ }; +enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 }; + struct port_stats { u64 tx_octets; /* total # of octets in good frames */ u64 tx_frames; /* all good frames */ @@ -178,6 +156,9 @@ struct tp_params { int vnic_shift; int port_shift; int protocol_shift; + int ethertype_shift; + + u64 hash_filter_mask; }; struct vpd_params { @@ -209,12 +190,59 @@ struct arch_specific_params { u16 mps_tcam_size; }; +/* + * Global Receive Side Scaling (RSS) parameters in host-native format. + */ +struct rss_params { + unsigned int mode; /* RSS mode */ + union { + struct { + uint synmapen:1; /* SYN Map Enable */ + uint syn4tupenipv6:1; /* en 4-tuple IPv6 SYNs hash */ + uint syn2tupenipv6:1; /* en 2-tuple IPv6 SYNs hash */ + uint syn4tupenipv4:1; /* en 4-tuple IPv4 SYNs hash */ + uint syn2tupenipv4:1; /* en 2-tuple IPv4 SYNs hash */ + uint ofdmapen:1; /* Offload Map Enable */ + uint tnlmapen:1; /* Tunnel Map Enable */ + uint tnlalllookup:1; /* Tunnel All Lookup */ + uint hashtoeplitz:1; /* use Toeplitz hash */ + } basicvirtual; + } u; +}; + +/* + * Maximum resources provisioned for a PCI PF. + */ +struct pf_resources { + unsigned int neq; /* N egress Qs */ + unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */ +}; + +/* + * Maximum resources provisioned for a PCI VF. + */ +struct vf_resources { + unsigned int nvi; /* N virtual interfaces */ + unsigned int neq; /* N egress Qs */ + unsigned int nethctrl; /* N egress ETH or CTRL Qs */ + unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */ + unsigned int niq; /* N ingress Qs */ + unsigned int tc; /* PCI-E traffic class */ + unsigned int pmask; /* port access rights mask */ + unsigned int nexactf; /* N exact MPS filters */ + unsigned int r_caps; /* read capabilities */ + unsigned int wx_caps; /* write/execute capabilities */ +}; + struct adapter_params { struct sge_params sge; struct tp_params tp; struct vpd_params vpd; struct pci_params pci; struct devlog_params devlog; + struct rss_params rss; + struct pf_resources pfres; + struct vf_resources vfres; enum pcie_memwin drv_memwin; unsigned int sf_size; /* serial flash size in bytes */ @@ -235,23 +263,46 @@ struct adapter_params { unsigned char nports; /* # of ethernet ports */ unsigned char portvec; + unsigned char hash_filter; + enum chip_type chip; /* chip code */ struct arch_specific_params arch; /* chip specific params */ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ + u8 fw_caps_support; /* 32-bit Port Capabilities */ +}; + +/* Firmware Port Capabilities types. 
+ */ +typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */ +typedef u32 fw_port_cap32_t; /* 32-bit Port Capabilities integral value */ + +enum fw_caps { + FW_CAPS_UNKNOWN = 0, /* 0'ed out initial state */ + FW_CAPS16 = 1, /* old Firmware: 16-bit Port Capabilities */ + FW_CAPS32 = 2, /* new Firmware: 32-bit Port Capabilities */ }; struct link_config { - unsigned short supported; /* link capabilities */ - unsigned short advertising; /* advertised capabilities */ - unsigned int requested_speed; /* speed user has requested */ - unsigned int speed; /* actual link speed */ - unsigned char requested_fc; /* flow control user has requested */ - unsigned char fc; /* actual link flow control */ - unsigned char requested_fec; /* Forward Error Correction user */ - unsigned char fec; /* has requested and actual FEC */ - unsigned char autoneg; /* autonegotiating? */ - unsigned char link_ok; /* link up? */ + fw_port_cap32_t pcaps; /* link capabilities */ + fw_port_cap32_t acaps; /* advertised capabilities */ + + u32 requested_speed; /* speed (Mb/s) user has requested */ + u32 speed; /* actual link speed (Mb/s) */ + + enum cc_pause requested_fc; /* flow control user has requested */ + enum cc_pause fc; /* actual link flow control */ + + enum cc_fec auto_fec; /* Forward Error Correction + * "automatic" (IEEE 802.3) + */ + enum cc_fec requested_fec; /* Forward Error Correction requested */ + enum cc_fec fec; /* Forward Error Correction actual */ + + unsigned char autoneg; /* autonegotiating? */ + + unsigned char link_ok; /* link up? */ + unsigned char link_down_rc; /* link down reason */ }; #include "adapter.h" @@ -269,9 +320,19 @@ static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, delay, NULL); } +static inline int is_pf4(struct adapter *adap) +{ + return adap->pf == 4; +} + #define for_each_port(adapter, iter) \ for (iter = 0; iter < (adapter)->params.nports; ++iter) +static inline int is_hashfilter(const struct adapter *adap) +{ + return adap->params.hash_filter; +} + void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, unsigned int mask, unsigned int val); @@ -285,9 +346,12 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, enum dev_master master, enum dev_state *state); int t4_fw_bye(struct adapter *adap, unsigned int mbox); int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); +int t4vf_fw_reset(struct adapter *adap); int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset); int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset); int t4_fl_pkt_align(struct adapter *adap); +int t4vf_fl_pkt_align(struct adapter *adap, u32 sge_control, u32 sge_control2); +int t4vf_get_vfres(struct adapter *adap); int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size, unsigned int cache_line_size, enum chip_type chip_compat); @@ -297,6 +361,13 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox); int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, u32 *val); +int t4vf_query_params(struct adapter *adap, unsigned int nparams, + const u32 *params, u32 *vals); +int t4vf_get_dev_params(struct adapter *adap); +int t4vf_get_vpd_params(struct adapter *adap); +int t4vf_get_rss_glb_config(struct adapter *adap); +int t4vf_set_params(struct adapter *adapter, unsigned int nparams, + const u32 *params, const u32 
*vals); int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, @@ -331,6 +402,8 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int fl0id, unsigned int fl1id); int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); +int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); static inline unsigned int core_ticks_per_usec(const struct adapter *adap) { @@ -379,6 +452,21 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); } +int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool); + +static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd, + int size, void *rpl) +{ + return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true); +} + +static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, + int size, void *rpl) +{ + return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false); +} + + void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, u32 *vals, unsigned int nregs, unsigned int start_idx); @@ -387,6 +475,7 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int nregs, unsigned int start_idx); int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p); +int t4_get_pfres(struct adapter *adapter); int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented); int t4_flash_cfg_addr(struct adapter *adapter); @@ -394,22 +483,34 @@ unsigned int t4_get_mps_bg_map(struct adapter *adapter, unsigned int pidx); unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx); const char *t4_get_port_type_description(enum fw_port_type port_type); void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); +void t4vf_get_port_stats(struct adapter *adapter, int pidx, + struct port_stats *p); void t4_get_port_stats_offset(struct adapter *adap, int idx, struct port_stats *stats, struct port_stats *offset); void t4_clr_port_stats(struct adapter *adap, int idx); +void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, + fw_port_cap32_t acaps); void t4_reset_link_config(struct adapter *adap, int idx); int t4_get_version_info(struct adapter *adapter); void t4_dump_version_info(struct adapter *adapter); int t4_get_flash_params(struct adapter *adapter); int t4_get_chip_type(struct adapter *adap, int ver); int t4_prep_adapter(struct adapter *adapter); +int t4vf_prep_adapter(struct adapter *adapter); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); +int t4vf_port_init(struct adapter *adap); int t4_init_rss_mode(struct adapter *adap, int mbox); int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, int start, int n, const u16 *rspq, unsigned int nrspq); int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, unsigned int flags, unsigned int defq); +int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, + u64 *flags, unsigned int *defq); +void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs, + unsigned int start_index, unsigned int rw); +void t4_write_rss_key(struct adapter *adap, u32 *key, int idx); +void t4_read_rss_key(struct adapter *adap, u32 *key); enum t4_bar2_qtype { 
T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, @@ -421,8 +522,20 @@ int t4_init_tp_params(struct adapter *adap); int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel); int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); unsigned int t4_get_regs_len(struct adapter *adap); +unsigned int t4vf_get_pf_from_vf(struct adapter *adap); void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size); int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data); int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data); int t4_seeprom_wp(struct adapter *adapter, int enable); +int t4_memory_rw_addr(struct adapter *adap, int win, + u32 addr, u32 len, void *hbuf, int dir); +int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr, + u32 len, void *hbuf, int dir); +static inline int t4_memory_rw(struct adapter *adap, int win, + int mtype, u32 maddr, u32 len, + void *hbuf, int dir) +{ + return t4_memory_rw_mtype(adap, win, mtype, maddr, len, hbuf, dir); +} +fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); #endif /* __CHELSIO_COMMON_H */ diff --git a/drivers/net/cxgbe/base/t4_chip_type.h b/drivers/net/cxgbe/base/t4_chip_type.h index cd7a9282..c0c5d0b2 100644 --- a/drivers/net/cxgbe/base/t4_chip_type.h +++ b/drivers/net/cxgbe/base/t4_chip_type.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #ifndef __T4_CHIP_TYPE_H__ diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c index 56f38c83..31762c9c 100644 --- a/drivers/net/cxgbe/base/t4_hw.c +++ b/drivers/net/cxgbe/base/t4_hw.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #include @@ -55,9 +27,6 @@ #include "t4_regs_values.h" #include "t4fw_interface.h" -static void init_link_config(struct link_config *lc, unsigned int pcaps, - unsigned int acaps); - /** * t4_read_mtu_tbl - returns the values in the HW path MTU table * @adap: the adapter @@ -2166,6 +2135,91 @@ int t4_seeprom_wp(struct adapter *adapter, int enable) return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); } +/** + * t4_fw_tp_pio_rw - Access TP PIO through LDST + * @adap: the adapter + * @vals: where the indirect register values are stored/written + * @nregs: how many indirect registers to read/write + * @start_index: index of first indirect register to read/write + * @rw: Read (1) or Write (0) + * + * Access TP PIO registers through LDST + */ +void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs, + unsigned int start_index, unsigned int rw) +{ + int cmd = FW_LDST_ADDRSPC_TP_PIO; + struct fw_ldst_cmd c; + unsigned int i; + int ret; + + for (i = 0; i < nregs; i++) { + memset(&c, 0, sizeof(c)); + c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | + F_FW_CMD_REQUEST | + (rw ? F_FW_CMD_READ : + F_FW_CMD_WRITE) | + V_FW_LDST_CMD_ADDRSPACE(cmd)); + c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); + + c.u.addrval.addr = cpu_to_be32(start_index + i); + c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (ret == 0) { + if (rw) + vals[i] = be32_to_cpu(c.u.addrval.val); + } + } +} + +/** + * t4_read_rss_key - read the global RSS key + * @adap: the adapter + * @key: 10-entry array holding the 320-bit RSS key + * + * Reads the global 320-bit RSS key.
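+ *
+ * A minimal usage sketch (hypothetical caller; assumes an already
+ * initialized adapter "adap"):
+ *
+ *	u32 rss_key[10];
+ *
+ *	t4_read_rss_key(adap, rss_key);		read the current key
+ *	t4_write_rss_key(adap, rss_key, -1);	idx < 0 rewrites only the
+ *						global key, no table entry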
+ */ +void t4_read_rss_key(struct adapter *adap, u32 *key) +{ + t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1); +} + +/** + * t4_write_rss_key - program one of the RSS keys + * @adap: the adapter + * @key: 10-entry array holding the 320-bit RSS key + * @idx: which RSS key to write + * + * Writes one of the RSS keys with the given 320-bit value. If @idx is + * 0..15 the corresponding entry in the RSS key table is written, + * otherwise the global RSS key is written. + */ +void t4_write_rss_key(struct adapter *adap, u32 *key, int idx) +{ + u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT); + u8 rss_key_addr_cnt = 16; + + /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble), + * allows access to key addresses 16-63 by using KeyWrAddrX + * as index[5:4](upper 2) into key table + */ + if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) && + (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3)) + rss_key_addr_cnt = 32; + + t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0); + + if (idx >= 0 && idx < rss_key_addr_cnt) { + if (rss_key_addr_cnt > 16) + t4_write_reg(adap, A_TP_RSS_CONFIG_VRT, + V_KEYWRADDRX(idx >> 4) | + V_T6_VFWRADDR(idx) | F_KEYWREN); + else + t4_write_reg(adap, A_TP_RSS_CONFIG_VRT, + V_KEYWRADDR(idx) | F_KEYWREN); + } +} + /** * t4_config_rss_range - configure a portion of the RSS mapping table * @adapter: the adapter @@ -2257,7 +2311,11 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, * Send this portion of the RSS table update to the firmware; * bail out on any errors. */ - ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); + if (is_pf4(adapter)) + ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), + NULL); + else + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); if (ret) return ret; } @@ -2287,7 +2345,44 @@ int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags | V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq)); - return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); + if (is_pf4(adapter)) + return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); + else + return t4vf_wr_mbox(adapter, &c, sizeof(c), NULL); +} + +/** + * t4_read_config_vi_rss - read the configured per VI RSS settings + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @viid: the VI id + * @flags: where to place the configured flags + * @defq: where to place the id of the default RSS queue for the VI. + * + * Read configured VI-specific RSS properties. + */ +int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, + u64 *flags, unsigned int *defq) +{ + struct fw_rss_vi_config_cmd c; + unsigned int result; + int ret; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ | + V_FW_RSS_VI_CONFIG_CMD_VIID(viid)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + ret = t4_wr_mbox(adapter, mbox, &c, sizeof(c), &c); + if (!ret) { + result = be32_to_cpu(c.u.basicvirtual.defaultq_to_udpen); + if (defq) + *defq = G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result); + if (flags) + *flags = result & M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ; + } + + return ret; } /** @@ -2385,6 +2480,46 @@ int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p) return 0; } +/** + * t4_get_pfres - retrieve PF resource limits + * @adapter: the adapter + * + * Retrieves configured resource limits and capabilities for a physical + * function.
The results are stored in @adapter->pfres. + */ +int t4_get_pfres(struct adapter *adapter) +{ + struct pf_resources *pfres = &adapter->params.pfres; + struct fw_pfvf_cmd cmd, rpl; + u32 word; + int v; + + /* + * Execute PFVF Read command to get PF resource limits; bail out early + * with error on command failure. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ | + V_FW_PFVF_CMD_PFN(adapter->pf) | + V_FW_PFVF_CMD_VFN(0)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl); + if (v != FW_SUCCESS) + return v; + + /* + * Extract PF resource limits and return success. + */ + word = be32_to_cpu(rpl.niqflint_niq); + pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word); + + word = be32_to_cpu(rpl.type_to_neq); + pfres->neq = G_FW_PFVF_CMD_NEQ(word); + return 0; +} + /* serial flash and firmware constants and flash config file constants */ enum { SF_ATTEMPTS = 10, /* max retries for SF operations */ @@ -2670,14 +2805,142 @@ void t4_dump_version_info(struct adapter *adapter) G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers)); } -#define ADVERT_MASK (V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) | \ - FW_PORT_CAP_ANEG) +#define ADVERT_MASK (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) | \ + FW_PORT_CAP32_ANEG) +/** + * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits + * @caps16: a 16-bit Port Capabilities value + * + * Returns the equivalent 32-bit Port Capabilities value. + */ +fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) +{ + fw_port_cap32_t caps32 = 0; + +#define CAP16_TO_CAP32(__cap) \ + do { \ + if (caps16 & FW_PORT_CAP_##__cap) \ + caps32 |= FW_PORT_CAP32_##__cap; \ + } while (0) + + CAP16_TO_CAP32(SPEED_100M); + CAP16_TO_CAP32(SPEED_1G); + CAP16_TO_CAP32(SPEED_25G); + CAP16_TO_CAP32(SPEED_10G); + CAP16_TO_CAP32(SPEED_40G); + CAP16_TO_CAP32(SPEED_100G); + CAP16_TO_CAP32(FC_RX); + CAP16_TO_CAP32(FC_TX); + CAP16_TO_CAP32(ANEG); + CAP16_TO_CAP32(MDIX); + CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(FEC_RS); + CAP16_TO_CAP32(FEC_BASER_RS); + CAP16_TO_CAP32(802_3_PAUSE); + CAP16_TO_CAP32(802_3_ASM_DIR); + +#undef CAP16_TO_CAP32 + + return caps32; +} + +/** + * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits + * @caps32: a 32-bit Port Capabilities value + * + * Returns the equivalent 16-bit Port Capabilities value. Note that + * not all 32-bit Port Capabilities can be represented in the 16-bit + * Port Capabilities and some fields/values may not make it.
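+ *
+ * For example, 50Gb/s link speed exists only in the 32-bit namespace
+ * (fwcap_to_speed() below knows FW_PORT_CAP32_SPEED_50G, but there is
+ * no CAP32_TO_CAP16() entry for it), so a hypothetical round trip
+ *
+ *	fw_port_cap32_t caps32 = FW_PORT_CAP32_ANEG |
+ *				 FW_PORT_CAP32_SPEED_50G;
+ *
+ *	caps32 = fwcaps16_to_caps32(fwcaps32_to_caps16(caps32));
+ *
+ * silently drops the 50G bit and leaves only the ANEG capability.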
+ */ +static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) +{ + fw_port_cap16_t caps16 = 0; + +#define CAP32_TO_CAP16(__cap) \ + do { \ + if (caps32 & FW_PORT_CAP32_##__cap) \ + caps16 |= FW_PORT_CAP_##__cap; \ + } while (0) + + CAP32_TO_CAP16(SPEED_100M); + CAP32_TO_CAP16(SPEED_1G); + CAP32_TO_CAP16(SPEED_10G); + CAP32_TO_CAP16(SPEED_25G); + CAP32_TO_CAP16(SPEED_40G); + CAP32_TO_CAP16(SPEED_100G); + CAP32_TO_CAP16(FC_RX); + CAP32_TO_CAP16(FC_TX); + CAP32_TO_CAP16(802_3_PAUSE); + CAP32_TO_CAP16(802_3_ASM_DIR); + CAP32_TO_CAP16(ANEG); + CAP32_TO_CAP16(MDIX); + CAP32_TO_CAP16(MDIAUTO); + CAP32_TO_CAP16(FEC_RS); + CAP32_TO_CAP16(FEC_BASER_RS); + +#undef CAP32_TO_CAP16 + + return caps16; +} + +/* Translate Firmware Pause specification to Common Code */ +static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause) +{ + enum cc_pause cc_pause = 0; + + if (fw_pause & FW_PORT_CAP32_FC_RX) + cc_pause |= PAUSE_RX; + if (fw_pause & FW_PORT_CAP32_FC_TX) + cc_pause |= PAUSE_TX; + + return cc_pause; +} + +/* Translate Common Code Pause Frame specification into Firmware */ +static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) +{ + fw_port_cap32_t fw_pause = 0; + + if (cc_pause & PAUSE_RX) + fw_pause |= FW_PORT_CAP32_FC_RX; + if (cc_pause & PAUSE_TX) + fw_pause |= FW_PORT_CAP32_FC_TX; + + return fw_pause; +} + +/* Translate Firmware Forward Error Correction specification to Common Code */ +static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec) +{ + enum cc_fec cc_fec = 0; + + if (fw_fec & FW_PORT_CAP32_FEC_RS) + cc_fec |= FEC_RS; + if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS) + cc_fec |= FEC_BASER_RS; + + return cc_fec; +} + +/* Translate Common Code Forward Error Correction specification to Firmware */ +static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec) +{ + fw_port_cap32_t fw_fec = 0; + + if (cc_fec & FEC_RS) + fw_fec |= FW_PORT_CAP32_FEC_RS; + if (cc_fec & FEC_BASER_RS) + fw_fec |= FW_PORT_CAP32_FEC_BASER_RS; + + return fw_fec; +} /** * t4_link_l1cfg - apply link configuration to MAC/PHY - * @phy: the PHY to setup - * @mac: the MAC to setup - * @lc: the requested link configuration + * @adapter: the adapter + * @mbox: the Firmware Mailbox to use + * @port: the Port ID + * @lc: the Port's Link Configuration * * Set up a port's MAC and PHY according to a desired link configuration. 
* - If the PHY can auto-negotiate first decide what to advertise, then @@ -2689,48 +2952,60 @@ void t4_dump_version_info(struct adapter *adapter) int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc) { - struct fw_port_cmd c; - unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO); - unsigned int fc, fec; + unsigned int fw_mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO); + unsigned int fw_caps = adap->params.fw_caps_support; + fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap; + struct fw_port_cmd cmd; lc->link_ok = 0; - fc = 0; - if (lc->requested_fc & PAUSE_RX) - fc |= FW_PORT_CAP_FC_RX; - if (lc->requested_fc & PAUSE_TX) - fc |= FW_PORT_CAP_FC_TX; - - fec = 0; - if (lc->requested_fec & FEC_RS) - fec |= FW_PORT_CAP_FEC_RS; - if (lc->requested_fec & FEC_BASER_RS) - fec |= FW_PORT_CAP_FEC_BASER_RS; - if (lc->requested_fec & FEC_RESERVED) - fec |= FW_PORT_CAP_FEC_RESERVED; - memset(&c, 0, sizeof(c)); - c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | - F_FW_CMD_REQUEST | F_FW_CMD_EXEC | - V_FW_PORT_CMD_PORTID(port)); - c.action_to_len16 = - cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | - FW_LEN16(c)); - - if (!(lc->supported & FW_PORT_CAP_ANEG)) { - c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) | - fc | fec); + fw_fc = cc_to_fwcap_pause(lc->requested_fc); + + /* Convert Common Code Forward Error Control settings into the + * Firmware's API. If the current Requested FEC has "Automatic" + * (IEEE 802.3) specified, then we use whatever the Firmware + * sent us as part of its IEEE 802.3-based interpretation of + * the Transceiver Module EPROM FEC parameters. Otherwise we + * use whatever is in the current Requested FEC settings. + */ + if (lc->requested_fec & FEC_AUTO) + cc_fec = lc->auto_fec; + else + cc_fec = lc->requested_fec; + fw_fec = cc_to_fwcap_fec(cc_fec); + + /* Figure out what our Requested Port Capabilities are going to be. + */ + if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { + rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec; lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; - lc->fec = lc->requested_fec; + lc->fec = cc_fec; } else if (lc->autoneg == AUTONEG_DISABLE) { - c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | - fec | mdi); + rcap = lc->requested_speed | fw_fc | fw_fec | fw_mdi; lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; - lc->fec = lc->requested_fec; + lc->fec = cc_fec; } else { - c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | fec | mdi); + rcap = lc->acaps | fw_fc | fw_fec | fw_mdi; } - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + /* And send that on to the Firmware ... + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_EXEC | + V_FW_PORT_CMD_PORTID(port)); + cmd.action_to_len16 = + cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
+ FW_PORT_ACTION_L1_CFG : + FW_PORT_ACTION_L1_CFG32) | + FW_LEN16(cmd)); + + if (fw_caps == FW_CAPS16) + cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap)); + else + cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap); + + return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL); } /** @@ -3823,12 +4098,17 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST | - F_FW_CMD_EXEC | V_FW_VI_CMD_PFN(pf) | - V_FW_VI_CMD_VFN(vf)); + F_FW_CMD_EXEC); + if (is_pf4(adap)) + c.op_to_vfn |= cpu_to_be32(V_FW_VI_CMD_PFN(pf) | + V_FW_VI_CMD_VFN(vf)); c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c)); c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid)); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (is_pf4(adap)) + return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + else + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); } /** @@ -3874,7 +4154,11 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); - return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); + if (is_pf4(adap)) + return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, + sleep_ok); + else + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); } /** @@ -3921,7 +4205,10 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, V_FW_VI_MAC_CMD_IDX(idx)); memcpy(p->macaddr, addr, sizeof(p->macaddr)); - ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (is_pf4(adap)) + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + else + ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c); if (ret == 0) { ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx)); if (ret >= max_mac_addr) @@ -3955,7 +4242,10 @@ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, V_FW_VI_ENABLE_CMD_EEN(tx_en) | V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) | FW_LEN16(c)); - return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); + if (is_pf4(adap)) + return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); + else + return t4vf_wr_mbox_ns(adap, &c, sizeof(c), NULL); } /** @@ -3996,15 +4286,20 @@ int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | - F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | - V_FW_IQ_CMD_VFN(vf)); + F_FW_CMD_EXEC); c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) | V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c)); c.iqid = cpu_to_be16(iqid); c.fl0id = cpu_to_be16(fl0id); c.fl1id = cpu_to_be16(fl1id); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + if (is_pf4(adap)) { + c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) | + V_FW_IQ_CMD_VFN(vf)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + } else { + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); + } } /** @@ -4028,14 +4323,19 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | - F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | - V_FW_IQ_CMD_VFN(vf)); + F_FW_CMD_EXEC); + if (is_pf4(adap)) + c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) | + V_FW_IQ_CMD_VFN(vf)); c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c)); c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); c.iqid = cpu_to_be16(iqid); c.fl0id = cpu_to_be16(fl0id); c.fl1id = cpu_to_be16(fl1id); - 
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + if (is_pf4(adap)) + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + else + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); } /** @@ -4055,11 +4355,203 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | - F_FW_CMD_REQUEST | F_FW_CMD_EXEC | - V_FW_EQ_ETH_CMD_PFN(pf) | - V_FW_EQ_ETH_CMD_VFN(vf)); + F_FW_CMD_REQUEST | F_FW_CMD_EXEC); + if (is_pf4(adap)) + c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) | + V_FW_IQ_CMD_VFN(vf)); c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid)); + if (is_pf4(adap)) + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + else + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); +} + +/** + * t4_link_down_rc_str - return a string for a Link Down Reason Code + * @link_down_rc: Link Down Reason Code + * + * Returns a string representation of the Link Down Reason Code. + */ +static const char *t4_link_down_rc_str(unsigned char link_down_rc) +{ + static const char * const reason[] = { + "Link Down", + "Remote Fault", + "Auto-negotiation Failure", + "Reserved", + "Insufficient Airflow", + "Unable To Determine Reason", + "No RX Signal Detected", + "Reserved", + }; + + if (link_down_rc >= ARRAY_SIZE(reason)) + return "Bad Reason Code"; + + return reason[link_down_rc]; +} + +/* Return the highest speed set in the port capabilities, in Mb/s. */ +static unsigned int fwcap_to_speed(fw_port_cap32_t caps) +{ +#define TEST_SPEED_RETURN(__caps_speed, __speed) \ + do { \ + if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \ + return __speed; \ + } while (0) + + TEST_SPEED_RETURN(100G, 100000); + TEST_SPEED_RETURN(50G, 50000); + TEST_SPEED_RETURN(40G, 40000); + TEST_SPEED_RETURN(25G, 25000); + TEST_SPEED_RETURN(10G, 10000); + TEST_SPEED_RETURN(1G, 1000); + TEST_SPEED_RETURN(100M, 100); + +#undef TEST_SPEED_RETURN + + return 0; +} + +/** + * t4_handle_get_port_info - process a FW reply message + * @pi: the port info + * @rpl: start of the FW message + * + * Processes a GET_PORT_INFO FW reply message. + */ +static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) +{ + const struct fw_port_cmd *cmd = (const void *)rpl; + int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16)); + fw_port_cap32_t pcaps, acaps, linkattr; + struct link_config *lc = &pi->link_cfg; + struct adapter *adapter = pi->adapter; + enum fw_port_module_type mod_type; + enum fw_port_type port_type; + unsigned int speed, fc, fec; + int link_ok, linkdnrc; + + /* Extract the various fields from the Port Information message. + */ + switch (action) { + case FW_PORT_ACTION_GET_PORT_INFO: { + u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); + + link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0; + linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus); + port_type = G_FW_PORT_CMD_PTYPE(lstatus); + mod_type = G_FW_PORT_CMD_MODTYPE(lstatus); + pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap)); + acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap)); + + /* Unfortunately the format of the Link Status in the old + * 16-bit Port Information message isn't the same as the + * 16-bit Port Capabilities bitfield used everywhere else ... 
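+ * The code below therefore hand-builds an equivalent 32-bit "linkattr"
+ * word: e.g. a set F_FW_PORT_CMD_RXPAUSE flag becomes
+ * FW_PORT_CAP32_FC_RX, and each reported link speed becomes the
+ * corresponding FW_PORT_CAP32_SPEED_* bit.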
+ */ + linkattr = 0; + if (lstatus & F_FW_PORT_CMD_RXPAUSE) + linkattr |= FW_PORT_CAP32_FC_RX; + if (lstatus & F_FW_PORT_CMD_TXPAUSE) + linkattr |= FW_PORT_CAP32_FC_TX; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) + linkattr |= FW_PORT_CAP32_SPEED_100M; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) + linkattr |= FW_PORT_CAP32_SPEED_1G; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) + linkattr |= FW_PORT_CAP32_SPEED_10G; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G)) + linkattr |= FW_PORT_CAP32_SPEED_25G; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) + linkattr |= FW_PORT_CAP32_SPEED_40G; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G)) + linkattr |= FW_PORT_CAP32_SPEED_100G; + + break; + } + + case FW_PORT_ACTION_GET_PORT_INFO32: { + u32 lstatus32 = + be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32); + + link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0; + linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32); + port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32); + mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32); + pcaps = be32_to_cpu(cmd->u.info32.pcaps32); + acaps = be32_to_cpu(cmd->u.info32.acaps32); + linkattr = be32_to_cpu(cmd->u.info32.linkattr32); + break; + } + + default: + dev_warn(adapter, "Handle Port Information: Bad Command/Action %#x\n", + be32_to_cpu(cmd->action_to_len16)); + return; + } + + fec = fwcap_to_cc_fec(acaps); + + fc = fwcap_to_cc_pause(linkattr); + speed = fwcap_to_speed(linkattr); + + if (mod_type != pi->mod_type) { + lc->auto_fec = fec; + pi->port_type = port_type; + pi->mod_type = mod_type; + t4_os_portmod_changed(adapter, pi->pidx); + } + if (link_ok != lc->link_ok || speed != lc->speed || + fc != lc->fc || fec != lc->fec) { /* something changed */ + if (!link_ok && lc->link_ok) { + lc->link_down_rc = linkdnrc; + dev_warn(adapter, "Port %d link down, reason: %s\n", + pi->tx_chan, t4_link_down_rc_str(linkdnrc)); + } + lc->link_ok = link_ok; + lc->speed = speed; + lc->fc = fc; + lc->fec = fec; + lc->pcaps = pcaps; + lc->acaps = acaps & ADVERT_MASK; + + if (lc->acaps & FW_PORT_CAP32_ANEG) { + lc->autoneg = AUTONEG_ENABLE; + } else { + /* When Autoneg is disabled, user needs to set + * single speed. + * Similar to cxgb4_ethtool.c: set_link_ksettings + */ + lc->acaps = 0; + lc->requested_speed = fwcap_to_speed(acaps); + lc->autoneg = AUTONEG_DISABLE; + } + } +} + +/** + * t4_ctrl_eq_free - free a control egress queue + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queue + * @vf: the VF owning the queue + * @eqid: egress queue id + * + * Frees a control egress queue.
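+ *
+ * Usage sketch (hypothetical caller; "adap" is an initialized PF4
+ * adapter and "eqid" is the id the control EQ was allocated with):
+ *
+ *	int ret;
+ *
+ *	ret = t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, eqid);
+ *	if (ret)
+ *		dev_warn(adap, "ctrl EQ free failed: %d\n", ret);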
+ */ +int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid) +{ + struct fw_eq_ctrl_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_EXEC | + V_FW_EQ_CTRL_CMD_PFN(pf) | + V_FW_EQ_CTRL_CMD_VFN(vf)); + c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); + c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -4084,67 +4576,21 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) unsigned int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16)); - if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) { + if (opcode == FW_PORT_CMD && + (action == FW_PORT_ACTION_GET_PORT_INFO || + action == FW_PORT_ACTION_GET_PORT_INFO32)) { /* link/module state change message */ - unsigned int speed = 0, fc = 0, i; int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid)); struct port_info *pi = NULL; - struct link_config *lc; - u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype); - int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0; - u32 mod = G_FW_PORT_CMD_MODTYPE(stat); - - if (stat & F_FW_PORT_CMD_RXPAUSE) - fc |= PAUSE_RX; - if (stat & F_FW_PORT_CMD_TXPAUSE) - fc |= PAUSE_TX; - if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) - speed = ETH_SPEED_NUM_100M; - else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) - speed = ETH_SPEED_NUM_1G; - else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) - speed = ETH_SPEED_NUM_10G; - else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G)) - speed = ETH_SPEED_NUM_25G; - else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) - speed = ETH_SPEED_NUM_40G; - else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G)) - speed = ETH_SPEED_NUM_100G; + int i; for_each_port(adap, i) { pi = adap2pinfo(adap, i); if (pi->tx_chan == chan) break; } - lc = &pi->link_cfg; - if (mod != pi->mod_type) { - pi->mod_type = mod; - t4_os_portmod_changed(adap, i); - } - if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc) { /* something changed */ - if (!link_ok && lc->link_ok) { - static const char * const reason[] = { - "Link Down", - "Remote Fault", - "Auto-negotiation Failure", - "Reserved", - "Insufficient Airflow", - "Unable To Determine Reason", - "No RX Signal Detected", - "Reserved", - }; - unsigned int rc = G_FW_PORT_CMD_LINKDNRC(stat); - - dev_warn(adap, "Port %d link down, reason: %s\n", - chan, reason[rc]); - } - lc->link_ok = link_ok; - lc->speed = speed; - lc->fc = fc; - lc->supported = be16_to_cpu(p->u.info.pcap); - } + t4_handle_get_port_info(pi, rpl); } else { dev_warn(adap, "Unknown firmware reply %d\n", opcode); return -EINVAL; @@ -4173,12 +4619,10 @@ void t4_reset_link_config(struct adapter *adap, int idx) * Initializes the SW state maintained for each link, including the link's * capabilities and default speed/flow-control/autonegotiation settings. 
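 *
 * A sketch of the resulting defaults for an autonegotiating port (i.e.
 * pcaps has FW_PORT_CAP32_ANEG set): acaps becomes pcaps & ADVERT_MASK,
 * autoneg is AUTONEG_ENABLE and requested_fc gains PAUSE_AUTONEG, while
 * FEC defaults to FEC_AUTO with auto_fec taken from the
 * Firmware-advertised acaps.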
*/ -static void init_link_config(struct link_config *lc, unsigned int pcaps, - unsigned int acaps) +void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, + fw_port_cap32_t acaps) { - unsigned int fec; - - lc->supported = pcaps; + lc->pcaps = pcaps; lc->requested_speed = 0; lc->speed = 0; lc->requested_fc = 0; @@ -4188,21 +4632,16 @@ static void init_link_config(struct link_config *lc, unsigned int pcaps, * For Forward Error Control, we default to whatever the Firmware * tells us the Link is currently advertising. */ - fec = 0; - if (acaps & FW_PORT_CAP_FEC_RS) - fec |= FEC_RS; - if (acaps & FW_PORT_CAP_FEC_BASER_RS) - fec |= FEC_BASER_RS; - if (acaps & FW_PORT_CAP_FEC_RESERVED) - fec |= FEC_RESERVED; - lc->requested_fec = fec; - lc->fec = fec; - - if (lc->supported & FW_PORT_CAP_ANEG) { - lc->advertising = lc->supported & ADVERT_MASK; + lc->auto_fec = fwcap_to_cc_fec(acaps); + lc->requested_fec = FEC_AUTO; + lc->fec = lc->auto_fec; + + if (lc->pcaps & FW_PORT_CAP32_ANEG) { + lc->acaps = lc->pcaps & ADVERT_MASK; lc->autoneg = AUTONEG_ENABLE; + lc->requested_fc |= PAUSE_AUTONEG; } else { - lc->advertising = 0; + lc->acaps = 0; lc->autoneg = AUTONEG_DISABLE; } } @@ -4242,9 +4681,8 @@ struct flash_desc { int t4_get_flash_params(struct adapter *adapter) { /* - * Table for non-Numonix supported flash parts. Numonix parts are left - * to the preexisting well-tested code. All flash parts have 64KB - * sectors. + * Table for non-standard supported Flash parts. Note, all Flash + * parts must have 64KB sectors. */ static struct flash_desc supported_flash[] = { { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ @@ -4253,7 +4691,7 @@ int t4_get_flash_params(struct adapter *adapter) int ret; u32 flashid = 0; unsigned int part, manufacturer; - unsigned int density, size; + unsigned int density, size = 0; /** * Issue a Read ID Command to the Flash part. We decode supported @@ -4268,6 +4706,9 @@ int t4_get_flash_params(struct adapter *adapter) if (ret < 0) return ret; + /** + * Check to see if it's one of our non-standard supported Flash parts. + */ for (part = 0; part < ARRAY_SIZE(supported_flash); part++) { if (supported_flash[part].vendor_and_model_id == flashid) { adapter->params.sf_size = @@ -4278,6 +4719,15 @@ int t4_get_flash_params(struct adapter *adapter) } } + /** + * Decode Flash part size. The code below looks repetitive with + * common encodings, but that's not guaranteed in the JEDEC + * specification for the Read JEDEC ID command. The only thing that + * we're guaranteed by the JEDEC specification is where the + * Manufacturer ID is in the returned result. After that each + * Manufacturer ~could~ encode things completely differently. + * Note, all Flash parts must have 64KB sectors. + */ manufacturer = flashid & 0xff; switch (manufacturer) { case 0x20: { /* Micron/Numonix */ @@ -4314,21 +4764,81 @@ int t4_get_flash_params(struct adapter *adapter) case 0x22: size = 1 << 28; /* 256MB */ break; - default: - dev_err(adapter, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n", - flashid, density); - return -EINVAL; } + break; + } - adapter->params.sf_size = size; - adapter->params.sf_nsec = size / SF_SEC_SIZE; + case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */ + /** + * This Density -> Size decoding table is taken from ISSI + * Data Sheets.
+ */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x16: + size = 1 << 25; /* 32MB */ + break; + case 0x17: + size = 1 << 26; /* 64MB */ + break; + } break; } - default: - dev_err(adapter, "Unsupported Flash Part, ID = %#x\n", flashid); - return -EINVAL; + + case 0xc2: { /* Macronix */ + /** + * This Density -> Size decoding table is taken from Macronix + * Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x17: + size = 1 << 23; /* 8MB */ + break; + case 0x18: + size = 1 << 24; /* 16MB */ + break; + } + break; } + case 0xef: { /* Winbond */ + /** + * This Density -> Size decoding table is taken from Winbond + * Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x17: + size = 1 << 23; /* 8MB */ + break; + case 0x18: + size = 1 << 24; /* 16MB */ + break; + } + break; + } + } + + /* If we didn't recognize the FLASH part, that's no real issue: the + * Hardware/Software contract says that Hardware will _*ALWAYS*_ + * use a FLASH part which is at least 4MB in size and has 64KB + * sectors. The unrecognized FLASH part is likely to be much larger + * than 4MB, but that's all we really need. + */ + if (size == 0) { + dev_warn(adapter, + "Unknown Flash Part, ID = %#x, assuming 4MB\n", + flashid); + size = 1 << 22; + } + + /** + * Store decoded Flash size and fall through into vetting code. + */ + adapter->params.sf_size = size; + adapter->params.sf_nsec = size / SF_SEC_SIZE; + found: /* * We should reject adapters with FLASHes which are too small. So, emit @@ -4633,6 +5143,8 @@ int t4_init_tp_params(struct adapter *adap) adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT); adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL); + adap->params.tp.ethertype_shift = t4_filter_field_shift(adap, + F_ETHERTYPE); /* * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID @@ -4641,6 +5153,11 @@ int t4_init_tp_params(struct adapter *adap) if ((adap->params.tp.ingress_config & F_VNIC) == 0) adap->params.tp.vnic_shift = -1; + v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A); + adap->params.tp.hash_filter_mask = v; + v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A); + adap->params.tp.hash_filter_mask |= ((u64)v << 32); + return 0; } @@ -4723,47 +5240,305 @@ int t4_init_rss_mode(struct adapter *adap, int mbox) int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) { - u8 addr[6]; + unsigned int fw_caps = adap->params.fw_caps_support; + fw_port_cap32_t pcaps, acaps; + enum fw_port_type port_type; + struct fw_port_cmd cmd; int ret, i, j = 0; - struct fw_port_cmd c; + int mdio_addr; + u32 action; + u8 addr[6]; - memset(&c, 0, sizeof(c)); + memset(&cmd, 0, sizeof(cmd)); for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); unsigned int rss_size = 0; - struct port_info *p = adap2pinfo(adap, i); while ((adap->params.portvec & (1 << j)) == 0) j++; - c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | - F_FW_CMD_REQUEST | F_FW_CMD_READ | - V_FW_PORT_CMD_PORTID(j)); - c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION( - FW_PORT_ACTION_GET_PORT_INFO) | - FW_LEN16(c)); - ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + /* If we haven't yet determined whether we're talking to + * Firmware which knows the new 32-bit Port Capabilities, it's + * time to find out now. This will also tell new Firmware to + * send us Port Status Updates using the new 32-bit Port + * Capabilities version of the Port Information message. 
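+ *
+ * The probe below simply tries to set the PFVF_PORT_CAPS32 parameter
+ * to 1: Firmware that understands 32-bit Port Capabilities accepts it
+ * (FW_CAPS32), while older Firmware fails the command and we fall back
+ * to FW_CAPS16.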
+ */ + if (fw_caps == FW_CAPS_UNKNOWN) { + u32 param, val, caps; + + caps = FW_PARAMS_PARAM_PFVF_PORT_CAPS32; + param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | + V_FW_PARAMS_PARAM_X(caps)); + val = 1; + ret = t4_set_params(adap, mbox, pf, vf, 1, &param, + &val); + fw_caps = ret == 0 ? FW_CAPS32 : FW_CAPS16; + adap->params.fw_caps_support = fw_caps; + } + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ | + V_FW_PORT_CMD_PORTID(j)); + action = fw_caps == FW_CAPS16 ? FW_PORT_ACTION_GET_PORT_INFO : + FW_PORT_ACTION_GET_PORT_INFO32; + cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) | + FW_LEN16(cmd)); + ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd); if (ret) return ret; + /* Extract the various fields from the Port Information message. + */ + if (fw_caps == FW_CAPS16) { + u32 lstatus = + be32_to_cpu(cmd.u.info.lstatus_to_modtype); + + port_type = G_FW_PORT_CMD_PTYPE(lstatus); + mdio_addr = (lstatus & F_FW_PORT_CMD_MDIOCAP) ? + (int)G_FW_PORT_CMD_MDIOADDR(lstatus) : -1; + pcaps = be16_to_cpu(cmd.u.info.pcap); + acaps = be16_to_cpu(cmd.u.info.acap); + pcaps = fwcaps16_to_caps32(pcaps); + acaps = fwcaps16_to_caps32(acaps); + } else { + u32 lstatus32 = + be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32); + + port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32); + mdio_addr = (lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ? + (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) : + -1; + pcaps = be32_to_cpu(cmd.u.info32.pcaps32); + acaps = be32_to_cpu(cmd.u.info32.acaps32); + } + ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); if (ret < 0) return ret; - p->viid = ret; - p->tx_chan = j; - p->rss_size = rss_size; + pi->viid = ret; + pi->tx_chan = j; + pi->rss_size = rss_size; t4_os_set_hw_addr(adap, i, addr); - ret = be32_to_cpu(c.u.info.lstatus_to_modtype); - p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ? - G_FW_PORT_CMD_MDIOADDR(ret) : -1; - p->port_type = G_FW_PORT_CMD_PTYPE(ret); - p->mod_type = FW_PORT_MOD_TYPE_NA; + pi->port_type = port_type; + pi->mdio_addr = mdio_addr; + pi->mod_type = FW_PORT_MOD_TYPE_NA; - init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap), - be16_to_cpu(c.u.info.acap)); + init_link_config(&pi->link_cfg, pcaps, acaps); j++; } return 0; } + +/** + * t4_memory_rw_addr - read/write adapter memory via PCIE memory window + * @adap: the adapter + * @win: PCI-E Memory Window to use + * @addr: address within adapter memory + * @len: amount of memory to transfer + * @hbuf: host memory buffer + * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) + * + * Reads/writes an [almost] arbitrary memory region in the firmware: the + * firmware memory address and host buffer must be aligned on 32-bit + * boundaries; the length may be arbitrary. + * + * NOTES: + * 1. The memory is transferred as a raw byte sequence from/to the + * firmware's memory. If this memory contains data structures which + * contain multi-byte integers, it's the caller's responsibility to + * perform appropriate byte order conversions. + * + * 2. It is the Caller's responsibility to ensure that no other code + * uses the specified PCI-E Memory Window while this routine is + * using it. This is typically done via the use of OS-specific + * locks, etc.
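+ *
+ * Usage sketch (hypothetical caller; "adap" initialized, memory window
+ * 0 reserved for this transfer per Note 2 above, and 0x10000 a made-up
+ * adapter memory address):
+ *
+ *	u32 buf[64];	32-bit aligned host buffer
+ *	int ret;
+ *
+ *	ret = t4_memory_rw_addr(adap, 0, 0x10000, sizeof(buf), buf,
+ *				T4_MEMORY_READ);
+ *	if (ret)
+ *		return ret;	-EINVAL on misaligned addr/hbuf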
+ */ +int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr, + u32 len, void *hbuf, int dir) +{ + u32 pos, offset, resid; + u32 win_pf, mem_reg, mem_aperture, mem_base; + u32 *buf; + + /* Argument sanity checks ...*/ + if (addr & 0x3 || (uintptr_t)hbuf & 0x3) + return -EINVAL; + buf = (u32 *)hbuf; + + /* It's convenient to be able to handle lengths which aren't a + * multiple of 32-bits because we often end up transferring files to + * the firmware. So we'll handle that by normalizing the length here + * and then handling any residual transfer at the end. + */ + resid = len & 0x3; + len -= resid; + + /* Each PCI-E Memory Window is programmed with a window size -- or + * "aperture" -- which controls the granularity of its mapping onto + * adapter memory. We need to grab that aperture in order to know + * how to use the specified window. The window is also programmed + * with the base address of the Memory Window in BAR0's address + * space. For T4 this is an absolute PCI-E Bus Address. For T5 + * the address is relative to BAR0. + */ + mem_reg = t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, + win)); + mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT); + mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT; + + win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf); + + /* Calculate our initial PCI-E Memory Window Position and Offset into + * that Window. + */ + pos = addr & ~(mem_aperture - 1); + offset = addr - pos; + + /* Set up initial PCI-E Memory Window to cover the start of our + * transfer. (Read it back to ensure that changes propagate before we + * attempt to use the new value.) + */ + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win), + pos | win_pf); + t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win)); + + /* Transfer data to/from the adapter as long as there's an integral + * number of 32-bit transfers to complete. + * + * A note on Endianness issues: + * + * The "register" reads and writes below from/to the PCI-E Memory + * Window invoke the standard adapter Big-Endian to PCI-E Link + * Little-Endian "swizzle." As a result, if we have the following + * data in adapter memory: + * + * Memory: ... | b0 | b1 | b2 | b3 | ... + * Address: i+0 i+1 i+2 i+3 + * + * Then a read of the adapter memory via the PCI-E Memory Window + * will yield: + * + * x = readl(i) + * 31 0 + * [ b3 | b2 | b1 | b0 ] + * + * If this value is stored into local memory on a Little-Endian system + * it will show up correctly in local memory as: + * + * ( ..., b0, b1, b2, b3, ... ) + * + * But on a Big-Endian system, the store will show up in memory + * incorrectly swizzled as: + * + * ( ..., b3, b2, b1, b0, ... ) + * + * So we need to account for this in the reads and writes to the + * PCI-E Memory Window below by undoing the register read/write + * swizzles. + */ + while (len > 0) { + if (dir == T4_MEMORY_READ) + *buf++ = le32_to_cpu((__le32)t4_read_reg(adap, + mem_base + + offset)); + else + t4_write_reg(adap, mem_base + offset, + (u32)cpu_to_le32(*buf++)); + offset += sizeof(__be32); + len -= sizeof(__be32); + + /* If we've reached the end of our current window aperture, + * move the PCI-E Memory Window on to the next. Note that + * doing this here after "len" may be 0 allows us to set up + * the PCI-E Memory Window for a possible final residual + * transfer below ...
+ */ + if (offset == mem_aperture) { + pos += mem_aperture; + offset = 0; + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, + win), pos | win_pf); + t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, + win)); + } + } + + /* If the original transfer had a length which wasn't a multiple of + * 32-bits, now's where we need to finish off the transfer of the + * residual amount. The PCI-E Memory Window has already been moved + * above (if necessary) to cover this final transfer. + */ + if (resid) { + union { + u32 word; + char byte[4]; + } last; + unsigned char *bp; + int i; + + if (dir == T4_MEMORY_READ) { + last.word = le32_to_cpu((__le32)t4_read_reg(adap, + mem_base + + offset)); + for (bp = (unsigned char *)buf, i = resid; i < 4; i++) + bp[i] = last.byte[i]; + } else { + last.word = *buf; + for (i = resid; i < 4; i++) + last.byte[i] = 0; + t4_write_reg(adap, mem_base + offset, + (u32)cpu_to_le32(last.word)); + } + } + + return 0; +} + +/** + * t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window + * @adap: the adapter + * @win: PCI-E Memory Window to use + * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC + * @maddr: address within indicated memory type + * @len: amount of memory to transfer + * @hbuf: host memory buffer + * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) + * + * Reads/writes adapter memory using t4_memory_rw_addr(). This routine + * provides a (memory type, address within memory type) interface. + */ +int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr, + u32 len, void *hbuf, int dir) +{ + u32 mtype_offset; + u32 edc_size, mc_size; + + /* Offset into the region of memory which is being accessed + * MEM_EDC0 = 0 + * MEM_EDC1 = 1 + * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller + * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5) + */ + edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR)); + if (mtype != MEM_MC1) { + mtype_offset = (mtype * (edc_size * 1024 * 1024)); + } else { + mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap, + A_MA_EXT_MEMORY0_BAR)); + mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; + } + + return t4_memory_rw_addr(adap, win, + mtype_offset + maddr, len, + hbuf, dir); +} diff --git a/drivers/net/cxgbe/base/t4_hw.h b/drivers/net/cxgbe/base/t4_hw.h index 07498841..e77563df 100644 --- a/drivers/net/cxgbe/base/t4_hw.h +++ b/drivers/net/cxgbe/base/t4_hw.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission.
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #ifndef __T4_HW_H @@ -70,6 +42,10 @@ enum { SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / SGE_EQ_IDXSIZE, }; +enum { + TCB_SIZE = 128, /* TCB size */ +}; + struct sge_qstat { /* data written to SGE queue status entries */ __be32 qid; __be16 cidx; diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h index 6acd749a..5d433c91 100644 --- a/drivers/net/cxgbe/base/t4_msg.h +++ b/drivers/net/cxgbe/base/t4_msg.h @@ -1,40 +1,21 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. 
*/ #ifndef T4_MSG_H #define T4_MSG_H enum { + CPL_ACT_OPEN_REQ = 0x3, + CPL_SET_TCB_FIELD = 0x5, + CPL_ABORT_REQ = 0xA, + CPL_ABORT_RPL = 0xB, + CPL_TID_RELEASE = 0x1A, + CPL_ACT_OPEN_RPL = 0x25, + CPL_ABORT_RPL_RSS = 0x2D, + CPL_SET_TCB_RPL = 0x3A, + CPL_ACT_OPEN_REQ6 = 0x83, CPL_SGE_EGR_UPDATE = 0xA5, CPL_FW4_MSG = 0xC0, CPL_FW6_MSG = 0xE0, @@ -42,6 +23,20 @@ enum { CPL_TX_PKT_XT = 0xEE, }; +enum CPL_error { + CPL_ERR_NONE = 0, + CPL_ERR_TCAM_FULL = 3, +}; + +enum { + ULP_MODE_NONE = 0, +}; + +enum { + CPL_ABORT_SEND_RST = 0, + CPL_ABORT_NO_RST, +}; + enum { /* TX_PKT_XT checksum types */ TX_CSUM_TCPIP = 8, TX_CSUM_UDPIP = 9, @@ -53,6 +48,24 @@ union opcode_tid { __u8 opcode; }; +#define S_CPL_OPCODE 24 +#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE) + +#define G_TID(x) ((x) & 0xFFFFFF) + +/* tid is assumed to be 24-bits */ +#define MK_OPCODE_TID(opcode, tid) (V_CPL_OPCODE(opcode) | (tid)) + +#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) + +/* extract the TID from a CPL command */ +#define GET_TID(cmd) (G_TID(be32_to_cpu(OPCODE_TID(cmd)))) + +/* partitioning of TID fields that also carry a queue id */ +#define S_TID_TID 0 +#define M_TID_TID 0x3fff +#define G_TID_TID(x) (((x) >> S_TID_TID) & M_TID_TID) + struct rss_header { __u8 opcode; #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN @@ -94,6 +107,169 @@ struct work_request_hdr { #define WR_HDR_SIZE 0 #endif +#define S_COOKIE 5 +#define M_COOKIE 0x7 +#define V_COOKIE(x) ((x) << S_COOKIE) +#define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE) + +/* option 0 fields */ +#define S_TX_CHAN 2 +#define V_TX_CHAN(x) ((x) << S_TX_CHAN) + +#define S_DELACK 5 +#define V_DELACK(x) ((x) << S_DELACK) + +#define S_NON_OFFLOAD 7 +#define V_NON_OFFLOAD(x) ((x) << S_NON_OFFLOAD) +#define F_NON_OFFLOAD V_NON_OFFLOAD(1U) + +#define S_ULP_MODE 8 +#define V_ULP_MODE(x) ((x) << S_ULP_MODE) + +#define S_SMAC_SEL 28 +#define V_SMAC_SEL(x) ((__u64)(x) << S_SMAC_SEL) + +#define S_TCAM_BYPASS 48 +#define V_TCAM_BYPASS(x) ((__u64)(x) << S_TCAM_BYPASS) +#define F_TCAM_BYPASS V_TCAM_BYPASS(1ULL) + +/* option 2 fields */ +#define S_RSS_QUEUE 0 +#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE) + +#define S_RSS_QUEUE_VALID 10 +#define V_RSS_QUEUE_VALID(x) ((x) << S_RSS_QUEUE_VALID) +#define F_RSS_QUEUE_VALID V_RSS_QUEUE_VALID(1U) + +#define S_CONG_CNTRL 14 +#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL) + +#define S_RX_CHANNEL 26 +#define V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL) +#define F_RX_CHANNEL V_RX_CHANNEL(1U) + +#define S_CCTRL_ECN 27 +#define V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN) + +#define S_T5_OPT_2_VALID 31 +#define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID) +#define F_T5_OPT_2_VALID V_T5_OPT_2_VALID(1U) + +struct cpl_t6_act_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; + __be32 rsvd; + __be32 opt2; + __be64 params; + __be32 rsvd2; + __be32 opt3; +}; + +struct cpl_t6_act_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be32 rsvd; + __be32 opt2; + __be64 params; + __be32 rsvd2; + __be32 opt3; +}; + +#define S_FILTER_TUPLE 24 +#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE) + +struct cpl_act_open_rpl { + RSS_HDR + union opcode_tid ot; + __be32 atid_status; +}; + +/* cpl_act_open_rpl.atid_status fields */ +#define S_AOPEN_STATUS 0 +#define M_AOPEN_STATUS 0xFF +#define G_AOPEN_STATUS(x) (((x) >> S_AOPEN_STATUS) & M_AOPEN_STATUS) + +#define S_AOPEN_ATID 
8 +#define M_AOPEN_ATID 0xFFFFFF +#define G_AOPEN_ATID(x) (((x) >> S_AOPEN_ATID) & M_AOPEN_ATID) + +struct cpl_set_tcb_field { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; + __be16 word_cookie; + __be64 mask; + __be64 val; +}; + +/* cpl_set_tcb_field.word_cookie fields */ +#define S_WORD 0 +#define V_WORD(x) ((x) << S_WORD) + +/* cpl_get_tcb.reply_ctrl fields */ +#define S_QUEUENO 0 +#define V_QUEUENO(x) ((x) << S_QUEUENO) + +#define S_REPLY_CHAN 14 +#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN) + +#define S_NO_REPLY 15 +#define V_NO_REPLY(x) ((x) << S_NO_REPLY) + +struct cpl_set_tcb_rpl { + RSS_HDR + union opcode_tid ot; + __be16 rsvd; + __u8 cookie; + __u8 status; + __be64 oldval; +}; + +/* cpl_abort_req status command code + */ +struct cpl_abort_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + __u8 rsvd1; + __u8 cmd; + __u8 rsvd2[6]; +}; + +struct cpl_abort_rpl_rss { + RSS_HDR + union opcode_tid ot; + __u8 rsvd[3]; + __u8 status; +}; + +struct cpl_abort_rpl { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + __u8 rsvd1; + __u8 cmd; + __u8 rsvd2[6]; +}; + +struct cpl_tid_release { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + struct cpl_tx_data { union opcode_tid ot; __be32 len; @@ -299,7 +475,13 @@ struct cpl_fw6_msg { __be64 data[4]; }; +/* ULP_TX opcodes */ +enum { + ULP_TX_PKT = 4 +}; + enum { + ULP_TX_SC_NOOP = 0x80, ULP_TX_SC_IMM = 0x81, ULP_TX_SC_DSGL = 0x82, ULP_TX_SC_ISGL = 0x83 diff --git a/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/drivers/net/cxgbe/base/t4_pci_id_tbl.h index 1230e738..5f5cbe04 100644 --- a/drivers/net/cxgbe/base/t4_pci_id_tbl.h +++ b/drivers/net/cxgbe/base/t4_pci_id_tbl.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. 
*/ #ifndef __T4_PCI_ID_TBL_H__ diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h index 1100e16f..6f872edc 100644 --- a/drivers/net/cxgbe/base/t4_regs.h +++ b/drivers/net/cxgbe/base/t4_regs.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. 
*/ #define MYPF_BASE 0x1b000 @@ -77,6 +49,7 @@ #define SGE_BASE_ADDR 0x1000 #define A_SGE_PF_KDOORBELL 0x0 +#define A_SGE_VF_KDOORBELL 0x0 #define S_QID 15 #define M_QID 0x1ffffU @@ -103,6 +76,9 @@ #define A_SGE_PF_GTS 0x4 +#define T4VF_SGE_BASE_ADDR 0x0000 +#define A_SGE_VF_GTS 0x4 + #define S_INGRESSQID 16 #define M_INGRESSQID 0xffffU #define V_INGRESSQID(x) ((x) << S_INGRESSQID) @@ -191,6 +167,8 @@ #define V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0) #define G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0) +#define A_SGE_EGRESS_QUEUES_PER_PAGE_VF 0x1014 + #define S_ERR_CPL_EXCEED_IQE_SIZE 22 #define V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE) #define F_ERR_CPL_EXCEED_IQE_SIZE V_ERR_CPL_EXCEED_IQE_SIZE(1U) @@ -280,6 +258,11 @@ #define A_SGE_CONM_CTRL 0x1094 +#define S_T6_EGRTHRESHOLDPACKING 16 +#define M_T6_EGRTHRESHOLDPACKING 0xffU +#define G_T6_EGRTHRESHOLDPACKING(x) (((x) >> S_T6_EGRTHRESHOLDPACKING) & \ + M_T6_EGRTHRESHOLDPACKING) + #define S_EGRTHRESHOLD 8 #define M_EGRTHRESHOLD 0x3fU #define V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD) @@ -370,6 +353,7 @@ #define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5) #define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 +#define A_SGE_INGRESS_QUEUES_PER_PAGE_VF 0x10f8 #define A_SGE_CONTROL2 0x1124 @@ -443,6 +427,8 @@ /* registers for module CIM */ #define CIM_BASE_ADDR 0x7b00 +#define A_CIM_VF_EXT_MAILBOX_CTRL 0x0 + #define A_CIM_PF_MAILBOX_DATA 0x240 #define A_CIM_PF_MAILBOX_CTRL 0x280 @@ -462,6 +448,8 @@ #define V_UPCRST(x) ((x) << S_UPCRST) #define F_UPCRST V_UPCRST(1U) +#define NUM_CIM_PF_MAILBOX_DATA_INSTANCES 16 + /* registers for module TP */ #define A_TP_OUT_CONFIG 0x7d04 @@ -470,6 +458,7 @@ #define F_CRXPKTENC V_CRXPKTENC(1U) #define TP_BASE_ADDR 0x7d00 +#define A_TP_CMM_TCB_BASE 0x7d10 #define A_TP_TIMER_RESOLUTION 0x7d90 @@ -503,9 +492,34 @@ #define V_MTUVALUE(x) ((x) << S_MTUVALUE) #define G_MTUVALUE(x) (((x) >> S_MTUVALUE) & M_MTUVALUE) +#define A_TP_RSS_CONFIG_VRT 0x7e00 + +#define S_KEYMODE 6 +#define M_KEYMODE 0x3U +#define G_KEYMODE(x) (((x) >> S_KEYMODE) & M_KEYMODE) + +#define S_KEYWRADDR 0 +#define V_KEYWRADDR(x) ((x) << S_KEYWRADDR) + +#define S_KEYWREN 4 +#define V_KEYWREN(x) ((x) << S_KEYWREN) +#define F_KEYWREN V_KEYWREN(1U) + +#define S_KEYWRADDRX 30 +#define V_KEYWRADDRX(x) ((x) << S_KEYWRADDRX) + +#define S_KEYEXTEND 26 +#define V_KEYEXTEND(x) ((x) << S_KEYEXTEND) +#define F_KEYEXTEND V_KEYEXTEND(1U) + +#define S_T6_VFWRADDR 8 +#define V_T6_VFWRADDR(x) ((x) << S_T6_VFWRADDR) + #define A_TP_PIO_ADDR 0x7e40 #define A_TP_PIO_DATA 0x7e44 +#define A_TP_RSS_SECRET_KEY0 0x40 + #define A_TP_VLAN_PRI_MAP 0x140 #define S_FRAGMENTATION 9 @@ -558,8 +572,27 @@ #define V_CSUM_HAS_PSEUDO_HDR(x) ((x) << S_CSUM_HAS_PSEUDO_HDR) #define F_CSUM_HAS_PSEUDO_HDR V_CSUM_HAS_PSEUDO_HDR(1U) +#define S_RM_OVLAN 9 +#define V_RM_OVLAN(x) ((x) << S_RM_OVLAN) + +/* registers for module MA */ +#define A_MA_EDRAM0_BAR 0x77c0 + +#define S_EDRAM0_SIZE 0 +#define M_EDRAM0_SIZE 0xfffU +#define V_EDRAM0_SIZE(x) ((x) << S_EDRAM0_SIZE) +#define G_EDRAM0_SIZE(x) (((x) >> S_EDRAM0_SIZE) & M_EDRAM0_SIZE) + +#define A_MA_EXT_MEMORY0_BAR 0x77c8 + +#define S_EXT_MEM0_SIZE 0 +#define M_EXT_MEM0_SIZE 0xfffU +#define V_EXT_MEM0_SIZE(x) ((x) << S_EXT_MEM0_SIZE) +#define G_EXT_MEM0_SIZE(x) (((x) >> S_EXT_MEM0_SIZE) & M_EXT_MEM0_SIZE) + /* registers for module MPS */ #define MPS_BASE_ADDR 0x9000 +#define T4VF_MPS_BASE_ADDR 0x0100 #define S_REPLICATE 11 #define V_REPLICATE(x) ((x) << S_REPLICATE) @@ -766,6 
+799,69 @@ #define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 #define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc +#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L 0x80 +#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88 +#define A_MPS_VF_STAT_TX_VF_MCAST_BYTES_L 0x90 +#define A_MPS_VF_STAT_TX_VF_MCAST_FRAMES_L 0x98 +#define A_MPS_VF_STAT_TX_VF_UCAST_BYTES_L 0xa0 +#define A_MPS_VF_STAT_TX_VF_UCAST_FRAMES_L 0xa8 +#define A_MPS_VF_STAT_TX_VF_DROP_FRAMES_L 0xb0 +#define A_MPS_VF_STAT_RX_VF_BCAST_FRAMES_L 0xd0 +#define A_MPS_VF_STAT_RX_VF_MCAST_FRAMES_L 0xe0 +#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_L 0xf0 +#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_L 0xf8 + +#define A_MPS_PORT0_RX_IVLAN 0x3011c + +#define S_IVLAN_ETYPE 0 +#define M_IVLAN_ETYPE 0xffffU +#define V_IVLAN_ETYPE(x) ((x) << S_IVLAN_ETYPE) + +#define MPS_PORT_RX_IVLAN_STRIDE 0x4000 +#define MPS_PORT_RX_IVLAN(idx) \ + (A_MPS_PORT0_RX_IVLAN + (idx) * MPS_PORT_RX_IVLAN_STRIDE) + +#define A_MPS_PORT0_RX_OVLAN0 0x30120 + +#define S_OVLAN_MASK 16 +#define M_OVLAN_MASK 0xffffU +#define V_OVLAN_MASK(x) ((x) << S_OVLAN_MASK) + +#define S_OVLAN_ETYPE 0 +#define M_OVLAN_ETYPE 0xffffU +#define V_OVLAN_ETYPE(x) ((x) << S_OVLAN_ETYPE) + +#define MPS_PORT_RX_OVLAN_STRIDE 0x4000 +#define MPS_PORT_RX_OVLAN_BASE(idx) \ +(A_MPS_PORT0_RX_OVLAN0 + (idx) * MPS_PORT_RX_OVLAN_STRIDE) +#define MPS_PORT_RX_OVLAN_REG(idx, reg) (MPS_PORT_RX_OVLAN_BASE(idx) + (reg)) + +#define A_RX_OVLAN0 0x0 +#define A_RX_OVLAN1 0x4 +#define A_RX_OVLAN2 0x8 + +#define A_MPS_PORT0_RX_CTL 0x30100 + +#define S_OVLAN_EN0 0 +#define V_OVLAN_EN0(x) ((x) << S_OVLAN_EN0) +#define F_OVLAN_EN0 V_OVLAN_EN0(1) + +#define S_OVLAN_EN1 1 +#define V_OVLAN_EN1(x) ((x) << S_OVLAN_EN1) +#define F_OVLAN_EN1 V_OVLAN_EN1(1) + +#define S_OVLAN_EN2 2 +#define V_OVLAN_EN2(x) ((x) << S_OVLAN_EN2) +#define F_OVLAN_EN2 V_OVLAN_EN2(1) + +#define S_IVLAN_EN 4 +#define V_IVLAN_EN(x) ((x) << S_IVLAN_EN) +#define F_IVLAN_EN V_IVLAN_EN(1) + +#define MPS_PORT_RX_CTL_STRIDE 0x4000 +#define MPS_PORT_RX_CTL(idx) \ + (A_MPS_PORT0_RX_CTL + (idx) * MPS_PORT_RX_CTL_STRIDE) + /* registers for module ULP_RX */ #define ULP_RX_BASE_ADDR 0x19150 @@ -823,6 +919,7 @@ #define F_PFCIM V_PFCIM(1U) #define A_PL_WHOAMI 0x19400 +#define A_PL_VF_WHOAMI 0x0 #define A_PL_RST 0x19428 @@ -837,8 +934,21 @@ #define F_PIORSTMODE V_PIORSTMODE(1U) #define A_PL_REV 0x1943c +#define A_PL_VF_REV 0x4 #define S_REV 0 #define M_REV 0xfU #define V_REV(x) ((x) << S_REV) #define G_REV(x) (((x) >> S_REV) & M_REV) + +/* registers for module LE */ +#define A_LE_DB_CONFIG 0x19c04 + +#define S_HASHEN 20 +#define V_HASHEN(x) ((x) << S_HASHEN) +#define F_HASHEN V_HASHEN(1U) + +#define A_LE_DB_TID_HASHBASE 0x19df8 + +#define LE_3_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eac +#define LE_4_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eb0 diff --git a/drivers/net/cxgbe/base/t4_regs_values.h b/drivers/net/cxgbe/base/t4_regs_values.h index 9085ff6d..a9414d20 100644 --- a/drivers/net/cxgbe/base/t4_regs_values.h +++ b/drivers/net/cxgbe/base/t4_regs_values.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #ifndef __T4_REGS_VALUES_H__ diff --git a/drivers/net/cxgbe/base/t4_tcb.h b/drivers/net/cxgbe/base/t4_tcb.h new file mode 100644 index 00000000..25435f9f --- /dev/null +++ b/drivers/net/cxgbe/base/t4_tcb.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef _T4_TCB_DEFS_H +#define _T4_TCB_DEFS_H + +/* 105:96 */ +#define W_TCB_RSS_INFO 3 +#define S_TCB_RSS_INFO 0 +#define M_TCB_RSS_INFO 0x3ffULL +#define V_TCB_RSS_INFO(x) ((x) << S_TCB_RSS_INFO) + +/* 191:160 */ +#define W_TCB_TIMESTAMP 5 +#define S_TCB_TIMESTAMP 0 +#define M_TCB_TIMESTAMP 0xffffffffULL +#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP) + +/* 223:192 */ +#define S_TCB_T_RTT_TS_RECENT_AGE 0 +#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL +#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE) + +#endif /* _T4_TCB_DEFS_H */ diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h index 6ca4f318..e80b58a3 100644 --- a/drivers/net/cxgbe/base/t4fw_interface.h +++ b/drivers/net/cxgbe/base/t4fw_interface.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #ifndef _T4FW_INTERFACE_H_ @@ -82,8 +54,13 @@ enum fw_memtype { ********************************/ enum fw_wr_opcodes { + FW_FILTER_WR = 0x02, + FW_ULPTX_WR = 0x04, + FW_TP_WR = 0x05, FW_ETH_TX_PKT_WR = 0x08, FW_ETH_TX_PKTS_WR = 0x09, + FW_ETH_TX_PKT_VM_WR = 0x11, + FW_ETH_TX_PKTS_VM_WR = 0x12, FW_ETH_TX_PKTS2_WR = 0x78, }; @@ -102,6 +79,11 @@ struct fw_wr_hdr { #define V_FW_WR_OP(x) ((x) << S_FW_WR_OP) #define G_FW_WR_OP(x) (((x) >> S_FW_WR_OP) & M_FW_WR_OP) +/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER + */ +#define S_FW_WR_ATOMIC 23 +#define V_FW_WR_ATOMIC(x) ((x) << S_FW_WR_ATOMIC) + /* work request immediate data length (hi) */ #define S_FW_WR_IMMDLEN 0 @@ -118,6 +100,11 @@ struct fw_wr_hdr { #define G_FW_WR_EQUEQ(x) (((x) >> S_FW_WR_EQUEQ) & M_FW_WR_EQUEQ) #define F_FW_WR_EQUEQ V_FW_WR_EQUEQ(1U) +/* flow context identifier (lo) + */ +#define S_FW_WR_FLOWID 8 +#define V_FW_WR_FLOWID(x) ((x) << S_FW_WR_FLOWID) + /* length in units of 16-bytes (lo) */ #define S_FW_WR_LEN16 0 @@ -146,6 +133,173 @@ struct fw_eth_tx_pkts_wr { __u8 type; }; +struct fw_eth_tx_pkt_vm_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be32 r3[2]; + __u8 ethmacdst[6]; + __u8 ethmacsrc[6]; + __be16 ethtype; + __be16 vlantci; +}; + +struct fw_eth_tx_pkts_vm_wr { + __be32 op_pkd; + __be32 equiq_to_len16; + __be32 r3; + __be16 plen; + __u8 npkt; + __u8 r4; + __u8 ethmacdst[6]; + __u8 ethmacsrc[6]; + __be16 ethtype; + __be16 vlantci; +}; + +/* filter wr reply code in cookie in CPL_SET_TCB_RPL */ +enum fw_filter_wr_cookie { + FW_FILTER_WR_SUCCESS, + FW_FILTER_WR_FLT_ADDED, + FW_FILTER_WR_FLT_DELETED, + FW_FILTER_WR_SMT_TBL_FULL, + FW_FILTER_WR_EINVAL, +}; + +struct fw_filter_wr { + __be32 op_pkd; + __be32 len16_pkd; + __be64 r3; + __be32 tid_to_iq; + __be32 del_filter_to_l2tix; + __be16 ethtype; + __be16 ethtypem; + __u8 frag_to_ovlan_vldm; + __u8 smac_sel; + __be16 rx_chan_rx_rpl_iq; + __be32 maci_to_matchtypem; + __u8 ptcl; + __u8 ptclm; + __u8 ttyp; + __u8 ttypm; + __be16 ivlan; + __be16 ivlanm; + __be16 ovlan; + __be16 ovlanm; + __u8 lip[16]; + __u8 lipm[16]; + __u8 fip[16]; + __u8 fipm[16]; + __be16 lp; + __be16 lpm; + __be16 fp; + __be16 fpm; + __be16 r7; + __u8 sma[6]; +}; + +#define S_FW_FILTER_WR_TID 12 +#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID) + +#define S_FW_FILTER_WR_RQTYPE 11 +#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE) + +#define S_FW_FILTER_WR_NOREPLY 10 +#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY) + +#define S_FW_FILTER_WR_IQ 0 +#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ) + +#define 
S_FW_FILTER_WR_DEL_FILTER 31 +#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER) +#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U) + +#define S_FW_FILTER_WR_RPTTID 25 +#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID) + +#define S_FW_FILTER_WR_DROP 24 +#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP) + +#define S_FW_FILTER_WR_DIRSTEER 23 +#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER) + +#define S_FW_FILTER_WR_MASKHASH 22 +#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH) + +#define S_FW_FILTER_WR_DIRSTEERHASH 21 +#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH) + +#define S_FW_FILTER_WR_LPBK 20 +#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK) + +#define S_FW_FILTER_WR_DMAC 19 +#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC) + +#define S_FW_FILTER_WR_INSVLAN 17 +#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN) + +#define S_FW_FILTER_WR_RMVLAN 16 +#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN) + +#define S_FW_FILTER_WR_HITCNTS 15 +#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS) + +#define S_FW_FILTER_WR_TXCHAN 13 +#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN) + +#define S_FW_FILTER_WR_PRIO 12 +#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO) + +#define S_FW_FILTER_WR_L2TIX 0 +#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX) + +#define S_FW_FILTER_WR_FRAG 7 +#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG) + +#define S_FW_FILTER_WR_FRAGM 6 +#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM) + +#define S_FW_FILTER_WR_IVLAN_VLD 5 +#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD) + +#define S_FW_FILTER_WR_OVLAN_VLD 4 +#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD) + +#define S_FW_FILTER_WR_IVLAN_VLDM 3 +#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM) + +#define S_FW_FILTER_WR_OVLAN_VLDM 2 +#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM) + +#define S_FW_FILTER_WR_RX_CHAN 15 +#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN) + +#define S_FW_FILTER_WR_RX_RPL_IQ 0 +#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ) + +#define S_FW_FILTER_WR_MACI 23 +#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI) + +#define S_FW_FILTER_WR_MACIM 14 +#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM) + +#define S_FW_FILTER_WR_FCOE 13 +#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE) + +#define S_FW_FILTER_WR_FCOEM 12 +#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM) + +#define S_FW_FILTER_WR_PORT 9 +#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT) + +#define S_FW_FILTER_WR_PORTM 6 +#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM) + +#define S_FW_FILTER_WR_MATCHTYPE 3 +#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE) + +#define S_FW_FILTER_WR_MATCHTYPEM 0 +#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM) + /****************************************************************************** * C O M M A N D s *********************/ @@ -171,24 +325,34 @@ struct fw_eth_tx_pkts_wr { #define FW_CMD_HELLO_RETRIES 3 enum fw_cmd_opcodes { + FW_LDST_CMD = 0x01, FW_RESET_CMD = 0x03, FW_HELLO_CMD = 0x04, FW_BYE_CMD = 0x05, FW_INITIALIZE_CMD = 0x06, FW_CAPS_CONFIG_CMD = 0x07, FW_PARAMS_CMD = 0x08, + FW_PFVF_CMD = 0x09, FW_IQ_CMD = 0x10, 
FW_EQ_ETH_CMD = 0x12, + FW_EQ_CTRL_CMD = 0x13, FW_VI_CMD = 0x14, FW_VI_MAC_CMD = 0x15, FW_VI_RXMODE_CMD = 0x16, FW_VI_ENABLE_CMD = 0x17, + FW_VI_STATS_CMD = 0x1a, FW_PORT_CMD = 0x1b, FW_RSS_IND_TBL_CMD = 0x20, + FW_RSS_GLB_CONFIG_CMD = 0x22, FW_RSS_VI_CONFIG_CMD = 0x23, + FW_CLIP_CMD = 0x28, FW_DEBUG_CMD = 0x81, }; +enum fw_cmd_cap { + FW_CMD_CAP_PORT = 0x04, +}; + /* * Generic command header flit0 */ @@ -238,6 +402,94 @@ struct fw_cmd_hdr { #define FW_LEN16(fw_struct) V_FW_CMD_LEN16(sizeof(fw_struct) / 16) +/* address spaces + */ +enum fw_ldst_addrspc { + FW_LDST_ADDRSPC_TP_PIO = 0x0010, +}; + +struct fw_ldst_cmd { + __be32 op_to_addrspace; + __be32 cycles_to_len16; + union fw_ldst { + struct fw_ldst_addrval { + __be32 addr; + __be32 val; + } addrval; + struct fw_ldst_idctxt { + __be32 physid; + __be32 msg_ctxtflush; + __be32 ctxt_data7; + __be32 ctxt_data6; + __be32 ctxt_data5; + __be32 ctxt_data4; + __be32 ctxt_data3; + __be32 ctxt_data2; + __be32 ctxt_data1; + __be32 ctxt_data0; + } idctxt; + struct fw_ldst_mdio { + __be16 paddr_mmd; + __be16 raddr; + __be16 vctl; + __be16 rval; + } mdio; + struct fw_ldst_mps { + __be16 fid_ctl; + __be16 rplcpf_pkd; + __be32 rplc127_96; + __be32 rplc95_64; + __be32 rplc63_32; + __be32 rplc31_0; + __be32 atrb; + __be16 vlan[16]; + } mps; + struct fw_ldst_func { + __u8 access_ctl; + __u8 mod_index; + __be16 ctl_id; + __be32 offset; + __be64 data0; + __be64 data1; + } func; + struct fw_ldst_pcie { + __u8 ctrl_to_fn; + __u8 bnum; + __u8 r; + __u8 ext_r; + __u8 select_naccess; + __u8 pcie_fn; + __be16 nset_pkd; + __be32 data[12]; + } pcie; + struct fw_ldst_i2c_deprecated { + __u8 pid_pkd; + __u8 base; + __u8 boffset; + __u8 data; + __be32 r9; + } i2c_deprecated; + struct fw_ldst_i2c { + __u8 pid; + __u8 did; + __u8 boffset; + __u8 blen; + __be32 r9; + __u8 data[48]; + } i2c; + struct fw_ldst_le { + __be32 index; + __be32 r9; + __u8 val[33]; + __u8 r11[7]; + } le; + } u; +}; + +#define S_FW_LDST_CMD_ADDRSPACE 0 +#define M_FW_LDST_CMD_ADDRSPACE 0xff +#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE) + struct fw_reset_cmd { __be32 op_to_write; __be32 retval_len16; @@ -386,6 +638,7 @@ struct fw_caps_config_cmd { enum fw_params_mnem { FW_PARAMS_MNEM_DEV = 1, /* device params */ FW_PARAMS_MNEM_PFVF = 2, /* function params */ + FW_PARAMS_MNEM_REG = 3, /* limited register access */ FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ }; @@ -395,6 +648,12 @@ enum fw_params_mnem { enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */ FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */ + FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs + * allocated by the device's + * Lookup Engine + */ + FW_PARAMS_PARAM_DEV_FWREV = 0x0B, /* fw version */ + FW_PARAMS_PARAM_DEV_TPREV = 0x0C, /* tp version */ FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, }; @@ -402,7 +661,12 @@ enum fw_params_param_dev { * physical and virtual function parameters */ enum fw_params_param_pfvf { - FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31 + FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03, + FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04, + FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05, + FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06, + FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31, + FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A }; /* @@ -443,6 +707,10 @@ enum fw_params_param_dmaq { #define G_FW_PARAMS_PARAM_YZ(x) \ (((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ) +#define S_FW_PARAMS_PARAM_XYZ 0 +#define M_FW_PARAMS_PARAM_XYZ 0xffffff +#define 
V_FW_PARAMS_PARAM_XYZ(x) ((x) << S_FW_PARAMS_PARAM_XYZ) + struct fw_params_cmd { __be32 op_to_vfn; __be32 retval_len16; @@ -464,6 +732,74 @@ struct fw_params_cmd { #define G_FW_PARAMS_CMD_VFN(x) \ (((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN) +struct fw_pfvf_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + __be32 niqflint_niq; + __be32 type_to_neq; + __be32 tc_to_nexactf; + __be32 r_caps_to_nethctrl; + __be16 nricq; + __be16 nriqp; + __be32 r4; +}; + +#define S_FW_PFVF_CMD_PFN 8 +#define V_FW_PFVF_CMD_PFN(x) ((x) << S_FW_PFVF_CMD_PFN) + +#define S_FW_PFVF_CMD_VFN 0 +#define V_FW_PFVF_CMD_VFN(x) ((x) << S_FW_PFVF_CMD_VFN) + +#define S_FW_PFVF_CMD_NIQFLINT 20 +#define M_FW_PFVF_CMD_NIQFLINT 0xfff +#define G_FW_PFVF_CMD_NIQFLINT(x) \ + (((x) >> S_FW_PFVF_CMD_NIQFLINT) & M_FW_PFVF_CMD_NIQFLINT) + +#define S_FW_PFVF_CMD_NIQ 0 +#define M_FW_PFVF_CMD_NIQ 0xfffff +#define G_FW_PFVF_CMD_NIQ(x) \ + (((x) >> S_FW_PFVF_CMD_NIQ) & M_FW_PFVF_CMD_NIQ) + +#define S_FW_PFVF_CMD_PMASK 20 +#define M_FW_PFVF_CMD_PMASK 0xf +#define G_FW_PFVF_CMD_PMASK(x) \ + (((x) >> S_FW_PFVF_CMD_PMASK) & M_FW_PFVF_CMD_PMASK) + +#define S_FW_PFVF_CMD_NEQ 0 +#define M_FW_PFVF_CMD_NEQ 0xfffff +#define G_FW_PFVF_CMD_NEQ(x) \ + (((x) >> S_FW_PFVF_CMD_NEQ) & M_FW_PFVF_CMD_NEQ) + +#define S_FW_PFVF_CMD_TC 24 +#define M_FW_PFVF_CMD_TC 0xff +#define G_FW_PFVF_CMD_TC(x) \ + (((x) >> S_FW_PFVF_CMD_TC) & M_FW_PFVF_CMD_TC) + +#define S_FW_PFVF_CMD_NVI 16 +#define M_FW_PFVF_CMD_NVI 0xff +#define G_FW_PFVF_CMD_NVI(x) \ + (((x) >> S_FW_PFVF_CMD_NVI) & M_FW_PFVF_CMD_NVI) + +#define S_FW_PFVF_CMD_NEXACTF 0 +#define M_FW_PFVF_CMD_NEXACTF 0xffff +#define G_FW_PFVF_CMD_NEXACTF(x) \ + (((x) >> S_FW_PFVF_CMD_NEXACTF) & M_FW_PFVF_CMD_NEXACTF) + +#define S_FW_PFVF_CMD_R_CAPS 24 +#define M_FW_PFVF_CMD_R_CAPS 0xff +#define G_FW_PFVF_CMD_R_CAPS(x) \ + (((x) >> S_FW_PFVF_CMD_R_CAPS) & M_FW_PFVF_CMD_R_CAPS) + +#define S_FW_PFVF_CMD_WX_CAPS 16 +#define M_FW_PFVF_CMD_WX_CAPS 0xff +#define G_FW_PFVF_CMD_WX_CAPS(x) \ + (((x) >> S_FW_PFVF_CMD_WX_CAPS) & M_FW_PFVF_CMD_WX_CAPS) + +#define S_FW_PFVF_CMD_NETHCTRL 0 +#define M_FW_PFVF_CMD_NETHCTRL 0xffff +#define G_FW_PFVF_CMD_NETHCTRL(x) \ + (((x) >> S_FW_PFVF_CMD_NETHCTRL) & M_FW_PFVF_CMD_NETHCTRL) + /* * ingress queue type; the first 1K ingress queues can have associated 0, * 1 or 2 free lists and an interrupt, all other ingress queues lack these @@ -473,6 +809,11 @@ enum fw_iq_type { FW_IQ_TYPE_FL_INT_CAP, }; +enum fw_iq_iqtype { + FW_IQ_IQTYPE_NIC = 1, + FW_IQ_IQTYPE_OFLD, +}; + struct fw_iq_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; @@ -606,6 +947,9 @@ struct fw_iq_cmd { (((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN) #define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U) +#define S_FW_IQ_CMD_IQTYPE 24 +#define V_FW_IQ_CMD_IQTYPE(x) ((x) << S_FW_IQ_CMD_IQTYPE) + #define S_FW_IQ_CMD_FL0CNGCHMAP 20 #define M_FW_IQ_CMD_FL0CNGCHMAP 0xf #define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP) @@ -724,6 +1068,11 @@ struct fw_eq_eth_cmd { #define G_FW_EQ_ETH_CMD_EQID(x) \ (((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID) +#define S_FW_EQ_ETH_CMD_PHYSEQID 0 +#define M_FW_EQ_ETH_CMD_PHYSEQID 0xfffff +#define G_FW_EQ_ETH_CMD_PHYSEQID(x) \ + (((x) >> S_FW_EQ_ETH_CMD_PHYSEQID) & M_FW_EQ_ETH_CMD_PHYSEQID) + #define S_FW_EQ_ETH_CMD_FETCHRO 22 #define M_FW_EQ_ETH_CMD_FETCHRO 0x1 #define V_FW_EQ_ETH_CMD_FETCHRO(x) ((x) << S_FW_EQ_ETH_CMD_FETCHRO) @@ -786,6 +1135,75 @@ struct fw_eq_eth_cmd { #define G_FW_EQ_ETH_CMD_VIID(x) \ (((x) >> S_FW_EQ_ETH_CMD_VIID) & 
M_FW_EQ_ETH_CMD_VIID) +struct fw_eq_ctrl_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 cmpliqid_eqid; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; +}; + +#define S_FW_EQ_CTRL_CMD_PFN 8 +#define V_FW_EQ_CTRL_CMD_PFN(x) ((x) << S_FW_EQ_CTRL_CMD_PFN) + +#define S_FW_EQ_CTRL_CMD_VFN 0 +#define V_FW_EQ_CTRL_CMD_VFN(x) ((x) << S_FW_EQ_CTRL_CMD_VFN) + +#define S_FW_EQ_CTRL_CMD_ALLOC 31 +#define V_FW_EQ_CTRL_CMD_ALLOC(x) ((x) << S_FW_EQ_CTRL_CMD_ALLOC) +#define F_FW_EQ_CTRL_CMD_ALLOC V_FW_EQ_CTRL_CMD_ALLOC(1U) + +#define S_FW_EQ_CTRL_CMD_FREE 30 +#define V_FW_EQ_CTRL_CMD_FREE(x) ((x) << S_FW_EQ_CTRL_CMD_FREE) +#define F_FW_EQ_CTRL_CMD_FREE V_FW_EQ_CTRL_CMD_FREE(1U) + +#define S_FW_EQ_CTRL_CMD_EQSTART 28 +#define V_FW_EQ_CTRL_CMD_EQSTART(x) ((x) << S_FW_EQ_CTRL_CMD_EQSTART) +#define F_FW_EQ_CTRL_CMD_EQSTART V_FW_EQ_CTRL_CMD_EQSTART(1U) + +#define S_FW_EQ_CTRL_CMD_CMPLIQID 20 +#define V_FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << S_FW_EQ_CTRL_CMD_CMPLIQID) + +#define S_FW_EQ_CTRL_CMD_EQID 0 +#define M_FW_EQ_CTRL_CMD_EQID 0xfffff +#define V_FW_EQ_CTRL_CMD_EQID(x) ((x) << S_FW_EQ_CTRL_CMD_EQID) +#define G_FW_EQ_CTRL_CMD_EQID(x) \ + (((x) >> S_FW_EQ_CTRL_CMD_EQID) & M_FW_EQ_CTRL_CMD_EQID) + +#define S_FW_EQ_CTRL_CMD_PHYSEQID 0 +#define M_FW_EQ_CTRL_CMD_PHYSEQID 0xfffff +#define V_FW_EQ_CTRL_CMD_PHYSEQID(x) ((x) << S_FW_EQ_CTRL_CMD_PHYSEQID) +#define G_FW_EQ_CTRL_CMD_PHYSEQID(x) \ + (((x) >> S_FW_EQ_CTRL_CMD_PHYSEQID) & M_FW_EQ_CTRL_CMD_PHYSEQID) + +#define S_FW_EQ_CTRL_CMD_FETCHRO 22 +#define V_FW_EQ_CTRL_CMD_FETCHRO(x) ((x) << S_FW_EQ_CTRL_CMD_FETCHRO) +#define F_FW_EQ_CTRL_CMD_FETCHRO V_FW_EQ_CTRL_CMD_FETCHRO(1U) + +#define S_FW_EQ_CTRL_CMD_HOSTFCMODE 20 +#define M_FW_EQ_CTRL_CMD_HOSTFCMODE 0x3 +#define V_FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_CTRL_CMD_HOSTFCMODE) + +#define S_FW_EQ_CTRL_CMD_PCIECHN 16 +#define V_FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << S_FW_EQ_CTRL_CMD_PCIECHN) + +#define S_FW_EQ_CTRL_CMD_IQID 0 +#define V_FW_EQ_CTRL_CMD_IQID(x) ((x) << S_FW_EQ_CTRL_CMD_IQID) + +#define S_FW_EQ_CTRL_CMD_FBMIN 23 +#define V_FW_EQ_CTRL_CMD_FBMIN(x) ((x) << S_FW_EQ_CTRL_CMD_FBMIN) + +#define S_FW_EQ_CTRL_CMD_FBMAX 20 +#define V_FW_EQ_CTRL_CMD_FBMAX(x) ((x) << S_FW_EQ_CTRL_CMD_FBMAX) + +#define S_FW_EQ_CTRL_CMD_CIDXFTHRESH 16 +#define V_FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESH) + +#define S_FW_EQ_CTRL_CMD_EQSIZE 0 +#define V_FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << S_FW_EQ_CTRL_CMD_EQSIZE) + enum fw_vi_func { FW_VI_FUNC_ETH, }; @@ -988,6 +1406,9 @@ struct fw_vi_enable_cmd { (((x) >> S_FW_VI_ENABLE_CMD_DCB_INFO) & M_FW_VI_ENABLE_CMD_DCB_INFO) #define F_FW_VI_ENABLE_CMD_DCB_INFO V_FW_VI_ENABLE_CMD_DCB_INFO(1U) +/* VI VF stats offset definitions */ +#define VI_VF_NUM_STATS 16 + /* VI PF stats offset definitions */ #define VI_PF_NUM_STATS 17 enum fw_vi_stats_pf_index { @@ -1065,7 +1486,16 @@ struct fw_vi_stats_cmd { } u; }; -/* port capabilities bitmap */ +#define S_FW_VI_STATS_CMD_VIID 0 +#define V_FW_VI_STATS_CMD_VIID(x) ((x) << S_FW_VI_STATS_CMD_VIID) + +#define S_FW_VI_STATS_CMD_NSTATS 12 +#define V_FW_VI_STATS_CMD_NSTATS(x) ((x) << S_FW_VI_STATS_CMD_NSTATS) + +#define S_FW_VI_STATS_CMD_IX 0 +#define V_FW_VI_STATS_CMD_IX(x) ((x) << S_FW_VI_STATS_CMD_IX) + +/* old 16-bit port capabilities bitmap */ enum fw_port_cap { FW_PORT_CAP_SPEED_100M = 0x0001, FW_PORT_CAP_SPEED_1G = 0x0002, @@ -1100,9 +1530,45 @@ enum fw_port_mdi { #define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI) #define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & 
M_FW_PORT_CAP_MDI) +/* new 32-bit port capabilities bitmap (fw_port_cap32_t) */ +#define FW_PORT_CAP32_SPEED_100M 0x00000001UL +#define FW_PORT_CAP32_SPEED_1G 0x00000002UL +#define FW_PORT_CAP32_SPEED_10G 0x00000004UL +#define FW_PORT_CAP32_SPEED_25G 0x00000008UL +#define FW_PORT_CAP32_SPEED_40G 0x00000010UL +#define FW_PORT_CAP32_SPEED_50G 0x00000020UL +#define FW_PORT_CAP32_SPEED_100G 0x00000040UL +#define FW_PORT_CAP32_FC_RX 0x00010000UL +#define FW_PORT_CAP32_FC_TX 0x00020000UL +#define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL +#define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL +#define FW_PORT_CAP32_ANEG 0x00100000UL +#define FW_PORT_CAP32_MDIX 0x00200000UL +#define FW_PORT_CAP32_MDIAUTO 0x00400000UL +#define FW_PORT_CAP32_FEC_RS 0x00800000UL +#define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL + +#define S_FW_PORT_CAP32_SPEED 0 +#define M_FW_PORT_CAP32_SPEED 0xfff +#define V_FW_PORT_CAP32_SPEED(x) ((x) << S_FW_PORT_CAP32_SPEED) +#define G_FW_PORT_CAP32_SPEED(x) \ + (((x) >> S_FW_PORT_CAP32_SPEED) & M_FW_PORT_CAP32_SPEED) + +enum fw_port_mdi32 { + FW_PORT_CAP32_MDI_AUTO, +}; + +#define S_FW_PORT_CAP32_MDI 21 +#define M_FW_PORT_CAP32_MDI 3 +#define V_FW_PORT_CAP32_MDI(x) ((x) << S_FW_PORT_CAP32_MDI) +#define G_FW_PORT_CAP32_MDI(x) \ + (((x) >> S_FW_PORT_CAP32_MDI) & M_FW_PORT_CAP32_MDI) + enum fw_port_action { FW_PORT_ACTION_L1_CFG = 0x0001, FW_PORT_ACTION_GET_PORT_INFO = 0x0003, + FW_PORT_ACTION_L1_CFG32 = 0x0009, + FW_PORT_ACTION_GET_PORT_INFO32 = 0x000a, }; struct fw_port_cmd { @@ -1191,6 +1657,18 @@ struct fw_port_cmd { __be64 r12; } control; } dcb; + struct fw_port_l1cfg32 { + __be32 rcap32; + __be32 r; + } l1cfg32; + struct fw_port_info32 { + __be32 lstatus32_to_cbllen32; + __be32 auxlinfo32_mtu32; + __be32 linkattr32; + __be32 pcaps32; + __be32 acaps32; + __be32 lpacaps32; + } info32; } u; }; @@ -1264,6 +1742,36 @@ struct fw_port_cmd { #define G_FW_PORT_CMD_MODTYPE(x) \ (((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE) +#define S_FW_PORT_CMD_LSTATUS32 31 +#define M_FW_PORT_CMD_LSTATUS32 0x1 +#define V_FW_PORT_CMD_LSTATUS32(x) ((x) << S_FW_PORT_CMD_LSTATUS32) +#define F_FW_PORT_CMD_LSTATUS32 V_FW_PORT_CMD_LSTATUS32(1U) + +#define S_FW_PORT_CMD_LINKDNRC32 28 +#define M_FW_PORT_CMD_LINKDNRC32 0x7 +#define G_FW_PORT_CMD_LINKDNRC32(x) \ + (((x) >> S_FW_PORT_CMD_LINKDNRC32) & M_FW_PORT_CMD_LINKDNRC32) + +#define S_FW_PORT_CMD_MDIOCAP32 26 +#define M_FW_PORT_CMD_MDIOCAP32 0x1 +#define V_FW_PORT_CMD_MDIOCAP32(x) ((x) << S_FW_PORT_CMD_MDIOCAP32) +#define F_FW_PORT_CMD_MDIOCAP32 V_FW_PORT_CMD_MDIOCAP32(1U) + +#define S_FW_PORT_CMD_MDIOADDR32 21 +#define M_FW_PORT_CMD_MDIOADDR32 0x1f +#define G_FW_PORT_CMD_MDIOADDR32(x) \ + (((x) >> S_FW_PORT_CMD_MDIOADDR32) & M_FW_PORT_CMD_MDIOADDR32) + +#define S_FW_PORT_CMD_PORTTYPE32 13 +#define M_FW_PORT_CMD_PORTTYPE32 0xff +#define G_FW_PORT_CMD_PORTTYPE32(x) \ + (((x) >> S_FW_PORT_CMD_PORTTYPE32) & M_FW_PORT_CMD_PORTTYPE32) + +#define S_FW_PORT_CMD_MODTYPE32 8 +#define M_FW_PORT_CMD_MODTYPE32 0x1f +#define G_FW_PORT_CMD_MODTYPE32(x) \ + (((x) >> S_FW_PORT_CMD_MODTYPE32) & M_FW_PORT_CMD_MODTYPE32) + /* * These are configured into the VPD and hence tools that generate * VPD may use this enumeration. 
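
A quick illustration of how the 32-bit capability words above decode (a hedged sketch, not part of the patch: the helper name is made up, and only macros defined in this header are used):

	static inline unsigned int fwcap32_speed_mbps(u32 caps)
	{
		/* Speed bits occupy the low 12 bits (S/M_FW_PORT_CAP32_SPEED). */
		u32 speed = G_FW_PORT_CAP32_SPEED(caps);

		if (speed & FW_PORT_CAP32_SPEED_100G)
			return 100000;
		if (speed & FW_PORT_CAP32_SPEED_50G)
			return 50000;
		if (speed & FW_PORT_CAP32_SPEED_40G)
			return 40000;
		if (speed & FW_PORT_CAP32_SPEED_25G)
			return 25000;
		if (speed & FW_PORT_CAP32_SPEED_10G)
			return 10000;
		if (speed & FW_PORT_CAP32_SPEED_1G)
			return 1000;
		return 100;	/* FW_PORT_CAP32_SPEED_100M or unknown */
	}
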
@@ -1532,6 +2040,83 @@ struct fw_rss_ind_tbl_cmd { #define G_FW_RSS_IND_TBL_CMD_IQ2(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ2) & M_FW_RSS_IND_TBL_CMD_IQ2) +struct fw_rss_glb_config_cmd { + __be32 op_to_write; + __be32 retval_len16; + union fw_rss_glb_config { + struct fw_rss_glb_config_manual { + __be32 mode_pkd; + __be32 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_glb_config_basicvirtual { + __be32 mode_keymode; + __be32 synmapen_to_hashtoeplitz; + __be64 r8; + __be64 r9; + } basicvirtual; + } u; +}; + +#define S_FW_RSS_GLB_CONFIG_CMD_MODE 28 +#define M_FW_RSS_GLB_CONFIG_CMD_MODE 0xf +#define G_FW_RSS_GLB_CONFIG_CMD_MODE(x) \ + (((x) >> S_FW_RSS_GLB_CONFIG_CMD_MODE) & M_FW_RSS_GLB_CONFIG_CMD_MODE) + +#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 + +#define S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 8 +#define V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) +#define F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 7 +#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) +#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 \ + V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 6 +#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) +#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 \ + V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 5 +#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) +#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 \ + V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 4 +#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) +#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 \ + V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 3 +#define V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) +#define F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 2 +#define V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) +#define F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 1 +#define V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) +#define F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP \ + V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0 +#define V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) +#define F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ \ + V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(1U) + struct fw_rss_vi_config_cmd { __be32 op_to_viid; __be32 retval_len16; @@ -1611,6 +2196,22 @@ struct fw_rss_vi_config_cmd { (((x) >> S_FW_RSS_VI_CONFIG_CMD_UDPEN) & M_FW_RSS_VI_CONFIG_CMD_UDPEN) #define F_FW_RSS_VI_CONFIG_CMD_UDPEN V_FW_RSS_VI_CONFIG_CMD_UDPEN(1U) +struct fw_clip_cmd { + __be32 op_to_write; + __be32 alloc_to_len16; + __be64 ip_hi; + __be64 ip_lo; + __be32 r4[2]; +}; + +#define S_FW_CLIP_CMD_ALLOC 31 +#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC) +#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U) + +#define S_FW_CLIP_CMD_FREE 30 +#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE) +#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U) + 
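+
+/* A hedged usage sketch (not part of this patch): allocating a CLIP entry
+ * for a 16-byte big-endian IPv6 address in ipv6[] would look roughly like
+ * this, assuming the t4vf_wr_mbox()-style helper used elsewhere in this
+ * series:
+ *
+ *	struct fw_clip_cmd c;
+ *
+ *	memset(&c, 0, sizeof(c));
+ *	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) |
+ *				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+ *	c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
+ *	memcpy(&c.ip_hi, ipv6, 8);
+ *	memcpy(&c.ip_lo, ipv6 + 8, 8);
+ *	ret = t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+ */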
/******************************************************************************
 *   D E B U G   C O M M A N D s
 ******************************************************/
diff --git a/drivers/net/cxgbe/base/t4vf_hw.c b/drivers/net/cxgbe/base/t4vf_hw.c
new file mode 100644
index 00000000..d96456bb
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4vf_hw.c
@@ -0,0 +1,880 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include
+#include
+
+#include "common.h"
+#include "t4_regs.h"
+
+/**
+ * t4vf_wait_dev_ready - wait until reads of registers work
+ *
+ * Wait for the device to become ready (signified by our "who am I" register
+ * returning a value other than all 1's).  Return an error if it doesn't
+ * become ready ...
+ */
+static int t4vf_wait_dev_ready(struct adapter *adapter)
+{
+	const u32 whoami = T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI;
+	const u32 notready1 = 0xffffffff;
+	const u32 notready2 = 0xeeeeeeee;
+	u32 val;
+
+	val = t4_read_reg(adapter, whoami);
+	if (val != notready1 && val != notready2)
+		return 0;
+
+	msleep(500);
+	val = t4_read_reg(adapter, whoami);
+	if (val != notready1 && val != notready2)
+		return 0;
+
+	dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
+		val);
+	return -EIO;
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+			 u32 mbox_addr)
+{
+	for ( ; nflit; nflit--, mbox_addr += 8)
+		*rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
+}
+
+/**
+ * t4vf_wr_mbox_core - send a command to FW through the mailbox
+ * @adapter: the adapter
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sends the given command to FW through the mailbox and waits for the
+ * FW to execute the command.  If @rpl is not %NULL it is used to store
+ * the FW's reply to the command.  The command and its optional reply
+ * are of the same length.  FW can take up to 500 ms to respond.
+ * @sleep_ok determines whether we may sleep while awaiting the response.
+ * If sleeping is allowed we use progressive backoff; otherwise we spin.
+ *
+ * The return value is 0 on success or a negative errno on failure.  A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error.  In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4vf_wr_mbox_core(struct adapter *adapter,
+		      const void __attribute__((__may_alias__)) *cmd,
+		      int size, void *rpl, bool sleep_ok)
+{
+	/*
+	 * We delay in small increments at first in an effort to maintain
+	 * responsiveness for simple, fast executing commands but then back
+	 * off to larger delays to a maximum retry delay.
+	 */
+	static const int delay[] = {
+		1, 1, 3, 5, 10, 10, 20, 50, 100
+	};
+
+	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + A_CIM_VF_EXT_MAILBOX_CTRL;
+	__be64 cmd_rpl[MBOX_LEN / 8];
+	struct mbox_entry entry;
+	unsigned int delay_idx;
+	u32 v, mbox_data;
+	const __be64 *p;
+	int i, ret;
+	int ms;
+
+	/* In T6, mailbox size is changed to 128 bytes to avoid
+	 * invalidating the entire prefetch buffer.
+ */ + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + mbox_data = T4VF_MBDATA_BASE_ADDR; + else + mbox_data = T6VF_MBDATA_BASE_ADDR; + + /* + * Commands must be multiples of 16 bytes in length and may not be + * larger than the size of the Mailbox Data register array. + */ + if ((size % 16) != 0 || + size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4) + return -EINVAL; + + /* + * Queue ourselves onto the mailbox access list. When our entry is at + * the front of the list, we have rights to access the mailbox. So we + * wait [for a while] till we're at the front [or bail out with an + * EBUSY] ... + */ + t4_os_atomic_add_tail(&entry, &adapter->mbox_list, &adapter->mbox_lock); + + delay_idx = 0; + ms = delay[0]; + + for (i = 0; ; i += ms) { + /* + * If we've waited too long, return a busy indication. This + * really ought to be based on our initial position in the + * mailbox access list but this is a start. We very rarely + * contend on access to the mailbox ... + */ + if (i > (2 * FW_CMD_MAX_TIMEOUT)) { + t4_os_atomic_list_del(&entry, &adapter->mbox_list, + &adapter->mbox_lock); + ret = -EBUSY; + return ret; + } + + /* + * If we're at the head, break out and start the mailbox + * protocol. + */ + if (t4_os_list_first_entry(&adapter->mbox_list) == &entry) + break; + + /* + * Delay for a bit before checking again ... + */ + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + rte_delay_ms(ms); + } + } + + /* + * Loop trying to get ownership of the mailbox. Return an error + * if we can't gain ownership. + */ + v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl)); + for (i = 0; v == X_MBOWNER_NONE && i < 3; i++) + v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl)); + + if (v != X_MBOWNER_PL) { + t4_os_atomic_list_del(&entry, &adapter->mbox_list, + &adapter->mbox_lock); + ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT; + return ret; + } + + /* + * Write the command array into the Mailbox Data register array and + * transfer ownership of the mailbox to the firmware. + */ + for (i = 0, p = cmd; i < size; i += 8) + t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); + + t4_read_reg(adapter, mbox_data); /* flush write */ + t4_write_reg(adapter, mbox_ctl, + F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW)); + t4_read_reg(adapter, mbox_ctl); /* flush write */ + delay_idx = 0; + ms = delay[0]; + + /* + * Spin waiting for firmware to acknowledge processing our command. + */ + for (i = 0; i < FW_CMD_MAX_TIMEOUT; i++) { + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + rte_delay_ms(ms); + } + + /* + * If we're the owner, see if this is the reply we wanted. + */ + v = t4_read_reg(adapter, mbox_ctl); + if (G_MBOWNER(v) == X_MBOWNER_PL) { + /* + * If the Message Valid bit isn't on, revoke ownership + * of the mailbox and continue waiting for our reply. + */ + if ((v & F_MBMSGVALID) == 0) { + t4_write_reg(adapter, mbox_ctl, + V_MBOWNER(X_MBOWNER_NONE)); + continue; + } + + /* + * We now have our reply. Extract the command return + * value, copy the reply back to our caller's buffer + * (if specified) and revoke ownership of the mailbox. + * We return the (negated) firmware command return + * code (this depends on FW_SUCCESS == 0). (Again we + * avoid clogging the log with FW_VI_STATS_CMD + * reply results.) + */ + + /* + * Retrieve the command reply and release the mailbox. 
+ */ + get_mbox_rpl(adapter, cmd_rpl, size / 8, mbox_data); + t4_write_reg(adapter, mbox_ctl, + V_MBOWNER(X_MBOWNER_NONE)); + t4_os_atomic_list_del(&entry, &adapter->mbox_list, + &adapter->mbox_lock); + + /* return value in high-order host-endian word */ + v = be64_to_cpu(cmd_rpl[0]); + + if (rpl) { + /* request bit in high-order BE word */ + WARN_ON((be32_to_cpu(*(const u32 *)cmd) + & F_FW_CMD_REQUEST) == 0); + memcpy(rpl, cmd_rpl, size); + } + return -((int)G_FW_CMD_RETVAL(v)); + } + } + + /* + * We timed out. Return the error ... + */ + dev_err(adapter, "command %#x timed out\n", + *(const u8 *)cmd); + dev_err(adapter, " Control = %#x\n", t4_read_reg(adapter, mbox_ctl)); + t4_os_atomic_list_del(&entry, &adapter->mbox_list, &adapter->mbox_lock); + ret = -ETIMEDOUT; + return ret; +} + +/** + * t4vf_fw_reset - issue a reset to FW + * @adapter: the adapter + * + * Issues a reset command to FW. For a Physical Function this would + * result in the Firmware resetting all of its state. For a Virtual + * Function this just resets the state associated with the VF. + */ +int t4vf_fw_reset(struct adapter *adapter) +{ + struct fw_reset_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) | + F_FW_CMD_WRITE); + cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(FW_LEN16(cmd))); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_prep_adapter - prepare SW and HW for operation + * @adapter: the adapter + * + * Initialize adapter SW state for the various HW modules, set initial + * values for some adapter tunables, take PHYs out of reset, and + * initialize the MDIO interface. + */ +int t4vf_prep_adapter(struct adapter *adapter) +{ + u32 pl_vf_rev; + int ret, ver; + + ret = t4vf_wait_dev_ready(adapter); + if (ret < 0) + return ret; + + /* + * Default port and clock for debugging in case we can't reach + * firmware. + */ + adapter->params.nports = 1; + adapter->params.vfres.pmask = 1; + adapter->params.vpd.cclk = 50000; + + pl_vf_rev = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); + adapter->params.pci.device_id = adapter->pdev->id.device_id; + adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id; + + /* + * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS + * ADAPTER (VERSION << 4 | REVISION) + */ + ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id); + adapter->params.chip = 0; + switch (ver) { + case CHELSIO_T5: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, + pl_vf_rev); + adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + break; + case CHELSIO_T6: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, + pl_vf_rev); + adapter->params.arch.sge_fl_db = 0; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + break; + default: + dev_err(adapter, "%s: Device %d is not supported\n", + __func__, adapter->params.pci.device_id); + return -EINVAL; + } + return 0; +} + +/** + * t4vf_query_params - query FW or device parameters + * @adapter: the adapter + * @nparams: the number of parameters + * @params: the parameter names + * @vals: the parameter values + * + * Reads the values of firmware or device parameters. Up to 7 parameters + * can be queried at once. 
+ */
+int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+		      const u32 *params, u32 *vals)
+{
+	struct fw_params_cmd cmd, rpl;
+	struct fw_params_param *p;
+	unsigned int i;
+	size_t len16;
+	int ret;
+
+	if (nparams > 7)
+		return -EINVAL;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+				    F_FW_CMD_REQUEST |
+				    F_FW_CMD_READ);
+	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+				      param[nparams]), 16);
+	cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
+		p->mnem = cpu_to_be32(*params++);
+	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (ret == 0)
+		for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
+			*vals++ = be32_to_cpu(p->val);
+	return ret;
+}
+
+/**
+ * t4vf_get_vpd_params - retrieve device VPD parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various device Vital Product Data parameters.  The parameters
+ * are stored in @adapter->params.vpd.
+ */
+int t4vf_get_vpd_params(struct adapter *adapter)
+{
+	struct vpd_params *vpd_params = &adapter->params.vpd;
+	u32 params[7], vals[7];
+	int v;
+
+	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+		     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+	v = t4vf_query_params(adapter, 1, params, vals);
+	if (v != FW_SUCCESS)
+		return v;
+	vpd_params->cclk = vals[0];
+	dev_debug(adapter, "%s: vpd_params->cclk = %u\n",
+		  __func__, vpd_params->cclk);
+	return 0;
+}
+
+/**
+ * t4vf_get_dev_params - retrieve device parameters
+ * @adapter: the adapter
+ *
+ * Retrieves the firmware and TP microcode versions.
+ */
+int t4vf_get_dev_params(struct adapter *adapter)
+{
+	u32 params[7], vals[7];
+	int v;
+
+	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+		     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
+	params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+		     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
+	v = t4vf_query_params(adapter, 2, params, vals);
+	if (v != FW_SUCCESS)
+		return v;
+	adapter->params.fw_vers = vals[0];
+	adapter->params.tp_vers = vals[1];
+
+	dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
+		 G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
+		 G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
+		 G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
+		 G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
+
+	dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
+		 G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
+		 G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
+		 G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
+		 G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
+	return 0;
+}
+
+/**
+ * t4vf_set_params - sets FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Sets the values of firmware or device parameters.  Up to 7 parameters
+ * can be specified at once.
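+ *
+ * A hedged example (mirroring the t4vf_query_params() usage elsewhere in
+ * this file): writing a single parameter value would look roughly like
+ *
+ *	u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ *		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
+ *	u32 val = 1;
+ *	int ret = t4vf_set_params(adapter, 1, &param, &val);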
+ */ +int t4vf_set_params(struct adapter *adapter, unsigned int nparams, + const u32 *params, const u32 *vals) +{ + struct fw_params_param *p; + struct fw_params_cmd cmd; + unsigned int i; + size_t len16; + + if (nparams > 7) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_WRITE); + len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, + param[nparams]), 16); + cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16)); + for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) { + p->mnem = cpu_to_be32(*params++); + p->val = cpu_to_be32(*vals++); + } + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_fl_pkt_align - return the fl packet alignment + * @adapter: the adapter + * + * T4 has a single field to specify the packing and padding boundary. + * T5 onwards has separate fields for this and hence the alignment for + * next packet offset is maximum of these two. + */ +int t4vf_fl_pkt_align(struct adapter *adapter, u32 sge_control, + u32 sge_control2) +{ + unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift; + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. + */ + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + ingpad_shift = X_INGPADBOUNDARY_SHIFT; + else + ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT; + + ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift); + + fl_align = ingpadboundary; + if (!is_t4(adapter->params.chip)) { + ingpackboundary = G_INGPACKBOUNDARY(sge_control2); + if (ingpackboundary == X_INGPACKBOUNDARY_16B) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + X_INGPACKBOUNDARY_SHIFT); + + fl_align = max(ingpadboundary, ingpackboundary); + } + return fl_align; +} + +unsigned int t4vf_get_pf_from_vf(struct adapter *adapter) +{ + u32 whoami; + + whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI); + return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? + G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami)); +} + +/** + * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration + * @adapter: the adapter + * + * Retrieves global RSS mode and parameters with which we have to live + * and stores them in the @adapter's RSS parameters. + */ +int t4vf_get_rss_glb_config(struct adapter *adapter) +{ + struct rss_params *rss = &adapter->params.rss; + struct fw_rss_glb_config_cmd cmd, rpl; + int v; + + /* + * Execute an RSS Global Configuration read command to retrieve + * our RSS configuration. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v != FW_SUCCESS) + return v; + + /* + * Translate the big-endian RSS Global Configuration into our + * cpu-endian format based on the RSS mode. We also do first level + * filtering at this point to weed out modes which don't support + * VF Drivers ... 
+ */ + rss->mode = G_FW_RSS_GLB_CONFIG_CMD_MODE + (be32_to_cpu(rpl.u.manual.mode_pkd)); + switch (rss->mode) { + case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { + u32 word = be32_to_cpu + (rpl.u.basicvirtual.synmapen_to_hashtoeplitz); + + rss->u.basicvirtual.synmapen = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0); + rss->u.basicvirtual.syn4tupenipv6 = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0); + rss->u.basicvirtual.syn2tupenipv6 = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0); + rss->u.basicvirtual.syn4tupenipv4 = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0); + rss->u.basicvirtual.syn2tupenipv4 = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0); + rss->u.basicvirtual.ofdmapen = + ((word & F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0); + rss->u.basicvirtual.tnlmapen = + ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0); + rss->u.basicvirtual.tnlalllookup = + ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0); + rss->u.basicvirtual.hashtoeplitz = + ((word & F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0); + + /* we need at least Tunnel Map Enable to be set */ + if (!rss->u.basicvirtual.tnlmapen) + return -EINVAL; + break; + } + + default: + /* all unknown/unsupported RSS modes result in an error */ + return -EINVAL; + } + return 0; +} + +/** + * t4vf_get_vfres - retrieve VF resource limits + * @adapter: the adapter + * + * Retrieves configured resource limits and capabilities for a virtual + * function. The results are stored in @adapter->vfres. + */ +int t4vf_get_vfres(struct adapter *adapter) +{ + struct vf_resources *vfres = &adapter->params.vfres; + struct fw_pfvf_cmd cmd, rpl; + u32 word; + int v; + + /* + * Execute PFVF Read command to get VF resource limits; bail out early + * with error on command failure. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v != FW_SUCCESS) + return v; + + /* + * Extract VF resource limits and return success. + */ + word = be32_to_cpu(rpl.niqflint_niq); + vfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word); + vfres->niq = G_FW_PFVF_CMD_NIQ(word); + + word = be32_to_cpu(rpl.type_to_neq); + vfres->neq = G_FW_PFVF_CMD_NEQ(word); + vfres->pmask = G_FW_PFVF_CMD_PMASK(word); + + word = be32_to_cpu(rpl.tc_to_nexactf); + vfres->tc = G_FW_PFVF_CMD_TC(word); + vfres->nvi = G_FW_PFVF_CMD_NVI(word); + vfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word); + + word = be32_to_cpu(rpl.r_caps_to_nethctrl); + vfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word); + vfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word); + vfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word); + return 0; +} + +/** + * t4vf_get_port_stats_fw - collect "port" statistics via Firmware + * @adapter: the adapter + * @pidx: the port index + * @p: the stats structure to fill + * + * Collect statistics for the "port"'s Virtual Interface via Firmware + * commands. + */ +static int t4vf_get_port_stats_fw(struct adapter *adapter, int pidx, + struct port_stats *p) +{ + struct port_info *pi = adap2pinfo(adapter, pidx); + unsigned int rem = VI_VF_NUM_STATS; + struct fw_vi_stats_vf fwstats; + __be64 *fwsp = (__be64 *)&fwstats; + + /* + * Grab the Virtual Interface statistics a chunk at a time via mailbox + * commands. We could use a Work Request and get all of them at once + * but that's an asynchronous interface which is awkward to use.
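+ * + * For example, with at most 6 statistics returned per FW_VI_STATS_CMD, + * fetching all VI_VF_NUM_STATS counters takes ceil(VI_VF_NUM_STATS / 6) + * mailbox round trips.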
+ */ + while (rem) { + unsigned int ix = VI_VF_NUM_STATS - rem; + unsigned int nstats = min(6U, rem); + struct fw_vi_stats_cmd cmd, rpl; + size_t len = (offsetof(struct fw_vi_stats_cmd, u) + + sizeof(struct fw_vi_stats_ctl)); + size_t len16 = DIV_ROUND_UP(len, 16); + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_STATS_CMD) | + V_FW_VI_STATS_CMD_VIID(pi->viid) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ); + cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16)); + cmd.u.ctl.nstats_ix = + cpu_to_be16(V_FW_VI_STATS_CMD_IX(ix) | + V_FW_VI_STATS_CMD_NSTATS(nstats)); + ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl); + if (ret != FW_SUCCESS) + return ret; + + memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats); + + rem -= nstats; + fwsp += nstats; + } + + /* + * Translate firmware statistics into host native statistics. + */ + p->tx_octets = be64_to_cpu(fwstats.tx_bcast_bytes) + + be64_to_cpu(fwstats.tx_mcast_bytes) + + be64_to_cpu(fwstats.tx_ucast_bytes); + p->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames); + p->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames); + p->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames); + p->tx_drop = be64_to_cpu(fwstats.tx_drop_frames); + + p->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames); + p->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames); + p->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames); + p->rx_len_err = be64_to_cpu(fwstats.rx_err_frames); + + return 0; +} + +/** + * t4vf_get_port_stats - collect "port" statistics + * @adapter: the adapter + * @pidx: the port index + * @p: the stats structure to fill + * + * Collect statistics for the "port"'s Virtual Interface. + */ +void t4vf_get_port_stats(struct adapter *adapter, int pidx, + struct port_stats *p) +{ + /* + * If this is not the first Virtual Interface for our Virtual + * Function, we need to use Firmware commands to retrieve its + * MPS statistics. + */ + if (pidx != 0) { + t4vf_get_port_stats_fw(adapter, pidx, p); + return; + } + + /* + * But for the first VI, we can grab its statistics via the MPS + * register mapped into the VF register space. + */ +#define GET_STAT(name) \ + t4_read_reg64(adapter, \ + T4VF_MPS_BASE_ADDR + A_MPS_VF_STAT_##name##_L) + p->tx_octets = GET_STAT(TX_VF_BCAST_BYTES) + + GET_STAT(TX_VF_MCAST_BYTES) + + GET_STAT(TX_VF_UCAST_BYTES); + p->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); + p->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); + p->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); + p->tx_drop = GET_STAT(TX_VF_DROP_FRAMES); + + p->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); + p->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); + p->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); + + p->rx_len_err = GET_STAT(RX_VF_ERR_FRAMES); +#undef GET_STAT +} + +static int t4vf_alloc_vi(struct adapter *adapter, int port_id) +{ + struct fw_vi_cmd cmd, rpl; + int v; + + /* + * Execute a VI command to allocate a Virtual Interface and return its + * VIID.
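+ * On success, the new VIID is extracted from the type_to_viid field + * of the firmware reply.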
+ */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_WRITE | + F_FW_CMD_EXEC); + cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | + F_FW_VI_CMD_ALLOC); + cmd.portid_pkd = V_FW_VI_CMD_PORTID(port_id); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v != FW_SUCCESS) + return v; + return G_FW_VI_CMD_VIID(be16_to_cpu(rpl.type_to_viid)); +} + +int t4vf_port_init(struct adapter *adapter) +{ + unsigned int fw_caps = adapter->params.fw_caps_support; + struct fw_port_cmd port_cmd, port_rpl; + struct fw_vi_cmd vi_cmd, vi_rpl; + fw_port_cap32_t pcaps, acaps; + enum fw_port_type port_type; + int mdio_addr; + int ret, i; + + for_each_port(adapter, i) { + struct port_info *p = adap2pinfo(adapter, i); + + /* + * If we haven't yet determined if we're talking to Firmware + * which knows the new 32-bit Port Caps, it's time to find + * out now. This will also tell new Firmware to send us Port + * Status Updates using the new 32-bit Port Capabilities + * version of the Port Information message. + */ + if (fw_caps == FW_CAPS_UNKNOWN) { + u32 param, val; + + param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | + V_FW_PARAMS_PARAM_X + (FW_PARAMS_PARAM_PFVF_PORT_CAPS32)); + val = 1; + ret = t4vf_set_params(adapter, 1, &param, &val); + fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16); + adapter->params.fw_caps_support = fw_caps; + } + + ret = t4vf_alloc_vi(adapter, p->port_id); + if (ret < 0) { + dev_err(adapter, "cannot allocate VI for port %d:" + " err=%d\n", p->port_id, ret); + return ret; + } + p->viid = ret; + + /* + * Execute a VI Read command to get our Virtual Interface + * information like MAC address, etc. + */ + memset(&vi_cmd, 0, sizeof(vi_cmd)); + vi_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ); + vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd)); + vi_cmd.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(p->viid)); + ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl); + if (ret != FW_SUCCESS) + return ret; + + p->rss_size = G_FW_VI_CMD_RSSSIZE + (be16_to_cpu(vi_rpl.norss_rsssize)); + t4_os_set_hw_addr(adapter, i, vi_rpl.mac); + + /* + * If we don't have read access to our port information, we're + * done now. Else, execute a PORT Read command to get it ... + */ + if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT)) + return 0; + + memset(&port_cmd, 0, sizeof(port_cmd)); + port_cmd.op_to_portid = cpu_to_be32 + (V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_READ | + V_FW_PORT_CMD_PORTID(p->port_id)); + port_cmd.action_to_len16 = cpu_to_be32 + (V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ? + FW_PORT_ACTION_GET_PORT_INFO : + FW_PORT_ACTION_GET_PORT_INFO32) | + FW_LEN16(port_cmd)); + ret = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), + &port_rpl); + if (ret != FW_SUCCESS) + return ret; + + /* + * Extract the various fields from the Port Information message. + */ + if (fw_caps == FW_CAPS16) { + u32 lstatus = be32_to_cpu + (port_rpl.u.info.lstatus_to_modtype); + + port_type = G_FW_PORT_CMD_PTYPE(lstatus); + mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP) ? + (int)G_FW_PORT_CMD_MDIOADDR(lstatus) : + -1); + pcaps = fwcaps16_to_caps32 + (be16_to_cpu(port_rpl.u.info.pcap)); + acaps = fwcaps16_to_caps32 + (be16_to_cpu(port_rpl.u.info.acap)); + } else { + u32 lstatus32 = be32_to_cpu + (port_rpl.u.info32.lstatus32_to_cbllen32); + + port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32); + mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
+ (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) : + -1); + pcaps = be32_to_cpu(port_rpl.u.info32.pcaps32); + acaps = be32_to_cpu(port_rpl.u.info32.acaps32); + } + + p->port_type = port_type; + p->mdio_addr = mdio_addr; + p->mod_type = FW_PORT_MOD_TYPE_NA; + init_link_config(&p->link_cfg, pcaps, acaps); + } + return 0; +} diff --git a/drivers/net/cxgbe/base/t4vf_hw.h b/drivers/net/cxgbe/base/t4vf_hw.h new file mode 100644 index 00000000..55e436e7 --- /dev/null +++ b/drivers/net/cxgbe/base/t4vf_hw.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef __T4VF_HW_H +#define __T4VF_HW_H + +#define T4VF_PL_BASE_ADDR 0x0200 +#define T4VF_CIM_BASE_ADDR 0x0300 +#define T4VF_MBDATA_BASE_ADDR 0x0240 +#define T6VF_MBDATA_BASE_ADDR 0x0280 + +#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES NUM_CIM_PF_MAILBOX_DATA_INSTANCES +#endif /* __T4VF_HW_H */ diff --git a/drivers/net/cxgbe/clip_tbl.c b/drivers/net/cxgbe/clip_tbl.c new file mode 100644 index 00000000..5e4dc527 --- /dev/null +++ b/drivers/net/cxgbe/clip_tbl.c @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#include "common.h" +#include "clip_tbl.h" + +/** + * Allocate a CLIP entry in HW with the associated IPv4/IPv6 address + */ +static int clip6_get_mbox(const struct rte_eth_dev *dev, const u32 *lip) +{ + struct adapter *adap = ethdev2adap(dev); + struct fw_clip_cmd c; + u64 hi = ((u64)lip[1]) << 32 | lip[0]; + u64 lo = ((u64)lip[3]) << 32 | lip[2]; + + memset(&c, 0, sizeof(c)); + c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE); + c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c)); + c.ip_hi = hi; + c.ip_lo = lo; + return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); +} + +/** + * Delete the CLIP entry in HW having the associated IPv4/IPv6 address + */ +static int clip6_release_mbox(const struct rte_eth_dev *dev, const u32 *lip) +{ + struct adapter *adap = ethdev2adap(dev); + struct fw_clip_cmd c; + u64 hi = ((u64)lip[1]) << 32 | lip[0]; + u64 lo = ((u64)lip[3]) << 32 | lip[2]; + + memset(&c, 0, sizeof(c)); + c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ); + c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_FREE | FW_LEN16(c)); + c.ip_hi = hi; + c.ip_lo = lo; + return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); +} + +/** + * cxgbe_clip_release - Release the associated CLIP entry + * @dev: rte_eth_dev pointer + * @ce: CLIP entry to release + * + * Releases the reference count and, on last release, frees up the CLIP + * entry from the CLIP table + */ +void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce) +{ + int ret; + + t4_os_lock(&ce->lock); + if (rte_atomic32_dec_and_test(&ce->refcnt)) { + ret = clip6_release_mbox(dev, ce->addr); + if (ret) + dev_debug(ethdev2adap(dev), "CLIP FW DEL CMD failed: %d", ret); + } + t4_os_unlock(&ce->lock); +} + +/** + * find_or_alloc_clipe - Find/Allocate a free CLIP entry + * @c: CLIP table + * @lip: IPv4/IPv6 address to compare/add + * Returns pointer to the IPv4/IPv6 entry found/created + * + * Finds/Allocates a CLIP entry to be used for a filter rule.
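+ * + * The lookup below is a linear scan: while searching for an exact + * address match, the first entry seen with a zero reference count is + * remembered as a fallback and reused if no match exists.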
+ */ +static struct clip_entry *find_or_alloc_clipe(struct clip_tbl *c, + const u32 *lip) +{ + struct clip_entry *end, *e; + struct clip_entry *first_free = NULL; + unsigned int clipt_size = c->clipt_size; + + for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) { + if (rte_atomic32_read(&e->refcnt) == 0) { + if (!first_free) + first_free = e; + } else { + if (memcmp(lip, e->addr, sizeof(e->addr)) == 0) + goto exists; + } + } + + if (first_free) { + e = first_free; + goto exists; + } + + return NULL; + +exists: + return e; +} + +static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev, + u32 *lip, u8 v6) +{ + struct adapter *adap = ethdev2adap(dev); + struct clip_tbl *ctbl = adap->clipt; + struct clip_entry *ce; + int ret = 0; + + if (!ctbl) + return NULL; + + t4_os_write_lock(&ctbl->lock); + ce = find_or_alloc_clipe(ctbl, lip); + if (ce) { + t4_os_lock(&ce->lock); + if (!rte_atomic32_read(&ce->refcnt)) { + rte_memcpy(ce->addr, lip, sizeof(ce->addr)); + if (v6) { + ce->type = FILTER_TYPE_IPV6; + rte_atomic32_set(&ce->refcnt, 1); + ret = clip6_get_mbox(dev, lip); + if (ret) + dev_debug(adap, + "CLIP FW ADD CMD failed: %d", + ret); + } else { + ce->type = FILTER_TYPE_IPV4; + } + } else { + rte_atomic32_inc(&ce->refcnt); + } + t4_os_unlock(&ce->lock); + } + t4_os_write_unlock(&ctbl->lock); + + return ret ? NULL : ce; +} + +/** + * cxgbe_clip_alloc - Allocate an IPv6 CLIP entry + * @dev: rte_eth_dev pointer + * @lip: IPv6 address to add + * Returns pointer to the CLIP entry created + * + * Allocates an IPv6 CLIP entry to be used for a filter rule. + */ +struct clip_entry *cxgbe_clip_alloc(struct rte_eth_dev *dev, u32 *lip) +{ + return t4_clip_alloc(dev, lip, FILTER_TYPE_IPV6); +} + +/** + * Initialize CLIP Table + */ +struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start, + unsigned int clipt_end) +{ + unsigned int clipt_size; + struct clip_tbl *ctbl; + unsigned int i; + + if (clipt_start >= clipt_end) + return NULL; + + clipt_size = clipt_end - clipt_start + 1; + + ctbl = t4_os_alloc(sizeof(*ctbl) + + clipt_size * sizeof(struct clip_entry)); + if (!ctbl) + return NULL; + + ctbl->clipt_start = clipt_start; + ctbl->clipt_size = clipt_size; + + t4_os_rwlock_init(&ctbl->lock); + + for (i = 0; i < ctbl->clipt_size; i++) { + t4_os_lock_init(&ctbl->cl_list[i].lock); + rte_atomic32_set(&ctbl->cl_list[i].refcnt, 0); + } + + return ctbl; +} + +/** + * Cleanup CLIP Table + */ +void t4_cleanup_clip_tbl(struct adapter *adap) +{ + if (adap->clipt) + t4_os_free(adap->clipt); +} diff --git a/drivers/net/cxgbe/clip_tbl.h b/drivers/net/cxgbe/clip_tbl.h new file mode 100644 index 00000000..737ccc69 --- /dev/null +++ b/drivers/net/cxgbe/clip_tbl.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef _CXGBE_CLIP_H_ +#define _CXGBE_CLIP_H_ + +/* + * State for the corresponding entry of the HW CLIP table.
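+ * Each entry carries its own spinlock and reference count; the table + * as a whole is guarded by the rwlock in struct clip_tbl below.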
+ */ +struct clip_entry { + enum filter_type type; /* entry type */ + u32 addr[4]; /* IPV4 or IPV6 address */ + rte_spinlock_t lock; /* entry lock */ + rte_atomic32_t refcnt; /* entry reference count */ +}; + +struct clip_tbl { + unsigned int clipt_start; /* start index of CLIP table */ + unsigned int clipt_size; /* size of CLIP table */ + rte_rwlock_t lock; /* table rw lock */ + struct clip_entry cl_list[0]; /* MUST BE LAST */ +}; + +struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start, + unsigned int clipt_end); +void t4_cleanup_clip_tbl(struct adapter *adap); +struct clip_entry *cxgbe_clip_alloc(struct rte_eth_dev *dev, u32 *lip); +void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce); +#endif /* _CXGBE_CLIP_H_ */ diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h index f9891545..5e6f5c98 100644 --- a/drivers/net/cxgbe/cxgbe.h +++ b/drivers/net/cxgbe/cxgbe.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. 
*/ #ifndef _CXGBE_H_ @@ -46,20 +18,51 @@ #define CXGBE_MIN_RX_BUFSIZE ETHER_MIN_MTU /* min buf size */ #define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */ +/* Max poll time is 100 * 100msec = 10 sec */ +#define CXGBE_LINK_STATUS_POLL_MS 100 /* 100ms */ +#define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */ + +#define CXGBE_DEFAULT_RSS_KEY_LEN 40 /* 320-bits */ +#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_OTHER) +#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_OTHER | \ + ETH_RSS_IPV6_EX) +#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_IPV6_TCP_EX) +#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \ + ETH_RSS_IPV6_UDP_EX) +#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) + +#define CXGBE_DEVARG_KEEP_OVLAN "keep_ovlan" +#define CXGBE_DEVARG_FORCE_LINK_UP "force_link_up" + +bool force_linkup(struct adapter *adap); int cxgbe_probe(struct adapter *adapter); +int cxgbevf_probe(struct adapter *adapter); void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps); +int cxgbe_set_link_status(struct port_info *pi, bool status); int cxgbe_up(struct adapter *adap); int cxgbe_down(struct port_info *pi); void cxgbe_close(struct adapter *adapter); void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats); +void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats); void cxgbe_stats_reset(struct port_info *pi); +int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us, + unsigned int cnt, struct t4_completion *c); int link_start(struct port_info *pi); void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us, unsigned int cnt, unsigned int size, unsigned int iqe_size); int setup_sge_fwevtq(struct adapter *adapter); +int setup_sge_ctrl_txq(struct adapter *adapter); void cfg_queues(struct rte_eth_dev *eth_dev); int cfg_queue_count(struct rte_eth_dev *eth_dev); +int init_rss(struct adapter *adap); int setup_rss(struct port_info *pi); void cxgbe_enable_rx_queues(struct port_info *pi); +void print_port_info(struct adapter *adap); +void print_adapter_info(struct adapter *adap); +int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key); +void configure_max_ethqsets(struct adapter *adapter); #endif /* _CXGBE_H_ */ diff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h index 03bba9fe..5d47c5f3 100644 --- a/drivers/net/cxgbe/cxgbe_compat.h +++ b/drivers/net/cxgbe/cxgbe_compat.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #ifndef _CXGBE_COMPAT_H_ @@ -226,15 +198,6 @@ static inline int cxgbe_fls(int x) return x ? sizeof(x) * 8 - __builtin_clz(x) : 0; } -/** - * cxgbe_ffs - find first bit set - * @x: the word to search - */ -static inline int cxgbe_ffs(int x) -{ - return x ? __builtin_ffs(x) : 0; -} - static inline unsigned long ilog2(unsigned long n) { unsigned int e = 0; @@ -278,4 +241,16 @@ static inline void writel_relaxed(unsigned int val, volatile void __iomem *addr) rte_write32_relaxed(val, addr); } +/* + * Multiplies an integer by a fraction, while avoiding unnecessary + * overflow or loss of precision. + */ +#define mult_frac(x, numer, denom)( \ +{ \ + typeof(x) quot = (x) / (denom); \ + typeof(x) rem = (x) % (denom); \ + (quot * (numer)) + ((rem * (numer)) / (denom)); \ +} \ +) + #endif /* _CXGBE_COMPAT_H_ */ diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c index 5cd260f4..4dcad7a2 100644 --- a/drivers/net/cxgbe/cxgbe_ethdev.c +++ b/drivers/net/cxgbe/cxgbe_ethdev.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #include @@ -63,6 +35,8 @@ #include #include "cxgbe.h" +#include "cxgbe_pfvf.h" +#include "cxgbe_flow.h" /* * Macros needed to support the PCI Device ID Table ... @@ -85,8 +59,21 @@ */ #include "t4_pci_id_tbl.h" -static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts) +#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT |\ + DEV_TX_OFFLOAD_IPV4_CKSUM |\ + DEV_TX_OFFLOAD_UDP_CKSUM |\ + DEV_TX_OFFLOAD_TCP_CKSUM |\ + DEV_TX_OFFLOAD_TCP_TSO) + +#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP |\ + DEV_RX_OFFLOAD_CRC_STRIP |\ + DEV_RX_OFFLOAD_IPV4_CKSUM |\ + DEV_RX_OFFLOAD_JUMBO_FRAME |\ + DEV_RX_OFFLOAD_UDP_CKSUM |\ + DEV_RX_OFFLOAD_TCP_CKSUM) + +uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) { struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue; uint16_t pkts_sent, pkts_remain; @@ -119,8 +106,8 @@ static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, return total_sent; } -static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts) +uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) { struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue; unsigned int work_done; @@ -135,8 +122,8 @@ static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, return work_done; } -static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, - struct rte_eth_dev_info *device_info) +void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *device_info) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -148,8 +135,6 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, .nb_align = 1, }; - device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); - device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE; device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN; device_info->max_rx_queues = max_queues; @@ -159,25 +144,22 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, device_info->max_vfs = adapter->params.arch.vfcount; device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */ - device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; + device_info->rx_queue_offload_capa = 0UL; + device_info->rx_offload_capa = CXGBE_RX_OFFLOADS; - device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + device_info->tx_queue_offload_capa = 0UL; + device_info->tx_offload_capa = CXGBE_TX_OFFLOADS; device_info->reta_size = pi->rss_size; + device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN; + device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL; device_info->rx_desc_lim = 
cxgbe_desc_lim; device_info->tx_desc_lim = cxgbe_desc_lim; cxgbe_get_speed_caps(pi, &device_info->speed_capa); } -static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) +void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -186,7 +168,7 @@ static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) 1, -1, 1, -1, false); } -static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) +void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -195,7 +177,7 @@ static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) 0, -1, 1, -1, false); } -static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) +void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -206,7 +188,7 @@ static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) -1, 1, 1, -1, false); } -static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) +void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -217,28 +199,91 @@ static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) -1, 0, 1, -1, false); } -static int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, - __rte_unused int wait_to_complete) +int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, + int wait_to_complete) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; struct sge *s = &adapter->sge; - struct rte_eth_link *old_link = ð_dev->data->dev_link; - unsigned int work_done, budget = 4; + struct rte_eth_link new_link = { 0 }; + unsigned int i, work_done, budget = 32; + u8 old_link = pi->link_cfg.link_ok; + + for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) { + cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done); + + /* Exit if link status changed or always forced up */ + if (pi->link_cfg.link_ok != old_link || force_linkup(adapter)) + break; + + if (!wait_to_complete) + break; + + rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS); + } + + new_link.link_status = force_linkup(adapter) ? + ETH_LINK_UP : pi->link_cfg.link_ok; + new_link.link_autoneg = pi->link_cfg.autoneg; + new_link.link_duplex = ETH_LINK_FULL_DUPLEX; + new_link.link_speed = pi->link_cfg.speed; + + return rte_eth_linkstatus_set(eth_dev, &new_link); +} + +/** + * Set device link up. 
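+ * + * Flushes any pending link events from the firmware event queue, asks + * the firmware to raise the link if it is currently down, and then + * polls for the resulting status change via cxgbe_dev_link_update().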
+ */ +int cxgbe_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct adapter *adapter = pi->adapter; + unsigned int work_done, budget = 32; + struct sge *s = &adapter->sge; + int ret; + /* Flush all link events */ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done); - if (old_link->link_status == pi->link_cfg.link_ok) - return -1; /* link not changed */ - eth_dev->data->dev_link.link_status = pi->link_cfg.link_ok; - eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; - eth_dev->data->dev_link.link_speed = pi->link_cfg.speed; + /* If link already up, nothing to do */ + if (pi->link_cfg.link_ok) + return 0; + + ret = cxgbe_set_link_status(pi, true); + if (ret) + return ret; - /* link has changed */ + cxgbe_dev_link_update(dev, 1); return 0; } -static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) +/** + * Set device link down. + */ +int cxgbe_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct adapter *adapter = pi->adapter; + unsigned int work_done, budget = 32; + struct sge *s = &adapter->sge; + int ret; + + /* Flush all link events */ + cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done); + + /* If link already down, nothing to do */ + if (!pi->link_cfg.link_ok) + return 0; + + ret = cxgbe_set_link_status(pi, false); + if (ret) + return ret; + + cxgbe_dev_link_update(dev, 0); + return 0; +} + +int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -254,9 +299,11 @@ static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) /* set to jumbo mode if needed */ if (new_mtu > ETHER_MAX_LEN) - eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; + eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1, -1, -1, true); @@ -266,21 +313,13 @@ static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) return err; } -static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, - uint16_t tx_queue_id); -static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, - uint16_t tx_queue_id); -static void cxgbe_dev_tx_queue_release(void *q); -static void cxgbe_dev_rx_queue_release(void *q); - /* * Stop device. */ -static void cxgbe_dev_close(struct rte_eth_dev *eth_dev) +void cxgbe_dev_close(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; - int i, dev_down = 0; CXGBE_FUNC_TRACE(); @@ -294,28 +333,12 @@ static void cxgbe_dev_close(struct rte_eth_dev *eth_dev) * have been disabled */ t4_sge_eth_clear_queues(pi); - - /* See if all ports are down */ - for_each_port(adapter, i) { - pi = adap2pinfo(adapter, i); - /* - * Skip first port of the adapter since it will be closed - * by DPDK - */ - if (i == 0) - continue; - dev_down += (pi->eth_dev->data->dev_started == 0) ? 1 : 0; - } - - /* If rest of the ports are stopped, then free up resources */ - if (dev_down == (adapter->params.nports - 1)) - cxgbe_close(adapter); } /* Start the device. * It returns 0 on success. 
*/ -static int cxgbe_dev_start(struct rte_eth_dev *eth_dev) +int cxgbe_dev_start(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -367,7 +390,7 @@ out: /* * Stop device: disable rx and tx functions to allow for reconfiguring. */ -static void cxgbe_dev_stop(struct rte_eth_dev *eth_dev) +void cxgbe_dev_stop(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -386,19 +409,35 @@ static void cxgbe_dev_stop(struct rte_eth_dev *eth_dev) t4_sge_eth_clear_queues(pi); } -static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev) +int cxgbe_dev_configure(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; + uint64_t configured_offloads; int err; CXGBE_FUNC_TRACE(); + configured_offloads = eth_dev->data->dev_conf.rxmode.offloads; + + /* The KEEP_CRC offload flag is not supported by the PMD; + * this block can be removed once DEV_RX_OFFLOAD_CRC_STRIP is removed + */ + if (rte_eth_dev_must_keep_crc(configured_offloads)) { + dev_info(adapter, "can't disable hw crc strip\n"); + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_CRC_STRIP; + } if (!(adapter->flags & FW_QUEUE_BOUND)) { err = setup_sge_fwevtq(adapter); if (err) return err; adapter->flags |= FW_QUEUE_BOUND; + if (is_pf4(adapter)) { + err = setup_sge_ctrl_txq(adapter); + if (err) + return err; + } } err = cfg_queue_count(eth_dev); @@ -408,8 +447,7 @@ static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev) return 0; } -static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, - uint16_t tx_queue_id) +int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) { int ret; struct sge_eth_txq *txq = (struct sge_eth_txq *) @@ -424,8 +462,7 @@ static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, return ret; } -static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, - uint16_t tx_queue_id) +int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) { int ret; struct sge_eth_txq *txq = (struct sge_eth_txq *) @@ -440,10 +477,10 @@ static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, return ret; } -static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, - uint16_t queue_idx, uint16_t nb_desc, - unsigned int socket_id, - const struct rte_eth_txconf *tx_conf) +int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf __rte_unused) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -452,8 +489,6 @@ static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, int err = 0; unsigned int temp_nb_desc; - RTE_SET_USED(tx_conf); - dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n", __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc, socket_id, pi->first_qset); @@ -488,13 +523,12 @@ static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx, s->fw_evtq.cntxt_id, socket_id); - dev_debug(adapter, "%s: txq->q.cntxt_id= %d err = %d\n", - __func__, txq->q.cntxt_id, err); - + dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n", + __func__, txq->q.cntxt_id,
txq->q.abs_id, err); return err; } -static void cxgbe_dev_tx_queue_release(void *q) +void cxgbe_dev_tx_queue_release(void *q) { struct sge_eth_txq *txq = (struct sge_eth_txq *)q; @@ -510,8 +544,7 @@ static void cxgbe_dev_tx_queue_release(void *q) } } -static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, - uint16_t rx_queue_id) +int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) { int ret; struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); @@ -530,8 +563,7 @@ static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, return ret; } -static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, - uint16_t rx_queue_id) +int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) { int ret; struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); @@ -549,11 +581,11 @@ static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, return ret; } -static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, - uint16_t queue_idx, uint16_t nb_desc, - unsigned int socket_id, - const struct rte_eth_rxconf *rx_conf, - struct rte_mempool *mp) +int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mp) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -565,8 +597,6 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info dev_info; unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; - RTE_SET_USED(rx_conf); - dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n", __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc, socket_id, mp); @@ -613,21 +643,25 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, /* Set to jumbo mode if necessary */ if (pkt_len > ETHER_MAX_LEN) - eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; + eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx, &rxq->fl, t4_ethrx_handler, - t4_get_tp_ch_map(adapter, pi->tx_chan), mp, + is_pf4(adapter) ? 
+ t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp, queue_idx, socket_id); - dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n", - __func__, err, pi->port_id, rxq->rspq.cntxt_id); + dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n", + __func__, err, pi->port_id, rxq->rspq.cntxt_id, + rxq->rspq.abs_id); return err; } -static void cxgbe_dev_rx_queue_release(void *q) +void cxgbe_dev_rx_queue_release(void *q) { struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q; struct sge_rspq *rq = &rxq->rspq; @@ -750,7 +784,7 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev, struct adapter *adapter = pi->adapter; struct link_config *lc = &pi->link_cfg; - if (lc->supported & FW_PORT_CAP_ANEG) { + if (lc->pcaps & FW_PORT_CAP32_ANEG) { if (fc_conf->autoneg) lc->requested_fc |= PAUSE_AUTONEG; else @@ -773,7 +807,7 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev, &pi->link_cfg); } -static const uint32_t * +const uint32_t * cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) { static const uint32_t ptypes[] = { @@ -787,6 +821,88 @@ cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) return NULL; } +/* Update RSS hash configuration + */ +static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct adapter *adapter = pi->adapter; + int err; + + err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf); + if (err) + return err; + + pi->rss_hf = rss_conf->rss_hf; + + if (rss_conf->rss_key) { + u32 key[10], mod_key[10]; + int i, j; + + memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN); + + for (i = 9, j = 0; i >= 0; i--, j++) + mod_key[j] = cpu_to_be32(key[i]); + + t4_write_rss_key(adapter, mod_key, -1); + } + + return 0; +} + +/* Get RSS hash configuration + */ +static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct adapter *adapter = pi->adapter; + u64 rss_hf = 0; + u64 flags = 0; + int err; + + err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid, + &flags, NULL); + + if (err) + return err; + + if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) { + rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK; + if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN) + rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK; + } + + if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + rss_hf |= CXGBE_RSS_HF_IPV6_MASK; + + if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) { + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + } + + if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + rss_hf |= CXGBE_RSS_HF_IPV4_MASK; + + rss_conf->rss_hf = rss_hf; + + if (rss_conf->rss_key) { + u32 key[10], mod_key[10]; + int i, j; + + t4_read_rss_key(adapter, key); + + for (i = 9, j = 0; i >= 0; i--, j++) + mod_key[j] = be32_to_cpu(key[i]); + + memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN); + } + + return 0; +} + static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev) { RTE_SET_USED(dev); @@ -956,6 +1072,23 @@ static int cxgbe_get_regs(struct rte_eth_dev *eth_dev, return 0; } +int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) +{ + struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct adapter *adapter = pi->adapter; + int ret; + + ret = t4_change_mac(adapter, adapter->mbox, pi->viid, + pi->xact_addr_filt, (u8 *)addr, true, true); + 
if (ret < 0) { + dev_err(adapter, "failed to set mac addr; err = %d\n", + ret); + return ret; + } + pi->xact_addr_filt = ret; + return 0; +} + static const struct eth_dev_ops cxgbe_eth_dev_ops = { .dev_start = cxgbe_dev_start, .dev_stop = cxgbe_dev_stop, @@ -968,6 +1101,8 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = { .dev_infos_get = cxgbe_dev_info_get, .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get, .link_update = cxgbe_dev_link_update, + .dev_set_link_up = cxgbe_dev_set_link_up, + .dev_set_link_down = cxgbe_dev_set_link_down, .mtu_set = cxgbe_dev_mtu_set, .tx_queue_setup = cxgbe_dev_tx_queue_setup, .tx_queue_start = cxgbe_dev_tx_queue_start, @@ -977,6 +1112,7 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = { .rx_queue_start = cxgbe_dev_rx_queue_start, .rx_queue_stop = cxgbe_dev_rx_queue_stop, .rx_queue_release = cxgbe_dev_rx_queue_release, + .filter_ctrl = cxgbe_dev_filter_ctrl, .stats_get = cxgbe_dev_stats_get, .stats_reset = cxgbe_dev_stats_reset, .flow_ctrl_get = cxgbe_flow_ctrl_get, @@ -985,6 +1121,9 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = { .get_eeprom = cxgbe_get_eeprom, .set_eeprom = cxgbe_set_eeprom, .get_reg = cxgbe_get_regs, + .rss_hash_update = cxgbe_dev_rss_hash_update, + .rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get, + .mac_addr_set = cxgbe_mac_addr_set, }; /* @@ -1004,14 +1143,34 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev) eth_dev->dev_ops = &cxgbe_eth_dev_ops; eth_dev->rx_pkt_burst = &cxgbe_recv_pkts; eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts; + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); - /* for secondary processes, we don't initialise any further as primary - * has already done this work. + /* for secondary processes, we attach to ethdevs allocated by primary + * and do minimal initialization. 
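+ * Each additional port's ethdev is looked up by its "%s_%d" (PCI + * device name, port index) name via rte_eth_dev_attach_secondary() + * and wired up to the primary's dev_ops and rx/tx burst functions.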
*/ - if (rte_eal_process_type() != RTE_PROC_PRIMARY) + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + int i; + + for (i = 1; i < MAX_NPORTS; i++) { + struct rte_eth_dev *rest_eth_dev; + char namei[RTE_ETH_NAME_MAX_LEN]; + + snprintf(namei, sizeof(namei), "%s_%d", + pci_dev->device.name, i); + rest_eth_dev = rte_eth_dev_attach_secondary(namei); + if (rest_eth_dev) { + rest_eth_dev->device = &pci_dev->device; + rest_eth_dev->dev_ops = + eth_dev->dev_ops; + rest_eth_dev->rx_pkt_burst = + eth_dev->rx_pkt_burst; + rest_eth_dev->tx_pkt_burst = + eth_dev->tx_pkt_burst; + rte_eth_dev_probing_finish(rest_eth_dev); + } + } return 0; - - pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + } snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id); adapter = rte_zmalloc(name, sizeof(*adapter), 0); @@ -1043,6 +1202,16 @@ out_free_adapter: return err; } +static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adap = pi->adapter; + + /* Free up other ports and all resources */ + cxgbe_close(adap); + return 0; +} + static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { @@ -1052,7 +1221,7 @@ static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev) { - return rte_eth_dev_pci_generic_remove(pci_dev, NULL); + return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit); } static struct rte_pci_driver rte_cxgbe_pmd = { @@ -1065,3 +1234,6 @@ static struct rte_pci_driver rte_cxgbe_pmd = { RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl); RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe, + CXGBE_DEVARG_KEEP_OVLAN "=<0|1> " + CXGBE_DEVARG_FORCE_LINK_UP "=<0|1> "); diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c new file mode 100644 index 00000000..7f0d3800 --- /dev/null +++ b/drivers/net/cxgbe/cxgbe_filter.c @@ -0,0 +1,1252 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. 
+ */ +#include +#include "common.h" +#include "t4_tcb.h" +#include "t4_regs.h" +#include "cxgbe_filter.h" +#include "clip_tbl.h" + +/** + * Initialize Hash Filters + */ +int init_hash_filter(struct adapter *adap) +{ + unsigned int n_user_filters; + unsigned int user_filter_perc; + int ret; + u32 params[7], val[7]; + +#define FW_PARAM_DEV(param) \ + (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + +#define FW_PARAM_PFVF(param) \ + (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \ + V_FW_PARAMS_PARAM_Y(0) | \ + V_FW_PARAMS_PARAM_Z(0)) + + params[0] = FW_PARAM_DEV(NTID); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, + params, val); + if (ret < 0) + return ret; + adap->tids.ntids = val[0]; + adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); + + user_filter_perc = 100; + n_user_filters = mult_frac(adap->tids.nftids, + user_filter_perc, + 100); + + adap->tids.nftids = n_user_filters; + adap->params.hash_filter = 1; + return 0; +} + +/** + * Validate if the requested filter specification can be set by checking + * if the requested features have been enabled + */ +int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs) +{ + u32 fconf; + + /* + * Check for unconfigured fields being used. + */ + fconf = adapter->params.tp.vlan_pri_map; + +#define S(_field) \ + (fs->val._field || fs->mask._field) +#define U(_mask, _field) \ + (!(fconf & (_mask)) && S(_field)) + + if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) || U(F_PROTOCOL, proto)) + return -EOPNOTSUPP; + +#undef S +#undef U + + /* + * If the user is requesting that the filter action loop + * matching packets back out one of our ports, make sure that + * the egress port is in range. + */ + if (fs->action == FILTER_SWITCH && + fs->eport >= adapter->params.nports) + return -ERANGE; + + /* + * Don't allow various trivially obvious bogus out-of-range + * values ... + */ + if (fs->val.iport >= adapter->params.nports) + return -ERANGE; + + return 0; +} + +/** + * Get the queue to which the traffic must be steered. + */ +static unsigned int get_filter_steerq(struct rte_eth_dev *dev, + struct ch_filter_specification *fs) +{ + struct port_info *pi = ethdev2pinfo(dev); + struct adapter *adapter = pi->adapter; + unsigned int iq; + + /* + * If the user has requested steering matching Ingress Packets + * to a specific Queue Set, we need to make sure it's in range + * for the port and map that into the Absolute Queue ID of the + * Queue Set's Response Queue. + */ + if (!fs->dirsteer) { + iq = 0; + } else { + /* + * If the iq id is greater than the number of qsets, + * then assume it is an absolute qid. + */ + if (fs->iq < pi->n_rx_qsets) + iq = adapter->sge.ethrxq[pi->first_qset + + fs->iq].rspq.abs_id; + else + iq = fs->iq; + } + + return iq; +} + +/* Return an error number if the indicated filter isn't writable ...
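+ * ("locked" entries are administratively frozen and yield -EPERM; + * entries with a firmware reply still outstanding are "pending" and + * yield -EBUSY).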
+ */ +int writable_filter(struct filter_entry *f) +{ + if (f->locked) + return -EPERM; + if (f->pending) + return -EBUSY; + + return 0; +} + +/** + * Send CPL_SET_TCB_FIELD message + */ +static void set_tcb_field(struct adapter *adapter, unsigned int ftid, + u16 word, u64 mask, u64 val, int no_reply) +{ + struct rte_mbuf *mbuf; + struct cpl_set_tcb_field *req; + struct sge_ctrl_txq *ctrlq; + + ctrlq = &adapter->sge.ctrlq[0]; + mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool); + WARN_ON(!mbuf); + + mbuf->data_len = sizeof(*req); + mbuf->pkt_len = mbuf->data_len; + + req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *); + memset(req, 0, sizeof(*req)); + INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid); + req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) | + V_QUEUENO(adapter->sge.fw_evtq.abs_id) | + V_NO_REPLY(no_reply)); + req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid)); + req->mask = cpu_to_be64(mask); + req->val = cpu_to_be64(val); + + t4_mgmt_tx(ctrlq, mbuf); +} + +/** + * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command. + */ +static inline void mk_set_tcb_field_ulp(struct filter_entry *f, + struct cpl_set_tcb_field *req, + unsigned int word, + u64 mask, u64 val, u8 cookie, + int no_reply) +{ + struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req; + struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1); + + txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) | + V_ULP_TXPKT_DEST(0)); + txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16)); + sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM)); + sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr)); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid)); + req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) | + V_QUEUENO(0)); + req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie)); + req->mask = cpu_to_be64(mask); + req->val = cpu_to_be64(val); + sc = (struct ulptx_idata *)(req + 1); + sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); + sc->len = cpu_to_be32(0); +} + +/** + * Check if the entry is already filled. + */ +bool is_filter_set(struct tid_info *t, int fidx, int family) +{ + bool result = FALSE; + int i, max; + + /* IPv6 requires four slots and IPv4 requires only one slot. + * Ensure there are enough slots available. + */ + max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx; + + t4_os_lock(&t->ftid_lock); + for (i = fidx; i <= max; i++) { + if (rte_bitmap_get(t->ftid_bmap, i)) { + result = TRUE; + break; + } + } + t4_os_unlock(&t->ftid_lock); + return result; +} + +/** + * Allocate an available free entry + */ +int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family) +{ + struct tid_info *t = &adap->tids; + int pos; + int size = t->nftids; + + t4_os_lock(&t->ftid_lock); + if (family == FILTER_TYPE_IPV6) + pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4); + else + pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size); + t4_os_unlock(&t->ftid_lock); + + return pos < size ? pos : -1; +} + +/** + * Construct hash filter ntuple.
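+ * + * The match fields (ingress port, IP protocol, ethertype) are packed + * at the bit offsets the TP was configured with; a tuple that does not + * line up exactly with the configured hash_filter_mask cannot be + * hash-filtered, so 0 is returned.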
+ */ +static u64 hash_filter_ntuple(const struct filter_entry *f) +{ + struct adapter *adap = ethdev2adap(f->dev); + struct tp_params *tp = &adap->params.tp; + u64 ntuple = 0; + u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */ + + if (tp->port_shift >= 0) + ntuple |= (u64)f->fs.mask.iport << tp->port_shift; + + if (tp->protocol_shift >= 0) { + if (!f->fs.val.proto) + ntuple |= (u64)tcp_proto << tp->protocol_shift; + else + ntuple |= (u64)f->fs.val.proto << tp->protocol_shift; + } + + if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype) + ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift; + + if (ntuple != tp->hash_filter_mask) + return 0; + + return ntuple; +} + +/** + * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command. + */ +static void mk_abort_req_ulp(struct cpl_abort_req *abort_req, + unsigned int tid) +{ + struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req; + struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1); + + txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) | + V_ULP_TXPKT_DEST(0)); + txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16)); + sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM)); + sc->len = cpu_to_be32(sizeof(*abort_req) - + sizeof(struct work_request_hdr)); + OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid)); + abort_req->rsvd0 = cpu_to_be32(0); + abort_req->rsvd1 = 0; + abort_req->cmd = CPL_ABORT_NO_RST; + sc = (struct ulptx_idata *)(abort_req + 1); + sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); + sc->len = cpu_to_be32(0); +} + +/** + * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command. + */ +static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl, + unsigned int tid) +{ + struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl; + struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1); + + txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) | + V_ULP_TXPKT_DEST(0)); + txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16)); + sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM)); + sc->len = cpu_to_be32(sizeof(*abort_rpl) - + sizeof(struct work_request_hdr)); + OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); + abort_rpl->rsvd0 = cpu_to_be32(0); + abort_rpl->rsvd1 = 0; + abort_rpl->cmd = CPL_ABORT_NO_RST; + sc = (struct ulptx_idata *)(abort_rpl + 1); + sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); + sc->len = cpu_to_be32(0); +} + +/** + * Delete the specified hash filter. 
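+ * + * Deletion goes out as a single ULP_TX work request carrying three + * CPLs: a CPL_SET_TCB_FIELD that re-homes the TCB's RSS info onto the + * firmware event queue, followed by a CPL_ABORT_REQ/CPL_ABORT_RPL pair + * that tears down the TID.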
+ */ +static int cxgbe_del_hash_filter(struct rte_eth_dev *dev, + unsigned int filter_id, + struct filter_ctx *ctx) +{ + struct adapter *adapter = ethdev2adap(dev); + struct tid_info *t = &adapter->tids; + struct filter_entry *f; + struct sge_ctrl_txq *ctrlq; + unsigned int port_id = ethdev2pinfo(dev)->port_id; + int ret; + + if (filter_id > adapter->tids.ntids) + return -E2BIG; + + f = lookup_tid(t, filter_id); + if (!f) { + dev_err(adapter, "%s: no filter entry for filter_id = %d\n", + __func__, filter_id); + return -EINVAL; + } + + ret = writable_filter(f); + if (ret) + return ret; + + if (f->valid) { + unsigned int wrlen; + struct rte_mbuf *mbuf; + struct work_request_hdr *wr; + struct ulptx_idata *aligner; + struct cpl_set_tcb_field *req; + struct cpl_abort_req *abort_req; + struct cpl_abort_rpl *abort_rpl; + + f->ctx = ctx; + f->pending = 1; + + wrlen = cxgbe_roundup(sizeof(*wr) + + (sizeof(*req) + sizeof(*aligner)) + + sizeof(*abort_req) + sizeof(*abort_rpl), + 16); + + ctrlq = &adapter->sge.ctrlq[port_id]; + mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool); + if (!mbuf) { + dev_err(adapter, "%s: could not allocate skb ..\n", + __func__); + goto out_err; + } + + mbuf->data_len = wrlen; + mbuf->pkt_len = mbuf->data_len; + + req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *); + INIT_ULPTX_WR(req, wrlen, 0, 0); + wr = (struct work_request_hdr *)req; + wr++; + req = (struct cpl_set_tcb_field *)wr; + mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO, + V_TCB_RSS_INFO(M_TCB_RSS_INFO), + V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id), + 0, 1); + aligner = (struct ulptx_idata *)(req + 1); + abort_req = (struct cpl_abort_req *)(aligner + 1); + mk_abort_req_ulp(abort_req, f->tid); + abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1); + mk_abort_rpl_ulp(abort_rpl, f->tid); + t4_mgmt_tx(ctrlq, mbuf); + } + return 0; + +out_err: + return -ENOMEM; +} + +/** + * Build a ACT_OPEN_REQ6 message for setting IPv6 hash filter. + */ +static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf, + unsigned int qid_filterid, struct adapter *adap) +{ + struct cpl_t6_act_open_req6 *req = NULL; + u64 local_lo, local_hi, peer_lo, peer_hi; + u32 *lip = (u32 *)f->fs.val.lip; + u32 *fip = (u32 *)f->fs.val.fip; + + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T6: + req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *); + + INIT_TP_WR(req, 0); + break; + default: + dev_err(adap, "%s: unsupported chip type!\n", __func__); + return; + } + + local_hi = ((u64)lip[1]) << 32 | lip[0]; + local_lo = ((u64)lip[3]) << 32 | lip[2]; + peer_hi = ((u64)fip[1]) << 32 | fip[0]; + peer_lo = ((u64)fip[3]) << 32 | fip[2]; + + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, + qid_filterid)); + req->local_port = cpu_to_be16(f->fs.val.lport); + req->peer_port = cpu_to_be16(f->fs.val.fport); + req->local_ip_hi = local_hi; + req->local_ip_lo = local_lo; + req->peer_ip_hi = peer_hi; + req->peer_ip_lo = peer_lo; + req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) | + V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F) + << 1) | + V_TX_CHAN(f->fs.eport) | + V_ULP_MODE(ULP_MODE_NONE) | + F_TCAM_BYPASS | F_NON_OFFLOAD); + req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f))); + req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID | + V_RSS_QUEUE(f->fs.iq) | + F_T5_OPT_2_VALID | + F_RX_CHANNEL | + V_CONG_CNTRL((f->fs.action == FILTER_DROP) | + (f->fs.dirsteer << 1)) | + V_CCTRL_ECN(f->fs.action == FILTER_SWITCH)); +} + +/** + * Build a ACT_OPEN_REQ message for setting IPv4 hash filter. 
+ */ +static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf, + unsigned int qid_filterid, struct adapter *adap) +{ + struct cpl_t6_act_open_req *req = NULL; + + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T6: + req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *); + + INIT_TP_WR(req, 0); + break; + default: + dev_err(adap, "%s: unsupported chip type!\n", __func__); + return; + } + + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, + qid_filterid)); + req->local_port = cpu_to_be16(f->fs.val.lport); + req->peer_port = cpu_to_be16(f->fs.val.fport); + req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 | + f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24; + req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 | + f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24; + req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) | + V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F) + << 1) | + V_TX_CHAN(f->fs.eport) | + V_ULP_MODE(ULP_MODE_NONE) | + F_TCAM_BYPASS | F_NON_OFFLOAD); + req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f))); + req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID | + V_RSS_QUEUE(f->fs.iq) | + F_T5_OPT_2_VALID | + F_RX_CHANNEL | + V_CONG_CNTRL((f->fs.action == FILTER_DROP) | + (f->fs.dirsteer << 1)) | + V_CCTRL_ECN(f->fs.action == FILTER_SWITCH)); +} + +/** + * Set the specified hash filter. + */ +static int cxgbe_set_hash_filter(struct rte_eth_dev *dev, + struct ch_filter_specification *fs, + struct filter_ctx *ctx) +{ + struct port_info *pi = ethdev2pinfo(dev); + struct adapter *adapter = pi->adapter; + struct tid_info *t = &adapter->tids; + struct filter_entry *f; + struct rte_mbuf *mbuf; + struct sge_ctrl_txq *ctrlq; + unsigned int iq; + int atid, size; + int ret = 0; + + ret = validate_filter(adapter, fs); + if (ret) + return ret; + + iq = get_filter_steerq(dev, fs); + + ctrlq = &adapter->sge.ctrlq[pi->port_id]; + + f = t4_os_alloc(sizeof(*f)); + if (!f) + goto out_err; + + f->fs = *fs; + f->ctx = ctx; + f->dev = dev; + f->fs.iq = iq; + + atid = cxgbe_alloc_atid(t, f); + if (atid < 0) + goto out_err; + + if (f->fs.type) { + /* IPv6 hash filter */ + f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip); + if (!f->clipt) + goto free_atid; + + size = sizeof(struct cpl_t6_act_open_req6); + mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool); + if (!mbuf) { + ret = -ENOMEM; + goto free_clip; + } + + mbuf->data_len = size; + mbuf->pkt_len = mbuf->data_len; + + mk_act_open_req6(f, mbuf, + ((adapter->sge.fw_evtq.abs_id << 14) | atid), + adapter); + } else { + /* IPv4 hash filter */ + size = sizeof(struct cpl_t6_act_open_req); + mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool); + if (!mbuf) { + ret = -ENOMEM; + goto free_atid; + } + + mbuf->data_len = size; + mbuf->pkt_len = mbuf->data_len; + + mk_act_open_req(f, mbuf, + ((adapter->sge.fw_evtq.abs_id << 14) | atid), + adapter); + } + + f->pending = 1; + t4_mgmt_tx(ctrlq, mbuf); + return 0; + +free_clip: + cxgbe_clip_release(f->dev, f->clipt); +free_atid: + cxgbe_free_atid(t, atid); + +out_err: + t4_os_free(f); + return ret; +} + +/** + * Clear a filter and release any of its resources that we own. This also + * clears the filter's "pending" status. + */ +void clear_filter(struct filter_entry *f) +{ + if (f->clipt) + cxgbe_clip_release(f->dev, f->clipt); + + /* + * The zeroing of the filter rule below clears the filter valid, + * pending, locked flags etc. so it's all we need for + * this operation. 
+ */ + memset(f, 0, sizeof(*f)); +} + +/** + * t4_mk_filtdelwr - create a delete filter WR + * @ftid: the filter ID + * @wr: the filter work request to populate + * @qid: ingress queue to receive the delete notification + * + * Creates a filter work request to delete the supplied filter. If @qid is + * negative the delete notification is suppressed. + */ +static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid) +{ + memset(wr, 0, sizeof(*wr)); + wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR)); + wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16)); + wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) | + V_FW_FILTER_WR_NOREPLY(qid < 0)); + wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER); + if (qid >= 0) + wr->rx_chan_rx_rpl_iq = + cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid)); +} + +/** + * Create FW work request to delete the filter at a specified index + */ +static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx) +{ + struct adapter *adapter = ethdev2adap(dev); + struct filter_entry *f = &adapter->tids.ftid_tab[fidx]; + struct rte_mbuf *mbuf; + struct fw_filter_wr *fwr; + struct sge_ctrl_txq *ctrlq; + unsigned int port_id = ethdev2pinfo(dev)->port_id; + + ctrlq = &adapter->sge.ctrlq[port_id]; + mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool); + if (!mbuf) + return -ENOMEM; + + mbuf->data_len = sizeof(*fwr); + mbuf->pkt_len = mbuf->data_len; + + fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *); + t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id); + + /* + * Mark the filter as "pending" and ship off the Filter Work Request. + * When we get the Work Request Reply we'll clear the pending status. + */ + f->pending = 1; + t4_mgmt_tx(ctrlq, mbuf); + return 0; +} + +int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx) +{ + struct adapter *adapter = ethdev2adap(dev); + struct filter_entry *f = &adapter->tids.ftid_tab[fidx]; + struct rte_mbuf *mbuf; + struct fw_filter_wr *fwr; + struct sge_ctrl_txq *ctrlq; + unsigned int port_id = ethdev2pinfo(dev)->port_id; + int ret; + + ctrlq = &adapter->sge.ctrlq[port_id]; + mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool); + if (!mbuf) { + ret = -ENOMEM; + goto out; + } + + mbuf->data_len = sizeof(*fwr); + mbuf->pkt_len = mbuf->data_len; + + fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *); + memset(fwr, 0, sizeof(*fwr)); + + /* + * Construct the work request to set the filter. 
+ */ + fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR)); + fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16)); + fwr->tid_to_iq = + cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) | + V_FW_FILTER_WR_RQTYPE(f->fs.type) | + V_FW_FILTER_WR_NOREPLY(0) | + V_FW_FILTER_WR_IQ(f->fs.iq)); + fwr->del_filter_to_l2tix = + cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | + V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | + V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | + V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | + V_FW_FILTER_WR_TXCHAN(f->fs.eport) | + V_FW_FILTER_WR_PRIO(f->fs.prio)); + fwr->ethtype = cpu_to_be16(f->fs.val.ethtype); + fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype); + fwr->smac_sel = 0; + fwr->rx_chan_rx_rpl_iq = + cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) | + V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id + )); + fwr->maci_to_matchtypem = + cpu_to_be32(V_FW_FILTER_WR_PORT(f->fs.val.iport) | + V_FW_FILTER_WR_PORTM(f->fs.mask.iport)); + fwr->ptcl = f->fs.val.proto; + fwr->ptclm = f->fs.mask.proto; + rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip)); + rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm)); + rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip)); + rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm)); + fwr->lp = cpu_to_be16(f->fs.val.lport); + fwr->lpm = cpu_to_be16(f->fs.mask.lport); + fwr->fp = cpu_to_be16(f->fs.val.fport); + fwr->fpm = cpu_to_be16(f->fs.mask.fport); + + /* + * Mark the filter as "pending" and ship off the Filter Work Request. + * When we get the Work Request Reply we'll clear the pending status. + */ + f->pending = 1; + t4_mgmt_tx(ctrlq, mbuf); + return 0; + +out: + return ret; +} + +/** + * Set the corresponding entry in the bitmap. 4 slots are + * marked for IPv6, whereas only 1 slot is marked for IPv4. + */ +static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family) +{ + t4_os_lock(&t->ftid_lock); + if (rte_bitmap_get(t->ftid_bmap, fidx)) { + t4_os_unlock(&t->ftid_lock); + return -EBUSY; + } + + if (family == FILTER_TYPE_IPV4) { + rte_bitmap_set(t->ftid_bmap, fidx); + } else { + rte_bitmap_set(t->ftid_bmap, fidx); + rte_bitmap_set(t->ftid_bmap, fidx + 1); + rte_bitmap_set(t->ftid_bmap, fidx + 2); + rte_bitmap_set(t->ftid_bmap, fidx + 3); + } + t4_os_unlock(&t->ftid_lock); + return 0; +} + +/** + * Clear the corresponding entry in the bitmap. 4 slots are + * cleared for IPv6, whereas only 1 slot is cleared for IPv4. + */ +static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family) +{ + t4_os_lock(&t->ftid_lock); + if (family == FILTER_TYPE_IPV4) { + rte_bitmap_clear(t->ftid_bmap, fidx); + } else { + rte_bitmap_clear(t->ftid_bmap, fidx); + rte_bitmap_clear(t->ftid_bmap, fidx + 1); + rte_bitmap_clear(t->ftid_bmap, fidx + 2); + rte_bitmap_clear(t->ftid_bmap, fidx + 3); + } + t4_os_unlock(&t->ftid_lock); +} + +/** + * Check a delete filter request for validity and send it to the hardware. + * Return 0 on success, an error number otherwise. We attach any provided + * filter operation context to the internal filter specification in order to + * facilitate signaling completion of the operation. 
+ */ +int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id, + struct ch_filter_specification *fs, + struct filter_ctx *ctx) +{ + struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct adapter *adapter = pi->adapter; + struct filter_entry *f; + unsigned int chip_ver; + int ret; + + if (is_hashfilter(adapter) && fs->cap) + return cxgbe_del_hash_filter(dev, filter_id, ctx); + + if (filter_id >= adapter->tids.nftids) + return -ERANGE; + + chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); + + ret = is_filter_set(&adapter->tids, filter_id, fs->type); + if (!ret) { + dev_warn(adap, "%s: could not find filter entry: %u\n", + __func__, filter_id); + return -EINVAL; + } + + /* + * Ensure filter id is aligned on the 2 slot boundary for T6, + * and 4 slot boundary for cards below T6. + */ + if (fs->type) { + if (chip_ver < CHELSIO_T6) + filter_id &= ~(0x3); + else + filter_id &= ~(0x1); + } + + f = &adapter->tids.ftid_tab[filter_id]; + ret = writable_filter(f); + if (ret) + return ret; + + if (f->valid) { + f->ctx = ctx; + cxgbe_clear_ftid(&adapter->tids, + f->tid - adapter->tids.ftid_base, + f->fs.type ? FILTER_TYPE_IPV6 : + FILTER_TYPE_IPV4); + return del_filter_wr(dev, filter_id); + } + + /* + * If the caller has passed in a Completion Context then we need to + * mark it as a successful completion so they don't stall waiting + * for it. + */ + if (ctx) { + ctx->result = 0; + t4_complete(&ctx->completion); + } + + return 0; +} + +/** + * Check a Chelsio Filter Request for validity, convert it into our internal + * format and send it to the hardware. Return 0 on success, an error number + * otherwise. We attach any provided filter operation context to the internal + * filter specification in order to facilitate signaling completion of the + * operation. + */ +int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id, + struct ch_filter_specification *fs, + struct filter_ctx *ctx) +{ + struct port_info *pi = ethdev2pinfo(dev); + struct adapter *adapter = pi->adapter; + unsigned int fidx, iq, fid_bit = 0; + struct filter_entry *f; + unsigned int chip_ver; + uint8_t bitoff[16] = {0}; + int ret; + + if (is_hashfilter(adapter) && fs->cap) + return cxgbe_set_hash_filter(dev, fs, ctx); + + if (filter_id >= adapter->tids.nftids) + return -ERANGE; + + chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); + + ret = validate_filter(adapter, fs); + if (ret) + return ret; + + /* + * Ensure filter id is aligned on the 4 slot boundary for IPv6 + * maskfull filters. + */ + if (fs->type) + filter_id &= ~(0x3); + + ret = is_filter_set(&adapter->tids, filter_id, fs->type); + if (ret) + return -EBUSY; + + iq = get_filter_steerq(dev, fs); + + /* + * IPv6 filters occupy four slots and must be aligned on four-slot + * boundaries for T5. On T6, IPv6 filters occupy two-slots and + * must be aligned on two-slot boundaries. + * + * IPv4 filters only occupy a single slot and have no alignment + * requirements but writing a new IPv4 filter into the middle + * of an existing IPv6 filter requires clearing the old IPv6 + * filter. + */ + if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */ + /* + * For T6, If our IPv4 filter isn't being written to a + * multiple of two filter index and there's an IPv6 + * filter at the multiple of 2 base slot, then we need + * to delete that IPv6 filter ... + * For adapters below T6, IPv6 filter occupies 4 entries. 
+		 */
+		if (chip_ver < CHELSIO_T6)
+			fidx = filter_id & ~0x3;
+		else
+			fidx = filter_id & ~0x1;
+
+		if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
+			f = &adapter->tids.ftid_tab[fidx];
+			if (f->valid)
+				return -EBUSY;
+		}
+	} else { /* IPv6 */
+		unsigned int max_filter_id;
+
+		if (chip_ver < CHELSIO_T6) {
+			/*
+			 * Ensure that the IPv6 filter is aligned on a
+			 * multiple of 4 boundary.
+			 */
+			if (filter_id & 0x3)
+				return -EINVAL;
+
+			max_filter_id = filter_id + 4;
+		} else {
+			/*
+			 * For T6, CLIP being enabled, IPv6 filter would occupy
+			 * 2 entries.
+			 */
+			if (filter_id & 0x1)
+				return -EINVAL;
+
+			max_filter_id = filter_id + 2;
+		}
+
+		/*
+		 * Check all except the base overlapping IPv4 filter
+		 * slots.
+		 */
+		for (fidx = filter_id + 1; fidx < max_filter_id; fidx++) {
+			f = &adapter->tids.ftid_tab[fidx];
+			if (f->valid)
+				return -EBUSY;
+		}
+	}
+
+	/*
+	 * Check to make sure that the provided filter index is not
+	 * already in use by someone else
+	 */
+	f = &adapter->tids.ftid_tab[filter_id];
+	if (f->valid)
+		return -EBUSY;
+
+	fidx = adapter->tids.ftid_base + filter_id;
+	fid_bit = filter_id;
+	ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
+			     fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
+	if (ret)
+		return ret;
+
+	/*
+	 * Check to make sure the filter requested is writable ...
+	 */
+	ret = writable_filter(f);
+	if (ret) {
+		/* Clear the bits we have set above */
+		cxgbe_clear_ftid(&adapter->tids, fid_bit,
+				 fs->type ? FILTER_TYPE_IPV6 :
+					    FILTER_TYPE_IPV4);
+		return ret;
+	}
+
+	/*
+	 * Allocate a clip table entry only if we have non-zero IPv6 address
+	 */
+	if (chip_ver > CHELSIO_T5 && fs->type &&
+	    memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
+		f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
+		if (!f->clipt)
+			goto free_tid;
+	}
+
+	/*
+	 * Convert the filter specification into our internal format.
+	 * We copy the PF/VF specification into the Outer VLAN field
+	 * here so the rest of the code -- including the interface to
+	 * the firmware -- doesn't have to constantly do these checks.
+	 */
+	f->fs = *fs;
+	f->fs.iq = iq;
+	f->dev = dev;
+
+	/*
+	 * Attempt to set the filter. If we don't succeed, we clear
+	 * it and return the failure.
+	 */
+	f->ctx = ctx;
+	f->tid = fidx; /* Save the actual tid */
+	ret = set_filter_wr(dev, filter_id);
+	if (ret) {
+		fid_bit = f->tid - adapter->tids.ftid_base;
+		goto free_tid;
+	}
+
+	return ret;
+
+free_tid:
+	cxgbe_clear_ftid(&adapter->tids, fid_bit,
+			 fs->type ? FILTER_TYPE_IPV6 :
+				    FILTER_TYPE_IPV4);
+	clear_filter(f);
+	return ret;
+}
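The alignment rules above are easiest to see with concrete numbers. Below is a small illustrative helper (not part of the driver) restating the constraint: on pre-T6 chips an IPv6 filter occupies four slots and must start on a multiple of 4; on T6, with CLIP enabled, it occupies two slots and must start on a multiple of 2; IPv4 has no alignment requirement.

static int example_fidx_is_aligned(unsigned int filter_id, int ipv6,
				   unsigned int chip_ver)
{
	unsigned int width;

	if (!ipv6)
		return 1;	/* IPv4: any slot will do */

	width = chip_ver < CHELSIO_T6 ? 4 : 2;

	/* e.g. filter_id 6 passes on T6 (6 % 2 == 0) but is rejected
	 * on T5 (6 % 4 != 0), matching the -EINVAL paths above.
	 */
	return (filter_id % width) == 0;
}

+
+/**
+ * Handle a Hash filter write reply.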
+ */ +void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl) +{ + struct tid_info *t = &adap->tids; + struct filter_entry *f; + struct filter_ctx *ctx = NULL; + unsigned int tid = GET_TID(rpl); + unsigned int ftid = G_TID_TID(G_AOPEN_ATID + (be32_to_cpu(rpl->atid_status))); + unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status)); + + f = lookup_atid(t, ftid); + if (!f) { + dev_warn(adap, "%s: could not find filter entry: %d\n", + __func__, ftid); + return; + } + + ctx = f->ctx; + f->ctx = NULL; + + switch (status) { + case CPL_ERR_NONE: { + f->tid = tid; + f->pending = 0; /* asynchronous setup completed */ + f->valid = 1; + + cxgbe_insert_tid(t, f, f->tid, 0); + cxgbe_free_atid(t, ftid); + if (ctx) { + ctx->tid = f->tid; + ctx->result = 0; + } + if (f->fs.hitcnts) + set_tcb_field(adap, tid, + W_TCB_TIMESTAMP, + V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) | + V_TCB_T_RTT_TS_RECENT_AGE + (M_TCB_T_RTT_TS_RECENT_AGE), + V_TCB_TIMESTAMP(0ULL) | + V_TCB_T_RTT_TS_RECENT_AGE(0ULL), + 1); + break; + } + default: + dev_warn(adap, "%s: filter creation failed with status = %u\n", + __func__, status); + + if (ctx) { + if (status == CPL_ERR_TCAM_FULL) + ctx->result = -EAGAIN; + else + ctx->result = -EINVAL; + } + + cxgbe_free_atid(t, ftid); + t4_os_free(f); + } + + if (ctx) + t4_complete(&ctx->completion); +} + +/** + * Handle a LE-TCAM filter write/deletion reply. + */ +void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl) +{ + struct filter_entry *f = NULL; + unsigned int tid = GET_TID(rpl); + int idx, max_fidx = adap->tids.nftids; + + /* Get the corresponding filter entry for this tid */ + if (adap->tids.ftid_tab) { + /* Check this in normal filter region */ + idx = tid - adap->tids.ftid_base; + if (idx >= max_fidx) + return; + + f = &adap->tids.ftid_tab[idx]; + if (f->tid != tid) + return; + } + + /* We found the filter entry for this tid */ + if (f) { + unsigned int ret = G_COOKIE(rpl->cookie); + struct filter_ctx *ctx; + + /* + * Pull off any filter operation context attached to the + * filter. + */ + ctx = f->ctx; + f->ctx = NULL; + + if (ret == FW_FILTER_WR_FLT_ADDED) { + f->pending = 0; /* asynchronous setup completed */ + f->valid = 1; + if (ctx) { + ctx->tid = f->tid; + ctx->result = 0; + } + } else if (ret == FW_FILTER_WR_FLT_DELETED) { + /* + * Clear the filter when we get confirmation from the + * hardware that the filter has been deleted. + */ + clear_filter(f); + if (ctx) + ctx->result = 0; + } else { + /* + * Something went wrong. Issue a warning about the + * problem and clear everything out. + */ + dev_warn(adap, "filter %u setup failed with error %u\n", + idx, ret); + clear_filter(f); + if (ctx) + ctx->result = -EINVAL; + } + + if (ctx) + t4_complete(&ctx->completion); + } +} + +/* + * Retrieve the packet count for the specified filter. 
+ */ +int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx, + u64 *c, int hash, bool get_byte) +{ + struct filter_entry *f; + unsigned int tcb_base, tcbaddr; + int ret; + + tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE); + if (is_hashfilter(adapter) && hash) { + if (fidx < adapter->tids.ntids) { + f = adapter->tids.tid_tab[fidx]; + if (!f) + return -EINVAL; + + if (is_t5(adapter->params.chip)) { + *c = 0; + return 0; + } + tcbaddr = tcb_base + (fidx * TCB_SIZE); + goto get_count; + } else { + return -ERANGE; + } + } else { + if (fidx >= adapter->tids.nftids) + return -ERANGE; + + f = &adapter->tids.ftid_tab[fidx]; + if (!f->valid) + return -EINVAL; + + tcbaddr = tcb_base + f->tid * TCB_SIZE; + } + + f = &adapter->tids.ftid_tab[fidx]; + if (!f->valid) + return -EINVAL; + +get_count: + if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) { + /* + * For T5, the Filter Packet Hit Count is maintained as a + * 32-bit Big Endian value in the TCB field {timestamp}. + * Similar to the craziness above, instead of the filter hit + * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) * + * sizeof(u32)), it actually shows up at offset 24. Whacky. + */ + if (get_byte) { + unsigned int word_offset = 4; + __be64 be64_byte_count; + + t4_os_lock(&adapter->win0_lock); + ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0, + tcbaddr + + (word_offset * sizeof(__be32)), + sizeof(be64_byte_count), + &be64_byte_count, + T4_MEMORY_READ); + t4_os_unlock(&adapter->win0_lock); + if (ret < 0) + return ret; + *c = be64_to_cpu(be64_byte_count); + } else { + unsigned int word_offset = 6; + __be32 be32_count; + + t4_os_lock(&adapter->win0_lock); + ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0, + tcbaddr + + (word_offset * sizeof(__be32)), + sizeof(be32_count), &be32_count, + T4_MEMORY_READ); + t4_os_unlock(&adapter->win0_lock); + if (ret < 0) + return ret; + *c = (u64)be32_to_cpu(be32_count); + } + } + return 0; +} + +/** + * Handle a Hash filter delete reply. + */ +void hash_del_filter_rpl(struct adapter *adap, + const struct cpl_abort_rpl_rss *rpl) +{ + struct tid_info *t = &adap->tids; + struct filter_entry *f; + struct filter_ctx *ctx = NULL; + unsigned int tid = GET_TID(rpl); + + f = lookup_tid(t, tid); + if (!f) { + dev_warn(adap, "%s: could not find filter entry: %u\n", + __func__, tid); + return; + } + + ctx = f->ctx; + f->ctx = NULL; + + f->valid = 0; + + if (f->clipt) + cxgbe_clip_release(f->dev, f->clipt); + + cxgbe_remove_tid(t, 0, tid, 0); + t4_os_free(f); + + if (ctx) { + ctx->result = 0; + t4_complete(&ctx->completion); + } +} diff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h new file mode 100644 index 00000000..af8fa752 --- /dev/null +++ b/drivers/net/cxgbe/cxgbe_filter.h @@ -0,0 +1,235 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef _CXGBE_FILTER_H_ +#define _CXGBE_FILTER_H_ + +#include "t4_msg.h" +/* + * Defined bit width of user definable filter tuples + */ +#define ETHTYPE_BITWIDTH 16 +#define FRAG_BITWIDTH 1 +#define MACIDX_BITWIDTH 9 +#define FCOE_BITWIDTH 1 +#define IPORT_BITWIDTH 3 +#define MATCHTYPE_BITWIDTH 3 +#define PROTO_BITWIDTH 8 +#define TOS_BITWIDTH 8 +#define PF_BITWIDTH 8 +#define VF_BITWIDTH 8 +#define IVLAN_BITWIDTH 16 +#define OVLAN_BITWIDTH 16 + +/* + * Filter matching rules. These consist of a set of ingress packet field + * (value, mask) tuples. The associated ingress packet field matches the + * tuple when ((field & mask) == value). 
(Thus a wildcard "don't care" field
+ * rule can be constructed by specifying a tuple of (0, 0).)  A filter rule
+ * matches an ingress packet when all of the individual field
+ * matching rules are true.
+ *
+ * Partial field masks are always valid, however, while it may be easy to
+ * understand their meanings for some fields (e.g. IP address to match a
+ * subnet), for others making sensible partial masks is less intuitive (e.g.
+ * MPS match type) ...
+ */
+struct ch_filter_tuple {
+	/*
+	 * Compressed header matching field rules. The TP_VLAN_PRI_MAP
+	 * register selects which of these fields will participate in the
+	 * filter match rules -- up to a maximum of 36 bits. Because
+	 * TP_VLAN_PRI_MAP is a global register, all filters must use the same
+	 * set of fields.
+	 */
+	uint32_t ethtype:ETHTYPE_BITWIDTH;	/* Ethernet type */
+	uint32_t frag:FRAG_BITWIDTH;		/* IP fragmentation header */
+	uint32_t ivlan_vld:1;			/* inner VLAN valid */
+	uint32_t ovlan_vld:1;			/* outer VLAN valid */
+	uint32_t pfvf_vld:1;			/* PF/VF valid */
+	uint32_t macidx:MACIDX_BITWIDTH;	/* exact match MAC index */
+	uint32_t fcoe:FCOE_BITWIDTH;		/* FCoE packet */
+	uint32_t iport:IPORT_BITWIDTH;		/* ingress port */
+	uint32_t matchtype:MATCHTYPE_BITWIDTH;	/* MPS match type */
+	uint32_t proto:PROTO_BITWIDTH;		/* protocol type */
+	uint32_t tos:TOS_BITWIDTH;		/* TOS/Traffic Type */
+	uint32_t pf:PF_BITWIDTH;		/* PCI-E PF ID */
+	uint32_t vf:VF_BITWIDTH;		/* PCI-E VF ID */
+	uint32_t ivlan:IVLAN_BITWIDTH;		/* inner VLAN */
+	uint32_t ovlan:OVLAN_BITWIDTH;		/* outer VLAN */
+
+	/*
+	 * Uncompressed header matching field rules. These are always
+	 * available for field rules.
+	 */
+	uint8_t lip[16];	/* local IP address (IPv4 in [3:0]) */
+	uint8_t fip[16];	/* foreign IP address (IPv4 in [3:0]) */
+	uint16_t lport;		/* local port */
+	uint16_t fport;		/* foreign port */
+
+	/* reservations for future additions */
+	uint8_t rsvd[12];
+};
+
+/*
+ * Filter specification
+ */
+struct ch_filter_specification {
+	/* Administrative fields for filter. */
+	uint32_t hitcnts:1;	/* count filter hits in TCB */
+	uint32_t prio:1;	/* filter has priority over active/server */
+
+	/*
+	 * Fundamental filter typing. This is the one element of filter
+	 * matching that doesn't exist as a (value, mask) tuple.
+	 */
+	uint32_t type:1;	/* 0 => IPv4, 1 => IPv6 */
+	uint32_t cap:1;		/* 0 => LE-TCAM, 1 => Hash */
+
+	/*
+	 * Packet dispatch information. Ingress packets which match the
+	 * filter rules will be dropped, passed to the host or switched back
+	 * out as egress packets.
+	 */
+	uint32_t action:2;	/* drop, pass, switch */
+
+	uint32_t dirsteer:1;	/* 0 => RSS, 1 => steer to iq */
+	uint32_t iq:10;		/* ingress queue */
+
+	uint32_t eport:2;	/* egress port to switch packet out */
+
+	/* Filter rule value/mask pairs. */
+	struct ch_filter_tuple val;
+	struct ch_filter_tuple mask;
+};
+
+enum {
+	FILTER_PASS = 0,	/* default */
+	FILTER_DROP,
+	FILTER_SWITCH
+};
+
+enum filter_type {
+	FILTER_TYPE_IPV4 = 0,
+	FILTER_TYPE_IPV6,
+};
+
+struct t4_completion {
+	unsigned int done;	/* completion done (0 - No, 1 - Yes) */
+	rte_spinlock_t lock;	/* completion lock */
+};
+
+/*
+ * Filter operation context to allow callers to wait for
+ * an asynchronous completion.
+ */
+struct filter_ctx {
+	struct t4_completion completion;	/* completion rendezvous */
+	int result;				/* result of operation */
+	u32 tid;				/* to store tid of hash filter */
+};
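For orientation, here is a minimal sketch of filling a ch_filter_specification by hand: an IPv4 rule that drops TCP traffic to local port 80 and counts hits. It is illustrative only; in this driver the spec is normally populated by the rte_flow parsers in cxgbe_flow.c rather than built by hand.

static void example_fill_drop_spec(struct ch_filter_specification *fs)
{
	memset(fs, 0, sizeof(*fs));

	fs->type = FILTER_TYPE_IPV4;	/* 0 => IPv4 */
	fs->cap = 0;			/* LE-TCAM region */
	fs->action = FILTER_DROP;
	fs->hitcnts = 1;		/* count hits in the TCB */

	/* (value, mask) pairs: match protocol TCP and local port 80 exactly */
	fs->val.proto = 6;		/* IPPROTO_TCP */
	fs->mask.proto = 0xff;
	fs->val.lport = 80;
	fs->mask.lport = 0xffff;
}

+
+/*
+ * Host shadow copy of ingress filter entry.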
This is in host native format + * and doesn't match the ordering or bit order, etc. of the hardware or the + * firmware command. + */ +struct filter_entry { + /* + * Administrative fields for filter. + */ + u32 valid:1; /* filter allocated and valid */ + u32 locked:1; /* filter is administratively locked */ + u32 pending:1; /* filter action is pending FW reply */ + struct filter_ctx *ctx; /* caller's completion hook */ + struct clip_entry *clipt; /* CLIP Table entry for IPv6 */ + struct rte_eth_dev *dev; /* Port's rte eth device */ + void *private; /* For use by apps using filter_entry */ + + /* This will store the actual tid */ + u32 tid; + + /* + * The filter itself. + */ + struct ch_filter_specification fs; +}; + +#define FILTER_ID_MAX (~0U) + +struct tid_info; +struct adapter; + +/** + * Find first clear bit in the bitmap. + */ +static inline unsigned int cxgbe_find_first_zero_bit(struct rte_bitmap *bmap, + unsigned int size) +{ + unsigned int idx; + + for (idx = 0; idx < size; idx++) + if (!rte_bitmap_get(bmap, idx)) + break; + + return idx; +} + +/** + * Find a free region of 'num' consecutive entries. + */ +static inline unsigned int +cxgbe_bitmap_find_free_region(struct rte_bitmap *bmap, unsigned int size, + unsigned int num) +{ + unsigned int idx, j, free = 0; + + if (num > size) + return size; + + for (idx = 0; idx < size; idx += num) { + for (j = 0; j < num; j++) { + if (!rte_bitmap_get(bmap, idx + j)) { + free++; + } else { + free = 0; + break; + } + } + + /* Found the Region */ + if (free == num) + break; + + /* Reached the end and still no region found */ + if ((idx + num) > size) { + idx = size; + break; + } + } + + return idx; +} + +bool is_filter_set(struct tid_info *, int fidx, int family); +void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl); +void clear_filter(struct filter_entry *f); +int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx); +int writable_filter(struct filter_entry *f); +int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id, + struct ch_filter_specification *fs, + struct filter_ctx *ctx); +int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id, + struct ch_filter_specification *fs, + struct filter_ctx *ctx); +int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family); +int init_hash_filter(struct adapter *adap); +void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl); +void hash_del_filter_rpl(struct adapter *adap, + const struct cpl_abort_rpl_rss *rpl); +int validate_filter(struct adapter *adap, struct ch_filter_specification *fs); +int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx, + u64 *c, int hash, bool get_byte); +#endif /* _CXGBE_FILTER_H_ */ diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c new file mode 100644 index 00000000..01c945f1 --- /dev/null +++ b/drivers/net/cxgbe/cxgbe_flow.c @@ -0,0 +1,845 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. 
+ */
+#include "common.h"
+#include "cxgbe_flow.h"
+
+#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
+do { \
+	if (!((fs)->val.elem || (fs)->mask.elem)) { \
+		(fs)->val.elem = (__v); \
+		(fs)->mask.elem = (__m); \
+	} else { \
+		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
+					  NULL, "a filter can be specified" \
+					  " only once"); \
+	} \
+} while (0)
+
+#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
+do { \
+	memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
+	memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
+} while (0)
+
+#define CXGBE_FILL_FS(v, m, elem) \
+	__CXGBE_FILL_FS(v, m, fs, elem, e)
+
+#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
+	__CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
+
+static int
+cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
+{
+	/* rte_flow specification does not allow it. */
+	if (!i->spec && (i->mask || i->last))
+		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+					  i, "last or mask given without spec");
+	/*
+	 * We don't support it.
+	 * Although we could support values in last as 0's or last == spec,
+	 * this would not provide the user with any additional functionality
+	 * and would only increase the complexity for us.
+	 */
+	if (i->last)
+		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+					  i, "last is not supported by chelsio pmd");
+	return 0;
+}
+
+static void
+cxgbe_fill_filter_region(struct adapter *adap,
+			 struct ch_filter_specification *fs)
+{
+	struct tp_params *tp = &adap->params.tp;
+	u64 hash_filter_mask = tp->hash_filter_mask;
+	u64 ntuple_mask = 0;
+
+	fs->cap = 0;
+
+	if (!is_hashfilter(adap))
+		return;
+
+	if (fs->type) {
+		uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
+				     0xff, 0xff, 0xff, 0xff,
+				     0xff, 0xff, 0xff, 0xff,
+				     0xff, 0xff, 0xff, 0xff};
+		uint8_t bitoff[16] = {0};
+
+		if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
+		    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
+		    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
+		    memcmp(fs->mask.fip, biton, sizeof(biton)))
+			return;
+	} else {
+		uint32_t biton = 0xffffffff;
+		uint32_t bitoff = 0x0U;
+
+		if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
+		    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
+		    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
+		    memcmp(fs->mask.fip, &biton, sizeof(biton)))
+			return;
+	}
+
+	if (!fs->val.lport || fs->mask.lport != 0xffff)
+		return;
+	if (!fs->val.fport || fs->mask.fport != 0xffff)
+		return;
+
+	if (tp->protocol_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
+	if (tp->ethertype_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
+	if (tp->port_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
+
+	if (ntuple_mask != hash_filter_mask)
+		return;
+
+	fs->cap = 1;	/* use hash region */
+}
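The CXGBE_FILL_FS macro is what gives every parser below its set-once semantics: it fills the (value, mask) pair for a tuple field and fails with EINVAL if the field was already set. A toy parser (not part of the driver) makes the expected in-scope names visible: the macro expands against local variables named fs and e.

static int
example_parse_tos(uint8_t tos_val, uint8_t tos_mask,
		  struct ch_filter_specification *fs,
		  struct rte_flow_error *e)
{
	/* Fills fs->val.tos/fs->mask.tos once; a second call for the
	 * same field would return through rte_flow_error_set().
	 */
	CXGBE_FILL_FS(tos_val, tos_mask, tos);
	return 0;
}

+static int
+ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
+		      struct ch_filter_specification *fs,
+		      struct rte_flow_error *e)
+{
+	const struct rte_flow_item_phy_port *val = item->spec;
+	const struct rte_flow_item_phy_port *umask = item->mask;
+	const struct rte_flow_item_phy_port *mask;
+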
+	mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
+
+	if (val->index > 0x7)
+		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "port index up to 0x7 is supported");
+
+	CXGBE_FILL_FS(val->index, mask->index, iport);
+
+	return 0;
+}
+
+static int
+ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
+		     struct ch_filter_specification *fs,
+		     struct rte_flow_error *e)
+{
+	const struct rte_flow_item_udp *val = item->spec;
+	const struct rte_flow_item_udp *umask = item->mask;
+	const struct rte_flow_item_udp *mask;
+
+	mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;
+
+	if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
+		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "udp: only src/dst port supported");
+
+	CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
+	if (!val)
+		return 0;
+	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
+		      be16_to_cpu(mask->hdr.src_port), fport);
+	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
+		      be16_to_cpu(mask->hdr.dst_port), lport);
+	return 0;
+}
+
+static int
+ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
+		     struct ch_filter_specification *fs,
+		     struct rte_flow_error *e)
+{
+	const struct rte_flow_item_tcp *val = item->spec;
+	const struct rte_flow_item_tcp *umask = item->mask;
+	const struct rte_flow_item_tcp *mask;
+
+	mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;
+
+	if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
+	    mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
+	    mask->hdr.tcp_urp)
+		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "tcp: only src/dst port supported");
+
+	CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
+	if (!val)
+		return 0;
+	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
+		      be16_to_cpu(mask->hdr.src_port), fport);
+	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
+		      be16_to_cpu(mask->hdr.dst_port), lport);
+	return 0;
+}
+
+static int
+ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
+		      struct ch_filter_specification *fs,
+		      struct rte_flow_error *e)
+{
+	const struct rte_flow_item_ipv4 *val = item->spec;
+	const struct rte_flow_item_ipv4 *umask = item->mask;
+	const struct rte_flow_item_ipv4 *mask;
+
+	mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;
+
+	if (mask->hdr.time_to_live || mask->hdr.type_of_service)
+		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+					  item, "ttl/tos are not supported");
+
+	fs->type = FILTER_TYPE_IPV4;
+	CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
+	if (!val)
+		return 0; /* ipv4 wild card */
+
+	CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
+	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
+	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
+
+	return 0;
+}
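From the application side, these parsers accept ordinary rte_flow patterns. A sketch of one such pattern is below: match TCP destination port 80 over any IPv4 addresses (application-side code, not part of the PMD; the IPv4 item is left as a wildcard, which still sets the ethertype as shown above).

static const struct rte_flow_item example_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* wild card, sets ethtype */
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.spec = &(const struct rte_flow_item_tcp){
			.hdr.dst_port = RTE_BE16(80),
		},
		.mask = &(const struct rte_flow_item_tcp){
			.hdr.dst_port = RTE_BE16(0xffff),
		},
	},
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

+static int
+ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
+		      struct ch_filter_specification *fs,
+		      struct rte_flow_error *e)
+{
+	const struct rte_flow_item_ipv6 *val = item->spec;
+	const struct rte_flow_item_ipv6 *umask = item->mask;
+	const struct rte_flow_item_ipv6 *mask;
+
+	mask = umask ?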
umask : (const struct rte_flow_item_ipv6 *)dmask; + + if (mask->hdr.vtc_flow || + mask->hdr.payload_len || mask->hdr.hop_limits) + return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, + "tc/flow/hop are not supported"); + + fs->type = FILTER_TYPE_IPV6; + CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype); + if (!val) + return 0; /* ipv6 wild card */ + + CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto); + CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip); + CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip); + + return 0; +} + +static int +cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr, + struct rte_flow_error *e) +{ + if (attr->egress) + return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR, + attr, "attribute: is" + " not supported !"); + if (attr->group > 0) + return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR, + attr, "group parameter is" + " not supported."); + + flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX; + + return 0; +} + +static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq) +{ + struct port_info *pi = ethdev2pinfo(dev); + + if (rxq > pi->n_rx_qsets) + return -EINVAL; + return 0; +} + +static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx) +{ + struct adapter *adap = ethdev2adap(f->dev); + struct ch_filter_specification fs = f->fs; + + if (fidx >= adap->tids.nftids) { + dev_err(adap, "invalid flow index %d.\n", fidx); + return -EINVAL; + } + if (!is_filter_set(&adap->tids, fidx, fs.type)) { + dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f); + return -EINVAL; + } + + return 0; +} + +static int +cxgbe_validate_fidxonadd(struct ch_filter_specification *fs, + struct adapter *adap, unsigned int fidx) +{ + if (is_filter_set(&adap->tids, fidx, fs->type)) { + dev_err(adap, "filter index: %d is busy.\n", fidx); + return -EBUSY; + } + if (fidx >= adap->tids.nftids) { + dev_err(adap, "filter index (%u) >= max(%u)\n", + fidx, adap->tids.nftids); + return -ERANGE; + } + + return 0; +} + +static int +cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del) +{ + if (flow->fs.cap) + return 0; /* Hash filters */ + return del ? 
cxgbe_validate_fidxondel(flow->f, fidx) : + cxgbe_validate_fidxonadd(&flow->fs, + ethdev2adap(flow->dev), fidx); +} + +static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx) +{ + struct ch_filter_specification *fs = &flow->fs; + struct adapter *adap = ethdev2adap(flow->dev); + + /* For tcam get the next available slot, if default value specified */ + if (flow->fidx == FILTER_ID_MAX) { + int idx; + + idx = cxgbe_alloc_ftid(adap, fs->type); + if (idx < 0) { + dev_err(adap, "unable to get a filter index in tcam\n"); + return -ENOMEM; + } + *fidx = (unsigned int)idx; + } else { + *fidx = flow->fidx; + } + + return 0; +} + +static int +ch_rte_parse_atype_switch(const struct rte_flow_action *a, + struct ch_filter_specification *fs, + struct rte_flow_error *e) +{ + const struct rte_flow_action_phy_port *port; + + switch (a->type) { + case RTE_FLOW_ACTION_TYPE_PHY_PORT: + port = (const struct rte_flow_action_phy_port *)a->conf; + fs->eport = port->index; + break; + default: + /* We are not supposed to come here */ + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "Action not supported"); + } + + return 0; +} + +static int +cxgbe_rtef_parse_actions(struct rte_flow *flow, + const struct rte_flow_action action[], + struct rte_flow_error *e) +{ + struct ch_filter_specification *fs = &flow->fs; + const struct rte_flow_action_queue *q; + const struct rte_flow_action *a; + char abit = 0; + int ret; + + for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) { + switch (a->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + continue; + case RTE_FLOW_ACTION_TYPE_DROP: + if (abit++) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "specify only 1 pass/drop"); + fs->action = FILTER_DROP; + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + q = (const struct rte_flow_action_queue *)a->conf; + if (!q) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, q, + "specify rx queue index"); + if (check_rxq(flow->dev, q->index)) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, q, + "Invalid rx queue"); + if (abit++) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "specify only 1 pass/drop"); + fs->action = FILTER_PASS; + fs->dirsteer = 1; + fs->iq = q->index; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + fs->hitcnts = 1; + break; + case RTE_FLOW_ACTION_TYPE_PHY_PORT: + /* We allow multiple switch actions, but switch is + * not compatible with either queue or drop + */ + if (abit++ && fs->action != FILTER_SWITCH) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "overlapping action specified"); + ret = ch_rte_parse_atype_switch(a, fs, e); + if (ret) + return ret; + fs->action = FILTER_SWITCH; + break; + default: + /* Not supported action : return error */ + return rte_flow_error_set(e, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + a, "Action not supported"); + } + } + + return 0; +} + +struct chrte_fparse parseitem[] = { + [RTE_FLOW_ITEM_TYPE_PHY_PORT] = { + .fptr = ch_rte_parsetype_port, + .dmask = &(const struct rte_flow_item_phy_port){ + .index = 0x7, + } + }, + + [RTE_FLOW_ITEM_TYPE_IPV4] = { + .fptr = ch_rte_parsetype_ipv4, + .dmask = &rte_flow_item_ipv4_mask, + }, + + [RTE_FLOW_ITEM_TYPE_IPV6] = { + .fptr = ch_rte_parsetype_ipv6, + .dmask = &rte_flow_item_ipv6_mask, + }, + + [RTE_FLOW_ITEM_TYPE_UDP] = { + .fptr = ch_rte_parsetype_udp, + .dmask = &rte_flow_item_udp_mask, + }, + + [RTE_FLOW_ITEM_TYPE_TCP] = { + .fptr = ch_rte_parsetype_tcp, + .dmask = &rte_flow_item_tcp_mask, + }, +}; 
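The table above keys parsers directly off the rte_flow item type, with a per-type default mask for when the user supplies none. The sketch below mirrors the dispatch cxgbe_rtef_parse_items() performs right after this table; example_dispatch() is illustrative and elides the bounds and repetition checks the real code adds.

static int example_dispatch(const struct rte_flow_item *i,
			    struct ch_filter_specification *fs,
			    struct rte_flow_error *e)
{
	const struct chrte_fparse *p = &parseitem[i->type];

	if (!p->fptr)	/* type without a parser => unsupported */
		return -ENOTSUP;

	/* Hand the per-type default mask to the parser; it is used
	 * only when the item carries no user-supplied mask.
	 */
	return p->fptr(p->dmask, i, fs, e);
}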
+ +static int +cxgbe_rtef_parse_items(struct rte_flow *flow, + const struct rte_flow_item items[], + struct rte_flow_error *e) +{ + struct adapter *adap = ethdev2adap(flow->dev); + const struct rte_flow_item *i; + char repeat[ARRAY_SIZE(parseitem)] = {0}; + + for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) { + struct chrte_fparse *idx = &flow->item_parser[i->type]; + int ret; + + if (i->type > ARRAY_SIZE(parseitem)) + return rte_flow_error_set(e, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + i, "Item not supported"); + + switch (i->type) { + case RTE_FLOW_ITEM_TYPE_VOID: + continue; + default: + /* check if item is repeated */ + if (repeat[i->type]) + return rte_flow_error_set(e, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, i, + "parse items cannot be repeated (except void)"); + repeat[i->type] = 1; + + /* validate the item */ + ret = cxgbe_validate_item(i, e); + if (ret) + return ret; + + if (!idx || !idx->fptr) { + return rte_flow_error_set(e, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, i, + "Item not supported"); + } else { + ret = idx->fptr(idx->dmask, i, &flow->fs, e); + if (ret) + return ret; + } + } + } + + cxgbe_fill_filter_region(adap, &flow->fs); + + return 0; +} + +static int +cxgbe_flow_parse(struct rte_flow *flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item item[], + const struct rte_flow_action action[], + struct rte_flow_error *e) +{ + int ret; + + /* parse user request into ch_filter_specification */ + ret = cxgbe_rtef_parse_attr(flow, attr, e); + if (ret) + return ret; + ret = cxgbe_rtef_parse_items(flow, item, e); + if (ret) + return ret; + return cxgbe_rtef_parse_actions(flow, action, e); +} + +static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct ch_filter_specification *fs = &flow->fs; + struct adapter *adap = ethdev2adap(dev); + struct tid_info *t = &adap->tids; + struct filter_ctx ctx; + unsigned int fidx; + int err; + + if (cxgbe_get_fidx(flow, &fidx)) + return -ENOMEM; + if (cxgbe_verify_fidx(flow, fidx, 0)) + return -1; + + t4_init_completion(&ctx.completion); + /* go create the filter */ + err = cxgbe_set_filter(dev, fidx, fs, &ctx); + if (err) { + dev_err(adap, "Error %d while creating filter.\n", err); + return err; + } + + /* Poll the FW for reply */ + err = cxgbe_poll_for_completion(&adap->sge.fw_evtq, + CXGBE_FLOW_POLL_US, + CXGBE_FLOW_POLL_CNT, + &ctx.completion); + if (err) { + dev_err(adap, "Filter set operation timed out (%d)\n", err); + return err; + } + if (ctx.result) { + dev_err(adap, "Hardware error %d while creating the filter.\n", + ctx.result); + return ctx.result; + } + + if (fs->cap) { /* to destroy the filter */ + flow->fidx = ctx.tid; + flow->f = lookup_tid(t, ctx.tid); + } else { + flow->fidx = fidx; + flow->f = &adap->tids.ftid_tab[fidx]; + } + + return 0; +} + +static struct rte_flow * +cxgbe_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item item[], + const struct rte_flow_action action[], + struct rte_flow_error *e) +{ + struct rte_flow *flow; + int ret; + + flow = t4_os_alloc(sizeof(struct rte_flow)); + if (!flow) { + rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Unable to allocate memory for" + " filter_entry"); + return NULL; + } + + flow->item_parser = parseitem; + flow->dev = dev; + + if (cxgbe_flow_parse(flow, attr, item, action, e)) { + t4_os_free(flow); + return NULL; + } + + /* go, interact with cxgbe_filter */ + ret = __cxgbe_flow_create(dev, flow); + if (ret) { + rte_flow_error_set(e, ret, 
RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Unable to create flow rule");
+		t4_os_free(flow);
+		return NULL;
+	}
+
+	flow->f->private = flow; /* Will be used during flush */
+
+	return flow;
+}
+
+static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+	struct adapter *adap = ethdev2adap(dev);
+	struct filter_entry *f = flow->f;
+	struct ch_filter_specification *fs;
+	struct filter_ctx ctx;
+	int err;
+
+	fs = &f->fs;
+	if (cxgbe_verify_fidx(flow, flow->fidx, 1))
+		return -1;
+
+	t4_init_completion(&ctx.completion);
+	err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
+	if (err) {
+		dev_err(adap, "Error %d while deleting filter.\n", err);
+		return err;
+	}
+
+	/* Poll the FW for reply */
+	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
+					CXGBE_FLOW_POLL_US,
+					CXGBE_FLOW_POLL_CNT,
+					&ctx.completion);
+	if (err) {
+		dev_err(adap, "Filter delete operation timed out (%d)\n", err);
+		return err;
+	}
+	if (ctx.result) {
+		dev_err(adap, "Hardware error %d while deleting the filter.\n",
+			ctx.result);
+		return ctx.result;
+	}
+
+	return 0;
+}
+
+static int
+cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+		   struct rte_flow_error *e)
+{
+	int ret;
+
+	ret = __cxgbe_flow_destroy(dev, flow);
+	if (ret)
+		return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+					  flow, "error destroying filter.");
+	t4_os_free(flow);
+	return 0;
+}
+
+static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
+			      u64 *byte_count)
+{
+	struct adapter *adap = ethdev2adap(flow->dev);
+	struct ch_filter_specification fs = flow->f->fs;
+	unsigned int fidx = flow->fidx;
+	int ret = 0;
+
+	ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
+	if (ret)
+		return ret;
+	return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
+}
+
+static int
+cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+		 const struct rte_flow_action *action, void *data,
+		 struct rte_flow_error *e)
+{
+	struct ch_filter_specification fs;
+	struct rte_flow_query_count *c;
+	struct filter_entry *f;
+	int ret;
+
+	RTE_SET_USED(dev);
+
+	f = flow->f;
+	fs = f->fs;
+
+	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
+		return rte_flow_error_set(e, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "only count supported for query");
+
+	/*
+	 * This is a valid operation, since we are allowed to do chelsio
+	 * specific operations in rte side of our code but not vice-versa
+	 *
+	 * So, fs can be queried/modified here BUT rte_flow_query_count
+	 * cannot be worked on by the lower layer since we want to maintain
+	 * it as rte_flow agnostic.
+	 */
+	if (!fs.hitcnts)
+		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+					  &fs, "filter hit counters were not"
+					  " enabled during filter creation");
+
+	c = (struct rte_flow_query_count *)data;
+	ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
+	if (ret)
+		return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
+					  f, "cxgbe pmd failed to"
+					  " perform query");
+
+	/* Query was successful */
+	c->bytes_set = 1;
+	c->hits_set = 1;
+
+	return 0; /* success / partial_success */
+}
+
+static int
+cxgbe_flow_validate(struct rte_eth_dev *dev,
+		    const struct rte_flow_attr *attr,
+		    const struct rte_flow_item item[],
+		    const struct rte_flow_action action[],
+		    struct rte_flow_error *e)
+{
+	struct adapter *adap = ethdev2adap(dev);
+	struct rte_flow *flow;
+	unsigned int fidx;
+	int ret;
+
+	flow = t4_os_alloc(sizeof(struct rte_flow));
+	if (!flow)
+		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+					  NULL,
+					  "Unable to allocate memory for filter_entry");
+
+	flow->item_parser = parseitem;
+	flow->dev = dev;
+
+	ret = cxgbe_flow_parse(flow, attr, item, action, e);
+	if (ret) {
+		t4_os_free(flow);
+		return ret;
+	}
+
+	if (validate_filter(adap, &flow->fs)) {
+		t4_os_free(flow);
+		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+					  NULL,
+					  "validation failed. Check f/w config file.");
+	}
+
+	if (cxgbe_get_fidx(flow, &fidx)) {
+		t4_os_free(flow);
+		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+					  NULL, "no memory in tcam.");
+	}
+
+	if (cxgbe_verify_fidx(flow, fidx, 0)) {
+		t4_os_free(flow);
+		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+					  NULL, "validation failed");
+	}
+
+	t4_os_free(flow);
+	return 0;
+}
+
+/*
+ * @ret : == 0 filter destroyed successfully
+ *         < 0 error destroying filter
+ *        == 1 filter not active / not found
+ */
+static int
+cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
+		      struct rte_flow_error *e)
+{
+	if (f && (f->valid || f->pending) &&
+	    f->dev == dev && /* Only if user has asked for this port */
+	    f->private) /* We (rte_flow) created this filter */
+		return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
+					  e);
+	return 1;
+}
+
+static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
+{
+	struct adapter *adap = ethdev2adap(dev);
+	unsigned int i;
+	int ret = 0;
+
+	if (adap->tids.ftid_tab) {
+		struct filter_entry *f = &adap->tids.ftid_tab[0];
+
+		for (i = 0; i < adap->tids.nftids; i++, f++) {
+			ret = cxgbe_check_n_destroy(f, dev, e);
+			if (ret < 0)
+				goto out;
+		}
+	}
+
+	if (is_hashfilter(adap) && adap->tids.tid_tab) {
+		struct filter_entry *f;
+
+		for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
+			f = (struct filter_entry *)adap->tids.tid_tab[i];
+
+			ret = cxgbe_check_n_destroy(f, dev, e);
+			if (ret < 0)
+				goto out;
+		}
+	}
+
+out:
+	return ret >= 0 ?
0 : ret; +} + +static const struct rte_flow_ops cxgbe_flow_ops = { + .validate = cxgbe_flow_validate, + .create = cxgbe_flow_create, + .destroy = cxgbe_flow_destroy, + .flush = cxgbe_flow_flush, + .query = cxgbe_flow_query, + .isolate = NULL, +}; + +int +cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + RTE_SET_USED(dev); + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &cxgbe_flow_ops; + break; + default: + ret = -ENOTSUP; + break; + } + return ret; +} diff --git a/drivers/net/cxgbe/cxgbe_flow.h b/drivers/net/cxgbe/cxgbe_flow.h new file mode 100644 index 00000000..0f750474 --- /dev/null +++ b/drivers/net/cxgbe/cxgbe_flow.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ +#ifndef _CXGBE_FLOW_H_ +#define _CXGBE_FLOW_H_ + +#include +#include "cxgbe_filter.h" +#include "cxgbe.h" + +#define CXGBE_FLOW_POLL_US 10 +#define CXGBE_FLOW_POLL_CNT 10 + +struct chrte_fparse { + int (*fptr)(const void *mask, /* currently supported mask */ + const struct rte_flow_item *item, /* user input */ + struct ch_filter_specification *fs, /* where to parse */ + struct rte_flow_error *e); + const void *dmask; /* Specify what is supported by chelsio by default*/ +}; + +struct rte_flow { + struct filter_entry *f; + struct ch_filter_specification fs; /* temp, to create filter */ + struct chrte_fparse *item_parser; + /* + * filter_entry doesn't store user priority. + * Post creation of filter this will indicate the + * flow index (fidx) for both hash and tcam filters + */ + unsigned int fidx; + struct rte_eth_dev *dev; +}; + +int +cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); + +#endif /* _CXGBE_FLOW_H_ */ diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c index 28db6c06..c3938e8d 100644 --- a/drivers/net/cxgbe/cxgbe_main.c +++ b/drivers/net/cxgbe/cxgbe_main.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
  */
 
 #include
@@ -57,14 +29,31 @@
 #include
 #include
 #include
-#include
 #include
 #include
+#include
 
 #include "common.h"
 #include "t4_regs.h"
 #include "t4_msg.h"
 #include "cxgbe.h"
+#include "clip_tbl.h"
+
+/**
+ * Allocate a chunk of memory. The allocated memory is cleared.
+ */
+void *t4_alloc_mem(size_t size)
+{
+	return rte_zmalloc(NULL, size, 0);
+}
+
+/**
+ * Free memory allocated through t4_alloc_mem().
+ */
+void t4_free_mem(void *addr)
+{
+	rte_free(addr);
+}
 
 /*
  * Response queue handler for the FW event queue.
@@ -98,6 +87,18 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
 		const struct cpl_fw6_msg *msg = (const void *)rsp;
 
 		t4_handle_fw_rpl(q->adapter, msg->data);
+	} else if (opcode == CPL_ABORT_RPL_RSS) {
+		const struct cpl_abort_rpl_rss *p = (const void *)rsp;
+
+		hash_del_filter_rpl(q->adapter, p);
+	} else if (opcode == CPL_SET_TCB_RPL) {
+		const struct cpl_set_tcb_rpl *p = (const void *)rsp;
+
+		filter_rpl(q->adapter, p);
+	} else if (opcode == CPL_ACT_OPEN_RPL) {
+		const struct cpl_act_open_rpl *p = (const void *)rsp;
+
+		hash_filter_rpl(q->adapter, p);
 	} else {
 		dev_err(adapter, "unexpected CPL %#x on FW event queue\n",
 			opcode);
@@ -106,6 +107,79 @@ out:
 	return 0;
 }
 
+/**
+ * Setup sge control queues to pass control information.
+ */
+int setup_sge_ctrl_txq(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	int err = 0, i = 0;
+
+	for_each_port(adapter, i) {
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct sge_ctrl_txq *q = &s->ctrlq[i];
+
+		q->q.size = 1024;
+		err = t4_sge_alloc_ctrl_txq(adapter, q,
+					    adapter->eth_dev, i,
+					    s->fw_evtq.cntxt_id,
+					    rte_socket_id());
+		if (err) {
+			dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
+				err);
+			goto out;
+		}
+		snprintf(name, sizeof(name), "cxgbe_ctrl_pool_%d", i);
+		q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
+						     RTE_CACHE_LINE_SIZE,
+						     RTE_MBUF_PRIV_ALIGN,
+						     RTE_MBUF_DEFAULT_BUF_SIZE,
+						     SOCKET_ID_ANY);
+		if (!q->mb_pool) {
+			dev_err(adapter, "Can't create ctrl pool for port: %d",
+				i);
+			err = -ENOMEM;
+			goto out;
+		}
+	}
+	return 0;
+out:
+	t4_free_sge_resources(adapter);
+	return err;
+}
+
+/**
+ * cxgbe_poll_for_completion: Poll rxq for completion
+ * @q: rxq to poll
+ * @us: microseconds to delay
+ * @cnt: number of times to poll
+ * @c: completion to check for 'done' status
+ *
+ * Polls the rxq for replies until completion is done or the count
+ * expires.
+ */ +int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us, + unsigned int cnt, struct t4_completion *c) +{ + unsigned int i; + unsigned int work_done, budget = 4; + + if (!c) + return -EINVAL; + + for (i = 0; i < cnt; i++) { + cxgbe_poll(q, NULL, budget, &work_done); + t4_os_lock(&c->lock); + if (c->done) { + t4_os_unlock(&c->lock); + return 0; + } + t4_os_unlock(&c->lock); + udelay(us); + } + return -ETIMEDOUT; +} + int setup_sge_fwevtq(struct adapter *adapter) { struct sge *s = &adapter->sge; @@ -197,17 +271,186 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, return 0; } +/** + * Allocate an active-open TID and set it to the supplied value. + */ +int cxgbe_alloc_atid(struct tid_info *t, void *data) +{ + int atid = -1; + + t4_os_lock(&t->atid_lock); + if (t->afree) { + union aopen_entry *p = t->afree; + + atid = p - t->atid_tab; + t->afree = p->next; + p->data = data; + t->atids_in_use++; + } + t4_os_unlock(&t->atid_lock); + return atid; +} + +/** + * Release an active-open TID. + */ +void cxgbe_free_atid(struct tid_info *t, unsigned int atid) +{ + union aopen_entry *p = &t->atid_tab[atid]; + + t4_os_lock(&t->atid_lock); + p->next = t->afree; + t->afree = p; + t->atids_in_use--; + t4_os_unlock(&t->atid_lock); +} + +/** + * Populate a TID_RELEASE WR. Caller must properly size the mbuf. + */ +static void mk_tid_release(struct rte_mbuf *mbuf, unsigned int tid) +{ + struct cpl_tid_release *req; + + req = rte_pktmbuf_mtod(mbuf, struct cpl_tid_release *); + INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid); +} + +/** + * Release a TID and inform HW. If allocating the release message fails, + * the HW notification is skipped. + */ +void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid, + unsigned short family) +{ + struct rte_mbuf *mbuf; + struct adapter *adap = container_of(t, struct adapter, tids); + + WARN_ON(tid >= t->ntids); + + if (t->tid_tab[tid]) { + t->tid_tab[tid] = NULL; + rte_atomic32_dec(&t->conns_in_use); + if (t->hash_base && tid >= t->hash_base) { + if (family == FILTER_TYPE_IPV4) + rte_atomic32_dec(&t->hash_tids_in_use); + } else { + if (family == FILTER_TYPE_IPV4) + rte_atomic32_dec(&t->tids_in_use); + } + } + + mbuf = rte_pktmbuf_alloc((&adap->sge.ctrlq[chan])->mb_pool); + if (mbuf) { + mbuf->data_len = sizeof(struct cpl_tid_release); + mbuf->pkt_len = mbuf->data_len; + mk_tid_release(mbuf, tid); + t4_mgmt_tx(&adap->sge.ctrlq[chan], mbuf); + } +} + +/** + * Insert a TID. + */ +void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid, + unsigned short family) +{ + t->tid_tab[tid] = data; + if (t->hash_base && tid >= t->hash_base) { + if (family == FILTER_TYPE_IPV4) + rte_atomic32_inc(&t->hash_tids_in_use); + } else { + if (family == FILTER_TYPE_IPV4) + rte_atomic32_inc(&t->tids_in_use); + } + + rte_atomic32_inc(&t->conns_in_use); +} + +/** + * Free TID tables. + */ +static void tid_free(struct tid_info *t) +{ + if (t->tid_tab) { + if (t->ftid_bmap) + rte_bitmap_free(t->ftid_bmap); + + if (t->ftid_bmap_array) + t4_os_free(t->ftid_bmap_array); + + t4_os_free(t->tid_tab); + } + + memset(t, 0, sizeof(struct tid_info)); +} + +/** + * Allocate and initialize the TID tables. Returns 0 on success.
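+ * + * Layout sketch (inferred from the size computation below, for + * illustration only): the three tables are carved out of one + * t4_os_alloc() region, + * + * tid_tab[ntids] | atid_tab[natids] | ftid_tab[nftids] + * + * with ftid_bmap tracking which filter TIDs are free.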
+ */ +static int tid_init(struct tid_info *t) +{ + size_t size; + unsigned int ftid_bmap_size; + unsigned int natids = t->natids; + unsigned int max_ftids = t->nftids; + + ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids); + size = t->ntids * sizeof(*t->tid_tab) + + max_ftids * sizeof(*t->ftid_tab) + + natids * sizeof(*t->atid_tab); + + t->tid_tab = t4_os_alloc(size); + if (!t->tid_tab) + return -ENOMEM; + + t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; + t->ftid_tab = (struct filter_entry *)&t->atid_tab[t->natids]; + t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size); + if (!t->ftid_bmap_array) { + tid_free(t); + return -ENOMEM; + } + + t4_os_lock_init(&t->atid_lock); + t4_os_lock_init(&t->ftid_lock); + + t->afree = NULL; + t->atids_in_use = 0; + rte_atomic32_init(&t->tids_in_use); + rte_atomic32_set(&t->tids_in_use, 0); + rte_atomic32_init(&t->conns_in_use); + rte_atomic32_set(&t->conns_in_use, 0); + + /* Set up the free list for atid_tab and clear the ftid bitmap. */ + if (natids) { + while (--natids) + t->atid_tab[natids - 1].next = &t->atid_tab[natids]; + t->afree = t->atid_tab; + } + + t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array, + ftid_bmap_size); + if (!t->ftid_bmap) { + tid_free(t); + return -ENOMEM; + } + + return 0; +} + static inline bool is_x_1g_port(const struct link_config *lc) { - return (lc->supported & FW_PORT_CAP_SPEED_1G) != 0; + return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0; } static inline bool is_x_10g_port(const struct link_config *lc) { unsigned int speeds, high_speeds; - speeds = V_FW_PORT_CAP_SPEED(G_FW_PORT_CAP_SPEED(lc->supported)); - high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G); + speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps)); + high_speeds = speeds & + ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G); return high_speeds != 0; } @@ -270,7 +513,7 @@ void cfg_queues(struct rte_eth_dev *eth_dev) * We default up to # of cores queues per 1G/10G port. */ if (nb_ports) - q_per_port = (MAX_ETH_QSETS - + q_per_port = (s->max_ethqsets - (adap->params.nports - nb_ports)) / nb_ports; @@ -294,8 +537,6 @@ void cfg_queues(struct rte_eth_dev *eth_dev) qidx += pi->n_rx_qsets; } - s->max_ethqsets = qidx; - for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { struct sge_eth_rxq *r = &s->ethrxq[i]; @@ -345,14 +586,17 @@ static void setup_memwin(struct adapter *adap) MEMWIN_NIC)); } -static int init_rss(struct adapter *adap) +int init_rss(struct adapter *adap) { unsigned int i; - int err; - err = t4_init_rss_mode(adap, adap->mbox); - if (err) - return err; + if (is_pf4(adap)) { + int err; + + err = t4_init_rss_mode(adap, adap->mbox); + if (err) + return err; + } for_each_port(adap, i) { struct port_info *pi = adap2pinfo(adap, i); @@ -360,6 +604,8 @@ static int init_rss(struct adapter *adap) pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0); if (!pi->rss) return -ENOMEM; + + pi->rss_hf = CXGBE_RSS_HF_ALL; } return 0; } @@ -367,7 +613,7 @@ static int init_rss(struct adapter *adap) /** * Dump basic information about the adapter. */ -static void print_adapter_info(struct adapter *adap) +void print_adapter_info(struct adapter *adap) { /** * Hardware/Firmware/etc. Version/Revision IDs.
@@ -375,27 +621,29 @@ static void print_adapter_info(struct adapter *adap) t4_dump_version_info(adap); } -static void print_port_info(struct adapter *adap) +void print_port_info(struct adapter *adap) { int i; char buf[80]; struct rte_pci_addr *loc = &adap->pdev->addr; for_each_port(adap, i) { - const struct port_info *pi = &adap->port[i]; + const struct port_info *pi = adap2pinfo(adap, i); char *bufp = buf; - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M) bufp += sprintf(bufp, "100M/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G) bufp += sprintf(bufp, "1G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G) bufp += sprintf(bufp, "10G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G) bufp += sprintf(bufp, "25G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G) bufp += sprintf(bufp, "40G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G) + bufp += sprintf(bufp, "50G/"); + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G) bufp += sprintf(bufp, "100G/"); if (bufp != buf) --bufp; @@ -412,6 +660,84 @@ static void print_port_info(struct adapter *adap) } } +static int +check_devargs_handler(__rte_unused const char *key, const char *value, + __rte_unused void *opaque) +{ + if (strcmp(value, "1")) + return -1; + + return 0; +} + +int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key) +{ + struct rte_kvargs *kvlist; + + if (!devargs) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (!kvlist) + return 0; + + if (!rte_kvargs_count(kvlist, key)) { + rte_kvargs_free(kvlist); + return 0; + } + + if (rte_kvargs_process(kvlist, key, + check_devargs_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); + + return 1; +} + +static void configure_vlan_types(struct adapter *adapter) +{ + struct rte_pci_device *pdev = adapter->pdev; + int i; + + for_each_port(adapter, i) { + /* OVLAN Type 0x88a8 */ + t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(M_OVLAN_ETYPE), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(0x88a8)); + /* OVLAN Type 0x9100 */ + t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(M_OVLAN_ETYPE), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(0x9100)); + /* OVLAN Type 0x8100 */ + t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN2), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(M_OVLAN_ETYPE), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(0x8100)); + + /* IVLAN Type 0x8100 */ + t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i), + V_IVLAN_ETYPE(M_IVLAN_ETYPE), + V_IVLAN_ETYPE(0x8100)); + + t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i), + F_OVLAN_EN0 | F_OVLAN_EN1 | + F_OVLAN_EN2 | F_IVLAN_EN, + F_OVLAN_EN0 | F_OVLAN_EN1 | + F_OVLAN_EN2 | F_IVLAN_EN); + } + + if (cxgbe_get_devargs(pdev->device.devargs, CXGBE_DEVARG_KEEP_OVLAN)) + t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG, + V_RM_OVLAN(1), V_RM_OVLAN(0)); +} + static void configure_pcie_ext_tag(struct adapter *adapter) { u16 v; @@ -442,6 +768,40 @@ static void configure_pcie_ext_tag(struct adapter *adapter) } } +/* Figure out how many Queue Sets we can support */ +void
configure_max_ethqsets(struct adapter *adapter) +{ + unsigned int ethqsets; + + /* + * We need to reserve an Ingress Queue for the Asynchronous Firmware + * Event Queue. + * + * For each Queue Set, we'll need the ability to allocate two Egress + * Contexts -- one for the Ingress Queue Free List and one for the TX + * Ethernet Queue. + */ + if (is_pf4(adapter)) { + struct pf_resources *pfres = &adapter->params.pfres; + + ethqsets = pfres->niqflint - 1; + if (pfres->neq < ethqsets * 2) + ethqsets = pfres->neq / 2; + } else { + struct vf_resources *vfres = &adapter->params.vfres; + + ethqsets = vfres->niqflint - 1; + if (vfres->nethctrl != ethqsets) + ethqsets = min(vfres->nethctrl, ethqsets); + if (vfres->neq < ethqsets * 2) + ethqsets = vfres->neq / 2; + } + + if (ethqsets > MAX_ETH_QSETS) + ethqsets = MAX_ETH_QSETS; + adapter->sge.max_ethqsets = ethqsets; +} + /* * Tweak configuration based on system architecture, etc. Most of these have * defaults assigned to them by Firmware Configuration Files (if we're using @@ -580,8 +940,7 @@ static int adap_init0_config(struct adapter *adapter, int reset) * This will allow the firmware to optimize aspects of the hardware * configuration which will result in improved performance. */ - caps_cmd.niccaps &= cpu_to_be16(~(FW_CAPS_CONFIG_NIC_HASHFILTER | - FW_CAPS_CONFIG_NIC_ETHOFLD)); + caps_cmd.niccaps &= cpu_to_be16(~FW_CAPS_CONFIG_NIC_ETHOFLD); caps_cmd.toecaps = 0; caps_cmd.iscsicaps = 0; caps_cmd.rdmacaps = 0; @@ -648,6 +1007,7 @@ bye: static int adap_init0(struct adapter *adap) { + struct fw_caps_config_cmd caps_cmd; int ret = 0; u32 v, port_vec; enum dev_state state; @@ -723,6 +1083,17 @@ static int adap_init0(struct adapter *adap) goto bye; } + /* Now that we've successfully configured and initialized the adapter + * (or found it already initialized), we can ask the Firmware what + * resources it has provisioned for us. + */ + ret = t4_get_pfres(adap); + if (ret) { + dev_err(adap->pdev_dev, + "Unable to retrieve resource provisioning info\n"); + goto bye; + } + /* Find out what ports are available to us. */ v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC); @@ -764,6 +1135,50 @@ static int adap_init0(struct adapter *adap) V_FW_PARAMS_PARAM_Y(0) | \ V_FW_PARAMS_PARAM_Z(0)) + params[0] = FW_PARAM_PFVF(FILTER_START); + params[1] = FW_PARAM_PFVF(FILTER_END); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); + if (ret < 0) + goto bye; + adap->tids.ftid_base = val[0]; + adap->tids.nftids = val[1] - val[0] + 1; + + params[0] = FW_PARAM_PFVF(CLIP_START); + params[1] = FW_PARAM_PFVF(CLIP_END); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); + if (ret < 0) + goto bye; + adap->clipt_start = val[0]; + adap->clipt_end = val[1]; + + /* + * Get device capabilities so we can determine what resources we need + * to manage. 
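+ * (For example, the FW_CAPS_CONFIG_NIC_HASHFILTER bit read back here is + * what gates init_hash_filter() on T6 adapters just below.)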
+ */ + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ); + caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), + &caps_cmd); + if (ret < 0) + goto bye; + + if ((caps_cmd.niccaps & cpu_to_be16(FW_CAPS_CONFIG_NIC_HASHFILTER)) && + is_t6(adap->params.chip)) { + if (init_hash_filter(adap) < 0) + goto bye; + } + + /* query tid-related parameters */ + params[0] = FW_PARAM_DEV(NTID); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, + params, val); + if (ret < 0) + goto bye; + adap->tids.ntids = val[0]; + adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); + /* If we're running on newer firmware, let it know that we're * prepared to deal with encapsulated CPL messages. Older * firmware won't understand this and we'll just get @@ -828,6 +1243,8 @@ static int adap_init0(struct adapter *adap) t4_init_sge_params(adap); t4_init_tp_params(adap); configure_pcie_ext_tag(adap); + configure_vlan_types(adap); + configure_max_ethqsets(adap); adap->params.drv_memwin = MEMWIN_NIC; adap->flags |= FW_OK; @@ -860,7 +1277,7 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id) NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" }; - const struct port_info *pi = &adap->port[port_id]; + const struct port_info *pi = adap2pinfo(adap, port_id); if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) dev_info(adap, "Port%d: port module unplugged\n", pi->port_id); @@ -881,6 +1298,18 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id) pi->port_id, pi->mod_type); } +inline bool force_linkup(struct adapter *adap) +{ + struct rte_pci_device *pdev = adap->pdev; + + if (is_pf4(adap)) + return false; /* force_linkup not required for PF driver */ + if (!cxgbe_get_devargs(pdev->device.devargs, + CXGBE_DEVARG_FORCE_LINK_UP)) + return false; + return true; +} + /** * link_start - enable a port * @dev: the port to enable @@ -912,7 +1341,7 @@ int link_start(struct port_info *pi) ret = 0; } } - if (ret == 0) + if (ret == 0 && is_pf4(adapter)) ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan, &pi->link_cfg); if (ret == 0) { @@ -926,18 +1355,80 @@ ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid, true, true, false); } + + if (ret == 0 && force_linkup(adapter)) + pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP; return ret; } /** - * cxgb4_write_rss - write the RSS table for a given port + * cxgbe_write_rss_conf - write the RSS hash configuration for a given port + * @pi: the port + * @rss_hf: Hash configuration to apply + */ +int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf) +{ + struct adapter *adapter = pi->adapter; + const struct sge_eth_rxq *rxq; + u64 flags = 0; + u16 rss; + int err; + + /* Should never be called before setting up sge eth rx queues */ + if (!(adapter->flags & FULL_INIT_DONE)) { + dev_err(adapter, "%s: No RXQs available on port %d\n", + __func__, pi->port_id); + return -EINVAL; + } + + /* Don't allow unsupported hash functions */ + if (rss_hf & ~CXGBE_RSS_HF_ALL) + return -EINVAL; + + if (rss_hf & CXGBE_RSS_HF_IPV4_MASK) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; + + if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; + + if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | + F_FW_RSS_VI_CONFIG_CMD_UDPEN; + + if (rss_hf & CXGBE_RSS_HF_IPV6_MASK) + flags |=
F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; + + if (rss_hf & CXGBE_RSS_HF_TCP_IPV6_MASK) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | + F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; + + if (rss_hf & CXGBE_RSS_HF_UDP_IPV6_MASK) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | + F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | + F_FW_RSS_VI_CONFIG_CMD_UDPEN; + + rxq = &adapter->sge.ethrxq[pi->first_qset]; + rss = rxq[0].rspq.abs_id; + + /* If Tunnel All Lookup isn't specified in the global RSS + * Configuration, then we need to specify a default Ingress + * Queue for any ingress packets which aren't hashed. We'll + * use our first ingress queue ... + */ + err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid, + flags, rss); + return err; +} + +/** + * cxgbe_write_rss - write the RSS table for a given port * @pi: the port * @queues: array of queue indices for RSS * * Sets up the portion of the HW RSS table for the port's VI to distribute * packets to the Rx queues in @queues. */ -int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) +int cxgbe_write_rss(const struct port_info *pi, const u16 *queues) { u16 *rss; int i, err; @@ -958,20 +1449,6 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0, pi->rss_size, rss, pi->rss_size); - /* - * If Tunnel All Lookup isn't specified in the global RSS - * Configuration, then we need to specify a default Ingress - * Queue for any ingress packets which aren't hashed. We'll - * use our first ingress queue ... - */ - if (!err) - err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid, - F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | - F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | - F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | - F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | - F_FW_RSS_VI_CONFIG_CMD_UDPEN, - rss[0]); rte_free(rss); return err; } @@ -1001,7 +1478,11 @@ int setup_rss(struct port_info *pi) for (j = 0; j < pi->rss_size; j++) pi->rss[j] = j % pi->n_rx_qsets; - err = cxgb4_write_rss(pi, pi->rss); + err = cxgbe_write_rss(pi, pi->rss); + if (err) + return err; + + err = cxgbe_write_rss_conf(pi, pi->rss_hf); if (err) return err; pi->flags |= PORT_RSS_DONE; @@ -1016,7 +1497,8 @@ int setup_rss(struct port_info *pi) static void enable_rx(struct adapter *adap, struct sge_rspq *q) { /* 0-increment GTS to start the timer and enable interrupts */ - t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS), + t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) : + T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS, V_SEINTARM(q->intr_params) | V_INGRESSQID(q->cntxt_id)); } @@ -1051,7 +1533,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type, #define FW_CAPS_TO_SPEED(__fw_name) \ do { \ - if (fw_caps & FW_PORT_CAP_ ## __fw_name) \ + if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \ SET_SPEED(__fw_name); \ } while (0) @@ -1106,6 +1588,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type, case FW_PORT_TYPE_CR4_QSFP: FW_CAPS_TO_SPEED(SPEED_25G); FW_CAPS_TO_SPEED(SPEED_40G); + FW_CAPS_TO_SPEED(SPEED_50G); FW_CAPS_TO_SPEED(SPEED_100G); break; @@ -1128,13 +1611,37 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps) { *speed_caps = 0; - fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.supported, + fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps, speed_caps); - if (!(pi->link_cfg.supported & FW_PORT_CAP_ANEG)) + if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)) *speed_caps |= ETH_LINK_SPEED_FIXED; } +/** + * cxgbe_set_link_status - Set device link up or down. 
+ * @pi: Underlying port's info + * @status: 0 - down, 1 - up + * + * Set the device link up or down. + */ +int cxgbe_set_link_status(struct port_info *pi, bool status) +{ + struct adapter *adapter = pi->adapter; + int err = 0; + + err = t4_enable_vi(adapter, adapter->mbox, pi->viid, status, status); + if (err) { + dev_err(adapter, "%s: t4_enable_vi failed: %d\n", __func__, err); + return err; + } + + if (!status) + t4_reset_link_config(adapter, pi->pidx); + + return 0; +} + /** * cxgbe_up - enable the adapter * @adap: adapter being enabled @@ -1147,7 +1654,8 @@ int cxgbe_up(struct adapter *adap) { enable_rx(adap, &adap->sge.fw_evtq); t4_sge_tx_monitor_start(adap); - t4_intr_enable(adap); + if (is_pf4(adap)) + t4_intr_enable(adap); adap->flags |= FULL_INIT_DONE; /* TODO: deadman watchdog ?? */ @@ -1159,17 +1667,7 @@ */ int cxgbe_down(struct port_info *pi) { - struct adapter *adapter = pi->adapter; - int err = 0; - - err = t4_enable_vi(adapter, adapter->mbox, pi->viid, false, false); - if (err) { - dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err); - return err; - } - - t4_reset_link_config(adapter, pi->port_id); - return 0; + return cxgbe_set_link_status(pi, false); } /* @@ -1181,7 +1679,10 @@ void cxgbe_close(struct adapter *adapter) { int i; if (adapter->flags & FULL_INIT_DONE) { - t4_intr_disable(adapter); + if (is_pf4(adapter)) + t4_intr_disable(adapter); + tid_free(&adapter->tids); + t4_cleanup_clip_tbl(adapter); t4_sge_tx_monitor_stop(adapter); t4_free_sge_resources(adapter); for_each_port(adapter, i) { @@ -1190,11 +1691,16 @@ void cxgbe_close(struct adapter *adapter) t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid); rte_free(pi->eth_dev->data->mac_addrs); + /* Skip first port since it'll be freed by DPDK stack */ + if (i) { + rte_free(pi->eth_dev->data->dev_private); + rte_eth_dev_release_port(pi->eth_dev); + } } adapter->flags &= ~FULL_INIT_DONE; } - if (adapter->flags & FW_OK) + if (is_pf4(adapter) && (adapter->flags & FW_OK)) t4_fw_bye(adapter, adapter->mbox); } @@ -1220,6 +1726,7 @@ int cxgbe_probe(struct adapter *adapter) t4_os_lock_init(&adapter->mbox_lock); TAILQ_INIT(&adapter->mbox_list); + t4_os_lock_init(&adapter->win0_lock); err = t4_prep_adapter(adapter); if (err) @@ -1265,21 +1772,16 @@ int cxgbe_probe(struct adapter *adapter) } for_each_port(adapter, i) { - char name[RTE_ETH_NAME_MAX_LEN]; - struct rte_eth_dev_data *data = NULL; const unsigned int numa_node = rte_socket_id(); + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev *eth_dev; - pi = &adapter->port[i]; - pi->adapter = adapter; - pi->xact_addr_filt = -1; - pi->port_id = i; - - snprintf(name, sizeof(name), "cxgbe%d", - adapter->eth_dev->data->port_id + i); + snprintf(name, sizeof(name), "%s_%d", + adapter->pdev->device.name, i); if (i == 0) { /* First port is already allocated by DPDK */ - pi->eth_dev = adapter->eth_dev; + eth_dev = adapter->eth_dev; goto allocate_mac; } @@ -1289,21 +1791,26 @@ int cxgbe_probe(struct adapter *adapter) */ /* reserve an ethdev entry */ - pi->eth_dev = rte_eth_dev_allocate(name); - if (!pi->eth_dev) + eth_dev = rte_eth_dev_allocate(name); + if (!eth_dev) goto out_free; - data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); - if (!data) + eth_dev->data->dev_private = + rte_zmalloc_socket(name, sizeof(struct port_info), + RTE_CACHE_LINE_SIZE, numa_node); + if (!eth_dev->data->dev_private) goto out_free; - data->port_id = adapter->eth_dev->data->port_id + i; - - pi->eth_dev->data = data; - allocate_mac: + pi =
(struct port_info *)eth_dev->data->dev_private; + adapter->port[i] = pi; + pi->eth_dev = eth_dev; + pi->adapter = adapter; + pi->xact_addr_filt = -1; + pi->port_id = i; + pi->pidx = i; + pi->eth_dev->device = &adapter->pdev->device; - pi->eth_dev->data->dev_private = pi; pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops; pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst; pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst; @@ -1318,6 +1825,11 @@ allocate_mac: err = -1; goto out_free; } + + if (i > 0) { + /* First port will be notified by upper layer */ + rte_eth_dev_probing_finish(eth_dev); + } } if (adapter->flags & FW_OK) { @@ -1334,6 +1846,35 @@ allocate_mac: print_adapter_info(adapter); print_port_info(adapter); + adapter->clipt = t4_init_clip_tbl(adapter->clipt_start, + adapter->clipt_end); + if (!adapter->clipt) { + /* We tolerate a lack of clip_table, giving up some + * functionality + */ + dev_warn(adapter, "could not allocate CLIP. Continuing\n"); + } + + if (tid_init(&adapter->tids) < 0) { + /* Disable filtering support */ + dev_warn(adapter, "could not allocate TID table, " + "filter support disabled. Continuing\n"); + } + + if (is_hashfilter(adapter)) { + if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) { + u32 hash_base, hash_reg; + + hash_reg = A_LE_DB_TID_HASHBASE; + hash_base = t4_read_reg(adapter, hash_reg); + adapter->tids.hash_base = hash_base / 4; + } + } else { + /* Disable hash filtering support */ + dev_warn(adapter, + "Maskless filter support disabled. Continuing\n"); + } + err = init_rss(adapter); if (err) goto out_free; @@ -1349,8 +1890,11 @@ out_free: /* Skip first port since it'll be de-allocated by DPDK */ if (i == 0) continue; - if (pi->eth_dev->data) - rte_free(pi->eth_dev->data); + if (pi->eth_dev) { + if (pi->eth_dev->data->dev_private) + rte_free(pi->eth_dev->data->dev_private); + rte_eth_dev_release_port(pi->eth_dev); + } } if (adapter->flags & FW_OK) diff --git a/drivers/net/cxgbe/cxgbe_ofld.h b/drivers/net/cxgbe/cxgbe_ofld.h new file mode 100644 index 00000000..50931ed0 --- /dev/null +++ b/drivers/net/cxgbe/cxgbe_ofld.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef _CXGBE_OFLD_H_ +#define _CXGBE_OFLD_H_ + +#include + +#include "cxgbe_filter.h" + +#define INIT_TP_WR(w, tid) do { \ + (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_TP_WR) | \ + V_FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \ + (w)->wr.wr_mid = cpu_to_be32( \ + V_FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \ + V_FW_WR_FLOWID(tid)); \ + (w)->wr.wr_lo = cpu_to_be64(0); \ +} while (0) + +#define INIT_TP_WR_MIT_CPL(w, cpl, tid) do { \ + INIT_TP_WR(w, tid); \ + OPCODE_TID(w) = cpu_to_be32(MK_OPCODE_TID(cpl, tid)); \ +} while (0) + +#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \ + (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) | \ + V_FW_WR_ATOMIC(atomic)); \ + (w)->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \ + V_FW_WR_FLOWID(tid)); \ + (w)->wr.wr_lo = cpu_to_be64(0); \ +} while (0) + +/* + * Max # of ATIDs. The absolute HW max is 16K but we keep it lower. + */ +#define MAX_ATIDS 8192U + +union aopen_entry { + void *data; + union aopen_entry *next; +}; + +/* + * Holds the size, base address, free list start, etc of filter TID. + * The tables themselves are allocated dynamically. 
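+ * + * Roughly: LE TCAM filters draw from ftid_tab/ftid_bmap, offloaded + * connections are tracked in tid_tab, and active-open requests recycle + * atid_tab entries through the afree list.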
+ */ +struct tid_info { + void **tid_tab; + unsigned int ntids; + struct filter_entry *ftid_tab; /* Normal filters */ + union aopen_entry *atid_tab; + struct rte_bitmap *ftid_bmap; + uint8_t *ftid_bmap_array; + unsigned int nftids, natids; + unsigned int ftid_base, hash_base; + + union aopen_entry *afree; + unsigned int atids_in_use; + + /* TIDs in the TCAM */ + rte_atomic32_t tids_in_use; + /* TIDs in the HASH */ + rte_atomic32_t hash_tids_in_use; + rte_atomic32_t conns_in_use; + + rte_spinlock_t atid_lock __rte_cache_aligned; + rte_spinlock_t ftid_lock; +}; + +static inline void *lookup_tid(const struct tid_info *t, unsigned int tid) +{ + return tid < t->ntids ? t->tid_tab[tid] : NULL; +} + +static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) +{ + return atid < t->natids ? t->atid_tab[atid].data : NULL; +} + +int cxgbe_alloc_atid(struct tid_info *t, void *data); +void cxgbe_free_atid(struct tid_info *t, unsigned int atid); +void cxgbe_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid, + unsigned short family); +void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid, + unsigned short family); + +#endif /* _CXGBE_OFLD_H_ */ diff --git a/drivers/net/cxgbe/cxgbe_pfvf.h b/drivers/net/cxgbe/cxgbe_pfvf.h new file mode 100644 index 00000000..8d0a105a --- /dev/null +++ b/drivers/net/cxgbe/cxgbe_pfvf.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef _CXGBE_PFVF_H_ +#define _CXGBE_PFVF_H_ + +void cxgbe_dev_rx_queue_release(void *q); +void cxgbe_dev_tx_queue_release(void *q); +void cxgbe_dev_stop(struct rte_eth_dev *eth_dev); +void cxgbe_dev_close(struct rte_eth_dev *eth_dev); +void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *device_info); +void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev); +void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev); +void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev); +void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev); +int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr); +int cxgbe_dev_configure(struct rte_eth_dev *eth_dev); +int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, + uint16_t tx_queue_id); +int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id); +int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id); +int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id); +int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu); +int cxgbe_dev_start(struct rte_eth_dev *eth_dev); +int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, + int wait_to_complete); +int cxgbe_dev_set_link_up(struct rte_eth_dev *dev); +int cxgbe_dev_set_link_down(struct rte_eth_dev *dev); +uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +const uint32_t *cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev); +#endif /* _CXGBE_PFVF_H_ */ diff --git
a/drivers/net/cxgbe/cxgbevf_ethdev.c b/drivers/net/cxgbe/cxgbevf_ethdev.c new file mode 100644 index 00000000..3b32ca9d --- /dev/null +++ b/drivers/net/cxgbe/cxgbevf_ethdev.c @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#include +#include + +#include "cxgbe.h" +#include "cxgbe_pfvf.h" + +/* + * Macros needed to support the PCI Device ID Table ... + */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static const struct rte_pci_id cxgb4vf_pci_tbl[] = { +#define CH_PCI_DEVICE_ID_FUNCTION 0x8 + +#define PCI_VENDOR_ID_CHELSIO 0x1425 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) } + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \ + { .vendor_id = 0, } \ + } + +/* + *... and the PCI ID Table itself ... + */ +#include "t4_pci_id_tbl.h" + +/* + * Get port statistics. + */ +static int cxgbevf_dev_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *eth_stats) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + struct port_stats ps; + unsigned int i; + + cxgbevf_stats_get(pi, &ps); + + /* RX Stats */ + eth_stats->ierrors = ps.rx_len_err; + + /* TX Stats */ + eth_stats->opackets = ps.tx_bcast_frames + ps.tx_mcast_frames + + ps.tx_ucast_frames; + eth_stats->obytes = ps.tx_octets; + eth_stats->oerrors = ps.tx_drop; + + for (i = 0; i < pi->n_rx_qsets; i++) { + struct sge_eth_rxq *rxq = + &s->ethrxq[pi->first_qset + i]; + + eth_stats->q_ipackets[i] = rxq->stats.pkts; + eth_stats->q_ibytes[i] = rxq->stats.rx_bytes; + eth_stats->ipackets += eth_stats->q_ipackets[i]; + eth_stats->ibytes += eth_stats->q_ibytes[i]; + } + + for (i = 0; i < pi->n_tx_qsets; i++) { + struct sge_eth_txq *txq = + &s->ethtxq[pi->first_qset + i]; + + eth_stats->q_opackets[i] = txq->stats.pkts; + eth_stats->q_obytes[i] = txq->stats.tx_bytes; + eth_stats->q_errors[i] = txq->stats.mapping_err; + } + return 0; +} + +static const struct eth_dev_ops cxgbevf_eth_dev_ops = { + .dev_start = cxgbe_dev_start, + .dev_stop = cxgbe_dev_stop, + .dev_close = cxgbe_dev_close, + .promiscuous_enable = cxgbe_dev_promiscuous_enable, + .promiscuous_disable = cxgbe_dev_promiscuous_disable, + .allmulticast_enable = cxgbe_dev_allmulticast_enable, + .allmulticast_disable = cxgbe_dev_allmulticast_disable, + .dev_configure = cxgbe_dev_configure, + .dev_infos_get = cxgbe_dev_info_get, + .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get, + .link_update = cxgbe_dev_link_update, + .dev_set_link_up = cxgbe_dev_set_link_up, + .dev_set_link_down = cxgbe_dev_set_link_down, + .mtu_set = cxgbe_dev_mtu_set, + .tx_queue_setup = cxgbe_dev_tx_queue_setup, + .tx_queue_start = cxgbe_dev_tx_queue_start, + .tx_queue_stop = cxgbe_dev_tx_queue_stop, + .tx_queue_release = cxgbe_dev_tx_queue_release, + .rx_queue_setup = cxgbe_dev_rx_queue_setup, + .rx_queue_start = cxgbe_dev_rx_queue_start, + .rx_queue_stop = cxgbe_dev_rx_queue_stop, + .rx_queue_release = cxgbe_dev_rx_queue_release, + .stats_get = cxgbevf_dev_stats_get, + .mac_addr_set = cxgbe_mac_addr_set, +}; + +/* + * Initialize driver + * It returns 0 on success. 
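+ * Note: secondary processes do not re-probe the hardware; they only + * attach to the port ethdevs the primary process allocated (see the + * rte_eth_dev_attach_secondary() loop below).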
+ */ +static int eth_cxgbevf_dev_init(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct rte_pci_device *pci_dev; + char name[RTE_ETH_NAME_MAX_LEN]; + struct adapter *adapter = NULL; + int err = 0; + + CXGBE_FUNC_TRACE(); + + eth_dev->dev_ops = &cxgbevf_eth_dev_ops; + eth_dev->rx_pkt_burst = &cxgbe_recv_pkts; + eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts; + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + /* for secondary processes, we attach to ethdevs allocated by primary + * and do minimal initialization. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + int i; + + for (i = 1; i < MAX_NPORTS; i++) { + struct rte_eth_dev *rest_eth_dev; + char namei[RTE_ETH_NAME_MAX_LEN]; + + snprintf(namei, sizeof(namei), "%s_%d", + pci_dev->device.name, i); + rest_eth_dev = rte_eth_dev_attach_secondary(namei); + if (rest_eth_dev) { + rest_eth_dev->device = &pci_dev->device; + rest_eth_dev->dev_ops = + eth_dev->dev_ops; + rest_eth_dev->rx_pkt_burst = + eth_dev->rx_pkt_burst; + rest_eth_dev->tx_pkt_burst = + eth_dev->tx_pkt_burst; + rte_eth_dev_probing_finish(rest_eth_dev); + } + } + return 0; + } + + snprintf(name, sizeof(name), "cxgbevfadapter%d", + eth_dev->data->port_id); + adapter = rte_zmalloc(name, sizeof(*adapter), 0); + if (!adapter) + return -1; + + adapter->use_unpacked_mode = 1; + adapter->regs = (void *)pci_dev->mem_resource[0].addr; + if (!adapter->regs) { + dev_err(adapter, "%s: cannot map device registers\n", __func__); + err = -ENOMEM; + goto out_free_adapter; + } + adapter->pdev = pci_dev; + adapter->eth_dev = eth_dev; + pi->adapter = adapter; + err = cxgbevf_probe(adapter); + if (err) { + dev_err(adapter, "%s: cxgbevf probe failed with err %d\n", + __func__, err); + goto out_free_adapter; + } + + return 0; + +out_free_adapter: + rte_free(adapter); + return err; +} + +static int eth_cxgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct port_info), + eth_cxgbevf_dev_init); +} + +static int eth_cxgbevf_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, NULL); +} + +static struct rte_pci_driver rte_cxgbevf_pmd = { + .id_table = cxgb4vf_pci_tbl, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_cxgbevf_pci_probe, + .remove = eth_cxgbevf_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_cxgbevf, rte_cxgbevf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_cxgbevf, cxgb4vf_pci_tbl); +RTE_PMD_REGISTER_KMOD_DEP(net_cxgbevf, "* igb_uio | vfio-pci"); diff --git a/drivers/net/cxgbe/cxgbevf_main.c b/drivers/net/cxgbe/cxgbevf_main.c new file mode 100644 index 00000000..4214d031 --- /dev/null +++ b/drivers/net/cxgbe/cxgbevf_main.c @@ -0,0 +1,295 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#include +#include +#include + +#include "common.h" +#include "t4_regs.h" +#include "t4_msg.h" +#include "cxgbe.h" + +/* + * Figure out how many Ports and Queue Sets we can support. This depends on + * knowing our Virtual Function Resources and may be called a second time if + * we fall back from MSI-X to MSI Interrupt Mode. + */ +static void size_nports_qsets(struct adapter *adapter) +{ + struct vf_resources *vfres = &adapter->params.vfres; + unsigned int pmask_nports; + + /* + * The number of "ports" which we support is equal to the number of + * Virtual Interfaces with which we've been provisioned. 
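+ * (Worked example with hypothetical numbers: a VF provisioned with + * nvi = 2 and pmask = 0x3 exposes two ports, provided at least two + * Queue Sets survive configure_max_ethqsets() below.)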
+ */ + adapter->params.nports = vfres->nvi; + if (adapter->params.nports > MAX_NPORTS) { + dev_warn(adapter->pdev_dev, "only using %d of %d maximum" + " allowed virtual interfaces\n", MAX_NPORTS, + adapter->params.nports); + adapter->params.nports = MAX_NPORTS; + } + + /* + * We may have been provisioned with more VIs than the number of + * ports we're allowed to access (our Port Access Rights Mask). + * This is obviously a configuration conflict but we don't want to + * do anything silly just because of that. + */ + pmask_nports = hweight32(adapter->params.vfres.pmask); + if (pmask_nports < adapter->params.nports) { + dev_warn(adapter->pdev_dev, "only using %d of %d provisioned" + " virtual interfaces; limited by Port Access Rights" + " mask %#x\n", pmask_nports, adapter->params.nports, + adapter->params.vfres.pmask); + adapter->params.nports = pmask_nports; + } + + configure_max_ethqsets(adapter); + if (adapter->sge.max_ethqsets < adapter->params.nports) { + dev_warn(adapter->pdev_dev, "only using %d of %d available" + " virtual interfaces (too few Queue Sets)\n", + adapter->sge.max_ethqsets, adapter->params.nports); + adapter->params.nports = adapter->sge.max_ethqsets; + } +} + +void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats) +{ + t4vf_get_port_stats(pi->adapter, pi->pidx, stats); +} + +static int adap_init0vf(struct adapter *adapter) +{ + u32 param, val = 0; + int err; + + err = t4vf_fw_reset(adapter); + if (err < 0) { + dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err); + return err; + } + + /* + * Grab basic operational parameters. These will predominantly have + * been set up by the Physical Function Driver or will be hard coded + * into the adapter. We just have to live with them ... Note that + * we _must_ get our VPD parameters before our SGE parameters because + * we need to know the adapter's core clock from the VPD in order to + * properly decode the SGE Timer Values. + */ + err = t4vf_get_dev_params(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " device parameters: err=%d\n", err); + return err; + } + + err = t4vf_get_vpd_params(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " VPD parameters: err=%d\n", err); + return err; + } + + adapter->pf = t4vf_get_pf_from_vf(adapter); + err = t4vf_sge_init(adapter); + if (err) { + dev_err(adapter->pdev_dev, "error in sge init\n"); + return err; + } + + err = t4vf_get_rss_glb_config(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " RSS parameters: err=%d\n", err); + return err; + } + if (adapter->params.rss.mode != + FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { + dev_err(adapter->pdev_dev, "unable to operate with global RSS" + " mode %d\n", adapter->params.rss.mode); + return -EINVAL; + } + + /* If we're running on newer firmware, let it know that we're + * prepared to deal with encapsulated CPL messages. Older + * firmware won't understand this and we'll just get + * unencapsulated messages ... + */ + param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP); + val = 1; + t4vf_set_params(adapter, 1, &param, &val); + + /* + * Grab our Virtual Interface resource allocation, extract the + * features that we're interested in and do a bit of sanity testing on + * what we discover.
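+ * (The sanity checks below reject a VF whose Port Access Rights mask is + * empty or which has no provisioned VIs, since such a function can never + * pass traffic.)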
+ */ + err = t4vf_get_vfres(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to get virtual interface" + " resources: err=%d\n", err); + return err; + } + + /* + * Check for various parameter sanity issues. + */ + if (adapter->params.vfres.pmask == 0) { + dev_err(adapter->pdev_dev, "no port access configured/" + "usable!\n"); + return -EINVAL; + } + if (adapter->params.vfres.nvi == 0) { + dev_err(adapter->pdev_dev, "no virtual interfaces configured/" + "usable!\n"); + return -EINVAL; + } + + /* + * Initialize nports and max_ethqsets now that we have our Virtual + * Function Resources. + */ + size_nports_qsets(adapter); + adapter->flags |= FW_OK; + return 0; +} + +int cxgbevf_probe(struct adapter *adapter) +{ + struct port_info *pi; + unsigned int pmask; + int err = 0; + int i; + + t4_os_lock_init(&adapter->mbox_lock); + TAILQ_INIT(&adapter->mbox_list); + err = t4vf_prep_adapter(adapter); + if (err) + return err; + + if (!is_t4(adapter->params.chip)) { + adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr; + if (!adapter->bar2) { + dev_err(adapter, "cannot map device bar2 region\n"); + err = -ENOMEM; + return err; + } + } + + err = adap_init0vf(adapter); + if (err) { + dev_err(adapter, "%s: Adapter initialization failed, error %d\n", + __func__, err); + goto out_free; + } + + pmask = adapter->params.vfres.pmask; + for_each_port(adapter, i) { + const unsigned int numa_node = rte_socket_id(); + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev *eth_dev; + int port_id; + + if (pmask == 0) + break; + port_id = ffs(pmask) - 1; + pmask &= ~(1 << port_id); + + snprintf(name, sizeof(name), "%s_%d", + adapter->pdev->device.name, i); + + if (i == 0) { + /* First port is already allocated by DPDK */ + eth_dev = adapter->eth_dev; + goto allocate_mac; + } + + /* + * now do all data allocation - for eth_dev structure, + * and internal (private) data for the remaining ports + */ + + /* reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocate(name); + if (!eth_dev) { + err = -ENOMEM; + goto out_free; + } + eth_dev->data->dev_private = + rte_zmalloc_socket(name, sizeof(struct port_info), + RTE_CACHE_LINE_SIZE, numa_node); + if (!eth_dev->data->dev_private) { + err = -ENOMEM; + goto out_free; + } + +allocate_mac: + pi = (struct port_info *)eth_dev->data->dev_private; + adapter->port[i] = pi; + pi->eth_dev = eth_dev; + pi->adapter = adapter; + pi->xact_addr_filt = -1; + pi->port_id = port_id; + pi->pidx = i; + + pi->eth_dev->device = &adapter->pdev->device; + pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops; + pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst; + pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst; + + rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev); + pi->eth_dev->data->mac_addrs = rte_zmalloc(name, + ETHER_ADDR_LEN, 0); + if (!pi->eth_dev->data->mac_addrs) { + dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n", + __func__); + err = -ENOMEM; + goto out_free; + } + + if (i > 0) { + /* First port will be notified by upper layer */ + rte_eth_dev_probing_finish(eth_dev); + } + } + + if (adapter->flags & FW_OK) { + err = t4vf_port_init(adapter); + if (err) { + dev_err(adapter, "%s: t4_port_init failed with err %d\n", + __func__, err); + goto out_free; + } + } + + cfg_queues(adapter->eth_dev); + print_adapter_info(adapter); + print_port_info(adapter); + + err = init_rss(adapter); + if (err) + goto out_free; + return 0; + +out_free: + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + if (pi->viid != 0) + t4_free_vi(adapter, adapter->mbox,
adapter->pf, + 0, pi->viid); + /* Skip first port since it'll be de-allocated by DPDK */ + if (i == 0) + continue; + if (pi->eth_dev) { + if (pi->eth_dev->data->dev_private) + rte_free(pi->eth_dev->data->dev_private); + rte_eth_dev_release_port(pi->eth_dev); + } + } + return -err; +} diff --git a/drivers/net/cxgbe/meson.build b/drivers/net/cxgbe/meson.build new file mode 100644 index 00000000..7c69a34b --- /dev/null +++ b/drivers/net/cxgbe/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +sources = files('cxgbe_ethdev.c', + 'cxgbe_main.c', + 'cxgbevf_ethdev.c', + 'cxgbevf_main.c', + 'sge.c', + 'cxgbe_filter.c', + 'cxgbe_flow.c', + 'clip_tbl.c', + 'base/t4_hw.c', + 'base/t4vf_hw.c') +includes += include_directories('base') diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c index 3d5aa596..4ea40d19 100644 --- a/drivers/net/cxgbe/sge.c +++ b/drivers/net/cxgbe/sge.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2015 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #include @@ -82,6 +54,11 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap, */ #define MAX_IMM_TX_PKT_LEN 256 +/* + * Max size of a WR sent through a control Tx queue. + */ +#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN + /* * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet * per mbuf buffer). We currently only support two sizes for 1500- and @@ -337,7 +314,11 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) * mechanism. */ if (unlikely(!q->bar2_addr)) { - t4_write_reg_relaxed(adap, MYPF_REG(A_SGE_PF_KDOORBELL), + u32 reg = is_pf4(adap) ? 
MYPF_REG(A_SGE_PF_KDOORBELL) : + T4VF_SGE_BASE_ADDR + + A_SGE_VF_KDOORBELL; + + t4_write_reg_relaxed(adap, reg, val | V_QID(q->cntxt_id)); } else { writel_relaxed(val | V_QID(q->bar2_qid), @@ -385,7 +366,8 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q, struct rte_mbuf *buf_bulk[n]; int ret, i; struct rte_pktmbuf_pool_private *mbp_priv; - u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame; + u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_JUMBO_FRAME; /* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */ mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool); @@ -570,12 +552,16 @@ static inline int is_eth_imm(const struct rte_mbuf *m) /** * calc_tx_flits - calculate the number of flits for a packet Tx WR * @m: the packet + * @adap: adapter structure pointer * * Returns the number of flits needed for a Tx WR for the given Ethernet * packet, including the needed WR and CPL headers. */ -static inline unsigned int calc_tx_flits(const struct rte_mbuf *m) +static inline unsigned int calc_tx_flits(const struct rte_mbuf *m, + struct adapter *adap) { + size_t wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkt_wr) : + sizeof(struct fw_eth_tx_pkt_vm_wr); unsigned int flits; int hdrlen; @@ -600,11 +586,10 @@ static inline unsigned int calc_tx_flits(const struct rte_mbuf *m) */ flits = sgl_len(m->nb_segs); if (m->tso_segsz) - flits += (sizeof(struct fw_eth_tx_pkt_wr) + - sizeof(struct cpl_tx_pkt_lso_core) + + flits += (wr_size + sizeof(struct cpl_tx_pkt_lso_core) + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); else - flits += (sizeof(struct fw_eth_tx_pkt_wr) + + flits += (wr_size + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); return flits; } @@ -848,14 +833,20 @@ static void tx_timer_cb(void *data) static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap, struct sge_eth_txq *txq) { - u32 wr_mid; - struct sge_txq *q = &txq->q; + struct fw_eth_tx_pkts_vm_wr *vmwr; + const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) + + sizeof(vmwr->ethmacsrc) + + sizeof(vmwr->ethtype) + + sizeof(vmwr->vlantci)); struct fw_eth_tx_pkts_wr *wr; + struct sge_txq *q = &txq->q; unsigned int ndesc; + u32 wr_mid; /* fill the pkts WR header */ wr = (void *)&q->desc[q->pidx]; wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)); + vmwr = (void *)&q->desc[q->pidx]; wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2)); ndesc = flits_to_desc(q->coalesce.flits); @@ -863,12 +854,18 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap, wr->plen = cpu_to_be16(q->coalesce.len); wr->npkt = q->coalesce.idx; wr->r3 = 0; - wr->type = q->coalesce.type; + if (is_pf4(adap)) { + wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)); + wr->type = q->coalesce.type; + } else { + wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR)); + vmwr->r4 = 0; + memcpy((void *)vmwr->ethmacdst, (void *)q->coalesce.ethmacdst, + fw_hdr_copy_len); + } /* zero out coalesce structure members */ - q->coalesce.idx = 0; - q->coalesce.flits = 0; - q->coalesce.len = 0; + memset((void *)&q->coalesce, 0, sizeof(struct eth_coalesce)); txq_advance(q, ndesc); txq->stats.coal_wr++; @@ -896,13 +893,27 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq, unsigned int *nflits, struct adapter *adap) { + struct fw_eth_tx_pkts_vm_wr *wr; + const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) + + sizeof(wr->ethmacsrc) + + sizeof(wr->ethtype) + + sizeof(wr->vlantci)); struct sge_txq *q = &txq->q; unsigned int flits, ndesc; unsigned 
char type = 0; - int credits; + int credits, wr_size; /* use coal WR type 1 when no frags are present */ type = (mbuf->nb_segs == 1) ? 1 : 0; + if (!is_pf4(adap)) { + if (!type) + return 0; + + if (q->coalesce.idx && memcmp((void *)q->coalesce.ethmacdst, + rte_pktmbuf_mtod(mbuf, void *), + fw_hdr_copy_len)) + ship_tx_pkt_coalesce_wr(adap, txq); + } if (unlikely(type != q->coalesce.type && q->coalesce.idx)) ship_tx_pkt_coalesce_wr(adap, txq); @@ -948,16 +959,21 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq, new: /* start a new pkts WR, the WR header is not filled below */ - flits += sizeof(struct fw_eth_tx_pkts_wr) / sizeof(__be64); + wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkts_wr) : + sizeof(struct fw_eth_tx_pkts_vm_wr); + flits += wr_size / sizeof(__be64); ndesc = flits_to_desc(q->coalesce.flits + flits); credits = txq_avail(q) - ndesc; if (unlikely(credits < 0 || wraps_around(q, ndesc))) return 0; - q->coalesce.flits += 2; + q->coalesce.flits += wr_size / sizeof(__be64); q->coalesce.type = type; q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] + - 2 * sizeof(__be64); + q->coalesce.flits * sizeof(__be64); + if (!is_pf4(adap)) + memcpy((void *)q->coalesce.ethmacdst, + rte_pktmbuf_mtod(mbuf, void *), fw_hdr_copy_len); return 1; } @@ -987,6 +1003,8 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq, struct cpl_tx_pkt_core *cpl; struct tx_sw_desc *sd; unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len; + unsigned int max_coal_pkt_num = is_pf4(adap) ? ETH_COALESCE_PKT_NUM : + ETH_COALESCE_VF_PKT_NUM; #ifdef RTE_LIBRTE_CXGBE_TPUT RTE_SET_USED(nb_pkts); @@ -1030,9 +1048,12 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq, cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci); } - cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | - V_TXPKT_INTF(pi->tx_chan) | - V_TXPKT_PF(adap->pf)); + cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT)); + if (is_pf4(adap)) + cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) | + V_TXPKT_PF(adap->pf)); + else + cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id)); cpl->pack = htons(0); cpl->len = htons(len); cpl->ctrl1 = cpu_to_be64(cntrl); @@ -1061,7 +1082,7 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq, sd->coalesce.idx = (idx & 1) + 1; /* send the coalesced work request if max reached */ - if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM + if (++q->coalesce.idx == max_coal_pkt_num #ifndef RTE_LIBRTE_CXGBE_TPUT || q->coalesce.idx >= nb_pkts #endif @@ -1085,6 +1106,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf, struct adapter *adap; struct rte_mbuf *m = mbuf; struct fw_eth_tx_pkt_wr *wr; + struct fw_eth_tx_pkt_vm_wr *vmwr; struct cpl_tx_pkt_core *cpl; struct tx_sw_desc *d; dma_addr_t addr[m->nb_segs]; @@ -1095,7 +1117,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf, u32 wr_mid; u64 cntrl, *end; bool v6; - u32 max_pkt_len = txq->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; + u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len; /* Reject xmit if queue is stopped */ if (unlikely(txq->flags & EQ_STOPPED)) @@ -1115,7 +1137,7 @@ out_free: (unlikely(m->pkt_len > max_pkt_len))) goto out_free; - pi = (struct port_info *)txq->eth_dev->data->dev_private; + pi = (struct port_info *)txq->data->dev_private; adap = pi->adapter; cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS; @@ -1141,7 +1163,7 @@ out_free: if (txq->q.coalesce.idx) ship_tx_pkt_coalesce_wr(adap, txq); - flits = calc_tx_flits(m); + flits = calc_tx_flits(m, adap); ndesc =
flits_to_desc(flits); credits = txq_avail(&txq->q) - ndesc; @@ -1163,31 +1185,55 @@ out_free: } wr = (void *)&txq->q.desc[txq->q.pidx]; + vmwr = (void *)&txq->q.desc[txq->q.pidx]; wr->equiq_to_len16 = htonl(wr_mid); - wr->r3 = rte_cpu_to_be_64(0); - end = (u64 *)wr + flits; + if (is_pf4(adap)) { + wr->r3 = rte_cpu_to_be_64(0); + end = (u64 *)wr + flits; + } else { + const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) + + sizeof(vmwr->ethmacsrc) + + sizeof(vmwr->ethtype) + + sizeof(vmwr->vlantci)); + + vmwr->r3[0] = rte_cpu_to_be_32(0); + vmwr->r3[1] = rte_cpu_to_be_32(0); + memcpy((void *)vmwr->ethmacdst, rte_pktmbuf_mtod(m, void *), + fw_hdr_copy_len); + end = (u64 *)vmwr + flits; + } len = 0; len += sizeof(*cpl); /* Coalescing skipped and we send through normal path */ if (!(m->ol_flags & PKT_TX_TCP_SEG)) { - wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | + wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ? + FW_ETH_TX_PKT_WR : + FW_ETH_TX_PKT_VM_WR) | V_FW_WR_IMMDLEN(len)); - cpl = (void *)(wr + 1); + if (is_pf4(adap)) + cpl = (void *)(wr + 1); + else + cpl = (void *)(vmwr + 1); if (m->ol_flags & PKT_TX_IP_CKSUM) { cntrl = hwcsum(adap->params.chip, m) | F_TXPKT_IPCSUM_DIS; txq->stats.tx_cso++; } } else { - lso = (void *)(wr + 1); + if (is_pf4(adap)) + lso = (void *)(wr + 1); + else + lso = (void *)(vmwr + 1); v6 = (m->ol_flags & PKT_TX_IPV6) != 0; l3hdr_len = m->l3_len; l4hdr_len = m->l4_len; eth_xtra_len = m->l2_len - ETHER_HDR_LEN; len += sizeof(*lso); - wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | + wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ? + FW_ETH_TX_PKT_WR : + FW_ETH_TX_PKT_VM_WR) | V_FW_WR_IMMDLEN(len)); lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE | @@ -1221,9 +1267,14 @@ out_free: cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci); } - cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | - V_TXPKT_INTF(pi->tx_chan) | - V_TXPKT_PF(adap->pf)); + cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT)); + if (is_pf4(adap)) + cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) | + V_TXPKT_PF(adap->pf)); + else + cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id) | + V_TXPKT_PF(0)); + cpl->pack = htons(0); cpl->len = htons(m->pkt_len); cpl->ctrl1 = cpu_to_be64(cntrl); @@ -1253,6 +1304,126 @@ out_free: return 0; } +/** + * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs + * @q: the SGE control Tx queue + * + * This is a variant of reclaim_completed_tx() that is used for Tx queues + * that send only immediate data (presently just the control queues) and + * thus do not have any mbufs to release. + */ +static inline void reclaim_completed_tx_imm(struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + int reclaim = hw_cidx - q->cidx; + + if (reclaim < 0) + reclaim += q->size; + + q->in_use -= reclaim; + q->cidx = hw_cidx; +} + +/** + * is_imm - check whether a packet can be sent as immediate data + * @mbuf: the packet + * + * Returns true if a packet can be sent as a WR with immediate data. + */ +static inline int is_imm(const struct rte_mbuf *mbuf) +{ + return mbuf->pkt_len <= MAX_CTRL_WR_LEN; +} + +/** + * inline_tx_mbuf: inline a packet's data into TX descriptors + * @q: the TX queue where the packet will be inlined + * @from: pointer to data portion of packet + * @to: pointer after cpl where data has to be inlined + * @len: length of data to inline + * + * Inline a packet's contents directly to TX descriptors, starting at + * the given position within the TX DMA ring. 
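+ * (Worked example with hypothetical sizes: if only 40 bytes remain + * before q->stat and len = 100, the first 40 bytes are copied at *to + * and the remaining 60 continue at the ring start, q->desc.)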
+ * Most of the complexity of this operation is dealing with wrap arounds + * in the middle of the packet we want to inline. + */ +static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to, + int len) +{ + int left = RTE_PTR_DIFF(q->stat, *to); + + if (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) { + rte_memcpy(*to, from, len); + *to = RTE_PTR_ADD(*to, len); + } else { + rte_memcpy(*to, from, left); + from = RTE_PTR_ADD(from, left); + left = len - left; + rte_memcpy((void *)q->desc, from, left); + *to = RTE_PTR_ADD((void *)q->desc, left); + } +} + +/** + * ctrl_xmit - send a packet through an SGE control Tx queue + * @q: the control queue + * @mbuf: the packet + * + * Send a packet through an SGE control Tx queue. Packets sent through + * a control queue must fit entirely as immediate data. + */ +static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf) +{ + unsigned int ndesc; + struct fw_wr_hdr *wr; + caddr_t dst; + + if (unlikely(!is_imm(mbuf))) { + WARN_ON(1); + rte_pktmbuf_free(mbuf); + return -1; + } + + reclaim_completed_tx_imm(&q->q); + ndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc)); + t4_os_lock(&q->ctrlq_lock); + + q->full = txq_avail(&q->q) < ndesc ? 1 : 0; + if (unlikely(q->full)) { + t4_os_unlock(&q->ctrlq_lock); + return -1; + } + + wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; + dst = (void *)wr; + inline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t), + &dst, mbuf->data_len); + + txq_advance(&q->q, ndesc); + if (unlikely(txq_avail(&q->q) < 64)) + wr->lo |= htonl(F_FW_WR_EQUEQ); + + q->txp++; + + ring_tx_db(q->adapter, &q->q); + t4_os_unlock(&q->ctrlq_lock); + + rte_pktmbuf_free(mbuf); + return 0; +} + +/** + * t4_mgmt_tx - send a management message + * @q: the control queue + * @mbuf: the packet containing the management message + * + * Send a management message through control queue. + */ +int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf) +{ + return ctrl_xmit(q, mbuf); +} + /** * alloc_ring - allocate resources for an SGE descriptor ring * @dev: the PCI device's core device @@ -1299,7 +1470,8 @@ static void *alloc_ring(size_t nelem, size_t elem_size, * handle the maximum ring size is allocated in order to allow for * resizing in later calls to the queue setup function. */ - tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096); + tz = rte_memzone_reserve_aligned(z_name, len, socket_id, + RTE_MEMZONE_IOVA_CONTIG, 4096); if (!tz) return NULL; @@ -1468,6 +1640,7 @@ static int process_responses(struct sge_rspq *q, int budget, rsp_type = G_RSPD_TYPE(rc->u.type_gen); if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) { + struct sge *s = &q->adapter->sge; unsigned int stat_pidx; int stat_pidx_diff; @@ -1550,10 +1723,12 @@ static int process_responses(struct sge_rspq *q, int budget, } if (cpl->vlan_ex) { - pkt->ol_flags |= PKT_RX_VLAN; + pkt->ol_flags |= PKT_RX_VLAN | + PKT_RX_VLAN_STRIPPED; pkt->vlan_tci = ntohs(cpl->vlan); } + rte_pktmbuf_adj(pkt, s->pktshift); rxq->stats.pkts++; rxq->stats.rx_bytes += pkt->pkt_len; rx_pkts[budget - budget_left] = pkt; @@ -1612,7 +1787,11 @@ int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts, val = V_CIDXINC(cidx_inc) | V_SEINTARM(params); if (unlikely(!q->bar2_addr)) { - t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS), + u32 reg = is_pf4(q->adapter) ? 
MYPF_REG(A_SGE_PF_GTS) : + T4VF_SGE_BASE_ADDR + + A_SGE_VF_GTS; + + t4_write_reg(q->adapter, reg, val | V_INGRESSQID((u32)q->cntxt_id)); } else { writel(val | V_INGRESSQID(q->bar2_qid), @@ -1689,6 +1868,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, char z_name[RTE_MEMZONE_NAMESIZE]; char z_name_sw[RTE_MEMZONE_NAMESIZE]; unsigned int nb_refill; + u8 pciechan; /* Size needs to be multiple of 16, including status entry. */ iq->size = cxgbe_roundup(iq->size, 16); @@ -1706,8 +1886,23 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | - F_FW_CMD_WRITE | F_FW_CMD_EXEC | - V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0)); + F_FW_CMD_WRITE | F_FW_CMD_EXEC); + + if (is_pf4(adap)) { + pciechan = pi->tx_chan; + c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) | + V_FW_IQ_CMD_VFN(0)); + if (cong >= 0) + c.iqns_to_fl0congen = + htonl(F_FW_IQ_CMD_IQFLINTCONGEN | + V_FW_IQ_CMD_IQTYPE(cong ? + FW_IQ_IQTYPE_NIC : + FW_IQ_IQTYPE_OFLD) | + F_FW_IQ_CMD_IQRO); + } else { + pciechan = pi->port_id; + } + c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | (sizeof(c) / 16)); c.type_to_iqandstindex = @@ -1719,16 +1914,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx : -intr_idx - 1)); c.iqdroprss_to_iqesize = - htons(V_FW_IQ_CMD_IQPCIECH(cong > 0 ? cxgbe_ffs(cong) - 1 : - pi->tx_chan) | + htons(V_FW_IQ_CMD_IQPCIECH(pciechan) | F_FW_IQ_CMD_IQGTSMODE | V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); c.iqsize = htons(iq->size); c.iqaddr = cpu_to_be64(iq->phys_addr); - if (cong >= 0) - c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN | - F_FW_IQ_CMD_IQRO); if (fl) { struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq, @@ -1768,7 +1959,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, 0 : F_FW_IQ_CMD_FL0PACKEN) | F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | F_FW_IQ_CMD_FL0PADEN); - if (cong >= 0) + if (is_pf4(adap) && cong >= 0) c.iqns_to_fl0congen |= htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF | @@ -1789,7 +1980,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, c.fl0addr = cpu_to_be64(fl->addr); } - ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (is_pf4(adap)) + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + else + ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c); if (ret) goto err; @@ -1806,7 +2000,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, iq->stat = (void *)&iq->desc[iq->size * 8]; iq->eth_dev = eth_dev; iq->handler = hnd; - iq->port_id = pi->port_id; + iq->port_id = pi->pidx; iq->mb_pool = mp; /* set offset to -1 to distinguish ingress queues without FL */ @@ -1846,7 +2040,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, * a lot easier to fix in one place ... For now we do something very * simple (and hopefully less wrong). 
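The queue-allocation path above settles into a recurring PF/VF pattern: the firmware command image is built once, and only the delivery differs - a PF writes it through its own numbered mailbox with t4_wr_mbox(), while a VF goes through the VF-to-PF mailbox via t4vf_wr_mbox(). A compilable sketch of that dispatch shape, with stub types and no-op mailbox bodies standing in for the real driver definitions:

#include <stdbool.h>
#include <stddef.h>

/* Stub types and no-op mailboxes; only the dispatch shape matters. */
struct adapter { bool pf_function; int mbox; };

static bool is_pf4(const struct adapter *adap)
{
        return adap->pf_function;
}

static int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
                      size_t size, void *rpl)
{
        (void)adap; (void)mbox; (void)cmd; (void)size; (void)rpl;
        return 0;       /* pretend the firmware accepted the command */
}

static int t4vf_wr_mbox(struct adapter *adap, const void *cmd, size_t size,
                        void *rpl)
{
        (void)adap; (void)cmd; (void)size; (void)rpl;
        return 0;
}

/*
 * One command image, two delivery paths: a PF owns a numbered mailbox,
 * a VF talks to its parent PF through the single VF mailbox.
 */
static int fw_cmd_exec(struct adapter *adap, const void *cmd, size_t size,
                       void *rpl)
{
        if (is_pf4(adap))
                return t4_wr_mbox(adap, adap->mbox, cmd, size, rpl);
        return t4vf_wr_mbox(adap, cmd, size, rpl);
}

int main(void)
{
        struct adapter pf = { true, 4 }, vf = { false, 0 };
        char cmd[64] = { 0 };

        return fw_cmd_exec(&pf, cmd, sizeof(cmd), cmd) |
               fw_cmd_exec(&vf, cmd, sizeof(cmd), cmd);
}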
*/ - if (!is_t4(adap->params.chip) && cong >= 0) { + if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) { u32 param, val; int i; @@ -1893,9 +2087,11 @@ err: return ret; } -static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) +static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id, + unsigned int abs_id) { q->cntxt_id = id; + q->abs_id = abs_id; q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS, &q->bar2_qid); q->cidx = 0; @@ -1943,6 +2139,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); char z_name[RTE_MEMZONE_NAMESIZE]; char z_name_sw[RTE_MEMZONE_NAMESIZE]; + u8 pciechan; /* Add status entries */ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); @@ -1961,16 +2158,22 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | - F_FW_CMD_WRITE | F_FW_CMD_EXEC | - V_FW_EQ_ETH_CMD_PFN(adap->pf) | - V_FW_EQ_ETH_CMD_VFN(0)); + F_FW_CMD_WRITE | F_FW_CMD_EXEC); + if (is_pf4(adap)) { + pciechan = pi->tx_chan; + c.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) | + V_FW_EQ_ETH_CMD_VFN(0)); + } else { + pciechan = pi->port_id; + } + c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC | F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16)); c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(pi->viid)); c.fetchszm_to_iqid = htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | - V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | + V_FW_EQ_ETH_CMD_PCIECHN(pciechan) | F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid)); c.dcaen_to_eqsize = htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | @@ -1978,7 +2181,10 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, V_FW_EQ_ETH_CMD_EQSIZE(nentries)); c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr); - ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (is_pf4(adap)) + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + else + ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c); if (ret) { rte_free(txq->q.sdesc); txq->q.sdesc = NULL; @@ -1986,7 +2192,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, return ret; } - init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd))); + init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)), + G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd))); txq->stats.tso = 0; txq->stats.pkts = 0; txq->stats.tx_cso = 0; @@ -1997,10 +2204,69 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, txq->stats.mapping_err = 0; txq->flags |= EQ_STOPPED; txq->eth_dev = eth_dev; + txq->data = eth_dev->data; t4_os_lock_init(&txq->txq_lock); return 0; } +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct rte_eth_dev *eth_dev, uint16_t queue_id, + unsigned int iqid, int socket_id) +{ + int ret, nentries; + struct fw_eq_ctrl_cmd c; + struct sge *s = &adap->sge; + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + char z_name[RTE_MEMZONE_NAMESIZE]; + char z_name_sw[RTE_MEMZONE_NAMESIZE]; + + /* Add status entries */ + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); + + snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", + eth_dev->device->driver->name, "ctrl_tx_ring", + eth_dev->data->port_id, queue_id); + snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name); + + txq->q.desc = 
alloc_ring(txq->q.size, sizeof(struct tx_desc), + 0, &txq->q.phys_addr, + NULL, 0, queue_id, + socket_id, z_name, z_name_sw); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_WRITE | F_FW_CMD_EXEC | + V_FW_EQ_CTRL_CMD_PFN(adap->pf) | + V_FW_EQ_CTRL_CMD_VFN(0)); + c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC | + F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16)); + c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0)); + c.physeqid_pkd = htonl(0); + c.fetchszm_to_iqid = + htonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | + V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | + F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid)); + c.dcaen_to_eqsize = + htonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) | + V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | + V_FW_EQ_CTRL_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (ret) { + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)), + G_FW_EQ_CTRL_CMD_EQID(ntohl(c.physeqid_pkd))); + txq->adapter = adap; + txq->full = 0; + return 0; +} + static void free_txq(struct sge_txq *q) { q->cntxt_id = 0; @@ -2095,7 +2361,7 @@ void t4_sge_tx_monitor_stop(struct adapter *adap) */ void t4_free_sge_resources(struct adapter *adap) { - int i; + unsigned int i; struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0]; struct sge_eth_txq *txq = &adap->sge.ethtxq[0]; @@ -2112,6 +2378,18 @@ void t4_free_sge_resources(struct adapter *adap) } } + /* clean up control Tx queues */ + for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { + struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; + + if (cq->q.desc) { + reclaim_completed_tx_imm(&cq->q); + t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, + cq->q.cntxt_id); + free_txq(&cq->q); + } + } + if (adap->sge.fw_evtq.desc) free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); } @@ -2280,3 +2558,182 @@ int t4_sge_init(struct adapter *adap) return 0; } + +int t4vf_sge_init(struct adapter *adap) +{ + struct sge_params *sge_params = &adap->params.sge; + u32 sge_ingress_queues_per_page; + u32 sge_egress_queues_per_page; + u32 sge_control, sge_control2; + u32 fl_small_pg, fl_large_pg; + u32 sge_ingress_rx_threshold; + u32 sge_timer_value_0_and_1; + u32 sge_timer_value_2_and_3; + u32 sge_timer_value_4_and_5; + u32 sge_congestion_control; + struct sge *s = &adap->sge; + unsigned int s_hps, s_qpp; + u32 sge_host_page_size; + u32 params[7], vals[7]; + int v; + + /* query basic params from fw */ + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL)); + params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE)); + params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0)); + params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1)); + params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1)); + params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3)); + params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5)); + v = t4vf_query_params(adap, 7, params, vals); + if (v != FW_SUCCESS) + return v; + + sge_control = vals[0]; + sge_host_page_size = vals[1]; + fl_small_pg = vals[2]; + fl_large_pg = vals[3]; + sge_timer_value_0_and_1 =
vals[4]; + sge_timer_value_2_and_3 = vals[5]; + sge_timer_value_4_and_5 = vals[6]; + + /* + * Start by vetting the basic SGE parameters which have been set up by + * the Physical Function Driver. + */ + + /* We only bother using the Large Page logic if the Large Page Buffer + * is larger than our Page Size Buffer. + */ + if (fl_large_pg <= fl_small_pg) + fl_large_pg = 0; + + /* The Page Size Buffer must be exactly equal to our Page Size and the + * Large Page Size Buffer should be 0 (per above) or a power of 2. + */ + if (fl_small_pg != CXGBE_PAGE_SIZE || + (fl_large_pg & (fl_large_pg - 1)) != 0) { + dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", + fl_small_pg, fl_large_pg); + return -EINVAL; + } + + if ((sge_control & F_RXPKTCPLMODE) != + V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) { + dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); + return -EINVAL; + } + + + /* Grab ingress packing boundary from SGE_CONTROL2 for */ + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2)); + v = t4vf_query_params(adap, 1, params, vals); + if (v != FW_SUCCESS) { + dev_err(adapter, "Unable to get SGE Control2; " + "probably old firmware.\n"); + return v; + } + sge_control2 = vals[0]; + + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD)); + params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL)); + v = t4vf_query_params(adap, 2, params, vals); + if (v != FW_SUCCESS) + return v; + sge_ingress_rx_threshold = vals[0]; + sge_congestion_control = vals[1]; + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF)); + params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF)); + v = t4vf_query_params(adap, 2, params, vals); + if (v != FW_SUCCESS) { + dev_warn(adap, "Unable to get VF SGE Queues/Page; " + "probably old firmware.\n"); + return v; + } + sge_egress_queues_per_page = vals[0]; + sge_ingress_queues_per_page = vals[1]; + + /* + * We need the Queues/Page for our VF. This is based on the + * PF from which we're instantiated and is indexed in the + * register we just read. + */ + s_hps = (S_HOSTPAGESIZEPF0 + + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf); + sge_params->hps = + ((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0); + + s_qpp = (S_QUEUESPERPAGEPF0 + + (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf); + sge_params->eq_qpp = + ((sge_egress_queues_per_page >> s_qpp) + & M_QUEUESPERPAGEPF0); + sge_params->iq_qpp = + ((sge_ingress_queues_per_page >> s_qpp) + & M_QUEUESPERPAGEPF0); + + /* + * Now translate the queried parameters into our internal forms. + */ + if (fl_large_pg) + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; + s->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE) + ? 128 : 64); + s->pktshift = G_PKTSHIFT(sge_control); + s->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2); + + /* + * A FL with <= fl_starve_thres buffers is starving and a periodic + * timer will attempt to refill it. This needs to be larger than the + * SGE's Egress Congestion Threshold. If it isn't, then we can get + * stuck waiting for new packets while the SGE is waiting for us to + * give it more Free List entries. (Note that the SGE's Egress + * Congestion Threshold is in units of 2 Free List pointers.) 
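The s_hps/s_qpp computations above decode per-PF bitfields packed into single SGE registers: PF n's field sits at the PF0 shift plus n times the per-PF stride, then gets masked to the field width. A self-contained illustration with hypothetical 4-bit fields at stride 4 (the real S_HOSTPAGESIZEPF*/S_QUEUESPERPAGEPF*/M_* constants come from the hardware headers):

#include <assert.h>
#include <stdint.h>

/*
 * Hypothetical register layout: PF0's field starts at bit 0, each
 * field is 4 bits wide, so PF n's shift is PF0 shift plus n times
 * the (PF1 - PF0) stride, mirroring the s_hps/s_qpp math above.
 */
#define S_FIELD_PF0     0
#define S_FIELD_PF1     4
#define M_FIELD         0xfU

static uint32_t pf_field(uint32_t reg, unsigned int pf)
{
        unsigned int shift = S_FIELD_PF0 + (S_FIELD_PF1 - S_FIELD_PF0) * pf;

        return (reg >> shift) & M_FIELD;
}

int main(void)
{
        uint32_t reg = 0x00321000;      /* nibble n holds PF n's value */

        assert(pf_field(reg, 3) == 1);
        assert(pf_field(reg, 4) == 2);
        assert(pf_field(reg, 5) == 3);
        return 0;
}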
+ */ + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T5: + s->fl_starve_thres = + G_EGRTHRESHOLDPACKING(sge_congestion_control); + break; + case CHELSIO_T6: + default: + s->fl_starve_thres = + G_T6_EGRTHRESHOLDPACKING(sge_congestion_control); + break; + } + s->fl_starve_thres = s->fl_starve_thres * 2 + 1; + + /* + * Save RX interrupt holdoff timer values and counter + * threshold values from the SGE parameters. + */ + s->timer_val[0] = core_ticks_to_us(adap, + G_TIMERVALUE0(sge_timer_value_0_and_1)); + s->timer_val[1] = core_ticks_to_us(adap, + G_TIMERVALUE1(sge_timer_value_0_and_1)); + s->timer_val[2] = core_ticks_to_us(adap, + G_TIMERVALUE2(sge_timer_value_2_and_3)); + s->timer_val[3] = core_ticks_to_us(adap, + G_TIMERVALUE3(sge_timer_value_2_and_3)); + s->timer_val[4] = core_ticks_to_us(adap, + G_TIMERVALUE4(sge_timer_value_4_and_5)); + s->timer_val[5] = core_ticks_to_us(adap, + G_TIMERVALUE5(sge_timer_value_4_and_5)); + s->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold); + s->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold); + s->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold); + s->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold); + return 0; +} diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile index 9c2a5ea8..d7a0a50c 100644 --- a/drivers/net/dpaa/Makefile +++ b/drivers/net/dpaa/Makefile @@ -27,6 +27,9 @@ EXPORT_MAP := rte_pmd_dpaa_version.map LIBABIVER := 1 +# depends on dpaa bus which uses experimental API +CFLAGS += -DALLOW_EXPERIMENTAL_API + # Interfaces with DPDK SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_rxtx.c diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c index 9b69ef45..7a950ac0 100644 --- a/drivers/net/dpaa/dpaa_ethdev.c +++ b/drivers/net/dpaa/dpaa_ethdev.c @@ -45,14 +45,43 @@ #include #include +/* Supported Rx offloads */ +static uint64_t dev_rx_offloads_sup = + DEV_RX_OFFLOAD_JUMBO_FRAME; + +/* Rx offloads which cannot be disabled */ +static uint64_t dev_rx_offloads_nodis = + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_SCATTER; + +/* Supported Tx offloads */ +static uint64_t dev_tx_offloads_sup; + +/* Tx offloads which cannot be disabled */ +static uint64_t dev_tx_offloads_nodis = + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_MT_LOCKFREE | + DEV_TX_OFFLOAD_MBUF_FAST_FREE; + /* Keep track of whether QMAN and BMAN have been globally initialized */ static int is_global_init; -/* At present we only allow up to 4 push mode queues - as each of this queue - * need dedicated portal and we are short of portals. +static int default_q; /* use default queue - FMC is not executed*/ +/* At present we only allow up to 4 push mode queues as default - as each of + * this queue need dedicated portal and we are short of portals. 
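The dev_rx_offloads_sup/dev_rx_offloads_nodis split introduced above encodes a simple contract: capabilities are advertised as the union of both masks, while offloads the hardware cannot turn off are merely warned about at configure time (see the validation in dpaa_eth_dev_configure() below) when an application fails to request them. A standalone sketch of that bitmask logic, with made-up flag values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up offload flags; the DEV_RX_OFFLOAD_* values are analogous. */
#define OFF_CKSUM       (UINT64_C(1) << 0)      /* always on in hardware */
#define OFF_SCATTER     (UINT64_C(1) << 1)      /* always on in hardware */
#define OFF_JUMBO       (UINT64_C(1) << 2)      /* genuinely optional */

static const uint64_t offloads_sup = OFF_JUMBO;
static const uint64_t offloads_nodis = OFF_CKSUM | OFF_SCATTER;

/* Advertised capability is the union of both groups. */
static uint64_t offload_capa(void)
{
        return offloads_sup | offloads_nodis;
}

/* Configure-time check: warn when an always-on bit was not requested. */
static void validate_offloads(uint64_t requested)
{
        if (offloads_nodis & ~requested)
                printf("offloads non configurable - requested 0x%" PRIx64
                       " always-on 0x%" PRIx64 "\n",
                       requested, offloads_nodis);
}

int main(void)
{
        printf("capa 0x%" PRIx64 "\n", offload_capa());
        validate_offloads(OFF_JUMBO);               /* warns */
        validate_offloads(OFF_CKSUM | OFF_SCATTER); /* silent */
        return 0;
}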
*/ -#define DPAA_MAX_PUSH_MODE_QUEUE 4 +#define DPAA_MAX_PUSH_MODE_QUEUE 8 +#define DPAA_DEFAULT_PUSH_MODE_QUEUE 4 -static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE; +static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE; static int dpaa_push_queue_idx; /* Queue index which are in push mode*/ @@ -95,6 +124,9 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = { static struct rte_dpaa_driver rte_dpaa_pmd; +static void +dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info); + static inline void dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts) { @@ -122,9 +154,11 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN) return -EINVAL; if (frame_size > ETHER_MAX_LEN) - dev->data->dev_conf.rxmode.jumbo_frame = 1; + dev->data->dev_conf.rxmode.offloads &= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; @@ -134,13 +168,32 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) } static int -dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused) +dpaa_eth_dev_configure(struct rte_eth_dev *dev) { struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct rte_eth_conf *eth_conf = &dev->data->dev_conf; + uint64_t rx_offloads = eth_conf->rxmode.offloads; + uint64_t tx_offloads = eth_conf->txmode.offloads; PMD_INIT_FUNC_TRACE(); - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) { + /* Rx offloads validation */ + if (dev_rx_offloads_nodis & ~rx_offloads) { + DPAA_PMD_WARN( + "Rx offloads non configurable - requested 0x%" PRIx64 + " ignored 0x%" PRIx64, + rx_offloads, dev_rx_offloads_nodis); + } + + /* Tx offloads validation */ + if (dev_tx_offloads_nodis & ~tx_offloads) { + DPAA_PMD_WARN( + "Tx offloads non configurable - requested 0x%" PRIx64 + " ignored 0x%" PRIx64, + tx_offloads, dev_tx_offloads_nodis); + } + + if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= DPAA_MAX_RX_PKT_LEN) { fman_if_set_maxfrm(dpaa_intf->fif, @@ -256,14 +309,12 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev, dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL; dev_info->speed_capa = (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G); - dev_info->rx_offload_capa = - (DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM); - dev_info->tx_offload_capa = - (DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM); + dev_info->rx_offload_capa = dev_rx_offloads_sup | + dev_rx_offloads_nodis; + dev_info->tx_offload_capa = dev_tx_offloads_sup | + dev_tx_offloads_nodis; + dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE; + dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE; } static int dpaa_eth_link_update(struct rte_eth_dev *dev, @@ -275,9 +326,9 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); if (dpaa_intf->fif->mac_type == fman_mac_1g) - link->link_speed = 1000; + link->link_speed = ETH_SPEED_NUM_1G; else if (dpaa_intf->fif->mac_type == fman_mac_10g) - link->link_speed = 10000; + link->link_speed = ETH_SPEED_NUM_10G; else DPAA_PMD_ERR("invalid link_speed: %s, %d", dpaa_intf->name, dpaa_intf->fif->mac_type); @@ -316,12 +367,12 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings); 
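The checks reordered just below in dpaa_dev_xstats_get() serve the standard ethdev sizing handshake: when the caller's array is too small the function returns the required count rather than an error, so applications probe first and then fetch. A hedged sketch of the application side of that contract (rte_eth_xstats_get() is the public API; error handling trimmed):

#include <stdio.h>
#include <stdlib.h>

#include <rte_ethdev.h>

/*
 * Probe-then-fetch: a too-small (or zero-length) array gets the
 * required count back, which is why the size check must precede
 * the NULL check in the PMD callback.
 */
static void dump_xstats(uint16_t port_id)
{
        int n = rte_eth_xstats_get(port_id, NULL, 0);   /* size query */
        struct rte_eth_xstat *xs;

        if (n <= 0)
                return;
        xs = calloc((size_t)n, sizeof(*xs));
        if (xs == NULL)
                return;
        if (rte_eth_xstats_get(port_id, xs, (unsigned int)n) == n)
                printf("port %u: %d extended stats\n", port_id, n);
        free(xs);
}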
uint64_t values[sizeof(struct dpaa_if_stats) / 8]; - if (xstats == NULL) - return 0; - if (n < num) return num; + if (xstats == NULL) + return 0; + fman_if_stats_get_all(dpaa_intf->fif, values, sizeof(struct dpaa_if_stats) / 8); @@ -335,10 +386,13 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, static int dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, - __rte_unused unsigned int limit) + unsigned int limit) { unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); + if (limit < stat_cnt) + return stat_cnt; + if (xstats_names != NULL) for (i = 0; i < stat_cnt; i++) snprintf(xstats_names[i].name, @@ -366,7 +420,7 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, return 0; fman_if_stats_get_all(dpaa_intf->fif, values_copy, - sizeof(struct dpaa_if_stats)); + sizeof(struct dpaa_if_stats) / 8); for (i = 0; i < stat_cnt; i++) values[i] = @@ -463,7 +517,15 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, PMD_INIT_FUNC_TRACE(); - DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx); + if (queue_idx >= dev->data->nb_rx_queues) { + rte_errno = EOVERFLOW; + DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)", + (void *)dev, queue_idx, dev->data->nb_rx_queues); + return -rte_errno; + } + + DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)", + queue_idx, rxq->fqid); if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) { struct fman_if_ic_params icp; @@ -527,9 +589,11 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; } ret = qman_init_fq(rxq, flags, &opts); - if (ret) - DPAA_PMD_ERR("Channel/Queue association failed. fqid %d" - " ret: %d", rxq->fqid, ret); + if (ret) { + DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x " + "ret:%d(%s)", rxq->fqid, ret, strerror(ret)); + return ret; + } rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb; rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare; rxq->is_static = true; @@ -553,7 +617,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, return 0; } -int __rte_experimental +int dpaa_eth_eventq_attach(const struct rte_eth_dev *dev, int eth_rx_queue_id, u16 ch_id, @@ -604,8 +668,8 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev, ret = qman_init_fq(rxq, flags, &opts); if (ret) { - DPAA_PMD_ERR("Channel/Queue association failed. fqid %d ret:%d", - rxq->fqid, ret); + DPAA_PMD_ERR("Ev-Channel/Q association failed. 
fqid 0x%x " + "ret:%d(%s)", rxq->fqid, ret, strerror(ret)); return ret; } @@ -616,7 +680,7 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev, return ret; } -int __rte_experimental +int dpaa_eth_eventq_detach(const struct rte_eth_dev *dev, int eth_rx_queue_id) { @@ -662,7 +726,15 @@ int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, PMD_INIT_FUNC_TRACE(); - DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx); + if (queue_idx >= dev->data->nb_tx_queues) { + rte_errno = EOVERFLOW; + DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)", + (void *)dev, queue_idx, dev->data->nb_tx_queues); + return -rte_errno; + } + + DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)", + queue_idx, dpaa_intf->tx_queues[queue_idx].fqid); dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx]; return 0; } @@ -813,7 +885,7 @@ dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev, fman_if_clear_mac_addr(dpaa_intf->fif, index); } -static void +static int dpaa_dev_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) { @@ -825,6 +897,8 @@ dpaa_dev_set_mac_addr(struct rte_eth_dev *dev, ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0); if (ret) RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret); + + return ret; } static struct eth_dev_ops dpaa_devops = { @@ -882,7 +956,7 @@ is_dpaa_supported(struct rte_eth_dev *dev) return is_device_supported(dev, &rte_dpaa_pmd); } -int __rte_experimental +int rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on) { struct rte_eth_dev *dev; @@ -953,15 +1027,15 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx, ret = qman_reserve_fqid(fqid); if (ret) { - DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d", + DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d", fqid, ret); return -EINVAL; } - DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid); + DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid); ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq); if (ret) { - DPAA_PMD_ERR("create rx fqid %d failed with ret: %d", + DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d", fqid, ret); return ret; } @@ -977,7 +1051,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx, &cgr_opts); if (ret) { DPAA_PMD_WARN( - "rx taildrop init fail on rx fqid %d (ret=%d)", + "rx taildrop init fail on rx fqid 0x%x(ret=%d)", fqid, ret); goto without_cgr; } @@ -988,7 +1062,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx, without_cgr: ret = qman_init_fq(fq, flags, &opts); if (ret) - DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret); + DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret); return ret; } @@ -1016,10 +1090,10 @@ static int dpaa_tx_queue_init(struct qman_fq *fq, /* no tx-confirmation */ opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi; opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo; - DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid); + DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid); ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts); if (ret) - DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret); + DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret); return ret; } @@ -1090,25 +1164,20 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) dpaa_intf->cfg = cfg; /* Initialize Rx FQ's */ - if (getenv("DPAA_NUM_RX_QUEUES")) - num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES")); - else + if (default_q) { num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES; - 
- /* if push mode queues to be enabled. Currenly we are allowing only - * one queue per thread. - */ - if (getenv("DPAA_PUSH_QUEUES_NUMBER")) { - dpaa_push_mode_max_queue = - atoi(getenv("DPAA_PUSH_QUEUES_NUMBER")); - if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE) - dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE; + } else { + if (getenv("DPAA_NUM_RX_QUEUES")) + num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES")); + else + num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES; } - /* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX + + /* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX * queues. */ - if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) { + if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) { DPAA_PMD_ERR("Invalid number of RX queues\n"); return -EINVAL; } @@ -1141,8 +1210,11 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) } for (loop = 0; loop < num_rx_fqs; loop++) { - fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid * - DPAA_PCD_FQID_MULTIPLIER + loop; + if (default_q) + fqid = cfg->rx_def; + else + fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid * + DPAA_PCD_FQID_MULTIPLIER + loop; if (dpaa_intf->cgr_rx) dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop]; @@ -1317,6 +1389,9 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name); if (!eth_dev) return -ENOMEM; + eth_dev->device = &dpaa_dev->device; + eth_dev->dev_ops = &dpaa_devops; + rte_eth_dev_probing_finish(eth_dev); return 0; } @@ -1335,6 +1410,26 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, return ret; } + if (access("/tmp/fmc.bin", F_OK) == -1) { + RTE_LOG(INFO, PMD, + "* FMC not configured.Enabling default mode\n"); + default_q = 1; + } + + /* disabling the default push mode for LS1043 */ + if (dpaa_svr_family == SVR_LS1043A_FAMILY) + dpaa_push_mode_max_queue = 0; + + /* if push mode queues to be enabled. Currenly we are allowing + * only one queue per thread. 
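When an FMC configuration is present (default_q unset), the loop above derives each Rx flow-queue id statically: every interface owns a contiguous block of DPAA_PCD_FQID_MULTIPLIER ids starting at DPAA_PCD_FQID_START. A small sketch of that arithmetic; the two constant values below are illustrative stand-ins, not the driver's real ones:

#include <assert.h>
#include <stdint.h>

/*
 * Static PCD queue id layout: interface `ifid` owns the block
 * [START + ifid * MULTIPLIER, START + (ifid + 1) * MULTIPLIER).
 * Both values are stand-ins chosen for the example.
 */
#define PCD_FQID_START          0x400
#define PCD_FQID_MULTIPLIER     32

static uint32_t pcd_fqid(uint32_t ifid, uint32_t queue)
{
        return PCD_FQID_START + ifid * PCD_FQID_MULTIPLIER + queue;
}

int main(void)
{
        assert(pcd_fqid(0, 0) == 0x400);            /* first queue, if 0 */
        assert(pcd_fqid(1, 3) == 0x400 + 32 + 3);   /* 4th queue, if 1 */
        return 0;
}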
+ */ + if (getenv("DPAA_PUSH_QUEUES_NUMBER")) { + dpaa_push_mode_max_queue = + atoi(getenv("DPAA_PUSH_QUEUES_NUMBER")); + if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE) + dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE; + } + is_global_init = 1; } @@ -1366,8 +1461,10 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, /* Invoke PMD device initialization function */ diag = dpaa_dev_init(eth_dev); - if (diag == 0) + if (diag == 0) { + rte_eth_dev_probing_finish(eth_dev); return 0; + } if (rte_eal_process_type() == RTE_PROC_PRIMARY) rte_free(eth_dev->data->dev_private); diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h index c051ae32..c79b9f86 100644 --- a/drivers/net/dpaa/dpaa_ethdev.h +++ b/drivers/net/dpaa/dpaa_ethdev.h @@ -51,6 +51,10 @@ /*Maximum number of slots available in TX ring*/ #define DPAA_TX_BURST_SIZE 7 +/* Optimal burst size for RX and TX as default */ +#define DPAA_DEF_RX_BURST_SIZE 7 +#define DPAA_DEF_TX_BURST_SIZE DPAA_TX_BURST_SIZE + #ifndef VLAN_TAG_SIZE #define VLAN_TAG_SIZE 4 /** < Vlan Header Length */ #endif @@ -74,14 +78,10 @@ #define DPAA_DEBUG_FQ_TX_ERROR 1 #define DPAA_RSS_OFFLOAD_ALL ( \ - ETH_RSS_FRAG_IPV4 | \ - ETH_RSS_NONFRAG_IPV4_TCP | \ - ETH_RSS_NONFRAG_IPV4_UDP | \ - ETH_RSS_NONFRAG_IPV4_SCTP | \ - ETH_RSS_FRAG_IPV6 | \ - ETH_RSS_NONFRAG_IPV6_TCP | \ - ETH_RSS_NONFRAG_IPV6_UDP | \ - ETH_RSS_NONFRAG_IPV6_SCTP) + ETH_RSS_IP | \ + ETH_RSS_UDP | \ + ETH_RSS_TCP | \ + ETH_RSS_SCTP) #define DPAA_TX_CKSUM_OFFLOAD_MASK ( \ PKT_TX_IP_CKSUM | \ @@ -160,12 +160,14 @@ struct dpaa_if_stats { uint64_t tund; /**parse)) & DPAA_PARSE_MASK; + uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK; DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot); @@ -351,7 +350,7 @@ dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid) prev_seg = cur_seg; } - dpaa_eth_packet_info(first_seg, (uint64_t)vaddr); + dpaa_eth_packet_info(first_seg, vaddr); rte_pktmbuf_free_seg(temp); return first_seg; @@ -394,7 +393,7 @@ dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid) mbuf->ol_flags = 0; mbuf->next = NULL; rte_mbuf_refcnt_set(mbuf, 1); - dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr); + dpaa_eth_packet_info(mbuf, mbuf->buf_addr); return mbuf; } @@ -432,7 +431,7 @@ dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr, } fd = &dqrr[i]->fd; - dpaa_intf = fq[i]->dpaa_intf; + dpaa_intf = fq[0]->dpaa_intf; format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT; @@ -455,7 +454,7 @@ dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr, mbuf->ol_flags = 0; mbuf->next = NULL; rte_mbuf_refcnt_set(mbuf, 1); - dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr); + dpaa_eth_packet_info(mbuf, mbuf->buf_addr); } } @@ -561,7 +560,8 @@ uint16_t dpaa_eth_queue_rx(void *q, struct qman_fq *fq = q; struct qm_dqrr_entry *dq; uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; - int ret; + int num_rx_bufs, ret; + uint32_t vdqcr_flags = 0; if (likely(fq->is_static)) return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs); @@ -574,8 +574,19 @@ uint16_t dpaa_eth_queue_rx(void *q, } } - ret = qman_set_vdq(fq, (nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES) ? - DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_bufs); + /* Until request for four buffers, we provide exact number of buffers. + * Otherwise we do not set the QM_VDQCR_EXACT flag. + * Not setting QM_VDQCR_EXACT flag can provide two more buffers than + * requested, so we request two less in this case. 
+ */ + if (nb_bufs < 4) { + vdqcr_flags = QM_VDQCR_EXACT; + num_rx_bufs = nb_bufs; + } else { + num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ? + (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2); + } + ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags); if (ret) return 0; @@ -593,7 +604,7 @@ uint16_t dpaa_eth_queue_rx(void *q, static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info) { int ret; - uint64_t buf = 0; + size_t buf = 0; struct bm_buffer bufs; ret = bman_acquire(bp_info->bp, &bufs, 1, 0); @@ -602,10 +613,10 @@ static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info) return (void *)buf; } - DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d", + DPAA_DP_LOG(DEBUG, "got buffer 0x%" PRIx64 " from pool %d", (uint64_t)bufs.addr, bufs.bpid); - buf = (uint64_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr) + buf = (size_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr) - bp_info->meta_data_size; if (!buf) goto out; @@ -826,6 +837,8 @@ tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf, } DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid); + if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) + dpaa_unsegmented_checksum(mbuf, fd_arr); return 0; } diff --git a/drivers/net/dpaa/meson.build b/drivers/net/dpaa/meson.build new file mode 100644 index 00000000..62dec7b0 --- /dev/null +++ b/drivers/net/dpaa/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if host_machine.system() != 'linux' + build = false +endif +deps += ['mempool_dpaa'] + +sources = files('dpaa_ethdev.c', + 'dpaa_rxtx.c') + +allow_experimental_apis = true + +install_headers('rte_pmd_dpaa.h') diff --git a/drivers/net/dpaa/rte_pmd_dpaa.h b/drivers/net/dpaa/rte_pmd_dpaa.h index 38405ec0..37eea9b0 100644 --- a/drivers/net/dpaa/rte_pmd_dpaa.h +++ b/drivers/net/dpaa/rte_pmd_dpaa.h @@ -18,9 +18,6 @@ #include /** - * @warning - * @b EXPERIMENTAL: this API may change, or be removed, without prior notice - * * Enable/Disable TX loopback * * @param port @@ -33,7 +30,7 @@ * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. 
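The volatile-dequeue sizing just added is easy to misread, so here it is in isolation: for fewer than four buffers the driver asks for the exact count with QM_VDQCR_EXACT, otherwise it under-asks by two because the portal may return up to two frames more than requested when the exact flag is absent. A standalone sketch with stand-in constant values:

#include <assert.h>

#define MAX_DEQUEUE_FRAMES      63      /* stand-in for the portal limit */
#define VDQCR_EXACT             0x1u    /* stand-in flag value */

/*
 * Small bursts: ask for the exact count. Larger bursts: under-ask by
 * two, since without the exact flag the portal may hand back up to
 * two extra frames.
 */
static int vdq_request(int nb_bufs, unsigned int *flags)
{
        *flags = 0;
        if (nb_bufs < 4) {
                *flags = VDQCR_EXACT;
                return nb_bufs;
        }
        return nb_bufs > MAX_DEQUEUE_FRAMES ?
                MAX_DEQUEUE_FRAMES - 2 : nb_bufs - 2;
}

int main(void)
{
        unsigned int flags;

        assert(vdq_request(3, &flags) == 3 && flags == VDQCR_EXACT);
        assert(vdq_request(16, &flags) == 14 && flags == 0);
        assert(vdq_request(128, &flags) == 61 && flags == 0);
        return 0;
}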
*/ -int __rte_experimental +int rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on); #endif /* _PMD_DPAA_H_ */ diff --git a/drivers/net/dpaa/rte_pmd_dpaa_version.map b/drivers/net/dpaa/rte_pmd_dpaa_version.map index 3b937b10..8cb4500b 100644 --- a/drivers/net/dpaa/rte_pmd_dpaa_version.map +++ b/drivers/net/dpaa/rte_pmd_dpaa_version.map @@ -3,12 +3,10 @@ DPDK_17.11 { local: *; }; -EXPERIMENTAL { +DPDK_18.08 { global: dpaa_eth_eventq_attach; dpaa_eth_eventq_detach; rte_pmd_dpaa_set_tx_loopback; - - local: *; } DPDK_17.11; diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile index 5a93a0b9..9b0b1433 100644 --- a/drivers/net/dpaa2/Makefile +++ b/drivers/net/dpaa2/Makefile @@ -10,14 +10,8 @@ include $(RTE_SDK)/mk/rte.vars.mk # LIB = librte_pmd_dpaa2.a -ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y) -CFLAGS += -O0 -g -CFLAGS += "-Wno-error" -else CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) -endif - CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc @@ -25,7 +19,6 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2 -CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal # versioning export map @@ -34,6 +27,9 @@ EXPORT_MAP := rte_pmd_dpaa2_version.map # library version LIBABIVER := 1 +# depends on fslmc bus which uses experimental API +CFLAGS += -DALLOW_EXPERIMENTAL_API + SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += base/dpaa2_hw_dpni.c SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_rxtx.c SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_ethdev.c diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c index b93376de..713a41bf 100644 --- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c +++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include @@ -42,7 +42,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, p_params = rte_malloc( NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); if (!p_params) { - PMD_INIT_LOG(ERR, "Memory unavailable"); + DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); return -ENOMEM; } memset(p_params, 0, DIST_PARAM_IOVA_SIZE); @@ -50,8 +50,8 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg); if (ret) { - PMD_INIT_LOG(ERR, "given rss_hf (%lx) not supported", - req_dist_set); + DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported", + req_dist_set); rte_free(p_params); return ret; } @@ -61,7 +61,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, ret = dpkg_prepare_key_cfg(&kg_cfg, p_params); if (ret) { - PMD_INIT_LOG(ERR, "Unable to prepare extract parameters"); + DPAA2_PMD_ERR("Unable to prepare extract parameters"); rte_free(p_params); return ret; } @@ -70,7 +70,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, &tc_cfg); rte_free(p_params); if (ret) { - PMD_INIT_LOG(ERR, + DPAA2_PMD_ERR( "Setting distribution for Rx failed with err: %d", ret); return ret; @@ -93,7 +93,7 @@ int dpaa2_remove_flow_dist( p_params = rte_malloc( NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); if (!p_params) { - PMD_INIT_LOG(ERR, "Memory unavailable"); + DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); return -ENOMEM; } memset(p_params, 0, DIST_PARAM_IOVA_SIZE); @@ -105,7 +105,7 @@ int dpaa2_remove_flow_dist( ret = dpkg_prepare_key_cfg(&kg_cfg, p_params); 
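The version.map change above is the standard way a DPDK PMD graduates symbols: they leave the EXPERIMENTAL node and enter a release-named node that inherits from the previous stable one. The general shape of such a linker version script, with a hypothetical symbol name standing in for the real exports:

DPDK_17.11 {
	local: *;
};

DPDK_18.08 {
	global:

	rte_pmd_example_fn;	/* hypothetical symbol, for illustration */
} DPDK_17.11;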
if (ret) { - PMD_INIT_LOG(ERR, "Unable to prepare extract parameters"); + DPAA2_PMD_ERR("Unable to prepare extract parameters"); rte_free(p_params); return ret; } @@ -114,8 +114,8 @@ int dpaa2_remove_flow_dist( &tc_cfg); rte_free(p_params); if (ret) - PMD_INIT_LOG(ERR, - "Setting distribution for Rx failed with err:%d", + DPAA2_PMD_ERR( + "Setting distribution for Rx failed with err: %d", ret); return ret; } @@ -256,7 +256,7 @@ dpaa2_distset_to_dpkg_profile_cfg( break; default: - PMD_INIT_LOG(WARNING, + DPAA2_PMD_WARN( "Unsupported flow dist option %x", dist_field); return -EINVAL; @@ -307,7 +307,7 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX, &layout); if (retcode) { - PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout\n", + DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)", retcode); return retcode; } @@ -322,9 +322,9 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg); if (retcode != 0) { - PMD_INIT_LOG(ERR, "Error in attaching the buffer pool list" - " bpid = %d Error code = %d\n", - bpool_cfg.pools[0].dpbp_id, retcode); + DPAA2_PMD_ERR("Error configuring buffer pool on interface." + " bpid = %d error code = %d", + bpool_cfg.pools[0].dpbp_id, retcode); return retcode; } diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c index 09a11d65..c5047367 100644 --- a/drivers/net/dpaa2/dpaa2_ethdev.c +++ b/drivers/net/dpaa2/dpaa2_ethdev.c @@ -18,7 +18,7 @@ #include #include -#include +#include "dpaa2_pmd_logs.h" #include #include #include @@ -27,6 +27,36 @@ #include "dpaa2_ethdev.h" #include +/* Supported Rx offloads */ +static uint64_t dev_rx_offloads_sup = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_JUMBO_FRAME; + +/* Rx offloads which cannot be disabled */ +static uint64_t dev_rx_offloads_nodis = + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_SCATTER; + +/* Supported Tx offloads */ +static uint64_t dev_tx_offloads_sup = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + +/* Tx offloads which cannot be disabled */ +static uint64_t dev_tx_offloads_nodis = + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_MT_LOCKFREE | + DEV_TX_OFFLOAD_MBUF_FAST_FREE; + struct rte_dpaa2_xstats_name_off { char name[RTE_ETH_XSTATS_NAME_SIZE]; uint8_t page_id; /* dpni statistics page id */ @@ -57,57 +87,7 @@ static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev); static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev); static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); -/** - * Atomically reads the link status information from global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. 
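For context on the two dpaa2_dev_atomic_*_link_status() helpers being deleted just below: they worked by treating the 8-byte struct rte_eth_link as one 64-bit word and publishing it with a compare-and-set loop, which is exactly what the common rte_eth_linkstatus_set()/get() helpers now centralize. A standalone sketch of that trick, using GCC/Clang atomic builtins in place of rte_atomic64_cmpset():

#include <assert.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* 8-byte link snapshot, aligned so it can live in one atomic word. */
struct link_info {
        uint32_t speed;
        uint16_t duplex;
        uint16_t status;
};
_Static_assert(sizeof(struct link_info) == sizeof(uint64_t),
               "link snapshot must fit one atomic word");

static alignas(uint64_t) struct link_info g_link;

static void link_write(const struct link_info *src)
{
        uint64_t old, val;

        memcpy(&val, src, sizeof(val));
        do {    /* retry until no concurrent writer slips in between */
                memcpy(&old, &g_link, sizeof(old));
        } while (!__atomic_compare_exchange_n((uint64_t *)&g_link, &old, val,
                                              0, __ATOMIC_SEQ_CST,
                                              __ATOMIC_SEQ_CST));
}

int main(void)
{
        struct link_info li = { 10000, 1, 1 };

        link_write(&li);
        assert(g_link.speed == 10000 && g_link.status == 1);
        return 0;
}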
- */ -static inline int -dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &dev->data->dev_link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/** - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &dev->data->dev_link; - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} +int dpaa2_logtype_pmd; static int dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) @@ -119,7 +99,7 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return -1; } @@ -131,8 +111,8 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) priv->token, vlan_id); if (ret < 0) - PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d", - ret, vlan_id, priv->hw_id); + DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d", + ret, vlan_id, priv->hw_id); return ret; } @@ -149,25 +129,25 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) if (mask & ETH_VLAN_FILTER_MASK) { /* VLAN Filter not avaialble */ if (!priv->max_vlan_filters) { - RTE_LOG(INFO, PMD, "VLAN filter not available\n"); + DPAA2_PMD_INFO("VLAN filter not available"); goto next_mask; } - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, priv->token, true); else ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, priv->token, false); if (ret < 0) - RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n", - ret); + DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret); } next_mask: if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_extend) - RTE_LOG(INFO, PMD, - "VLAN extend offload not supported\n"); + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND) + DPAA2_PMD_INFO("VLAN extend offload not supported"); } return 0; @@ -187,10 +167,10 @@ dpaa2_fw_version_get(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info)) - RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n"); + DPAA2_PMD_WARN("\tmc_get_soc_version failed"); if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info)) - RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n"); + DPAA2_PMD_WARN("\tmc_get_version failed"); ret = snprintf(fw_version, fw_size, "%x-%d.%d.%d", @@ -220,20 +200,18 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE; dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues; dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues; - dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_IPV4_CKSUM | - 
DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + dev_info->rx_offload_capa = dev_rx_offloads_sup | + dev_rx_offloads_nodis; + dev_info->tx_offload_capa = dev_tx_offloads_sup | + dev_tx_offloads_nodis; dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_10G; + + dev_info->max_hash_mac_addrs = 0; + dev_info->max_vfs = 0; + dev_info->max_vmdq_pools = ETH_16_POOLS; + dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL; } static int @@ -253,7 +231,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues, RTE_CACHE_LINE_SIZE); if (!mc_q) { - PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n"); + DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues"); return -1; } @@ -320,18 +298,39 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) struct dpaa2_dev_priv *priv = dev->data->dev_private; struct fsl_mc_io *dpni = priv->hw; struct rte_eth_conf *eth_conf = &dev->data->dev_conf; - int rx_ip_csum_offload = false; + uint64_t rx_offloads = eth_conf->rxmode.offloads; + uint64_t tx_offloads = eth_conf->txmode.offloads; + int rx_l3_csum_offload = false; + int rx_l4_csum_offload = false; + int tx_l3_csum_offload = false; + int tx_l4_csum_offload = false; int ret; PMD_INIT_FUNC_TRACE(); - if (eth_conf->rxmode.jumbo_frame == 1) { + /* Rx offloads validation */ + if (dev_rx_offloads_nodis & ~rx_offloads) { + DPAA2_PMD_WARN( + "Rx offloads non configurable - requested 0x%" PRIx64 + " ignored 0x%" PRIx64, + rx_offloads, dev_rx_offloads_nodis); + } + + /* Tx offloads validation */ + if (dev_tx_offloads_nodis & ~tx_offloads) { + DPAA2_PMD_WARN( + "Tx offloads non configurable - requested 0x%" PRIx64 + " ignored 0x%" PRIx64, + tx_offloads, dev_tx_offloads_nodis); + } + + if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) { ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, eth_conf->rxmode.max_rx_pkt_len); if (ret) { - PMD_INIT_LOG(ERR, - "unable to set mtu. check config\n"); + DPAA2_PMD_ERR( + "Unable to set mtu. check config"); return ret; } } else { @@ -343,40 +342,52 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) ret = dpaa2_setup_flow_dist(dev, eth_conf->rx_adv_conf.rss_conf.rss_hf); if (ret) { - PMD_INIT_LOG(ERR, "unable to set flow distribution." - "please check queue config\n"); + DPAA2_PMD_ERR("Unable to set flow distribution." 
+ "Check queue config"); return ret; } } - if (eth_conf->rxmode.hw_ip_checksum) - rx_ip_csum_offload = true; + if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) + rx_l3_csum_offload = true; + + if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) || + (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM)) + rx_l4_csum_offload = true; ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, - DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload); + DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload); if (ret) { - PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret); + DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret); return ret; } ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, - DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload); + DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload); if (ret) { - PMD_INIT_LOG(ERR, "Error to get RX l4 csum:Error = %d\n", ret); + DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret); return ret; } + if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) + tx_l3_csum_offload = true; + + if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) || + (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) || + (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)) + tx_l4_csum_offload = true; + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, - DPNI_OFF_TX_L3_CSUM, true); + DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload); if (ret) { - PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret); + DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret); return ret; } ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, - DPNI_OFF_TX_L4_CSUM, true); + DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload); if (ret) { - PMD_INIT_LOG(ERR, "Error to get TX l4 csum:Error = %d\n", ret); + DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret); return ret; } @@ -390,14 +401,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, DPNI_FLCTYPE_HASH, true); if (ret) { - PMD_INIT_LOG(ERR, "Error setting FLCTYPE: Err = %d\n", - ret); + DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret); return ret; } } - if (eth_conf->rxmode.hw_vlan_filter) - dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); + dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); /* update the current status */ dpaa2_dev_link_update(dev, 0); @@ -427,8 +436,8 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); - PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p", - dev, rx_queue_id, mb_pool, rx_conf); + DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p", + dev, rx_queue_id, mb_pool, rx_conf); if (!priv->bp_list || priv->bp_list->mp != mb_pool) { bpid = mempool_to_bpid(mb_pool); @@ -445,7 +454,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, memset(&cfg, 0, sizeof(struct dpni_queue)); options = options | DPNI_QUEUE_OPT_USER_CTX; - cfg.user_context = (uint64_t)(dpaa2_q); + cfg.user_context = (size_t)(dpaa2_q); /*if ls2088 or rev2 device, enable the stashing */ @@ -467,7 +476,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX, dpaa2_q->tc_index, flow_id, options, &cfg); if (ret) { - PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret); + DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret); return -1; } @@ -479,14 +488,14 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, taildrop.threshold = CONG_THRESHOLD_RX_Q; taildrop.units = DPNI_CONGESTION_UNIT_BYTES; taildrop.oal = CONG_RX_OAL; - PMD_DRV_LOG(DEBUG, "Enabling Early Drop on queue = %d", - rx_queue_id); + DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d", + 
rx_queue_id); ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, DPNI_CP_QUEUE, DPNI_QUEUE_RX, dpaa2_q->tc_index, flow_id, &taildrop); if (ret) { - PMD_INIT_LOG(ERR, "Error in setting the rx flow" - " err : = %d\n", ret); + DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)", + ret); return -1; } } @@ -529,9 +538,9 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, tc_id, flow_id, options, &tx_flow_cfg); if (ret) { - PMD_INIT_LOG(ERR, "Error in setting the tx flow: " - "tc_id=%d, flow =%d ErrorCode = %x\n", - tc_id, flow_id, -ret); + DPAA2_PMD_ERR("Error in setting the tx flow: " + "tc_id=%d, flow=%d err=%d", + tc_id, flow_id, ret); return -1; } @@ -543,8 +552,8 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, priv->token, DPNI_CONF_DISABLE); if (ret) { - PMD_INIT_LOG(ERR, "Error in set tx conf mode settings" - " ErrorCode = %x", ret); + DPAA2_PMD_ERR("Error in set tx conf mode settings: " + "err=%d", ret); return -1; } } @@ -560,7 +569,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, */ cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD; cong_notif_cfg.message_ctx = 0; - cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn; + cong_notif_cfg.message_iova = (size_t)dpaa2_q->cscn; cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | @@ -573,9 +582,9 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, tc_id, &cong_notif_cfg); if (ret) { - PMD_INIT_LOG(ERR, - "Error in setting tx congestion notification: = %d", - -ret); + DPAA2_PMD_ERR( + "Error in setting tx congestion notification: " + "err=%d", ret); return -ret; } } @@ -610,7 +619,7 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) if (unlikely(!DPAA2_PER_LCORE_DPIO)) { ret = dpaa2_affine_qbman_swp(); if (ret) { - RTE_LOG(ERR, PMD, "Failure in affining portal\n"); + DPAA2_PMD_ERR("Failure in affining portal"); return -EINVAL; } } @@ -620,8 +629,8 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) { frame_cnt = qbman_fq_state_frame_count(&state); - RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n", - rx_queue_id, frame_cnt); + DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u", + rx_queue_id, frame_cnt); } return frame_cnt; } @@ -670,14 +679,14 @@ dpaa2_interrupt_handler(void *param) PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL"); + DPAA2_PMD_ERR("dpni is NULL"); return; } ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token, irq_index, &status); if (unlikely(ret)) { - RTE_LOG(ERR, PMD, "Can't get irq status (err %d)", ret); + DPAA2_PMD_ERR("Can't get irq status (err %d)", ret); clear = 0xffffffff; goto out; } @@ -693,7 +702,7 @@ out: ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, irq_index, clear); if (unlikely(ret)) - RTE_LOG(ERR, PMD, "Can't clear irq status (err %d)", ret); + DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret); } static int @@ -710,16 +719,16 @@ dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token, irq_index, mask); if (err < 0) { - PMD_INIT_LOG(ERR, "Error: dpni_set_irq_mask():%d (%s)", err, - strerror(-err)); + DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err, + strerror(-err)); return err; } err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token, irq_index, enable); if (err < 0) - PMD_INIT_LOG(ERR, "Error: dpni_set_irq_enable():%d 
(%s)", err, - strerror(-err)); + DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err, + strerror(-err)); return err; } @@ -747,8 +756,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev) ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n", - ret, priv->hw_id); + DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d", + priv->hw_id, ret); return ret; } @@ -758,7 +767,7 @@ dpaa2_dev_start(struct rte_eth_dev *dev) ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, &qdid); if (ret) { - PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret); + DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret); return ret; } priv->qdid = qdid; @@ -769,8 +778,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev) DPNI_QUEUE_RX, dpaa2_q->tc_index, dpaa2_q->flow_id, &cfg, &qid); if (ret) { - PMD_INIT_LOG(ERR, "Error to get flow " - "information Error code = %d\n", ret); + DPAA2_PMD_ERR("Error in getting flow information: " + "err=%d", ret); return ret; } dpaa2_q->fqid = qid.fqid; @@ -785,8 +794,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev) ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW, priv->token, &err_cfg); if (ret) { - PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:" - "code = %d\n", ret); + DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d", + ret); return ret; } @@ -845,14 +854,14 @@ dpaa2_dev_stop(struct rte_eth_dev *dev) ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n", - ret, priv->hw_id); + DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev", + ret, priv->hw_id); return; } /* clear the recorded link status */ memset(&link, 0, sizeof(link)); - dpaa2_dev_atomic_write_link_status(dev, &link); + rte_eth_linkstatus_set(dev, &link); } static void @@ -878,13 +887,12 @@ dpaa2_dev_close(struct rte_eth_dev *dev) /* Clean the device first */ ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, "Failure cleaning dpni device with" - " error code %d\n", ret); + DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); return; } memset(&link, 0, sizeof(link)); - dpaa2_dev_atomic_write_link_status(dev, &link); + rte_eth_linkstatus_set(dev, &link); } static void @@ -898,17 +906,17 @@ dpaa2_dev_promiscuous_enable( PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return; } ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); if (ret < 0) - RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d\n", ret); + DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret); ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); if (ret < 0) - RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d\n", ret); + DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret); } static void @@ -922,21 +930,20 @@ dpaa2_dev_promiscuous_disable( PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return; } ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); if (ret < 0) - RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d\n", ret); + DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret); if (dev->data->all_multicast == 0) { ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); if (ret < 0) - RTE_LOG(ERR, PMD, - "Unable to disable M promisc mode %d\n", - ret); + DPAA2_PMD_ERR("Unable to disable M promisc mode %d", + ret); } } @@ 
-951,13 +958,13 @@ dpaa2_dev_allmulticast_enable( PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return; } ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); if (ret < 0) - RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d\n", ret); + DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret); } static void @@ -970,7 +977,7 @@ dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return; } @@ -980,7 +987,7 @@ dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); if (ret < 0) - RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d\n", ret); + DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret); } static int @@ -995,7 +1002,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return -EINVAL; } @@ -1004,9 +1011,11 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; if (frame_size > ETHER_MAX_LEN) - dev->data->dev_conf.rxmode.jumbo_frame = 1; + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; @@ -1016,10 +1025,10 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, frame_size); if (ret) { - PMD_DRV_LOG(ERR, "setting the max frame length failed"); + DPAA2_PMD_ERR("Setting the max frame length failed"); return -1; } - PMD_DRV_LOG(INFO, "MTU is configured %d for the device", mtu); + DPAA2_PMD_INFO("MTU configured for the device: %d", mtu); return 0; } @@ -1036,15 +1045,15 @@ dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return -1; } ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token, addr->addr_bytes); if (ret) - RTE_LOG(ERR, PMD, - "error: Adding the MAC ADDR failed: err = %d\n", ret); + DPAA2_PMD_ERR( + "error: Adding the MAC ADDR failed: err = %d", ret); return 0; } @@ -1063,18 +1072,18 @@ dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, macaddr = &data->mac_addrs[index]; if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return; } ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, priv->token, macaddr->addr_bytes); if (ret) - RTE_LOG(ERR, PMD, - "error: Removing the MAC ADDR failed: err = %d\n", ret); + DPAA2_PMD_ERR( + "error: Removing the MAC ADDR failed: err = %d", ret); } -static void +static int dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) { @@ -1085,17 +1094,20 @@ dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); - return; + DPAA2_PMD_ERR("dpni is NULL"); + return -EINVAL; } ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, priv->token, addr->addr_bytes); if (ret) - RTE_LOG(ERR, PMD, - "error: Setting the MAC ADDR failed %d\n", ret); + DPAA2_PMD_ERR( + "error: Setting the MAC ADDR failed %d", ret); + + return ret; } + static int dpaa2_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) @@ -1111,12 +1123,12 @@ int
dpaa2_dev_stats_get(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); if (!dpni) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return -EINVAL; } if (!stats) { - RTE_LOG(ERR, PMD, "stats is NULL\n"); + DPAA2_PMD_ERR("stats is NULL"); return -EINVAL; } @@ -1155,7 +1167,7 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev, return 0; err: - RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode); + DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); return retcode; }; @@ -1169,12 +1181,12 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, union dpni_statistics value[3] = {}; unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); - if (xstats == NULL) - return 0; - if (n < num) return num; + if (xstats == NULL) + return 0; + /* Get Counters from page_0*/ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 0, 0, &value[0]); @@ -1200,17 +1212,20 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, } return i; err: - RTE_LOG(ERR, PMD, "Error in obtaining extended stats (%d)\n", retcode); + DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode); return retcode; } static int dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, - __rte_unused unsigned int limit) + unsigned int limit) { unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); + if (limit < stat_cnt) + return stat_cnt; + if (xstats_names != NULL) for (i = 0; i < stat_cnt; i++) snprintf(xstats_names[i].name, @@ -1269,7 +1284,7 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, for (i = 0; i < n; i++) { if (ids[i] >= stat_cnt) { - PMD_INIT_LOG(ERR, "id value isn't valid"); + DPAA2_PMD_ERR("xstats id value isn't valid"); return -1; } values[i] = values_copy[ids[i]]; @@ -1294,7 +1309,7 @@ dpaa2_xstats_get_names_by_id( for (i = 0; i < limit; i++) { if (ids[i] >= stat_cnt) { - PMD_INIT_LOG(ERR, "id value isn't valid"); + DPAA2_PMD_ERR("xstats id value isn't valid"); return -1; } strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); @@ -1312,7 +1327,7 @@ dpaa2_dev_stats_reset(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return; } @@ -1323,7 +1338,7 @@ dpaa2_dev_stats_reset(struct rte_eth_dev *dev) return; error: - RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode); + DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); return; }; @@ -1335,24 +1350,17 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev, int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; - struct rte_eth_link link, old; + struct rte_eth_link link; struct dpni_link_state state = {0}; if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return 0; } - memset(&old, 0, sizeof(old)); - dpaa2_dev_atomic_read_link_status(dev, &old); ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); if (ret < 0) { - RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret); - return -1; - } - - if ((old.link_status == state.up) && (old.link_speed == state.rate)) { - RTE_LOG(DEBUG, PMD, "No change in status\n"); + DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); return -1; } @@ -1365,13 +1373,14 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev, else link.link_duplex = ETH_LINK_FULL_DUPLEX; - dpaa2_dev_atomic_write_link_status(dev, &link); - - if 
(link.link_status) - PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id); + ret = rte_eth_linkstatus_set(dev, &link); + if (ret == -1) + DPAA2_PMD_DEBUG("No change in status"); else - PMD_DRV_LOG(INFO, "Port %d Link is Down", dev->data->port_id); - return 0; + DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id, + link.link_status ? "Up" : "Down"); + + return ret; } /** @@ -1391,7 +1400,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) dpni = (struct fsl_mc_io *)priv->hw; if (dpni == NULL) { - RTE_LOG(ERR, PMD, "DPNI is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return ret; } @@ -1399,7 +1408,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en); if (ret) { /* Unable to obtain dpni status; Not continuing */ - PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret); + DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); return -EINVAL; } @@ -1407,13 +1416,13 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) if (!en) { ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); if (ret) { - PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret); + DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); return -EINVAL; } } ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); if (ret < 0) { - RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret); + DPAA2_PMD_ERR("Unable to get link state (%d)", ret); return -1; } @@ -1422,10 +1431,9 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) dev->data->dev_link.link_status = state.up; if (state.up) - PMD_DRV_LOG(INFO, "Port %d Link is set as UP", - dev->data->port_id); + DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id); else - PMD_DRV_LOG(INFO, "Port %d Link is DOWN", dev->data->port_id); + DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id); return ret; } @@ -1448,7 +1456,7 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev) dpni = (struct fsl_mc_io *)priv->hw; if (dpni == NULL) { - RTE_LOG(ERR, PMD, "Device has not yet been configured\n"); + DPAA2_PMD_ERR("Device has not yet been configured"); return ret; } @@ -1461,12 +1469,12 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev) do { ret = dpni_disable(dpni, 0, priv->token); if (ret) { - PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret); + DPAA2_PMD_ERR("dpni disable failed (%d)", ret); return ret; } ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled); if (ret) { - PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret); + DPAA2_PMD_ERR("dpni enable check failed (%d)", ret); return ret; } if (dpni_enabled) @@ -1475,12 +1483,12 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev) } while (dpni_enabled && --retries); if (!retries) { - PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n"); + DPAA2_PMD_WARN("Retry count exceeded disabling dpni"); /* todo- we may have to manually cleanup queues. 
*/ } else { - PMD_DRV_LOG(INFO, "Port %d Link DOWN successful", - dev->data->port_id); + DPAA2_PMD_INFO("Port %d Link DOWN successful", + dev->data->port_id); } dev->data->dev_link.link_status = 0; @@ -1502,13 +1510,13 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) dpni = (struct fsl_mc_io *)priv->hw; if (dpni == NULL || fc_conf == NULL) { - RTE_LOG(ERR, PMD, "device not configured\n"); + DPAA2_PMD_ERR("device not configured"); return ret; } ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); if (ret) { - RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret); + DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); return ret; } @@ -1558,7 +1566,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) dpni = (struct fsl_mc_io *)priv->hw; if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return ret; } @@ -1568,7 +1576,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) */ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); if (ret) { - RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)\n", ret); + DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret); return -1; } @@ -1613,16 +1621,15 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; break; default: - RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)\n", - fc_conf->mode); + DPAA2_PMD_ERR("Incorrect Flow control flag (%d)", + fc_conf->mode); return -1; } ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); if (ret) - RTE_LOG(ERR, PMD, - "Unable to set Link configuration (err=%d)\n", - ret); + DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)", + ret); /* Enable link */ dpaa2_dev_set_link_up(dev); @@ -1643,13 +1650,13 @@ dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev, if (rss_conf->rss_hf) { ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf); if (ret) { - PMD_INIT_LOG(ERR, "unable to set flow dist"); + DPAA2_PMD_ERR("Unable to set flow dist"); return ret; } } else { ret = dpaa2_remove_flow_dist(dev, 0); if (ret) { - PMD_INIT_LOG(ERR, "unable to remove flow dist"); + DPAA2_PMD_ERR("Unable to remove flow dist"); return ret; } } @@ -1702,12 +1709,12 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, } options |= DPNI_QUEUE_OPT_USER_CTX; - cfg.user_context = (uint64_t)(dpaa2_ethq); + cfg.user_context = (size_t)(dpaa2_ethq); ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, dpaa2_ethq->tc_index, flow_id, options, &cfg); if (ret) { - RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret); + DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); return ret; } @@ -1734,7 +1741,7 @@ int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, dpaa2_ethq->tc_index, flow_id, options, &cfg); if (ret) - RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret); + DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); return ret; } @@ -1801,15 +1808,15 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); if (!dpni_dev) { - PMD_INIT_LOG(ERR, "malloc failed for dpni device\n"); + DPAA2_PMD_ERR("Memory allocation failed for dpni device"); return -1; } dpni_dev->regs = rte_mcp_ptr_list[0]; ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); if (ret) { - PMD_INIT_LOG(ERR, - "Failure in opening dpni@%d with err code %d\n", + 
DPAA2_PMD_ERR( + "Failure in opening dpni@%d with err code %d", hw_id, ret); rte_free(dpni_dev); return -1; @@ -1818,16 +1825,15 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) /* Clean the device first */ ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, - "Failure cleaning dpni@%d with err code %d\n", - hw_id, ret); + DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d", + hw_id, ret); goto init_err; } ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); if (ret) { - PMD_INIT_LOG(ERR, - "Failure in get dpni@%d attribute, err code %d\n", + DPAA2_PMD_ERR( + "Failure in get dpni@%d attribute, err code %d", hw_id, ret); goto init_err; } @@ -1843,8 +1849,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) /* Using number of TX queues as number of TX TCs */ priv->nb_tx_queues = attr.num_tx_tcs; - PMD_DRV_LOG(DEBUG, "RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d", - priv->num_rx_tc, priv->nb_rx_queues, priv->nb_tx_queues); + DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d", + priv->num_rx_tc, priv->nb_rx_queues, + priv->nb_tx_queues); priv->hw = dpni_dev; priv->hw_id = hw_id; @@ -1856,7 +1863,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) /* Allocate memory for hardware structure for queues */ ret = dpaa2_alloc_rx_tx_queues(eth_dev); if (ret) { - PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n"); + DPAA2_PMD_ERR("Queue allocation Failed"); goto init_err; } @@ -1864,9 +1871,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) eth_dev->data->mac_addrs = rte_zmalloc("dpni", ETHER_ADDR_LEN * attr.mac_filter_entries, 0); if (eth_dev->data->mac_addrs == NULL) { - PMD_INIT_LOG(ERR, + DPAA2_PMD_ERR( "Failed to allocate %d bytes needed to store MAC addresses", - ETHER_ADDR_LEN * attr.mac_filter_entries); + ETHER_ADDR_LEN * attr.mac_filter_entries); ret = -ENOMEM; goto init_err; } @@ -1875,7 +1882,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) priv->token, (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes)); if (ret) { - PMD_INIT_LOG(ERR, "DPNI get mac address failed:Err Code = %d\n", + DPAA2_PMD_ERR("DPNI get mac address failed:Err Code = %d", ret); goto init_err; } @@ -1887,8 +1894,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, &layout); if (ret) { - PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout", - ret); + DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret); goto init_err; } @@ -1899,7 +1905,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX_CONFIRM, &layout); if (ret) { - PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout", + DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout", ret); goto init_err; } @@ -1908,7 +1914,6 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; eth_dev->tx_pkt_burst = dpaa2_dev_tx; - rte_fslmc_vfio_dmamap(); RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name); return 0; @@ -1931,7 +1936,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) return 0; if (!dpni) { - PMD_INIT_LOG(WARNING, "Already closed or not started"); + DPAA2_PMD_WARN("Already closed or not started"); return -1; } @@ -1958,8 +1963,8 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) /* Close the device at underlying layer*/ ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, - "Failure closing dpni device with err code %d\n", + DPAA2_PMD_ERR( + "Failure closing 
dpni device with err code %d", ret); } @@ -1971,7 +1976,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) eth_dev->rx_pkt_burst = NULL; eth_dev->tx_pkt_burst = NULL; - RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name); + DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name); return 0; } @@ -1991,8 +1996,8 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, sizeof(struct dpaa2_dev_priv), RTE_CACHE_LINE_SIZE); if (eth_dev->data->dev_private == NULL) { - PMD_INIT_LOG(CRIT, "Cannot allocate memzone for" - " private port data\n"); + DPAA2_PMD_CRIT( + "Unable to allocate memory for private data"); rte_eth_dev_release_port(eth_dev); return -ENOMEM; } @@ -2013,8 +2018,10 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, /* Invoke PMD device initialization function */ diag = dpaa2_dev_init(eth_dev); - if (diag == 0) + if (diag == 0) { + rte_eth_dev_probing_finish(eth_dev); return 0; + } if (rte_eal_process_type() == RTE_PROC_PRIMARY) rte_free(eth_dev->data->dev_private); @@ -2045,3 +2052,10 @@ static struct rte_dpaa2_driver rte_dpaa2_pmd = { }; RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd); + +RTE_INIT(dpaa2_pmd_init_log) +{ + dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2"); + if (dpaa2_logtype_pmd >= 0) + rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE); +} diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h index ba0856f3..bd69f523 100644 --- a/drivers/net/dpaa2/dpaa2_ethdev.h +++ b/drivers/net/dpaa2/dpaa2_ethdev.h @@ -50,6 +50,12 @@ /* Disable RX tail drop, default is enable */ #define DPAA2_RX_TAILDROP_OFF 0x04 +#define DPAA2_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IP | \ + ETH_RSS_UDP | \ + ETH_RSS_TCP | \ + ETH_RSS_SCTP) + /* LX2 FRC Parsed values (Little Endian) */ #define DPAA2_PKT_TYPE_ETHER 0x0060 #define DPAA2_PKT_TYPE_IPV4 0x0000 diff --git a/drivers/net/dpaa2/dpaa2_pmd_logs.h b/drivers/net/dpaa2/dpaa2_pmd_logs.h new file mode 100644 index 00000000..c04babdb --- /dev/null +++ b/drivers/net/dpaa2/dpaa2_pmd_logs.h @@ -0,0 +1,41 @@ +/*- + * SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 NXP + */ + +#ifndef _DPAA2_PMD_LOGS_H_ +#define _DPAA2_PMD_LOGS_H_ + +extern int dpaa2_logtype_pmd; + +#define DPAA2_PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, dpaa2_logtype_pmd, "dpaa2_net: " \ + fmt "\n", ##args) + +#define DPAA2_PMD_DEBUG(fmt, args...) \ + rte_log(RTE_LOG_DEBUG, dpaa2_logtype_pmd, "dpaa2_net: %s(): "\ + fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() DPAA2_PMD_DEBUG(">>") + +#define DPAA2_PMD_CRIT(fmt, args...) \ + DPAA2_PMD_LOG(CRIT, fmt, ## args) +#define DPAA2_PMD_INFO(fmt, args...) \ + DPAA2_PMD_LOG(INFO, fmt, ## args) +#define DPAA2_PMD_ERR(fmt, args...) \ + DPAA2_PMD_LOG(ERR, fmt, ## args) +#define DPAA2_PMD_WARN(fmt, args...) \ + DPAA2_PMD_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define DPAA2_PMD_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#define DPAA2_PMD_DP_DEBUG(fmt, args...) \ + DPAA2_PMD_DP_LOG(DEBUG, fmt, ## args) +#define DPAA2_PMD_DP_INFO(fmt, args...) \ + DPAA2_PMD_DP_LOG(INFO, fmt, ## args) +#define DPAA2_PMD_DP_WARN(fmt, args...) 
\ + DPAA2_PMD_DP_LOG(WARNING, fmt, ## args) + +#endif /* _DPAA2_PMD_LOGS_H_ */ diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c index 183293c1..ef109a62 100644 --- a/drivers/net/dpaa2/dpaa2_rxtx.c +++ b/drivers/net/dpaa2/dpaa2_rxtx.c @@ -16,13 +16,12 @@ #include #include -#include #include #include #include #include -#include +#include "dpaa2_pmd_logs.h" #include "dpaa2_ethdev.h" #include "base/dpaa2_hw_dpni_annot.h" @@ -37,7 +36,7 @@ static inline void __attribute__((hot)) dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc) { - PMD_RX_LOG(DEBUG, "frc = 0x%x ", frc); + DPAA2_PMD_DP_DEBUG("frc = 0x%x\t", frc); m->packet_type = RTE_PTYPE_UNKNOWN; switch (frc) { @@ -104,13 +103,12 @@ dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc) } static inline uint32_t __attribute__((hot)) -dpaa2_dev_rx_parse_slow(uint64_t hw_annot_addr) +dpaa2_dev_rx_parse_slow(struct dpaa2_annot_hdr *annotation) { uint32_t pkt_type = RTE_PTYPE_UNKNOWN; - struct dpaa2_annot_hdr *annotation = - (struct dpaa2_annot_hdr *)hw_annot_addr; - PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4); + DPAA2_PMD_DP_DEBUG("(slow parse) Annotation = 0x%" PRIx64 "\t", + annotation->word4); if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) { pkt_type = RTE_PTYPE_L2_ETHER_ARP; goto parse_done; @@ -167,12 +165,13 @@ parse_done: } static inline uint32_t __attribute__((hot)) -dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, uint64_t hw_annot_addr) +dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr) { struct dpaa2_annot_hdr *annotation = (struct dpaa2_annot_hdr *)hw_annot_addr; - PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4); + DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t", + annotation->word4); /* Check offloads first */ if (BIT_ISSET_AT_POS(annotation->word3, @@ -203,29 +202,27 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, uint64_t hw_annot_addr) return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP; default: - PMD_RX_LOG(DEBUG, "Slow parse the parsing results\n"); break; } - return dpaa2_dev_rx_parse_slow(hw_annot_addr); + return dpaa2_dev_rx_parse_slow(annotation); } static inline struct rte_mbuf *__attribute__((hot)) eth_sg_fd_to_mbuf(const struct qbman_fd *fd) { struct qbman_sge *sgt, *sge; - dma_addr_t sg_addr; + size_t sg_addr, fd_addr; int i = 0; - uint64_t fd_addr; struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp; - fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); + fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); /* Get Scatter gather table address */ sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd)); sge = &sgt[i++]; - sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge)); + sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge)); /* First Scatter gather entry */ first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr, @@ -243,14 +240,14 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd) DPAA2_GET_FD_FRC_PARSE_SUM(fd)); else first_seg->packet_type = dpaa2_dev_rx_parse(first_seg, - (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) - + DPAA2_FD_PTA_SIZE); + (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_FD_PTA_SIZE)); rte_mbuf_refcnt_set(first_seg, 1); cur_seg = first_seg; while (!DPAA2_SG_IS_FINAL(sge)) { sge = &sgt[i++]; - sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR( + sg_addr = (size_t)DPAA2_IOVA_TO_VADDR( DPAA2_GET_FLE_ADDR(sge)); next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr, rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size); @@ -299,11 +296,11 
@@ eth_fd_to_mbuf(const struct qbman_fd *fd) dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd)); else mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, - (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) - + DPAA2_FD_PTA_SIZE); + (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_FD_PTA_SIZE)); - PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d," - "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n", + DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d," + "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n", mbuf, mbuf->buf_addr, mbuf->data_off, DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, @@ -320,15 +317,9 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, struct qbman_sge *sgt, *sge = NULL; int i; - if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) { - int ret = rte_vlan_insert(&mbuf); - if (ret) - return ret; - } - temp = rte_pktmbuf_alloc(mbuf->pool); if (temp == NULL) { - PMD_TX_LOG(ERR, "No memory to allocate S/G table"); + DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n"); return -ENOMEM; } @@ -340,7 +331,7 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg); /*Set Scatter gather table and Scatter gather entries*/ sgt = (struct qbman_sge *)( - (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + DPAA2_GET_FD_OFFSET(fd)); for (i = 0; i < mbuf->nb_segs; i++) { @@ -392,17 +383,10 @@ static void __attribute__ ((noinline)) __attribute__((hot)) eth_mbuf_to_fd(struct rte_mbuf *mbuf, struct qbman_fd *fd, uint16_t bpid) { - if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) { - if (rte_vlan_insert(&mbuf)) { - rte_pktmbuf_free(mbuf); - return; - } - } - DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid); - PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d," - "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n", + DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d," + "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n", mbuf, mbuf->buf_addr, mbuf->data_off, DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, @@ -431,15 +415,9 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf, struct rte_mbuf *m; void *mb = NULL; - if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) { - int ret = rte_vlan_insert(&mbuf); - if (ret) - return ret; - } - if (rte_dpaa2_mbuf_alloc_bulk( rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) { - PMD_TX_LOG(WARNING, "Unable to allocated DPAA2 buffer"); + DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n"); return -1; } m = (struct rte_mbuf *)mb; @@ -455,19 +433,26 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf, DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid); - PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p", - (void *)mbuf, mbuf->buf_addr); - - PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d", - DPAA2_GET_FD_ADDR(fd), + DPAA2_PMD_DP_DEBUG( + "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d," + " meta: %d, off: %d, len: %d\n", + (void *)mbuf, + mbuf->buf_addr, + DPAA2_GET_FD_ADDR(fd), DPAA2_GET_FD_BPID(fd), rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd)); - return 0; + return 0; } +/* This function assumes that the caller will keep the same value for nb_pkts + * across calls per queue; if that is not the case, better use the non-prefetch + * version of the rx call.
+ * It will return the packets as requested in the previous call without honoring + * the current nb_pkts or bufs space. + */ uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) { @@ -475,7 +460,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; struct qbman_result *dq_storage, *dq_storage1 = NULL; uint32_t fqid = dpaa2_q->fqid; - int ret, num_rx = 0; + int ret, num_rx = 0, pull_size; uint8_t pending, status; struct qbman_swp *swp; const struct qbman_fd *fd, *next_fd; @@ -483,48 +468,51 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) struct queue_storage_info_t *q_storage = dpaa2_q->q_storage; struct rte_eth_dev *dev = dpaa2_q->dev; - if (unlikely(!DPAA2_PER_LCORE_DPIO)) { - ret = dpaa2_affine_qbman_swp(); + if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) { + ret = dpaa2_affine_qbman_ethrx_swp(); if (ret) { - RTE_LOG(ERR, PMD, "Failure in affining portal\n"); + DPAA2_PMD_ERR("Failure in affining portal"); return 0; } } - swp = DPAA2_PER_LCORE_PORTAL; + swp = DPAA2_PER_LCORE_ETHRX_PORTAL; + pull_size = (nb_pkts > DPAA2_DQRR_RING_SIZE) ? + DPAA2_DQRR_RING_SIZE : nb_pkts; if (unlikely(!q_storage->active_dqs)) { q_storage->toggle = 0; dq_storage = q_storage->dq_storage[q_storage->toggle]; - q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ? - DPAA2_DQRR_RING_SIZE : nb_pkts; + q_storage->last_num_pkts = pull_size; qbman_pull_desc_clear(&pulldesc); qbman_pull_desc_set_numframes(&pulldesc, q_storage->last_num_pkts); qbman_pull_desc_set_fq(&pulldesc, fqid); qbman_pull_desc_set_storage(&pulldesc, dq_storage, - (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); - if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) { + (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); + if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { while (!qbman_check_command_complete( - get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index))) + get_swp_active_dqs( + DPAA2_PER_LCORE_ETHRX_DPIO->index))) ; - clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index); + clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); } while (1) { if (qbman_swp_pull(swp, &pulldesc)) { - PMD_RX_LOG(WARNING, "VDQ command is not issued." - "QBMAN is busy\n"); + DPAA2_PMD_DP_DEBUG("VDQ command is not issued." + " QBMAN is busy (1)\n"); /* Portal was busy, try again */ continue; } break; } q_storage->active_dqs = dq_storage; - q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index; - set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage); + q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, + dq_storage); } dq_storage = q_storage->active_dqs; - rte_prefetch0((void *)((uint64_t)(dq_storage))); - rte_prefetch0((void *)((uint64_t)(dq_storage + 1))); + rte_prefetch0((void *)(size_t)(dq_storage)); + rte_prefetch0((void *)(size_t)(dq_storage + 1)); /* Prepare next pull descriptor. This will give space for the * prefetching done on DQRR entries
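Worth pausing on the structure the hunk above sets up: dpaa2_dev_prefetch_rx() double-buffers its dequeue storage, so while the caller walks the results of the previous volatile dequeue, the next pull command is already in flight into the other of the two per-queue buffers selected by q_storage->toggle. Below is a minimal sketch of that double-buffering idea, detached from the QBMAN API; issue_pull() and consume() are hypothetical stand-ins for qbman_swp_pull() and the DQRR walk.

	/* Hypothetical stand-ins for the hardware pull and result walk. */
	void issue_pull(void *storage);
	void consume(void *storage);

	struct prefetch_q {
		void *storage[2];	/* two result buffers */
		int toggle;		/* buffer the hardware fills next */
	};

	/* One poll cycle: kick off the *next* pull before draining the
	 * results of the previous one, hiding the dequeue latency.
	 */
	static void prefetch_poll(struct prefetch_q *q)
	{
		void *cur = q->storage[q->toggle];

		q->toggle ^= 1;
		issue_pull(q->storage[q->toggle]);	/* next batch, async */
		consume(cur);				/* previous batch */
	}

This is also why the comment above warns against varying nb_pkts between calls: the size of the batch returned now was fixed when the previous call issued its pull.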
@@ -532,10 +520,10 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) q_storage->toggle ^= 1; dq_storage1 = q_storage->dq_storage[q_storage->toggle]; qbman_pull_desc_clear(&pulldesc); - qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE); + qbman_pull_desc_set_numframes(&pulldesc, pull_size); qbman_pull_desc_set_fq(&pulldesc, fqid); qbman_pull_desc_set_storage(&pulldesc, dq_storage1, - (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1); + (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1); /* Check if the previous issued command is completed. * Also seems like the SWP is shared between the Ethernet Driver @@ -554,7 +542,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) */ while (!qbman_check_new_result(dq_storage)) ; - rte_prefetch0((void *)((uint64_t)(dq_storage + 2))); + rte_prefetch0((void *)((size_t)(dq_storage + 2))); /* Check whether Last Pull command is Expired and * setting Condition for Loop termination */ @@ -569,7 +557,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) next_fd = qbman_result_DQ_fd(dq_storage + 1); /* Prefetch Annotation address for the parse results */ - rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(next_fd) + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(next_fd) + DPAA2_FD_PTA_SIZE + 16)); if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg)) @@ -578,31 +566,31 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) bufs[num_rx] = eth_fd_to_mbuf(fd); bufs[num_rx]->port = dev->data->port_id; - if (dev->data->dev_conf.rxmode.hw_vlan_strip) + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) rte_vlan_strip(bufs[num_rx]); dq_storage++; num_rx++; } while (pending); - if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) { while (!qbman_check_command_complete( - get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index))) ; - clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index); + if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { while (!qbman_check_command_complete( + get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index))) ; + clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); } /* issue a volatile dequeue command for next pull */ while (1) { if (qbman_swp_pull(swp, &pulldesc)) { - PMD_RX_LOG(WARNING, "VDQ command is not issued." - "QBMAN is busy\n"); + DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
+ "QBMAN is busy (2)\n"); continue; } break; } q_storage->active_dqs = dq_storage1; - q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index; - set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1); + q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1); dpaa2_q->rx_pkts += num_rx; @@ -616,7 +604,7 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp, struct dpaa2_queue *rxq, struct rte_event *ev) { - rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) + + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + DPAA2_FD_PTA_SIZE + 16)); ev->flow_id = rxq->ev.flow_id; @@ -641,7 +629,7 @@ dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)), { uint8_t dqrr_index; - rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) + + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + DPAA2_FD_PTA_SIZE + 16)); ev->flow_id = rxq->ev.flow_id; @@ -686,13 +674,13 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) if (unlikely(!DPAA2_PER_LCORE_DPIO)) { ret = dpaa2_affine_qbman_swp(); if (ret) { - RTE_LOG(ERR, PMD, "Failure in affining portal\n"); + DPAA2_PMD_ERR("Failure in affining portal"); return 0; } } swp = DPAA2_PER_LCORE_PORTAL; - PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid); + DPAA2_PMD_DP_DEBUG("===> dev =%p, fqid =%d\n", dev, dpaa2_q->fqid); /*Prepare enqueue descriptor*/ qbman_eq_desc_clear(&eqdesc); @@ -726,7 +714,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) fd_arr[loop].simple.frc = 0; DPAA2_RESET_FD_CTRL((&fd_arr[loop])); - DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL); + DPAA2_SET_FD_FLC((&fd_arr[loop]), (size_t)NULL); if (likely(RTE_MBUF_DIRECT(*bufs))) { mp = (*bufs)->pool; /* Check the basic scenario and set @@ -736,8 +724,10 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) priv->bp_list->dpaa2_ops_index && (*bufs)->nb_segs == 1 && rte_mbuf_refcnt_read((*bufs)) == 1)) { - if (unlikely((*bufs)->ol_flags - & PKT_TX_VLAN_PKT)) { + if (unlikely(((*bufs)->ol_flags + & PKT_TX_VLAN_PKT) || + (dev->data->dev_conf.txmode.offloads + & DEV_TX_OFFLOAD_VLAN_INSERT))) { ret = rte_vlan_insert(bufs); if (ret) goto send_n_return; @@ -753,19 +743,26 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) } /* Not a hw_pkt pool allocated frame */ if (unlikely(!mp || !priv->bp_list)) { - PMD_TX_LOG(ERR, "err: no bpool attached"); + DPAA2_PMD_ERR("Err: No buffer pool attached"); goto send_n_return; } + if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) || + (dev->data->dev_conf.txmode.offloads + & DEV_TX_OFFLOAD_VLAN_INSERT))) { + int ret = rte_vlan_insert(bufs); + if (ret) + goto send_n_return; + } if (mp->ops_index != priv->bp_list->dpaa2_ops_index) { - PMD_TX_LOG(ERR, "non hw offload bufffer "); + DPAA2_PMD_WARN("Non DPAA2 buffer pool"); /* alloc should be from the default buffer pool * attached to this interface */ bpid = priv->bp_list->buf_pool.bpid; if (unlikely((*bufs)->nb_segs > 1)) { - PMD_TX_LOG(ERR, "S/G support not added" + DPAA2_PMD_ERR("S/G support not added" " for non hw offload buffer"); goto send_n_return; } diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c index 69cf119c..9f228169 100644 --- a/drivers/net/dpaa2/mc/dpni.c +++ b/drivers/net/dpaa2/mc/dpni.c @@ -198,7 +198,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_io, token); cmd_params = (struct dpni_cmd_set_pools *)cmd.params; cmd_params->num_dpbp = cfg->num_dpbp; - for (i = 0; i < DPNI_MAX_DPBP; i++) { + for (i = 0; i < 
cmd_params->num_dpbp; i++) { cmd_params->pool[i].dpbp_id = cpu_to_le16(cfg->pools[i].dpbp_id); cmd_params->pool[i].priority_mask = diff --git a/drivers/net/dpaa2/meson.build b/drivers/net/dpaa2/meson.build new file mode 100644 index 00000000..213f0d72 --- /dev/null +++ b/drivers/net/dpaa2/meson.build @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if host_machine.system() != 'linux' + build = false +endif + +deps += ['mempool_dpaa2'] +sources = files('base/dpaa2_hw_dpni.c', + 'dpaa2_ethdev.c', + 'dpaa2_rxtx.c', + 'mc/dpkg.c', + 'mc/dpni.c') + +includes += include_directories('base', 'mc') + +# depends on fslmc bus which uses experimental API +allow_experimental_apis = true diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile index ba81a1f4..9c87e883 100644 --- a/drivers/net/e1000/Makefile +++ b/drivers/net/e1000/Makefile @@ -22,7 +22,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # -CFLAGS_BASE_DRIVER = -wd177 -wd181 -wd188 -wd869 -wd2259 +CFLAGS_BASE_DRIVER = -diag-disable 177 -diag-disable 181 +CFLAGS_BASE_DRIVER += -diag-disable 869 -diag-disable 2259 else # # CFLAGS for gcc/clang @@ -61,6 +62,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82575.c SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_i210.c SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_api.c SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_ich8lan.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_logs.c SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mac.c SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_manage.c SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mbx.c diff --git a/drivers/net/e1000/base/e1000_82575.c b/drivers/net/e1000/base/e1000_82575.c index 15c7dd84..da1a9a70 100644 --- a/drivers/net/e1000/base/e1000_82575.c +++ b/drivers/net/e1000/base/e1000_82575.c @@ -312,6 +312,9 @@ STATIC s32 e1000_init_phy_params_82575(struct e1000_hw *hw) phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; break; + case BCM54616_E_PHY_ID: + phy->type = e1000_phy_none; + break; default: ret_val = -E1000_ERR_PHY; goto out; @@ -1607,6 +1610,8 @@ STATIC s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) case e1000_phy_82580: ret_val = e1000_copper_link_setup_82577(hw); break; + case e1000_phy_none: + break; default: ret_val = -E1000_ERR_PHY; break; diff --git a/drivers/net/e1000/base/e1000_defines.h b/drivers/net/e1000/base/e1000_defines.h index dbc2bbbe..e2101c17 100644 --- a/drivers/net/e1000/base/e1000_defines.h +++ b/drivers/net/e1000/base/e1000_defines.h @@ -1274,6 +1274,7 @@ POSSIBILITY OF SUCH DAMAGE. 
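The e1000_82575.c hunks above and the BCM54616_E_PHY_ID definition just below wire a new Broadcom PHY into the base driver: probing maps the ID onto the catch-all type e1000_phy_none, and the copper-link setup switch then treats that type as requiring no vendor-specific configuration. A condensed sketch of the resulting dispatch follows; the helper is hypothetical, and only the constants and fields come from the e1000 base headers.

	/* Hypothetical condensation of the two switch additions above. */
	static s32 phy_setup_for_id(struct e1000_hw *hw, u32 phy_id)
	{
		/* e1000_init_phy_params_82575(): ID -> type mapping */
		switch (phy_id) {
		case BCM54616_E_PHY_ID:
			hw->phy.type = e1000_phy_none;
			break;
		default:
			return -E1000_ERR_PHY;
		}

		/* e1000_setup_copper_link_82575(): a PHY of type
		 * e1000_phy_none needs no link setup, so succeed.
		 */
		return E1000_SUCCESS;
	}
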
#define I350_I_PHY_ID 0x015403B0 #define I210_I_PHY_ID 0x01410C00 #define IGP04E1000_E_PHY_ID 0x02A80391 +#define BCM54616_E_PHY_ID 0x03625D10 #define M88_VENDOR 0x0141 /* M88E1000 Specific Registers */ diff --git a/drivers/net/e1000/base/e1000_phy.h b/drivers/net/e1000/base/e1000_phy.h index 3e45a9ef..2cd0e14b 100644 --- a/drivers/net/e1000/base/e1000_phy.h +++ b/drivers/net/e1000/base/e1000_phy.h @@ -330,4 +330,12 @@ struct sfp_e1000_flags { #define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00 #define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100 +/* EEPROM byte offsets */ +#define IGB_SFF_8472_SWAP 0x5C +#define IGB_SFF_8472_COMP 0x5E + +/* Bitmasks */ +#define IGB_SFF_ADDRESSING_MODE 0x4 +#define IGB_SFF_8472_UNSUP 0x00 + #endif diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h index 23b089c8..902001f3 100644 --- a/drivers/net/e1000/e1000_ethdev.h +++ b/drivers/net/e1000/e1000_ethdev.h @@ -4,6 +4,10 @@ #ifndef _E1000_ETHDEV_H_ #define _E1000_ETHDEV_H_ + +#include + +#include #include #include @@ -27,6 +31,7 @@ #define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */ #define IGB_VFTA_SIZE 128 +#define IGB_HKEY_MAX_INDEX 10 #define IGB_MAX_RX_QUEUE_NUM 8 #define IGB_MAX_RX_QUEUE_NUM_82576 16 @@ -229,8 +234,8 @@ struct igb_ethertype_filter { }; struct igb_rte_flow_rss_conf { - struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */ - uint16_t num; /**< Number of entries in queue[]. */ + struct rte_flow_action_rss conf; /**< RSS parameters. */ + uint8_t key[IGB_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */ uint16_t queue[IGB_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */ }; @@ -357,6 +362,9 @@ void eth_igb_rx_queue_release(void *rxq); void igb_dev_clear_queues(struct rte_eth_dev *dev); void igb_dev_free_queues(struct rte_eth_dev *dev); +uint64_t igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev); +uint64_t igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev); + int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, @@ -370,6 +378,9 @@ int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset); int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset); int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset); +uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev); +uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev); + int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf); @@ -417,6 +428,8 @@ void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_txq_info *qinfo); +uint32_t em_get_max_pktlen(struct rte_eth_dev *dev); + /* * RX/TX EM function prototypes */ @@ -426,6 +439,9 @@ void eth_em_rx_queue_release(void *rxq); void em_dev_clear_queues(struct rte_eth_dev *dev); void em_dev_free_queues(struct rte_eth_dev *dev); +uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev); +uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev); + int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, @@ -439,6 +455,9 @@ int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset); int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset); int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset); 
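The em_get_*_offloads_capa() prototypes added just below belong to the 18.08 offloads rework: capabilities are now reported per port and per queue, and a per-queue request is only valid if it stays inside the per-queue mask (eth_em_infos_get() later ORs the queue capabilities into the port mask to keep the queue set a subset of the port set). An illustrative application-side check, using only standard rte_eth_dev_info fields:

	#include <rte_ethdev.h>

	/* Illustrative only: a requested per-queue Rx offload mask is
	 * acceptable when it contains nothing beyond the per-queue
	 * capabilities; anything else must be enabled port-wide via
	 * dev_conf.rxmode.offloads instead.
	 */
	static int rx_queue_offloads_valid(const struct rte_eth_dev_info *info,
					   uint64_t requested)
	{
		return (requested & ~info->rx_queue_offload_capa) == 0;
	}
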
+uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev); +uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev); + int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf); @@ -487,6 +506,10 @@ int eth_igb_syn_filter_set(struct rte_eth_dev *dev, int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, struct rte_eth_flex_filter *filter, bool add); +int igb_rss_conf_init(struct igb_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in); +int igb_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with); int igb_config_rss_filter(struct rte_eth_dev *dev, struct igb_rte_flow_rss_conf *conf, bool add); diff --git a/drivers/net/e1000/e1000_logs.c b/drivers/net/e1000/e1000_logs.c new file mode 100644 index 00000000..22173939 --- /dev/null +++ b/drivers/net/e1000/e1000_logs.c @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include "e1000_logs.h" + +/* declared as extern in e1000_logs.h */ +int e1000_logtype_init; +int e1000_logtype_driver; + +/* avoids double registering of logs if EM and IGB drivers are in use */ +static int e1000_log_initialized; + +void +e1000_igb_init_log(void) +{ + if (!e1000_log_initialized) { + e1000_logtype_init = rte_log_register("pmd.net.e1000.init"); + if (e1000_logtype_init >= 0) + rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE); + e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver"); + if (e1000_logtype_driver >= 0) + rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE); + e1000_log_initialized = 1; + } +} diff --git a/drivers/net/e1000/e1000_logs.h b/drivers/net/e1000/e1000_logs.h index 50348e9e..69d3d311 100644 --- a/drivers/net/e1000/e1000_logs.h +++ b/drivers/net/e1000/e1000_logs.h @@ -5,6 +5,8 @@ #ifndef _E1000_LOGS_H_ #define _E1000_LOGS_H_ +#include + extern int e1000_logtype_init; #define PMD_INIT_LOG(level, fmt, args...) \ rte_log(RTE_LOG_ ## level, e1000_logtype_init, \ @@ -41,4 +43,8 @@ extern int e1000_logtype_driver; #define PMD_DRV_LOG(level, fmt, args...) 
\ PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +/* log init function shared by e1000 and igb drivers */ +void e1000_igb_init_log(void); + #endif /* _E1000_LOGS_H_ */ diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c index 242375ff..053e855b 100644 --- a/drivers/net/e1000/em_ethdev.c +++ b/drivers/net/e1000/em_ethdev.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -20,7 +19,6 @@ #include #include #include -#include #include #include @@ -94,6 +92,8 @@ static int em_get_rx_buffer_size(struct e1000_hw *hw); static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t pool); static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index); +static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev, + struct ether_addr *addr); static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set, @@ -105,9 +105,6 @@ static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev, static enum e1000_fc_mode em_fc_setting = e1000_fc_full; -int e1000_logtype_init; -int e1000_logtype_driver; - /* * The set of PCI devices this driver supports */ @@ -190,6 +187,7 @@ static const struct eth_dev_ops eth_em_ops = { .dev_led_off = eth_em_led_off, .flow_ctrl_get = eth_em_flow_ctrl_get, .flow_ctrl_set = eth_em_flow_ctrl_set, + .mac_addr_set = eth_em_default_mac_addr_set, .mac_addr_add = eth_em_rar_set, .mac_addr_remove = eth_em_rar_clear, .set_mc_addr_list = eth_em_set_mc_addr_list, @@ -197,57 +195,6 @@ static const struct eth_dev_ops eth_em_ops = { .txq_info_get = em_txq_info_get, }; -/** - * Atomically reads the link status information from global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -rte_em_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &(dev->data->dev_link); - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/** - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. 
- */ -static inline int -rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} /** * eth_em_dev_is_ich8 - Check for ICH8 device @@ -506,6 +453,7 @@ eth_em_configure(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; + PMD_INIT_FUNC_TRACE(); return 0; @@ -802,7 +750,7 @@ eth_em_stop(struct rte_eth_dev *dev) /* clear the recorded link status */ memset(&link, 0, sizeof(link)); - rte_em_dev_atomic_write_link_status(dev, &link); + rte_eth_linkstatus_set(dev, &link); if (!rte_intr_allow_others(intr_handle)) /* resume to the default handler */ @@ -1069,9 +1017,11 @@ eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queu return 0; } -static uint32_t -em_get_max_pktlen(const struct e1000_hw *hw) +uint32_t +em_get_max_pktlen(struct rte_eth_dev *dev) { + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + switch (hw->mac.type) { case e1000_82571: case e1000_82572: @@ -1100,20 +1050,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ - dev_info->max_rx_pktlen = em_get_max_pktlen(hw); + dev_info->max_rx_pktlen = em_get_max_pktlen(dev); dev_info->max_mac_addrs = hw->mac.rar_entry_count; - dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM; /* * Starting with 631xESB hw supports 2 TX/RX queues per port. 
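The rte_em_dev_atomic_read/write_link_status() helpers removed above are superseded by the ethdev-layer rte_eth_linkstatus_get()/rte_eth_linkstatus_set(), which perform the atomic swap themselves; rte_eth_linkstatus_set() additionally reports whether anything changed (0 on change, -1 otherwise), which is the value eth_em_link_update() forwards below. A minimal sketch of the new pattern, assuming the driver has already learned the link state by other means:

	#include <string.h>
	#include <rte_ethdev_driver.h>

	/* Minimal sketch: fill a local rte_eth_link and let the ethdev
	 * layer do the atomic update; speed/duplex probing elided.
	 */
	static int example_link_update(struct rte_eth_dev *dev, int link_up)
	{
		struct rte_eth_link link;

		memset(&link, 0, sizeof(link));	/* all-zero == link down */
		if (link_up) {
			link.link_status = ETH_LINK_UP;
			link.link_speed = ETH_SPEED_NUM_1G;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			link.link_autoneg = ETH_LINK_AUTONEG;
		}
		return rte_eth_linkstatus_set(dev, &link);	/* 0 or -1 */
	}
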
@@ -1135,6 +1074,13 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_rx_queues = 1; dev_info->max_tx_queues = 1; + dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev); + dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) | + dev_info->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev); + dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) | + dev_info->tx_queue_offload_capa; + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { .nb_max = E1000_MAX_RING_DESC, .nb_min = E1000_MIN_RING_DESC, @@ -1152,6 +1098,12 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; + + /* Preferred queue parameters */ + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_txportconf.ring_size = 256; + dev_info->default_rxportconf.ring_size = 256; } /* return 0 means link status changed, -1 means not changed */ @@ -1160,7 +1112,7 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_eth_link link, old; + struct rte_eth_link link; int link_check, count; link_check = 0; @@ -1195,8 +1147,6 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete) rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL); } memset(&link, 0, sizeof(link)); - rte_em_dev_atomic_read_link_status(dev, &link); - old = link; /* Now we check if a transition has happened */ if (link_check && (link.link_status == ETH_LINK_DOWN)) { @@ -1210,19 +1160,13 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete) link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); } else if (!link_check && (link.link_status == ETH_LINK_UP)) { - link.link_speed = 0; + link.link_speed = ETH_SPEED_NUM_NONE; link.link_duplex = ETH_LINK_HALF_DUPLEX; link.link_status = ETH_LINK_DOWN; link.link_autoneg = ETH_LINK_FIXED; } - rte_em_dev_atomic_write_link_status(dev, &link); - - /* not changed */ - if (old.link_status == link.link_status) - return -1; - /* changed */ - return 0; + return rte_eth_linkstatus_set(dev, &link); } /* @@ -1460,15 +1404,18 @@ em_vlan_hw_strip_enable(struct rte_eth_dev *dev) static int eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask) { + struct rte_eth_rxmode *rxmode; + + rxmode = &dev->data->dev_conf.rxmode; if(mask & ETH_VLAN_STRIP_MASK){ - if (dev->data->dev_conf.rxmode.hw_vlan_strip) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) em_vlan_hw_strip_enable(dev); else em_vlan_hw_strip_disable(dev); } if(mask & ETH_VLAN_FILTER_MASK){ - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) em_vlan_hw_filter_enable(dev); else em_vlan_hw_filter_disable(dev); @@ -1631,8 +1578,8 @@ eth_em_interrupt_action(struct rte_eth_dev *dev, if (ret < 0) return 0; - memset(&link, 0, sizeof(link)); - rte_em_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); + if (link.link_status) { PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s", dev->data->port_id, link.link_speed, @@ -1809,6 +1756,15 @@ eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index) e1000_rar_set(hw, addr, index); } +static int +eth_em_default_mac_addr_set(struct rte_eth_dev *dev, + struct ether_addr *addr) +{ + eth_em_rar_clear(dev, 
0); + + return eth_em_rar_set(dev, (void *)addr, 0, 0); +} + static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { @@ -1835,10 +1791,12 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) /* switch to jumbo mode if needed */ if (frame_size > ETHER_MAX_LEN) { - dev->data->dev_conf.rxmode.jumbo_frame = 1; + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; rctl |= E1000_RCTL_LPE; } else { - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; rctl &= ~E1000_RCTL_LPE; } E1000_WRITE_REG(hw, E1000_RCTL, rctl); @@ -1864,14 +1822,8 @@ RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map); RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci"); -RTE_INIT(e1000_init_log); -static void -e1000_init_log(void) +/* see e1000_logs.c */ +RTE_INIT(igb_init_log) { - e1000_logtype_init = rte_log_register("pmd.net.e1000.init"); - if (e1000_logtype_init >= 0) - rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE); - e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver"); - if (e1000_logtype_driver >= 0) - rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE); + e1000_igb_init_log(); } diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c index 02fae100..7d2ac4eb 100644 --- a/drivers/net/e1000/em_rxtx.c +++ b/drivers/net/e1000/em_rxtx.c @@ -85,6 +85,7 @@ struct em_rx_queue { struct em_rx_entry *sw_ring; /**< address of RX software ring. */ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint64_t offloads; /**< Offloads of DEV_RX_OFFLOAD_* */ uint16_t nb_rx_desc; /**< number of RX descriptors. */ uint16_t rx_tail; /**< current value of RDT register. */ uint16_t nb_rx_hold; /**< number of held free RX desc. */ @@ -163,6 +164,7 @@ struct em_tx_queue { uint8_t wthresh; /**< Write-back threshold register. */ struct em_ctx_info ctx_cache; /**< Hardware context history.*/ + uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */ }; #if 1 @@ -1151,6 +1153,36 @@ em_reset_tx_queue(struct em_tx_queue *txq) memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache)); } +uint64_t +em_get_tx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t tx_offload_capa; + + RTE_SET_USED(dev); + tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; + + return tx_offload_capa; +} + +uint64_t +em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t tx_queue_offload_capa; + + /* + * As only one Tx queue can be used, let per queue offloading + * capability be same to per port queue offloading capability + * for better convenience. + */ + tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev); + + return tx_queue_offload_capa; +} + int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1163,9 +1195,12 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev, struct e1000_hw *hw; uint32_t tsize; uint16_t tx_rs_thresh, tx_free_thresh; + uint64_t offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + /* * Validate number of transmit descriptors. 
* It must not exceed hardware maximum, and must be multiple @@ -1269,6 +1304,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev, em_reset_tx_queue(txq); dev->data->tx_queues[queue_idx] = txq; + txq->offloads = offloads; return 0; } @@ -1313,6 +1349,44 @@ em_reset_rx_queue(struct em_rx_queue *rxq) rxq->pkt_last_seg = NULL; } +uint64_t +em_get_rx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_offload_capa; + uint32_t max_rx_pktlen; + + max_rx_pktlen = em_get_max_pktlen(dev); + + rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER; + if (max_rx_pktlen > ETHER_MAX_LEN) + rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME; + + return rx_offload_capa; +} + +uint64_t +em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_queue_offload_capa; + + /* + * As only one Rx queue can be used, let per queue offloading + * capability be same to per port queue offloading capability + * for better convenience. + */ + rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev); + + return rx_queue_offload_capa; +} + int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1325,9 +1399,12 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, struct em_rx_queue *rxq; struct e1000_hw *hw; uint32_t rsize; + uint64_t offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; + /* * Validate number of receive descriptors. * It must not exceed hardware maximum, and must be multiple @@ -1382,8 +1459,10 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, rxq->rx_free_thresh = rx_conf->rx_free_thresh; rxq->queue_id = queue_idx; rxq->port_id = dev->data->port_id; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? - 0 : ETHER_CRC_LEN); + if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) + rxq->crc_len = ETHER_CRC_LEN; + else + rxq->crc_len = 0; rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx)); rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx)); @@ -1395,6 +1474,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, dev->data->rx_queues[queue_idx] = rxq; em_reset_rx_queue(rxq); + rxq->offloads = offloads; return 0; } @@ -1646,6 +1726,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) { struct e1000_hw *hw; struct em_rx_queue *rxq; + struct rte_eth_rxmode *rxmode; uint32_t rctl; uint32_t rfctl; uint32_t rxcsum; @@ -1654,6 +1735,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) int ret; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + rxmode = &dev->data->dev_conf.rxmode; /* * Make sure receives are disabled while setting @@ -1713,9 +1795,10 @@ eth_em_rx_init(struct rte_eth_dev *dev) * Reset crc_len in case it was changed after queue setup by a * call to configure */ - rxq->crc_len = - (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ? - 0 : ETHER_CRC_LEN); + if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) + rxq->crc_len = ETHER_CRC_LEN; + else + rxq->crc_len = 0; bus_addr = rxq->rx_ring_phys_addr; E1000_WRITE_REG(hw, E1000_RDLEN(i), @@ -1745,7 +1828,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) * to avoid splitting packets that don't fit into * one buffer. 
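/*
 * Illustrative sketch, not part of this patch: how an application
 * consumes the port-level vs. queue-level Rx offload capabilities that
 * em_get_rx_port_offloads_capa()/em_get_rx_queue_offloads_capa() now
 * report, and how a per-queue offload request reaches the new
 * rxq->offloads field above. Assumes an initialized EAL and a valid
 * port_id; error handling is trimmed.
 */
#include <rte_ethdev.h>

static int
setup_rxq_with_offloads(uint16_t port_id, uint16_t queue_id,
			struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;

	rte_eth_dev_info_get(port_id, &dev_info);
	rxconf = dev_info.default_rxconf;

	/* Request L3/L4 checksum offload only if this queue supports it. */
	if (dev_info.rx_queue_offload_capa & DEV_RX_OFFLOAD_CHECKSUM)
		rxconf.offloads |= DEV_RX_OFFLOAD_CHECKSUM;

	/* The PMD ORs rxconf.offloads with the port-wide rxmode.offloads. */
	return rte_eth_rx_queue_setup(port_id, queue_id, 256,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, mb_pool);
}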
*/ - if (dev->data->dev_conf.rxmode.jumbo_frame || + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME || rctl_bsize < ETHER_MAX_LEN) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); @@ -1755,7 +1838,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) } } - if (dev->data->dev_conf.rxmode.enable_scatter) { + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->rx_pkt_burst = eth_em_recv_scattered_pkts; @@ -1768,7 +1851,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) */ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); - if (dev->data->dev_conf.rxmode.hw_ip_checksum) + if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) rxcsum |= E1000_RXCSUM_IPOFL; else rxcsum &= ~E1000_RXCSUM_IPOFL; @@ -1780,24 +1863,24 @@ eth_em_rx_init(struct rte_eth_dev *dev) if ((hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_pch2lan || hw->mac.type == e1000_ich10lan) && - dev->data->dev_conf.rxmode.jumbo_frame == 1) { + rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3); E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13)); } if (hw->mac.type == e1000_pch2lan) { - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) e1000_lv_jumbo_workaround_ich8lan(hw, TRUE); else e1000_lv_jumbo_workaround_ich8lan(hw, FALSE); } /* Setup the Receive Control Register. */ - if (dev->data->dev_conf.rxmode.hw_strip_crc) - rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ - else + if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ + else + rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | @@ -1814,7 +1897,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) /* * Configure support of jumbo frames, if any. */ - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) rctl |= E1000_RCTL_LPE; else rctl &= ~E1000_RCTL_LPE; @@ -1894,6 +1977,7 @@ em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->scattered_rx = dev->data->scattered_rx; qinfo->nb_desc = rxq->nb_rx_desc; qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.offloads = rxq->offloads; } void @@ -1911,4 +1995,5 @@ em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_thresh.wthresh = txq->wthresh; qinfo->conf.tx_free_thresh = txq->tx_free_thresh; qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; + qinfo->conf.offloads = txq->offloads; } diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c index 3c5138de..64dfe683 100644 --- a/drivers/net/e1000/igb_ethdev.c +++ b/drivers/net/e1000/igb_ethdev.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include @@ -42,8 +41,6 @@ #define IGB_DEFAULT_TX_HTHRESH 1 #define IGB_DEFAULT_TX_WTHRESH ((hw->mac.type == e1000_82576) ? 
1 : 16) -#define IGB_HKEY_MAX_INDEX 10 - /* Bit shift and mask */ #define IGB_4_BIT_WIDTH (CHAR_BIT / 2) #define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t) @@ -146,7 +143,7 @@ static int eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t pool); static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index); -static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, +static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr); static void igbvf_intr_disable(struct e1000_hw *hw); @@ -171,7 +168,7 @@ static int igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on); static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on); -static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev, +static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr); static int igbvf_get_reg_length(struct rte_eth_dev *dev); static int igbvf_get_regs(struct rte_eth_dev *dev, @@ -224,6 +221,10 @@ static int eth_igb_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom); static int eth_igb_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom); +static int eth_igb_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo); +static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set, uint32_t nb_mc_addr); @@ -403,6 +404,8 @@ static const struct eth_dev_ops eth_igb_ops = { .get_eeprom_length = eth_igb_get_eeprom_length, .get_eeprom = eth_igb_get_eeprom, .set_eeprom = eth_igb_set_eeprom, + .get_module_info = eth_igb_get_module_info, + .get_module_eeprom = eth_igb_get_module_eeprom, .timesync_adjust_time = igb_timesync_adjust_time, .timesync_read_time = igb_timesync_read_time, .timesync_write_time = igb_timesync_write_time, @@ -432,6 +435,9 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = { .dev_supported_ptypes_get = eth_igb_supported_ptypes_get, .rx_queue_setup = eth_igb_rx_queue_setup, .rx_queue_release = eth_igb_rx_queue_release, + .rx_descriptor_done = eth_igb_rx_descriptor_done, + .rx_descriptor_status = eth_igb_rx_descriptor_status, + .tx_descriptor_status = eth_igb_tx_descriptor_status, .tx_queue_setup = eth_igb_tx_queue_setup, .tx_queue_release = eth_igb_tx_queue_release, .set_mc_addr_list = eth_igb_set_mc_addr_list, @@ -522,57 +528,6 @@ static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = { #define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \ sizeof(rte_igbvf_stats_strings[0])) -/** - * Atomically reads the link status information from global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &(dev->data->dev_link); - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/** - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. 
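/*
 * Illustrative sketch, not part of this patch: the shared
 * rte_eth_linkstatus_set() helper replaces the per-driver
 * rte_igb_dev_atomic_read/write_link_status() wrappers being deleted
 * here. A link_update callback reduces to filling a local rte_eth_link
 * and storing it atomically; the helper keeps the old contract of
 * returning 0 when the link status changed and -1 otherwise. Hardware
 * access is stubbed out.
 */
#include <string.h>
#include <rte_ethdev_driver.h>

static int
example_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	/* A real PMD reads these from the MAC/PHY. */
	link.link_status = ETH_LINK_UP;
	link.link_speed = ETH_SPEED_NUM_1G;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	/* Single atomic 64-bit store into dev->data->dev_link. */
	return rte_eth_linkstatus_set(dev, &link);
}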
- * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} static inline void igb_intr_enable(struct rte_eth_dev *dev) @@ -1559,7 +1514,7 @@ eth_igb_stop(struct rte_eth_dev *dev) /* clear the recorded link status */ memset(&link, 0, sizeof(link)); - rte_igb_dev_atomic_write_link_status(dev, &link); + rte_eth_linkstatus_set(dev, &link); if (!rte_intr_allow_others(intr_handle)) /* resume to the default handler */ @@ -1635,7 +1590,7 @@ eth_igb_close(struct rte_eth_dev *dev) } memset(&link, 0, sizeof(link)); - rte_igb_dev_atomic_write_link_status(dev, &link); + rte_eth_linkstatus_set(dev, &link); } static int @@ -2196,22 +2151,15 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */ dev_info->max_mac_addrs = hw->mac.rar_entry_count; - dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); + dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) | + dev_info->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); + dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | + dev_info->tx_queue_offload_capa; switch (hw->mac.type) { case e1000_82575: @@ -2274,6 +2222,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -2282,7 +2231,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) .hthresh = IGB_DEFAULT_TX_HTHRESH, .wthresh = IGB_DEFAULT_TX_WTHRESH, }, - .txq_flags = 0, + .offloads = 0, }; dev_info->rx_desc_lim = rx_desc_lim; @@ -2325,14 +2274,9 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. 
*/ dev_info->max_mac_addrs = hw->mac.rar_entry_count; - dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | @@ -2353,6 +2297,13 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) break; } + dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); + dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) | + dev_info->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); + dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | + dev_info->tx_queue_offload_capa; + dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { .pthresh = IGB_DEFAULT_RX_PTHRESH, @@ -2361,6 +2312,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -2369,7 +2321,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) .hthresh = IGB_DEFAULT_TX_HTHRESH, .wthresh = IGB_DEFAULT_TX_WTHRESH, }, - .txq_flags = 0, + .offloads = 0, }; dev_info->rx_desc_lim = rx_desc_lim; @@ -2382,7 +2334,7 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_eth_link link, old; + struct rte_eth_link link; int link_check, count; link_check = 0; @@ -2423,8 +2375,6 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL); } memset(&link, 0, sizeof(link)); - rte_igb_dev_atomic_read_link_status(dev, &link); - old = link; /* Now we check if a transition has happened */ if (link_check) { @@ -2443,14 +2393,8 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) link.link_status = ETH_LINK_DOWN; link.link_autoneg = ETH_LINK_FIXED; } - rte_igb_dev_atomic_write_link_status(dev, &link); - /* not changed */ - if (old.link_status == link.link_status) - return -1; - - /* changed */ - return 0; + return rte_eth_linkstatus_set(dev, &link); } /* @@ -2704,7 +2648,7 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev) E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); /* Update maximum packet length */ - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) E1000_WRITE_REG(hw, E1000_RLPML, dev->data->dev_conf.rxmode.max_rx_pkt_len + VLAN_TAG_SIZE); @@ -2723,7 +2667,7 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev) E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); /* Update maximum packet length */ - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) E1000_WRITE_REG(hw, E1000_RLPML, dev->data->dev_conf.rxmode.max_rx_pkt_len + 2 * VLAN_TAG_SIZE); @@ -2732,22 +2676,25 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev) static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask) { + struct rte_eth_rxmode *rxmode; + + rxmode = &dev->data->dev_conf.rxmode; if(mask & ETH_VLAN_STRIP_MASK){ - if (dev->data->dev_conf.rxmode.hw_vlan_strip) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) igb_vlan_hw_strip_enable(dev); else igb_vlan_hw_strip_disable(dev); } if(mask & ETH_VLAN_FILTER_MASK){ - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if 
(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) igb_vlan_hw_filter_enable(dev); else igb_vlan_hw_filter_disable(dev); } if(mask & ETH_VLAN_EXTEND_MASK){ - if (dev->data->dev_conf.rxmode.hw_vlan_extend) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) igb_vlan_hw_extend_enable(dev); else igb_vlan_hw_extend_disable(dev); @@ -2887,8 +2834,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev, if (ret < 0) return 0; - memset(&link, 0, sizeof(link)); - rte_igb_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); if (link.link_status) { PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s", @@ -3146,13 +3092,14 @@ eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index) e1000_rar_set(hw, addr, index); } -static void +static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) { eth_igb_rar_clear(dev, 0); - eth_igb_rar_set(dev, (void *)addr, 0, 0); + + return 0; } /* * Virtual Function operations @@ -3250,14 +3197,14 @@ igbvf_dev_configure(struct rte_eth_dev *dev) * Keep the persistent behavior the same as Host PF */ #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC - if (!conf->rxmode.hw_strip_crc) { + if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) { PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); - conf->rxmode.hw_strip_crc = 1; + conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP; } #else - if (conf->rxmode.hw_strip_crc) { + if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) { PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); - conf->rxmode.hw_strip_crc = 0; + conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP; } #endif @@ -3504,7 +3451,7 @@ igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) return 0; } -static void +static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) { struct e1000_hw *hw = @@ -3512,6 +3459,7 @@ igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) /* index is not used by rar_set() */ hw->mac.ops.rar_set(hw, (void *)addr, 0); + return 0; } @@ -4499,10 +4447,12 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) /* switch to jumbo mode if needed */ if (frame_size > ETHER_MAX_LEN) { - dev->data->dev_conf.rxmode.jumbo_frame = 1; + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; rctl |= E1000_RCTL_LPE; } else { - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; rctl &= ~E1000_RCTL_LPE; } E1000_WRITE_REG(hw, E1000_RCTL, rctl); @@ -5383,6 +5333,86 @@ eth_igb_set_eeprom(struct rte_eth_dev *dev, return nvm->ops.write(hw, first, length, data); } +static int +eth_igb_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + uint32_t status = 0; + uint16_t sff8472_rev, addr_mode; + bool page_swap = false; + + if (hw->phy.media_type == e1000_media_type_copper || + hw->phy.media_type == e1000_media_type_unknown) + return -EOPNOTSUPP; + + /* Check whether we support SFF-8472 or not */ + status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); + if (status) + return -EIO; + + /* addressing mode is not supported */ + status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); + if (status) + return -EIO; + + /* addressing mode is not supported */ + if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { + PMD_DRV_LOG(ERR, + "Address change required to access page 0xA2, " + "but not supported. 
Please report the module " + "type to the driver maintainers.\n"); + page_swap = true; + } + + if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { + /* We have an SFP, but it does not support SFF-8472 */ + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else { + /* We have an SFP which supports a revision of SFF-8472 */ + modinfo->type = RTE_ETH_MODULE_SFF_8472; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int +eth_igb_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + uint32_t status = 0; + uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1]; + u16 first_word, last_word; + int i = 0; + + if (info->length == 0) + return -EINVAL; + + first_word = info->offset >> 1; + last_word = (info->offset + info->length - 1) >> 1; + + /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ + for (i = 0; i < last_word - first_word + 1; i++) { + status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2, + &dataword[i]); + if (status) { + /* Error occurred while reading module */ + return -EIO; + } + + dataword[i] = rte_be_to_cpu_16(dataword[i]); + } + + memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length); + + return 0; +} + static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) { @@ -5631,7 +5661,7 @@ igb_rss_filter_restore(struct rte_eth_dev *dev) struct e1000_filter_info *filter_info = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - if (filter_info->rss_info.num) + if (filter_info->rss_info.conf.queue_num) igb_config_rss_filter(dev, &filter_info->rss_info, TRUE); } @@ -5654,3 +5684,9 @@ RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map); RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci"); + +/* see e1000_logs.c */ +RTE_INIT(e1000_init_log) +{ + e1000_igb_init_log(); +} diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c index a1427596..07385291 100644 --- a/drivers/net/e1000/igb_flow.c +++ b/drivers/net/e1000/igb_flow.c @@ -175,7 +175,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask; + ipv4_mask = item->mask; /** * Only support src & dst addresses, protocol, * others should be masked. 
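/*
 * Illustrative sketch, not part of this patch: application-side use of
 * the eth_igb_get_module_info()/eth_igb_get_module_eeprom() dev_ops
 * added above, through the public ethdev wrappers (experimental API in
 * this release, so the build is assumed to define
 * ALLOW_EXPERIMENTAL_API).
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <rte_ethdev.h>

static int
dump_module_eeprom(uint16_t port_id)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info info;
	int ret;

	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret; /* e.g. -EOPNOTSUPP on copper PHYs, as above */

	memset(&info, 0, sizeof(info));
	info.offset = 0;
	info.length = modinfo.eeprom_len;
	info.data = malloc(info.length);
	if (info.data == NULL)
		return -ENOMEM;

	ret = rte_eth_dev_get_module_eeprom(port_id, &info);
	/* ... on success, parse SFF-8079/SFF-8472 fields from info.data ... */
	free(info.data);
	return ret;
}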
@@ -198,7 +198,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->src_ip_mask = ipv4_mask->hdr.src_addr; filter->proto_mask = ipv4_mask->hdr.next_proto_id; - ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec; + ipv4_spec = item->spec; filter->dst_ip = ipv4_spec->hdr.dst_addr; filter->src_ip = ipv4_spec->hdr.src_addr; filter->proto = ipv4_spec->hdr.next_proto_id; @@ -228,7 +228,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* get the TCP/UDP/SCTP info */ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { if (item->spec && item->mask) { - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_mask = item->mask; /** * Only support src & dst ports, tcp flags, @@ -263,14 +263,14 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; + tcp_spec = item->spec; filter->dst_port = tcp_spec->hdr.dst_port; filter->src_port = tcp_spec->hdr.src_port; filter->tcp_flags = tcp_spec->hdr.tcp_flags; } } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { if (item->spec && item->mask) { - udp_mask = (const struct rte_flow_item_udp *)item->mask; + udp_mask = item->mask; /** * Only support src & dst ports, @@ -289,14 +289,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->dst_port_mask = udp_mask->hdr.dst_port; filter->src_port_mask = udp_mask->hdr.src_port; - udp_spec = (const struct rte_flow_item_udp *)item->spec; + udp_spec = item->spec; filter->dst_port = udp_spec->hdr.dst_port; filter->src_port = udp_spec->hdr.src_port; } } else { if (item->spec && item->mask) { - sctp_mask = (const struct rte_flow_item_sctp *) - item->mask; + sctp_mask = item->mask; /** * Only support src & dst ports, @@ -380,6 +379,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + if (attr->priority > 0xFFFF) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -533,8 +541,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; /* Mask bits of source MAC address must be full of 0. 
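/*
 * Note, not part of this patch: the casts removed throughout these
 * igb_flow.c hunks are redundant because the rte_flow item
 * spec/last/mask pointers are declared `const void *`, which converts
 * implicitly to any pointer-to-const object type in C:
 *
 *	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
 *
 * Dropping the casts is therefore pure cleanup, and slightly safer:
 * an explicit cast would also silence a genuine type mismatch.
 */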
* Mask bits of destination MAC address must be full @@ -624,6 +632,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* Not supported */ + if (attr->transfer) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* Not supported */ if (attr->priority) { rte_flow_error_set(error, EINVAL, @@ -848,8 +864,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_spec = item->spec; + tcp_mask = item->mask; if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) || tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port || @@ -924,6 +940,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* Support 2 priorities, the lowest or highest. */ if (!attr->priority) { filter->hig_pri = 0; @@ -1065,8 +1090,8 @@ item_loop: return -rte_errno; } - raw_spec = (const struct rte_flow_item_raw *)item->spec; - raw_mask = (const struct rte_flow_item_raw *)item->mask; + raw_spec = item->spec; + raw_mask = item->mask; if (!raw_mask->length || !raw_mask->relative) { @@ -1212,6 +1237,15 @@ item_loop: return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + if (attr->priority > 0xFFFF) { memset(filter, 0, sizeof(struct rte_eth_flex_filter)); rte_flow_error_set(error, EINVAL, @@ -1293,7 +1327,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev, rss = (const struct rte_flow_action_rss *)act->conf; - if (!rss || !rss->num) { + if (!rss || !rss->queue_num) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, @@ -1301,7 +1335,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev, return -rte_errno; } - for (n = 0; n < rss->num; n++) { + for (n = 0; n < rss->queue_num; n++) { if (rss->queue[n] >= dev->data->nb_rx_queues) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -1311,14 +1345,26 @@ igb_parse_rss_filter(struct rte_eth_dev *dev, } } - if (rss->rss_conf) - rss_conf->rss_conf = *rss->rss_conf; - else - rss_conf->rss_conf.rss_hf = IGB_RSS_OFFLOAD_ALL; - - for (n = 0; n < rss->num; ++n) - rss_conf->queue[n] = rss->queue[n]; - rss_conf->num = rss->num; + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "non-default RSS hash functions are not supported"); + if (rss->level) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "a nonzero RSS encapsulation level is not supported"); + if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS hash key must be exactly 40 bytes"); + if (rss->queue_num > RTE_DIM(rss_conf->queue)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "too many queues for RSS context"); + if (igb_rss_conf_init(rss_conf, rss)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS context 
initialization failure"); /* check if the next not void item is END */ index++; @@ -1350,6 +1396,15 @@ igb_parse_rss_filter(struct rte_eth_dev *dev, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + if (attr->priority > 0xFFFF) { memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf)); rte_flow_error_set(error, EINVAL, @@ -1519,9 +1574,8 @@ igb_flow_create(struct rte_eth_dev *dev, PMD_DRV_LOG(ERR, "failed to allocate memory"); goto out; } - rte_memcpy(&rss_filter_ptr->filter_info, - &rss_conf, - sizeof(struct igb_rte_flow_rss_conf)); + igb_rss_conf_init(&rss_filter_ptr->filter_info, + &rss_conf.conf); TAILQ_INSERT_TAIL(&igb_filter_rss_list, rss_filter_ptr, entries); flow->rule = rss_filter_ptr; @@ -1758,7 +1812,7 @@ igb_clear_rss_filter(struct rte_eth_dev *dev) struct e1000_filter_info *filter = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - if (filter->rss_info.num) + if (filter->rss_info.conf.queue_num) igb_config_rss_filter(dev, &filter->rss_info, FALSE); } diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c index 2f371672..b955068a 100644 --- a/drivers/net/e1000/igb_rxtx.c +++ b/drivers/net/e1000/igb_rxtx.c @@ -107,6 +107,7 @@ struct igb_rx_queue { uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */ uint32_t flags; /**< RX flags. */ + uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */ }; /** @@ -180,6 +181,7 @@ struct igb_tx_queue { /**< Start context position for transmit queue. */ struct igb_advctx_info ctx_cache[IGB_CTX_NUM]; /**< Hardware context history.*/ + uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */ }; #if 1 @@ -1447,6 +1449,33 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev) igb_reset_tx_queue_stat(txq); } +uint64_t +igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_offload_capa; + + RTE_SET_USED(dev); + rx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; + + return rx_offload_capa; +} + +uint64_t +igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_queue_offload_capa; + + rx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev); + + return rx_queue_offload_capa; +} + int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1458,6 +1487,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev, struct igb_tx_queue *txq; struct e1000_hw *hw; uint32_t size; + uint64_t offloads; + + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1542,6 +1574,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev, dev->tx_pkt_burst = eth_igb_xmit_pkts; dev->tx_pkt_prepare = ð_igb_prep_pkts; dev->data->tx_queues[queue_idx] = txq; + txq->offloads = offloads; return 0; } @@ -1593,6 +1626,46 @@ igb_reset_rx_queue(struct igb_rx_queue *rxq) rxq->pkt_last_seg = NULL; } +uint64_t +igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_offload_capa; + + RTE_SET_USED(dev); + rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + 
DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER; + + return rx_offload_capa; +} + +uint64_t +igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rx_queue_offload_capa; + + switch (hw->mac.type) { + case e1000_vfadapt_i350: + /* + * As only one Rx queue can be used, let per queue offloading + * capability be same to per port queue offloading capability + * for better convenience. + */ + rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev); + break; + default: + rx_queue_offload_capa = 0; + } + return rx_queue_offload_capa; +} + int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1605,6 +1678,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev, struct igb_rx_queue *rxq; struct e1000_hw *hw; unsigned int size; + uint64_t offloads; + + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1630,6 +1706,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE); if (rxq == NULL) return -ENOMEM; + rxq->offloads = offloads; rxq->mb_pool = mp; rxq->nb_rx_desc = nb_desc; rxq->pthresh = rx_conf->rx_thresh.pthresh; @@ -1644,8 +1721,10 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev, rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); rxq->port_id = dev->data->port_id; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : - ETHER_CRC_LEN); + if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) + rxq->crc_len = ETHER_CRC_LEN; + else + rxq->crc_len = 0; /* * Allocate RX ring hardware descriptors. A memzone large enough to @@ -2227,6 +2306,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev) int eth_igb_rx_init(struct rte_eth_dev *dev) { + struct rte_eth_rxmode *rxmode; struct e1000_hw *hw; struct igb_rx_queue *rxq; uint32_t rctl; @@ -2247,10 +2327,12 @@ eth_igb_rx_init(struct rte_eth_dev *dev) rctl = E1000_READ_REG(hw, E1000_RCTL); E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + rxmode = &dev->data->dev_conf.rxmode; + /* * Configure support of jumbo frames, if any. */ - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) { + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { rctl |= E1000_RCTL_LPE; /* @@ -2292,9 +2374,10 @@ eth_igb_rx_init(struct rte_eth_dev *dev) * Reset crc_len in case it was changed after queue setup by a * call to configure */ - rxq->crc_len = - (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ? 
- 0 : ETHER_CRC_LEN); + if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) + rxq->crc_len = ETHER_CRC_LEN; + else + rxq->crc_len = 0; bus_addr = rxq->rx_ring_phys_addr; E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx), @@ -2362,7 +2445,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev) E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl); } - if (dev->data->dev_conf.rxmode.enable_scatter) { + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; @@ -2406,19 +2489,27 @@ eth_igb_rx_init(struct rte_eth_dev *dev) rxcsum |= E1000_RXCSUM_PCSD; /* Enable both L3/L4 rx checksum offload */ - if (dev->data->dev_conf.rxmode.hw_ip_checksum) - rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | - E1000_RXCSUM_CRCOFL); + if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) + rxcsum |= E1000_RXCSUM_IPOFL; + else + rxcsum &= ~E1000_RXCSUM_IPOFL; + if (rxmode->offloads & + (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) + rxcsum |= E1000_RXCSUM_TUOFL; else - rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | - E1000_RXCSUM_CRCOFL); + rxcsum &= ~E1000_RXCSUM_TUOFL; + if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) + rxcsum |= E1000_RXCSUM_CRCOFL; + else + rxcsum &= ~E1000_RXCSUM_CRCOFL; + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); /* Setup the Receive Control Register. */ - if (dev->data->dev_conf.rxmode.hw_strip_crc) { - rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ + if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) { + rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ - /* set STRCRC bit in all queues */ + /* clear STRCRC bit in all queues */ if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211 || @@ -2427,14 +2518,14 @@ eth_igb_rx_init(struct rte_eth_dev *dev) rxq = dev->data->rx_queues[i]; uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(rxq->reg_idx)); - dvmolr |= E1000_DVMOLR_STRCRC; + dvmolr &= ~E1000_DVMOLR_STRCRC; E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr); } } } else { - rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ + rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. 
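/*
 * Illustrative sketch, not part of this patch: the assumed behaviour
 * of the rte_eth_dev_must_keep_crc() helper driving these CRC hunks
 * during the CRC_STRIP -> KEEP_CRC transition. Roughly, stripping
 * happens only when DEV_RX_OFFLOAD_CRC_STRIP is requested; otherwise
 * the CRC is kept and the PMD sets crc_len to ETHER_CRC_LEN:
 */
static inline int
example_must_keep_crc(uint64_t rx_offloads)
{
	if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
		return 0;	/* strip: crc_len stays 0 */
	/* No CRC_STRIP flag (KEEP_CRC or nothing): keep the CRC. */
	return 1;
}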
*/ - /* clear STRCRC bit in all queues */ + /* set STRCRC bit in all queues */ if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211 || @@ -2443,7 +2534,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev) rxq = dev->data->rx_queues[i]; uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(rxq->reg_idx)); - dvmolr &= ~E1000_DVMOLR_STRCRC; + dvmolr |= E1000_DVMOLR_STRCRC; E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr); } } @@ -2654,7 +2745,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev) E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); } - if (dev->data->dev_conf.rxmode.enable_scatter) { + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; @@ -2741,6 +2832,7 @@ igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; qinfo->conf.rx_drop_en = rxq->drop_en; + qinfo->conf.offloads = rxq->offloads; } void @@ -2756,6 +2848,41 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_thresh.pthresh = txq->pthresh; qinfo->conf.tx_thresh.hthresh = txq->hthresh; qinfo->conf.tx_thresh.wthresh = txq->wthresh; + qinfo->conf.offloads = txq->offloads; +} + +int +igb_rss_conf_init(struct igb_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in) +{ + if (in->key_len > RTE_DIM(out->key) || + in->queue_num > RTE_DIM(out->queue)) + return -EINVAL; + out->conf = (struct rte_flow_action_rss){ + .func = in->func, + .level = in->level, + .types = in->types, + .key_len = in->key_len, + .queue_num = in->queue_num, + .key = memcpy(out->key, in->key, in->key_len), + .queue = memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num), + }; + return 0; +} + +int +igb_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with) +{ + return (comp->func == with->func && + comp->level == with->level && + comp->types == with->types && + comp->key_len == with->key_len && + comp->queue_num == with->queue_num && + !memcmp(comp->key, with->key, with->key_len) && + !memcmp(comp->queue, with->queue, + sizeof(*with->queue) * with->queue_num)); } int @@ -2764,7 +2891,12 @@ igb_config_rss_filter(struct rte_eth_dev *dev, { uint32_t shift; uint16_t i, j; - struct rte_eth_rss_conf rss_conf = conf->rss_conf; + struct rte_eth_rss_conf rss_conf = { + .rss_key = conf->conf.key_len ? + (void *)(uintptr_t)conf->conf.key : NULL, + .rss_key_len = conf->conf.key_len, + .rss_hf = conf->conf.types, + }; struct e1000_filter_info *filter_info = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -2772,8 +2904,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev, hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (!add) { - if (memcmp(conf, &filter_info->rss_info, - sizeof(struct igb_rte_flow_rss_conf)) == 0) { + if (igb_action_rss_same(&filter_info->rss_info.conf, + &conf->conf)) { igb_rss_disable(dev); memset(&filter_info->rss_info, 0, sizeof(struct igb_rte_flow_rss_conf)); @@ -2782,7 +2914,7 @@ igb_config_rss_filter(struct rte_eth_dev *dev, return -EINVAL; } - if (filter_info->rss_info.num) + if (filter_info->rss_info.conf.queue_num) return -EINVAL; /* Fill in redirection table. 
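/*
 * Illustrative sketch, not part of this patch: the application-side
 * shape of the reworked rte_flow_action_rss that igb_rss_conf_init()
 * and igb_action_rss_same() above deep-copy and compare. The old ->num
 * counter and embedded rte_eth_rss_conf are replaced by
 * queue_num/key_len/types plus bare pointers, hence the deep copy.
 */
#include <rte_ethdev.h>
#include <rte_flow.h>

static const uint16_t rss_queues[] = { 0, 1 };

static const struct rte_flow_action_rss rss_action = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,	/* igb rejects anything else */
	.level = 0,				/* outermost headers only */
	.types = ETH_RSS_IP,			/* hash on IP addresses */
	.key_len = 0,				/* 0 = PMD default key */
	.key = NULL,
	.queue_num = 2,
	.queue = rss_queues,
};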
*/ @@ -2794,9 +2926,9 @@ igb_config_rss_filter(struct rte_eth_dev *dev, } reta; uint8_t q_idx; - q_idx = conf->queue[j]; - if (j == conf->num) + if (j == conf->conf.queue_num) j = 0; + q_idx = conf->conf.queue[j]; reta.bytes[i & 3] = (uint8_t)(q_idx << shift); if ((i & 3) == 3) E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword); @@ -2813,8 +2945,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev, rss_conf.rss_key = rss_intel_key; /* Default hash key */ igb_hw_rss_hash_set(hw, &rss_conf); - rte_memcpy(&filter_info->rss_info, - conf, sizeof(struct igb_rte_flow_rss_conf)); + if (igb_rss_conf_init(&filter_info->rss_info, &conf->conf)) + return -EINVAL; return 0; } diff --git a/drivers/net/e1000/meson.build b/drivers/net/e1000/meson.build index 3a1bf5af..cf456995 100644 --- a/drivers/net/e1000/meson.build +++ b/drivers/net/e1000/meson.build @@ -5,6 +5,7 @@ subdir('base') objs = [base_objs] sources = files( + 'e1000_logs.c', 'em_ethdev.c', 'em_rxtx.c', 'igb_ethdev.c', diff --git a/drivers/net/ena/Makefile b/drivers/net/ena/Makefile index f9bfe053..ff9ce315 100644 --- a/drivers/net/ena/Makefile +++ b/drivers/net/ena/Makefile @@ -43,6 +43,9 @@ INCLUDES :=-I$(SRCDIR) -I$(SRCDIR)/base/ena_defs -I$(SRCDIR)/base EXPORT_MAP := rte_pmd_ena_version.map LIBABIVER := 1 +# rte_fbarray is not yet part of stable API +CFLAGS += -DALLOW_EXPERIMENTAL_API + VPATH += $(SRCDIR)/base # # all source are stored in SRCS-y @@ -55,5 +58,6 @@ CFLAGS += $(INCLUDES) LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs LDLIBS += -lrte_bus_pci +LDLIBS += -lrte_timer include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c index 38a05877..4abf1a28 100644 --- a/drivers/net/ena/base/ena_com.c +++ b/drivers/net/ena/base/ena_com.c @@ -37,11 +37,19 @@ /*****************************************************************************/ /* Timeout in micro-sec */ -#define ADMIN_CMD_TIMEOUT_US (1000000) +#define ADMIN_CMD_TIMEOUT_US (3000000) -#define ENA_ASYNC_QUEUE_DEPTH 4 +#define ENA_ASYNC_QUEUE_DEPTH 16 #define ENA_ADMIN_QUEUE_DEPTH 32 +#ifdef ENA_EXTENDED_STATS + +#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08 +#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF) +#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16) + +#endif /* ENA_EXTENDED_STATS */ + #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \ | (ENA_COMMON_SPEC_VERSION_MINOR)) @@ -62,7 +70,9 @@ #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF -static int ena_alloc_cnt; +#define ENA_REGS_ADMIN_INTR_MASK 1 + +#define ENA_POLL_MS 5 /*****************************************************************************/ /*****************************************************************************/ @@ -86,6 +96,11 @@ struct ena_comp_ctx { bool occupied; }; +struct ena_com_stats_ctx { + struct ena_admin_aq_get_stats_cmd get_cmd; + struct ena_admin_acq_get_stats_resp get_resp; +}; + static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, struct ena_common_mem_addr *ena_addr, dma_addr_t addr) @@ -95,50 +110,49 @@ static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, return ENA_COM_INVAL; } - ena_addr->mem_addr_low = (u32)addr; - ena_addr->mem_addr_high = - ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 32)) >> 32); + ena_addr->mem_addr_low = lower_32_bits(addr); + ena_addr->mem_addr_high = (u16)upper_32_bits(addr); return 0; } static int ena_com_admin_init_sq(struct 
ena_com_admin_queue *queue) { - ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, - ADMIN_SQ_SIZE(queue->q_depth), - queue->sq.entries, - queue->sq.dma_addr, - queue->sq.mem_handle); + struct ena_com_admin_sq *sq = &queue->sq; + u16 size = ADMIN_SQ_SIZE(queue->q_depth); - if (!queue->sq.entries) { + ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr, + sq->mem_handle); + + if (!sq->entries) { ena_trc_err("memory allocation failed"); return ENA_COM_NO_MEM; } - queue->sq.head = 0; - queue->sq.tail = 0; - queue->sq.phase = 1; + sq->head = 0; + sq->tail = 0; + sq->phase = 1; - queue->sq.db_addr = NULL; + sq->db_addr = NULL; return 0; } static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue) { - ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, - ADMIN_CQ_SIZE(queue->q_depth), - queue->cq.entries, - queue->cq.dma_addr, - queue->cq.mem_handle); + struct ena_com_admin_cq *cq = &queue->cq; + u16 size = ADMIN_CQ_SIZE(queue->q_depth); + + ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr, + cq->mem_handle); - if (!queue->cq.entries) { + if (!cq->entries) { ena_trc_err("memory allocation failed"); return ENA_COM_NO_MEM; } - queue->cq.head = 0; - queue->cq.phase = 1; + cq->head = 0; + cq->phase = 1; return 0; } @@ -146,44 +160,44 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue) static int ena_com_admin_init_aenq(struct ena_com_dev *dev, struct ena_aenq_handlers *aenq_handlers) { + struct ena_com_aenq *aenq = &dev->aenq; u32 addr_low, addr_high, aenq_caps; + u16 size; dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; - ENA_MEM_ALLOC_COHERENT(dev->dmadev, - ADMIN_AENQ_SIZE(dev->aenq.q_depth), - dev->aenq.entries, - dev->aenq.dma_addr, - dev->aenq.mem_handle); + size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); + ENA_MEM_ALLOC_COHERENT(dev->dmadev, size, + aenq->entries, + aenq->dma_addr, + aenq->mem_handle); - if (!dev->aenq.entries) { + if (!aenq->entries) { ena_trc_err("memory allocation failed"); return ENA_COM_NO_MEM; } - dev->aenq.head = dev->aenq.q_depth; - dev->aenq.phase = 1; + aenq->head = aenq->q_depth; + aenq->phase = 1; - addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(dev->aenq.dma_addr); - addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(dev->aenq.dma_addr); + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr); - ENA_REG_WRITE32(addr_low, (unsigned char *)dev->reg_bar - + ENA_REGS_AENQ_BASE_LO_OFF); - ENA_REG_WRITE32(addr_high, (unsigned char *)dev->reg_bar - + ENA_REGS_AENQ_BASE_HI_OFF); + ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF); + ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF); aenq_caps = 0; aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; aenq_caps |= (sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; + ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); - ENA_REG_WRITE32(aenq_caps, (unsigned char *)dev->reg_bar - + ENA_REGS_AENQ_CAPS_OFF); - - if (unlikely(!aenq_handlers)) + if (unlikely(!aenq_handlers)) { ena_trc_err("aenq handlers pointer is NULL\n"); + return ENA_COM_INVAL; + } - dev->aenq.aenq_handlers = aenq_handlers; + aenq->aenq_handlers = aenq_handlers; return 0; } @@ -217,12 +231,11 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue, return &queue->comp_ctx[command_id]; } -static struct ena_comp_ctx * -__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, - 
struct ena_admin_aq_entry *cmd, - size_t cmd_size_in_bytes, - struct ena_admin_acq_entry *comp, - size_t comp_size_in_bytes) +static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size_in_bytes, + struct ena_admin_acq_entry *comp, + size_t comp_size_in_bytes) { struct ena_comp_ctx *comp_ctx; u16 tail_masked, cmd_id; @@ -234,12 +247,9 @@ __ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, tail_masked = admin_queue->sq.tail & queue_size_mask; /* In case of queue FULL */ - cnt = admin_queue->sq.tail - admin_queue->sq.head; + cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds); if (cnt >= admin_queue->q_depth) { - ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n", - admin_queue->sq.tail, - admin_queue->sq.head, - admin_queue->q_depth); + ena_trc_dbg("admin queue is full.\n"); admin_queue->stats.out_of_space++; return ERR_PTR(ENA_COM_NO_SPACE); } @@ -253,6 +263,8 @@ __ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true); + if (unlikely(!comp_ctx)) + return ERR_PTR(ENA_COM_INVAL); comp_ctx->status = ENA_CMD_SUBMITTED; comp_ctx->comp_size = (u32)comp_size_in_bytes; @@ -272,7 +284,8 @@ __ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) admin_queue->sq.phase = !admin_queue->sq.phase; - ENA_REG_WRITE32(admin_queue->sq.tail, admin_queue->sq.db_addr); + ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail, + admin_queue->sq.db_addr); return comp_ctx; } @@ -298,12 +311,11 @@ static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue) return 0; } -static struct ena_comp_ctx * -ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, - struct ena_admin_aq_entry *cmd, - size_t cmd_size_in_bytes, - struct ena_admin_acq_entry *comp, - size_t comp_size_in_bytes) +static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size_in_bytes, + struct ena_admin_acq_entry *comp, + size_t comp_size_in_bytes) { unsigned long flags = 0; struct ena_comp_ctx *comp_ctx; @@ -317,7 +329,7 @@ ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, cmd_size_in_bytes, comp, comp_size_in_bytes); - if (unlikely(IS_ERR(comp_ctx))) + if (IS_ERR(comp_ctx)) admin_queue->running_state = false; ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); @@ -331,9 +343,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, size_t size; int dev_node = 0; - ENA_TOUCH(ctx); - - memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr)); + memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); io_sq->desc_entry_size = (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 
@@ -347,23 +357,26 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, size, io_sq->desc_addr.virt_addr, io_sq->desc_addr.phys_addr, + io_sq->desc_addr.mem_handle, ctx->numa_node, dev_node); - if (!io_sq->desc_addr.virt_addr) + if (!io_sq->desc_addr.virt_addr) { ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr, io_sq->desc_addr.phys_addr, io_sq->desc_addr.mem_handle); + } } else { ENA_MEM_ALLOC_NODE(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr, ctx->numa_node, dev_node); - if (!io_sq->desc_addr.virt_addr) + if (!io_sq->desc_addr.virt_addr) { io_sq->desc_addr.virt_addr = ENA_MEM_ALLOC(ena_dev->dmadev, size); + } } if (!io_sq->desc_addr.virt_addr) { @@ -385,8 +398,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, size_t size; int prev_node = 0; - ENA_TOUCH(ctx); - memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr)); + memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr)); /* Use the basic completion descriptor for Rx */ io_cq->cdesc_entry_size_in_bytes = @@ -397,17 +409,19 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev, - size, - io_cq->cdesc_addr.virt_addr, - io_cq->cdesc_addr.phys_addr, - ctx->numa_node, - prev_node); - if (!io_cq->cdesc_addr.virt_addr) + size, + io_cq->cdesc_addr.virt_addr, + io_cq->cdesc_addr.phys_addr, + io_cq->cdesc_addr.mem_handle, + ctx->numa_node, + prev_node); + if (!io_cq->cdesc_addr.virt_addr) { ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr, io_cq->cdesc_addr.phys_addr, io_cq->cdesc_addr.mem_handle); + } if (!io_cq->cdesc_addr.virt_addr) { ena_trc_err("memory allocation failed"); @@ -420,9 +434,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, return 0; } -static void -ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, - struct ena_admin_acq_entry *cqe) +static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, + struct ena_admin_acq_entry *cqe) { struct ena_comp_ctx *comp_ctx; u16 cmd_id; @@ -447,8 +460,7 @@ ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event); } -static void -ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue) +static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue) { struct ena_admin_acq_entry *cqe = NULL; u16 comp_num = 0; @@ -499,7 +511,7 @@ static int ena_com_comp_status_to_errno(u8 comp_status) case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE: return ENA_COM_NO_MEM; case ENA_ADMIN_UNSUPPORTED_OPCODE: - return ENA_COM_PERMISSION; + return ENA_COM_UNSUPPORTED; case ENA_ADMIN_BAD_OPCODE: case ENA_ADMIN_MALFORMED_REQUEST: case ENA_ADMIN_ILLEGAL_PARAMETER: @@ -510,20 +522,24 @@ static int ena_com_comp_status_to_errno(u8 comp_status) return 0; } -static int -ena_com_wait_and_process_admin_cq_polling( - struct ena_comp_ctx *comp_ctx, - struct ena_com_admin_queue *admin_queue) +static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, + struct ena_com_admin_queue *admin_queue) { unsigned long flags = 0; - u64 start_time; + unsigned long timeout; int ret; - start_time = ENA_GET_SYSTEM_USECS(); + timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout); + + while (1) { + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + ena_com_handle_admin_completion(admin_queue); + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, 
flags); + + if (comp_ctx->status != ENA_CMD_SUBMITTED) + break; - while (comp_ctx->status == ENA_CMD_SUBMITTED) { - if ((ENA_GET_SYSTEM_USECS() - start_time) > - ADMIN_CMD_TIMEOUT_US) { + if (ENA_TIME_EXPIRE(timeout)) { ena_trc_err("Wait for completion (polling) timeout\n"); /* ENA didn't have any completion */ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); @@ -535,9 +551,7 @@ ena_com_wait_and_process_admin_cq_polling( goto err; } - ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); - ena_com_handle_admin_completion(admin_queue); - ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + ENA_MSLEEP(ENA_POLL_MS); } if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { @@ -549,8 +563,8 @@ ena_com_wait_and_process_admin_cq_polling( goto err; } - ENA_ASSERT(comp_ctx->status == ENA_CMD_COMPLETED, - "Invalid comp status %d\n", comp_ctx->status); + ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED, + "Invalid comp status %d\n", comp_ctx->status); ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); err: @@ -558,16 +572,14 @@ err: return ret; } -static int -ena_com_wait_and_process_admin_cq_interrupts( - struct ena_comp_ctx *comp_ctx, - struct ena_com_admin_queue *admin_queue) +static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx, + struct ena_com_admin_queue *admin_queue) { unsigned long flags = 0; - int ret = 0; + int ret; ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event, - ADMIN_CMD_TIMEOUT_US); + admin_queue->completion_timeout); /* In case the command wasn't completed find out the root cause. * There might be 2 kinds of errors @@ -607,16 +619,18 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp = mmio_read->read_resp; - u32 mmio_read_reg, ret; + u32 mmio_read_reg, ret, i; unsigned long flags = 0; - int i; + u32 timeout = mmio_read->reg_read_to; ENA_MIGHT_SLEEP(); + if (timeout == 0) + timeout = ENA_REG_READ_TIMEOUT; + /* If readless is disabled, perform regular read */ if (!mmio_read->readless_supported) - return ENA_REG_READ32((unsigned char *)ena_dev->reg_bar + - offset); + return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset); ENA_SPINLOCK_LOCK(mmio_read->lock, flags); mmio_read->seq_num++; @@ -632,17 +646,16 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) */ wmb(); - ENA_REG_WRITE32(mmio_read_reg, (unsigned char *)ena_dev->reg_bar - + ENA_REGS_MMIO_REG_READ_OFF); + ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); - for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) { + for (i = 0; i < timeout; i++) { if (read_resp->req_id == mmio_read->seq_num) break; ENA_UDELAY(1); } - if (unlikely(i == ENA_REG_READ_TIMEOUT)) { + if (unlikely(i == timeout)) { ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", mmio_read->seq_num, offset, @@ -653,7 +666,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) } if (read_resp->reg_off != offset) { - ena_trc_err("reading failed for wrong offset value"); + ena_trc_err("Read failure: wrong offset provided"); ret = ENA_MMIO_READ_TIMEOUT; } else { ret = read_resp->reg_val; @@ -671,9 +684,8 @@ err: * It is expected that the IRQ called ena_com_handle_admin_completion * to mark the completions. 
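/*
 * Illustrative sketch, not part of this patch: the deadline-based
 * polling pattern the ena_com hunks above adopt, i.e. compute the
 * timeout once, poll for completion first, and sleep a fixed
 * ENA_POLL_MS between iterations instead of busy-looping on elapsed
 * microseconds. Names below are generic stand-ins for the ENA_*
 * platform macros.
 */
#include <stdbool.h>
#include <time.h>

#define POLL_INTERVAL_MS 5	/* mirrors ENA_POLL_MS */

static int
poll_until_done(bool (*done)(void *), void *ctx, unsigned int timeout_ms)
{
	struct timespec ts = { 0, POLL_INTERVAL_MS * 1000000L };
	unsigned int waited_ms = 0;

	for (;;) {
		if (done(ctx))
			return 0;		/* completed */
		if (waited_ms >= timeout_ms)
			return -1;		/* timer expired */
		nanosleep(&ts, NULL);
		waited_ms += POLL_INTERVAL_MS;
	}
}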
*/ -static int -ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx, - struct ena_com_admin_queue *admin_queue) +static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx, + struct ena_com_admin_queue *admin_queue) { if (admin_queue->polling) return ena_com_wait_and_process_admin_cq_polling(comp_ctx, @@ -692,7 +704,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, u8 direction; int ret; - memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd)); + memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) direction = ENA_ADMIN_SQ_DIRECTION_TX; @@ -706,12 +718,11 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, destroy_cmd.sq.sq_idx = io_sq->idx; destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ; - ret = ena_com_execute_admin_command( - admin_queue, - (struct ena_admin_aq_entry *)&destroy_cmd, - sizeof(destroy_cmd), - (struct ena_admin_acq_entry *)&destroy_resp, - sizeof(destroy_resp)); + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&destroy_cmd, + sizeof(destroy_cmd), + (struct ena_admin_acq_entry *)&destroy_resp, + sizeof(destroy_resp)); if (unlikely(ret && (ret != ENA_COM_NO_DEVICE))) ena_trc_err("failed to destroy io sq error: %d\n", ret); @@ -747,18 +758,20 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, io_sq->desc_addr.phys_addr, io_sq->desc_addr.mem_handle); else - ENA_MEM_FREE(ena_dev->dmadev, - io_sq->desc_addr.virt_addr); + ENA_MEM_FREE(ena_dev->dmadev, io_sq->desc_addr.virt_addr); io_sq->desc_addr.virt_addr = NULL; } } -static int wait_for_reset_state(struct ena_com_dev *ena_dev, - u32 timeout, u16 exp_state) +static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, + u16 exp_state) { u32 val, i; + /* Convert timeout from resolution of 100ms to ENA_POLL_MS */ + timeout = (timeout * 100) / ENA_POLL_MS; + for (i = 0; i < timeout; i++) { val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); @@ -771,16 +784,14 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, exp_state) return 0; - /* The resolution of the timeout is 100ms */ - ENA_MSLEEP(100); + ENA_MSLEEP(ENA_POLL_MS); } return ENA_COM_TIMER_EXPIRED; } -static bool -ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev, - enum ena_admin_aq_feature_id feature_id) +static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev, + enum ena_admin_aq_feature_id feature_id) { u32 feature_mask = 1 << feature_id; @@ -802,14 +813,9 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, struct ena_admin_get_feat_cmd get_cmd; int ret; - if (!ena_dev) { - ena_trc_err("%s : ena_dev is NULL\n", __func__); - return ENA_COM_NO_DEVICE; - } - if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { - ena_trc_info("Feature %d isn't supported\n", feature_id); - return ENA_COM_PERMISSION; + ena_trc_dbg("Feature %d isn't supported\n", feature_id); + return ENA_COM_UNSUPPORTED; } memset(&get_cmd, 0x0, sizeof(get_cmd)); @@ -945,10 +951,10 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, sizeof(struct ena_admin_rss_ind_table_entry); ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, - tbl_size, - rss->rss_ind_tbl, - rss->rss_ind_tbl_dma_addr, - rss->rss_ind_tbl_mem_handle); + tbl_size, + rss->rss_ind_tbl, + rss->rss_ind_tbl_dma_addr, + rss->rss_ind_tbl_mem_handle); if (unlikely(!rss->rss_ind_tbl)) goto mem_err1; @@ -1005,7 +1011,7 @@ static int ena_com_create_io_sq(struct ena_com_dev 
*ena_dev, u8 direction; int ret; - memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd)); + memset(&create_cmd, 0x0, sizeof(create_cmd)); create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ; @@ -1041,12 +1047,11 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, } } - ret = ena_com_execute_admin_command( - admin_queue, - (struct ena_admin_aq_entry *)&create_cmd, - sizeof(create_cmd), - (struct ena_admin_acq_entry *)&cmd_completion, - sizeof(cmd_completion)); + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&create_cmd, + sizeof(create_cmd), + (struct ena_admin_acq_entry *)&cmd_completion, + sizeof(cmd_completion)); if (unlikely(ret)) { ena_trc_err("Failed to create IO SQ. error: %d\n", ret); return ret; @@ -1133,9 +1138,8 @@ static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev) return 0; } -static void -ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, - u16 intr_delay_resolution) +static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, + u16 intr_delay_resolution) { struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; unsigned int i; @@ -1165,13 +1169,18 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, size_t comp_size) { struct ena_comp_ctx *comp_ctx; - int ret = 0; + int ret; comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, comp, comp_size); - if (unlikely(IS_ERR(comp_ctx))) { - ena_trc_err("Failed to submit command [%ld]\n", - PTR_ERR(comp_ctx)); + if (IS_ERR(comp_ctx)) { + if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE)) + ena_trc_dbg("Failed to submit command [%ld]\n", + PTR_ERR(comp_ctx)); + else + ena_trc_err("Failed to submit command [%ld]\n", + PTR_ERR(comp_ctx)); + return PTR_ERR(comp_ctx); } @@ -1195,7 +1204,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev, struct ena_admin_acq_create_cq_resp_desc cmd_completion; int ret; - memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd)); + memset(&create_cmd, 0x0, sizeof(create_cmd)); create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ; @@ -1215,12 +1224,11 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev, return ret; } - ret = ena_com_execute_admin_command( - admin_queue, - (struct ena_admin_aq_entry *)&create_cmd, - sizeof(create_cmd), - (struct ena_admin_acq_entry *)&cmd_completion, - sizeof(cmd_completion)); + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&create_cmd, + sizeof(create_cmd), + (struct ena_admin_acq_entry *)&cmd_completion, + sizeof(cmd_completion)); if (unlikely(ret)) { ena_trc_err("Failed to create IO CQ. 
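The reworked ena_com_execute_admin_command above leans on the Linux-style ERR_PTR/IS_ERR idiom: a single pointer return carries either a valid context or a small negative error code, which the caller unpacks with PTR_ERR. Here is a compact sketch of how that encoding works, under the usual assumption that error codes fit in the last (unmapped) page of the address space; get_ctx() is a hypothetical example, not a driver function.

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        /* error pointers live in [-MAX_ERRNO, -1] when viewed unsigned */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct ctx { int id; };

static struct ctx *get_ctx(int fail)
{
        static struct ctx c = { 42 };
        return fail ? ERR_PTR(-12) : &c;        /* -12 plays an ENOMEM-like code */
}

int main(void)
{
        struct ctx *c = get_ctx(1);
        if (IS_ERR(c))
                printf("submit failed: %ld\n", PTR_ERR(c));
        return 0;
}

This is also why the hunk can compare comp_ctx against ERR_PTR(ENA_COM_NO_DEVICE) directly: equal error codes encode to equal pointer values.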
error: %d\n", ret); return ret; @@ -1290,7 +1298,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) { ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); - ENA_MSLEEP(20); + ENA_MSLEEP(ENA_POLL_MS); ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); } ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); @@ -1304,17 +1312,16 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; int ret; - memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd)); + memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); destroy_cmd.cq_idx = io_cq->idx; destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; - ret = ena_com_execute_admin_command( - admin_queue, - (struct ena_admin_aq_entry *)&destroy_cmd, - sizeof(destroy_cmd), - (struct ena_admin_acq_entry *)&destroy_resp, - sizeof(destroy_resp)); + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&destroy_cmd, + sizeof(destroy_cmd), + (struct ena_admin_acq_entry *)&destroy_resp, + sizeof(destroy_resp)); if (unlikely(ret && (ret != ENA_COM_NO_DEVICE))) ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret); @@ -1341,13 +1348,12 @@ void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) { u16 depth = ena_dev->aenq.q_depth; - ENA_ASSERT(ena_dev->aenq.head == depth, "Invalid AENQ state\n"); + ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n"); /* Init head_db to mark that all entries in the queue * are initially available */ - ENA_REG_WRITE32(depth, (unsigned char *)ena_dev->reg_bar - + ENA_REGS_AENQ_HEAD_DB_OFF); + ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); } int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) @@ -1356,12 +1362,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) struct ena_admin_set_feat_cmd cmd; struct ena_admin_set_feat_resp resp; struct ena_admin_get_feat_resp get_resp; - int ret = 0; - - if (unlikely(!ena_dev)) { - ena_trc_err("%s : ena_dev is NULL\n", __func__); - return ENA_COM_NO_DEVICE; - } + int ret; ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG); if (ret) { @@ -1373,7 +1374,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) ena_trc_warn("Trying to set unsupported aenq events. 
supported flag: %x asked flag: %x\n", get_resp.u.aenq.supported_groups, groups_flag); - return ENA_COM_PERMISSION; + return ENA_COM_UNSUPPORTED; } memset(&cmd, 0x0, sizeof(cmd)); @@ -1476,41 +1477,42 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev) void ena_com_admin_destroy(struct ena_com_dev *ena_dev) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_com_admin_cq *cq = &admin_queue->cq; + struct ena_com_admin_sq *sq = &admin_queue->sq; + struct ena_com_aenq *aenq = &ena_dev->aenq; + u16 size; - if (!admin_queue) - return; - + ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event); if (admin_queue->comp_ctx) ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx); admin_queue->comp_ctx = NULL; - - if (admin_queue->sq.entries) - ENA_MEM_FREE_COHERENT(ena_dev->dmadev, - ADMIN_SQ_SIZE(admin_queue->q_depth), - admin_queue->sq.entries, - admin_queue->sq.dma_addr, - admin_queue->sq.mem_handle); - admin_queue->sq.entries = NULL; - - if (admin_queue->cq.entries) - ENA_MEM_FREE_COHERENT(ena_dev->dmadev, - ADMIN_CQ_SIZE(admin_queue->q_depth), - admin_queue->cq.entries, - admin_queue->cq.dma_addr, - admin_queue->cq.mem_handle); - admin_queue->cq.entries = NULL; - + size = ADMIN_SQ_SIZE(admin_queue->q_depth); + if (sq->entries) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries, + sq->dma_addr, sq->mem_handle); + sq->entries = NULL; + + size = ADMIN_CQ_SIZE(admin_queue->q_depth); + if (cq->entries) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries, + cq->dma_addr, cq->mem_handle); + cq->entries = NULL; + + size = ADMIN_AENQ_SIZE(aenq->q_depth); if (ena_dev->aenq.entries) - ENA_MEM_FREE_COHERENT(ena_dev->dmadev, - ADMIN_AENQ_SIZE(ena_dev->aenq.q_depth), - ena_dev->aenq.entries, - ena_dev->aenq.dma_addr, - ena_dev->aenq.mem_handle); - ena_dev->aenq.entries = NULL; + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries, + aenq->dma_addr, aenq->mem_handle); + aenq->entries = NULL; } void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) { + u32 mask_value = 0; + + if (polling) + mask_value = ENA_REGS_ADMIN_INTR_MASK; + + ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); ena_dev->admin_queue.polling = polling; } @@ -1536,8 +1538,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) return 0; } -void -ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) +void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) { struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; @@ -1548,10 +1549,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev) { struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; - ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar - + ENA_REGS_MMIO_RESP_LO_OFF); - ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar - + ENA_REGS_MMIO_RESP_HI_OFF); + ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); + ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); ENA_MEM_FREE_COHERENT(ena_dev->dmadev, sizeof(*mmio_read->read_resp), @@ -1570,10 +1569,8 @@ void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); - ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar - + ENA_REGS_MMIO_RESP_LO_OFF); - ENA_REG_WRITE32(addr_high, (unsigned char 
*)ena_dev->reg_bar - + ENA_REGS_MMIO_RESP_HI_OFF); + ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); + ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); } int ena_com_admin_init(struct ena_com_dev *ena_dev, @@ -1619,24 +1616,20 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev, if (ret) goto error; - admin_queue->sq.db_addr = (u32 __iomem *) - ((unsigned char *)ena_dev->reg_bar + ENA_REGS_AQ_DB_OFF); + admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + ENA_REGS_AQ_DB_OFF); addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); - ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar - + ENA_REGS_AQ_BASE_LO_OFF); - ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar - + ENA_REGS_AQ_BASE_HI_OFF); + ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF); + ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF); addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); - ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar - + ENA_REGS_ACQ_BASE_LO_OFF); - ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar - + ENA_REGS_ACQ_BASE_HI_OFF); + ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF); + ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF); aq_caps = 0; aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; @@ -1650,10 +1643,8 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev, ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) & ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK; - ENA_REG_WRITE32(aq_caps, (unsigned char *)ena_dev->reg_bar - + ENA_REGS_AQ_CAPS_OFF); - ENA_REG_WRITE32(acq_caps, (unsigned char *)ena_dev->reg_bar - + ENA_REGS_ACQ_CAPS_OFF); + ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF); + ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF); ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers); if (ret) goto error; @@ -1672,7 +1663,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev, { struct ena_com_io_sq *io_sq; struct ena_com_io_cq *io_cq; - int ret = 0; + int ret; if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n", @@ -1683,8 +1674,8 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev, io_sq = &ena_dev->io_sq_queues[ctx->qid]; io_cq = &ena_dev->io_cq_queues[ctx->qid]; - memset(io_sq, 0x0, sizeof(struct ena_com_io_sq)); - memset(io_cq, 0x0, sizeof(struct ena_com_io_cq)); + memset(io_sq, 0x0, sizeof(*io_sq)); + memset(io_cq, 0x0, sizeof(*io_cq)); /* Init CQ */ io_cq->q_depth = ctx->queue_size; @@ -1794,6 +1785,19 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, memcpy(&get_feat_ctx->offload, &get_resp.u.offload, sizeof(get_resp.u.offload)); + /* Driver hints isn't mandatory admin command. 
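The admin-init hunk above programs each 64-bit DMA base address through a pair of 32-bit BAR registers, splitting it with the ..._TO_UINT32_LOW/HIGH helpers. A small sketch of that split follows; reg_write32(), the offsets, and the plain array standing in for MMIO are all illustrative, not the driver's API.

#include <stdint.h>
#include <stdio.h>

static void reg_write32(uint32_t *bar, unsigned int off, uint32_t val)
{
        bar[off / 4] = val;     /* a real driver uses a volatile MMIO store */
}

int main(void)
{
        uint32_t bar[16] = { 0 };
        uint64_t dma = 0x0000001234abcd00ULL;

        reg_write32(bar, 0x0, (uint32_t)(dma & 0xffffffffu));   /* ..._LO_OFF */
        reg_write32(bar, 0x4, (uint32_t)(dma >> 32));           /* ..._HI_OFF */
        printf("lo=%#x hi=%#x\n", bar[0], bar[1]);
        return 0;
}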
So in case the + * command isn't supported set driver hints to 0 + */ + rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS); + + if (!rc) + memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, + sizeof(get_resp.u.hw_hints)); + else if (rc == ENA_COM_UNSUPPORTED) + memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints)); + else + return rc; + return 0; } @@ -1826,6 +1830,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) struct ena_admin_aenq_common_desc *aenq_common; struct ena_com_aenq *aenq = &dev->aenq; ena_aenq_handler handler_cb; + unsigned long long timestamp; u16 masked_head, processed = 0; u8 phase; @@ -1837,11 +1842,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) /* Go over all the events */ while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { + timestamp = (unsigned long long)aenq_common->timestamp_low | + ((unsigned long long)aenq_common->timestamp_high << 32); + ENA_TOUCH(timestamp); /* In case debug is disabled */ ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n", aenq_common->group, aenq_common->syndrom, - (unsigned long long)aenq_common->timestamp_low + - ((u64)aenq_common->timestamp_high << 32)); + timestamp); /* Handle specific event*/ handler_cb = ena_com_get_specific_aenq_cb(dev, @@ -1869,11 +1876,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) /* write the aenq doorbell after all AENQ descriptors were read */ mb(); - ENA_REG_WRITE32((u32)aenq->head, (unsigned char *)dev->reg_bar - + ENA_REGS_AENQ_HEAD_DB_OFF); + ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); } -int ena_com_dev_reset(struct ena_com_dev *ena_dev) +int ena_com_dev_reset(struct ena_com_dev *ena_dev, + enum ena_regs_reset_reason_types reset_reason) { u32 stat, timeout, cap, reset_val; int rc; @@ -1901,8 +1908,9 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev) /* start reset */ reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK; - ENA_REG_WRITE32(reset_val, (unsigned char *)ena_dev->reg_bar - + ENA_REGS_DEV_CTL_OFF); + reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) & + ENA_REGS_DEV_CTL_RESET_REASON_MASK; + ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); /* Write again the MMIO read request address */ ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); @@ -1915,29 +1923,32 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev) } /* reset done */ - ENA_REG_WRITE32(0, (unsigned char *)ena_dev->reg_bar - + ENA_REGS_DEV_CTL_OFF); + ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); rc = wait_for_reset_state(ena_dev, timeout, 0); if (rc != 0) { ena_trc_err("Reset indication didn't turn off\n"); return rc; } + timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >> + ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT; + if (timeout) + /* the resolution of timeout reg is 100ms */ + ena_dev->admin_queue.completion_timeout = timeout * 100000; + else + ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US; + return 0; } static int ena_get_dev_stats(struct ena_com_dev *ena_dev, - struct ena_admin_aq_get_stats_cmd *get_cmd, - struct ena_admin_acq_get_stats_resp *get_resp, + struct ena_com_stats_ctx *ctx, enum ena_admin_get_stats_type type) { + struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; + struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; struct ena_com_admin_queue *admin_queue; - int ret = 0; - - if (!ena_dev) { - ena_trc_err("%s : ena_dev is 
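The new ena_com_dev_reset signature above folds the reset reason into the device-control register with a shift-and-mask, alongside the reset bit itself. A tiny sketch of that packing; the shift/mask values here are illustrative, the real ones come from ena_regs_defs.h.

#include <stdint.h>
#include <stdio.h>

#define DEV_CTL_DEV_RESET_MASK     0x1u
#define DEV_CTL_RESET_REASON_SHIFT 28
#define DEV_CTL_RESET_REASON_MASK  0xf0000000u

int main(void)
{
        uint32_t reason = 3;    /* hypothetical reset-reason code */
        uint32_t reset_val = DEV_CTL_DEV_RESET_MASK;

        reset_val |= (reason << DEV_CTL_RESET_REASON_SHIFT) &
                     DEV_CTL_RESET_REASON_MASK;
        printf("DEV_CTL = %#x\n", reset_val);   /* -> 0x30000001 */
        return 0;
}

Masking after the shift keeps an out-of-range reason value from clobbering neighbouring fields in the register.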
NULL\n", __func__); - return ENA_COM_NO_DEVICE; - } + int ret; admin_queue = &ena_dev->admin_queue; @@ -1945,12 +1956,11 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev, get_cmd->aq_common_descriptor.flags = 0; get_cmd->type = type; - ret = ena_com_execute_admin_command( - admin_queue, - (struct ena_admin_aq_entry *)get_cmd, - sizeof(*get_cmd), - (struct ena_admin_acq_entry *)get_resp, - sizeof(*get_resp)); + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)get_cmd, + sizeof(*get_cmd), + (struct ena_admin_acq_entry *)get_resp, + sizeof(*get_resp)); if (unlikely(ret)) ena_trc_err("Failed to get stats. error: %d\n", ret); @@ -1961,78 +1971,28 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev, int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, struct ena_admin_basic_stats *stats) { - int ret = 0; - struct ena_admin_aq_get_stats_cmd get_cmd; - struct ena_admin_acq_get_stats_resp get_resp; + struct ena_com_stats_ctx ctx; + int ret; - memset(&get_cmd, 0x0, sizeof(get_cmd)); - ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp, - ENA_ADMIN_GET_STATS_TYPE_BASIC); + memset(&ctx, 0x0, sizeof(ctx)); + ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC); if (likely(ret == 0)) - memcpy(stats, &get_resp.basic_stats, - sizeof(get_resp.basic_stats)); + memcpy(stats, &ctx.get_resp.basic_stats, + sizeof(ctx.get_resp.basic_stats)); return ret; } -int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff, - u32 len) -{ - int ret = 0; - struct ena_admin_aq_get_stats_cmd get_cmd; - struct ena_admin_acq_get_stats_resp get_resp; - ena_mem_handle_t mem_handle = 0; - void *virt_addr; - dma_addr_t phys_addr; - - ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len, - virt_addr, phys_addr, mem_handle); - if (!virt_addr) { - ret = ENA_COM_NO_MEM; - goto done; - } - memset(&get_cmd, 0x0, sizeof(get_cmd)); - ret = ena_com_mem_addr_set(ena_dev, - &get_cmd.u.control_buffer.address, - phys_addr); - if (unlikely(ret)) { - ena_trc_err("memory address set failed\n"); - return ret; - } - get_cmd.u.control_buffer.length = len; - - get_cmd.device_id = ena_dev->stats_func; - get_cmd.queue_idx = ena_dev->stats_queue; - - ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp, - ENA_ADMIN_GET_STATS_TYPE_EXTENDED); - if (ret < 0) - goto free_ext_stats_mem; - - ret = snprintf(buff, len, "%s", (char *)virt_addr); - -free_ext_stats_mem: - ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr, - mem_handle); -done: - return ret; -} - int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) { struct ena_com_admin_queue *admin_queue; struct ena_admin_set_feat_cmd cmd; struct ena_admin_set_feat_resp resp; - int ret = 0; - - if (unlikely(!ena_dev)) { - ena_trc_err("%s : ena_dev is NULL\n", __func__); - return ENA_COM_NO_DEVICE; - } + int ret; if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { - ena_trc_info("Feature %d isn't supported\n", ENA_ADMIN_MTU); - return ENA_COM_PERMISSION; + ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU); + return ENA_COM_UNSUPPORTED; } memset(&cmd, 0x0, sizeof(cmd)); @@ -2049,11 +2009,10 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) (struct ena_admin_acq_entry *)&resp, sizeof(resp)); - if (unlikely(ret)) { + if (unlikely(ret)) ena_trc_err("Failed to set mtu %d. 
error: %d\n", mtu, ret); - return ENA_COM_INVAL; - } - return 0; + + return ret; } int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, @@ -2066,7 +2025,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, ENA_ADMIN_STATELESS_OFFLOAD_CONFIG); if (unlikely(ret)) { ena_trc_err("Failed to get offload capabilities %d\n", ret); - return ENA_COM_INVAL; + return ret; } memcpy(offload, &resp.u.offload, sizeof(resp.u.offload)); @@ -2085,9 +2044,9 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) { - ena_trc_info("Feature %d isn't supported\n", - ENA_ADMIN_RSS_HASH_FUNCTION); - return ENA_COM_PERMISSION; + ena_trc_dbg("Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_FUNCTION); + return ENA_COM_UNSUPPORTED; } /* Validate hash function is supported */ @@ -2099,7 +2058,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) { ena_trc_err("Func hash %d isn't supported by device, abort\n", rss->hash_func); - return ENA_COM_PERMISSION; + return ENA_COM_UNSUPPORTED; } memset(&cmd, 0x0, sizeof(cmd)); @@ -2158,7 +2117,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) { ena_trc_err("Flow hash function %d isn't supported\n", func); - return ENA_COM_PERMISSION; + return ENA_COM_UNSUPPORTED; } switch (func) { @@ -2207,7 +2166,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, if (unlikely(rc)) return rc; - rss->hash_func = (enum ena_admin_hash_functions)get_resp.u.flow_hash_func.selected_func; + rss->hash_func = get_resp.u.flow_hash_func.selected_func; if (func) *func = rss->hash_func; @@ -2242,17 +2201,20 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; struct ena_admin_set_feat_cmd cmd; struct ena_admin_set_feat_resp resp; int ret; if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) { - ena_trc_info("Feature %d isn't supported\n", - ENA_ADMIN_RSS_HASH_INPUT); - return ENA_COM_PERMISSION; + ena_trc_dbg("Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_INPUT); + return ENA_COM_UNSUPPORTED; } + memset(&cmd, 0x0, sizeof(cmd)); + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; cmd.aq_common_descriptor.flags = ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; @@ -2268,20 +2230,17 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) ena_trc_err("memory address set failed\n"); return ret; } - cmd.control_buffer.length = - sizeof(struct ena_admin_feature_rss_hash_control); + cmd.control_buffer.length = sizeof(*hash_ctrl); ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *)&cmd, sizeof(cmd), (struct ena_admin_acq_entry *)&resp, sizeof(resp)); - if (unlikely(ret)) { + if (unlikely(ret)) ena_trc_err("Failed to set hash input. 
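Several hunks in this file, including set_hash_ctrl just above, replace sizeof(struct type) with sizeof(expression) in memset and length calculations. The benefit is purely defensive, as the sketch below shows with a hypothetical cmd struct: if the pointee's type ever changes, the size tracks it automatically instead of silently going stale.

#include <string.h>

struct cmd { int opcode; int flags; };

static void cmd_init(struct cmd *c)
{
        /* sizeof(*c) follows the type of c; sizeof(struct cmd) would not
         * if c were later retyped. */
        memset(c, 0x0, sizeof(*c));
}

int main(void)
{
        struct cmd c;
        cmd_init(&c);
        return c.opcode;        /* 0 after the memset */
}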
error: %d\n", ret); - return ENA_COM_INVAL; - } - return 0; + return ret; } int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) @@ -2293,7 +2252,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) int rc, i; /* Get the supported hash input */ - rc = ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL); + rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL); if (unlikely(rc)) return rc; @@ -2322,7 +2281,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; - hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = + hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields = ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA; for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) { @@ -2332,7 +2291,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n", i, hash_ctrl->supported_fields[i].fields, hash_ctrl->selected_fields[i].fields); - return ENA_COM_PERMISSION; + return ENA_COM_UNSUPPORTED; } } @@ -2340,7 +2299,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) /* In case of failure, restore the old hash ctrl */ if (unlikely(rc)) - ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL); + ena_com_get_hash_ctrl(ena_dev, 0, NULL); return rc; } @@ -2377,7 +2336,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, /* In case of failure, restore the old hash ctrl */ if (unlikely(rc)) - ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL); + ena_com_get_hash_ctrl(ena_dev, 0, NULL); return 0; } @@ -2404,14 +2363,13 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) struct ena_rss *rss = &ena_dev->rss; struct ena_admin_set_feat_cmd cmd; struct ena_admin_set_feat_resp resp; - int ret = 0; + int ret; - if (!ena_com_check_supported_feature_id( - ena_dev, - ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) { - ena_trc_info("Feature %d isn't supported\n", - ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); - return ENA_COM_PERMISSION; + if (!ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) { + ena_trc_dbg("Feature %d isn't supported\n", + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); + return ENA_COM_UNSUPPORTED; } ret = ena_com_ind_tbl_convert_to_device(ena_dev); @@ -2446,12 +2404,10 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) (struct ena_admin_acq_entry *)&resp, sizeof(resp)); - if (unlikely(ret)) { + if (unlikely(ret)) ena_trc_err("Failed to set indirect table. 
error: %d\n", ret); - return ENA_COM_INVAL; - } - return 0; + return ret; } int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) @@ -2538,17 +2494,18 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) } int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, - u32 debug_area_size) { + u32 debug_area_size) +{ struct ena_host_attribute *host_attr = &ena_dev->host_attr; - ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, - debug_area_size, - host_attr->debug_area_virt_addr, - host_attr->debug_area_dma_addr, - host_attr->debug_area_dma_handle); - if (unlikely(!host_attr->debug_area_virt_addr)) { - host_attr->debug_area_size = 0; - return ENA_COM_NO_MEM; + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + debug_area_size, + host_attr->debug_area_virt_addr, + host_attr->debug_area_dma_addr, + host_attr->debug_area_dma_handle); + if (unlikely(!host_attr->debug_area_virt_addr)) { + host_attr->debug_area_size = 0; + return ENA_COM_NO_MEM; } host_attr->debug_area_size = debug_area_size; @@ -2590,6 +2547,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) struct ena_com_admin_queue *admin_queue; struct ena_admin_set_feat_cmd cmd; struct ena_admin_set_feat_resp resp; + int ret; /* Host attribute config is called before ena_com_get_dev_attr_feat @@ -2635,14 +2593,12 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) /* Interrupt moderation */ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) { - return ena_com_check_supported_feature_id( - ena_dev, - ENA_ADMIN_INTERRUPT_MODERATION); + return ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_INTERRUPT_MODERATION); } -int -ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, - u32 tx_coalesce_usecs) +int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, + u32 tx_coalesce_usecs) { if (!ena_dev->intr_delay_resolution) { ena_trc_err("Illegal interrupt delay granularity value\n"); @@ -2655,9 +2611,8 @@ ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, return 0; } -int -ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, - u32 rx_coalesce_usecs) +int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, + u32 rx_coalesce_usecs) { if (!ena_dev->intr_delay_resolution) { ena_trc_err("Illegal interrupt delay granularity value\n"); @@ -2690,9 +2645,9 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) ENA_ADMIN_INTERRUPT_MODERATION); if (rc) { - if (rc == ENA_COM_PERMISSION) { - ena_trc_info("Feature %d isn't supported\n", - ENA_ADMIN_INTERRUPT_MODERATION); + if (rc == ENA_COM_UNSUPPORTED) { + ena_trc_dbg("Feature %d isn't supported\n", + ENA_ADMIN_INTERRUPT_MODERATION); rc = 0; } else { ena_trc_err("Failed to get interrupt moderation admin cmd. 
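The non-adaptive moderation setters above convert a caller-supplied interval in microseconds into device ticks by dividing by intr_delay_resolution, rejecting a zero granularity first. A minimal sketch of that conversion with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Returns device ticks, or -1 for an illegal zero resolution. */
static int usecs_to_device_units(uint32_t usecs, uint16_t resolution_us)
{
        if (resolution_us == 0)
                return -1;      /* mirrors the "Illegal ... granularity" path */
        return (int)(usecs / resolution_us);
}

int main(void)
{
        printf("%d ticks\n", usecs_to_device_units(64, 4));     /* -> 16 */
        return 0;
}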
rc: %d\n", @@ -2719,8 +2674,7 @@ err: return rc; } -void -ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev) +void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev) { struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; @@ -2763,14 +2717,12 @@ ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev) ENA_INTR_HIGHEST_BYTES; } -unsigned int -ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) +unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) { return ena_dev->intr_moder_tx_interval; } -unsigned int -ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) +unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) { struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; @@ -2794,7 +2746,10 @@ void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev, intr_moder_tbl[level].intr_moder_interval /= ena_dev->intr_delay_resolution; intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval; - intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval; + + /* use hardcoded value until ethtool supports bytecount parameter */ + if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED) + intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval; } void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, diff --git a/drivers/net/ena/base/ena_com.h b/drivers/net/ena/base/ena_com.h index e5345926..f58cd86a 100644 --- a/drivers/net/ena/base/ena_com.h +++ b/drivers/net/ena/base/ena_com.h @@ -35,15 +35,7 @@ #define ENA_COM #include "ena_plat.h" -#include "ena_common_defs.h" -#include "ena_admin_defs.h" -#include "ena_eth_io_defs.h" -#include "ena_regs_defs.h" -#if defined(__linux__) && !defined(__KERNEL__) -#include -#include -#define __iomem -#endif +#include "ena_includes.h" #define ENA_MAX_NUM_IO_QUEUES 128U /* We need to queues for each IO (on for Tx and one for Rx) */ @@ -89,6 +81,11 @@ #define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6 #define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4 +#define ENA_INTR_MODER_LEVEL_STRIDE 1 +#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF + +#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF + enum ena_intr_moder_level { ENA_INTR_MODER_LOWEST = 0, ENA_INTR_MODER_LOW, @@ -120,8 +117,8 @@ struct ena_com_rx_buf_info { }; struct ena_com_io_desc_addr { - u8 __iomem *pbuf_dev_addr; /* LLQ address */ - u8 *virt_addr; + u8 __iomem *pbuf_dev_addr; /* LLQ address */ + u8 *virt_addr; dma_addr_t phys_addr; ena_mem_handle_t mem_handle; }; @@ -130,13 +127,12 @@ struct ena_com_tx_meta { u16 mss; u16 l3_hdr_len; u16 l3_hdr_offset; - u16 l3_outer_hdr_len; /* In words */ - u16 l3_outer_hdr_offset; u16 l4_hdr_len; /* In words */ }; struct ena_com_io_cq { struct ena_com_io_desc_addr cdesc_addr; + void *bus; /* Interrupt unmask register */ u32 __iomem *unmask_reg; @@ -174,6 +170,7 @@ struct ena_com_io_cq { struct ena_com_io_sq { struct ena_com_io_desc_addr desc_addr; + void *bus; u32 __iomem *db_addr; u8 __iomem *header_addr; @@ -228,8 +225,11 @@ struct ena_com_stats_admin { struct ena_com_admin_queue { void *q_dmadev; + void *bus; ena_spinlock_t q_lock; /* spinlock for the admin queue */ + struct ena_comp_ctx *comp_ctx; + u32 completion_timeout; u16 q_depth; struct ena_com_admin_cq cq; struct ena_com_admin_sq sq; @@ -266,6 +266,7 @@ struct ena_com_mmio_read { struct ena_admin_ena_mmio_req_read_less_resp *read_resp; dma_addr_t 
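ena_com_init_intr_moderation_entry above only copies the byte threshold when it differs from ENA_INTR_BYTE_COUNT_NOT_SUPPORTED (0xFFFFFF in the header), treating that value as a "leave the table alone" sentinel. A self-contained sketch of the sentinel-guard pattern; the struct and function names are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF       /* sentinel, as in the header */

struct moder_entry { uint32_t pkts; uint32_t bytes; };

static void entry_set(struct moder_entry *tbl, const struct moder_entry *in)
{
        tbl->pkts = in->pkts;
        if (in->bytes != BYTE_COUNT_NOT_SUPPORTED)      /* else keep current value */
                tbl->bytes = in->bytes;
}

int main(void)
{
        struct moder_entry tbl = { 64, 1024 };
        struct moder_entry in  = { 128, BYTE_COUNT_NOT_SUPPORTED };

        entry_set(&tbl, &in);
        printf("pkts=%u bytes=%u\n", tbl.pkts, tbl.bytes);      /* bytes stays 1024 */
        return 0;
}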
read_resp_dma_addr; ena_mem_handle_t read_resp_mem_handle; + u32 reg_read_to; /* in us */ u16 seq_num; bool readless_supported; /* spin lock to ensure a single outstanding read */ @@ -316,6 +317,7 @@ struct ena_com_dev { u8 __iomem *reg_bar; void __iomem *mem_bar; void *dmadev; + void *bus; enum ena_admin_placement_policy_type tx_mem_queue_type; u32 tx_max_header_size; @@ -340,6 +342,7 @@ struct ena_com_dev_get_features_ctx { struct ena_admin_device_attr_feature_desc dev_attr; struct ena_admin_feature_aenq_desc aenq; struct ena_admin_feature_offload_desc offload; + struct ena_admin_ena_hw_hints hw_hints; }; struct ena_com_create_io_ctx { @@ -379,7 +382,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev); /* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism * @ena_dev: ENA communication layer struct - * @realess_supported: readless mode (enable/disable) + * @readless_supported: readless mode (enable/disable) */ void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported); @@ -421,14 +424,16 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev); /* ena_com_dev_reset - Perform device FLR to the device. * @ena_dev: ENA communication layer struct + * @reset_reason: Specify what is the trigger for the reset in case of an error. * * @return - 0 on success, negative value on failure. */ -int ena_com_dev_reset(struct ena_com_dev *ena_dev); +int ena_com_dev_reset(struct ena_com_dev *ena_dev, + enum ena_regs_reset_reason_types reset_reason); /* ena_com_create_io_queue - Create io queue. * @ena_dev: ENA communication layer struct - * ena_com_create_io_ctx - create context structure + * @ctx - create context structure * * Create the submission and the completion queues. * @@ -437,8 +442,9 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev); int ena_com_create_io_queue(struct ena_com_dev *ena_dev, struct ena_com_create_io_ctx *ctx); -/* ena_com_admin_destroy - Destroy IO queue with the queue id - qid. +/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid. * @ena_dev: ENA communication layer struct + * @qid - the caller virtual queue id. */ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid); @@ -581,9 +587,8 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag); * * @return: 0 on Success and negative value otherwise. */ -int -ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, - struct ena_com_dev_get_features_ctx *get_feat_ctx); +int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx); /* ena_com_get_dev_basic_stats - Get device basic statistics * @ena_dev: ENA communication layer struct @@ -608,9 +613,8 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu); * * @return: 0 on Success and negative value otherwise. */ -int -ena_com_get_offload_settings(struct ena_com_dev *ena_dev, - struct ena_admin_feature_offload_desc *offload); +int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, + struct ena_admin_feature_offload_desc *offload); /* ena_com_rss_init - Init RSS * @ena_dev: ENA communication layer struct @@ -765,8 +769,8 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev); * * Retrieve the RSS indirection table from the device. * - * @note: If the caller called ena_com_indirect_table_fill_entry but didn't - * flash it to the device, the new configuration will be lost. 
+ * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flash + * it to the device, the new configuration will be lost. * * @return: 0 on Success and negative value otherwise. */ @@ -874,8 +878,7 @@ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev); * moderation table back to the default parameters. * @ena_dev: ENA communication layer struct */ -void -ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev); +void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev); /* ena_com_update_nonadaptive_moderation_interval_tx - Update the * non-adaptive interval in Tx direction. @@ -884,9 +887,8 @@ ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev); * * @return - 0 on success, negative value on failure. */ -int -ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, - u32 tx_coalesce_usecs); +int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, + u32 tx_coalesce_usecs); /* ena_com_update_nonadaptive_moderation_interval_rx - Update the * non-adaptive interval in Rx direction. @@ -895,9 +897,8 @@ ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, * * @return - 0 on success, negative value on failure. */ -int -ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, - u32 rx_coalesce_usecs); +int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, + u32 rx_coalesce_usecs); /* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the * non-adaptive interval in Tx direction. @@ -905,8 +906,7 @@ ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, * * @return - interval in usec */ -unsigned int -ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev); +unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev); /* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the * non-adaptive interval in Rx direction. @@ -914,8 +914,7 @@ ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev); * * @return - interval in usec */ -unsigned int -ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev); +unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev); /* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt * moderation table. @@ -940,20 +939,17 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, enum ena_intr_moder_level level, struct ena_intr_moder_entry *entry); -static inline bool -ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) +static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) { return ena_dev->adaptive_coalescing; } -static inline void -ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev) +static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev) { ena_dev->adaptive_coalescing = true; } -static inline void -ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev) +static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev) { ena_dev->adaptive_coalescing = false; } @@ -966,12 +962,11 @@ ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev) * @moder_tbl_idx: Current table level as input update new level as return * value. 
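The smoothed_interval in/out parameter documented above feeds a weighted running average; the 6/4 split between ENA_INTR_DELAY_OLD_VALUE_WEIGHT and ..._NEW_VALUE_WEIGHT earlier in this header suggests roughly 60% history, 40% new table value per update. A sketch of that smoothing under that assumption (the driver's exact formula may differ):

#include <stdio.h>

#define OLD_WEIGHT 6
#define NEW_WEIGHT 4

static unsigned int smooth(unsigned int current, unsigned int target)
{
        return (target * NEW_WEIGHT + current * OLD_WEIGHT) /
               (OLD_WEIGHT + NEW_WEIGHT);
}

int main(void)
{
        unsigned int interval = 0;
        for (int i = 0; i < 5; i++) {
                interval = smooth(interval, 100);
                printf("step %d: %u us\n", i, interval);        /* 40, 64, 78, ... */
        }
        return 0;
}

The weighting damps oscillation: one bursty sample moves the interval only part of the way toward the new table entry.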
*/ -static inline void -ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev, - unsigned int pkts, - unsigned int bytes, - unsigned int *smoothed_interval, - unsigned int *moder_tbl_idx) +static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev, + unsigned int pkts, + unsigned int bytes, + unsigned int *smoothed_interval, + unsigned int *moder_tbl_idx) { enum ena_intr_moder_level curr_moder_idx, new_moder_idx; struct ena_intr_moder_entry *curr_moder_entry; @@ -1001,17 +996,20 @@ ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev, if (curr_moder_idx == ENA_INTR_MODER_LOWEST) { if ((pkts > curr_moder_entry->pkts_per_interval) || (bytes > curr_moder_entry->bytes_per_interval)) - new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx + 1); + new_moder_idx = + (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE); } else { - pred_moder_entry = &intr_moder_tbl[curr_moder_idx - 1]; + pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE]; if ((pkts <= pred_moder_entry->pkts_per_interval) || (bytes <= pred_moder_entry->bytes_per_interval)) - new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx - 1); + new_moder_idx = + (enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE); else if ((pkts > curr_moder_entry->pkts_per_interval) || (bytes > curr_moder_entry->bytes_per_interval)) { if (curr_moder_idx != ENA_INTR_MODER_HIGHEST) - new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx + 1); + new_moder_idx = + (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE); } } new_moder_entry = &intr_moder_tbl[new_moder_idx]; @@ -1044,18 +1042,12 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg, intr_reg->intr_control |= (tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) - & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK; + & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK; if (unmask) intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; } -int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff, - u32 len); - -int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev, - u32 funct_queue); - #if defined(__cplusplus) } #endif /* __cplusplus */ diff --git a/drivers/net/ena/base/ena_defs/ena_admin_defs.h b/drivers/net/ena/base/ena_defs/ena_admin_defs.h index 7a031d90..04d4e9a5 100644 --- a/drivers/net/ena/base/ena_defs/ena_admin_defs.h +++ b/drivers/net/ena/base/ena_defs/ena_admin_defs.h @@ -34,174 +34,140 @@ #ifndef _ENA_ADMIN_H_ #define _ENA_ADMIN_H_ -/* admin commands opcodes */ enum ena_admin_aq_opcode { - /* create submission queue */ - ENA_ADMIN_CREATE_SQ = 1, + ENA_ADMIN_CREATE_SQ = 1, - /* destroy submission queue */ - ENA_ADMIN_DESTROY_SQ = 2, + ENA_ADMIN_DESTROY_SQ = 2, - /* create completion queue */ - ENA_ADMIN_CREATE_CQ = 3, + ENA_ADMIN_CREATE_CQ = 3, - /* destroy completion queue */ - ENA_ADMIN_DESTROY_CQ = 4, + ENA_ADMIN_DESTROY_CQ = 4, - /* get capabilities of particular feature */ - ENA_ADMIN_GET_FEATURE = 8, + ENA_ADMIN_GET_FEATURE = 8, - /* get capabilities of particular feature */ - ENA_ADMIN_SET_FEATURE = 9, + ENA_ADMIN_SET_FEATURE = 9, - /* get statistics */ - ENA_ADMIN_GET_STATS = 11, + ENA_ADMIN_GET_STATS = 11, }; -/* admin command completion status codes */ enum ena_admin_aq_completion_status { - /* Request completed successfully */ - ENA_ADMIN_SUCCESS = 0, + ENA_ADMIN_SUCCESS = 0, - /* no resources to satisfy request */ - ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, + 
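The ena_com_update_intr_reg hunk above fixes a copy-paste bug: the Tx delay was shifted into its field but masked with the Rx mask. The sketch below shows why shift and mask must come from the same field; the layout values are illustrative, the real ones live in ena_eth_io_defs.h.

#include <stdint.h>
#include <stdio.h>

#define RX_DELAY_SHIFT 0
#define RX_DELAY_MASK  0x00007fffu
#define TX_DELAY_SHIFT 15
#define TX_DELAY_MASK  0x3fff8000u
#define UNMASK_BIT     0x40000000u

int main(void)
{
        uint32_t ctrl = 0;

        ctrl |= (32u << RX_DELAY_SHIFT) & RX_DELAY_MASK;
        ctrl |= (16u << TX_DELAY_SHIFT) & TX_DELAY_MASK;        /* TX shift, TX mask */
        ctrl |= UNMASK_BIT;
        printf("intr_control = %#x\n", ctrl);
        return 0;
}

With a layout like this, masking the shifted Tx value with the Rx mask zeroes it out entirely, so the device never sees the requested Tx coalescing interval.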
ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, - /* Bad opcode in request descriptor */ - ENA_ADMIN_BAD_OPCODE = 2, + ENA_ADMIN_BAD_OPCODE = 2, - /* Unsupported opcode in request descriptor */ - ENA_ADMIN_UNSUPPORTED_OPCODE = 3, + ENA_ADMIN_UNSUPPORTED_OPCODE = 3, - /* Wrong request format */ - ENA_ADMIN_MALFORMED_REQUEST = 4, + ENA_ADMIN_MALFORMED_REQUEST = 4, - /* One of parameters is not valid. Provided in ACQ entry - * extended_status - */ - ENA_ADMIN_ILLEGAL_PARAMETER = 5, + /* Additional status is provided in ACQ entry extended_status */ + ENA_ADMIN_ILLEGAL_PARAMETER = 5, - /* unexpected error */ - ENA_ADMIN_UNKNOWN_ERROR = 6, + ENA_ADMIN_UNKNOWN_ERROR = 6, }; -/* get/set feature subcommands opcodes */ enum ena_admin_aq_feature_id { - /* list of all supported attributes/capabilities in the ENA */ - ENA_ADMIN_DEVICE_ATTRIBUTES = 1, + ENA_ADMIN_DEVICE_ATTRIBUTES = 1, + + ENA_ADMIN_MAX_QUEUES_NUM = 2, - /* max number of supported queues per for every queues type */ - ENA_ADMIN_MAX_QUEUES_NUM = 2, + ENA_ADMIN_HW_HINTS = 3, - /* Receive Side Scaling (RSS) function */ - ENA_ADMIN_RSS_HASH_FUNCTION = 10, + ENA_ADMIN_RSS_HASH_FUNCTION = 10, - /* stateless TCP/UDP/IP offload capabilities. */ - ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, - /* Multiple tuples flow table configuration */ - ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, - /* max MTU, current MTU */ - ENA_ADMIN_MTU = 14, + ENA_ADMIN_MTU = 14, - /* Receive Side Scaling (RSS) hash input */ - ENA_ADMIN_RSS_HASH_INPUT = 18, + ENA_ADMIN_RSS_HASH_INPUT = 18, - /* interrupt moderation parameters */ - ENA_ADMIN_INTERRUPT_MODERATION = 20, + ENA_ADMIN_INTERRUPT_MODERATION = 20, - /* AENQ configuration */ - ENA_ADMIN_AENQ_CONFIG = 26, + ENA_ADMIN_AENQ_CONFIG = 26, - /* Link configuration */ - ENA_ADMIN_LINK_CONFIG = 27, + ENA_ADMIN_LINK_CONFIG = 27, - /* Host attributes configuration */ - ENA_ADMIN_HOST_ATTR_CONFIG = 28, + ENA_ADMIN_HOST_ATTR_CONFIG = 28, - /* Number of valid opcodes */ - ENA_ADMIN_FEATURES_OPCODE_NUM = 32, + ENA_ADMIN_FEATURES_OPCODE_NUM = 32, }; -/* descriptors and headers placement */ enum ena_admin_placement_policy_type { - /* descriptors and headers are in OS memory */ - ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, + /* descriptors and headers are in host memory */ + ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, - /* descriptors and headers in device memory (a.k.a Low Latency + /* descriptors and headers are in device memory (a.k.a Low Latency * Queue) */ - ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, + ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, }; -/* link speeds */ enum ena_admin_link_types { - ENA_ADMIN_LINK_SPEED_1G = 0x1, + ENA_ADMIN_LINK_SPEED_1G = 0x1, - ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, + ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, - ENA_ADMIN_LINK_SPEED_5G = 0x4, + ENA_ADMIN_LINK_SPEED_5G = 0x4, - ENA_ADMIN_LINK_SPEED_10G = 0x8, + ENA_ADMIN_LINK_SPEED_10G = 0x8, - ENA_ADMIN_LINK_SPEED_25G = 0x10, + ENA_ADMIN_LINK_SPEED_25G = 0x10, - ENA_ADMIN_LINK_SPEED_40G = 0x20, + ENA_ADMIN_LINK_SPEED_40G = 0x20, - ENA_ADMIN_LINK_SPEED_50G = 0x40, + ENA_ADMIN_LINK_SPEED_50G = 0x40, - ENA_ADMIN_LINK_SPEED_100G = 0x80, + ENA_ADMIN_LINK_SPEED_100G = 0x80, - ENA_ADMIN_LINK_SPEED_200G = 0x100, + ENA_ADMIN_LINK_SPEED_200G = 0x100, - ENA_ADMIN_LINK_SPEED_400G = 0x200, + ENA_ADMIN_LINK_SPEED_400G = 0x200, }; -/* completion queue update policy */ enum ena_admin_completion_policy_type { - /* cqe for each sq descriptor */ - ENA_ADMIN_COMPLETION_POLICY_DESC = 0, + /* completion queue entry for 
each sq descriptor */ + ENA_ADMIN_COMPLETION_POLICY_DESC = 0, - /* cqe upon request in sq descriptor */ - ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, + /* completion queue entry upon request in sq descriptor */ + ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, /* current queue head pointer is updated in OS memory upon sq * descriptor request */ - ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, + ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, /* current queue head pointer is updated in OS memory for each sq * descriptor */ - ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, + ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, }; -/* type of get statistics command */ +/* basic stats return ena_admin_basic_stats while extanded stats return a + * buffer (string format) with additional statistics per queue and per + * device id + */ enum ena_admin_get_stats_type { - /* Basic statistics */ - ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, + ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, - /* Extended statistics */ - ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, + ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, }; -/* scope of get statistics command */ enum ena_admin_get_stats_scope { - ENA_ADMIN_SPECIFIC_QUEUE = 0, + ENA_ADMIN_SPECIFIC_QUEUE = 0, - ENA_ADMIN_ETH_TRAFFIC = 1, + ENA_ADMIN_ETH_TRAFFIC = 1, }; -/* ENA Admin Queue (AQ) common descriptor */ struct ena_admin_aq_common_desc { - /* word 0 : */ - /* command identificator to associate it with the completion - * 11:0 : command_id + /* 11:0 : command_id * 15:12 : reserved12 */ uint16_t command_id; - /* as appears in ena_aq_opcode */ + /* as appears in ena_admin_aq_opcode */ uint8_t opcode; /* 0 : phase @@ -214,24 +180,17 @@ struct ena_admin_aq_common_desc { uint8_t flags; }; -/* used in ena_aq_entry. Can point directly to control data, or to a page - * list chunk. Used also at the end of indirect mode page list chunks, for - * chaining. +/* used in ena_admin_aq_entry. Can point directly to control data, or to a + * page list chunk. Used also at the end of indirect mode page list chunks, + * for chaining. */ struct ena_admin_ctrl_buff_info { - /* word 0 : indicates length of the buffer pointed by - * control_buffer_address. 
- */ uint32_t length; - /* words 1:2 : points to control buffer (direct or indirect) */ struct ena_common_mem_addr address; }; -/* submission queue full identification */ struct ena_admin_sq { - /* word 0 : */ - /* queue id */ uint16_t sq_idx; /* 4:0 : reserved @@ -242,36 +201,25 @@ struct ena_admin_sq { uint8_t reserved1; }; -/* AQ entry format */ struct ena_admin_aq_entry { - /* words 0 : */ struct ena_admin_aq_common_desc aq_common_descriptor; - /* words 1:3 : */ union { - /* command specific inline data */ uint32_t inline_data_w1[3]; - /* words 1:3 : points to control buffer (direct or - * indirect, chained if needed) - */ struct ena_admin_ctrl_buff_info control_buffer; } u; - /* command specific inline data */ uint32_t inline_data_w4[12]; }; -/* ENA Admin Completion Queue (ACQ) common descriptor */ struct ena_admin_acq_common_desc { - /* word 0 : */ /* command identifier to associate it with the aq descriptor * 11:0 : command_id * 15:12 : reserved12 */ uint16_t command; - /* status of request execution */ uint8_t status; /* 0 : phase @@ -279,33 +227,21 @@ struct ena_admin_acq_common_desc { */ uint8_t flags; - /* word 1 : */ - /* provides additional info */ uint16_t extended_status; - /* submission queue head index, serves as a hint what AQ entries can - * be revoked - */ + /* serves as a hint what AQ entries can be revoked */ uint16_t sq_head_indx; }; -/* ACQ entry format */ struct ena_admin_acq_entry { - /* words 0:1 : */ struct ena_admin_acq_common_desc acq_common_descriptor; - /* response type specific data */ uint32_t response_specific_data[14]; }; -/* ENA AQ Create Submission Queue command. Placed in control buffer pointed - * by AQ entry - */ struct ena_admin_aq_create_sq_cmd { - /* words 0 : */ struct ena_admin_aq_common_desc aq_common_descriptor; - /* word 1 : */ /* 4:0 : reserved0_w1 * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx */ @@ -337,7 +273,6 @@ struct ena_admin_aq_create_sq_cmd { */ uint8_t sq_caps_3; - /* word 2 : */ /* associated completion queue id. This CQ must be created prior to * SQ creation */ @@ -346,85 +281,62 @@ struct ena_admin_aq_create_sq_cmd { /* submission queue depth in entries */ uint16_t sq_depth; - /* words 3:4 : SQ physical base address in OS memory. This field - * should not be used for Low Latency queues. Has to be page - * aligned. + /* SQ physical base address in OS memory. This field should not be + * used for Low Latency queues. Has to be page aligned. */ struct ena_common_mem_addr sq_ba; - /* words 5:6 : specifies queue head writeback location in OS - * memory. Valid if completion_policy is set to - * completion_policy_head_on_demand or completion_policy_head. Has - * to be cache aligned + /* specifies queue head writeback location in OS memory. Valid if + * completion_policy is set to completion_policy_head_on_demand or + * completion_policy_head. Has to be cache aligned */ struct ena_common_mem_addr sq_head_writeback; - /* word 7 : reserved word */ uint32_t reserved0_w7; - /* word 8 : reserved word */ uint32_t reserved0_w8; }; -/* submission queue direction */ enum ena_admin_sq_direction { - ENA_ADMIN_SQ_DIRECTION_TX = 1, + ENA_ADMIN_SQ_DIRECTION_TX = 1, - ENA_ADMIN_SQ_DIRECTION_RX = 2, + ENA_ADMIN_SQ_DIRECTION_RX = 2, }; -/* ENA Response for Create SQ Command. 
Appears in ACQ entry as - * response_specific_data - */ struct ena_admin_acq_create_sq_resp_desc { - /* words 0:1 : Common Admin Queue completion descriptor */ struct ena_admin_acq_common_desc acq_common_desc; - /* word 2 : */ - /* sq identifier */ uint16_t sq_idx; uint16_t reserved; - /* word 3 : queue doorbell address as an offset to PCIe MMIO REG BAR */ + /* queue doorbell address as an offset to PCIe MMIO REG BAR */ uint32_t sq_doorbell_offset; - /* word 4 : low latency queue ring base address as an offset to - * PCIe MMIO LLQ_MEM BAR + /* low latency queue ring base address as an offset to PCIe MMIO + * LLQ_MEM BAR */ uint32_t llq_descriptors_offset; - /* word 5 : low latency queue headers' memory as an offset to PCIe - * MMIO LLQ_MEM BAR + /* low latency queue headers' memory as an offset to PCIe MMIO + * LLQ_MEM BAR */ uint32_t llq_headers_offset; }; -/* ENA AQ Destroy Submission Queue command. Placed in control buffer - * pointed by AQ entry - */ struct ena_admin_aq_destroy_sq_cmd { - /* words 0 : */ struct ena_admin_aq_common_desc aq_common_descriptor; - /* words 1 : */ struct ena_admin_sq sq; }; -/* ENA Response for Destroy SQ Command. Appears in ACQ entry as - * response_specific_data - */ struct ena_admin_acq_destroy_sq_resp_desc { - /* words 0:1 : Common Admin Queue completion descriptor */ struct ena_admin_acq_common_desc acq_common_desc; }; -/* ENA AQ Create Completion Queue command */ struct ena_admin_aq_create_cq_cmd { - /* words 0 : */ struct ena_admin_aq_common_desc aq_common_descriptor; - /* word 1 : */ /* 4:0 : reserved5 * 5 : interrupt_mode_enabled - if set, cq operates * in interrupt mode, otherwise - polling @@ -441,62 +353,39 @@ struct ena_admin_aq_create_cq_cmd { /* completion queue depth in # of entries. must be power of 2 */ uint16_t cq_depth; - /* word 2 : msix vector assigned to this cq */ + /* msix vector assigned to this cq */ uint32_t msix_vector; - /* words 3:4 : cq physical base address in OS memory. CQ must be - * physically contiguous + /* cq physical base address in OS memory. CQ must be physically + * contiguous */ struct ena_common_mem_addr cq_ba; }; -/* ENA Response for Create CQ Command. Appears in ACQ entry as response - * specific data - */ struct ena_admin_acq_create_cq_resp_desc { - /* words 0:1 : Common Admin Queue completion descriptor */ struct ena_admin_acq_common_desc acq_common_desc; - /* word 2 : */ - /* cq identifier */ uint16_t cq_idx; - /* actual cq depth in # of entries */ + /* actual cq depth in number of entries */ uint16_t cq_actual_depth; - /* word 3 : cpu numa node address as an offset to PCIe MMIO REG BAR */ uint32_t numa_node_register_offset; - /* word 4 : completion head doorbell address as an offset to PCIe - * MMIO REG BAR - */ uint32_t cq_head_db_register_offset; - /* word 5 : interrupt unmask register address as an offset into - * PCIe MMIO REG BAR - */ uint32_t cq_interrupt_unmask_register_offset; }; -/* ENA AQ Destroy Completion Queue command. Placed in control buffer - * pointed by AQ entry - */ struct ena_admin_aq_destroy_cq_cmd { - /* words 0 : */ struct ena_admin_aq_common_desc aq_common_descriptor; - /* word 1 : */ - /* associated queue id. */ uint16_t cq_idx; uint16_t reserved1; }; -/* ENA Response for Destroy CQ Command. 
Appears in ACQ entry as - * response_specific_data - */ struct ena_admin_acq_destroy_cq_resp_desc { - /* words 0:1 : Common Admin Queue completion descriptor */ struct ena_admin_acq_common_desc acq_common_desc; }; @@ -504,21 +393,15 @@ struct ena_admin_acq_destroy_cq_resp_desc { * buffer pointed by AQ entry */ struct ena_admin_aq_get_stats_cmd { - /* words 0 : */ struct ena_admin_aq_common_desc aq_common_descriptor; - /* words 1:3 : */ union { /* command specific inline data */ uint32_t inline_data_w1[3]; - /* words 1:3 : points to control buffer (direct or - * indirect, chained if needed) - */ struct ena_admin_ctrl_buff_info control_buffer; } u; - /* word 4 : */ /* stats type as defined in enum ena_admin_get_stats_type */ uint8_t type; @@ -527,7 +410,6 @@ struct ena_admin_aq_get_stats_cmd { uint16_t reserved3; - /* word 5 : */ /* queue id. used when scope is specific_queue */ uint16_t queue_idx; @@ -539,89 +421,60 @@ struct ena_admin_aq_get_stats_cmd { /* Basic Statistics Command. */ struct ena_admin_basic_stats { - /* word 0 : */ uint32_t tx_bytes_low; - /* word 1 : */ uint32_t tx_bytes_high; - /* word 2 : */ uint32_t tx_pkts_low; - /* word 3 : */ uint32_t tx_pkts_high; - /* word 4 : */ uint32_t rx_bytes_low; - /* word 5 : */ uint32_t rx_bytes_high; - /* word 6 : */ uint32_t rx_pkts_low; - /* word 7 : */ uint32_t rx_pkts_high; - /* word 8 : */ uint32_t rx_drops_low; - /* word 9 : */ uint32_t rx_drops_high; }; -/* ENA Response for Get Statistics Command. Appears in ACQ entry as - * response_specific_data - */ struct ena_admin_acq_get_stats_resp { - /* words 0:1 : Common Admin Queue completion descriptor */ struct ena_admin_acq_common_desc acq_common_desc; - /* words 2:11 : */ struct ena_admin_basic_stats basic_stats; }; -/* ENA Get/Set Feature common descriptor. Appears as inline word in - * ena_aq_entry - */ struct ena_admin_get_set_feature_common_desc { - /* word 0 : */ /* 1:0 : select - 0x1 - current value; 0x3 - default * value * 7:3 : reserved3 */ uint8_t flags; - /* as appears in ena_feature_id */ + /* as appears in ena_admin_aq_feature_id */ uint8_t feature_id; - /* reserved16 */ uint16_t reserved16; }; -/* ENA Device Attributes Feature descriptor. */ struct ena_admin_device_attr_feature_desc { - /* word 0 : implementation id */ uint32_t impl_id; - /* word 1 : device version */ uint32_t device_version; - /* word 2 : bit map of which bits are supported value of 1 - * indicated that this feature is supported and can perform SET/GET - * for it - */ + /* bitmap of ena_admin_aq_feature_id */ uint32_t supported_features; - /* word 3 : */ uint32_t reserved3; - /* word 4 : Indicates how many bits are used physical address - * access. - */ + /* Indicates how many bits are used physical address access. */ uint32_t phys_addr_width; - /* word 5 : Indicates how many bits are used virtual address access. */ + /* Indicates how many bits are used virtual address access. */ uint32_t virt_addr_width; /* unicast MAC address (in Network byte order) */ @@ -629,36 +482,27 @@ struct ena_admin_device_attr_feature_desc { uint8_t reserved7[2]; - /* word 8 : Max supported MTU value */ uint32_t max_mtu; }; -/* ENA Max Queues Feature descriptor. 
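
[Annotation] Each basic-stats counter above is split into a low and a high 32-bit word; recombining them is the caller's job, for example:

    #include <stdint.h>

    static uint64_t ena_stat64(uint32_t low, uint32_t high)
    {
        /* the device reports 64-bit counters as two 32-bit words */
        return ((uint64_t)high << 32) | low;
    }

    /* e.g.: uint64_t rx_drops = ena_stat64(s->rx_drops_low, s->rx_drops_high); */
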
*/ struct ena_admin_queue_feature_desc { - /* word 0 : Max number of submission queues (including LLQs) */ + /* including LLQs */ uint32_t max_sq_num; - /* word 1 : Max submission queue depth */ uint32_t max_sq_depth; - /* word 2 : Max number of completion queues */ uint32_t max_cq_num; - /* word 3 : Max completion queue depth */ uint32_t max_cq_depth; - /* word 4 : Max number of LLQ submission queues */ uint32_t max_llq_num; - /* word 5 : Max submission queue depth of LLQ */ uint32_t max_llq_depth; - /* word 6 : Max header size */ uint32_t max_header_size; - /* word 7 : */ - /* Maximum Descriptors number, including meta descriptors, allowed - * for a single Tx packet + /* Maximum Descriptors number, including meta descriptor, allowed for + * a single Tx packet */ uint16_t max_packet_tx_descs; @@ -666,86 +510,69 @@ struct ena_admin_queue_feature_desc { uint16_t max_packet_rx_descs; }; -/* ENA MTU Set Feature descriptor. */ struct ena_admin_set_feature_mtu_desc { - /* word 0 : mtu payload size (exclude L2) */ + /* exclude L2 */ uint32_t mtu; }; -/* ENA host attributes Set Feature descriptor. */ struct ena_admin_set_feature_host_attr_desc { - /* words 0:1 : host OS info base address in OS memory. host info is - * 4KB of physically contiguous + /* host OS info base address in OS memory. host info is 4KB of + * physically contiguous */ struct ena_common_mem_addr os_info_ba; - /* words 2:3 : host debug area base address in OS memory. debug - * area must be physically contiguous + /* host debug area base address in OS memory. debug area must be + * physically contiguous */ struct ena_common_mem_addr debug_ba; - /* word 4 : debug area size */ + /* debug area size */ uint32_t debug_area_size; }; -/* ENA Interrupt Moderation Get Feature descriptor. */ struct ena_admin_feature_intr_moder_desc { - /* word 0 : */ /* interrupt delay granularity in usec */ uint16_t intr_delay_resolution; uint16_t reserved; }; -/* ENA Link Get Feature descriptor. */ struct ena_admin_get_feature_link_desc { - /* word 0 : Link speed in Mb */ + /* Link speed in Mb */ uint32_t speed; - /* word 1 : supported speeds (bit field of enum ena_admin_link - * types) - */ + /* bit field of enum ena_admin_link types */ uint32_t supported; - /* word 2 : */ - /* 0 : autoneg - auto negotiation + /* 0 : autoneg * 1 : duplex - Full Duplex * 31:2 : reserved2 */ uint32_t flags; }; -/* ENA AENQ Feature descriptor. */ struct ena_admin_feature_aenq_desc { - /* word 0 : bitmask for AENQ groups the device can report */ + /* bitmask for AENQ groups the device can report */ uint32_t supported_groups; - /* word 1 : bitmask for AENQ groups to report */ + /* bitmask for AENQ groups to report */ uint32_t enabled_groups; }; -/* ENA Stateless Offload Feature descriptor. 
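
[Annotation] The link descriptor below packs autoneg (bit 0) and duplex (bit 1) into its flags word; the getters defined near the end of this header decode it, roughly like so:

    #include <stdio.h>

    static void report_link(const struct ena_admin_get_feature_link_desc *link)
    {
        printf("%u Mb/s, %s duplex, autoneg %s\n", (unsigned)link->speed,
               get_ena_admin_get_feature_link_desc_duplex(link) ? "full" : "half",
               get_ena_admin_get_feature_link_desc_autoneg(link) ? "on" : "off");
    }
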
*/ struct ena_admin_feature_offload_desc { - /* word 0 : */ - /* Trasmit side stateless offload - * 0 : TX_L3_csum_ipv4 - IPv4 checksum - * 1 : TX_L4_ipv4_csum_part - TCP/UDP over IPv4 - * checksum, the checksum field should be initialized - * with pseudo header checksum - * 2 : TX_L4_ipv4_csum_full - TCP/UDP over IPv4 - * checksum - * 3 : TX_L4_ipv6_csum_part - TCP/UDP over IPv6 - * checksum, the checksum field should be initialized - * with pseudo header checksum - * 4 : TX_L4_ipv6_csum_full - TCP/UDP over IPv6 - * checksum - * 5 : tso_ipv4 - TCP/IPv4 Segmentation Offloading - * 6 : tso_ipv6 - TCP/IPv6 Segmentation Offloading - * 7 : tso_ecn - TCP Segmentation with ECN + /* 0 : TX_L3_csum_ipv4 + * 1 : TX_L4_ipv4_csum_part - The checksum field + * should be initialized with pseudo header checksum + * 2 : TX_L4_ipv4_csum_full + * 3 : TX_L4_ipv6_csum_part - The checksum field + * should be initialized with pseudo header checksum + * 4 : TX_L4_ipv6_csum_full + * 5 : tso_ipv4 + * 6 : tso_ipv6 + * 7 : tso_ecn */ uint32_t tx; - /* word 1 : */ /* Receive side supported stateless offload * 0 : RX_L3_csum_ipv4 - IPv4 checksum * 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum @@ -754,118 +581,94 @@ struct ena_admin_feature_offload_desc { */ uint32_t rx_supported; - /* word 2 : */ - /* Receive side enabled stateless offload */ uint32_t rx_enabled; }; -/* hash functions */ enum ena_admin_hash_functions { - /* Toeplitz hash */ - ENA_ADMIN_TOEPLITZ = 1, + ENA_ADMIN_TOEPLITZ = 1, - /* CRC32 hash */ - ENA_ADMIN_CRC32 = 2, + ENA_ADMIN_CRC32 = 2, }; -/* ENA RSS flow hash control buffer structure */ struct ena_admin_feature_rss_flow_hash_control { - /* word 0 : number of valid keys */ uint32_t keys_num; - /* word 1 : */ uint32_t reserved; - /* Toeplitz keys */ uint32_t key[10]; }; -/* ENA RSS Flow Hash Function */ struct ena_admin_feature_rss_flow_hash_function { - /* word 0 : */ - /* supported hash functions - * 7:0 : funcs - supported hash functions (bitmask - * accroding to ena_admin_hash_functions) - */ + /* 7:0 : funcs - bitmask of ena_admin_hash_functions */ uint32_t supported_func; - /* word 1 : */ - /* selected hash func - * 7:0 : selected_func - selected hash function - * (bitmask accroding to ena_admin_hash_functions) + /* 7:0 : selected_func - bitmask of + * ena_admin_hash_functions */ uint32_t selected_func; - /* word 2 : initial value */ + /* initial value */ uint32_t init_val; }; /* RSS flow hash protocols */ enum ena_admin_flow_hash_proto { - /* tcp/ipv4 */ - ENA_ADMIN_RSS_TCP4 = 0, + ENA_ADMIN_RSS_TCP4 = 0, - /* udp/ipv4 */ - ENA_ADMIN_RSS_UDP4 = 1, + ENA_ADMIN_RSS_UDP4 = 1, - /* tcp/ipv6 */ - ENA_ADMIN_RSS_TCP6 = 2, + ENA_ADMIN_RSS_TCP6 = 2, - /* udp/ipv6 */ - ENA_ADMIN_RSS_UDP6 = 3, + ENA_ADMIN_RSS_UDP6 = 3, - /* ipv4 not tcp/udp */ - ENA_ADMIN_RSS_IP4 = 4, + ENA_ADMIN_RSS_IP4 = 4, - /* ipv6 not tcp/udp */ - ENA_ADMIN_RSS_IP6 = 5, + ENA_ADMIN_RSS_IP6 = 5, - /* fragmented ipv4 */ - ENA_ADMIN_RSS_IP4_FRAG = 6, + ENA_ADMIN_RSS_IP4_FRAG = 6, - /* not ipv4/6 */ - ENA_ADMIN_RSS_NOT_IP = 7, + ENA_ADMIN_RSS_NOT_IP = 7, - /* max number of protocols */ - ENA_ADMIN_RSS_PROTO_NUM = 16, + /* TCPv6 with extension header */ + ENA_ADMIN_RSS_TCP6_EX = 8, + + /* IPv6 with extension header */ + ENA_ADMIN_RSS_IP6_EX = 9, + + ENA_ADMIN_RSS_PROTO_NUM = 16, }; /* RSS flow hash fields */ enum ena_admin_flow_hash_fields { /* Ethernet Dest Addr */ - ENA_ADMIN_RSS_L2_DA = 0, + ENA_ADMIN_RSS_L2_DA = BIT(0), /* Ethernet Src Addr */ - ENA_ADMIN_RSS_L2_SA = 1, + ENA_ADMIN_RSS_L2_SA = BIT(1), /* ipv4/6 Dest Addr */ - 
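
[Annotation] supported_func above is described as a bitmask of enum ena_admin_hash_functions, i.e. the enum values act as bit indices; under that reading a driver tests for a function with a shifted bit, using the getter defined later in this header:

    static int toeplitz_supported(const struct ena_admin_feature_rss_flow_hash_function *p)
    {
        uint32_t funcs = get_ena_admin_feature_rss_flow_hash_function_funcs(p);

        /* ENA_ADMIN_TOEPLITZ == 1, so this checks bit 1 of the mask */
        return !!(funcs & (1u << ENA_ADMIN_TOEPLITZ));
    }
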
ENA_ADMIN_RSS_L3_DA = 2, + ENA_ADMIN_RSS_L3_DA = BIT(2), /* ipv4/6 Src Addr */ - ENA_ADMIN_RSS_L3_SA = 5, + ENA_ADMIN_RSS_L3_SA = BIT(3), /* tcp/udp Dest Port */ - ENA_ADMIN_RSS_L4_DP = 6, + ENA_ADMIN_RSS_L4_DP = BIT(4), /* tcp/udp Src Port */ - ENA_ADMIN_RSS_L4_SP = 7, + ENA_ADMIN_RSS_L4_SP = BIT(5), }; -/* hash input fields for flow protocol */ struct ena_admin_proto_input { - /* word 0 : */ /* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */ uint16_t fields; uint16_t reserved2; }; -/* ENA RSS hash control buffer structure */ struct ena_admin_feature_rss_hash_control { - /* supported input fields */ struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM]; - /* selected input fields */ struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM]; struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM]; @@ -873,11 +676,9 @@ struct ena_admin_feature_rss_hash_control { struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM]; }; -/* ENA RSS flow hash input */ struct ena_admin_feature_rss_flow_hash_input { - /* word 0 : */ /* supported hash input sorting - * 1 : L3_sort - support swap L3 addresses if DA + * 1 : L3_sort - support swap L3 addresses if DA is * smaller than SA * 2 : L4_sort - support swap L4 ports if DP smaller * SP @@ -893,46 +694,37 @@ struct ena_admin_feature_rss_flow_hash_input { uint16_t enabled_input_sort; }; -/* Operating system type */ enum ena_admin_os_type { - /* Linux OS */ - ENA_ADMIN_OS_LINUX = 1, + ENA_ADMIN_OS_LINUX = 1, - /* Windows OS */ - ENA_ADMIN_OS_WIN = 2, + ENA_ADMIN_OS_WIN = 2, - /* DPDK OS */ - ENA_ADMIN_OS_DPDK = 3, + ENA_ADMIN_OS_DPDK = 3, - /* FreeBSD OS */ - ENA_ADMIN_OS_FREEBSD = 4, + ENA_ADMIN_OS_FREEBSD = 4, - /* PXE OS */ - ENA_ADMIN_OS_IPXE = 5, + ENA_ADMIN_OS_IPXE = 5, }; -/* host info */ struct ena_admin_host_info { - /* word 0 : OS type defined in enum ena_os_type */ + /* defined in enum ena_admin_os_type */ uint32_t os_type; /* os distribution string format */ uint8_t os_dist_str[128]; - /* word 33 : OS distribution numeric format */ + /* OS distribution numeric format */ uint32_t os_dist; /* kernel version string format */ uint8_t kernel_ver_str[32]; - /* word 42 : Kernel version numeric format */ + /* Kernel version numeric format */ uint32_t kernel_ver; - /* word 43 : */ - /* driver version - * 7:0 : major - major - * 15:8 : minor - minor - * 23:16 : sub_minor - sub minor + /* 7:0 : major + * 15:8 : minor + * 23:16 : sub_minor */ uint32_t driver_version; @@ -940,220 +732,200 @@ struct ena_admin_host_info { uint32_t supported_network_features[4]; }; -/* ENA RSS indirection table entry */ struct ena_admin_rss_ind_table_entry { - /* word 0 : */ - /* cq identifier */ uint16_t cq_idx; uint16_t reserved; }; -/* ENA RSS indirection table */ struct ena_admin_feature_rss_ind_table { - /* word 0 : */ /* min supported table size (2^min_size) */ uint16_t min_size; /* max supported table size (2^max_size) */ uint16_t max_size; - /* word 1 : */ /* table size (2^size) */ uint16_t size; uint16_t reserved; - /* word 2 : index of the inline entry. 0xFFFFFFFF means invalid */ + /* index of the inline entry. 0xFFFFFFFF means invalid */ uint32_t inline_index; - /* words 3 : used for updating single entry, ignored when setting - * the entire table through the control buffer. + /* used for updating single entry, ignored when setting the entire + * table through the control buffer. 
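
[Annotation] With the flow-hash-fields enum now holding BIT() values rather than indices, selecting per-protocol hash inputs becomes a plain bitwise OR, e.g. hashing TCP/IPv4 on both L4 ports:

    static void hash_tcp4_on_ports(struct ena_admin_feature_rss_hash_control *ctrl)
    {
        /* fields is "bitwise according to ena_admin_flow_hash_fields" */
        ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
            ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
    }
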
*/ struct ena_admin_rss_ind_table_entry inline_entry; }; -/* ENA Get Feature command */ +/* When hint value is 0, driver should use it's own predefined value */ +struct ena_admin_ena_hw_hints { + /* value in ms */ + uint16_t mmio_read_timeout; + + /* value in ms */ + uint16_t driver_watchdog_timeout; + + /* Per packet tx completion timeout. value in ms */ + uint16_t missing_tx_completion_timeout; + + uint16_t missed_tx_completion_count_threshold_to_reset; + + /* value in ms */ + uint16_t admin_completion_tx_timeout; + + uint16_t netdev_wd_timeout; + + uint16_t max_tx_sgl_size; + + uint16_t max_rx_sgl_size; + + uint16_t reserved[8]; +}; + struct ena_admin_get_feat_cmd { - /* words 0 : */ struct ena_admin_aq_common_desc aq_common_descriptor; - /* words 1:3 : points to control buffer (direct or indirect, - * chained if needed) - */ struct ena_admin_ctrl_buff_info control_buffer; - /* words 4 : */ struct ena_admin_get_set_feature_common_desc feat_common; - /* words 5:15 : */ - union { - /* raw words */ - uint32_t raw[11]; - } u; + uint32_t raw[11]; }; -/* ENA Get Feature command response */ struct ena_admin_get_feat_resp { - /* words 0:1 : */ struct ena_admin_acq_common_desc acq_common_desc; - /* words 2:15 : */ union { - /* raw words */ uint32_t raw[14]; - /* words 2:10 : Get Device Attributes */ struct ena_admin_device_attr_feature_desc dev_attr; - /* words 2:5 : Max queues num */ struct ena_admin_queue_feature_desc max_queue; - /* words 2:3 : AENQ configuration */ struct ena_admin_feature_aenq_desc aenq; - /* words 2:4 : Get Link configuration */ struct ena_admin_get_feature_link_desc link; - /* words 2:4 : offload configuration */ struct ena_admin_feature_offload_desc offload; - /* words 2:4 : rss flow hash function */ struct ena_admin_feature_rss_flow_hash_function flow_hash_func; - /* words 2 : rss flow hash input */ struct ena_admin_feature_rss_flow_hash_input flow_hash_input; - /* words 2:3 : rss indirection table */ struct ena_admin_feature_rss_ind_table ind_table; - /* words 2 : interrupt moderation configuration */ struct ena_admin_feature_intr_moder_desc intr_moderation; + + struct ena_admin_ena_hw_hints hw_hints; } u; }; -/* ENA Set Feature command */ struct ena_admin_set_feat_cmd { - /* words 0 : */ struct ena_admin_aq_common_desc aq_common_descriptor; - /* words 1:3 : points to control buffer (direct or indirect, - * chained if needed) - */ struct ena_admin_ctrl_buff_info control_buffer; - /* words 4 : */ struct ena_admin_get_set_feature_common_desc feat_common; - /* words 5:15 : */ union { - /* raw words */ uint32_t raw[11]; - /* words 5 : mtu size */ + /* mtu size */ struct ena_admin_set_feature_mtu_desc mtu; - /* words 5:7 : host attributes */ + /* host attributes */ struct ena_admin_set_feature_host_attr_desc host_attr; - /* words 5:6 : AENQ configuration */ + /* AENQ configuration */ struct ena_admin_feature_aenq_desc aenq; - /* words 5:7 : rss flow hash function */ + /* rss flow hash function */ struct ena_admin_feature_rss_flow_hash_function flow_hash_func; - /* words 5 : rss flow hash input */ + /* rss flow hash input */ struct ena_admin_feature_rss_flow_hash_input flow_hash_input; - /* words 5:6 : rss indirection table */ + /* rss indirection table */ struct ena_admin_feature_rss_ind_table ind_table; } u; }; -/* ENA Set Feature command response */ struct ena_admin_set_feat_resp { - /* words 0:1 : */ struct ena_admin_acq_common_desc acq_common_desc; - /* words 2:15 : */ union { - /* raw words */ uint32_t raw[14]; } u; }; -/* ENA Asynchronous Event Notification Queue 
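
[Annotation] The new hw_hints block arrives through the get-feature response union like any other feature; a sketch of consuming one hint. The driver-side timeout variable is hypothetical:

    static void apply_mmio_hint(const struct ena_admin_get_feat_resp *resp,
                                uint32_t *mmio_timeout_ms /* hypothetical driver state */)
    {
        const struct ena_admin_ena_hw_hints *h = &resp->u.hw_hints;

        if (h->mmio_read_timeout)               /* 0 = keep the driver default */
            *mmio_timeout_ms = h->mmio_read_timeout;  /* value in ms */
    }
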
descriptor. */ struct ena_admin_aenq_common_desc { - /* word 0 : */ uint16_t group; uint16_t syndrom; - /* word 1 : */ /* 0 : phase */ uint8_t flags; uint8_t reserved1[3]; - /* word 2 : Timestamp LSB */ uint32_t timestamp_low; - /* word 3 : Timestamp MSB */ uint32_t timestamp_high; }; /* asynchronous event notification groups */ enum ena_admin_aenq_group { - /* Link State Change */ - ENA_ADMIN_LINK_CHANGE = 0, + ENA_ADMIN_LINK_CHANGE = 0, - ENA_ADMIN_FATAL_ERROR = 1, + ENA_ADMIN_FATAL_ERROR = 1, - ENA_ADMIN_WARNING = 2, + ENA_ADMIN_WARNING = 2, - ENA_ADMIN_NOTIFICATION = 3, + ENA_ADMIN_NOTIFICATION = 3, - ENA_ADMIN_KEEP_ALIVE = 4, + ENA_ADMIN_KEEP_ALIVE = 4, - ENA_ADMIN_AENQ_GROUPS_NUM = 5, + ENA_ADMIN_AENQ_GROUPS_NUM = 5, }; -/* syndorm of AENQ notification group */ enum ena_admin_aenq_notification_syndrom { - ENA_ADMIN_SUSPEND = 0, + ENA_ADMIN_SUSPEND = 0, + + ENA_ADMIN_RESUME = 1, - ENA_ADMIN_RESUME = 1, + ENA_ADMIN_UPDATE_HINTS = 2, }; -/* ENA Asynchronous Event Notification generic descriptor. */ struct ena_admin_aenq_entry { - /* words 0:3 : */ struct ena_admin_aenq_common_desc aenq_common_desc; /* command specific inline data */ uint32_t inline_data_w4[12]; }; -/* ENA Asynchronous Event Notification Queue Link Change descriptor. */ struct ena_admin_aenq_link_change_desc { - /* words 0:3 : */ struct ena_admin_aenq_common_desc aenq_common_desc; - /* word 4 : */ /* 0 : link_status */ uint32_t flags; }; -/* ENA MMIO Readless response interface */ +struct ena_admin_aenq_keep_alive_desc { + struct ena_admin_aenq_common_desc aenq_common_desc; + + uint32_t rx_drops_low; + + uint32_t rx_drops_high; +}; + struct ena_admin_ena_mmio_req_read_less_resp { - /* word 0 : */ - /* request id */ uint16_t req_id; - /* register offset */ uint16_t reg_off; - /* word 1 : value is valid when poll is cleared */ + /* value is valid when poll is cleared */ uint32_t reg_val; }; @@ -1220,8 +992,7 @@ struct ena_admin_ena_mmio_req_read_less_resp { /* feature_rss_flow_hash_function */ #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0) -#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK \ - GENMASK(7, 0) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK GENMASK(7, 0) /* feature_rss_flow_hash_input */ #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1 @@ -1247,653 +1018,392 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) #if !defined(ENA_DEFS_LINUX_MAINLINE) -static inline uint16_t -get_ena_admin_aq_common_desc_command_id( - const struct ena_admin_aq_common_desc *p) +static inline uint16_t get_ena_admin_aq_common_desc_command_id(const struct ena_admin_aq_common_desc *p) { return p->command_id & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; } -static inline void -set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p, - uint16_t val) +static inline void set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p, uint16_t val) { p->command_id |= val & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; } -static inline uint8_t -get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p) +static inline uint8_t get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p) { return p->flags & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; } -static inline void -set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p, - uint8_t val) +static inline void set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p, uint8_t 
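
[Annotation] The BIT()/GENMASK() macros used throughout the mask definitions below come from the driver's platform glue, not from this header. The conventional 32-bit definitions, given here as an assumption rather than a quote of ena_plat.h, behave like this:

    #define BIT(nr)       (1u << (nr))
    /* bits h..l inclusive; GENMASK(7, 0) == 0xff, GENMASK(19, 16) == 0xf0000 */
    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
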
val) { p->flags |= val & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; } -static inline uint8_t -get_ena_admin_aq_common_desc_ctrl_data( - const struct ena_admin_aq_common_desc *p) +static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data(const struct ena_admin_aq_common_desc *p) { - return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >> - ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT; + return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT; } -static inline void -set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p, - uint8_t val) +static inline void set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p, uint8_t val) { - p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT) - & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK; + p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK; } -static inline uint8_t -get_ena_admin_aq_common_desc_ctrl_data_indirect( - const struct ena_admin_aq_common_desc *p) +static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data_indirect(const struct ena_admin_aq_common_desc *p) { - return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK) - >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT; + return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT; } -static inline void -set_ena_admin_aq_common_desc_ctrl_data_indirect( - struct ena_admin_aq_common_desc *p, - uint8_t val) +static inline void set_ena_admin_aq_common_desc_ctrl_data_indirect(struct ena_admin_aq_common_desc *p, uint8_t val) { - p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT) - & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; } -static inline uint8_t -get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p) +static inline uint8_t get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p) { - return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK) - >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT; + return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK) >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT; } -static inline void -set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val) +static inline void set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val) { - p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & - ENA_ADMIN_SQ_SQ_DIRECTION_MASK; + p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & ENA_ADMIN_SQ_SQ_DIRECTION_MASK; } -static inline uint16_t -get_ena_admin_acq_common_desc_command_id( - const struct ena_admin_acq_common_desc *p) +static inline uint16_t get_ena_admin_acq_common_desc_command_id(const struct ena_admin_acq_common_desc *p) { return p->command & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; } -static inline void -set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p, - uint16_t val) +static inline void set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p, uint16_t val) { p->command |= val & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; } -static inline uint8_t -get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p) +static inline uint8_t get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p) { return p->flags & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK; } -static inline void 
-set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p, - uint8_t val) +static inline void set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p, uint8_t val) { p->flags |= val & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK; } -static inline uint8_t -get_ena_admin_aq_create_sq_cmd_sq_direction( - const struct ena_admin_aq_create_sq_cmd *p) +static inline uint8_t get_ena_admin_aq_create_sq_cmd_sq_direction(const struct ena_admin_aq_create_sq_cmd *p) { - return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK) - >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT; + return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT; } -static inline void -set_ena_admin_aq_create_sq_cmd_sq_direction( - struct ena_admin_aq_create_sq_cmd *p, - uint8_t val) +static inline void set_ena_admin_aq_create_sq_cmd_sq_direction(struct ena_admin_aq_create_sq_cmd *p, uint8_t val) { - p->sq_identity |= (val << - ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) - & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; + p->sq_identity |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; } -static inline uint8_t -get_ena_admin_aq_create_sq_cmd_placement_policy( - const struct ena_admin_aq_create_sq_cmd *p) +static inline uint8_t get_ena_admin_aq_create_sq_cmd_placement_policy(const struct ena_admin_aq_create_sq_cmd *p) { return p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; } -static inline void -set_ena_admin_aq_create_sq_cmd_placement_policy( - struct ena_admin_aq_create_sq_cmd *p, - uint8_t val) +static inline void set_ena_admin_aq_create_sq_cmd_placement_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val) { p->sq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; } -static inline uint8_t -get_ena_admin_aq_create_sq_cmd_completion_policy( - const struct ena_admin_aq_create_sq_cmd *p) +static inline uint8_t get_ena_admin_aq_create_sq_cmd_completion_policy(const struct ena_admin_aq_create_sq_cmd *p) { - return (p->sq_caps_2 - & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK) - >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT; + return (p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT; } -static inline void -set_ena_admin_aq_create_sq_cmd_completion_policy( - struct ena_admin_aq_create_sq_cmd *p, - uint8_t val) +static inline void set_ena_admin_aq_create_sq_cmd_completion_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val) { - p->sq_caps_2 |= - (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) - & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; + p->sq_caps_2 |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; } -static inline uint8_t -get_ena_admin_aq_create_sq_cmd_is_physically_contiguous( - const struct ena_admin_aq_create_sq_cmd *p) +static inline uint8_t get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(const struct ena_admin_aq_create_sq_cmd *p) { - return p->sq_caps_3 & - ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; + return p->sq_caps_3 & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; } -static inline void -set_ena_admin_aq_create_sq_cmd_is_physically_contiguous( - struct ena_admin_aq_create_sq_cmd *p, - uint8_t val) +static inline void set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(struct ena_admin_aq_create_sq_cmd *p, uint8_t val) 
{ - p->sq_caps_3 |= val & - ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; + p->sq_caps_3 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; } -static inline uint8_t -get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled( - const struct ena_admin_aq_create_cq_cmd *p) +static inline uint8_t get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(const struct ena_admin_aq_create_cq_cmd *p) { - return (p->cq_caps_1 & - ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK) - >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT; + return (p->cq_caps_1 & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK) >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT; } -static inline void -set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled( - struct ena_admin_aq_create_cq_cmd *p, - uint8_t val) +static inline void set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(struct ena_admin_aq_create_cq_cmd *p, uint8_t val) { - p->cq_caps_1 |= - (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT) - & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; + p->cq_caps_1 |= (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT) & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; } -static inline uint8_t -get_ena_admin_aq_create_cq_cmd_cq_entry_size_words( - const struct ena_admin_aq_create_cq_cmd *p) +static inline uint8_t get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(const struct ena_admin_aq_create_cq_cmd *p) { - return p->cq_caps_2 - & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; + return p->cq_caps_2 & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; } -static inline void -set_ena_admin_aq_create_cq_cmd_cq_entry_size_words( - struct ena_admin_aq_create_cq_cmd *p, - uint8_t val) +static inline void set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(struct ena_admin_aq_create_cq_cmd *p, uint8_t val) { - p->cq_caps_2 |= - val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; + p->cq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; } -static inline uint8_t -get_ena_admin_get_set_feature_common_desc_select( - const struct ena_admin_get_set_feature_common_desc *p) +static inline uint8_t get_ena_admin_get_set_feature_common_desc_select(const struct ena_admin_get_set_feature_common_desc *p) { return p->flags & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK; } -static inline void -set_ena_admin_get_set_feature_common_desc_select( - struct ena_admin_get_set_feature_common_desc *p, - uint8_t val) +static inline void set_ena_admin_get_set_feature_common_desc_select(struct ena_admin_get_set_feature_common_desc *p, uint8_t val) { p->flags |= val & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK; } -static inline uint32_t -get_ena_admin_get_feature_link_desc_autoneg( - const struct ena_admin_get_feature_link_desc *p) +static inline uint32_t get_ena_admin_get_feature_link_desc_autoneg(const struct ena_admin_get_feature_link_desc *p) { return p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK; } -static inline void -set_ena_admin_get_feature_link_desc_autoneg( - struct ena_admin_get_feature_link_desc *p, - uint32_t val) +static inline void set_ena_admin_get_feature_link_desc_autoneg(struct ena_admin_get_feature_link_desc *p, uint32_t val) { p->flags |= val & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK; } -static inline uint32_t -get_ena_admin_get_feature_link_desc_duplex( - const struct ena_admin_get_feature_link_desc *p) +static inline uint32_t get_ena_admin_get_feature_link_desc_duplex(const 
struct ena_admin_get_feature_link_desc *p) { - return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK) - >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT; + return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK) >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT; } -static inline void -set_ena_admin_get_feature_link_desc_duplex( - struct ena_admin_get_feature_link_desc *p, - uint32_t val) +static inline void set_ena_admin_get_feature_link_desc_duplex(struct ena_admin_get_feature_link_desc *p, uint32_t val) { - p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT) - & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK; + p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT) & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK; } -static inline uint32_t -get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4( - const struct ena_admin_feature_offload_desc *p) +static inline uint32_t get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p) { return p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK; } -static inline void -set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4( - struct ena_admin_feature_offload_desc *p, - uint32_t val) +static inline void set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val) { p->tx |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK; } -static inline uint32_t -get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part( - const struct ena_admin_feature_offload_desc *p) +static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(const struct ena_admin_feature_offload_desc *p) { - return (p->tx & - ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) - >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT; + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT; } -static inline void -set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part( - struct ena_admin_feature_offload_desc *p, - uint32_t val) +static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val) { - p->tx |= (val << - ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT) - & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK; + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK; } -static inline uint32_t -get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full( - const struct ena_admin_feature_offload_desc *p) +static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(const struct ena_admin_feature_offload_desc *p) { - return (p->tx & - ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) - >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT; + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT; } -static inline void -set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full( - struct ena_admin_feature_offload_desc *p, - uint32_t val) +static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val) { - p->tx |= (val << - ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT) - & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK; + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT) & 
ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK; } -static inline uint32_t -get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part( - const struct ena_admin_feature_offload_desc *p) +static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(const struct ena_admin_feature_offload_desc *p) { - return (p->tx & - ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) - >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT; + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT; } -static inline void -set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part( - struct ena_admin_feature_offload_desc *p, - uint32_t val) +static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val) { - p->tx |= (val << - ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT) - & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK; + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK; } -static inline uint32_t -get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full( - const struct ena_admin_feature_offload_desc *p) +static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(const struct ena_admin_feature_offload_desc *p) { - return (p->tx & - ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) - >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT; + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT; } -static inline void -set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full( - struct ena_admin_feature_offload_desc *p, - uint32_t val) +static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val) { - p->tx |= (val << - ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT) - & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK; + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK; } -static inline uint32_t -get_ena_admin_feature_offload_desc_tso_ipv4( - const struct ena_admin_feature_offload_desc *p) +static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv4(const struct ena_admin_feature_offload_desc *p) { - return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) - >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT; + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT; } -static inline void -set_ena_admin_feature_offload_desc_tso_ipv4( - struct ena_admin_feature_offload_desc *p, - uint32_t val) +static inline void set_ena_admin_feature_offload_desc_tso_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val) { - p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT) - & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK; + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK; } -static inline uint32_t -get_ena_admin_feature_offload_desc_tso_ipv6( - const struct ena_admin_feature_offload_desc *p) +static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv6(const struct ena_admin_feature_offload_desc *p) { - return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) - >> 
ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT; + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT; } -static inline void -set_ena_admin_feature_offload_desc_tso_ipv6( - struct ena_admin_feature_offload_desc *p, - uint32_t val) +static inline void set_ena_admin_feature_offload_desc_tso_ipv6(struct ena_admin_feature_offload_desc *p, uint32_t val) { - p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT) - & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK; + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK; } -static inline uint32_t -get_ena_admin_feature_offload_desc_tso_ecn( - const struct ena_admin_feature_offload_desc *p) +static inline uint32_t get_ena_admin_feature_offload_desc_tso_ecn(const struct ena_admin_feature_offload_desc *p) { - return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) - >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT; + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT; } -static inline void -set_ena_admin_feature_offload_desc_tso_ecn( - struct ena_admin_feature_offload_desc *p, - uint32_t val) +static inline void set_ena_admin_feature_offload_desc_tso_ecn(struct ena_admin_feature_offload_desc *p, uint32_t val) { - p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT) - & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK; + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK; } -static inline uint32_t -get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4( - const struct ena_admin_feature_offload_desc *p) +static inline uint32_t get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p) { - return p->rx_supported & - ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK; + return p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK; } -static inline void -set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4( - struct ena_admin_feature_offload_desc *p, - uint32_t val) +static inline void set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val) { - p->rx_supported |= - val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK; + p->rx_supported |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK; } -static inline uint32_t -get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum( - const struct ena_admin_feature_offload_desc *p) +static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(const struct ena_admin_feature_offload_desc *p) { - return (p->rx_supported & - ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) - >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT; + return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT; } -static inline void -set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum( - struct ena_admin_feature_offload_desc *p, - uint32_t val) +static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(struct ena_admin_feature_offload_desc *p, uint32_t val) { - p->rx_supported |= - (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT) - & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK; + p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK; } -static inline uint32_t 
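
[Annotation] One caveat that holds for every set_* helper in this file: they OR the value into the field without clearing it first, so descriptors must start zeroed and each field should be written once. For example:

    static void setter_semantics(void)
    {
        struct ena_admin_feature_offload_desc d = {0}; /* must start zeroed */

        set_ena_admin_feature_offload_desc_tso_ipv4(&d, 1);  /* sets bit 5 of tx */
        /* a second call with a different value would OR in, not replace */
    }
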
-get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum( - const struct ena_admin_feature_offload_desc *p) +static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(const struct ena_admin_feature_offload_desc *p) { - return (p->rx_supported & - ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) - >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT; + return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT; } -static inline void -set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum( - struct ena_admin_feature_offload_desc *p, - uint32_t val) +static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(struct ena_admin_feature_offload_desc *p, uint32_t val) { - p->rx_supported |= - (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT) - & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK; + p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK; } -static inline uint32_t -get_ena_admin_feature_offload_desc_RX_hash( - const struct ena_admin_feature_offload_desc *p) +static inline uint32_t get_ena_admin_feature_offload_desc_RX_hash(const struct ena_admin_feature_offload_desc *p) { - return (p->rx_supported & - ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) - >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT; + return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT; } -static inline void -set_ena_admin_feature_offload_desc_RX_hash( - struct ena_admin_feature_offload_desc *p, - uint32_t val) +static inline void set_ena_admin_feature_offload_desc_RX_hash(struct ena_admin_feature_offload_desc *p, uint32_t val) { - p->rx_supported |= - (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT) - & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK; + p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK; } -static inline uint32_t -get_ena_admin_feature_rss_flow_hash_function_funcs( - const struct ena_admin_feature_rss_flow_hash_function *p) +static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_funcs(const struct ena_admin_feature_rss_flow_hash_function *p) { - return p->supported_func & - ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK; + return p->supported_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK; } -static inline void -set_ena_admin_feature_rss_flow_hash_function_funcs( - struct ena_admin_feature_rss_flow_hash_function *p, - uint32_t val) +static inline void set_ena_admin_feature_rss_flow_hash_function_funcs(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val) { - p->supported_func |= - val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK; + p->supported_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK; } -static inline uint32_t -get_ena_admin_feature_rss_flow_hash_function_selected_func( - const struct ena_admin_feature_rss_flow_hash_function *p) +static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_selected_func(const struct ena_admin_feature_rss_flow_hash_function *p) { - return p->selected_func & - ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK; + return p->selected_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK; } -static inline void -set_ena_admin_feature_rss_flow_hash_function_selected_func( - struct ena_admin_feature_rss_flow_hash_function *p, - uint32_t val) +static 
inline void set_ena_admin_feature_rss_flow_hash_function_selected_func(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val) { - p->selected_func |= - val & - ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK; + p->selected_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK; } -static inline uint16_t -get_ena_admin_feature_rss_flow_hash_input_L3_sort( - const struct ena_admin_feature_rss_flow_hash_input *p) +static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p) { - return (p->supported_input_sort & - ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK) - >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT; + return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT; } -static inline void -set_ena_admin_feature_rss_flow_hash_input_L3_sort( - struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val) +static inline void set_ena_admin_feature_rss_flow_hash_input_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val) { - p->supported_input_sort |= - (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT) - & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK; + p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK; } -static inline uint16_t -get_ena_admin_feature_rss_flow_hash_input_L4_sort( - const struct ena_admin_feature_rss_flow_hash_input *p) +static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p) { - return (p->supported_input_sort & - ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK) - >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT; + return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT; } -static inline void -set_ena_admin_feature_rss_flow_hash_input_L4_sort( - struct ena_admin_feature_rss_flow_hash_input *p, - uint16_t val) +static inline void set_ena_admin_feature_rss_flow_hash_input_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val) { - p->supported_input_sort |= - (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT) - & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; + p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; } -static inline uint16_t -get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort( - const struct ena_admin_feature_rss_flow_hash_input *p) +static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p) { - return (p->enabled_input_sort & - ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK) - >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT; + return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT; } -static inline void -set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort( - struct ena_admin_feature_rss_flow_hash_input *p, - uint16_t val) +static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val) { - p->enabled_input_sort |= - (val << - 
ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT) - & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK; + p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK; } -static inline uint16_t -get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort( - const struct ena_admin_feature_rss_flow_hash_input *p) +static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p) { - return (p->enabled_input_sort & - ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK) - >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT; + return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT; } -static inline void -set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort( - struct ena_admin_feature_rss_flow_hash_input *p, - uint16_t val) +static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val) { - p->enabled_input_sort |= - (val << - ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT) - & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK; + p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK; } -static inline uint32_t -get_ena_admin_host_info_major(const struct ena_admin_host_info *p) +static inline uint32_t get_ena_admin_host_info_major(const struct ena_admin_host_info *p) { return p->driver_version & ENA_ADMIN_HOST_INFO_MAJOR_MASK; } -static inline void -set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val) +static inline void set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val) { p->driver_version |= val & ENA_ADMIN_HOST_INFO_MAJOR_MASK; } -static inline uint32_t -get_ena_admin_host_info_minor(const struct ena_admin_host_info *p) +static inline uint32_t get_ena_admin_host_info_minor(const struct ena_admin_host_info *p) { - return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK) - >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT; + return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT; } -static inline void -set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val) +static inline void set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val) { - p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) - & ENA_ADMIN_HOST_INFO_MINOR_MASK; + p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_MINOR_MASK; } -static inline uint32_t -get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p) +static inline uint32_t get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p) { - return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK) - >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT; + return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT; } -static inline void -set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val) +static inline void set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val) { - p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) - & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK; + p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) & 
ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK; } -static inline uint8_t -get_ena_admin_aenq_common_desc_phase( - const struct ena_admin_aenq_common_desc *p) +static inline uint8_t get_ena_admin_aenq_common_desc_phase(const struct ena_admin_aenq_common_desc *p) { return p->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK; } -static inline void -set_ena_admin_aenq_common_desc_phase( - struct ena_admin_aenq_common_desc *p, - uint8_t val) +static inline void set_ena_admin_aenq_common_desc_phase(struct ena_admin_aenq_common_desc *p, uint8_t val) { p->flags |= val & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK; } -static inline uint32_t -get_ena_admin_aenq_link_change_desc_link_status( - const struct ena_admin_aenq_link_change_desc *p) +static inline uint32_t get_ena_admin_aenq_link_change_desc_link_status(const struct ena_admin_aenq_link_change_desc *p) { return p->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; } -static inline void -set_ena_admin_aenq_link_change_desc_link_status( - struct ena_admin_aenq_link_change_desc *p, - uint32_t val) +static inline void set_ena_admin_aenq_link_change_desc_link_status(struct ena_admin_aenq_link_change_desc *p, uint32_t val) { p->flags |= val & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; } diff --git a/drivers/net/ena/base/ena_defs/ena_common_defs.h b/drivers/net/ena/base/ena_defs/ena_common_defs.h index 95e0f389..072e6c1f 100644 --- a/drivers/net/ena/base/ena_defs/ena_common_defs.h +++ b/drivers/net/ena/base/ena_defs/ena_common_defs.h @@ -34,17 +34,13 @@ #ifndef _ENA_COMMON_H_ #define _ENA_COMMON_H_ -/* spec version */ -#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* spec version major */ -#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* spec version minor */ +#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */ +#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */ /* ENA operates with 48-bit memory addresses. ena_mem_addr_t */ struct ena_common_mem_addr { - /* word 0 : low 32 bit of the memory address */ uint32_t mem_addr_low; - /* word 1 : */ - /* high 16 bits of the memory address */ uint16_t mem_addr_high; /* MBZ */ diff --git a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h index 6bc3d6a7..4cf0b205 100644 --- a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h +++ b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h @@ -34,35 +34,30 @@ #ifndef _ENA_ETH_IO_H_ #define _ENA_ETH_IO_H_ -/* Layer 3 protocol index */ enum ena_eth_io_l3_proto_index { - ENA_ETH_IO_L3_PROTO_UNKNOWN = 0, + ENA_ETH_IO_L3_PROTO_UNKNOWN = 0, - ENA_ETH_IO_L3_PROTO_IPV4 = 8, + ENA_ETH_IO_L3_PROTO_IPV4 = 8, - ENA_ETH_IO_L3_PROTO_IPV6 = 11, + ENA_ETH_IO_L3_PROTO_IPV6 = 11, - ENA_ETH_IO_L3_PROTO_FCOE = 21, + ENA_ETH_IO_L3_PROTO_FCOE = 21, - ENA_ETH_IO_L3_PROTO_ROCE = 22, + ENA_ETH_IO_L3_PROTO_ROCE = 22, }; -/* Layer 4 protocol index */ enum ena_eth_io_l4_proto_index { - ENA_ETH_IO_L4_PROTO_UNKNOWN = 0, + ENA_ETH_IO_L4_PROTO_UNKNOWN = 0, - ENA_ETH_IO_L4_PROTO_TCP = 12, + ENA_ETH_IO_L4_PROTO_TCP = 12, - ENA_ETH_IO_L4_PROTO_UDP = 13, + ENA_ETH_IO_L4_PROTO_UDP = 13, - ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23, + ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23, }; -/* ENA IO Queue Tx descriptor */ struct ena_eth_io_tx_desc { - /* word 0 : */ - /* length, request id and control flags - * 15:0 : length - Buffer length in bytes, must + /* 15:0 : length - Buffer length in bytes, must * include any packet trailers that the ENA supposed * to update like End-to-End CRC, Authentication GMAC * etc. 
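
[Annotation] Since ena_common_mem_addr splits a 48-bit DMA address into a 32-bit low word and a 16-bit high word, filling it looks roughly like this:

    #include <stdint.h>

    static int ena_set_mem_addr(struct ena_common_mem_addr *a, uint64_t dma_addr)
    {
        if (dma_addr >> 48)     /* ENA addresses are 48 bits wide */
            return -1;

        a->mem_addr_low  = (uint32_t)dma_addr;
        a->mem_addr_high = (uint16_t)(dma_addr >> 32);
        return 0;
    }
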
This length must not include the @@ -85,9 +80,7 @@ struct ena_eth_io_tx_desc { */ uint32_t len_ctrl; - /* word 1 : */ - /* ethernet control - * 3:0 : l3_proto_idx - L3 protocol. This field + /* 3:0 : l3_proto_idx - L3 protocol. This field * required when l3_csum_en,l3_csum or tso_en are set. * 4 : DF - IPv4 DF, must be 0 if packet is IPv4 and * DF flags of the IPv4 header is 0. Otherwise must @@ -119,10 +112,8 @@ struct ena_eth_io_tx_desc { */ uint32_t meta_ctrl; - /* word 2 : Buffer address bits[31:0] */ uint32_t buff_addr_lo; - /* word 3 : */ /* address high and header size * 15:0 : addr_hi - Buffer Pointer[47:32] * 23:16 : reserved16_w2 @@ -141,20 +132,16 @@ struct ena_eth_io_tx_desc { uint32_t buff_addr_hi_hdr_sz; }; -/* ENA IO Queue Tx Meta descriptor */ struct ena_eth_io_tx_meta_desc { - /* word 0 : */ - /* length, request id and control flags - * 9:0 : req_id_lo - Request ID[9:0] + /* 9:0 : req_id_lo - Request ID[9:0] * 11:10 : reserved10 - MBZ * 12 : reserved12 - MBZ * 13 : reserved13 - MBZ * 14 : ext_valid - if set, offset fields in Word2 - * are valid Also MSS High in Word 0 and Outer L3 - * Offset High in WORD 0 and bits [31:24] in Word 3 - * 15 : word3_valid - If set Crypto Info[23:0] of - * Word 3 is valid - * 19:16 : mss_hi_ptp + * are valid Also MSS High in Word 0 and bits [31:24] + * in Word 3 + * 15 : reserved15 + * 19:16 : mss_hi * 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1: * Extended Metadata Descriptor * 21 : meta_store - Store extended metadata in queue @@ -175,19 +162,13 @@ struct ena_eth_io_tx_meta_desc { */ uint32_t len_ctrl; - /* word 1 : */ - /* word 1 - * 5:0 : req_id_hi + /* 5:0 : req_id_hi * 31:6 : reserved6 - MBZ */ uint32_t word1; - /* word 2 : */ - /* word 2 - * 7:0 : l3_hdr_len - the header length L3 IP header. - * 15:8 : l3_hdr_off - the offset of the first byte - * in the L3 header from the beginning of the to-be - * transmitted packet. + /* 7:0 : l3_hdr_len + * 15:8 : l3_hdr_off * 21:16 : l4_hdr_len_in_words - counts the L4 header * length in words. there is an explicit assumption * that L4 header appears right after L3 header and @@ -196,13 +177,10 @@ struct ena_eth_io_tx_meta_desc { */ uint32_t word2; - /* word 3 : */ uint32_t reserved; }; -/* ENA IO Queue Tx completions descriptor */ struct ena_eth_io_tx_cdesc { - /* word 0 : */ /* Request ID[15:0] */ uint16_t req_id; @@ -214,24 +192,19 @@ struct ena_eth_io_tx_cdesc { */ uint8_t flags; - /* word 1 : */ uint16_t sub_qid; - /* indicates location of submission queue head */ uint16_t sq_head_idx; }; -/* ENA IO Queue Rx descriptor */ struct ena_eth_io_rx_desc { - /* word 0 : */ /* In bytes. 0 means 64KB */ uint16_t length; /* MBZ */ uint8_t reserved2; - /* control flags - * 0 : phase + /* 0 : phase * 1 : reserved1 - MBZ * 2 : first - Indicates first descriptor in * transaction @@ -242,32 +215,27 @@ struct ena_eth_io_rx_desc { */ uint8_t ctrl; - /* word 1 : */ uint16_t req_id; /* MBZ */ uint16_t reserved6; - /* word 2 : Buffer address bits[31:0] */ uint32_t buff_addr_lo; - /* word 3 : */ - /* Buffer Address bits[47:16] */ uint16_t buff_addr_hi; /* MBZ */ uint16_t reserved16_w3; }; -/* ENA IO Queue Rx Completion Base Descriptor (4-word format). 
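
[Annotation] A sketch of how the Rx descriptor above is populated when posting a buffer; the first/last/comp_req control bits are left out for brevity:

    static void fill_rx_desc(struct ena_eth_io_rx_desc *d, uint64_t dma_addr,
                             uint16_t len, uint16_t req_id, uint8_t phase)
    {
        d->length = len;                 /* 0 means 64KB, per the comment */
        d->req_id = req_id;
        d->buff_addr_lo = (uint32_t)dma_addr;
        d->buff_addr_hi = (uint16_t)(dma_addr >> 32); /* upper address bits */
        d->ctrl = phase & 0x1;           /* phase is bit 0 of ctrl */
    }
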
Note: all - * ethernet parsing information are valid only when last=1 +/* 4-word format Note: all ethernet parsing information are valid only when + * last=1 */ struct ena_eth_io_rx_cdesc_base { - /* word 0 : */ - /* 4:0 : l3_proto_idx - L3 protocol index - * 6:5 : src_vlan_cnt - Source VLAN count + /* 4:0 : l3_proto_idx + * 6:5 : src_vlan_cnt * 7 : reserved7 - MBZ - * 12:8 : l4_proto_idx - L4 protocol index + * 12:8 : l4_proto_idx * 13 : l3_csum_err - when set, either the L3 * checksum error detected, or, the controller didn't * validate the checksum. This bit is valid only when @@ -292,56 +260,43 @@ struct ena_eth_io_rx_cdesc_base { */ uint32_t status; - /* word 1 : */ uint16_t length; uint16_t req_id; - /* word 2 : 32-bit hash result */ + /* 32-bit hash result */ uint32_t hash; - /* word 3 : */ - /* submission queue number */ uint16_t sub_qid; uint16_t reserved; }; -/* ENA IO Queue Rx Completion Descriptor (8-word format) */ +/* 8-word format */ struct ena_eth_io_rx_cdesc_ext { - /* words 0:3 : Rx Completion Extended */ struct ena_eth_io_rx_cdesc_base base; - /* word 4 : Completed Buffer address bits[31:0] */ uint32_t buff_addr_lo; - /* word 5 : */ - /* the buffer address used bits[47:32] */ uint16_t buff_addr_hi; uint16_t reserved16; - /* word 6 : Reserved */ uint32_t reserved_w6; - /* word 7 : Reserved */ uint32_t reserved_w7; }; -/* ENA Interrupt Unmask Register */ struct ena_eth_io_intr_reg { - /* word 0 : */ - /* 14:0 : rx_intr_delay - rx interrupt delay value - * 29:15 : tx_intr_delay - tx interrupt delay value - * 30 : intr_unmask - if set, unmasks interrupt + /* 14:0 : rx_intr_delay + * 29:15 : tx_intr_delay + * 30 : intr_unmask * 31 : reserved */ uint32_t intr_control; }; -/* ENA NUMA Node configuration register */ struct ena_eth_io_numa_node_cfg_reg { - /* word 0 : */ /* 7:0 : numa * 30:8 : reserved * 31 : enabled @@ -388,10 +343,8 @@ struct ena_eth_io_numa_node_cfg_reg { #define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) #define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 #define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) -#define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT 15 -#define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK BIT(15) -#define ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT 16 -#define ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK GENMASK(19, 16) +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16) #define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20 #define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20) #define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21 @@ -463,803 +416,544 @@ struct ena_eth_io_numa_node_cfg_reg { #define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) #if !defined(ENA_DEFS_LINUX_MAINLINE) -static inline uint32_t get_ena_eth_io_tx_desc_length( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_length(const struct ena_eth_io_tx_desc *p) { return p->len_ctrl & ENA_ETH_IO_TX_DESC_LENGTH_MASK; } -static inline void set_ena_eth_io_tx_desc_length( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_length(struct ena_eth_io_tx_desc *p, uint32_t val) { p->len_ctrl |= val & ENA_ETH_IO_TX_DESC_LENGTH_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi(const struct ena_eth_io_tx_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK) - >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT; + return (p->len_ctrl & 
ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT; } -static inline void set_ena_eth_io_tx_desc_req_id_hi( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_req_id_hi(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) - & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_meta_desc( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_meta_desc(const struct ena_eth_io_tx_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK) - >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT; + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT; } -static inline void set_ena_eth_io_tx_desc_meta_desc( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_meta_desc(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT) - & ENA_ETH_IO_TX_DESC_META_DESC_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_DESC_META_DESC_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_phase( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_phase(const struct ena_eth_io_tx_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK) - >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT; + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT; } -static inline void set_ena_eth_io_tx_desc_phase( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_phase(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) - & ENA_ETH_IO_TX_DESC_PHASE_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_DESC_PHASE_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_first( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_first(const struct ena_eth_io_tx_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK) - >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT; + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT; } -static inline void set_ena_eth_io_tx_desc_first( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_first(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT) - & ENA_ETH_IO_TX_DESC_FIRST_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_DESC_FIRST_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_last( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_last(const struct ena_eth_io_tx_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK) - >> ENA_ETH_IO_TX_DESC_LAST_SHIFT; + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK) >> ENA_ETH_IO_TX_DESC_LAST_SHIFT; } -static inline void set_ena_eth_io_tx_desc_last( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_last(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT) - & ENA_ETH_IO_TX_DESC_LAST_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_DESC_LAST_MASK; } -static inline uint32_t 
get_ena_eth_io_tx_desc_comp_req( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_comp_req(const struct ena_eth_io_tx_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK) - >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT; + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT; } -static inline void set_ena_eth_io_tx_desc_comp_req( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_comp_req(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT) - & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx(const struct ena_eth_io_tx_desc *p) { return p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK; } -static inline void set_ena_eth_io_tx_desc_l3_proto_idx( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_l3_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val) { p->meta_ctrl |= val & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_DF( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_DF(const struct ena_eth_io_tx_desc *p) { - return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK) - >> ENA_ETH_IO_TX_DESC_DF_SHIFT; + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK) >> ENA_ETH_IO_TX_DESC_DF_SHIFT; } -static inline void set_ena_eth_io_tx_desc_DF( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_DF(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->meta_ctrl |= - (val << ENA_ETH_IO_TX_DESC_DF_SHIFT) - & ENA_ETH_IO_TX_DESC_DF_MASK; + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_DF_SHIFT) & ENA_ETH_IO_TX_DESC_DF_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_tso_en( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_tso_en(const struct ena_eth_io_tx_desc *p) { - return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK) - >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT; + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK) >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT; } -static inline void set_ena_eth_io_tx_desc_tso_en( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_tso_en(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->meta_ctrl |= - (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) - & ENA_ETH_IO_TX_DESC_TSO_EN_MASK; + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) & ENA_ETH_IO_TX_DESC_TSO_EN_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx(const struct ena_eth_io_tx_desc *p) { - return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK) - >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT; + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT; } -static inline void set_ena_eth_io_tx_desc_l4_proto_idx( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_l4_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->meta_ctrl |= - (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) - & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK; + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) 
& ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en(const struct ena_eth_io_tx_desc *p) { - return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK) - >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT; + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT; } -static inline void set_ena_eth_io_tx_desc_l3_csum_en( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_l3_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->meta_ctrl |= - (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) - & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK; + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en(const struct ena_eth_io_tx_desc *p) { - return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK) - >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT; + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT; } -static inline void set_ena_eth_io_tx_desc_l4_csum_en( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_l4_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->meta_ctrl |= - (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) - & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK; + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis(const struct ena_eth_io_tx_desc *p) { - return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK) - >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT; + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK) >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT; } -static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->meta_ctrl |= - (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT) - & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK; + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT) & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial(const struct ena_eth_io_tx_desc *p) { - return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK) - >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT; + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT; } -static inline void set_ena_eth_io_tx_desc_l4_csum_partial( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_l4_csum_partial(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->meta_ctrl |= - (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) - & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK; + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t 
get_ena_eth_io_tx_desc_req_id_lo(const struct ena_eth_io_tx_desc *p) { - return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK) - >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT; + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT; } -static inline void set_ena_eth_io_tx_desc_req_id_lo( - struct ena_eth_io_tx_desc *p, uint32_t val) +static inline void set_ena_eth_io_tx_desc_req_id_lo(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) - & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK; + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_addr_hi( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_addr_hi(const struct ena_eth_io_tx_desc *p) { return p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK; } -static inline void set_ena_eth_io_tx_desc_addr_hi( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_addr_hi(struct ena_eth_io_tx_desc *p, uint32_t val) { p->buff_addr_hi_hdr_sz |= val & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_header_length( - const struct ena_eth_io_tx_desc *p) +static inline uint32_t get_ena_eth_io_tx_desc_header_length(const struct ena_eth_io_tx_desc *p) { - return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK) - >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT; + return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK) >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT; } -static inline void set_ena_eth_io_tx_desc_header_length( - struct ena_eth_io_tx_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_desc_header_length(struct ena_eth_io_tx_desc *p, uint32_t val) { - p->buff_addr_hi_hdr_sz |= - (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) - & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK; + p->buff_addr_hi_hdr_sz |= (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo(const struct ena_eth_io_tx_meta_desc *p) { return p->len_ctrl & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK; } -static inline void set_ena_eth_io_tx_meta_desc_req_id_lo( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { p->len_ctrl |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid( - const struct ena_eth_io_tx_meta_desc *p) -{ - return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK) - >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT; -} - -static inline void set_ena_eth_io_tx_meta_desc_ext_valid( - struct ena_eth_io_tx_meta_desc *p, uint32_t val) -{ - p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT) - & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK; -} - -static inline uint32_t get_ena_eth_io_tx_meta_desc_word3_valid( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(const struct ena_eth_io_tx_meta_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK) - >> ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT; + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK) >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT; } -static inline void 
set_ena_eth_io_tx_meta_desc_word3_valid( - struct ena_eth_io_tx_meta_desc *p, uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_ext_valid(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT) - & ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT) & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi_ptp( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi(const struct ena_eth_io_tx_meta_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK) - >> ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT; + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT; } -static inline void set_ena_eth_io_tx_meta_desc_mss_hi_ptp( - struct ena_eth_io_tx_meta_desc *p, uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_mss_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT) - & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(const struct ena_eth_io_tx_meta_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK) - >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT; + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK) >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT; } -static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type( - struct ena_eth_io_tx_meta_desc *p, uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT) - & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT) & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(const struct ena_eth_io_tx_meta_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK) - >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT; + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK) >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT; } -static inline void set_ena_eth_io_tx_meta_desc_meta_store( - struct ena_eth_io_tx_meta_desc *p, uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_meta_store(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT) - & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(const struct ena_eth_io_tx_meta_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK) - >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT; + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT; } -static inline void 
set_ena_eth_io_tx_meta_desc_meta_desc( - struct ena_eth_io_tx_meta_desc *p, uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_meta_desc(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT) - & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_phase( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(const struct ena_eth_io_tx_meta_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK) - >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT; + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT; } -static inline void set_ena_eth_io_tx_meta_desc_phase( - struct ena_eth_io_tx_meta_desc *p, uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_phase(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) - & ENA_ETH_IO_TX_META_DESC_PHASE_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_META_DESC_PHASE_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_first( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_first(const struct ena_eth_io_tx_meta_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK) - >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT; + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT; } -static inline void set_ena_eth_io_tx_meta_desc_first( - struct ena_eth_io_tx_meta_desc *p, uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_first(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT) - & ENA_ETH_IO_TX_META_DESC_FIRST_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_META_DESC_FIRST_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_last( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_last(const struct ena_eth_io_tx_meta_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK) - >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT; + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK) >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT; } -static inline void set_ena_eth_io_tx_meta_desc_last( - struct ena_eth_io_tx_meta_desc *p, uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_last(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT) - & ENA_ETH_IO_TX_META_DESC_LAST_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_META_DESC_LAST_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(const struct ena_eth_io_tx_meta_desc *p) { - return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK) - >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT; + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT; } -static inline void set_ena_eth_io_tx_meta_desc_comp_req( - struct ena_eth_io_tx_meta_desc *p, uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_comp_req(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= (val << 
ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT) - & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK; + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi(const struct ena_eth_io_tx_meta_desc *p) { return p->word1 & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK; } -static inline void set_ena_eth_io_tx_meta_desc_req_id_hi( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_req_id_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { p->word1 |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len(const struct ena_eth_io_tx_meta_desc *p) { return p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK; } -static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { p->word2 |= val & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off(const struct ena_eth_io_tx_meta_desc *p) { - return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK) - >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT; + return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK) >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT; } -static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->word2 |= - (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) - & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK; + p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(const struct ena_eth_io_tx_meta_desc *p) { - return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK) - >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT; + return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK) >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT; } -static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->word2 |= - (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) - & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK; + p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo( - const struct ena_eth_io_tx_meta_desc *p) +static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo(const struct ena_eth_io_tx_meta_desc *p) { - return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK) - >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT; + return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK) >> 
ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT; } -static inline void set_ena_eth_io_tx_meta_desc_mss_lo( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) +static inline void set_ena_eth_io_tx_meta_desc_mss_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->word2 |= - (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) - & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK; + p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK; } -static inline uint8_t get_ena_eth_io_tx_cdesc_phase( - const struct ena_eth_io_tx_cdesc *p) +static inline uint8_t get_ena_eth_io_tx_cdesc_phase(const struct ena_eth_io_tx_cdesc *p) { return p->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK; } -static inline void set_ena_eth_io_tx_cdesc_phase( - struct ena_eth_io_tx_cdesc *p, - uint8_t val) +static inline void set_ena_eth_io_tx_cdesc_phase(struct ena_eth_io_tx_cdesc *p, uint8_t val) { p->flags |= val & ENA_ETH_IO_TX_CDESC_PHASE_MASK; } -static inline uint8_t get_ena_eth_io_rx_desc_phase( - const struct ena_eth_io_rx_desc *p) +static inline uint8_t get_ena_eth_io_rx_desc_phase(const struct ena_eth_io_rx_desc *p) { return p->ctrl & ENA_ETH_IO_RX_DESC_PHASE_MASK; } -static inline void set_ena_eth_io_rx_desc_phase( - struct ena_eth_io_rx_desc *p, - uint8_t val) +static inline void set_ena_eth_io_rx_desc_phase(struct ena_eth_io_rx_desc *p, uint8_t val) { p->ctrl |= val & ENA_ETH_IO_RX_DESC_PHASE_MASK; } -static inline uint8_t get_ena_eth_io_rx_desc_first( - const struct ena_eth_io_rx_desc *p) +static inline uint8_t get_ena_eth_io_rx_desc_first(const struct ena_eth_io_rx_desc *p) { - return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK) - >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT; + return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK) >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT; } -static inline void set_ena_eth_io_rx_desc_first( - struct ena_eth_io_rx_desc *p, - uint8_t val) +static inline void set_ena_eth_io_rx_desc_first(struct ena_eth_io_rx_desc *p, uint8_t val) { - p->ctrl |= - (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT) - & ENA_ETH_IO_RX_DESC_FIRST_MASK; + p->ctrl |= (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT) & ENA_ETH_IO_RX_DESC_FIRST_MASK; } -static inline uint8_t get_ena_eth_io_rx_desc_last( - const struct ena_eth_io_rx_desc *p) +static inline uint8_t get_ena_eth_io_rx_desc_last(const struct ena_eth_io_rx_desc *p) { - return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK) - >> ENA_ETH_IO_RX_DESC_LAST_SHIFT; + return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK) >> ENA_ETH_IO_RX_DESC_LAST_SHIFT; } -static inline void set_ena_eth_io_rx_desc_last( - struct ena_eth_io_rx_desc *p, - uint8_t val) +static inline void set_ena_eth_io_rx_desc_last(struct ena_eth_io_rx_desc *p, uint8_t val) { - p->ctrl |= - (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT) - & ENA_ETH_IO_RX_DESC_LAST_MASK; + p->ctrl |= (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT) & ENA_ETH_IO_RX_DESC_LAST_MASK; } -static inline uint8_t get_ena_eth_io_rx_desc_comp_req( - const struct ena_eth_io_rx_desc *p) +static inline uint8_t get_ena_eth_io_rx_desc_comp_req(const struct ena_eth_io_rx_desc *p) { - return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK) - >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT; + return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT; } -static inline void set_ena_eth_io_rx_desc_comp_req( - struct ena_eth_io_rx_desc *p, - uint8_t val) +static inline void set_ena_eth_io_rx_desc_comp_req(struct ena_eth_io_rx_desc *p, uint8_t val) { - p->ctrl |= - (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT) - & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; + p->ctrl |= (val << 
ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; } -static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx( - const struct ena_eth_io_rx_cdesc_base *p) +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx(const struct ena_eth_io_rx_cdesc_base *p) { return p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK; } -static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) +static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { p->status |= val & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK; } -static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt( - const struct ena_eth_io_rx_cdesc_base *p) +static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt(const struct ena_eth_io_rx_cdesc_base *p) { - return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK) - >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT; + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT; } -static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) +static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= - (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT) - & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK; + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK; } -static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx( - const struct ena_eth_io_rx_cdesc_base *p) +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(const struct ena_eth_io_rx_cdesc_base *p) { - return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) - >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; } -static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx( - struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT) - & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK; + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK; } -static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err( - const struct ena_eth_io_rx_cdesc_base *p) +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(const struct ena_eth_io_rx_cdesc_base *p) { - return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) - >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT; + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT; } -static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err( - struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT) - & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK; + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK; } -static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err( - const struct ena_eth_io_rx_cdesc_base *p) +static inline uint32_t 
get_ena_eth_io_rx_cdesc_base_l4_csum_err(const struct ena_eth_io_rx_cdesc_base *p) { - return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) - >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT; + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT; } -static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err( - struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT) - & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK; + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK; } -static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag( - const struct ena_eth_io_rx_cdesc_base *p) +static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(const struct ena_eth_io_rx_cdesc_base *p) { - return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) - >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT; + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT; } -static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag( - struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT) - & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK; + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK; } -static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase( - const struct ena_eth_io_rx_cdesc_base *p) +static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(const struct ena_eth_io_rx_cdesc_base *p) { - return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) - >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT; + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT; } -static inline void set_ena_eth_io_rx_cdesc_base_phase( - struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +static inline void set_ena_eth_io_rx_cdesc_base_phase(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT) - & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK; + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK; } -static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2( - const struct ena_eth_io_rx_cdesc_base *p) +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(const struct ena_eth_io_rx_cdesc_base *p) { - return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK) - >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT; + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT; } -static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2( - struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT) - & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK; + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK; } -static inline uint32_t get_ena_eth_io_rx_cdesc_base_first( - const struct ena_eth_io_rx_cdesc_base *p) +static inline uint32_t get_ena_eth_io_rx_cdesc_base_first(const struct ena_eth_io_rx_cdesc_base *p) { - 
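These rx_cdesc_base accessors all read one device-written status word. For orientation, here is a sketch of decoding that word the way ena_com_rx_set_flags() does later in this patch. The bit positions follow the layout comments above (4:0 l3_proto_idx, 12:8 l4_proto_idx, 13 l3_csum_err), but the names and the result struct are invented for the example:

    #include <stdint.h>
    #include <stdio.h>

    /* Bit positions taken from the rx_cdesc_base layout comments above. */
    #define RX_L3_PROTO_MASK     0x0000001fu
    #define RX_L4_PROTO_SHIFT    8
    #define RX_L4_PROTO_MASK     0x00001f00u
    #define RX_L3_CSUM_ERR_SHIFT 13
    #define RX_L3_CSUM_ERR_MASK  0x00002000u

    struct demo_rx_flags {
            uint32_t l3_proto;
            uint32_t l4_proto;
            uint32_t l3_csum_err;
    };

    /* Decode one status word: plain mask-and-shift, no stateful parsing. */
    static void demo_decode_status(uint32_t status, struct demo_rx_flags *f)
    {
            f->l3_proto = status & RX_L3_PROTO_MASK;
            f->l4_proto = (status & RX_L4_PROTO_MASK) >> RX_L4_PROTO_SHIFT;
            f->l3_csum_err = (status & RX_L3_CSUM_ERR_MASK) >>
                             RX_L3_CSUM_ERR_SHIFT;
    }

    int main(void)
    {
            struct demo_rx_flags f;

            /* IPv4 (8) over TCP (12), checksum OK -- proto values from the
             * l3/l4 proto index enums earlier in this header. */
            demo_decode_status(8u | (12u << RX_L4_PROTO_SHIFT), &f);
            printf("l3=%u l4=%u csum_err=%u\n",
                   f.l3_proto, f.l4_proto, f.l3_csum_err);
            return 0;
    }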
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) - >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT; + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT; } -static inline void set_ena_eth_io_rx_cdesc_base_first( - struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +static inline void set_ena_eth_io_rx_cdesc_base_first(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT) - & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK; + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK; } -static inline uint32_t get_ena_eth_io_rx_cdesc_base_last( - const struct ena_eth_io_rx_cdesc_base *p) +static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(const struct ena_eth_io_rx_cdesc_base *p) { - return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) - >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; } -static inline void set_ena_eth_io_rx_cdesc_base_last( - struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +static inline void set_ena_eth_io_rx_cdesc_base_last(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT) - & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK; + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK; } -static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer( - const struct ena_eth_io_rx_cdesc_base *p) +static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(const struct ena_eth_io_rx_cdesc_base *p) { - return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK) - >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT; + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT; } -static inline void set_ena_eth_io_rx_cdesc_base_buffer( - struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +static inline void set_ena_eth_io_rx_cdesc_base_buffer(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT) - & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK; + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK; } -static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay( - const struct ena_eth_io_intr_reg *p) +static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(const struct ena_eth_io_intr_reg *p) { return p->intr_control & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK; } -static inline void set_ena_eth_io_intr_reg_rx_intr_delay( - struct ena_eth_io_intr_reg *p, uint32_t val) +static inline void set_ena_eth_io_intr_reg_rx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val) { p->intr_control |= val & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK; } -static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay( - const struct ena_eth_io_intr_reg *p) +static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(const struct ena_eth_io_intr_reg *p) { - return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK) - >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT; + return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK) >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT; } -static inline void set_ena_eth_io_intr_reg_tx_intr_delay( - struct ena_eth_io_intr_reg *p, uint32_t val) +static inline void set_ena_eth_io_intr_reg_tx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val) { - p->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) - & 
ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK; + p->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK; } -static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask( - const struct ena_eth_io_intr_reg *p) +static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(const struct ena_eth_io_intr_reg *p) { - return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK) - >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT; + return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK) >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT; } -static inline void set_ena_eth_io_intr_reg_intr_unmask( - struct ena_eth_io_intr_reg *p, uint32_t val) +static inline void set_ena_eth_io_intr_reg_intr_unmask(struct ena_eth_io_intr_reg *p, uint32_t val) { - p->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT) - & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; + p->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT) & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; } -static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa( - const struct ena_eth_io_numa_node_cfg_reg *p) +static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa(const struct ena_eth_io_numa_node_cfg_reg *p) { return p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK; } -static inline void set_ena_eth_io_numa_node_cfg_reg_numa( - struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val) +static inline void set_ena_eth_io_numa_node_cfg_reg_numa(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val) { p->numa_cfg |= val & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK; } -static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled( - const struct ena_eth_io_numa_node_cfg_reg *p) +static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled(const struct ena_eth_io_numa_node_cfg_reg *p) { - return (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK) - >> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT; + return (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK) >> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT; } -static inline void set_ena_eth_io_numa_node_cfg_reg_enabled( - struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val) +static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val) { - p->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT) - & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK; + p->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT) & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK; } #endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */ diff --git a/drivers/net/ena/base/ena_defs/ena_gen_info.h b/drivers/net/ena/base/ena_defs/ena_gen_info.h index 3d252096..e87bcfd8 100644 --- a/drivers/net/ena/base/ena_defs/ena_gen_info.h +++ b/drivers/net/ena/base/ena_defs/ena_gen_info.h @@ -31,5 +31,5 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#define ENA_GEN_DATE "Sun Jun 5 10:24:39 IDT 2016" -#define ENA_GEN_COMMIT "17146ed" +#define ENA_GEN_DATE "Sun Oct 23 12:27:32 IDT 2016" +#define ENA_GEN_COMMIT "79d82fa" diff --git a/drivers/net/ena/base/ena_defs/ena_includes.h b/drivers/net/ena/base/ena_defs/ena_includes.h index a86c876f..30a920a8 100644 --- a/drivers/net/ena/base/ena_defs/ena_includes.h +++ b/drivers/net/ena/base/ena_defs/ena_includes.h @@ -35,5 +35,3 @@ #include "ena_regs_defs.h" #include "ena_admin_defs.h" #include "ena_eth_io_defs.h" -#include "ena_efa_admin_defs.h" -#include "ena_efa_io_defs.h" diff --git a/drivers/net/ena/base/ena_defs/ena_regs_defs.h b/drivers/net/ena/base/ena_defs/ena_regs_defs.h index d0241278..b0870f25 100644 --- a/drivers/net/ena/base/ena_defs/ena_regs_defs.h +++ b/drivers/net/ena/base/ena_defs/ena_regs_defs.h @@ -34,6 +34,38 @@ #ifndef _ENA_REGS_H_ #define _ENA_REGS_H_ +enum ena_regs_reset_reason_types { + ENA_REGS_RESET_NORMAL = 0, + + ENA_REGS_RESET_KEEP_ALIVE_TO = 1, + + ENA_REGS_RESET_ADMIN_TO = 2, + + ENA_REGS_RESET_MISS_TX_CMPL = 3, + + ENA_REGS_RESET_INV_RX_REQ_ID = 4, + + ENA_REGS_RESET_INV_TX_REQ_ID = 5, + + ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6, + + ENA_REGS_RESET_INIT_ERR = 7, + + ENA_REGS_RESET_DRIVER_INVALID_STATE = 8, + + ENA_REGS_RESET_OS_TRIGGER = 9, + + ENA_REGS_RESET_OS_NETDEV_WD = 10, + + ENA_REGS_RESET_SHUTDOWN = 11, + + ENA_REGS_RESET_USER_TRIGGER = 12, + + ENA_REGS_RESET_GENERIC = 13, + + ENA_REGS_RESET_MISS_INTERRUPT = 14, +}; + /* ena_registers offsets */ #define ENA_REGS_VERSION_OFF 0x0 #define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 @@ -80,6 +112,8 @@ #define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e #define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 #define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 +#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16 +#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000 /* aq_caps register */ #define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff @@ -104,6 +138,8 @@ #define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 #define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 #define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 +#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28 +#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000 /* dev_sts register */ #define ENA_REGS_DEV_STS_READY_MASK 0x1 diff --git a/drivers/net/ena/base/ena_eth_com.c b/drivers/net/ena/base/ena_eth_com.c index 290a5666..4c4989a3 100644 --- a/drivers/net/ena/base/ena_eth_com.c +++ b/drivers/net/ena/base/ena_eth_com.c @@ -43,11 +43,10 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( head_masked = io_cq->head & (io_cq->q_depth - 1); expected_phase = io_cq->phase; - cdesc = (struct ena_eth_io_rx_cdesc_base *) - ((unsigned char *)io_cq->cdesc_addr.virt_addr + cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr + (head_masked * io_cq->cdesc_entry_size_in_bytes)); - desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> + desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT; if (desc_phase != expected_phase) @@ -74,7 +73,7 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) offset = tail_masked * io_sq->desc_entry_size; - return (unsigned char *)io_sq->desc_addr.virt_addr + offset; + return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset); } static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq) @@ -86,8 +85,8 @@ static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq) if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) return; - 
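The new ena_regs_reset_reason_types enum pairs with the ENA_REGS_DEV_CTL_RESET_REASON_SHIFT/MASK definitions added above: the driver encodes why it is resetting into bits 31:28 of the dev_ctl register so the device can record it. A sketch of the encode/decode round trip using the constants from this patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Constants from ena_regs_defs.h above. */
    #define DEV_CTL_RESET_REASON_SHIFT 28
    #define DEV_CTL_RESET_REASON_MASK  0xf0000000u
    #define RESET_OS_TRIGGER           9u

    int main(void)
    {
            uint32_t dev_ctl = 0;

            /* Encode the reason into the register image before the reset... */
            dev_ctl |= (RESET_OS_TRIGGER << DEV_CTL_RESET_REASON_SHIFT) &
                       DEV_CTL_RESET_REASON_MASK;
            /* ...and decode it back, as the device side would. */
            printf("reset reason = %u\n",
                   (dev_ctl & DEV_CTL_RESET_REASON_MASK) >>
                   DEV_CTL_RESET_REASON_SHIFT);
            return 0;
    }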
memcpy_toio((unsigned char *)io_sq->desc_addr.pbuf_dev_addr + offset, - (unsigned char *)io_sq->desc_addr.virt_addr + offset, + memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset, + io_sq->desc_addr.virt_addr + offset, io_sq->desc_entry_size); } @@ -125,11 +124,11 @@ static inline struct ena_eth_io_rx_cdesc_base * { idx &= (io_cq->q_depth - 1); return (struct ena_eth_io_rx_cdesc_base *) - ((unsigned char *)io_cq->cdesc_addr.virt_addr + - idx * io_cq->cdesc_entry_size_in_bytes); + ((uintptr_t)io_cq->cdesc_addr.virt_addr + + idx * io_cq->cdesc_entry_size_in_bytes); } -static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, +static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, u16 *first_cdesc_idx) { struct ena_eth_io_rx_cdesc_base *cdesc; @@ -143,7 +142,7 @@ static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, ena_com_cq_inc_head(io_cq); count++; - last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> + last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; } while (!last); @@ -183,9 +182,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq, return false; } -static inline void ena_com_create_and_store_tx_meta_desc( - struct ena_com_io_sq *io_sq, - struct ena_com_tx_ctx *ena_tx_ctx) +static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx) { struct ena_eth_io_tx_meta_desc *meta_desc = NULL; struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; @@ -203,8 +201,8 @@ static inline void ena_com_create_and_store_tx_meta_desc( ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK; /* bits 10-13 of the mss */ meta_desc->len_ctrl |= ((ena_meta->mss >> 10) << - ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT) & - ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK; + ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) & + ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK; /* Extended meta desc */ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK; @@ -237,11 +235,11 @@ static inline void ena_com_create_and_store_tx_meta_desc( static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, struct ena_eth_io_rx_cdesc_base *cdesc) { - ena_rx_ctx->l3_proto = (enum ena_eth_io_l3_proto_index)(cdesc->status & - ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK); - ena_rx_ctx->l4_proto = (enum ena_eth_io_l4_proto_index) - ((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> - ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT); + ena_rx_ctx->l3_proto = cdesc->status & + ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK; + ena_rx_ctx->l4_proto = + (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; ena_rx_ctx->l3_csum_err = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT; @@ -280,8 +278,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, bool have_meta; u64 addr_hi; - ENA_ASSERT(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX, - "wrong Q type"); + ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, + "wrong Q type"); /* num_bufs +1 for potential meta desc */ if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) { @@ -410,8 +408,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, u16 nb_hw_desc; u16 i; - ENA_ASSERT(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX, - "wrong Q type"); + ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, + "wrong Q type"); nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx); 
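The completion-queue reads above illustrate the driver's phase-bit ownership convention: the device writes completions with a phase bit that flips on every wrap of the ring, and the driver only consumes a descriptor whose phase matches its expected value, loading the status through READ_ONCE because the device updates the ring by DMA. A simplified, self-contained sketch of that polling step (the wrap handling mirrors what ena_com_cq_inc_head() is assumed to do; that function is not shown in this patch, and READ_ONCE uses GNU C typeof, as in ena_plat_dpdk.h):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #ifndef READ_ONCE
    #define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
    #endif

    #define DEMO_PHASE_MASK 0x01000000u

    struct demo_cdesc { uint32_t status; };

    struct demo_cq {
            struct demo_cdesc *ring;
            uint16_t q_depth;   /* power of two */
            uint16_t head;
            uint32_t phase;     /* expected phase, flips on wrap */
    };

    /* Return the next completed descriptor, or NULL if the device has not
     * written one yet -- the same convention as ena_com_get_next_rx_cdesc(). */
    static struct demo_cdesc *demo_next_cdesc(struct demo_cq *cq)
    {
            struct demo_cdesc *cdesc = &cq->ring[cq->head & (cq->q_depth - 1)];
            uint32_t phase = (READ_ONCE(cdesc->status) & DEMO_PHASE_MASK) ? 1 : 0;

            if (phase != cq->phase)
                    return NULL;   /* device still owns this slot */

            if (((++cq->head) & (cq->q_depth - 1)) == 0)
                    cq->phase ^= 1;   /* ring wrapped: expected phase flips */
            return cdesc;
    }

    int main(void)
    {
            struct demo_cdesc ring[4] = {{0}, {0}, {0}, {0}};
            struct demo_cq cq = { ring, 4, 0, 1 };

            /* Zeroed ring: nothing completed, phase never matches. */
            printf(demo_next_cdesc(&cq) ? "completed\n" : "empty\n");

            ring[0].status = DEMO_PHASE_MASK;   /* device completes slot 0 */
            printf(demo_next_cdesc(&cq) ? "completed\n" : "empty\n");
            return 0;
    }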
if (nb_hw_desc == 0) { @@ -455,8 +453,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, { struct ena_eth_io_rx_desc *desc; - ENA_ASSERT(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX, - "wrong Q type"); + ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, + "wrong Q type"); if (unlikely(ena_com_sq_empty_space(io_sq) == 0)) return ENA_COM_NO_SPACE; @@ -475,8 +473,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, desc->buff_addr_lo = (u32)ena_buf->paddr; desc->buff_addr_hi = - ((ena_buf->paddr & - GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); + ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); ena_com_sq_update_tail(io_sq); @@ -493,20 +490,37 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) expected_phase = io_cq->phase; cdesc = (struct ena_eth_io_tx_cdesc *) - ((unsigned char *)io_cq->cdesc_addr.virt_addr - + (masked_head * io_cq->cdesc_entry_size_in_bytes)); + ((uintptr_t)io_cq->cdesc_addr.virt_addr + + (masked_head * io_cq->cdesc_entry_size_in_bytes)); /* When the current completion descriptor phase isn't the same as the * expected, it mean that the device still didn't update * this completion. */ - cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK; + cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK; if (cdesc_phase != expected_phase) return ENA_COM_TRY_AGAIN; + if (unlikely(cdesc->req_id >= io_cq->q_depth)) { + ena_trc_err("Invalid req id %d\n", cdesc->req_id); + return ENA_COM_INVAL; + } + ena_com_cq_inc_head(io_cq); - *req_id = cdesc->req_id; + *req_id = READ_ONCE(cdesc->req_id); return 0; } + +bool ena_com_cq_empty(struct ena_com_io_cq *io_cq) +{ + struct ena_eth_io_rx_cdesc_base *cdesc; + + cdesc = ena_com_get_next_rx_cdesc(io_cq); + if (cdesc) + return false; + else + return true; +} + diff --git a/drivers/net/ena/base/ena_eth_com.h b/drivers/net/ena/base/ena_eth_com.h index 71a880c0..56ea4ae6 100644 --- a/drivers/net/ena/base/ena_eth_com.h +++ b/drivers/net/ena/base/ena_eth_com.h @@ -92,10 +92,12 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id); +bool ena_com_cq_empty(struct ena_com_io_cq *io_cq); + static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, struct ena_eth_io_intr_reg *intr_reg) { - ENA_REG_WRITE32(intr_reg->intr_control, io_cq->unmask_reg); + ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg); } static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq) @@ -118,7 +120,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail); - ENA_REG_WRITE32(tail, io_sq->db_addr); + ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr); return 0; } @@ -135,7 +137,7 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq) if (io_cq->cq_head_db_reg && need_update) { ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n", io_cq->qid, head); - ENA_REG_WRITE32(head, io_cq->cq_head_db_reg); + ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg); io_cq->last_head_update = head; } @@ -153,7 +155,7 @@ static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq, numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK) | ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK; - ENA_REG_WRITE32(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg); + ENA_REG_WRITE32(io_cq->bus, 
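The added req_id >= q_depth check in ena_com_tx_comp_req_id_get() is a defensive-validation pattern worth noting: a device-supplied index is bounds-checked before the caller uses it to index its own bookkeeping arrays. A trivial sketch of the same pattern with invented names:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_EINVAL (-22)

    /* Validate a device-supplied index before handing it out; q_depth
     * bounds the per-queue bookkeeping array in this sketch. */
    static int demo_get_req_id(uint16_t raw_req_id, uint16_t q_depth,
                               uint16_t *req_id)
    {
            if (raw_req_id >= q_depth) {
                    fprintf(stderr, "Invalid req id %d\n", raw_req_id);
                    return DEMO_EINVAL;
            }
            *req_id = raw_req_id;
            return 0;
    }

    int main(void)
    {
            uint16_t req_id;

            printf("ok=%d\n", demo_get_req_id(5, 1024, &req_id));     /* 0 */
            printf("bad=%d\n", demo_get_req_id(4096, 1024, &req_id)); /* -22 */
            return 0;
    }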
numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg); } static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem) diff --git a/drivers/net/ena/base/ena_plat.h b/drivers/net/ena/base/ena_plat.h index b5b64545..f829936b 100644 --- a/drivers/net/ena/base/ena_plat.h +++ b/drivers/net/ena/base/ena_plat.h @@ -43,7 +43,11 @@ #include "ena_plat_dpdk.h" #endif #elif defined(__FreeBSD__) +#if defined(_KERNEL) +#include "ena_plat_fbsd.h" +#else #include "ena_plat_dpdk.h" +#endif #elif defined(_WIN32) #include "ena_plat_windows.h" #else diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h index 8cba319e..900ba1a6 100644 --- a/drivers/net/ena/base/ena_plat_dpdk.h +++ b/drivers/net/ena/base/ena_plat_dpdk.h @@ -73,10 +73,10 @@ typedef uint64_t dma_addr_t; #define ENA_COM_INVAL -EINVAL #define ENA_COM_NO_SPACE -ENOSPC #define ENA_COM_NO_DEVICE -ENODEV -#define ENA_COM_PERMISSION -EPERM #define ENA_COM_TIMER_EXPIRED -ETIME #define ENA_COM_FAULT -EFAULT #define ENA_COM_TRY_AGAIN -EAGAIN +#define ENA_COM_UNSUPPORTED -EOPNOTSUPP #define ____cacheline_aligned __rte_cache_aligned @@ -116,11 +116,13 @@ typedef uint64_t dma_addr_t; #define ENA_MIN16(x, y) RTE_MIN((x), (y)) #define ENA_MIN8(x, y) RTE_MIN((x), (y)) +#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8) #define U64_C(x) x ## ULL #define BIT(nr) (1UL << (nr)) #define BITS_PER_LONG (__SIZEOF_LONG__ * 8) #define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) -#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) +#define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \ + (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) #ifdef RTE_LIBRTE_ENA_COM_DEBUG #define ena_trc_dbg(format, arg...) \ @@ -138,6 +140,15 @@ typedef uint64_t dma_addr_t; #define ena_trc_err(format, arg...) do { } while (0) #endif /* RTE_LIBRTE_ENA_COM_DEBUG */ +#define ENA_WARN(cond, format, arg...) \ +do { \ + if (unlikely(cond)) { \ + ena_trc_err( \ + "Warn failed on %s:%s:%d:" format, \ + __FILE__, __func__, __LINE__, ##arg); \ + } \ +} while (0) + /* Spinlock related methods */ #define ena_spinlock_t rte_spinlock_t #define ENA_SPINLOCK_INIT(spinlock) rte_spinlock_init(&spinlock) @@ -177,10 +188,21 @@ typedef uint64_t dma_addr_t; #define ENA_WAIT_EVENT_SIGNAL(waitevent) pthread_cond_signal(&waitevent.cond) /* pthread condition doesn't need to be rearmed after usage */ #define ENA_WAIT_EVENT_CLEAR(...) +#define ENA_WAIT_EVENT_DESTROY(waitqueue) ((void)(waitqueue)) #define ena_wait_event_t ena_wait_queue_t #define ENA_MIGHT_SLEEP() +#define ENA_TIME_EXPIRE(timeout) (timeout < rte_get_timer_cycles()) +#define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \ + (timeout_us * rte_get_timer_hz() / 1000000 + rte_get_timer_cycles()) + +/* + * Each rte_memzone should have unique name. + * To satisfy it, count number of allocations and add it to name. 
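The GENMASK_ULL rewrite above is a correctness fix, not a cleanup: the old form, (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)), shifts a 64-bit value by 64 when asked for a full-width mask such as GENMASK_ULL(63, 0), which is undefined behavior in C. The replacement builds the mask from ~0ULL so every width up to 64 bits stays well defined:

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)

    /* Replacement form from ena_plat_dpdk.h above: defined for all h up
     * to 63, including the full-width GENMASK_ULL(63, 0). */
    #define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \
                               (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

    int main(void)
    {
            printf("GENMASK_ULL(63, 0)  = 0x%llx\n",
                   (unsigned long long)GENMASK_ULL(63, 0));
                   /* 0xffffffffffffffff */
            printf("GENMASK_ULL(47, 32) = 0x%llx\n",
                   (unsigned long long)GENMASK_ULL(47, 32));
                   /* 0xffff00000000 */
            return 0;
    }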
+ */ +extern uint32_t ena_alloc_cnt; + #define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, handle) \ do { \ const struct rte_memzone *mz; \ @@ -188,47 +210,57 @@ typedef uint64_t dma_addr_t; ENA_TOUCH(dmadev); ENA_TOUCH(handle); \ snprintf(z_name, sizeof(z_name), \ "ena_alloc_%d", ena_alloc_cnt++); \ - mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, 0); \ - memset(mz->addr, 0, size); \ - virt = mz->addr; \ - phys = mz->iova; \ + mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, \ + RTE_MEMZONE_IOVA_CONTIG); \ handle = mz; \ + if (mz == NULL) { \ + virt = NULL; \ + phys = 0; \ + } else { \ + memset(mz->addr, 0, size); \ + virt = mz->addr; \ + phys = mz->iova; \ + } \ } while (0) #define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \ ({ ENA_TOUCH(size); ENA_TOUCH(phys); \ ENA_TOUCH(dmadev); \ rte_memzone_free(handle); }) -#define ENA_MEM_ALLOC_COHERENT_NODE(dmadev, size, virt, phys, node, dev_node) \ +#define ENA_MEM_ALLOC_COHERENT_NODE( \ + dmadev, size, virt, phys, mem_handle, node, dev_node) \ do { \ const struct rte_memzone *mz; \ char z_name[RTE_MEMZONE_NAMESIZE]; \ ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \ snprintf(z_name, sizeof(z_name), \ "ena_alloc_%d", ena_alloc_cnt++); \ - mz = rte_memzone_reserve(z_name, size, node, 0); \ - memset(mz->addr, 0, size); \ - virt = mz->addr; \ - phys = mz->iova; \ + mz = rte_memzone_reserve(z_name, size, node, \ + RTE_MEMZONE_IOVA_CONTIG); \ + mem_handle = mz; \ + if (mz == NULL) { \ + virt = NULL; \ + phys = 0; \ + } else { \ + memset(mz->addr, 0, size); \ + virt = mz->addr; \ + phys = mz->iova; \ + } \ } while (0) #define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \ do { \ - const struct rte_memzone *mz; \ - char z_name[RTE_MEMZONE_NAMESIZE]; \ ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \ - snprintf(z_name, sizeof(z_name), \ - "ena_alloc_%d", ena_alloc_cnt++); \ - mz = rte_memzone_reserve(z_name, size, node, 0); \ - memset(mz->addr, 0, size); \ - virt = mz->addr; \ + virt = rte_zmalloc_socket(NULL, size, 0, node); \ } while (0) #define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1) #define ENA_MEM_FREE(dmadev, ptr) ({ENA_TOUCH(dmadev); rte_free(ptr); }) -#define ENA_REG_WRITE32(value, reg) rte_write32_relaxed((value), (reg)) -#define ENA_REG_READ32(reg) rte_read32_relaxed((reg)) +#define ENA_REG_WRITE32(bus, value, reg) \ + ({ (void)(bus); rte_write32_relaxed((value), (reg)); }) +#define ENA_REG_READ32(bus, reg) \ + ({ (void)(bus); rte_read32_relaxed((reg)); }) #define ATOMIC32_INC(i32_ptr) rte_atomic32_inc(i32_ptr) #define ATOMIC32_DEC(i32_ptr) rte_atomic32_dec(i32_ptr) @@ -244,4 +276,11 @@ typedef uint64_t dma_addr_t; #define PTR_ERR(error) ((long)(void *)error) #define might_sleep() +#define lower_32_bits(x) ((uint32_t)(x)) +#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16)) + +#ifndef READ_ONCE +#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var)))) +#endif + #endif /* DPDK_ENA_COM_ENA_PLAT_DPDK_H_ */ diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c index 34b2a8d7..c255dc6d 100644 --- a/drivers/net/ena/ena_ethdev.c +++ b/drivers/net/ena/ena_ethdev.c @@ -54,7 +54,7 @@ #include #define DRV_MODULE_VER_MAJOR 1 -#define DRV_MODULE_VER_MINOR 0 +#define DRV_MODULE_VER_MINOR 1 #define DRV_MODULE_VER_SUBMINOR 0 #define ENA_IO_TXQ_IDX(q) (2 * (q)) @@ -85,6 +85,9 @@ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#define ENA_MAX_RING_DESC ENA_DEFAULT_RING_SIZE +#define ENA_MIN_RING_DESC 128 + enum ethtool_stringset { ETH_SS_TEST = 0, ETH_SS_STATS, @@ -114,6 +117,12 @@ 
struct ena_stats { #define ENA_STAT_GLOBAL_ENTRY(stat) \ ENA_STAT_ENTRY(stat, dev) +/* + * Each rte_memzone should have a unique name. + * To satisfy it, count the number of allocations and add it to the name. + */ +uint32_t ena_alloc_cnt; + static const struct ena_stats ena_stats_global_strings[] = { ENA_STAT_GLOBAL_ENTRY(tx_timeout), ENA_STAT_GLOBAL_ENTRY(io_suspend), @@ -195,8 +204,11 @@ static const struct rte_pci_id pci_id_ena_map[] = { { .device_id = 0 }, }; +static struct ena_aenq_handlers aenq_handlers; + static int ena_device_init(struct ena_com_dev *ena_dev, - struct ena_com_dev_get_features_ctx *get_feat_ctx); + struct ena_com_dev_get_features_ctx *get_feat_ctx, + bool *wd_state); static int ena_dev_configure(struct rte_eth_dev *dev); static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); @@ -215,7 +227,9 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count); static void ena_init_rings(struct ena_adapter *adapter); static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); static int ena_start(struct rte_eth_dev *dev); +static void ena_stop(struct rte_eth_dev *dev); static void ena_close(struct rte_eth_dev *dev); +static int ena_dev_reset(struct rte_eth_dev *dev); static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static void ena_rx_queue_release_all(struct rte_eth_dev *dev); static void ena_tx_queue_release_all(struct rte_eth_dev *dev); @@ -238,10 +252,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); static int ena_get_sset_count(struct rte_eth_dev *dev, int sset); -static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter, - uint64_t offloads); -static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter, - uint64_t offloads); +static void ena_interrupt_handler_rte(void *cb_arg); +static void ena_timer_wd_callback(struct rte_timer *timer, void *arg); static const struct eth_dev_ops ena_dev_ops = { .dev_configure = ena_dev_configure, @@ -249,12 +261,14 @@ static const struct eth_dev_ops ena_dev_ops = { .rx_queue_setup = ena_rx_queue_setup, .tx_queue_setup = ena_tx_queue_setup, .dev_start = ena_start, + .dev_stop = ena_stop, .link_update = ena_link_update, .stats_get = ena_stats_get, .mtu_set = ena_mtu_set, .rx_queue_release = ena_rx_queue_release, .tx_queue_release = ena_tx_queue_release, .dev_close = ena_close, + .dev_reset = ena_dev_reset, .reta_update = ena_rss_reta_update, .reta_query = ena_rss_reta_query, }; @@ -264,11 +278,15 @@ static const struct eth_dev_ops ena_dev_ops = { static inline int ena_cpu_to_node(int cpu) { struct rte_config *config = rte_eal_get_configuration(); + struct rte_fbarray *arr = &config->mem_config->memzones; + const struct rte_memzone *mz; + + if (unlikely(cpu >= RTE_MAX_MEMZONE)) + return NUMA_NO_NODE; - if (likely(cpu < RTE_MAX_MEMZONE)) - return config->mem_config->memzone[cpu].socket_id; + mz = rte_fbarray_get(arr, cpu); - return NUMA_NO_NODE; + return mz->socket_id; } static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, @@ -346,9 +364,6 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, ena_meta->mss = mbuf->tso_segsz; ena_meta->l3_hdr_len = mbuf->l3_len; ena_meta->l3_hdr_offset = mbuf->l2_len; - /* this param needed only for TSO */ - ena_meta->l3_outer_hdr_len = 0; - ena_meta->l3_outer_hdr_offset = 0; ena_tx_ctx->meta_valid = true; } else { @@ -356,6 +371,40 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, } }
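+/*
+ * Request id bounds checking for the out of order completion paths: an id
+ * outside the ring, or a Tx id whose slot has no mbuf attached, means the
+ * completion cannot be trusted, so these helpers record a reset reason and
+ * request a device reset instead of touching stale buffers.
+ */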
+static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) +{ + if (likely(req_id < rx_ring->ring_size)) + return 0; + + RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id); + + rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; + rx_ring->adapter->trigger_reset = true; + + return -EFAULT; +} + +static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) +{ + struct ena_tx_buffer *tx_info = NULL; + + if (likely(req_id < tx_ring->ring_size)) { + tx_info = &tx_ring->tx_buffer_info[req_id]; + if (likely(tx_info->mbuf)) + return 0; + } + + if (tx_info) + RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n"); + else + RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id); + + /* Trigger device reset */ + tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; + tx_ring->adapter->trigger_reset = true; + return -EFAULT; +} + static void ena_config_host_info(struct ena_com_dev *ena_dev) { struct ena_admin_host_info *host_info; @@ -387,9 +436,12 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) rc = ena_com_set_host_attributes(ena_dev); if (rc) { - RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); - if (rc != -EPERM) - goto err; + if (rc == -ENA_COM_UNSUPPORTED) + RTE_LOG(WARNING, PMD, "Cannot set host attributes\n"); + else + RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); + + goto err; } return; @@ -440,9 +492,12 @@ static void ena_config_debug_area(struct ena_adapter *adapter) rc = ena_com_set_host_attributes(&adapter->ena_dev); if (rc) { - RTE_LOG(WARNING, PMD, "Cannot set host attributes\n"); - if (rc != -EPERM) - goto err; + if (rc == -ENA_COM_UNSUPPORTED) + RTE_LOG(WARNING, PMD, "Cannot set host attributes\n"); + else + RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); + + goto err; } return; @@ -455,12 +510,76 @@ static void ena_close(struct rte_eth_dev *dev) struct ena_adapter *adapter = (struct ena_adapter *)(dev->data->dev_private); - adapter->state = ENA_ADAPTER_STATE_STOPPED; + ena_stop(dev); + adapter->state = ENA_ADAPTER_STATE_CLOSED; ena_rx_queue_release_all(dev); ena_tx_queue_release_all(dev); } +static int +ena_dev_reset(struct rte_eth_dev *dev) +{ + struct rte_mempool *mb_pool_rx[ENA_MAX_NUM_QUEUES]; + struct rte_eth_dev *eth_dev; + struct rte_pci_device *pci_dev; + struct rte_intr_handle *intr_handle; + struct ena_com_dev *ena_dev; + struct ena_com_dev_get_features_ctx get_feat_ctx; + struct ena_adapter *adapter; + int nb_queues; + int rc, i; + bool wd_state; + + adapter = (struct ena_adapter *)(dev->data->dev_private); + ena_dev = &adapter->ena_dev; + eth_dev = adapter->rte_dev; + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + intr_handle = &pci_dev->intr_handle; + nb_queues = eth_dev->data->nb_rx_queues; + + ena_com_set_admin_running_state(ena_dev, false); + + rc = ena_com_dev_reset(ena_dev, adapter->reset_reason); + if (rc) + RTE_LOG(ERR, PMD, "Device reset failed\n"); + + for (i = 0; i < nb_queues; i++) + mb_pool_rx[i] = adapter->rx_ring[i].mb_pool; + + ena_rx_queue_release_all(eth_dev); + ena_tx_queue_release_all(eth_dev); + + rte_intr_disable(intr_handle); + + ena_com_abort_admin_commands(ena_dev); + ena_com_wait_for_abort_completion(ena_dev); + ena_com_admin_destroy(ena_dev); + ena_com_mmio_reg_read_request_destroy(ena_dev); + + rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state); + if (rc) { + PMD_INIT_LOG(CRIT, "Cannot initialize device\n"); + return rc; + } + adapter->wd_state = wd_state; + + rte_intr_enable(intr_handle); + ena_com_set_admin_polling_mode(ena_dev, false); + 
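+	/* With interrupts re-enabled and admin polling switched off, re-arm
+	 * AENQ delivery before recreating the IO queues below.
+	 */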
ena_com_admin_aenq_enable(ena_dev); + + for (i = 0; i < nb_queues; ++i) + ena_rx_queue_setup(eth_dev, i, adapter->rx_ring_size, 0, NULL, + mb_pool_rx[i]); + + for (i = 0; i < nb_queues; ++i) + ena_tx_queue_setup(eth_dev, i, adapter->tx_ring_size, 0, NULL); + + adapter->trigger_reset = false; + + return 0; +} + static int ena_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) @@ -468,7 +587,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev, struct ena_adapter *adapter = (struct ena_adapter *)(dev->data->dev_private); struct ena_com_dev *ena_dev = &adapter->ena_dev; - int ret, i; + int rc, i; u16 entry_value; int conf_idx; int idx; @@ -480,8 +599,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev, RTE_LOG(WARNING, PMD, "indirection table %d is bigger than supported (%d)\n", reta_size, ENA_RX_RSS_TABLE_SIZE); - ret = -EINVAL; - goto err; + return -EINVAL; } for (i = 0 ; i < reta_size ; i++) { @@ -493,29 +611,28 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev, if (TEST_BIT(reta_conf[conf_idx].mask, idx)) { entry_value = ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]); - ret = ena_com_indirect_table_fill_entry(ena_dev, - i, - entry_value); - if (unlikely(ret && (ret != ENA_COM_PERMISSION))) { + + rc = ena_com_indirect_table_fill_entry(ena_dev, + i, + entry_value); + if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { RTE_LOG(ERR, PMD, "Cannot fill indirect table\n"); - ret = -ENOTSUP; - goto err; + return rc; } } } - ret = ena_com_indirect_table_set(ena_dev); - if (unlikely(ret && (ret != ENA_COM_PERMISSION))) { + rc = ena_com_indirect_table_set(ena_dev); + if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n"); - ret = -ENOTSUP; - goto err; + return rc; } RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n", __func__, reta_size, adapter->rte_dev->data->port_id); -err: - return ret; + + return 0; } /* Query redirection table. 
*/ @@ -526,7 +643,7 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev, struct ena_adapter *adapter = (struct ena_adapter *)(dev->data->dev_private); struct ena_com_dev *ena_dev = &adapter->ena_dev; - int ret; + int rc; int i; u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0}; int reta_conf_idx; @@ -536,11 +653,10 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev, (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL))) return -EINVAL; - ret = ena_com_indirect_table_get(ena_dev, indirect_table); - if (unlikely(ret && (ret != ENA_COM_PERMISSION))) { + rc = ena_com_indirect_table_get(ena_dev, indirect_table); + if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { RTE_LOG(ERR, PMD, "cannot get indirect table\n"); - ret = -ENOTSUP; - goto err; + return -ENOTSUP; } for (i = 0 ; i < reta_size ; i++) { @@ -550,8 +666,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev, reta_conf[reta_conf_idx].reta[reta_idx] = ENA_IO_RXQ_IDX_REV(indirect_table[i]); } -err: - return ret; + + return 0; } static int ena_rss_init_default(struct ena_adapter *adapter) @@ -571,7 +687,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter) val = i % nb_rx_queues; rc = ena_com_indirect_table_fill_entry(ena_dev, i, ENA_IO_RXQ_IDX(val)); - if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { + if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { RTE_LOG(ERR, PMD, "Cannot fill indirect table\n"); goto err_fill_indir; } @@ -579,19 +695,19 @@ static int ena_rss_init_default(struct ena_adapter *adapter) rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, ENA_HASH_KEY_SIZE, 0xFFFFFFFF); - if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { + if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { RTE_LOG(INFO, PMD, "Cannot fill hash function\n"); goto err_fill_indir; } rc = ena_com_set_default_hash_ctrl(ena_dev); - if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { + if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { RTE_LOG(INFO, PMD, "Cannot fill hash control\n"); goto err_fill_indir; } rc = ena_com_indirect_table_set(ena_dev); - if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { + if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n"); goto err_fill_indir; } @@ -650,6 +766,10 @@ static void ena_rx_queue_release(void *queue) rte_free(ring->rx_buffer_info); ring->rx_buffer_info = NULL; + if (ring->empty_rx_reqs) + rte_free(ring->empty_rx_reqs); + ring->empty_rx_reqs = NULL; + ring->configured = 0; RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n", @@ -723,9 +843,12 @@ static int ena_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) { struct rte_eth_link *link = &dev->data->dev_link; + struct ena_adapter *adapter; + + adapter = (struct ena_adapter *)(dev->data->dev_private); - link->link_status = 1; - link->link_speed = ETH_SPEED_NUM_10G; + link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; + link->link_speed = ETH_SPEED_NUM_NONE; link->link_duplex = ETH_LINK_FULL_DUPLEX; return 0; @@ -737,13 +860,18 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev, struct ena_adapter *adapter = (struct ena_adapter *)(dev->data->dev_private); struct ena_ring *queues = NULL; + int nb_queues; int i = 0; int rc = 0; - queues = (ring_type == ENA_RING_TYPE_RX) ? 
- adapter->rx_ring : adapter->tx_ring; - - for (i = 0; i < adapter->num_queues; i++) { + if (ring_type == ENA_RING_TYPE_RX) { + queues = adapter->rx_ring; + nb_queues = dev->data->nb_rx_queues; + } else { + queues = adapter->tx_ring; + nb_queues = dev->data->nb_tx_queues; + } + for (i = 0; i < nb_queues; i++) { if (queues[i].configured) { if (ring_type == ENA_RING_TYPE_RX) { ena_assert_msg( @@ -761,7 +889,7 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev, PMD_INIT_LOG(ERR, "failed to restart queue %d type(%d)", i, ring_type); - return -1; + return rc; } } } @@ -785,9 +913,11 @@ static int ena_check_valid_conf(struct ena_adapter *adapter) { uint32_t max_frame_len = ena_get_mtu_conf(adapter); - if (max_frame_len > adapter->max_mtu) { - PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len); - return -1; + if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) { + PMD_INIT_LOG(ERR, "Unsupported MTU of %d. " + "max mtu: %d, min mtu: %d\n", + max_frame_len, adapter->max_mtu, ENA_MIN_MTU); + return ENA_COM_UNSUPPORTED; } return 0; @@ -795,6 +925,7 @@ static int ena_check_valid_conf(struct ena_adapter *adapter) static int ena_calc_queue_size(struct ena_com_dev *ena_dev, + u16 *max_tx_sgl_size, struct ena_com_dev_get_features_ctx *get_feat_ctx) { uint32_t queue_size = ENA_DEFAULT_RING_SIZE; @@ -812,11 +943,14 @@ ena_calc_queue_size(struct ena_com_dev *ena_dev, if (!rte_is_power_of_2(queue_size)) queue_size = rte_align32pow2(queue_size >> 1); - if (queue_size == 0) { + if (unlikely(queue_size == 0)) { PMD_INIT_LOG(ERR, "Invalid queue size"); return -EFAULT; } + *max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, + get_feat_ctx->max_queues.max_packet_tx_descs); + return queue_size; } @@ -881,12 +1015,12 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) ena_dev = &adapter->ena_dev; ena_assert_msg(ena_dev != NULL, "Uninitialized device"); - if (mtu > ena_get_mtu_conf(adapter)) { + if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) { RTE_LOG(ERR, PMD, - "Given MTU (%d) exceeds maximum MTU supported (%d)\n", - mtu, ena_get_mtu_conf(adapter)); - rc = -EINVAL; - goto err; + "Invalid MTU setting. 
new_mtu: %d " + "max mtu: %d min mtu: %d\n", + mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU); + return -EINVAL; } rc = ena_com_set_dev_mtu(ena_dev, mtu); @@ -895,7 +1029,6 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) else RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu); -err: return rc; } @@ -903,14 +1036,9 @@ static int ena_start(struct rte_eth_dev *dev) { struct ena_adapter *adapter = (struct ena_adapter *)(dev->data->dev_private); + uint64_t ticks; int rc = 0; - if (!(adapter->state == ENA_ADAPTER_STATE_CONFIG || - adapter->state == ENA_ADAPTER_STATE_STOPPED)) { - PMD_INIT_LOG(ERR, "API violation"); - return -1; - } - rc = ena_check_valid_conf(adapter); if (rc) return rc; @@ -924,7 +1052,7 @@ static int ena_start(struct rte_eth_dev *dev) return rc; if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode & - ETH_MQ_RX_RSS_FLAG) { + ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) { rc = ena_rss_init_default(adapter); if (rc) return rc; @@ -932,11 +1060,28 @@ static int ena_start(struct rte_eth_dev *dev) ena_stats_restart(dev); + adapter->timestamp_wd = rte_get_timer_cycles(); + adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; + + ticks = rte_get_timer_hz(); + rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), + ena_timer_wd_callback, adapter); + adapter->state = ENA_ADAPTER_STATE_RUNNING; return 0; } +static void ena_stop(struct rte_eth_dev *dev) +{ + struct ena_adapter *adapter = + (struct ena_adapter *)(dev->data->dev_private); + + rte_timer_stop_sync(&adapter->timer_wd); + + adapter->state = ENA_ADAPTER_STATE_STOPPED; +} + static int ena_queue_restart(struct ena_ring *ring) { int rc, bufs_num; @@ -954,7 +1099,7 @@ static int ena_queue_restart(struct ena_ring *ring) rc = ena_populate_rx_queue(ring, bufs_num); if (rc != bufs_num) { PMD_INIT_LOG(ERR, "Failed to populate rx ring !"); - return (-1); + return ENA_COM_FAULT; } return 0; @@ -984,12 +1129,12 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, RTE_LOG(CRIT, PMD, "API violation. Queue %d is already configured\n", queue_idx); - return -1; + return ENA_COM_FAULT; } if (!rte_is_power_of_2(nb_desc)) { RTE_LOG(ERR, PMD, - "Unsupported size of RX queue: %d is not a power of 2.", + "Unsupported size of TX queue: %d is not a power of 2.", nb_desc); return -EINVAL; } @@ -1001,12 +1146,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, return -EINVAL; } - if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE && - !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) { - RTE_LOG(ERR, PMD, "Unsupported queue offloads\n"); - return -EINVAL; - } - ena_qid = ENA_IO_TXQ_IDX(queue_idx); ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; @@ -1021,6 +1160,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, RTE_LOG(ERR, PMD, "failed to create io TX queue #%d (qid:%d) rc: %d\n", queue_idx, ena_qid, rc); + return rc; } txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid]; txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid]; @@ -1032,10 +1172,11 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, RTE_LOG(ERR, PMD, "Failed to get TX queue handlers. 
TX queue num %d rc: %d\n", queue_idx, rc); - ena_com_destroy_io_queue(ena_dev, ena_qid); - goto err; + goto err_destroy_io_queue; } + ena_com_update_numa_node(txq->ena_com_io_cq, ctx.numa_node); + txq->port_id = dev->data->port_id; txq->next_to_clean = 0; txq->next_to_use = 0; @@ -1047,7 +1188,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE); if (!txq->tx_buffer_info) { RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n"); - return -ENOMEM; + rc = -ENOMEM; + goto err_destroy_io_queue; } txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs", @@ -1055,18 +1197,29 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE); if (!txq->empty_tx_reqs) { RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n"); - rte_free(txq->tx_buffer_info); - return -ENOMEM; + rc = -ENOMEM; + goto err_free; } + for (i = 0; i < txq->ring_size; i++) txq->empty_tx_reqs[i] = i; - txq->offloads = tx_conf->offloads; + if (tx_conf != NULL) { + txq->offloads = + tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + } /* Store pointer to this queue in upper layer */ txq->configured = 1; dev->data->tx_queues[queue_idx] = txq; -err: + + return 0; + +err_free: + rte_free(txq->tx_buffer_info); + +err_destroy_io_queue: + ena_com_destroy_io_queue(ena_dev, ena_qid); return rc; } @@ -1074,7 +1227,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, __rte_unused unsigned int socket_id, - const struct rte_eth_rxconf *rx_conf, + __rte_unused const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { struct ena_com_create_io_ctx ctx = @@ -1085,7 +1238,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, (struct ena_adapter *)(dev->data->dev_private); struct ena_ring *rxq = NULL; uint16_t ena_qid = 0; - int rc = 0; + int i, rc = 0; struct ena_com_dev *ena_dev = &adapter->ena_dev; rxq = &adapter->rx_ring[queue_idx]; @@ -1093,12 +1246,12 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, RTE_LOG(CRIT, PMD, "API violation. Queue %d is already configured\n", queue_idx); - return -1; + return ENA_COM_FAULT; } if (!rte_is_power_of_2(nb_desc)) { RTE_LOG(ERR, PMD, - "Unsupported size of TX queue: %d is not a power of 2.", + "Unsupported size of RX queue: %d is not a power of 2.", nb_desc); return -EINVAL; } @@ -1110,11 +1263,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, return -EINVAL; } - if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) { - RTE_LOG(ERR, PMD, "Unsupported queue offloads\n"); - return -EINVAL; - } - ena_qid = ENA_IO_RXQ_IDX(queue_idx); ctx.qid = ena_qid; @@ -1125,9 +1273,11 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, ctx.numa_node = ena_cpu_to_node(queue_idx); rc = ena_com_create_io_queue(ena_dev, &ctx); - if (rc) + if (rc) { RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n", queue_idx, rc); + return rc; + } rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid]; rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid]; @@ -1140,6 +1290,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, "Failed to get RX queue handlers. 
RX queue num %d rc: %d\n", queue_idx, rc); ena_com_destroy_io_queue(ena_dev, ena_qid); + return rc; } rxq->port_id = dev->data->port_id; @@ -1153,9 +1304,24 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE); if (!rxq->rx_buffer_info) { RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n"); + ena_com_destroy_io_queue(ena_dev, ena_qid); return -ENOMEM; } + rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs", + sizeof(uint16_t) * nb_desc, + RTE_CACHE_LINE_SIZE); + if (!rxq->empty_rx_reqs) { + RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n"); + rte_free(rxq->rx_buffer_info); + rxq->rx_buffer_info = NULL; + ena_com_destroy_io_queue(ena_dev, ena_qid); + return -ENOMEM; + } + + for (i = 0; i < nb_desc; i++) + rxq->empty_rx_reqs[i] = i; + /* Store pointer to this queue in upper layer */ rxq->configured = 1; dev->data->rx_queues[queue_idx] = rxq; @@ -1170,7 +1336,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) uint16_t ring_size = rxq->ring_size; uint16_t ring_mask = ring_size - 1; uint16_t next_to_use = rxq->next_to_use; - uint16_t in_use; + uint16_t in_use, req_id; struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0]; if (unlikely(!count)) @@ -1198,12 +1364,18 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) struct ena_com_buf ebuf; rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]); + + req_id = rxq->empty_rx_reqs[next_to_use_masked]; + rc = validate_rx_req_id(rxq, req_id); + if (unlikely(rc < 0)) + break; + /* prepare physical address for DMA transaction */ ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; /* pass resource to device */ rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq, - &ebuf, next_to_use_masked); + &ebuf, req_id); if (unlikely(rc)) { rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf), count - i); @@ -1213,9 +1385,17 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) next_to_use++; } + if (unlikely(i < count)) + RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d " + "buffers (from %d)\n", rxq->id, i, count); + /* When we submitted free resources to device... 
*/ - if (i > 0) { - /* ...let HW know that it can fill buffers with data */ + if (likely(i > 0)) { + /* ...let HW know that it can fill buffers with data + * + * Add memory barrier to make sure the desc were written before + * issue a doorbell + */ rte_wmb(); ena_com_write_sq_doorbell(rxq->ena_com_io_sq); @@ -1226,8 +1406,10 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) } static int ena_device_init(struct ena_com_dev *ena_dev, - struct ena_com_dev_get_features_ctx *get_feat_ctx) + struct ena_com_dev_get_features_ctx *get_feat_ctx, + bool *wd_state) { + uint32_t aenq_groups; int rc; bool readless_supported; @@ -1247,7 +1429,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, ena_com_set_mmio_read_mode(ena_dev, readless_supported); /* reset device */ - rc = ena_com_dev_reset(ena_dev); + rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); if (rc) { RTE_LOG(ERR, PMD, "cannot reset device\n"); goto err_mmio_read_less; @@ -1263,7 +1445,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); /* ENA device administration layer init */ - rc = ena_com_admin_init(ena_dev, NULL, true); + rc = ena_com_admin_init(ena_dev, &aenq_handlers, true); if (rc) { RTE_LOG(ERR, PMD, "cannot initialize ena admin queue with device\n"); @@ -1286,6 +1468,21 @@ static int ena_device_init(struct ena_com_dev *ena_dev, goto err_admin_init; } + aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | + BIT(ENA_ADMIN_NOTIFICATION) | + BIT(ENA_ADMIN_KEEP_ALIVE) | + BIT(ENA_ADMIN_FATAL_ERROR) | + BIT(ENA_ADMIN_WARNING); + + aenq_groups &= get_feat_ctx->aenq.supported_groups; + rc = ena_com_set_aenq_config(ena_dev, aenq_groups); + if (rc) { + RTE_LOG(ERR, PMD, "Cannot configure aenq groups rc: %d\n", rc); + goto err_admin_init; + } + + *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); + return 0; err_admin_init: @@ -1297,16 +1494,89 @@ err_mmio_read_less: return rc; } +static void ena_interrupt_handler_rte(void *cb_arg) +{ + struct ena_adapter *adapter = (struct ena_adapter *)cb_arg; + struct ena_com_dev *ena_dev = &adapter->ena_dev; + + ena_com_admin_q_comp_intr_handler(ena_dev); + if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) + ena_com_aenq_intr_handler(ena_dev, adapter); +} + +static void check_for_missing_keep_alive(struct ena_adapter *adapter) +{ + if (!adapter->wd_state) + return; + + if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) + return; + + if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= + adapter->keep_alive_timeout)) { + RTE_LOG(ERR, PMD, "Keep alive timeout\n"); + adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; + adapter->trigger_reset = true; + } +} + +/* Check if admin queue is enabled */ +static void check_for_admin_com_state(struct ena_adapter *adapter) +{ + if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { + RTE_LOG(ERR, PMD, "ENA admin queue is not in running state!\n"); + adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; + adapter->trigger_reset = true; + } +} + +static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, + void *arg) +{ + struct ena_adapter *adapter = (struct ena_adapter *)arg; + struct rte_eth_dev *dev = adapter->rte_dev; + + check_for_missing_keep_alive(adapter); + check_for_admin_com_state(adapter); + + if (unlikely(adapter->trigger_reset)) { + RTE_LOG(ERR, PMD, "Trigger reset is on\n"); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, + NULL); + } +} + +static int ena_calc_io_queue_num(__rte_unused 
struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + int io_sq_num, io_cq_num, io_queue_num; + + io_sq_num = get_feat_ctx->max_queues.max_sq_num; + io_cq_num = get_feat_ctx->max_queues.max_cq_num; + + io_queue_num = RTE_MIN(io_sq_num, io_cq_num); + + if (unlikely(io_queue_num == 0)) { + RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n"); + return -EFAULT; + } + + return io_queue_num; +} + static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) { struct rte_pci_device *pci_dev; + struct rte_intr_handle *intr_handle; struct ena_adapter *adapter = (struct ena_adapter *)(eth_dev->data->dev_private); struct ena_com_dev *ena_dev = &adapter->ena_dev; struct ena_com_dev_get_features_ctx get_feat_ctx; int queue_size, rc; + u16 tx_sgl_size = 0; static int adapters_found; + bool wd_state; memset(adapter, 0, sizeof(struct ena_adapter)); ena_dev = &adapter->ena_dev; @@ -1330,19 +1600,16 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) pci_dev->addr.devid, pci_dev->addr.function); + intr_handle = &pci_dev->intr_handle; + adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; - /* Present ENA_MEM_BAR indicates available LLQ mode. - * Use corresponding policy - */ - if (adapter->dev_mem_base) - ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; - else if (adapter->regs) - ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; - else + if (!adapter->regs) { PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)", ENA_REGS_BAR); + return -ENXIO; + } ena_dev->reg_bar = adapter->regs; ena_dev->dmadev = adapter->pdev; @@ -1353,36 +1620,28 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) adapter->id_number); /* device specific initialization routine */ - rc = ena_device_init(ena_dev, &get_feat_ctx); + rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state); if (rc) { PMD_INIT_LOG(CRIT, "Failed to init ENA device"); - return -1; - } - - if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - if (get_feat_ctx.max_queues.max_llq_num == 0) { - PMD_INIT_LOG(ERR, - "Trying to use LLQ but llq_num is 0.\n" - "Fall back into regular queues."); - ena_dev->tx_mem_queue_type = - ENA_ADMIN_PLACEMENT_POLICY_HOST; - adapter->num_queues = - get_feat_ctx.max_queues.max_sq_num; - } else { - adapter->num_queues = - get_feat_ctx.max_queues.max_llq_num; - } - } else { - adapter->num_queues = get_feat_ctx.max_queues.max_sq_num; + goto err; } + adapter->wd_state = wd_state; - queue_size = ena_calc_queue_size(ena_dev, &get_feat_ctx); - if ((queue_size <= 0) || (adapter->num_queues <= 0)) - return -EFAULT; + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + adapter->num_queues = ena_calc_io_queue_num(ena_dev, + &get_feat_ctx); + + queue_size = ena_calc_queue_size(ena_dev, &tx_sgl_size, &get_feat_ctx); + if (queue_size <= 0 || adapter->num_queues <= 0) { + rc = -EFAULT; + goto err_device_destroy; + } adapter->tx_ring_size = queue_size; adapter->rx_ring_size = queue_size; + adapter->max_tx_sgl_size = tx_sgl_size; + /* prepare ring structures */ ena_init_rings(adapter); @@ -1405,58 +1664,77 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) RTE_CACHE_LINE_SIZE); if (!adapter->drv_stats) { RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n"); - return -ENOMEM; + rc = -ENOMEM; + goto err_delete_debug_area; } + rte_intr_callback_register(intr_handle, + ena_interrupt_handler_rte, + adapter); + rte_intr_enable(intr_handle); + 
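+	/* The interrupt handler is registered and enabled; the admin queue
+	 * can now run in interrupt (non-polling) mode with AENQ enabled.
+	 */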
ena_com_set_admin_polling_mode(ena_dev, false); + ena_com_admin_aenq_enable(ena_dev); + + if (adapters_found == 0) + rte_timer_subsystem_init(); + rte_timer_init(&adapter->timer_wd); + adapters_found++; adapter->state = ENA_ADAPTER_STATE_INIT; return 0; + +err_delete_debug_area: + ena_com_delete_debug_area(ena_dev); + +err_device_destroy: + ena_com_delete_host_info(ena_dev); + ena_com_admin_destroy(ena_dev); + +err: + return rc; } -static int ena_dev_configure(struct rte_eth_dev *dev) +static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) { + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ena_adapter *adapter = - (struct ena_adapter *)(dev->data->dev_private); - uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; - uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; + (struct ena_adapter *)(eth_dev->data->dev_private); - if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) { - RTE_LOG(ERR, PMD, "Some Tx offloads are not supported " - "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n", - tx_offloads, adapter->tx_supported_offloads); - return -ENOTSUP; - } + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; - if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) { - RTE_LOG(ERR, PMD, "Some Rx offloads are not supported " - "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n", - rx_offloads, adapter->rx_supported_offloads); - return -ENOTSUP; - } + if (adapter->state != ENA_ADAPTER_STATE_CLOSED) + ena_close(eth_dev); - if (!(adapter->state == ENA_ADAPTER_STATE_INIT || - adapter->state == ENA_ADAPTER_STATE_STOPPED)) { - PMD_INIT_LOG(ERR, "Illegal adapter state: %d", - adapter->state); - return -1; - } + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + eth_dev->tx_pkt_prepare = NULL; - switch (adapter->state) { - case ENA_ADAPTER_STATE_INIT: - case ENA_ADAPTER_STATE_STOPPED: - adapter->state = ENA_ADAPTER_STATE_CONFIG; - break; - case ENA_ADAPTER_STATE_CONFIG: - RTE_LOG(WARNING, PMD, - "Ivalid driver state while trying to configure device\n"); - break; - default: - break; - } + rte_free(adapter->drv_stats); + adapter->drv_stats = NULL; + + rte_intr_disable(intr_handle); + rte_intr_callback_unregister(intr_handle, + ena_interrupt_handler_rte, + adapter); + + adapter->state = ENA_ADAPTER_STATE_FREE; + + return 0; +} - adapter->tx_selected_offloads = tx_offloads; - adapter->rx_selected_offloads = rx_offloads; +static int ena_dev_configure(struct rte_eth_dev *dev) +{ + struct ena_adapter *adapter = + (struct ena_adapter *)(dev->data->dev_private); + + adapter->state = ENA_ADAPTER_STATE_CONFIG; + + adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads; + adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads; return 0; } @@ -1473,6 +1751,7 @@ static void ena_init_rings(struct ena_adapter *adapter) ring->id = i; ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; + ring->sgl_size = adapter->max_tx_sgl_size; } for (i = 0; i < adapter->num_queues; i++) { @@ -1485,32 +1764,6 @@ static void ena_init_rings(struct ena_adapter *adapter) } } -static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter, - uint64_t offloads) -{ - uint64_t port_offloads = adapter->tx_selected_offloads; - - /* Check if port supports all requested offloads. - * True if all offloads selected for queue are set for port. 
- */ - if ((offloads & port_offloads) != offloads) - return false; - return true; -} - -static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter, - uint64_t offloads) -{ - uint64_t port_offloads = adapter->rx_selected_offloads; - - /* Check if port supports all requested offloads. - * True if all offloads selected for queue are set for port. - */ - if ((offloads & port_offloads) != offloads) - return false; - return true; -} - static void ena_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { @@ -1527,8 +1780,6 @@ static void ena_infos_get(struct rte_eth_dev *dev, ena_dev = &adapter->ena_dev; ena_assert_msg(ena_dev != NULL, "Uninitialized device"); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); - dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | @@ -1581,6 +1832,16 @@ static void ena_infos_get(struct rte_eth_dev *dev, adapter->tx_supported_offloads = tx_feat; adapter->rx_supported_offloads = rx_feat; + + dev_info->rx_desc_lim.nb_max = ENA_MAX_RING_DESC; + dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; + + dev_info->tx_desc_lim.nb_max = ENA_MAX_RING_DESC; + dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; + dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, + feat.max_queues.max_packet_tx_descs); + dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, + feat.max_queues.max_packet_tx_descs); } static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, @@ -1591,6 +1852,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, unsigned int ring_mask = ring_size - 1; uint16_t next_to_clean = rx_ring->next_to_clean; uint16_t desc_in_use = 0; + uint16_t req_id; unsigned int recv_idx = 0; struct rte_mbuf *mbuf = NULL; struct rte_mbuf *mbuf_head = NULL; @@ -1624,6 +1886,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, &ena_rx_ctx); if (unlikely(rc)) { RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc); + rx_ring->adapter->trigger_reset = true; return 0; } @@ -1631,12 +1894,17 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, break; while (segments < ena_rx_ctx.descs) { - mbuf = rx_buff_info[next_to_clean & ring_mask]; + req_id = ena_rx_ctx.ena_bufs[segments].req_id; + rc = validate_rx_req_id(rx_ring, req_id); + if (unlikely(rc)) + break; + + mbuf = rx_buff_info[req_id]; mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len; mbuf->data_off = RTE_PKTMBUF_HEADROOM; mbuf->refcnt = 1; mbuf->next = NULL; - if (segments == 0) { + if (unlikely(segments == 0)) { mbuf->nb_segs = ena_rx_ctx.descs; mbuf->port = rx_ring->port_id; mbuf->pkt_len = 0; @@ -1648,6 +1916,8 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, mbuf_head->pkt_len += mbuf->data_len; mbuf_prev = mbuf; + rx_ring->empty_rx_reqs[next_to_clean & ring_mask] = + req_id; segments++; next_to_clean++; } @@ -1741,6 +2011,46 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, return i; } +static void ena_update_hints(struct ena_adapter *adapter, + struct ena_admin_ena_hw_hints *hints) +{ + if (hints->admin_completion_tx_timeout) + adapter->ena_dev.admin_queue.completion_timeout = + hints->admin_completion_tx_timeout * 1000; + + if (hints->mmio_read_timeout) + /* convert to usec */ + adapter->ena_dev.mmio_read.reg_read_to = + hints->mmio_read_timeout * 1000; + + if (hints->driver_watchdog_timeout) { + if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) + adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; + else + // 
Convert msecs to ticks + adapter->keep_alive_timeout = + (hints->driver_watchdog_timeout * + rte_get_timer_hz()) / 1000; + } +} + +static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring, + struct rte_mbuf *mbuf) +{ + int num_segments, rc; + + num_segments = mbuf->nb_segs; + + if (likely(num_segments < tx_ring->sgl_size)) + return 0; + + rc = rte_pktmbuf_linearize(mbuf); + if (unlikely(rc)) + RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n"); + + return rc; +} + static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { @@ -1771,6 +2081,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { mbuf = tx_pkts[sent_idx]; + rc = ena_check_and_linearize_mbuf(tx_ring, mbuf); + if (unlikely(rc)) + break; + req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask]; tx_info = &tx_ring->tx_buffer_info[req_id]; tx_info->mbuf = mbuf; @@ -1848,6 +2162,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* Clear complete packets */ while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) { + rc = validate_tx_req_id(tx_ring, req_id); + if (rc) + break; + /* Get Tx info & store how many descs were processed */ tx_info = &tx_ring->tx_buffer_info[req_id]; total_tx_descs += tx_info->tx_descs; @@ -1875,6 +2193,9 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, return sent_idx; } +/********************************************************************* + * PMD configuration + *********************************************************************/ static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { @@ -1884,12 +2205,13 @@ static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, static int eth_ena_pci_remove(struct rte_pci_device *pci_dev) { - return rte_eth_dev_pci_generic_remove(pci_dev, NULL); + return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit); } static struct rte_pci_driver rte_ena_pmd = { .id_table = pci_id_ena_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | + RTE_PCI_DRV_WC_ACTIVATE, .probe = eth_ena_pci_probe, .remove = eth_ena_pci_remove, }; @@ -1898,9 +2220,7 @@ RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map); RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci"); -RTE_INIT(ena_init_log); -static void -ena_init_log(void) +RTE_INIT(ena_init_log) { ena_logtype_init = rte_log_register("pmd.net.ena.init"); if (ena_logtype_init >= 0) @@ -1909,3 +2229,75 @@ ena_init_log(void) if (ena_logtype_driver >= 0) rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE); } + +/****************************************************************************** + ******************************** AENQ Handlers ******************************* + *****************************************************************************/ +static void ena_update_on_link_change(void *adapter_data, + struct ena_admin_aenq_entry *aenq_e) +{ + struct rte_eth_dev *eth_dev; + struct ena_adapter *adapter; + struct ena_admin_aenq_link_change_desc *aenq_link_desc; + uint32_t status; + + adapter = (struct ena_adapter *)adapter_data; + aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; + eth_dev = adapter->rte_dev; + + status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc); + adapter->link_status = status; + 
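+	/* Fold the cached link status into dev->data->dev_link, then let
+	 * LSC subscribers know about the change.
+	 */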
+ ena_link_update(eth_dev, 0); + _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); +} + +static void ena_notification(void *data, + struct ena_admin_aenq_entry *aenq_e) +{ + struct ena_adapter *adapter = (struct ena_adapter *)data; + struct ena_admin_ena_hw_hints *hints; + + if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION) + RTE_LOG(WARNING, PMD, "Invalid group(%x) expected %x\n", + aenq_e->aenq_common_desc.group, + ENA_ADMIN_NOTIFICATION); + + switch (aenq_e->aenq_common_desc.syndrom) { + case ENA_ADMIN_UPDATE_HINTS: + hints = (struct ena_admin_ena_hw_hints *) + (&aenq_e->inline_data_w4); + ena_update_hints(adapter, hints); + break; + default: + RTE_LOG(ERR, PMD, "Invalid aenq notification link state %d\n", + aenq_e->aenq_common_desc.syndrom); + } +} + +static void ena_keep_alive(void *adapter_data, + __rte_unused struct ena_admin_aenq_entry *aenq_e) +{ + struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; + + adapter->timestamp_wd = rte_get_timer_cycles(); +} + +/** + * This handler will be called for unknown event group or unimplemented handlers + **/ +static void unimplemented_aenq_handler(__rte_unused void *data, + __rte_unused struct ena_admin_aenq_entry *aenq_e) +{ + RTE_LOG(ERR, PMD, "Unknown event was received or event with " + "unimplemented handler\n"); +} + +static struct ena_aenq_handlers aenq_handlers = { + .handlers = { + [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, + [ENA_ADMIN_NOTIFICATION] = ena_notification, + [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive + }, + .unimplemented_handler = unimplemented_aenq_handler +}; diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h index 394d05e0..2dc8129e 100644 --- a/drivers/net/ena/ena_ethdev.h +++ b/drivers/net/ena/ena_ethdev.h @@ -34,8 +34,10 @@ #ifndef _ENA_ETHDEV_H_ #define _ENA_ETHDEV_H_ +#include #include #include +#include #include "ena_com.h" @@ -48,8 +50,13 @@ #define ENA_NAME_MAX_LEN 20 #define ENA_PKT_MAX_BUFS 17 +#define ENA_MIN_MTU 128 + #define ENA_MMIO_DISABLE_REG_READ BIT(0) +#define ENA_WD_TIMEOUT_SEC 3 +#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz()) + struct ena_adapter; enum ena_ring_type { @@ -70,8 +77,12 @@ struct ena_ring { enum ena_ring_type type; enum ena_admin_placement_policy_type tx_mem_queue_type; - /* Holds the empty requests for TX OOO completions */ - uint16_t *empty_tx_reqs; + /* Holds the empty requests for TX/RX OOO completions */ + union { + uint16_t *empty_tx_reqs; + uint16_t *empty_rx_reqs; + }; + union { struct ena_tx_buffer *tx_buffer_info; /* context of tx packet */ struct rte_mbuf **rx_buffer_info; /* context of rx packet */ @@ -92,14 +103,16 @@ struct ena_ring { int configured; struct ena_adapter *adapter; uint64_t offloads; + u16 sgl_size; } __rte_cache_aligned; enum ena_adapter_state { ENA_ADAPTER_STATE_FREE = 0, ENA_ADAPTER_STATE_INIT = 1, - ENA_ADAPTER_STATE_RUNNING = 2, + ENA_ADAPTER_STATE_RUNNING = 2, ENA_ADAPTER_STATE_STOPPED = 3, ENA_ADAPTER_STATE_CONFIG = 4, + ENA_ADAPTER_STATE_CLOSED = 5, }; struct ena_driver_stats { @@ -157,6 +170,7 @@ struct ena_adapter { /* TX */ struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned; int tx_ring_size; + u16 max_tx_sgl_size; /* RX */ struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned; @@ -180,6 +194,18 @@ struct ena_adapter { uint64_t tx_selected_offloads; uint64_t rx_supported_offloads; uint64_t rx_selected_offloads; + + bool link_status; + + enum ena_regs_reset_reason_types reset_reason; + + struct rte_timer timer_wd; + uint64_t
timestamp_wd; + uint64_t keep_alive_timeout; + + bool trigger_reset; + + bool wd_state; }; #endif /* _ENA_ETHDEV_H_ */ diff --git a/drivers/net/ena/meson.build b/drivers/net/ena/meson.build new file mode 100644 index 00000000..091ca6e3 --- /dev/null +++ b/drivers/net/ena/meson.build @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +allow_experimental_apis = true +sources = files('ena_ethdev.c', + 'base/ena_com.c', + 'base/ena_eth_com.c') + +deps += ['timer'] + +includes += include_directories('base', 'base/ena_defs') diff --git a/drivers/net/enic/base/cq_desc.h b/drivers/net/enic/base/cq_desc.h index 7e138027..ae8847c6 100644 --- a/drivers/net/enic/base/cq_desc.h +++ b/drivers/net/enic/base/cq_desc.h @@ -38,6 +38,7 @@ struct cq_desc { #define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1) #define CQ_DESC_COLOR_MASK 1 #define CQ_DESC_COLOR_SHIFT 7 +#define CQ_DESC_COLOR_MASK_NOSHIFT 0x80 #define CQ_DESC_Q_NUM_BITS 10 #define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1) #define CQ_DESC_COMP_NDX_BITS 12 diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c index 05b595eb..16e8814a 100644 --- a/drivers/net/enic/base/vnic_dev.c +++ b/drivers/net/enic/base/vnic_dev.c @@ -10,6 +10,7 @@ #include "vnic_dev.h" #include "vnic_resource.h" #include "vnic_devcmd.h" +#include "vnic_nic.h" #include "vnic_stats.h" @@ -484,7 +485,7 @@ int vnic_dev_capable_adv_filters(struct vnic_dev *vdev) * Return true in filter_actions if supported */ int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode, - u8 *filter_tags) + u8 *filter_actions) { u64 args[4]; int err; @@ -492,14 +493,10 @@ int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode, err = vnic_dev_advanced_filters_cap(vdev, args, 4); - /* determine if filter tags are available */ - if (err) - *filter_tags = 0; - if ((args[2] == FILTER_CAP_MODE_V1) && - (args[3] & FILTER_ACTION_FILTER_ID_FLAG)) - *filter_tags = 1; - else - *filter_tags = 0; + /* determine supported filter actions */ + *filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */ + if (args[2] == FILTER_CAP_MODE_V1) + *filter_actions = args[3]; if (err || ((args[0] == 1) && (args[1] == 0))) { /* Adv filter Command not supported or adv filters available but @@ -531,6 +528,22 @@ parse_max_level: return 0; } +void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk, + bool *weak) +{ + u64 a0 = CMD_NIC_CFG, a1 = 0; + int wait = 1000; + int err; + + *cfg_chk = false; + *weak = false; + err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); + if (err == 0 && a0 != 0 && a1 != 0) { + *cfg_chk = true; + *weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK); + } +} + int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd) { u64 a0 = (u32)cmd, a1 = 0; @@ -587,17 +600,9 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) { u64 a0, a1; int wait = 1000; - static u32 instance; - char name[NAME_MAX]; - if (!vdev->stats) { - snprintf((char *)name, sizeof(name), - "vnic_stats-%u", instance++); - vdev->stats = vdev->alloc_consistent(vdev->priv, - sizeof(struct vnic_stats), &vdev->stats_pa, (u8 *)name); - if (!vdev->stats) - return -ENOMEM; - } + if (!vdev->stats) + return -ENOMEM; *stats = vdev->stats; a0 = vdev->stats_pa; @@ -922,6 +927,18 @@ u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev) return vdev->intr_coal_timer_info.max_usec; } +int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev) +{ + char name[NAME_MAX]; + static u32
instance; + + snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++); + vdev->stats = vdev->alloc_consistent(vdev->priv, + sizeof(struct vnic_stats), + &vdev->stats_pa, (u8 *)name); + return vdev->stats == NULL ? -ENOMEM : 0; +} + void vnic_dev_unregister(struct vnic_dev *vdev) { if (vdev) { @@ -1044,3 +1061,36 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, return ret; } + +int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config) +{ + u64 a0 = overlay; + u64 a1 = config; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait); +} + +int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, + u16 vxlan_udp_port_number) +{ + u64 a1 = vxlan_udp_port_number; + u64 a0 = overlay; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait); +} + +int vnic_dev_capable_vxlan(struct vnic_dev *vdev) +{ + u64 a0 = VIC_FEATURE_VXLAN; + u64 a1 = 0; + int wait = 1000; + int ret; + + ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait); + /* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */ + return ret == 0 && + (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) == + (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ); +} diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h index 8c099206..270a47bd 100644 --- a/drivers/net/enic/base/vnic_dev.h +++ b/drivers/net/enic/base/vnic_dev.h @@ -6,6 +6,8 @@ #ifndef _VNIC_DEV_H_ #define _VNIC_DEV_H_ +#include + #include #include @@ -108,7 +110,9 @@ int vnic_dev_fw_info(struct vnic_dev *vdev, int vnic_dev_capable_adv_filters(struct vnic_dev *vdev); int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd); int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode, - u8 *filter_tags); + u8 *filter_actions); +void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk, + bool *weak); int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev); int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size, void *value); @@ -165,6 +169,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar, unsigned int num_bars); struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev); +int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev); int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback); int vnic_dev_get_size(void); int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op); @@ -177,10 +182,9 @@ int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status); int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, struct filter_v2 *data, struct filter_action_v2 *action_v2); -#ifdef ENIC_VXLAN -int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev, +int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config); int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, u16 vxlan_udp_port_number); -#endif +int vnic_dev_capable_vxlan(struct vnic_dev *vdev); #endif /* _VNIC_DEV_H_ */ diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h index 6b95bc48..a22d8a76 100644 --- a/drivers/net/enic/base/vnic_devcmd.h +++ b/drivers/net/enic/base/vnic_devcmd.h @@ -138,9 +138,27 @@ enum vnic_devcmd_cmd { /* del VLAN id in (u16)a0 */ CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15), - /* nic_cfg in (u32)a0 */ + 
/* + * nic_cfg in (u32)a0 + * + * Capability query: + * out: (u64) a0= 1 if a1 is valid + * (u64) a1= (NIC_CFG bits supported) | (flags << 32) + * (flags are CMD_NIC_CFG_CAPF_xxx) + */ CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16), + /* + * nic_cfg_chk (same as nic_cfg, but may return error) + * in (u32)a0 + * + * Capability query: + * out: (u64) a0= 1 if a1 is valid + * (u64) a1= (NIC_CFG bits supported) | (flags << 32) + * (flags are CMD_NIC_CFG_CAPF_xxx) + */ + CMD_NIC_CFG_CHK = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16), + /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */ CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17), @@ -600,10 +618,14 @@ enum filter_cap_mode { /* flags for CMD_OPEN */ #define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ +#define CMD_OPENF_IG_DESCCACHE 0x2 /* Do not flush IG DESC cache */ /* flags for CMD_INIT */ #define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */ +/* flags for CMD_NIC_CFG */ +#define CMD_NIC_CFG_CAPF_UDP_WEAK (1ULL << 0) /* Bodega-style UDP RSS */ + /* flags for CMD_PACKET_FILTER */ #define CMD_PFILTER_DIRECTED 0x01 #define CMD_PFILTER_MULTICAST 0x02 @@ -840,7 +862,9 @@ struct filter_action { #define FILTER_ACTION_RQ_STEERING_FLAG (1 << 0) #define FILTER_ACTION_FILTER_ID_FLAG (1 << 1) +#define FILTER_ACTION_DROP_FLAG (1 << 2) #define FILTER_ACTION_V2_ALL (FILTER_ACTION_RQ_STEERING_FLAG \ + | FILTER_ACTION_DROP_FLAG \ | FILTER_ACTION_FILTER_ID_FLAG) /* Version 2 of filter action must be a strict extension of struct filter_action @@ -1077,6 +1101,18 @@ typedef enum { VIC_FEATURE_MAX, } vic_feature_t; +/* + * These flags are used in args[1] of devcmd CMD_GET_SUPP_FEATURE_VER + * to indicate to the host driver which VxLAN and Multi WQ features + * are supported + */ +#define FEATURE_VXLAN_IPV6_INNER (1 << 0) +#define FEATURE_VXLAN_IPV6_OUTER (1 << 1) +#define FEATURE_VXLAN_MULTI_WQ (1 << 2) + +#define FEATURE_VXLAN_IPV6 (FEATURE_VXLAN_IPV6_INNER | \ + FEATURE_VXLAN_IPV6_OUTER) + /* * CMD_CONFIG_GRPINTR subcommands */ diff --git a/drivers/net/enic/base/vnic_enet.h b/drivers/net/enic/base/vnic_enet.h index 26918335..901f3b46 100644 --- a/drivers/net/enic/base/vnic_enet.h +++ b/drivers/net/enic/base/vnic_enet.h @@ -52,6 +52,9 @@ struct vnic_enet_config { #define VENETF_VXLAN 0x10000 /* VxLAN offload */ #define VENETF_NVGRE 0x20000 /* NVGRE offload */ #define VENETF_GRPINTR 0x40000 /* group interrupt */ +#define VENETF_NICSWITCH 0x80000 /* NICSWITCH enabled */ +#define VENETF_RSSHASH_UDPIPV4 0x100000 /* Hash on UDP + IPv4 fields */ +#define VENETF_RSSHASH_UDPIPV6 0x200000 /* Hash on UDP + IPv6 fields */ #define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */ #define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */ diff --git a/drivers/net/enic/base/vnic_nic.h b/drivers/net/enic/base/vnic_nic.h index a753b3a5..16040852 100644 --- a/drivers/net/enic/base/vnic_nic.h +++ b/drivers/net/enic/base/vnic_nic.h @@ -27,12 +27,14 @@ #define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL #define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24 +#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 (1 << 0) #define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1) #define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2) #define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3) #define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4) -#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5) -#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6) +#define NIC_CFG_RSS_HASH_TYPE_RSVD1 (1 << 5) +#define NIC_CFG_RSS_HASH_TYPE_RSVD2 (1 << 6) +#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV6 (1 << 7)
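/*
 * A minimal sketch (not upstream code) of how the reworked hash-type bits
 * above can be derived from a DPDK rss_hf mask, including the new UDP
 * bits. The helper name is hypothetical; it assumes rte_ethdev.h and the
 * NIC_CFG_RSS_HASH_TYPE_* definitions are in scope, with u8 as used in
 * these vnic headers.
 */
static inline u8
example_rss_hf_to_nic_cfg_hash_type(uint64_t rss_hf)
{
	u8 hash_type = 0;

	/* IPv4: plain, TCP, and the newly added UDP hashing */
	if (rss_hf & ETH_RSS_IPV4)
		hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;

	/* IPv6: same pattern; the old *_IPV6_EX bits are now reserved */
	if (rss_hf & ETH_RSS_IPV6)
		hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;

	return hash_type;
}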
static inline void vnic_set_nic_cfg(u32 *nic_cfg, u8 rss_default_cpu, u8 rss_hash_type, diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h index d774bb0d..d8e67f74 100644 --- a/drivers/net/enic/base/vnic_rq.h +++ b/drivers/net/enic/base/vnic_rq.h @@ -6,6 +6,7 @@ #ifndef _VNIC_RQ_H_ #define _VNIC_RQ_H_ +#include #include "vnic_dev.h" #include "vnic_cq.h" @@ -51,6 +52,8 @@ struct vnic_rq { struct vnic_dev *vdev; struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */ struct vnic_dev_ring ring; + struct rte_mbuf **free_mbufs; /* reserve of free mbufs */ + int num_free_mbufs; struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */ unsigned int mbuf_next_idx; /* next mb to consume */ void *os_buf_head; @@ -69,6 +72,7 @@ struct vnic_rq { struct rte_mbuf *pkt_last_seg; unsigned int max_mbufs_per_pkt; uint16_t tot_nb_desc; + bool need_initial_post; }; static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c index d61c4c6e..c9bf3572 100644 --- a/drivers/net/enic/base/vnic_wq.c +++ b/drivers/net/enic/base/vnic_wq.c @@ -32,8 +32,8 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq) { unsigned int count = wq->ring.desc_count; /* Allocate the mbuf ring */ - wq->bufs = (struct vnic_wq_buf *)rte_zmalloc_socket("wq->bufs", - sizeof(struct vnic_wq_buf) * count, + wq->bufs = (struct rte_mbuf **)rte_zmalloc_socket("wq->bufs", + sizeof(struct rte_mbuf *) * count, RTE_CACHE_LINE_SIZE, wq->socket_id); wq->head_idx = 0; wq->tail_idx = 0; @@ -113,6 +113,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, vnic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable, error_interrupt_offset); + wq->cq_pend = 0; wq->last_completed_index = 0; } @@ -145,9 +146,9 @@ int vnic_wq_disable(struct vnic_wq *wq) } void vnic_wq_clean(struct vnic_wq *wq, - void (*buf_clean)(struct vnic_wq_buf *buf)) + void (*buf_clean)(struct rte_mbuf **buf)) { - struct vnic_wq_buf *buf; + struct rte_mbuf **buf; unsigned int to_clean = wq->tail_idx; buf = &wq->bufs[to_clean]; diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h index 7c069c06..236cf696 100644 --- a/drivers/net/enic/base/vnic_wq.h +++ b/drivers/net/enic/base/vnic_wq.h @@ -36,23 +36,20 @@ struct vnic_wq_ctrl { u32 pad9; }; -/* 16 bytes */ -struct vnic_wq_buf { - struct rte_mempool *pool; - void *mb; -}; - struct vnic_wq { unsigned int index; + uint64_t tx_offload_notsup_mask; struct vnic_dev *vdev; struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ struct vnic_dev_ring ring; - struct vnic_wq_buf *bufs; + struct rte_mbuf **bufs; unsigned int head_idx; + unsigned int cq_pend; unsigned int tail_idx; unsigned int socket_id; const struct rte_memzone *cqmsg_rz; uint16_t last_completed_index; + uint64_t offloads; }; static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) @@ -163,5 +160,5 @@ unsigned int vnic_wq_error_status(struct vnic_wq *wq); void vnic_wq_enable(struct vnic_wq *wq); int vnic_wq_disable(struct vnic_wq *wq); void vnic_wq_clean(struct vnic_wq *wq, - void (*buf_clean)(struct vnic_wq_buf *buf)); + void (*buf_clean)(struct rte_mbuf **buf)); #endif /* _VNIC_WQ_H_ */ diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h index c083985e..7c27bd51 100644 --- a/drivers/net/enic/enic.h +++ b/drivers/net/enic/enic.h @@ -17,6 +17,7 @@ #include "vnic_rss.h" #include "enic_res.h" #include "cq_enet_desc.h" +#include #include #include @@ -49,6 +50,18 @@ #define ENICPMD_FDIR_MAX 64 +/* HW default VXLAN port 
*/ +#define ENIC_DEFAULT_VXLAN_PORT 4789 + +/* + * Interrupt 0: LSC and errors + * Interrupt 1: rx queue 0 + * Interrupt 2: rx queue 1 + * ... + */ +#define ENICPMD_LSC_INTR_OFFSET 0 +#define ENICPMD_RXQ_INTR_OFFSET 1 + struct enic_fdir_node { struct rte_eth_fdir_filter filter; u16 fltr_id; @@ -92,6 +105,7 @@ struct enic { struct vnic_dev *vdev; unsigned int port_id; + bool overlay_offload; struct rte_eth_dev *rte_dev; struct enic_fdir fdir; char bdf_name[ENICPMD_BDF_LENGTH]; @@ -109,7 +123,13 @@ struct enic { u16 max_mtu; u8 adv_filters; u32 flow_filter_mode; - u8 filter_tags; + u8 filter_actions; /* HW supported actions */ + bool vxlan; + bool disable_overlay; /* devargs disable_overlay=1 */ + bool nic_cfg_chk; /* NIC_CFG_CHK available */ + bool udp_rss_weak; /* Bodega style UDP RSS */ + uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */ + uint16_t vxlan_port; /* current vxlan port pushed to NIC */ unsigned int flags; unsigned int priv_flags; @@ -126,9 +146,9 @@ struct enic { struct vnic_cq *cq; unsigned int cq_count; /* equals rq_count + wq_count */ - /* interrupt resource */ - struct vnic_intr intr; - unsigned int intr_count; + /* interrupt vectors (len = conf_intr_count) */ + struct vnic_intr *intr; + unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */ /* software counters */ struct enic_soft_stats soft_stats; @@ -146,8 +166,34 @@ struct enic { LIST_HEAD(enic_flows, rte_flow) flows; rte_spinlock_t flows_lock; + + /* RSS */ + uint16_t reta_size; + uint8_t hash_key_size; + uint64_t flow_type_rss_offloads; /* 0 indicates RSS not supported */ + /* + * Keep a copy of current RSS config for queries, as we cannot retrieve + * it from the NIC. + */ + uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */ + uint8_t rss_enable; + uint64_t rss_hf; /* ETH_RSS flags */ + union vnic_rss_key rss_key; + union vnic_rss_cpu rss_cpu; + + uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */ + uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */ + uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */ + uint64_t tx_offload_mask; /* PKT_TX flags accepted */ }; +/* Compute ethdev's max packet size from MTU */ +static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu) +{ + /* ethdev max size includes eth and crc whereas NIC MTU does not */ + return mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; +} + /* Get the CQ index from a Start of Packet(SOP) RQ index */ static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx) { @@ -220,53 +266,61 @@ enic_ring_incr(uint32_t n_descriptors, uint32_t idx) return idx; } -extern void enic_fdir_stats_get(struct enic *enic, - struct rte_eth_fdir_stats *stats); -extern int enic_fdir_add_fltr(struct enic *enic, - struct rte_eth_fdir_filter *params); -extern int enic_fdir_del_fltr(struct enic *enic, - struct rte_eth_fdir_filter *params); -extern void enic_free_wq(void *txq); -extern int enic_alloc_intr_resources(struct enic *enic); -extern int enic_setup_finish(struct enic *enic); -extern int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, - unsigned int socket_id, uint16_t nb_desc); -extern void enic_start_wq(struct enic *enic, uint16_t queue_idx); -extern int enic_stop_wq(struct enic *enic, uint16_t queue_idx); -extern void enic_start_rq(struct enic *enic, uint16_t queue_idx); -extern int enic_stop_rq(struct enic *enic, uint16_t queue_idx); -extern void enic_free_rq(void *rxq); -extern int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, - unsigned int socket_id, struct rte_mempool *mp, - uint16_t nb_desc, uint16_t 
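The interrupt offsets defined above fix a simple linear layout: vector 0 carries link-state changes and errors, and Rx queue n maps to vector n + 1. A trivial helper makes the mapping explicit (helper name is illustrative; the handlers later in this patch index enic->intr[] with exactly this arithmetic):

#define ENICPMD_LSC_INTR_OFFSET 0
#define ENICPMD_RXQ_INTR_OFFSET 1

/* rxq 0 -> intr[1], rxq 1 -> intr[2], ...; intr[0] is reserved for LSC */
static inline unsigned int enic_rxq_intr_index(unsigned int rxq_idx)
{
	return rxq_idx + ENICPMD_RXQ_INTR_OFFSET;
}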
free_thresh); -extern int enic_set_rss_nic_cfg(struct enic *enic); -extern int enic_set_vnic_res(struct enic *enic); -extern int enic_enable(struct enic *enic); -extern int enic_disable(struct enic *enic); -extern void enic_remove(struct enic *enic); -extern int enic_get_link_status(struct enic *enic); -extern int enic_dev_stats_get(struct enic *enic, - struct rte_eth_stats *r_stats); -extern void enic_dev_stats_clear(struct enic *enic); -extern void enic_add_packet_filter(struct enic *enic); +void enic_fdir_stats_get(struct enic *enic, + struct rte_eth_fdir_stats *stats); +int enic_fdir_add_fltr(struct enic *enic, + struct rte_eth_fdir_filter *params); +int enic_fdir_del_fltr(struct enic *enic, + struct rte_eth_fdir_filter *params); +void enic_free_wq(void *txq); +int enic_alloc_intr_resources(struct enic *enic); +int enic_setup_finish(struct enic *enic); +int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, + unsigned int socket_id, uint16_t nb_desc); +void enic_start_wq(struct enic *enic, uint16_t queue_idx); +int enic_stop_wq(struct enic *enic, uint16_t queue_idx); +void enic_start_rq(struct enic *enic, uint16_t queue_idx); +int enic_stop_rq(struct enic *enic, uint16_t queue_idx); +void enic_free_rq(void *rxq); +int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, + unsigned int socket_id, struct rte_mempool *mp, + uint16_t nb_desc, uint16_t free_thresh); +int enic_set_vnic_res(struct enic *enic); +int enic_init_rss_nic_cfg(struct enic *enic); +int enic_set_rss_conf(struct enic *enic, + struct rte_eth_rss_conf *rss_conf); +int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu); +int enic_set_vlan_strip(struct enic *enic); +int enic_enable(struct enic *enic); +int enic_disable(struct enic *enic); +void enic_remove(struct enic *enic); +int enic_get_link_status(struct enic *enic); +int enic_dev_stats_get(struct enic *enic, + struct rte_eth_stats *r_stats); +void enic_dev_stats_clear(struct enic *enic); +void enic_add_packet_filter(struct enic *enic); int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr); -void enic_del_mac_address(struct enic *enic, int mac_index); -extern unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq); -extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq, - struct rte_mbuf *tx_pkt, unsigned short len, - uint8_t sop, uint8_t eop, uint8_t cq_entry, - uint16_t ol_flags, uint16_t vlan_tag); - -extern void enic_post_wq_index(struct vnic_wq *wq); -extern int enic_probe(struct enic *enic); -extern int enic_clsf_init(struct enic *enic); -extern void enic_clsf_destroy(struct enic *enic); +int enic_del_mac_address(struct enic *enic, int mac_index); +unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq); +void enic_send_pkt(struct enic *enic, struct vnic_wq *wq, + struct rte_mbuf *tx_pkt, unsigned short len, + uint8_t sop, uint8_t eop, uint8_t cq_entry, + uint16_t ol_flags, uint16_t vlan_tag); + +void enic_post_wq_index(struct vnic_wq *wq); +int enic_probe(struct enic *enic); +int enic_clsf_init(struct enic *enic); +void enic_clsf_destroy(struct enic *enic); uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); uint16_t enic_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t 
nb_pkts); uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c index 3ef1d083..9d95201e 100644 --- a/drivers/net/enic/enic_clsf.c +++ b/drivers/net/enic/enic_clsf.c @@ -111,7 +111,6 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input, struct rte_eth_fdir_masks *masks) { struct filter_generic_1 *gp = &fltr->u.generic_1; - int i; fltr->type = FILTER_DPDK_1; memset(gp, 0, sizeof(*gp)); @@ -273,18 +272,14 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input, ipv6_mask.proto = masks->ipv6_mask.proto; ipv6_val.proto = input->flow.ipv6_flow.proto; } - for (i = 0; i < 4; i++) { - *(uint32_t *)&ipv6_mask.src_addr[i * 4] = - masks->ipv6_mask.src_ip[i]; - *(uint32_t *)&ipv6_val.src_addr[i * 4] = - input->flow.ipv6_flow.src_ip[i]; - } - for (i = 0; i < 4; i++) { - *(uint32_t *)&ipv6_mask.dst_addr[i * 4] = - masks->ipv6_mask.src_ip[i]; - *(uint32_t *)&ipv6_val.dst_addr[i * 4] = - input->flow.ipv6_flow.dst_ip[i]; - } + memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip, + sizeof(ipv6_mask.src_addr)); + memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip, + sizeof(ipv6_val.src_addr)); + memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip, + sizeof(ipv6_mask.dst_addr)); + memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip, + sizeof(ipv6_val.dst_addr)); if (input->flow.ipv6_flow.tc) { ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12; ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12; diff --git a/drivers/net/enic/enic_compat.h b/drivers/net/enic/enic_compat.h index c0af1ed2..ceb1b096 100644 --- a/drivers/net/enic/enic_compat.h +++ b/drivers/net/enic/enic_compat.h @@ -56,6 +56,11 @@ #define dev_debug(x, args...) dev_printk(DEBUG, args) extern int enicpmd_logtype_flow; +extern int enicpmd_logtype_init; + +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \ + "%s" fmt "\n", __func__, ##args) #define __le16 u16 #define __le32 u32 diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c index d84714ef..b3d57771 100644 --- a/drivers/net/enic/enic_ethdev.c +++ b/drivers/net/enic/enic_ethdev.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include "vnic_intr.h" @@ -23,10 +24,6 @@ int enicpmd_logtype_init; int enicpmd_logtype_flow; -#define PMD_INIT_LOG(level, fmt, args...) 
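One detail of the enic_clsf.c hunk below is easy to miss: besides replacing the casted per-word loops, the memcpy() version corrects the destination-mask copy, which previously read masks->ipv6_mask.src_ip while filling ipv6_mask.dst_addr. A minimal sketch of the new form, with illustrative array sizes:

#include <stdint.h>
#include <string.h>

/* Copy an IPv6 address expressed as four 32-bit words into a byte
 * array; one bounded copy instead of four aliasing-cast stores. */
static void copy_ipv6(uint8_t addr[16], const uint32_t ip[4])
{
	memcpy(addr, ip, 16);
}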
\ - rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \ - "%s" fmt "\n", __func__, ##args) - #define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") /* @@ -39,9 +36,10 @@ static const struct rte_pci_id pci_id_enic_map[] = { {.vendor_id = 0, /* sentinel */}, }; -RTE_INIT(enicpmd_init_log); -static void -enicpmd_init_log(void) +#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay" +#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite" + +RTE_INIT(enicpmd_init_log) { enicpmd_logtype_init = rte_log_register("pmd.net.enic.init"); if (enicpmd_logtype_init >= 0) @@ -181,17 +179,21 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id, - __rte_unused const struct rte_eth_txconf *tx_conf) + const struct rte_eth_txconf *tx_conf) { int ret; struct enic *enic = pmd_priv(eth_dev); + struct vnic_wq *wq; if (rte_eal_process_type() != RTE_PROC_PRIMARY) return -E_RTE_SECONDARY; ENICPMD_FUNC_TRACE(); RTE_ASSERT(queue_idx < enic->conf_wq_count); - eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx]; + wq = &enic->wq[queue_idx]; + wq->offloads = tx_conf->offloads | + eth_dev->data->dev_conf.txmode.offloads; + eth_dev->data->tx_queues[queue_idx] = (void *)wq; ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc); if (ret) { @@ -318,52 +320,40 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, return enicpmd_dev_setup_intr(enic); } -static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev, - uint16_t vlan_id, int on) -{ - struct enic *enic = pmd_priv(eth_dev); - int err; - - ENICPMD_FUNC_TRACE(); - if (on) - err = enic_add_vlan(enic, vlan_id); - else - err = enic_del_vlan(enic, vlan_id); - return err; -} - static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) { struct enic *enic = pmd_priv(eth_dev); + uint64_t offloads; ENICPMD_FUNC_TRACE(); + offloads = eth_dev->data->dev_conf.rxmode.offloads; if (mask & ETH_VLAN_STRIP_MASK) { - if (eth_dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_STRIP) + if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP) enic->ig_vlan_strip_en = 1; else enic->ig_vlan_strip_en = 0; } - enic_set_rss_nic_cfg(enic); - - if (mask & ETH_VLAN_FILTER_MASK) { + if ((mask & ETH_VLAN_FILTER_MASK) && + (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { dev_warning(enic, "Configuration of VLAN filter is not supported\n"); } - if (mask & ETH_VLAN_EXTEND_MASK) { + if ((mask & ETH_VLAN_EXTEND_MASK) && + (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) { dev_warning(enic, "Configuration of extended VLAN is not supported\n"); } - return 0; + return enic_set_vlan_strip(enic); } static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev) { int ret; + int mask; struct enic *enic = pmd_priv(eth_dev); if (rte_eal_process_type() != RTE_PROC_PRIMARY) @@ -378,9 +368,21 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev) enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM); - ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK); - - return ret; + /* All vlan offload masks to apply the current settings */ + mask = ETH_VLAN_STRIP_MASK | + ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK; + ret = enicpmd_vlan_offload_set(eth_dev, mask); + if (ret) { + dev_err(enic, "Failed to configure VLAN offloads\n"); + return ret; + } + /* + * Initialize RSS with the default reta and key. If the user key is + * given (rx_adv_conf.rss_conf.rss_key), will use that instead of the + * default key. 
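In the tx_queue_setup() change above, the effective per-queue offload set under the 18.08 ethdev offload API is the union of the queue-level and port-level flags, which is what the driver stores in wq->offloads. Reduced to one line (names illustrative):

#include <stdint.h>

static inline uint64_t effective_tx_offloads(uint64_t queue_flags,
					     uint64_t port_txmode_flags)
{
	return queue_flags | port_txmode_flags;
}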
+ */ + return enic_init_rss_nic_cfg(enic); } /* Start the device. @@ -410,10 +412,9 @@ static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev) ENICPMD_FUNC_TRACE(); enic_disable(enic); + memset(&link, 0, sizeof(link)); - rte_atomic64_cmpset((uint64_t *)ð_dev->data->dev_link, - *(uint64_t *)ð_dev->data->dev_link, - *(uint64_t *)&link); + rte_eth_linkstatus_set(eth_dev, &link); } /* @@ -459,27 +460,52 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev, struct enic *enic = pmd_priv(eth_dev); ENICPMD_FUNC_TRACE(); - device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); /* Scattered Rx uses two receive queues per rx queue exposed to dpdk */ device_info->max_rx_queues = enic->conf_rq_count / 2; device_info->max_tx_queues = enic->conf_wq_count; device_info->min_rx_bufsize = ENIC_MIN_MTU; - device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4; + /* "Max" mtu is not a typo. HW receives packet sizes up to the + * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is + * a hint to the driver to size receive buffers accordingly so that + * larger-than-vnic-mtu packets get truncated.. For DPDK, we let + * the user decide the buffer size via rxmode.max_rx_pkt_len, basically + * ignoring vNIC mtu. + */ + device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu); device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR; - device_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; - device_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + device_info->rx_offload_capa = enic->rx_offload_capa; + device_info->tx_offload_capa = enic->tx_offload_capa; + device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa; device_info->default_rxconf = (struct rte_eth_rxconf) { .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH }; + device_info->reta_size = enic->reta_size; + device_info->hash_key_size = enic->hash_key_size; + device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads; + device_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = enic->config.rq_desc_count, + .nb_min = ENIC_MIN_RQ_DESCS, + .nb_align = ENIC_ALIGN_DESCS, + }; + device_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = enic->config.wq_desc_count, + .nb_min = ENIC_MIN_WQ_DESCS, + .nb_align = ENIC_ALIGN_DESCS, + .nb_seg_max = ENIC_TX_XMIT_MAX, + .nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC, + }; + device_info->default_rxportconf = (struct rte_eth_dev_portconf) { + .burst_size = ENIC_DEFAULT_RX_BURST, + .ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max, + ENIC_DEFAULT_RX_RING_SIZE), + .nb_queues = ENIC_DEFAULT_RX_RINGS, + }; + device_info->default_txportconf = (struct rte_eth_dev_portconf) { + .burst_size = ENIC_DEFAULT_TX_BURST, + .ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max, + ENIC_DEFAULT_TX_RING_SIZE), + .nb_queues = ENIC_DEFAULT_TX_RINGS, + }; } static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev) @@ -496,7 +522,8 @@ static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev) RTE_PTYPE_UNKNOWN }; - if (dev->rx_pkt_burst == enic_recv_pkts) + if (dev->rx_pkt_burst == enic_recv_pkts || + dev->rx_pkt_burst == enic_noscatter_recv_pkts) return ptypes; return NULL; } @@ -571,7 +598,24 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index) return; ENICPMD_FUNC_TRACE(); - enic_del_mac_address(enic, 
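The max_rx_pktlen reported by dev_info_get() below comes from the enic_mtu_to_max_rx_pktlen() inline added earlier in this patch. A worked check with the standard constants (ETHER_HDR_LEN = 14, ETHER_CRC_LEN = 4):

#include <assert.h>
#include <stdint.h>

#define ETHER_HDR_LEN 14
#define ETHER_CRC_LEN 4

static inline uint32_t mtu_to_max_rx_pktlen(uint32_t mtu)
{
	/* ethdev max size includes L2 header and CRC; vNIC MTU does not */
	return mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
}

int main(void)
{
	assert(mtu_to_max_rx_pktlen(1500) == 1518); /* classic Ethernet frame */
	assert(mtu_to_max_rx_pktlen(9000) == 9018); /* jumbo */
	return 0;
}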
index); + if (enic_del_mac_address(enic, index)) + dev_err(enic, "del mac addr failed\n"); +} + +static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev, + struct ether_addr *addr) +{ + struct enic *enic = pmd_priv(eth_dev); + int ret; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -E_RTE_SECONDARY; + + ENICPMD_FUNC_TRACE(); + ret = enic_del_mac_address(enic, 0); + if (ret) + return ret; + return enic_set_mac_address(enic, addr->addr_bytes); } static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) @@ -582,6 +626,242 @@ static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) return enic_set_mtu(enic, mtu); } +static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 + *reta_conf, + uint16_t reta_size) +{ + struct enic *enic = pmd_priv(dev); + uint16_t i, idx, shift; + + ENICPMD_FUNC_TRACE(); + if (reta_size != ENIC_RSS_RETA_SIZE) { + dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n", + reta_size, ENIC_RSS_RETA_SIZE); + return -EINVAL; + } + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx( + enic->rss_cpu.cpu[i / 4].b[i % 4]); + } + + return 0; +} + +static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 + *reta_conf, + uint16_t reta_size) +{ + struct enic *enic = pmd_priv(dev); + union vnic_rss_cpu rss_cpu; + uint16_t i, idx, shift; + + ENICPMD_FUNC_TRACE(); + if (reta_size != ENIC_RSS_RETA_SIZE) { + dev_err(enic, "reta_update: wrong reta_size. given=%u" + " expected=%u\n", + reta_size, ENIC_RSS_RETA_SIZE); + return -EINVAL; + } + /* + * Start with the current reta and modify it per reta_conf, as we + * need to push the entire reta even if we only modify one entry. + */ + rss_cpu = enic->rss_cpu; + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + rss_cpu.cpu[i / 4].b[i % 4] = + enic_rte_rq_idx_to_sop_idx( + reta_conf[idx].reta[shift]); + } + return enic_set_rss_reta(enic, &rss_cpu); +} + +static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct enic *enic = pmd_priv(dev); + + ENICPMD_FUNC_TRACE(); + return enic_set_rss_conf(enic, rss_conf); +} + +static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct enic *enic = pmd_priv(dev); + + ENICPMD_FUNC_TRACE(); + if (rss_conf == NULL) + return -EINVAL; + if (rss_conf->rss_key != NULL && + rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) { + dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. 
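The index computations in the reta handlers below can be read as one mapping: a flat RETA index i selects a (group, bit) pair in the rte_eth_rss_reta_entry64 array and a (word, byte) pair in the VIC rss_cpu table, which packs four 8-bit queue ids per word. A standalone sketch (struct name illustrative):

#include <stdint.h>

#define RTE_RETA_GROUP_SIZE 64	/* entries per rte_eth_rss_reta_entry64 */

struct reta_pos {
	uint16_t group;	/* reta_conf[] element */
	uint16_t bit;	/* bit in .mask / slot in .reta[] */
	uint16_t word;	/* rss_cpu.cpu[] element */
	uint16_t byte;	/* .b[] slot within that element */
};

static struct reta_pos reta_locate(uint16_t i)
{
	struct reta_pos p;

	p.group = i / RTE_RETA_GROUP_SIZE;
	p.bit = i % RTE_RETA_GROUP_SIZE;
	p.word = i / 4;
	p.byte = i % 4;
	return p;
}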
given=%u" + " expected=%u+\n", + rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE); + return -EINVAL; + } + rss_conf->rss_hf = enic->rss_hf; + if (rss_conf->rss_key != NULL) { + int i; + for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) { + rss_conf->rss_key[i] = + enic->rss_key.key[i / 10].b[i % 10]; + } + rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE; + } + return 0; +} + +static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct enic *enic = pmd_priv(dev); + struct vnic_rq *rq_sop; + struct vnic_rq *rq_data; + struct rte_eth_rxconf *conf; + uint16_t sop_queue_idx; + uint16_t data_queue_idx; + + ENICPMD_FUNC_TRACE(); + sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id); + data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id); + rq_sop = &enic->rq[sop_queue_idx]; + rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */ + qinfo->mp = rq_sop->mp; + qinfo->scattered_rx = rq_sop->data_queue_enable; + qinfo->nb_desc = rq_sop->ring.desc_count; + if (qinfo->scattered_rx) + qinfo->nb_desc += rq_data->ring.desc_count; + conf = &qinfo->conf; + memset(conf, 0, sizeof(*conf)); + conf->rx_free_thresh = rq_sop->rx_free_thresh; + conf->rx_drop_en = 1; + /* + * Except VLAN stripping (port setting), all the checksum offloads + * are always enabled. + */ + conf->offloads = enic->rx_offload_capa; + if (!enic->ig_vlan_strip_en) + conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + /* rx_thresh and other fields are not applicable for enic */ +} + +static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct enic *enic = pmd_priv(dev); + struct vnic_wq *wq = &enic->wq[tx_queue_id]; + + ENICPMD_FUNC_TRACE(); + qinfo->nb_desc = wq->ring.desc_count; + memset(&qinfo->conf, 0, sizeof(qinfo->conf)); + qinfo->conf.offloads = wq->offloads; + /* tx_thresh, and all the other fields are not applicable for enic */ +} + +static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]); + return 0; +} + +static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]); + return 0; +} + +static int udp_tunnel_common_check(struct enic *enic, + struct rte_eth_udp_tunnel *tnl) +{ + if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) + return -ENOTSUP; + if (!enic->overlay_offload) { + PMD_INIT_LOG(DEBUG, " vxlan (overlay offload) is not " + "supported\n"); + return -ENOTSUP; + } + return 0; +} + +static int update_vxlan_port(struct enic *enic, uint16_t port) +{ + if (vnic_dev_overlay_offload_cfg(enic->vdev, + OVERLAY_CFG_VXLAN_PORT_UPDATE, + port)) { + PMD_INIT_LOG(DEBUG, " failed to update vxlan port\n"); + return -EINVAL; + } + PMD_INIT_LOG(DEBUG, " updated vxlan port to %u\n", port); + enic->vxlan_port = port; + return 0; +} + +static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *tnl) +{ + struct enic *enic = pmd_priv(eth_dev); + int ret; + + ENICPMD_FUNC_TRACE(); + ret = udp_tunnel_common_check(enic, tnl); + if (ret) + return ret; + /* + * The NIC has 1 configurable VXLAN port number. "Adding" a new port + * number replaces it. 
+ */ + if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) { + PMD_INIT_LOG(DEBUG, " %u is already configured or invalid\n", + tnl->udp_port); + return -EINVAL; + } + return update_vxlan_port(enic, tnl->udp_port); +} + +static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *tnl) +{ + struct enic *enic = pmd_priv(eth_dev); + int ret; + + ENICPMD_FUNC_TRACE(); + ret = udp_tunnel_common_check(enic, tnl); + if (ret) + return ret; + /* + * Clear the previously set port number and restore the + * hardware default port number. Some drivers disable VXLAN + * offloads when there are no configured port numbers. But + * enic does not do that as VXLAN is part of overlay offload, + * which is tied to inner RSS and TSO. + */ + if (tnl->udp_port != enic->vxlan_port) { + PMD_INIT_LOG(DEBUG, " %u is not a configured vxlan port\n", + tnl->udp_port); + return -EINVAL; + } + return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT); +} + static const struct eth_dev_ops enicpmd_eth_dev_ops = { .dev_configure = enicpmd_dev_configure, .dev_start = enicpmd_dev_start, @@ -600,7 +880,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = { .dev_infos_get = enicpmd_dev_info_get, .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get, .mtu_set = enicpmd_mtu_set, - .vlan_filter_set = enicpmd_vlan_filter_set, + .vlan_filter_set = NULL, .vlan_tpid_set = NULL, .vlan_offload_set = enicpmd_vlan_offload_set, .vlan_strip_queue_set = NULL, @@ -614,6 +894,10 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = { .rx_descriptor_done = NULL, .tx_queue_setup = enicpmd_dev_tx_queue_setup, .tx_queue_release = enicpmd_dev_tx_queue_release, + .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable, + .rxq_info_get = enicpmd_dev_rxq_info_get, + .txq_info_get = enicpmd_dev_txq_info_get, .dev_led_on = NULL, .dev_led_off = NULL, .flow_ctrl_get = NULL, @@ -621,9 +905,97 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = { .priority_flow_ctrl_set = NULL, .mac_addr_add = enicpmd_add_mac_addr, .mac_addr_remove = enicpmd_remove_mac_addr, + .mac_addr_set = enicpmd_set_mac_addr, .filter_ctrl = enicpmd_dev_filter_ctrl, + .reta_query = enicpmd_dev_rss_reta_query, + .reta_update = enicpmd_dev_rss_reta_update, + .rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get, + .rss_hash_update = enicpmd_dev_rss_hash_update, + .udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add, + .udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del, }; +static int enic_parse_disable_overlay(__rte_unused const char *key, + const char *value, + void *opaque) +{ + struct enic *enic; + + enic = (struct enic *)opaque; + if (strcmp(value, "0") == 0) { + enic->disable_overlay = false; + } else if (strcmp(value, "1") == 0) { + enic->disable_overlay = true; + } else { + dev_err(enic, "Invalid value for " ENIC_DEVARG_DISABLE_OVERLAY + ": expected=0|1 given=%s\n", value); + return -EINVAL; + } + return 0; +} + +static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key, + const char *value, + void *opaque) +{ + struct enic *enic; + + enic = (struct enic *)opaque; + if (strcmp(value, "trunk") == 0) { + /* Trunk mode: always tag */ + enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK; + } else if (strcmp(value, "untag") == 0) { + /* Untag default VLAN mode: untag if VLAN = default VLAN */ + enic->ig_vlan_rewrite_mode = + IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN; + } else if (strcmp(value, "priority") == 0) { + /* + * Priority-tag 
default VLAN mode: priority tag (VLAN header + * with ID=0) if VLAN = default + */ + enic->ig_vlan_rewrite_mode = + IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN; + } else if (strcmp(value, "pass") == 0) { + /* Pass through mode: do not touch tags */ + enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU; + } else { + dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE + ": expected=trunk|untag|priority|pass given=%s\n", + value); + return -EINVAL; + } + return 0; +} + +static int enic_check_devargs(struct rte_eth_dev *dev) +{ + static const char *const valid_keys[] = { + ENIC_DEVARG_DISABLE_OVERLAY, + ENIC_DEVARG_IG_VLAN_REWRITE, + NULL}; + struct enic *enic = pmd_priv(dev); + struct rte_kvargs *kvlist; + + ENICPMD_FUNC_TRACE(); + + enic->disable_overlay = false; + enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU; + if (!dev->device->devargs) + return 0; + kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); + if (!kvlist) + return -EINVAL; + if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY, + enic_parse_disable_overlay, enic) < 0 || + rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE, + enic_parse_ig_vlan_rewrite, enic) < 0) { + rte_kvargs_free(kvlist); + return -EINVAL; + } + rte_kvargs_free(kvlist); + return 0; +} + struct enic *enicpmd_list_head = NULL; /* Initialize the driver * It returns 0 on success. @@ -633,6 +1005,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev) struct rte_pci_device *pdev; struct rte_pci_addr *addr; struct enic *enic = pmd_priv(eth_dev); + int err; ENICPMD_FUNC_TRACE(); @@ -651,6 +1024,9 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev) snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x", addr->domain, addr->bus, addr->devid, addr->function); + err = enic_check_devargs(eth_dev); + if (err) + return err; return enic_probe(enic); } @@ -676,3 +1052,6 @@ static struct rte_pci_driver rte_enic_pmd = { RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map); RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_enic, + ENIC_DEVARG_DISABLE_OVERLAY "=0|1 " + ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass"); diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c index 28923b0e..0cf04aef 100644 --- a/drivers/net/enic/enic_flow.c +++ b/drivers/net/enic/enic_flow.c @@ -3,6 +3,7 @@ */ #include +#include #include #include #include @@ -273,21 +274,33 @@ static const enum rte_flow_action_type enic_supported_actions_v1[] = { }; /** Supported actions for newer NICs */ -static const enum rte_flow_action_type enic_supported_actions_v2[] = { +static const enum rte_flow_action_type enic_supported_actions_v2_id[] = { RTE_FLOW_ACTION_TYPE_QUEUE, RTE_FLOW_ACTION_TYPE_MARK, RTE_FLOW_ACTION_TYPE_FLAG, RTE_FLOW_ACTION_TYPE_END, }; +static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = { + RTE_FLOW_ACTION_TYPE_QUEUE, + RTE_FLOW_ACTION_TYPE_MARK, + RTE_FLOW_ACTION_TYPE_FLAG, + RTE_FLOW_ACTION_TYPE_DROP, + RTE_FLOW_ACTION_TYPE_END, +}; + /** Action capabilities indexed by NIC version information */ static const struct enic_action_cap enic_action_cap[] = { [FILTER_ACTION_RQ_STEERING_FLAG] = { .actions = enic_supported_actions_v1, .copy_fn = enic_copy_action_v1, }, - [FILTER_ACTION_V2_ALL] = { - .actions = enic_supported_actions_v2, + [FILTER_ACTION_FILTER_ID_FLAG] = { + .actions = enic_supported_actions_v2_id, + .copy_fn = enic_copy_action_v2, 
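For reference, the two devargs registered at the end of the ethdev hunk above (disable-overlay and ig-vlan-rewrite) are passed per device through the EAL PCI whitelist option; for example, with a hypothetical device address:

	testpmd -w 0000:0b:00.0,disable-overlay=1,ig-vlan-rewrite=untag -- -i

Keys outside valid_keys are rejected by rte_kvargs_parse(), and each value is validated by the parse callbacks shown above.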
+ }, + [FILTER_ACTION_DROP_FLAG] = { + .actions = enic_supported_actions_v2_drop, .copy_fn = enic_copy_action_v2, }, }; @@ -544,16 +557,21 @@ enic_copy_item_vlan_v2(const struct rte_flow_item *item, if (!spec) return 0; - /* Don't support filtering in tpid */ - if (mask) { - if (mask->tpid != 0) - return ENOTSUP; - } else { + if (!mask) mask = &rte_flow_item_vlan_mask; - RTE_ASSERT(mask->tpid == 0); - } if (*inner_ofst == 0) { + struct ether_hdr *eth_mask = + (void *)gp->layer[FILTER_GENERIC_1_L2].mask; + struct ether_hdr *eth_val = + (void *)gp->layer[FILTER_GENERIC_1_L2].val; + + /* Outer TPID cannot be matched */ + if (eth_mask->ether_type) + return ENOTSUP; + eth_mask->ether_type = mask->inner_type; + eth_val->ether_type = spec->inner_type; + /* Outer header. Use the vlan mask/val fields */ gp->mask_vlan = mask->tci; gp->val_vlan = spec->tci; @@ -952,6 +970,9 @@ static int enic_copy_action_v1(const struct rte_flow_action actions[], struct filter_action_v2 *enic_action) { + enum { FATE = 1, }; + uint32_t overlap = 0; + FLOW_TRACE(); for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { @@ -963,6 +984,10 @@ enic_copy_action_v1(const struct rte_flow_action actions[], const struct rte_flow_action_queue *queue = (const struct rte_flow_action_queue *) actions->conf; + + if (overlap & FATE) + return ENOTSUP; + overlap |= FATE; enic_action->rq_idx = enic_rte_rq_idx_to_sop_idx(queue->index); break; @@ -972,6 +997,8 @@ enic_copy_action_v1(const struct rte_flow_action actions[], break; } } + if (!(overlap & FATE)) + return ENOTSUP; enic_action->type = FILTER_ACTION_RQ_STEERING; return 0; } @@ -989,6 +1016,9 @@ static int enic_copy_action_v2(const struct rte_flow_action actions[], struct filter_action_v2 *enic_action) { + enum { FATE = 1, MARK = 2, }; + uint32_t overlap = 0; + FLOW_TRACE(); for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { @@ -997,6 +1027,10 @@ enic_copy_action_v2(const struct rte_flow_action actions[], const struct rte_flow_action_queue *queue = (const struct rte_flow_action_queue *) actions->conf; + + if (overlap & FATE) + return ENOTSUP; + overlap |= FATE; enic_action->rq_idx = enic_rte_rq_idx_to_sop_idx(queue->index); enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG; @@ -1007,6 +1041,9 @@ enic_copy_action_v2(const struct rte_flow_action actions[], (const struct rte_flow_action_mark *) actions->conf; + if (overlap & MARK) + return ENOTSUP; + overlap |= MARK; /* ENIC_MAGIC_FILTER_ID is reserved and is the highest * in the range of allows mark ids. 
*/ @@ -1017,10 +1054,20 @@ enic_copy_action_v2(const struct rte_flow_action actions[], break; } case RTE_FLOW_ACTION_TYPE_FLAG: { + if (overlap & MARK) + return ENOTSUP; + overlap |= MARK; enic_action->filter_id = ENIC_MAGIC_FILTER_ID; enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG; break; } + case RTE_FLOW_ACTION_TYPE_DROP: { + if (overlap & FATE) + return ENOTSUP; + overlap |= FATE; + enic_action->flags |= FILTER_ACTION_DROP_FLAG; + break; + } case RTE_FLOW_ACTION_TYPE_VOID: continue; default: @@ -1028,6 +1075,8 @@ enic_copy_action_v2(const struct rte_flow_action actions[], break; } } + if (!(overlap & FATE)) + return ENOTSUP; enic_action->type = FILTER_ACTION_V2; return 0; } @@ -1059,10 +1108,14 @@ enic_get_filter_cap(struct enic *enic) static const struct enic_action_cap * enic_get_action_cap(struct enic *enic) { - static const struct enic_action_cap *ea; - - if (enic->filter_tags) - ea = &enic_action_cap[FILTER_ACTION_V2_ALL]; + const struct enic_action_cap *ea; + uint8_t actions; + + actions = enic->filter_actions; + if (actions & FILTER_ACTION_DROP_FLAG) + ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG]; + else if (actions & FILTER_ACTION_FILTER_ID_FLAG) + ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG]; else ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG]; return ea; @@ -1268,6 +1321,12 @@ enic_flow_parse(struct rte_eth_dev *dev, NULL, "egress is not supported"); return -rte_errno; + } else if (attrs->transfer) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, + "transfer is not supported"); + return -rte_errno; } else if (!attrs->ingress) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index ec9d343f..fd940c58 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -69,12 +69,12 @@ enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq) } } -static void enic_free_wq_buf(struct vnic_wq_buf *buf) +static void enic_free_wq_buf(struct rte_mbuf **buf) { - struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb; + struct rte_mbuf *mbuf = *buf; rte_pktmbuf_free_seg(mbuf); - buf->mb = NULL; + *buf = NULL; } static void enic_log_q_error(struct enic *enic) @@ -162,13 +162,12 @@ int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats) return 0; } -void enic_del_mac_address(struct enic *enic, int mac_index) +int enic_del_mac_address(struct enic *enic, int mac_index) { struct rte_eth_dev *eth_dev = enic->rte_dev; uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes; - if (vnic_dev_del_addr(enic->vdev, mac_addr)) - dev_err(enic, "del mac addr failed\n"); + return vnic_dev_del_addr(enic->vdev, mac_addr); } int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr) @@ -200,10 +199,15 @@ void enic_init_vnic_resources(struct enic *enic) { unsigned int error_interrupt_enable = 1; unsigned int error_interrupt_offset = 0; + unsigned int rxq_interrupt_enable = 0; + unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET; unsigned int index = 0; unsigned int cq_idx; struct vnic_rq *data_rq; + if (enic->rte_dev->data->dev_conf.intr_conf.rxq) + rxq_interrupt_enable = 1; + for (index = 0; index < enic->rq_count; index++) { cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index)); @@ -225,11 +229,13 @@ void enic_init_vnic_resources(struct enic *enic) 0 /* cq_head */, 0 /* cq_tail */, 1 /* cq_tail_color */, - 0 /* interrupt_enable */, + rxq_interrupt_enable, 1 /* cq_entry_enable */, 0 
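The overlap bookkeeping in enic_copy_action_v2() above enforces three rules with two bits: at most one fate action (queue or drop), at most one mark-type action (mark or flag), and at least one fate overall. Its skeleton, extracted into a standalone checker (enum and names illustrative):

#include <stdint.h>

enum act { ACT_QUEUE, ACT_DROP, ACT_MARK, ACT_FLAG, ACT_VOID };

static int validate_actions(const enum act *a, int n)
{
	enum { FATE = 1, MARK = 2 };
	uint32_t overlap = 0;
	int i;

	for (i = 0; i < n; i++) {
		switch (a[i]) {
		case ACT_QUEUE:
		case ACT_DROP:
			if (overlap & FATE)
				return -1;	/* two fate actions */
			overlap |= FATE;
			break;
		case ACT_MARK:
		case ACT_FLAG:
			if (overlap & MARK)
				return -1;	/* two mark-type actions */
			overlap |= MARK;
			break;
		case ACT_VOID:
			break;
		}
	}
	return (overlap & FATE) ? 0 : -1;	/* a fate action is mandatory */
}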
/* cq_message_enable */, - 0 /* interrupt offset */, + rxq_interrupt_offset, 0 /* cq_message_addr */); + if (rxq_interrupt_enable) + rxq_interrupt_offset++; } for (index = 0; index < enic->wq_count; index++) { @@ -237,6 +243,9 @@ void enic_init_vnic_resources(struct enic *enic) enic_cq_wq(enic, index), error_interrupt_enable, error_interrupt_offset); + /* Compute unsupported ol flags for enic_prep_pkts() */ + enic->wq[index].tx_offload_notsup_mask = + PKT_TX_OFFLOAD_MASK ^ enic->tx_offload_mask; cq_idx = enic_cq_wq(enic, index); vnic_cq_init(&enic->cq[cq_idx], @@ -252,10 +261,12 @@ void enic_init_vnic_resources(struct enic *enic) (u64)enic->wq[index].cqmsg_rz->iova); } - vnic_intr_init(&enic->intr, - enic->config.intr_timer_usec, - enic->config.intr_timer_type, - /*mask_on_assertion*/1); + for (index = 0; index < enic->intr_count; index++) { + vnic_intr_init(&enic->intr[index], + enic->config.intr_timer_usec, + enic->config.intr_timer_type, + /*mask_on_assertion*/1); + } } @@ -266,6 +277,8 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) struct rq_enet_desc *rqd = rq->ring.descs; unsigned i; dma_addr_t dma_addr; + uint32_t max_rx_pkt_len; + uint16_t rq_buf_len; if (!rq->in_use) return 0; @@ -273,6 +286,18 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index, rq->ring.desc_count); + /* + * If *not* using scatter and the mbuf size is greater than the + * requested max packet size (max_rx_pkt_len), then reduce the + * posted buffer size to max_rx_pkt_len. HW still receives packets + * larger than max_rx_pkt_len, but they will be truncated, which we + * drop in the rx handler. Not ideal, but better than returning + * large packets when the user is not expecting them. + */ + max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len; + rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM; + if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable) + rq_buf_len = max_rx_pkt_len; for (i = 0; i < rq->ring.desc_count; i++, rqd++) { mb = rte_mbuf_raw_alloc(rq->mp); if (mb == NULL) { @@ -287,9 +312,29 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) rq_enet_desc_enc(rqd, dma_addr, (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP : RQ_ENET_TYPE_NOT_SOP), - mb->buf_len - RTE_PKTMBUF_HEADROOM); + rq_buf_len); rq->mbuf_ring[i] = mb; } + /* + * Do not post the buffers to the NIC until we enable the RQ via + * enic_start_rq(). + */ + rq->need_initial_post = true; + /* Initialize fetch index while RQ is disabled */ + iowrite32(0, &rq->ctrl->fetch_index); + return 0; +} + +/* + * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has + * allocated the buffers and filled the RQ descriptor ring. Just need to push + * the post index to the NIC. 
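The XOR above precomputes, per WQ, the set of PKT_TX_* flags the port does not support: every bit defined in PKT_TX_OFFLOAD_MASK but absent from enic->tx_offload_mask. Per the comment it exists for enic_prep_pkts(); the actual check there is outside this hunk, but the natural use is a single AND per packet, sketched here as an assumption:

#include <stdint.h>

/* notsup_mask = PKT_TX_OFFLOAD_MASK ^ supported_mask, as computed above
 * (equivalent to "defined and not supported", since the supported set
 * is a subset of the defined set). */
static inline int tx_offloads_ok(uint64_t ol_flags, uint64_t notsup_mask)
{
	return (ol_flags & notsup_mask) == 0;
}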
+ */ +static void +enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq) +{ + if (!rq->in_use || !rq->need_initial_post) + return; /* make sure all prior writes are complete before doing the PIO write */ rte_rmb(); @@ -302,11 +347,8 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n", enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold); iowrite32(rq->posted_index, &rq->ctrl->posted_index); - iowrite32(0, &rq->ctrl->fetch_index); rte_rmb(); - - return 0; - + rq->need_initial_post = false; } static void * @@ -319,8 +361,8 @@ enic_alloc_consistent(void *priv, size_t size, struct enic *enic = (struct enic *)priv; struct enic_memzone_entry *mze; - rz = rte_memzone_reserve_aligned((const char *)name, - size, SOCKET_ID_ANY, 0, ENIC_ALIGN); + rz = rte_memzone_reserve_aligned((const char *)name, size, + SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN); if (!rz) { pr_err("%s : Failed to allocate memory requested for %s\n", __func__, name); @@ -379,16 +421,14 @@ enic_free_consistent(void *priv, int enic_link_update(struct enic *enic) { struct rte_eth_dev *eth_dev = enic->rte_dev; - int ret; - int link_status = 0; + struct rte_eth_link link; - link_status = enic_get_link_status(enic); - ret = (link_status == enic->link_status); - enic->link_status = link_status; - eth_dev->data->dev_link.link_status = link_status; - eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; - eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev); - return ret; + memset(&link, 0, sizeof(link)); + link.link_status = enic_get_link_status(enic); + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_speed = vnic_dev_port_speed(enic->vdev); + + return rte_eth_linkstatus_set(eth_dev, &link); } static void @@ -397,13 +437,98 @@ enic_intr_handler(void *arg) struct rte_eth_dev *dev = (struct rte_eth_dev *)arg; struct enic *enic = pmd_priv(dev); - vnic_intr_return_all_credits(&enic->intr); + vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]); enic_link_update(enic); _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); enic_log_q_error(enic); } +static int enic_rxq_intr_init(struct enic *enic) +{ + struct rte_intr_handle *intr_handle; + uint32_t rxq_intr_count, i; + int err; + + intr_handle = enic->rte_dev->intr_handle; + if (!enic->rte_dev->data->dev_conf.intr_conf.rxq) + return 0; + /* + * Rx queue interrupts only work when we have MSI-X interrupts, + * one per queue. Sharing one interrupt is technically + * possible with VIC, but it is not worth the complications it brings. 
+ */ + if (!rte_intr_cap_multiple(intr_handle)) { + dev_err(enic, "Rx queue interrupts require MSI-X interrupts" + " (vfio-pci driver)\n"); + return -ENOTSUP; + } + rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET; + err = rte_intr_efd_enable(intr_handle, rxq_intr_count); + if (err) { + dev_err(enic, "Failed to enable event fds for Rx queue" + " interrupts\n"); + return err; + } + intr_handle->intr_vec = rte_zmalloc("enic_intr_vec", + rxq_intr_count * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + dev_err(enic, "Failed to allocate intr_vec\n"); + return -ENOMEM; + } + for (i = 0; i < rxq_intr_count; i++) + intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET; + return 0; +} + +static void enic_rxq_intr_deinit(struct enic *enic) +{ + struct rte_intr_handle *intr_handle; + + intr_handle = enic->rte_dev->intr_handle; + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } +} + +static void enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx) +{ + struct wq_enet_desc *desc; + struct vnic_wq *wq; + unsigned int i; + + /* + * Fill WQ descriptor fields that never change. Every descriptor is + * one packet, so set EOP. Also set CQ_ENTRY every ENIC_WQ_CQ_THRESH + * descriptors (i.e. request one completion update every 32 packets). + */ + wq = &enic->wq[queue_idx]; + desc = (struct wq_enet_desc *)wq->ring.descs; + for (i = 0; i < wq->ring.desc_count; i++, desc++) { + desc->header_length_flags = 1 << WQ_ENET_FLAGS_EOP_SHIFT; + if (i % ENIC_WQ_CQ_THRESH == ENIC_WQ_CQ_THRESH - 1) + desc->header_length_flags |= + (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT); + } +} + +static void pick_rx_handler(struct enic *enic) +{ + struct rte_eth_dev *eth_dev; + + /* Use the non-scatter, simplified RX handler if possible. */ + eth_dev = enic->rte_dev; + if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) { + PMD_INIT_LOG(DEBUG, " use the non-scatter Rx handler"); + eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts; + } else { + PMD_INIT_LOG(DEBUG, " use the normal Rx handler"); + eth_dev->rx_pkt_burst = &enic_recv_pkts; + } +} + int enic_enable(struct enic *enic) { unsigned int index; @@ -420,6 +545,9 @@ int enic_enable(struct enic *enic) if (eth_dev->data->dev_conf.intr_conf.lsc) vnic_dev_notify_set(enic->vdev, 0); + err = enic_rxq_intr_init(enic); + if (err) + return err; if (enic_clsf_init(enic)) dev_warning(enic, "Init of hash table for clsf failed."\ "Flow director feature will not work\n"); @@ -443,6 +571,22 @@ int enic_enable(struct enic *enic) } } + /* + * Use the simple TX handler if possible. All offloads must be + * disabled. 
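In enic_prep_wq_for_simple_tx() below, every descriptor gets EOP (one packet per descriptor) and every ENIC_WQ_CQ_THRESH-th descriptor additionally requests a completion, which the comment pins at one update per 32 packets. The per-index predicate in isolation (threshold value assumed from that comment):

#include <stdbool.h>

#define ENIC_WQ_CQ_THRESH 32	/* per the comment above */

static inline bool want_cq_entry(unsigned int desc_idx)
{
	/* true at idx 31, 63, 95, ...: one completion per 32 packets */
	return desc_idx % ENIC_WQ_CQ_THRESH == ENIC_WQ_CQ_THRESH - 1;
}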
+ */ + if (eth_dev->data->dev_conf.txmode.offloads == 0) { + PMD_INIT_LOG(DEBUG, " use the simple tx handler"); + eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts; + for (index = 0; index < enic->wq_count; index++) + enic_prep_wq_for_simple_tx(enic, index); + } else { + PMD_INIT_LOG(DEBUG, " use the default tx handler"); + eth_dev->tx_pkt_burst = &enic_xmit_pkts; + } + + pick_rx_handler(enic); + for (index = 0; index < enic->wq_count; index++) enic_start_wq(enic, index); for (index = 0; index < enic->rq_count; index++) @@ -457,7 +601,8 @@ int enic_enable(struct enic *enic) enic_intr_handler, (void *)enic->rte_dev); rte_intr_enable(&(enic->pdev->intr_handle)); - vnic_intr_unmask(&enic->intr); + /* Unmask LSC interrupt */ + vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]); return 0; } @@ -465,17 +610,21 @@ int enic_enable(struct enic *enic) int enic_alloc_intr_resources(struct enic *enic) { int err; + unsigned int i; dev_info(enic, "vNIC resources used: "\ "wq %d rq %d cq %d intr %d\n", enic->wq_count, enic_vnic_rq_count(enic), enic->cq_count, enic->intr_count); - err = vnic_intr_alloc(enic->vdev, &enic->intr, 0); - if (err) - enic_free_vnic_resources(enic); - - return err; + for (i = 0; i < enic->intr_count; i++) { + err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i); + if (err) { + enic_free_vnic_resources(enic); + return err; + } + } + return 0; } void enic_free_rq(void *rxq) @@ -490,6 +639,19 @@ void enic_free_rq(void *rxq) enic = vnic_dev_priv(rq_sop->vdev); rq_data = &enic->rq[rq_sop->data_queue_idx]; + if (rq_sop->free_mbufs) { + struct rte_mbuf **mb; + int i; + + mb = rq_sop->free_mbufs; + for (i = ENIC_RX_BURST_MAX - rq_sop->num_free_mbufs; + i < ENIC_RX_BURST_MAX; i++) + rte_pktmbuf_free(mb[i]); + rte_free(rq_sop->free_mbufs); + rq_sop->free_mbufs = NULL; + rq_sop->num_free_mbufs = 0; + } + enic_rxmbuf_queue_release(enic, rq_sop); if (rq_data->in_use) enic_rxmbuf_queue_release(enic, rq_data); @@ -539,10 +701,13 @@ void enic_start_rq(struct enic *enic, uint16_t queue_idx) rq_data = &enic->rq[rq_sop->data_queue_idx]; struct rte_eth_dev *eth_dev = enic->rte_dev; - if (rq_data->in_use) + if (rq_data->in_use) { vnic_rq_enable(rq_data); + enic_initial_post_rx(enic, rq_data); + } rte_mb(); vnic_rq_enable(rq_sop); + enic_initial_post_rx(enic, rq_sop); eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED; } @@ -581,7 +746,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, unsigned int mbuf_size, mbufs_per_pkt; unsigned int nb_sop_desc, nb_data_desc; uint16_t min_sop, max_sop, min_data, max_data; - uint16_t mtu = enic->rte_dev->data->mtu; + uint32_t max_rx_pkt_len; rq_sop->is_sop = 1; rq_sop->data_queue_idx = data_queue_idx; @@ -599,22 +764,42 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM); + /* max_rx_pkt_len includes the ethernet header and CRC. 
*/ + max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len; if (enic->rte_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx); - /* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */ - mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) + - (mbuf_size - 1)) / mbuf_size; + /* ceil((max pkt len)/mbuf_size) */ + mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size; } else { dev_info(enic, "Scatter rx mode disabled\n"); mbufs_per_pkt = 1; + if (max_rx_pkt_len > mbuf_size) { + dev_warning(enic, "The maximum Rx packet size (%u) is" + " larger than the mbuf size (%u), and" + " scatter is disabled. Larger packets will" + " be truncated.\n", + max_rx_pkt_len, mbuf_size); + } } if (mbufs_per_pkt > 1) { dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx); rq_sop->data_queue_enable = 1; rq_data->in_use = 1; + /* + * HW does not directly support rxmode.max_rx_pkt_len. HW always + * receives packet sizes up to the "max" MTU. + * If not using scatter, we can achieve the effect of dropping + * larger packets by reducing the size of posted buffers. + * See enic_alloc_rx_queue_mbufs(). + */ + if (max_rx_pkt_len < + enic_mtu_to_max_rx_pktlen(enic->max_mtu)) { + dev_warning(enic, "rxmode.max_rx_pkt_len is ignored" + " when scatter rx mode is in use.\n"); + } } else { dev_info(enic, "Rq %u Scatter rx mode not being used\n", queue_idx); @@ -623,20 +808,20 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, } /* number of descriptors have to be a multiple of 32 */ - nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F; - nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F; + nb_sop_desc = (nb_desc / mbufs_per_pkt) & ENIC_ALIGN_DESCS_MASK; + nb_data_desc = (nb_desc - nb_sop_desc) & ENIC_ALIGN_DESCS_MASK; rq_sop->max_mbufs_per_pkt = mbufs_per_pkt; rq_data->max_mbufs_per_pkt = mbufs_per_pkt; if (mbufs_per_pkt > 1) { - min_sop = 64; + min_sop = ENIC_RX_BURST_MAX; max_sop = ((enic->config.rq_desc_count / - (mbufs_per_pkt - 1)) & ~0x1F); + (mbufs_per_pkt - 1)) & ENIC_ALIGN_DESCS_MASK); min_data = min_sop * (mbufs_per_pkt - 1); max_data = enic->config.rq_desc_count; } else { - min_sop = 64; + min_sop = ENIC_RX_BURST_MAX; max_sop = enic->config.rq_desc_count; min_data = 0; max_data = 0; @@ -654,8 +839,9 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, nb_data_desc = max_data; } if (mbufs_per_pkt > 1) { - dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n", - mtu, mbuf_size, min_sop + min_data, + dev_info(enic, "For max packet size %u and mbuf size %u valid" + " rx descriptor range is %u to %u\n", + max_rx_pkt_len, mbuf_size, min_sop + min_data, max_sop + max_data); } dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n", @@ -706,10 +892,21 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, goto err_free_sop_mbuf; } + rq_sop->free_mbufs = (struct rte_mbuf **) + rte_zmalloc_socket("rq->free_mbufs", + sizeof(struct rte_mbuf *) * + ENIC_RX_BURST_MAX, + RTE_CACHE_LINE_SIZE, rq_sop->socket_id); + if (rq_sop->free_mbufs == NULL) + goto err_free_data_mbuf; + rq_sop->num_free_mbufs = 0; + rq_sop->tot_nb_desc = nb_desc; /* squirl away for MTU update function */ return 0; +err_free_data_mbuf: + rte_free(rq_data->mbuf_ring); err_free_sop_mbuf: rte_free(rq_sop->mbuf_ring); err_free_cq: @@ -749,25 +946,15 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, static int instance; wq->socket_id = socket_id; - if (nb_desc) { - if (nb_desc > enic->config.wq_desc_count) { - 
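Two pieces of integer arithmetic above deserve a spelled-out check: the scatter buffer count is a ceiling division, and descriptor counts are rounded down to a multiple of 32 by masking, with ENIC_ALIGN_DESCS_MASK presumably replacing the old ~0x1F literal. A compilable check under those assumptions:

#include <assert.h>
#include <stdint.h>

#define ENIC_ALIGN_DESCS 32
#define ENIC_ALIGN_DESCS_MASK (~(uint32_t)(ENIC_ALIGN_DESCS - 1)) /* ~0x1F */

int main(void)
{
	uint32_t max_rx_pkt_len = 9000, mbuf_size = 2048;

	/* ceil(9000 / 2048) = 5 buffers per packet when scattering */
	assert((max_rx_pkt_len + mbuf_size - 1) / mbuf_size == 5);
	/* 100 requested descriptors round down to 96, a multiple of 32 */
	assert((100u & ENIC_ALIGN_DESCS_MASK) == 96);
	return 0;
}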
dev_warning(enic, - "WQ %d - number of tx desc in cmd line (%d)"\ - "is greater than that in the UCSM/CIMC adapter"\ - "policy. Applying the value in the adapter "\ - "policy (%d)\n", - queue_idx, nb_desc, enic->config.wq_desc_count); - } else if (nb_desc != enic->config.wq_desc_count) { - enic->config.wq_desc_count = nb_desc; - dev_info(enic, - "TX Queues - effective number of descs:%d\n", - nb_desc); - } - } + /* + * rte_eth_tx_queue_setup() checks min, max, and alignment. So just + * print an info message for diagnostics. + */ + dev_info(enic, "TX Queues - effective number of descs:%d\n", nb_desc); /* Allocate queue resources */ err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx, - enic->config.wq_desc_count, + nb_desc, sizeof(struct wq_enet_desc)); if (err) { dev_err(enic, "error in allocation of wq\n"); @@ -775,7 +962,7 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, } err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index, - socket_id, enic->config.wq_desc_count, + socket_id, nb_desc, sizeof(struct cq_enet_wq_desc)); if (err) { vnic_wq_free(wq); @@ -788,9 +975,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, instance++); wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name, - sizeof(uint32_t), - SOCKET_ID_ANY, 0, - ENIC_ALIGN); + sizeof(uint32_t), SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN); if (!wq->cqmsg_rz) return -ENOMEM; @@ -802,8 +988,11 @@ int enic_disable(struct enic *enic) unsigned int i; int err; - vnic_intr_mask(&enic->intr); - (void)vnic_intr_masked(&enic->intr); /* flush write */ + for (i = 0; i < enic->intr_count; i++) { + vnic_intr_mask(&enic->intr[i]); + (void)vnic_intr_masked(&enic->intr[i]); /* flush write */ + } + enic_rxq_intr_deinit(enic); rte_intr_disable(&enic->pdev->intr_handle); rte_intr_callback_unregister(&enic->pdev->intr_handle, enic_intr_handler, @@ -846,7 +1035,8 @@ int enic_disable(struct enic *enic) vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); for (i = 0; i < enic->cq_count; i++) vnic_cq_clean(&enic->cq[i]); - vnic_intr_clean(&enic->intr); + for (i = 0; i < enic->intr_count; i++) + vnic_intr_clean(&enic->intr[i]); return 0; } @@ -879,9 +1069,10 @@ static int enic_dev_wait(struct vnic_dev *vdev, static int enic_dev_open(struct enic *enic) { int err; + int flags = CMD_OPENF_IG_DESCCACHE; err = enic_dev_wait(enic->vdev, vnic_dev_open, - vnic_dev_open_done, 0); + vnic_dev_open_done, flags); if (err) dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n", err); @@ -889,44 +1080,42 @@ static int enic_dev_open(struct enic *enic) return err; } -static int enic_set_rsskey(struct enic *enic) +static int enic_set_rsskey(struct enic *enic, uint8_t *user_key) { dma_addr_t rss_key_buf_pa; union vnic_rss_key *rss_key_buf_va = NULL; - static union vnic_rss_key rss_key = { - .key = { - [0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}}, - [1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}}, - [2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}}, - [3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}}, - } - }; - int err; + int err, i; u8 name[NAME_MAX]; + RTE_ASSERT(user_key != NULL); snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name); rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key), &rss_key_buf_pa, name); if (!rss_key_buf_va) return -ENOMEM; - rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key)); + for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) + rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i]; err = 
enic_set_rss_key(enic, rss_key_buf_pa, sizeof(union vnic_rss_key)); + /* Save for later queries */ + if (!err) { + rte_memcpy(&enic->rss_key, rss_key_buf_va, + sizeof(union vnic_rss_key)); + } enic_free_consistent(enic, sizeof(union vnic_rss_key), rss_key_buf_va, rss_key_buf_pa); return err; } -static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) +int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu) { dma_addr_t rss_cpu_buf_pa; union vnic_rss_cpu *rss_cpu_buf_va = NULL; - int i; int err; u8 name[NAME_MAX]; @@ -936,9 +1125,7 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) if (!rss_cpu_buf_va) return -ENOMEM; - for (i = 0; i < (1 << rss_hash_bits); i++) - (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] = - enic_rte_rq_idx_to_sop_idx(i % enic->rq_count); + rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu)); err = enic_set_rss_cpu(enic, rss_cpu_buf_pa, @@ -947,6 +1134,9 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) enic_free_consistent(enic, sizeof(union vnic_rss_cpu), rss_cpu_buf_va, rss_cpu_buf_pa); + /* Save for later queries */ + if (!err) + rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu)); return err; } @@ -956,8 +1146,6 @@ static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, const u8 tso_ipid_split_en = 0; int err; - /* Enable VLAN tag stripping */ - err = enic_set_nic_cfg(enic, rss_default_cpu, rss_hash_type, rss_hash_bits, rss_base_cpu, @@ -967,47 +1155,50 @@ static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, return err; } -int enic_set_rss_nic_cfg(struct enic *enic) +/* Initialize RSS with defaults, called from dev_configure */ +int enic_init_rss_nic_cfg(struct enic *enic) { - const u8 rss_default_cpu = 0; - const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | - NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | - NIC_CFG_RSS_HASH_TYPE_IPV6 | - NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; - const u8 rss_hash_bits = 7; - const u8 rss_base_cpu = 0; - u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); - - if (rss_enable) { - if (!enic_set_rsskey(enic)) { - if (enic_set_rsscpu(enic, rss_hash_bits)) { - rss_enable = 0; - dev_warning(enic, "RSS disabled, "\ - "Failed to set RSS cpu indirection table."); - } - } else { - rss_enable = 0; - dev_warning(enic, - "RSS disabled, Failed to set RSS key.\n"); + static uint8_t default_rss_key[] = { + 85, 67, 83, 97, 119, 101, 115, 111, 109, 101, + 80, 65, 76, 79, 117, 110, 105, 113, 117, 101, + 76, 73, 78, 85, 88, 114, 111, 99, 107, 115, + 69, 78, 73, 67, 105, 115, 99, 111, 111, 108, + }; + struct rte_eth_rss_conf rss_conf; + union vnic_rss_cpu rss_cpu; + int ret, i; + + rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf; + /* + * If setting key for the first time, and the user gives us none, then + * push the default key to NIC. 
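The packing loop in enic_set_rsskey() above stores the flat 40-byte key the way the VIC expects it: four 10-byte sub-keys, with flat byte i landing in key[i / 10].b[i % 10]. Below is a minimal standalone sketch of that layout and index math; the demo_* types are illustrative stand-ins for the driver's vnic_rss_key union, not DPDK definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's vnic_rss_key union:
 * a flat 40-byte RSS key stored as four 10-byte sub-keys. */
struct demo_rss_subkey { uint8_t b[10]; };
struct demo_rss_key { struct demo_rss_subkey key[4]; };

#define DEMO_RSS_HASH_KEY_SIZE 40

static void pack_rss_key(struct demo_rss_key *dst, const uint8_t *user_key)
{
	int i;

	/* Same index math as the driver: flat byte i lands in
	 * sub-key i/10 at offset i%10. */
	for (i = 0; i < DEMO_RSS_HASH_KEY_SIZE; i++)
		dst->key[i / 10].b[i % 10] = user_key[i];
}

int main(void)
{
	uint8_t flat[DEMO_RSS_HASH_KEY_SIZE];
	struct demo_rss_key packed;
	int i;

	for (i = 0; i < DEMO_RSS_HASH_KEY_SIZE; i++)
		flat[i] = (uint8_t)i;
	pack_rss_key(&packed, flat);
	/* Flat byte 23 lands in sub-key 2, offset 3. */
	printf("key[2].b[3] = %u\n", (unsigned)packed.key[2].b[3]);
	return 0;
}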
+ */ + if (rss_conf.rss_key == NULL) { + rss_conf.rss_key = default_rss_key; + rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE; + } + ret = enic_set_rss_conf(enic, &rss_conf); + if (ret) { + dev_err(enic, "Failed to configure RSS\n"); + return ret; + } + if (enic->rss_enable) { + /* If enabling RSS, use the default reta */ + for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) { + rss_cpu.cpu[i / 4].b[i % 4] = + enic_rte_rq_idx_to_sop_idx(i % enic->rq_count); } + ret = enic_set_rss_reta(enic, &rss_cpu); + if (ret) + dev_err(enic, "Failed to set RSS indirection table\n"); } - - return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type, - rss_hash_bits, rss_base_cpu, rss_enable); + return ret; } int enic_setup_finish(struct enic *enic) { - int ret; - enic_init_soft_stats(enic); - ret = enic_set_rss_nic_cfg(enic); - if (ret) { - dev_err(enic, "Failed to config nic, aborting.\n"); - return -1; - } - /* Default conf */ vnic_dev_packet_filter(enic->vdev, 1 /* directed */, @@ -1022,6 +1213,115 @@ int enic_setup_finish(struct enic *enic) return 0; } +static int enic_rss_conf_valid(struct enic *enic, + struct rte_eth_rss_conf *rss_conf) +{ + /* RSS is disabled per VIC settings. Ignore rss_conf. */ + if (enic->flow_type_rss_offloads == 0) + return 0; + if (rss_conf->rss_key != NULL && + rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) { + dev_err(enic, "Given rss_key is %d bytes, it must be %d\n", + rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE); + return -EINVAL; + } + if (rss_conf->rss_hf != 0 && + (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) { + dev_err(enic, "Given rss_hf contains none of the supported" + " types\n"); + return -EINVAL; + } + return 0; +} + +/* Set hash type and key according to rss_conf */ +int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf) +{ + struct rte_eth_dev *eth_dev; + uint64_t rss_hf; + u8 rss_hash_type; + u8 rss_enable; + int ret; + + RTE_ASSERT(rss_conf != NULL); + ret = enic_rss_conf_valid(enic, rss_conf); + if (ret) { + dev_err(enic, "RSS configuration (rss_conf) is invalid\n"); + return ret; + } + + eth_dev = enic->rte_dev; + rss_hash_type = 0; + rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads; + if (enic->rq_count > 1 && + (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) && + rss_hf != 0) { + rss_enable = 1; + if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_OTHER)) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4; + if (enic->udp_rss_weak) { + /* + * 'TCP' is not a typo. The "weak" version of + * UDP RSS requires both the TCP and UDP bits + * be set. It does enable TCP RSS as well. 
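When RSS is enabled, the loop above fills the 128-entry indirection table round-robin across the configured Rx queues. A standalone sketch of that fill follows; demo_rq_to_sop() stands in for enic_rte_rq_idx_to_sop_idx(), and the mapping of a queue to twice its index (one SOP/data RQ pair per queue) is an assumption made here for illustration only.

#include <stdint.h>
#include <stdio.h>

#define DEMO_RETA_SIZE 128	/* cf. 1 << ENIC_RSS_HASH_BITS */

struct demo_rss_cpu_entry { uint8_t b[4]; };
struct demo_rss_cpu { struct demo_rss_cpu_entry cpu[DEMO_RETA_SIZE / 4]; };

/* Stand-in for enic_rte_rq_idx_to_sop_idx(): assumes each rte Rx queue
 * owns a SOP/data RQ pair, so queue i's SOP RQ is 2 * i. */
static uint8_t demo_rq_to_sop(unsigned int rq_idx)
{
	return (uint8_t)(2 * rq_idx);
}

int main(void)
{
	struct demo_rss_cpu reta;
	unsigned int rq_count = 3;	/* example: three configured Rx queues */
	unsigned int i;

	/* Round-robin fill, as in enic_init_rss_nic_cfg() above. */
	for (i = 0; i < DEMO_RETA_SIZE; i++)
		reta.cpu[i / 4].b[i % 4] = demo_rq_to_sop(i % rq_count);

	for (i = 0; i < 6; i++)
		printf("reta[%u] -> RQ %u\n", i,
		       (unsigned)reta.cpu[i / 4].b[i % 4]);
	return 0;
}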
+ */ + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4; + } + } + if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX | + ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER)) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6; + if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; + if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) { + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6; + if (enic->udp_rss_weak) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; + } + } else { + rss_enable = 0; + rss_hf = 0; + } + + /* Set the hash key if provided */ + if (rss_enable && rss_conf->rss_key) { + ret = enic_set_rsskey(enic, rss_conf->rss_key); + if (ret) { + dev_err(enic, "Failed to set RSS key\n"); + return ret; + } + } + + ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type, + ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU, + rss_enable); + if (!ret) { + enic->rss_hf = rss_hf; + enic->rss_hash_type = rss_hash_type; + enic->rss_enable = rss_enable; + } else { + dev_err(enic, "Failed to update RSS configurations." + " hash=0x%x\n", rss_hash_type); + } + return ret; +} + +int enic_set_vlan_strip(struct enic *enic) +{ + /* + * Unfortunately, VLAN strip on/off and RSS on/off are configured + * together. So, re-do niccfg, preserving the current RSS settings. + */ + return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type, + ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU, + enic->rss_enable); +} + void enic_add_packet_filter(struct enic *enic) { /* Args -> directed, multicast, broadcast, promisc, allmulti */ @@ -1043,6 +1343,7 @@ static void enic_dev_deinit(struct enic *enic) rte_free(eth_dev->data->mac_addrs); rte_free(enic->cq); + rte_free(enic->intr); rte_free(enic->rq); rte_free(enic->wq); } @@ -1052,12 +1353,16 @@ int enic_set_vnic_res(struct enic *enic) { struct rte_eth_dev *eth_dev = enic->rte_dev; int rc = 0; - unsigned int required_rq, required_wq, required_cq; + unsigned int required_rq, required_wq, required_cq, required_intr; /* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */ required_rq = eth_dev->data->nb_rx_queues * 2; required_wq = eth_dev->data->nb_tx_queues; required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues; + required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */ + if (eth_dev->data->dev_conf.intr_conf.rxq) { + required_intr += eth_dev->data->nb_rx_queues; + } if (enic->conf_rq_count < required_rq) { dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n", @@ -1076,11 +1381,18 @@ int enic_set_vnic_res(struct enic *enic) required_cq, enic->conf_cq_count); rc = -EINVAL; } + if (enic->conf_intr_count < required_intr) { + dev_err(dev, "Not enough Interrupts to support Rx queue" + " interrupts. Required:%u, Configured:%u\n", + required_intr, enic->conf_intr_count); + rc = -EINVAL; + } if (rc == 0) { enic->rq_count = eth_dev->data->nb_rx_queues; enic->wq_count = eth_dev->data->nb_tx_queues; enic->cq_count = enic->rq_count + enic->wq_count; + enic->intr_count = required_intr; } return rc; @@ -1176,20 +1488,26 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu) "MTU (%u) is greater than value configured in NIC (%u)\n", new_mtu, config_mtu); - /* The easy case is when scatter is disabled. However if the MTU - * becomes greater than the mbuf data size, packet drops will ensue. 
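enic_set_vnic_res() above derives the vNIC resource demand from the ethdev configuration: two RQs per Rx queue (SOP plus data), one WQ per Tx queue, one CQ per queue, and one interrupt for link status plus one per Rx queue when Rx-queue interrupts are requested. A small self-contained sketch of that arithmetic, with hypothetical queue counts:

#include <stdbool.h>
#include <stdio.h>

struct demo_req { unsigned int rq, wq, cq, intr; };

/* Mirrors the requirement arithmetic in enic_set_vnic_res() above. */
static struct demo_req demo_required(unsigned int nb_rx, unsigned int nb_tx,
				     bool rxq_intr)
{
	struct demo_req r;

	r.rq = nb_rx * 2;	/* SOP RQ + data RQ per Rx queue */
	r.wq = nb_tx;
	r.cq = nb_rx + nb_tx;	/* one CQ per RQ pair and per WQ */
	r.intr = 1;		/* LSC interrupt, reserved even if unused */
	if (rxq_intr)
		r.intr += nb_rx;	/* one more per Rx queue */
	return r;
}

int main(void)
{
	struct demo_req r = demo_required(4, 4, true);

	printf("RQ %u WQ %u CQ %u INTR %u\n", r.rq, r.wq, r.cq, r.intr);
	return 0;
}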
+ /* Update the MTU and maximum packet length */ + eth_dev->data->mtu = new_mtu; + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = + enic_mtu_to_max_rx_pktlen(new_mtu); + + /* + * If the device has not started (enic_enable), nothing to do. + * Later, enic_enable() will set up RQs reflecting the new maximum + * packet length. */ - if (!(enic->rte_dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_SCATTER)) { - eth_dev->data->mtu = new_mtu; + if (!eth_dev->data->dev_started) goto set_mtu_done; - } - /* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to - * change Rx scatter mode if necessary for better performance. I.e. if - * MTU was greater than the mbuf size and now it's less, scatter Rx - * doesn't have to be used and vice versa. - */ + /* + * The device has started, re-do RQs on the fly. In the process, we + * pick up the new maximum packet length. + * + * Some applications rely on the ability to change MTU without stopping + * the device. So keep this behavior for now. + */ rte_spinlock_lock(&enic->mtu_lock); /* Stop traffic on all RQs */ @@ -1214,12 +1532,12 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu) /* now it is safe to reconfigure the RQs */ - /* update the mtu */ - eth_dev->data->mtu = new_mtu; /* free and reallocate RQs with the new MTU */ for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) { rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)]; + if (!rq->in_use) + continue; enic_free_rq(rq); rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp, @@ -1240,7 +1558,7 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu) /* put back the real receive function */ rte_mb(); - eth_dev->rx_pkt_burst = enic_recv_pkts; + pick_rx_handler(enic); rte_mb(); /* restart Rx traffic */ @@ -1282,6 +1600,8 @@ static int enic_dev_init(struct enic *enic) /* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */ enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) * enic->conf_cq_count, 8); + enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) * + enic->conf_intr_count, 8); enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) * enic->conf_rq_count, 8); enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) * @@ -1290,6 +1610,10 @@ static int enic_dev_init(struct enic *enic) dev_err(enic, "failed to allocate vnic_cq, aborting.\n"); return -1; } + if (enic->conf_intr_count > 0 && enic->intr == NULL) { + dev_err(enic, "failed to allocate vnic_intr, aborting.\n"); + return -1; + } if (enic->conf_rq_count > 0 && enic->rq == NULL) { dev_err(enic, "failed to allocate vnic_rq, aborting.\n"); return -1; @@ -1319,6 +1643,38 @@ static int enic_dev_init(struct enic *enic) /* set up link status checking */ vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */ + enic->overlay_offload = false; + if (!enic->disable_overlay && enic->vxlan && + /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */ + vnic_dev_overlay_offload_ctrl(enic->vdev, + OVERLAY_FEATURE_VXLAN, + OVERLAY_OFFLOAD_ENABLE) == 0) { + enic->tx_offload_capa |= + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO; + /* + * Do not add PKT_TX_OUTER_{IPV4,IPV6} as they are not + * 'offload' flags (i.e. not part of PKT_TX_OFFLOAD_MASK). 
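The tx_offload_mask being extended here is later inverted into a per-WQ "unsupported flags" mask that enic_prep_pkts() tests against each mbuf's ol_flags (see the enic_rxtx.c hunk further down). A standalone sketch of the derivation and the rejection test; the DEMO_TX_* bits are made-up stand-ins for PKT_TX_* flags, not real DPDK values.

#include <stdint.h>
#include <stdio.h>

/* Made-up stand-ins for a few PKT_TX_* bits. */
#define DEMO_TX_VLAN		(1ULL << 0)
#define DEMO_TX_IP_CKSUM	(1ULL << 1)
#define DEMO_TX_TCP_SEG		(1ULL << 2)
#define DEMO_TX_MACSEC		(1ULL << 3)	/* not supported by this NIC */

/* Everything an application could set (cf. PKT_TX_OFFLOAD_MASK). */
#define DEMO_TX_ALL (DEMO_TX_VLAN | DEMO_TX_IP_CKSUM | \
		     DEMO_TX_TCP_SEG | DEMO_TX_MACSEC)

int main(void)
{
	/* What the device supports (cf. enic->tx_offload_mask). */
	uint64_t supported = DEMO_TX_VLAN | DEMO_TX_IP_CKSUM | DEMO_TX_TCP_SEG;
	/* Derived once, then tested per packet in the prepare handler. */
	uint64_t notsup_mask = DEMO_TX_ALL ^ supported;
	uint64_t ol_flags = DEMO_TX_IP_CKSUM | DEMO_TX_MACSEC;

	if (ol_flags & notsup_mask)
		printf("reject: unsupported offload requested\n");
	return 0;
}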
+ */ + enic->tx_offload_mask |= + PKT_TX_OUTER_IP_CKSUM | + PKT_TX_TUNNEL_MASK; + enic->overlay_offload = true; + enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT; + dev_info(enic, "Overlay offload is enabled\n"); + /* + * Reset the vxlan port to the default, as the NIC firmware + * does not reset it automatically and keeps the old setting. + */ + if (vnic_dev_overlay_offload_cfg(enic->vdev, + OVERLAY_CFG_VXLAN_PORT_UPDATE, + ENIC_DEFAULT_VXLAN_PORT)) { + dev_err(enic, "failed to update vxlan port\n"); + return -EINVAL; + } + } + return 0; } @@ -1351,6 +1707,15 @@ int enic_probe(struct enic *enic) enic_alloc_consistent, enic_free_consistent); + /* + * Allocate the consistent memory for stats upfront so both primary and + * secondary processes can dump stats. + */ + err = vnic_dev_alloc_stats_mem(enic->vdev); + if (err) { + dev_err(enic, "Failed to allocate cmd memory, aborting\n"); + goto err_out_unregister; + } /* Issue device open to get device in known state */ err = enic_dev_open(enic); if (err) { @@ -1359,8 +1724,10 @@ int enic_probe(struct enic *enic) } /* Set ingress vlan rewrite mode before vnic initialization */ + dev_debug(enic, "Set ig_vlan_rewrite_mode=%u\n", + enic->ig_vlan_rewrite_mode); err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, - IG_VLAN_REWRITE_MODE_PASS_THRU); + enic->ig_vlan_rewrite_mode); if (err) { dev_err(enic, "Failed to set ingress vlan rewrite mode, aborting.\n"); diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c index c99d6183..8d493ffe 100644 --- a/drivers/net/enic/enic_res.c +++ b/drivers/net/enic/enic_res.c @@ -76,19 +76,26 @@ int enic_get_vnic_config(struct enic *enic) ? "" : "not ")); err = vnic_dev_capable_filter_mode(enic->vdev, &enic->flow_filter_mode, - &enic->filter_tags); + &enic->filter_actions); if (err) { dev_err(enic_get_dev(enic), "Error getting filter modes, %d\n", err); return err; } + vnic_dev_capable_udp_rss_weak(enic->vdev, &enic->nic_cfg_chk, + &enic->udp_rss_weak); - dev_info(enic, "Flow api filter mode: %s, Filter tagging %savailable\n", + dev_info(enic, "Flow api filter mode: %s Actions: %s%s%s\n", ((enic->flow_filter_mode == FILTER_DPDK_1) ? "DPDK" : ((enic->flow_filter_mode == FILTER_USNIC_IP) ? "USNIC" : ((enic->flow_filter_mode == FILTER_IPV4_5TUPLE) ? "5TUPLE" : "NONE"))), - ((enic->filter_tags) ? "" : "not ")); + ((enic->filter_actions & FILTER_ACTION_RQ_STEERING_FLAG) ? + "steer " : ""), + ((enic->filter_actions & FILTER_ACTION_FILTER_ID_FLAG) ? + "tag " : ""), + ((enic->filter_actions & FILTER_ACTION_DROP_FLAG) ? + "drop " : "")); c->wq_desc_count = min_t(u32, ENIC_MAX_WQ_DESCS, @@ -117,7 +124,10 @@ int enic_get_vnic_config(struct enic *enic) "loopback tag 0x%04x\n", ENIC_SETTING(enic, TXCSUM) ? "yes" : "no", ENIC_SETTING(enic, RXCSUM) ? "yes" : "no", - ENIC_SETTING(enic, RSS) ? "yes" : "no", + ENIC_SETTING(enic, RSS) ? + (ENIC_SETTING(enic, RSSHASH_UDPIPV4) ? "+UDP" : + ((enic->udp_rss_weak ? "+udp" : + "yes"))) : "no", c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" : c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" : c->intr_mode == VENET_INTR_MODE_ANY ? "any" : @@ -128,6 +138,74 @@ int enic_get_vnic_config(struct enic *enic) c->intr_timer_usec, c->loop_tag); + /* RSS settings from vNIC */ + enic->reta_size = ENIC_RSS_RETA_SIZE; + enic->hash_key_size = ENIC_RSS_HASH_KEY_SIZE; + enic->flow_type_rss_offloads = 0; + if (ENIC_SETTING(enic, RSSHASH_IPV4)) + /* + * IPV4 hash type handles both non-frag and frag packet types. + * TCP/UDP is controlled via a separate flag below. 
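The hunk that follows translates VIC capability bits into the ethdev flow_type_rss_offloads mask, with udp_rss_weak advertising UDP hash types that piggyback on the TCP capability. A standalone sketch of that translation; the DEMO_RSS_* bits and the capability booleans are made-up stand-ins for ETH_RSS_* flags and ENIC_SETTING() checks.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up stand-ins for a few ETH_RSS_* bits. */
#define DEMO_RSS_IPV4		(1ULL << 0)
#define DEMO_RSS_TCP_IPV4	(1ULL << 1)
#define DEMO_RSS_UDP_IPV4	(1ULL << 2)

int main(void)
{
	bool cap_hash_ipv4 = true;	/* cf. ENIC_SETTING(enic, RSSHASH_IPV4) */
	bool cap_hash_tcp4 = true;	/* cf. ENIC_SETTING(enic, RSSHASH_TCPIPV4) */
	bool udp_rss_weak = true;	/* older firmware's UDP mode */
	uint64_t rss_hf = 0;

	if (cap_hash_ipv4)
		rss_hf |= DEMO_RSS_IPV4;
	if (cap_hash_tcp4)
		rss_hf |= DEMO_RSS_TCP_IPV4;
	/* Weak UDP RSS piggybacks on the TCP hash: advertise the UDP
	 * type even though no separate UDP hash exists. */
	if (udp_rss_weak)
		rss_hf |= DEMO_RSS_UDP_IPV4;

	printf("flow_type_rss_offloads = 0x%llx\n",
	       (unsigned long long)rss_hf);
	return 0;
}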
+ */ + enic->flow_type_rss_offloads |= ETH_RSS_IPV4 | + ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER; + if (ENIC_SETTING(enic, RSSHASH_TCPIPV4)) + enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP; + if (ENIC_SETTING(enic, RSSHASH_IPV6)) + /* + * The VIC adapter can perform RSS on IPv6 packets with and + * without extension headers. An IPv6 "fragment" is an IPv6 + * packet with the fragment extension header. + */ + enic->flow_type_rss_offloads |= ETH_RSS_IPV6 | + ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 | + ETH_RSS_NONFRAG_IPV6_OTHER; + if (ENIC_SETTING(enic, RSSHASH_TCPIPV6)) + enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_IPV6_TCP_EX; + if (enic->udp_rss_weak) + enic->flow_type_rss_offloads |= + ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_IPV6_UDP_EX; + if (ENIC_SETTING(enic, RSSHASH_UDPIPV4)) + enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP; + if (ENIC_SETTING(enic, RSSHASH_UDPIPV6)) + enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_IPV6_UDP_EX; + + /* Zero offloads if RSS is not enabled */ + if (!ENIC_SETTING(enic, RSS)) + enic->flow_type_rss_offloads = 0; + + enic->vxlan = ENIC_SETTING(enic, VXLAN) && + vnic_dev_capable_vxlan(enic->vdev); + /* + * Default hardware capabilities. enic_dev_init() may add additional + * flags if it enables overlay offloads. + */ + enic->tx_queue_offload_capa = 0; + enic->tx_offload_capa = + enic->tx_queue_offload_capa | + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + enic->rx_offload_capa = + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + enic->tx_offload_mask = + PKT_TX_VLAN_PKT | + PKT_TX_IP_CKSUM | + PKT_TX_L4_MASK | + PKT_TX_TCP_SEG; + return 0; } @@ -161,6 +239,7 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, u8 ig_vlan_strip_en) { + enum vnic_devcmd_cmd cmd; u64 a0, a1; u32 nic_cfg; int wait = 1000; @@ -171,8 +250,8 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, a0 = nic_cfg; a1 = 0; - - return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait); + cmd = enic->nic_cfg_chk ? 
CMD_NIC_CFG_CHK : CMD_NIC_CFG; + return vnic_dev_cmd(enic->vdev, cmd, &a0, &a1, wait); } int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len) @@ -202,7 +281,8 @@ void enic_free_vnic_resources(struct enic *enic) vnic_rq_free(&enic->rq[i]); for (i = 0; i < enic->cq_count; i++) vnic_cq_free(&enic->cq[i]); - vnic_intr_free(&enic->intr); + for (i = 0; i < enic->intr_count; i++) + vnic_intr_free(&enic->intr[i]); } void enic_get_res_counts(struct enic *enic) diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h index cf3a6fde..3786bc0e 100644 --- a/drivers/net/enic/enic_res.h +++ b/drivers/net/enic/enic_res.h @@ -16,6 +16,13 @@ #define ENIC_MIN_RQ_DESCS 64 #define ENIC_MAX_RQ_DESCS 4096 +/* A descriptor ring must have a multiple of 32 descriptors */ +#define ENIC_ALIGN_DESCS 32 +#define ENIC_ALIGN_DESCS_MASK ~(ENIC_ALIGN_DESCS - 1) + +/* Request a completion index every 32 buffers (roughly packets) */ +#define ENIC_WQ_CQ_THRESH 32 + #define ENIC_MIN_MTU 68 /* Does not include (possible) inserted VLAN tag and FCS */ @@ -30,6 +37,21 @@ #define ENIC_NON_TSO_MAX_DESC 16 #define ENIC_DEFAULT_RX_FREE_THRESH 32 #define ENIC_TX_XMIT_MAX 64 +#define ENIC_RX_BURST_MAX 64 + +/* Defaults for dev_info.default_{rx,tx}portconf */ +#define ENIC_DEFAULT_RX_BURST 32 +#define ENIC_DEFAULT_RX_RINGS 1 +#define ENIC_DEFAULT_RX_RING_SIZE 512 +#define ENIC_DEFAULT_TX_BURST 32 +#define ENIC_DEFAULT_TX_RINGS 1 +#define ENIC_DEFAULT_TX_RING_SIZE 512 + +#define ENIC_RSS_DEFAULT_CPU 0 +#define ENIC_RSS_BASE_CPU 0 +#define ENIC_RSS_HASH_BITS 7 +#define ENIC_RSS_RETA_SIZE (1 << ENIC_RSS_HASH_BITS) +#define ENIC_RSS_HASH_KEY_SIZE 40 #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0) diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c index 2fe5a3fa..7129e121 100644 --- a/drivers/net/enic/enic_rxtx.c +++ b/drivers/net/enic/enic_rxtx.c @@ -15,15 +15,6 @@ #include #include -#define ENIC_TX_OFFLOAD_MASK ( \ - PKT_TX_VLAN_PKT | \ - PKT_TX_IP_CKSUM | \ - PKT_TX_L4_MASK | \ - PKT_TX_TCP_SEG) - -#define ENIC_TX_OFFLOAD_NOTSUP_MASK \ - (PKT_TX_OFFLOAD_MASK ^ ENIC_TX_OFFLOAD_MASK) - #define RTE_PMD_USE_PREFETCH #ifdef RTE_PMD_USE_PREFETCH @@ -130,30 +121,103 @@ enic_cq_rx_check_err(struct cq_desc *cqd) /* Lookup table to translate RX CQ flags to mbuf flags. */ static inline uint32_t -enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd) +enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl) { struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; uint8_t cqrd_flags = cqrd->flags; + /* + * Odd-numbered entries are for tunnel packets. All packet type info + * applies to the inner packet, and there is no info on the outer + * packet. The outer flags in these entries exist only to avoid + * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf + * afterwards. + * + * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set + RTE_PTYPE_TUNNEL_GRENAT.
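The table that follows pairs every hardware flag combination (always an even index) with a tunneled variant at the next odd index, so a single lookup, cq_type_table[cqrd_flags + tnl], resolves both cases. A toy standalone sketch of that even/odd selection; the PT_* values are illustrative placeholders, not RTE_PTYPE_* bits.

#include <stdint.h>
#include <stdio.h>

/* Illustrative packet-type values; the driver uses RTE_PTYPE_* masks. */
enum { PT_UNKNOWN, PT_IPV4_TCP, PT_IPV4_TCP_TUNNEL };

int main(void)
{
	/* Even index: plain packet. Odd index (+1): tunneled variant. */
	static const uint32_t type_table[4] = {
		[0] = PT_UNKNOWN,
		[1] = PT_UNKNOWN,	/* tunneled, inner type unknown */
		[2] = PT_IPV4_TCP,
		[3] = PT_IPV4_TCP_TUNNEL,
	};
	uint8_t cq_flags = 2;	/* the masked HW flags are always even */
	uint8_t tnl = 1;	/* set when the CQ marks the packet tunneled */

	printf("ptype = %u\n", (unsigned)type_table[cq_flags + tnl]);
	return 0;
}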
+ */ static const uint32_t cq_type_table[128] __rte_cache_aligned = { [0x00] = RTE_PTYPE_UNKNOWN, + [0x01] = RTE_PTYPE_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER, [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG, + [0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP, + [0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP, + [0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, - [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP, - [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP, + [0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, + [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, + [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG, + [0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP, + [0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP, + [0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, - [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP, - [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP, + [0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, + [0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, + [0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, /* All others reserved */ }; cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6 | 
CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP; - return cq_type_table[cqrd_flags]; + return cq_type_table[cqrd_flags + tnl]; } static inline void @@ -200,10 +264,18 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) uint32_t l4_flags; l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK; - if (enic_cq_rx_desc_ipv4_csum_ok(cqrd)) - pkt_flags |= PKT_RX_IP_CKSUM_GOOD; - else if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) - pkt_flags |= PKT_RX_IP_CKSUM_BAD; + /* + * When overlay offload is enabled, the NIC may + * set ipv4_csum_ok=1 if the inner packet is IPv6. + * So, explicitly check for IPv4 before checking + * ipv4_csum_ok. + */ + if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) { + if (enic_cq_rx_desc_ipv4_csum_ok(cqrd)) + pkt_flags |= PKT_RX_IP_CKSUM_GOOD; + else + pkt_flags |= PKT_RX_IP_CKSUM_BAD; + } if (l4_flags == RTE_PTYPE_L4_UDP || l4_flags == RTE_PTYPE_L4_TCP) { @@ -238,13 +310,14 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, struct vnic_rq *rq; struct enic *enic = vnic_dev_priv(sop_rq->vdev); uint16_t cq_idx; - uint16_t rq_idx; + uint16_t rq_idx, max_rx; uint16_t rq_num; struct rte_mbuf *nmb, *rxmb; uint16_t nb_rx = 0; struct vnic_cq *cq; volatile struct cq_desc *cqd_ptr; uint8_t color; + uint8_t tnl; uint16_t seg_length; struct rte_mbuf *first_seg = sop_rq->pkt_first_seg; struct rte_mbuf *last_seg = sop_rq->pkt_last_seg; @@ -252,19 +325,23 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)]; cq_idx = cq->to_clean; /* index of cqd, rqd, mbuf_table */ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx; + color = cq->last_color; data_rq = &enic->rq[sop_rq->data_queue_idx]; - while (nb_rx < nb_pkts) { + /* Receive until the end of the ring, at most. */ + max_rx = RTE_MIN(nb_pkts, cq->ring.desc_count - cq_idx); + + while (max_rx) { volatile struct rq_enet_desc *rqd_ptr; struct cq_desc cqd; uint8_t packet_error; uint16_t ciflags; + max_rx--; + /* Check for pkts available */ - color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT) - & CQ_DESC_COLOR_MASK; - if (color == cq->last_color) + if ((cqd_ptr->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color) break; /* Get the cq descriptor and extract rq info from it */ @@ -288,13 +365,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, /* Get the mbuf to return and replace with one just allocated */ rxmb = rq->mbuf_ring[rq_idx]; rq->mbuf_ring[rq_idx] = nmb; - - /* Increment cqd, rqd, mbuf_table index */ cq_idx++; - if (unlikely(cq_idx == cq->ring.desc_count)) { - cq_idx = 0; - cq->last_color = cq->last_color ? 0 : 1; - } /* Prefetch next mbuf & desc while processing current one */ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx; @@ -336,10 +407,22 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, continue; } + /* + * When overlay offload is enabled, CQ.fcoe indicates the + * packet is tunnelled.
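Because the burst above is clamped to the end of the ring, the color toggle happens once per wrap instead of once per descriptor: the NIC stamps each completion with the current color bit, and a descriptor is new only while that bit differs from the driver's last_color. A self-contained sketch of the ownership protocol; DEMO_COLOR_MASK stands in for CQ_DESC_COLOR_MASK_NOSHIFT.

#include <stdint.h>
#include <stdio.h>

#define DEMO_COLOR_MASK	0x80	/* cf. CQ_DESC_COLOR_MASK_NOSHIFT */
#define DEMO_RING_SIZE	8

int main(void)
{
	uint8_t type_color[DEMO_RING_SIZE] = {0};
	uint8_t last_color = 0;	/* the driver's notion of the "old" color */
	unsigned int cq_idx = 0, i;

	/* The NIC "writes" three completions with the opposite color. */
	for (i = 0; i < 3; i++)
		type_color[i] = DEMO_COLOR_MASK;

	/* Consume until a descriptor still carries the old color. */
	while ((type_color[cq_idx] & DEMO_COLOR_MASK) != last_color) {
		printf("descriptor %u is ready\n", cq_idx);
		if (++cq_idx == DEMO_RING_SIZE) {
			cq_idx = 0;
			/* Wrapped: what was "new" is now "old". */
			last_color ^= DEMO_COLOR_MASK;
		}
	}
	return 0;
}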
+ */ + tnl = enic->overlay_offload && + (ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0; /* cq rx flags are only valid if eop bit is set */ - first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); + first_seg->packet_type = + enic_cq_rx_flags_to_pkt_type(&cqd, tnl); enic_cq_rx_to_pkt_flags(&cqd, first_seg); + /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */ + if (tnl) { + first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK | + RTE_PTYPE_L4_MASK); + } if (unlikely(packet_error)) { rte_pktmbuf_free(first_seg); rte_atomic64_inc(&enic->soft_stats.rx_packet_errors); @@ -354,6 +437,10 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, /* store the mbuf address into the next entry of the array */ rx_pkts[nb_rx++] = first_seg; } + if (unlikely(cq_idx == cq->ring.desc_count)) { + cq_idx = 0; + cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT; + } sop_rq->pkt_first_seg = first_seg; sop_rq->pkt_last_seg = last_seg; @@ -387,9 +474,123 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, return nb_rx; } +uint16_t +enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct rte_mbuf *mb, **rx, **rxmb; + uint16_t cq_idx, nb_rx, max_rx; + struct cq_enet_rq_desc *cqd; + struct rq_enet_desc *rqd; + unsigned int port_id; + struct vnic_cq *cq; + struct vnic_rq *rq; + struct enic *enic; + uint8_t color; + bool overlay; + bool tnl; + + rq = rx_queue; + enic = vnic_dev_priv(rq->vdev); + cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + cq_idx = cq->to_clean; + + /* + * Fill up the reserve of free mbufs. Below, we restock the receive + * ring with these mbufs to avoid allocation failures. + */ + if (rq->num_free_mbufs == 0) { + if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs, + ENIC_RX_BURST_MAX)) + return 0; + rq->num_free_mbufs = ENIC_RX_BURST_MAX; + } + + /* Receive until the end of the ring, at most. 
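The no-scatter receive path above keeps a burst-sized reserve of mbufs, refilled with a single bulk call, so an allocation failure rejects the whole burst up front instead of leaving holes in the ring. A standalone sketch of the reserve-then-consume pattern; demo_get_bulk() imitates the all-or-nothing contract of rte_mempool_get_bulk() and always succeeds here.

#include <stdio.h>

#define DEMO_BURST_MAX 64	/* cf. ENIC_RX_BURST_MAX */

struct demo_rq {
	void *free_bufs[DEMO_BURST_MAX];
	unsigned int num_free;
};

/* Stand-in for rte_mempool_get_bulk(): all-or-nothing, 0 on success. */
static int demo_get_bulk(void **bufs, unsigned int n)
{
	static char storage[DEMO_BURST_MAX][64];
	unsigned int i;

	for (i = 0; i < n; i++)
		bufs[i] = storage[i];
	return 0;
}

/* Refill the reserve only when empty; a failure rejects the whole
 * burst up front instead of leaving holes mid-ring. */
static unsigned int demo_rx_burst(struct demo_rq *rq, unsigned int nb_pkts)
{
	if (rq->num_free == 0) {
		if (demo_get_bulk(rq->free_bufs, DEMO_BURST_MAX))
			return 0;
		rq->num_free = DEMO_BURST_MAX;
	}
	if (nb_pkts > rq->num_free)
		nb_pkts = rq->num_free;
	rq->num_free -= nb_pkts;	/* handed out while restocking the ring */
	return nb_pkts;
}

int main(void)
{
	struct demo_rq rq = { .num_free = 0 };
	unsigned int got = demo_rx_burst(&rq, 32);

	printf("received %u, reserve left %u\n", got, rq.num_free);
	return 0;
}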
*/ + max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs); + max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx); + + cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx; + color = cq->last_color; + rxmb = rq->mbuf_ring + cq_idx; + port_id = enic->port_id; + overlay = enic->overlay_offload; + + rx = rx_pkts; + while (max_rx) { + max_rx--; + if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color) + break; + if (unlikely(cqd->bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) { + rte_pktmbuf_free(*rxmb++); + rte_atomic64_inc(&enic->soft_stats.rx_packet_errors); + cqd++; + continue; + } + + mb = *rxmb++; + /* prefetch mbuf data for caller */ + rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr, + RTE_PKTMBUF_HEADROOM)); + mb->data_len = cqd->bytes_written_flags & + CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; + mb->pkt_len = mb->data_len; + mb->port = port_id; + tnl = overlay && (cqd->completed_index_flags & + CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0; + mb->packet_type = + enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd, + tnl); + enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb); + /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */ + if (tnl) { + mb->packet_type &= ~(RTE_PTYPE_L3_MASK | + RTE_PTYPE_L4_MASK); + } + cqd++; + *rx++ = mb; + } + /* Number of descriptors visited */ + nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx; + if (nb_rx == 0) + return 0; + rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx; + rxmb = rq->mbuf_ring + cq_idx; + cq_idx += nb_rx; + rq->rx_nb_hold += nb_rx; + if (unlikely(cq_idx == cq->ring.desc_count)) { + cq_idx = 0; + cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT; + } + cq->to_clean = cq_idx; + + memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs, + sizeof(struct rte_mbuf *) * nb_rx); + rq->num_free_mbufs -= nb_rx; + while (nb_rx) { + nb_rx--; + mb = *rxmb++; + mb->data_off = RTE_PKTMBUF_HEADROOM; + rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM; + rqd++; + } + if (rq->rx_nb_hold > rq->rx_free_thresh) { + rq->posted_index = enic_ring_add(rq->ring.desc_count, + rq->posted_index, + rq->rx_nb_hold); + rq->rx_nb_hold = 0; + rte_wmb(); + iowrite32_relaxed(rq->posted_index, + &rq->ctrl->posted_index); + } + + return rx - rx_pkts; +} + static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index) { - struct vnic_wq_buf *buf; + struct rte_mbuf *buf; struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS]; unsigned int nb_to_free, nb_free = 0, i; struct rte_mempool *pool; @@ -399,13 +600,10 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index) nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index) + 1; tail_idx = wq->tail_idx; - buf = &wq->bufs[tail_idx]; - pool = ((struct rte_mbuf *)buf->mb)->pool; + pool = wq->bufs[tail_idx]->pool; for (i = 0; i < nb_to_free; i++) { - buf = &wq->bufs[tail_idx]; - m = rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb)); - buf->mb = NULL; - + buf = wq->bufs[tail_idx]; + m = rte_pktmbuf_prefree_seg(buf); if (unlikely(m == NULL)) { tail_idx = enic_ring_incr(desc_count, tail_idx); continue; @@ -443,9 +641,10 @@ unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq) return 0; } -uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, +uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { + struct vnic_wq *wq = (struct vnic_wq *)tx_queue; int32_t ret; uint16_t i; uint64_t ol_flags; @@ -453,9 +652,13 @@ uint16_t enic_prep_pkts(__rte_unused void *tx_queue, 
struct rte_mbuf **tx_pkts, for (i = 0; i != nb_pkts; i++) { m = tx_pkts[i]; + if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) { + rte_errno = EINVAL; + return i; + } ol_flags = m->ol_flags; - if (ol_flags & ENIC_TX_OFFLOAD_NOTSUP_MASK) { - rte_errno = -ENOTSUP; + if (ol_flags & wq->tx_offload_notsup_mask) { + rte_errno = ENOTSUP; return i; } #ifdef RTE_LIBRTE_ETHDEV_DEBUG @@ -489,12 +692,11 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint64_t ol_flags_mask; unsigned int wq_desc_avail; int head_idx; - struct vnic_wq_buf *buf; unsigned int desc_count; struct wq_enet_desc *descs, *desc_p, desc_tmp; uint16_t mss; uint8_t vlan_tag_insert; - uint8_t eop; + uint8_t eop, cq; uint64_t bus_addr; uint8_t offload_mode; uint16_t header_len; @@ -558,6 +760,11 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, offload_mode = WQ_ENET_OFFLOAD_MODE_TSO; mss = tx_pkt->tso_segsz; + /* For tunnel, need the size of outer+inner headers */ + if (ol_flags & PKT_TX_TUNNEL_MASK) { + header_len += tx_pkt->outer_l2_len + + tx_pkt->outer_l3_len; + } } if ((ol_flags & ol_flags_mask) && (header_len == 0)) { @@ -572,15 +779,18 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, break; } } - - + wq->cq_pend++; + cq = 0; + if (eop && wq->cq_pend >= ENIC_WQ_CQ_THRESH) { + cq = 1; + wq->cq_pend = 0; + } wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len, - offload_mode, eop, eop, 0, vlan_tag_insert, + offload_mode, eop, cq, 0, vlan_tag_insert, vlan_id, 0); *desc_p = desc_tmp; - buf = &wq->bufs[head_idx]; - buf->mb = (void *)tx_pkt; + wq->bufs[head_idx] = tx_pkt; head_idx = enic_ring_incr(desc_count, head_idx); wq_desc_avail--; @@ -589,20 +799,26 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, tx_pkt->next) { data_len = tx_pkt->data_len; - if (tx_pkt->next == NULL) + wq->cq_pend++; + cq = 0; + if (tx_pkt->next == NULL) { eop = 1; + if (wq->cq_pend >= ENIC_WQ_CQ_THRESH) { + cq = 1; + wq->cq_pend = 0; + } + } desc_p = descs + head_idx; bus_addr = (dma_addr_t)(tx_pkt->buf_iova + tx_pkt->data_off); wq_enet_desc_enc((struct wq_enet_desc *) &desc_tmp, bus_addr, data_len, - mss, 0, offload_mode, eop, eop, + mss, 0, offload_mode, eop, cq, 0, vlan_tag_insert, vlan_id, 0); *desc_p = desc_tmp; - buf = &wq->bufs[head_idx]; - buf->mb = (void *)tx_pkt; + wq->bufs[head_idx] = tx_pkt; head_idx = enic_ring_incr(desc_count, head_idx); wq_desc_avail--; } @@ -618,4 +834,81 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, return index; } +static void enqueue_simple_pkts(struct rte_mbuf **pkts, + struct wq_enet_desc *desc, + uint16_t n, + struct enic *enic) +{ + struct rte_mbuf *p; + + while (n) { + n--; + p = *pkts++; + desc->address = p->buf_iova + p->data_off; + desc->length = p->pkt_len; + /* + * The app should not send oversized + * packets. tx_pkt_prepare includes a check as + * well. But some apps ignore the device max size and + * tx_pkt_prepare. Oversized packets cause WQ errors + * and the NIC ends up disabling the whole WQ. So + * truncate packets.
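Rather than requesting a completion per packet, the transmit paths above set a descriptor's completion bit only on an end-of-packet descriptor and only after ENIC_WQ_CQ_THRESH buffers have accumulated since the last request. A standalone sketch of that batching logic:

#include <stdio.h>

#define DEMO_WQ_CQ_THRESH 32	/* cf. ENIC_WQ_CQ_THRESH */

struct demo_wq { unsigned int cq_pend; };

/* Per posted buffer: ask the NIC for a completion only on end-of-packet,
 * and only once enough buffers accumulated since the last request. */
static int demo_want_completion(struct demo_wq *wq, int eop)
{
	wq->cq_pend++;
	if (eop && wq->cq_pend >= DEMO_WQ_CQ_THRESH) {
		wq->cq_pend = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct demo_wq wq = { .cq_pend = 0 };
	unsigned int i, completions = 0;

	/* 128 single-segment packets -> one completion per 32 buffers. */
	for (i = 0; i < 128; i++)
		completions += demo_want_completion(&wq, 1 /* eop */);
	printf("completions requested: %u\n", completions);
	return 0;
}

With 128 single-segment packets and a threshold of 32, the sketch requests four completions, which is the write-amplification saving the patch is after.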
+ */ + if (unlikely(p->pkt_len > ENIC_TX_MAX_PKT_SIZE)) { + desc->length = ENIC_TX_MAX_PKT_SIZE; + rte_atomic64_inc(&enic->soft_stats.tx_oversized); + } + desc++; + } +} + +uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + unsigned int head_idx, desc_count; + struct wq_enet_desc *desc; + struct vnic_wq *wq; + struct enic *enic; + uint16_t rem, n; + + wq = (struct vnic_wq *)tx_queue; + enic = vnic_dev_priv(wq->vdev); + enic_cleanup_wq(enic, wq); + /* Will enqueue this many packets in this call */ + nb_pkts = RTE_MIN(nb_pkts, wq->ring.desc_avail); + if (nb_pkts == 0) + return 0; + + head_idx = wq->head_idx; + desc_count = wq->ring.desc_count; + + /* Descriptors until the end of the ring */ + n = desc_count - head_idx; + n = RTE_MIN(nb_pkts, n); + /* Save mbuf pointers to free later */ + memcpy(wq->bufs + head_idx, tx_pkts, sizeof(struct rte_mbuf *) * n); + + /* Enqueue until the ring end */ + rem = nb_pkts - n; + desc = ((struct wq_enet_desc *)wq->ring.descs) + head_idx; + enqueue_simple_pkts(tx_pkts, desc, n, enic); + + /* Wrap to the start of the ring */ + if (rem) { + tx_pkts += n; + memcpy(wq->bufs, tx_pkts, sizeof(struct rte_mbuf *) * rem); + desc = (struct wq_enet_desc *)wq->ring.descs; + enqueue_simple_pkts(tx_pkts, desc, rem, enic); + } + rte_wmb(); + + /* Update head_idx and desc_avail */ + wq->ring.desc_avail -= nb_pkts; + head_idx += nb_pkts; + if (head_idx >= desc_count) + head_idx -= desc_count; + wq->head_idx = head_idx; + iowrite32_relaxed(head_idx, &wq->ctrl->posted_index); + return nb_pkts; +} diff --git a/drivers/net/enic/meson.build b/drivers/net/enic/meson.build new file mode 100644 index 00000000..bfd4e237 --- /dev/null +++ b/drivers/net/enic/meson.build @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cisco Systems, Inc. + +sources = files( + 'base/vnic_cq.c', + 'base/vnic_dev.c', + 'base/vnic_intr.c', + 'base/vnic_rq.c', + 'base/vnic_rss.c', + 'base/vnic_wq.c', + 'enic_clsf.c', + 'enic_ethdev.c', + 'enic_flow.c', + 'enic_main.c', + 'enic_res.c', + 'enic_rxtx.c', + ) +deps += ['hash'] +includes += include_directories('base') diff --git a/drivers/net/failsafe/Makefile b/drivers/net/failsafe/Makefile index bd2f0198..81802d09 100644 --- a/drivers/net/failsafe/Makefile +++ b/drivers/net/failsafe/Makefile @@ -1,33 +1,6 @@ -# BSD LICENSE -# -# Copyright 2017 6WIND S.A. -# Copyright 2017 Mellanox. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND S.A. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2017 6WIND S.A. +# Copyright 2017 Mellanox Technologies, Ltd include $(RTE_SDK)/mk/rte.vars.mk diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c index c499bfb9..657919f9 100644 --- a/drivers/net/failsafe/failsafe.c +++ b/drivers/net/failsafe/failsafe.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -13,6 +13,8 @@ #include "failsafe_private.h" +int failsafe_logtype; + const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME; static const struct rte_eth_link eth_link = { .link_speed = ETH_SPEED_NUM_10G, @@ -202,16 +204,25 @@ fs_eth_dev_create(struct rte_vdev_device *vdev) } snprintf(priv->my_owner.name, sizeof(priv->my_owner.name), FAILSAFE_OWNER_NAME); + DEBUG("Failsafe port %u owner info: %s_%016"PRIX64, dev->data->port_id, + priv->my_owner.name, priv->my_owner.id); + ret = rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_NEW, + failsafe_eth_new_event_callback, + dev); + if (ret) { + ERROR("Failed to register NEW callback"); + goto free_args; + } ret = failsafe_eal_init(dev); if (ret) - goto free_args; + goto unregister_new_callback; ret = fs_mutex_init(priv); if (ret) - goto free_args; + goto unregister_new_callback; ret = failsafe_hotplug_alarm_install(dev); if (ret) { ERROR("Could not set up plug-in event detection"); - goto free_args; + goto unregister_new_callback; } mac = &dev->data->mac_addrs[0]; if (mac_from_arg) { @@ -224,7 +235,7 @@ fs_eth_dev_create(struct rte_vdev_device *vdev) mac); if (ret) { ERROR("Failed to set default MAC address"); - goto free_args; + goto cancel_alarm; } } } else { @@ -257,7 +268,13 @@ fs_eth_dev_create(struct rte_vdev_device *vdev) .fd = -1, .type = RTE_INTR_HANDLE_EXT, }; + rte_eth_dev_probing_finish(dev); return 0; +cancel_alarm: + failsafe_hotplug_alarm_cancel(dev); +unregister_new_callback: + rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW, + failsafe_eth_new_event_callback, dev); free_args: failsafe_args_free(dev); free_subs: @@ -277,6 +294,8 @@ fs_rte_eth_free(const char *name) dev = rte_eth_dev_allocated(name); if (dev == NULL) return -ENODEV; + rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW, + failsafe_eth_new_event_callback, dev); ret = failsafe_eal_uninit(dev); if (ret) ERROR("Error while uninitializing sub-EAL"); @@ -294,10 +313,26 @@ static int rte_pmd_failsafe_probe(struct rte_vdev_device *vdev) { const char *name; + struct rte_eth_dev *eth_dev; name = rte_vdev_device_name(vdev); INFO("Initializing " FAILSAFE_DRIVER_NAME " for %s", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(vdev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + ERROR("Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &failsafe_ops; + eth_dev->device 
= &vdev->device; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + return fs_eth_dev_create(vdev); } @@ -318,3 +353,10 @@ static struct rte_vdev_driver failsafe_drv = { RTE_PMD_REGISTER_VDEV(net_failsafe, failsafe_drv); RTE_PMD_REGISTER_PARAM_STRING(net_failsafe, PMD_FAILSAFE_PARAM_STRING); + +RTE_INIT(failsafe_init_log) +{ + failsafe_logtype = rte_log_register("pmd.net.failsafe"); + if (failsafe_logtype >= 0) + rte_log_set_level(failsafe_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/failsafe/failsafe_args.c b/drivers/net/failsafe/failsafe_args.c index 366dbea1..626883ce 100644 --- a/drivers/net/failsafe/failsafe_args.c +++ b/drivers/net/failsafe/failsafe_args.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -14,6 +14,7 @@ #include #include #include +#include #include "failsafe_private.h" @@ -62,7 +63,7 @@ fs_parse_device(struct sub_device *sdev, char *args) d = &sdev->devargs; DEBUG("%s", args); - ret = rte_eal_devargs_parse(args, d); + ret = rte_devargs_parse(d, args); if (ret) { DEBUG("devargs parsing failed with code %d", ret); return ret; } @@ -340,7 +341,7 @@ fs_remove_sub_devices_definition(char params[DEVARGS_MAXLEN]) a = b + 1; } out: - snprintf(params, DEVARGS_MAXLEN, "%s", buffer); + strlcpy(params, buffer, DEVARGS_MAXLEN); return 0; } @@ -392,7 +393,7 @@ failsafe_args_parse(struct rte_eth_dev *dev, const char *params) ret = 0; priv->subs_tx = FAILSAFE_MAX_ETHPORTS; /* default parameters */ - n = snprintf(mut_params, sizeof(mut_params), "%s", params); + n = strlcpy(mut_params, params, sizeof(mut_params)); if (n >= sizeof(mut_params)) { ERROR("Parameter string too long (>=%zu)", sizeof(mut_params)); diff --git a/drivers/net/failsafe/failsafe_eal.c b/drivers/net/failsafe/failsafe_eal.c index c3d67312..ce1633f1 100644 --- a/drivers/net/failsafe/failsafe_eal.c +++ b/drivers/net/failsafe/failsafe_eal.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -18,8 +18,9 @@ fs_ethdev_portid_get(const char *name, uint16_t *port_id) return -EINVAL; } len = strlen(name); - RTE_ETH_FOREACH_DEV(pid) { - if (!strncmp(name, rte_eth_devices[pid].device->name, len)) { + for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) { + if (rte_eth_dev_is_valid_port(pid) && + !strncmp(name, rte_eth_devices[pid].device->name, len)) { *port_id = pid; return 0; } @@ -41,6 +42,8 @@ fs_bus_init(struct rte_eth_dev *dev) continue; da = &sdev->devargs; if (fs_ethdev_portid_get(da->name, &pid) != 0) { + struct rte_eth_dev_owner pid_owner; + ret = rte_eal_hotplug_add(da->bus->name, da->name, da->args); @@ -55,12 +58,26 @@ fs_bus_init(struct rte_eth_dev *dev) ERROR("sub_device %d init went wrong", i); return -ENODEV; } + /* + * The NEW callback tried to take ownership, check + * whether it succeeded or not. + */ + rte_eth_dev_owner_get(pid, &pid_owner); + if (pid_owner.id != PRIV(dev)->my_owner.id) { + INFO("sub_device %d owner(%s_%016"PRIX64") is not mine," + " my owner(%s_%016"PRIX64"), will try again later", + i, pid_owner.name, pid_owner.id, + PRIV(dev)->my_owner.name, + PRIV(dev)->my_owner.id); + continue; + } } else { + /* The sub-device port was found. */ char devstr[DEVARGS_MAXLEN] = ""; struct rte_devargs *probed_da = rte_eth_devices[pid].device->devargs; - /* Take control of device probed by EAL options. */ + /* Take control of probed device.
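During probe, failsafe registers failsafe_eth_new_event_callback() so that ownership of a matching port is grabbed as soon as the port appears; fs_bus_init() above then re-reads the port's owner to verify the grab actually won. A standalone sketch of that verification step; demo_owner and demo_owner_get() are simplified stand-ins for rte_eth_dev_owner and rte_eth_dev_owner_get().

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct rte_eth_dev_owner. */
struct demo_owner {
	uint64_t id;
	char name[32];
};

/* Stand-in for rte_eth_dev_owner_get(): report the port's current owner. */
static void demo_owner_get(const struct demo_owner *port_owner,
			   struct demo_owner *out)
{
	*out = *port_owner;
}

int main(void)
{
	struct demo_owner me = { .id = 0x1234, .name = "net_failsafe" };
	struct demo_owner port = { .id = 0x1234, .name = "net_failsafe" };
	struct demo_owner seen;

	/* The NEW callback raced to take ownership at probe time;
	 * verify it actually succeeded before adopting the port. */
	demo_owner_get(&port, &seen);
	if (seen.id != me.id)
		printf("owner %s_%016" PRIX64 " is not mine, retry later\n",
		       seen.name, seen.id);
	else
		printf("sub-device adopted\n");
	return 0;
}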
*/ free(da->args); memset(da, 0, sizeof(*da)); if (probed_da != NULL) @@ -69,7 +86,7 @@ fs_bus_init(struct rte_eth_dev *dev) else snprintf(devstr, sizeof(devstr), "%s", rte_eth_devices[pid].device->name); - ret = rte_eal_devargs_parse(devstr, da); + ret = rte_devargs_parse(da, devstr); if (ret) { ERROR("Probed devargs parsing failed with code" " %d", ret); @@ -77,28 +94,28 @@ fs_bus_init(struct rte_eth_dev *dev) } INFO("Taking control of a probed sub device" " %d named %s", i, da->name); - } - ret = rte_eth_dev_owner_set(pid, &PRIV(dev)->my_owner); - if (ret < 0) { - INFO("sub_device %d owner set failed (%s)," - " will try again later", i, strerror(-ret)); - continue; - } else if (strncmp(rte_eth_devices[pid].device->name, da->name, - strlen(da->name)) != 0) { - /* - * The device probably was removed and its port id was - * reallocated before ownership set. - */ - rte_eth_dev_owner_unset(pid, PRIV(dev)->my_owner.id); - INFO("sub_device %d was probably removed before taking" - " ownership, will try again later", i); - continue; + ret = rte_eth_dev_owner_set(pid, &PRIV(dev)->my_owner); + if (ret < 0) { + INFO("sub_device %d owner set failed (%s), " + "will try again later", i, strerror(-ret)); + continue; + } else if (strncmp(rte_eth_devices[pid].device->name, + da->name, strlen(da->name)) != 0) { + /* + * The device probably was removed and its port + * id was reallocated before ownership set. + */ + rte_eth_dev_owner_unset(pid, + PRIV(dev)->my_owner.id); + INFO("sub_device %d was removed before taking" + " ownership, will try again later", i); + continue; + } } ETH(sdev) = &rte_eth_devices[pid]; SUB_ID(sdev) = i; sdev->fs_dev = dev; sdev->dev = ETH(sdev)->device; - ETH(sdev)->state = RTE_ETH_DEV_DEFERRED; sdev->state = DEV_PROBED; } return 0; diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c index 2c0bf936..5b5cb3b4 100644 --- a/drivers/net/failsafe/failsafe_ether.c +++ b/drivers/net/failsafe/failsafe_ether.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -260,6 +260,7 @@ fs_dev_remove(struct sub_device *sdev) sdev->state = DEV_ACTIVE; /* fallthrough */ case DEV_ACTIVE: + failsafe_eth_dev_unregister_callbacks(sdev); rte_eth_dev_close(PORT_ID(sdev)); sdev->state = DEV_PROBED; /* fallthrough */ @@ -320,6 +321,35 @@ fs_rxtx_clean(struct sub_device *sdev) return 1; } +void +failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev) +{ + int ret; + + if (sdev == NULL) + return; + if (sdev->rmv_callback) { + ret = rte_eth_dev_callback_unregister(PORT_ID(sdev), + RTE_ETH_EVENT_INTR_RMV, + failsafe_eth_rmv_event_callback, + sdev); + if (ret) + WARN("Failed to unregister RMV callback for sub_device" + " %d", SUB_ID(sdev)); + sdev->rmv_callback = 0; + } + if (sdev->lsc_callback) { + ret = rte_eth_dev_callback_unregister(PORT_ID(sdev), + RTE_ETH_EVENT_INTR_LSC, + failsafe_eth_lsc_event_callback, + sdev); + if (ret) + WARN("Failed to unregister LSC callback for sub_device" + " %d", SUB_ID(sdev)); + sdev->lsc_callback = 0; + } +} + void failsafe_dev_remove(struct rte_eth_dev *dev) { @@ -463,3 +493,26 @@ failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused, else return 0; } + +/* Take sub-device ownership before it becomes exposed to the application. 
*/ +int +failsafe_eth_new_event_callback(uint16_t port_id, + enum rte_eth_event_type event __rte_unused, + void *cb_arg, void *out __rte_unused) +{ + struct rte_eth_dev *fs_dev = cb_arg; + struct sub_device *sdev; + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + uint8_t i; + + FOREACH_SUBDEV_STATE(sdev, i, fs_dev, DEV_PARSED) { + if (sdev->state >= DEV_PROBED) + continue; + if (strcmp(sdev->devargs.name, dev->device->name) != 0) + continue; + rte_eth_dev_owner_set(port_id, &PRIV(fs_dev)->my_owner); + /* The actual owner will be checked after the port probing. */ + break; + } + return 0; +} diff --git a/drivers/net/failsafe/failsafe_flow.c b/drivers/net/failsafe/failsafe_flow.c index ec8c909b..bfe42fce 100644 --- a/drivers/net/failsafe/failsafe_flow.c +++ b/drivers/net/failsafe/failsafe_flow.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -174,7 +174,7 @@ fs_flow_flush(struct rte_eth_dev *dev, static int fs_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, - enum rte_flow_action_type type, + const struct rte_flow_action *action, void *arg, struct rte_flow_error *error) { @@ -185,7 +185,7 @@ fs_flow_query(struct rte_eth_dev *dev, if (sdev != NULL) { int ret = rte_flow_query(PORT_ID(sdev), flow->flows[SUB_ID(sdev)], - type, arg, error); + action, arg, error); if ((ret = fs_err(sdev, ret))) { fs_unlock(dev, 0); diff --git a/drivers/net/failsafe/failsafe_intr.c b/drivers/net/failsafe/failsafe_intr.c index 6b7f9c1a..fc6ec37f 100644 --- a/drivers/net/failsafe/failsafe_intr.c +++ b/drivers/net/failsafe/failsafe_intr.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2018 Mellanox Technologies, Ltd. + * Copyright 2018 Mellanox Technologies, Ltd */ /** diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c index 057e435c..24e91c93 100644 --- a/drivers/net/failsafe/failsafe_ops.c +++ b/drivers/net/failsafe/failsafe_ops.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. 
+ * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -13,6 +13,7 @@ #include #include #include +#include #include "failsafe_private.h" @@ -81,30 +82,22 @@ static struct rte_eth_dev_info default_infos = { DEV_TX_OFFLOAD_MULTI_SEGS | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM, - .flow_type_rss_offloads = 0x0, + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO, + .flow_type_rss_offloads = + ETH_RSS_IP | + ETH_RSS_UDP | + ETH_RSS_TCP, }; static int fs_dev_configure(struct rte_eth_dev *dev) { struct sub_device *sdev; - uint64_t supp_tx_offloads; - uint64_t tx_offloads; uint8_t i; int ret; fs_lock(dev, 0); - supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa; - tx_offloads = dev->data->dev_conf.txmode.offloads; - if ((tx_offloads & supp_tx_offloads) != tx_offloads) { - rte_errno = ENOTSUP; - ERROR("Some Tx offloads are not supported, " - "requested 0x%" PRIx64 " supported 0x%" PRIx64, - tx_offloads, supp_tx_offloads); - fs_unlock(dev, 0); - return -rte_errno; - } FOREACH_SUBDEV(sdev, i, dev) { int rmv_interrupt = 0; int lsc_interrupt = 0; @@ -145,7 +138,7 @@ fs_dev_configure(struct rte_eth_dev *dev) fs_unlock(dev, 0); return ret; } - if (rmv_interrupt) { + if (rmv_interrupt && sdev->rmv_callback == 0) { ret = rte_eth_dev_callback_register(PORT_ID(sdev), RTE_ETH_EVENT_INTR_RMV, failsafe_eth_rmv_event_callback, @@ -153,9 +146,11 @@ fs_dev_configure(struct rte_eth_dev *dev) if (ret) WARN("Failed to register RMV callback for sub_device %d", SUB_ID(sdev)); + else + sdev->rmv_callback = 1; } dev->data->dev_conf.intr_conf.rmv = 0; - if (lsc_interrupt) { + if (lsc_interrupt && sdev->lsc_callback == 0) { ret = rte_eth_dev_callback_register(PORT_ID(sdev), RTE_ETH_EVENT_INTR_LSC, failsafe_eth_lsc_event_callback, @@ -163,6 +158,8 @@ fs_dev_configure(struct rte_eth_dev *dev) if (ret) WARN("Failed to register LSC callback for sub_device %d", SUB_ID(sdev)); + else + sdev->lsc_callback = 1; } dev->data->dev_conf.intr_conf.lsc = lsc_enabled; sdev->state = DEV_ACTIVE; @@ -289,6 +286,7 @@ fs_dev_close(struct rte_eth_dev *dev) PRIV(dev)->state = DEV_ACTIVE - 1; FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { DEBUG("Closing sub_device %d", i); + failsafe_eth_dev_unregister_callbacks(sdev); rte_eth_dev_close(PORT_ID(sdev)); sdev->state = DEV_ACTIVE - 1; } @@ -296,25 +294,6 @@ fs_dev_close(struct rte_eth_dev *dev) fs_unlock(dev, 0); } -static bool -fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads) -{ - uint64_t port_offloads; - uint64_t queue_supp_offloads; - uint64_t port_supp_offloads; - - port_offloads = dev->data->dev_conf.rxmode.offloads; - queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa; - port_supp_offloads = PRIV(dev)->infos.rx_offload_capa; - if ((offloads & (queue_supp_offloads | port_supp_offloads)) != - offloads) - return false; - /* Verify we have no conflict with port offloads */ - if ((port_offloads ^ offloads) & port_supp_offloads) - return false; - return true; -} - static void fs_rx_queue_release(void *queue) { @@ -367,19 +346,6 @@ fs_rx_queue_setup(struct rte_eth_dev *dev, fs_rx_queue_release(rxq); dev->data->rx_queues[rx_queue_id] = NULL; } - /* Verify application offloads are valid for our port and queue. 
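The rmv_callback and lsc_callback bits introduced by this patch make callback handling idempotent: fs_dev_configure() above registers each handler at most once, and failsafe_eth_dev_unregister_callbacks() removes only what was actually registered. A standalone sketch of the guard pattern; the real rte_eth_dev_callback_register/unregister calls are elided as comments.

#include <stdio.h>

/* Mirrors the bitfields added to struct sub_device in this patch. */
struct demo_sub_device {
	unsigned int rmv_callback:1;
	unsigned int lsc_callback:1;
};

/* Register-once guard, as in fs_dev_configure(). */
static void demo_register_rmv(struct demo_sub_device *sdev)
{
	if (sdev->rmv_callback)
		return;		/* already registered, don't double up */
	/* ... rte_eth_dev_callback_register(...) would go here ... */
	sdev->rmv_callback = 1;
}

/* Unregister-if-registered, as in failsafe_eth_dev_unregister_callbacks(). */
static void demo_unregister_all(struct demo_sub_device *sdev)
{
	if (sdev->rmv_callback) {
		/* ... rte_eth_dev_callback_unregister(...) ... */
		sdev->rmv_callback = 0;
	}
	if (sdev->lsc_callback) {
		/* ... */
		sdev->lsc_callback = 0;
	}
}

int main(void)
{
	struct demo_sub_device sdev = {0};

	demo_register_rmv(&sdev);
	demo_register_rmv(&sdev);	/* harmless no-op the second time */
	demo_unregister_all(&sdev);
	printf("rmv=%u lsc=%u\n", (unsigned)sdev.rmv_callback,
	       (unsigned)sdev.lsc_callback);
	return 0;
}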
*/ - if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) { - rte_errno = ENOTSUP; - ERROR("Rx queue offloads 0x%" PRIx64 - " don't match port offloads 0x%" PRIx64 - " or supported offloads 0x%" PRIx64, - rx_conf->offloads, - dev->data->dev_conf.rxmode.offloads, - PRIV(dev)->infos.rx_offload_capa | - PRIV(dev)->infos.rx_queue_offload_capa); - fs_unlock(dev, 0); - return -rte_errno; - } rxq = rte_zmalloc(NULL, sizeof(*rxq) + sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail, @@ -498,25 +464,6 @@ unlock: return rc; } -static bool -fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads) -{ - uint64_t port_offloads; - uint64_t queue_supp_offloads; - uint64_t port_supp_offloads; - - port_offloads = dev->data->dev_conf.txmode.offloads; - queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa; - port_supp_offloads = PRIV(dev)->infos.tx_offload_capa; - if ((offloads & (queue_supp_offloads | port_supp_offloads)) != - offloads) - return false; - /* Verify we have no conflict with port offloads */ - if ((port_offloads ^ offloads) & port_supp_offloads) - return false; - return true; -} - static void fs_tx_queue_release(void *queue) { @@ -556,24 +503,6 @@ fs_tx_queue_setup(struct rte_eth_dev *dev, fs_tx_queue_release(txq); dev->data->tx_queues[tx_queue_id] = NULL; } - /* - * Don't verify queue offloads for applications which - * use the old API. - */ - if (tx_conf != NULL && - (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) && - fs_txq_offloads_valid(dev, tx_conf->offloads) == false) { - rte_errno = ENOTSUP; - ERROR("Tx queue offloads 0x%" PRIx64 - " don't match port offloads 0x%" PRIx64 - " or supported offloads 0x%" PRIx64, - tx_conf->offloads, - dev->data->dev_conf.txmode.offloads, - PRIV(dev)->infos.tx_offload_capa | - PRIV(dev)->infos.tx_queue_offload_capa); - fs_unlock(dev, 0); - return -rte_errno; - } txq = rte_zmalloc("ethdev TX queue", sizeof(*txq) + sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail, @@ -804,26 +733,29 @@ fs_dev_infos_get(struct rte_eth_dev *dev, } else { uint64_t rx_offload_capa; uint64_t rxq_offload_capa; + uint64_t rss_hf_offload_capa; rx_offload_capa = default_infos.rx_offload_capa; rxq_offload_capa = default_infos.rx_queue_offload_capa; + rss_hf_offload_capa = default_infos.flow_type_rss_offloads; FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos); rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa; rxq_offload_capa &= PRIV(dev)->infos.rx_queue_offload_capa; + rss_hf_offload_capa &= + PRIV(dev)->infos.flow_type_rss_offloads; } sdev = TX_SUBDEV(dev); rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos); PRIV(dev)->infos.rx_offload_capa = rx_offload_capa; PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa; + PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa; PRIV(dev)->infos.tx_offload_capa &= default_infos.tx_offload_capa; PRIV(dev)->infos.tx_queue_offload_capa &= default_infos.tx_queue_offload_capa; - PRIV(dev)->infos.flow_type_rss_offloads &= - default_infos.flow_type_rss_offloads; } rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos)); } @@ -997,16 +929,52 @@ fs_mac_addr_add(struct rte_eth_dev *dev, return 0; } -static void +static int fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { struct sub_device *sdev; uint8_t i; + int ret; fs_lock(dev, 0); - FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) - rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), 
mac_addr); + ret = fs_err(sdev, ret); + if (ret) { + ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d", + i, ret); + fs_unlock(dev, 0); + return ret; + } + } + fs_unlock(dev, 0); + + return 0; +} + +static int +fs_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf); + ret = fs_err(sdev, ret); + if (ret) { + ERROR("Operation rte_eth_dev_rss_hash_update" + " failed for sub_device %d with error %d", + i, ret); + fs_unlock(dev, 0); + return ret; + } + } fs_unlock(dev, 0); + + return 0; } static int @@ -1068,5 +1036,6 @@ const struct eth_dev_ops failsafe_ops = { .mac_addr_remove = fs_mac_addr_remove, .mac_addr_add = fs_mac_addr_add, .mac_addr_set = fs_mac_addr_set, + .rss_hash_update = fs_rss_hash_update, .filter_ctrl = fs_filter_ctrl, }; diff --git a/drivers/net/failsafe/failsafe_private.h b/drivers/net/failsafe/failsafe_private.h index 2d16ba4c..886af861 100644 --- a/drivers/net/failsafe/failsafe_private.h +++ b/drivers/net/failsafe/failsafe_private.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef _RTE_ETH_FAILSAFE_PRIVATE_H_ @@ -119,6 +119,10 @@ struct sub_device { volatile unsigned int remove:1; /* flow isolation state */ int flow_isolated:1; + /* RMV callback registration state */ + unsigned int rmv_callback:1; + /* LSC callback registration state */ + unsigned int lsc_callback:1; }; struct fs_priv { @@ -211,6 +215,7 @@ int failsafe_eal_uninit(struct rte_eth_dev *dev); /* ETH_DEV */ int failsafe_eth_dev_state_sync(struct rte_eth_dev *dev); +void failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev); void failsafe_dev_remove(struct rte_eth_dev *dev); void failsafe_stats_increment(struct rte_eth_stats *to, struct rte_eth_stats *from); @@ -220,6 +225,9 @@ int failsafe_eth_rmv_event_callback(uint16_t port_id, int failsafe_eth_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *out); +int failsafe_eth_new_event_callback(uint16_t port_id, + enum rte_eth_event_type event, + void *cb_arg, void *out); /* GLOBALS */ @@ -326,8 +334,12 @@ extern int mac_from_arg; #define FS_THREADID_FMT "lu" #endif -#define LOG__(level, m, ...) \ - RTE_LOG(level, PMD, "net_failsafe: " m "%c", __VA_ARGS__) +extern int failsafe_logtype; + +#define LOG__(l, m, ...) \ + rte_log(RTE_LOG_ ## l, failsafe_logtype, \ + "net_failsafe: " m "%c", __VA_ARGS__) + #define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n') #define DEBUG(...) LOG_(DEBUG, __VA_ARGS__) #define INFO(...) LOG_(INFO, __VA_ARGS__) diff --git a/drivers/net/failsafe/failsafe_rxtx.c b/drivers/net/failsafe/failsafe_rxtx.c index 363cf7ba..7bd0f963 100644 --- a/drivers/net/failsafe/failsafe_rxtx.c +++ b/drivers/net/failsafe/failsafe_rxtx.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. 
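With .rss_hash_update wired into failsafe_ops above, an RSS reconfiguration on the failsafe port fans out to every active sub-device and the first sub-device error is propagated to the caller; the same error-propagation rework is why fs_mac_addr_set() changes from void to int. A hedged application-side sketch; the key size and hash set below are placeholders, not values from the patch:

#include <rte_ethdev.h>

/* Any key of dev_info.hash_key_size bytes would do; 40 is typical. */
static uint8_t rss_key[40];

static int
update_port_rss(uint16_t port_id)
{
        struct rte_eth_rss_conf conf = {
                .rss_key = rss_key,
                .rss_key_len = sizeof(rss_key),
                .rss_hf = ETH_RSS_IP | ETH_RSS_UDP,
        };

        /* Relayed to every active sub-device by fs_rss_hash_update(). */
        return rte_eth_dev_rss_hash_update(port_id, &conf);
}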
+ * Copyright 2017 Mellanox Technologies, Ltd */ #include diff --git a/drivers/net/failsafe/meson.build b/drivers/net/failsafe/meson.build new file mode 100644 index 00000000..a249ff4a --- /dev/null +++ b/drivers/net/failsafe/meson.build @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +cflags += '-std=gnu99' +cflags += '-D_DEFAULT_SOURCE' +cflags += '-D_XOPEN_SOURCE=700' +cflags += '-pedantic' +if host_machine.system() == 'linux' + cflags += '-DLINUX' +else + cflags += '-DBSD' +endif + +allow_experimental_apis = true + +sources = files('failsafe_args.c', + 'failsafe.c', + 'failsafe_eal.c', + 'failsafe_ether.c', + 'failsafe_flow.c', + 'failsafe_intr.c', + 'failsafe_ops.c', + 'failsafe_rxtx.c') diff --git a/drivers/net/fm10k/Makefile b/drivers/net/fm10k/Makefile index b059a700..d657dff8 100644 --- a/drivers/net/fm10k/Makefile +++ b/drivers/net/fm10k/Makefile @@ -19,7 +19,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # -CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259 +CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869 +CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259 else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h index 30dad3e2..dc814855 100644 --- a/drivers/net/fm10k/fm10k.h +++ b/drivers/net/fm10k/fm10k.h @@ -106,9 +106,6 @@ #define FM10K_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET #define FM10K_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET -#define FM10K_SIMPLE_TX_FLAG ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ - ETH_TXQ_FLAGS_NOOFFLOADS) - struct fm10k_macvlan_filter_info { uint16_t vlan_num; /* Total VLAN number */ uint16_t mac_num; /* Total mac number */ @@ -180,6 +177,7 @@ struct fm10k_rx_queue { uint8_t drop_en; uint8_t rx_deferred_start; /* don't start this queue in dev start. */ uint16_t rx_ftag_en; /* indicates FTAG RX supported */ + uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */ }; /* @@ -211,7 +209,7 @@ struct fm10k_tx_queue { uint16_t next_rs; /* Next pos to set RS flag */ uint16_t next_dd; /* Next pos to check DD flag */ volatile uint32_t *tail_ptr; - uint32_t txq_flags; /* Holds flags for this TXq */ + uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */ uint16_t nb_desc; uint16_t port_id; uint8_t tx_deferred_start; /** don't start this queue in dev start. 
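The fm10k structure changes above swap the uint32_t txq_flags field for a uint64_t offloads mask, part of the tree-wide migration from ETH_TXQ_FLAGS_* to DEV_TX_OFFLOAD_*. Under the new scheme a queue's effective offloads are the port-wide txmode.offloads plus whatever the queue's txconf requests, roughly as below (queue id, descriptor count and the checksum offload are illustrative):

#include <rte_ethdev.h>

static int
setup_tx_queue(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc)
{
        struct rte_eth_dev_info info;
        struct rte_eth_txconf txconf;

        rte_eth_dev_info_get(port_id, &info);
        txconf = info.default_txconf;
        /* Per-queue request, merged with txmode.offloads by the PMD. */
        txconf.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM;
        return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
                        rte_eth_dev_socket_id(port_id), &txconf);
}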
*/ @@ -328,6 +326,13 @@ uint16_t fm10k_recv_scattered_pkts(void *rx_queue, int fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset); +int +fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset); + +int +fm10k_dev_tx_descriptor_status(void *rx_queue, uint16_t offset); + + uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c index 94237610..541a49b7 100644 --- a/drivers/net/fm10k/fm10k_ethdev.c +++ b/drivers/net/fm10k/fm10k_ethdev.c @@ -60,6 +60,13 @@ static void fm10k_set_tx_function(struct rte_eth_dev *dev); static int fm10k_check_ftag(struct rte_devargs *devargs); static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete); +static void fm10k_dev_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev); +static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev); +static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev); +static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev); + struct fm10k_xstats_name_off { char name[RTE_ETH_XSTATS_NAME_SIZE]; unsigned offset; @@ -444,8 +451,12 @@ fm10k_dev_configure(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); - if (dev->data->dev_conf.rxmode.hw_strip_crc == 0) + /* KEEP_CRC offload flag is not supported by PMD + * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed + */ + if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) PMD_INIT_LOG(WARNING, "fm10k always strip CRC"); + /* multipe queue mode checking */ ret = fm10k_check_mq_mode(dev); if (ret != 0) { @@ -454,6 +465,8 @@ fm10k_dev_configure(struct rte_eth_dev *dev) return ret; } + dev->data->scattered_rx = 0; + return 0; } @@ -756,7 +769,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev) /* It adds dual VLAN length for supporting dual VLAN */ if ((dev->data->dev_conf.rxmode.max_rx_pkt_len + 2 * FM10K_VLAN_TAG_SIZE) > buf_size || - dev->data->dev_conf.rxmode.enable_scatter) { + rxq->offloads & DEV_RX_OFFLOAD_SCATTER) { uint32_t reg; dev->data->scattered_rx = 1; reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i)); @@ -797,52 +810,50 @@ static int fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int err = -1; + int err; uint32_t reg; struct fm10k_rx_queue *rxq; PMD_INIT_FUNC_TRACE(); - if (rx_queue_id < dev->data->nb_rx_queues) { - rxq = dev->data->rx_queues[rx_queue_id]; - err = rx_queue_reset(rxq); - if (err == -ENOMEM) { - PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err); - return err; - } else if (err == -EINVAL) { - PMD_INIT_LOG(ERR, "Invalid buffer address alignment :" - " %d", err); - return err; - } + rxq = dev->data->rx_queues[rx_queue_id]; + err = rx_queue_reset(rxq); + if (err == -ENOMEM) { + PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err); + return err; + } else if (err == -EINVAL) { + PMD_INIT_LOG(ERR, "Invalid buffer address alignment :" + " %d", err); + return err; + } - /* Setup the HW Rx Head and Tail Descriptor Pointers - * Note: this must be done AFTER the queue is enabled on real - * hardware, but BEFORE the queue is enabled when using the - * emulation platform. Do it in both places for now and remove - * this comment and the following two register writes when the - * emulation platform is no longer being used. 
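Two things happen in the fm10k hunks above: scattered Rx is now keyed on DEV_RX_OFFLOAD_SCATTER instead of the removed rxmode.enable_scatter bit, and the queue-id bounds checks vanish because the ethdev layer validates queue ids before it ever calls into the PMD. An application opts in to scatter roughly like this (the 9000-byte frame size is an assumption for illustration):

#include <rte_ethdev.h>

static void
enable_rx_scatter(struct rte_eth_conf *port_conf)
{
        /* Frames larger than one mbuf are spread across a chain. */
        port_conf->rxmode.max_rx_pkt_len = 9000;
        port_conf->rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER |
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
}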
- */ - FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0); - FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1); + /* Setup the HW Rx Head and Tail Descriptor Pointers + * Note: this must be done AFTER the queue is enabled on real + * hardware, but BEFORE the queue is enabled when using the + * emulation platform. Do it in both places for now and remove + * this comment and the following two register writes when the + * emulation platform is no longer being used. + */ + FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0); + FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1); - /* Set PF ownership flag for PF devices */ - reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id)); - if (hw->mac.type == fm10k_mac_pf) - reg |= FM10K_RXQCTL_PF; - reg |= FM10K_RXQCTL_ENABLE; - /* enable RX queue */ - FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg); - FM10K_WRITE_FLUSH(hw); + /* Set PF ownership flag for PF devices */ + reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id)); + if (hw->mac.type == fm10k_mac_pf) + reg |= FM10K_RXQCTL_PF; + reg |= FM10K_RXQCTL_ENABLE; + /* enable RX queue */ + FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg); + FM10K_WRITE_FLUSH(hw); - /* Setup the HW Rx Head and Tail Descriptor Pointers - * Note: this must be done AFTER the queue is enabled - */ - FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0); - FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1); - dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; - } + /* Setup the HW Rx Head and Tail Descriptor Pointers + * Note: this must be done AFTER the queue is enabled + */ + FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0); + FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; - return err; + return 0; } static int @@ -852,14 +863,12 @@ fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) PMD_INIT_FUNC_TRACE(); - if (rx_queue_id < dev->data->nb_rx_queues) { - /* Disable RX queue */ - rx_queue_disable(hw, rx_queue_id); + /* Disable RX queue */ + rx_queue_disable(hw, rx_queue_id); - /* Free mbuf and clean HW ring */ - rx_queue_clean(dev->data->rx_queues[rx_queue_id]); - dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; - } + /* Free mbuf and clean HW ring */ + rx_queue_clean(dev->data->rx_queues[rx_queue_id]); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; return 0; } @@ -871,28 +880,23 @@ fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) /** @todo - this should be defined in the shared code */ #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY; - int err = 0; + struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id]; PMD_INIT_FUNC_TRACE(); - if (tx_queue_id < dev->data->nb_tx_queues) { - struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id]; + q->ops->reset(q); - q->ops->reset(q); + /* reset head and tail pointers */ + FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0); + FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0); - /* reset head and tail pointers */ - FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0); - FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0); - - /* enable TX queue */ - FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id), - FM10K_TXDCTL_ENABLE | txdctl); - FM10K_WRITE_FLUSH(hw); - dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; - } else - err = -1; + /* enable TX queue */ + FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id), + 
FM10K_TXDCTL_ENABLE | txdctl); + FM10K_WRITE_FLUSH(hw); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; - return err; + return 0; } static int @@ -902,11 +906,9 @@ fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) PMD_INIT_FUNC_TRACE(); - if (tx_queue_id < dev->data->nb_tx_queues) { - tx_queue_disable(hw, tx_queue_id); - tx_queue_clean(dev->data->tx_queues[tx_queue_id]); - dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; - } + tx_queue_disable(hw, tx_queue_id); + tx_queue_clean(dev->data->tx_queues[tx_queue_id]); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; return 0; } @@ -1233,13 +1235,11 @@ fm10k_link_update(struct rte_eth_dev *dev, FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); - /* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we - * leave the speed undefined since there is no 50Gbps Ethernet. - */ - dev->data->dev_link.link_speed = 0; + dev->data->dev_link.link_speed = ETH_SPEED_NUM_50G; dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; dev->data->dev_link.link_status = dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP; + dev->data->dev_link.link_autoneg = ETH_LINK_FIXED; return 0; } @@ -1377,7 +1377,6 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); - dev_info->pci_dev = pdev; dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE; dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE; dev_info->max_rx_queues = hw->mac.max_queues; @@ -1389,17 +1388,12 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev, dev_info->vmdq_queue_base = 0; dev_info->max_vmdq_pools = ETH_32_POOLS; dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF; - dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev); + dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) | + dev_info->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev); + dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) | + dev_info->tx_queue_offload_capa; dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t); dev_info->reta_size = FM10K_MAX_RSS_INDICES; @@ -1412,6 +1406,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev, }, .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0), .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -1422,7 +1417,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev, }, .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0), .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0), - .txq_flags = FM10K_SIMPLE_TX_FLAG, + .offloads = 0, }; dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { @@ -1571,19 +1566,22 @@ static int fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask) { if (mask & ETH_VLAN_STRIP_MASK) { - if (!dev->data->dev_conf.rxmode.hw_vlan_strip) + if (!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP)) PMD_INIT_LOG(ERR, "VLAN stripping is " "always on in fm10k"); } if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_extend) + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND) PMD_INIT_LOG(ERR, "VLAN QinQ is not " "supported in fm10k"); } if (mask & ETH_VLAN_FILTER_MASK) { - if 
(!dev->data->dev_conf.rxmode.hw_vlan_filter) + if (!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER)) PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k"); } @@ -1781,6 +1779,27 @@ mempool_element_size_valid(struct rte_mempool *mp) return 1; } +static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return (uint64_t)(DEV_RX_OFFLOAD_SCATTER); +} + +static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_HEADER_SPLIT); +} + static int fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc, unsigned int socket_id, @@ -1791,9 +1810,12 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private); struct fm10k_rx_queue *q; const struct rte_memzone *mz; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); + offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; + /* make sure the mempool element size can account for alignment. */ if (!mempool_element_size_valid(mp)) { PMD_INIT_LOG(ERR, "Error : Mempool element size is too small"); @@ -1838,6 +1860,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, q->queue_id = queue_id; q->tail_ptr = (volatile uint32_t *) &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)]; + q->offloads = offloads; if (handle_rxconf(q, conf)) return -EINVAL; @@ -1947,6 +1970,24 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf) return 0; } +static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return 0; +} + +static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO); +} + static int fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc, unsigned int socket_id, @@ -1955,9 +1996,12 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct fm10k_tx_queue *q; const struct rte_memzone *mz; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); + offloads = conf->offloads | dev->data->dev_conf.txmode.offloads; + /* make sure a valid number of descriptors have been requested */ if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC, FM10K_MULT_TX_DESC, nb_desc)) { @@ -1994,7 +2038,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, q->nb_desc = nb_desc; q->port_id = dev->data->port_id; q->queue_id = queue_id; - q->txq_flags = conf->txq_flags; + q->offloads = offloads; q->ops = &def_txq_ops; q->tail_ptr = (volatile uint32_t *) &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)]; @@ -2784,6 +2828,8 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = { .tx_queue_setup = fm10k_tx_queue_setup, .tx_queue_release = fm10k_tx_queue_release, .rx_descriptor_done = fm10k_dev_rx_descriptor_done, + .rx_descriptor_status = fm10k_dev_rx_descriptor_status, + .tx_descriptor_status = fm10k_dev_tx_descriptor_status, .rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable, .rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable, .reta_update = fm10k_reta_update, @@ 
-2860,7 +2906,7 @@ fm10k_set_tx_function(struct rte_eth_dev *dev) uint16_t tx_ftag_en = 0; if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - /* primary process has set the ftag flag and txq_flags */ + /* primary process has set the ftag flag and offloads */ txq = dev->data->tx_queues[0]; if (fm10k_tx_vec_condition_check(txq)) { dev->tx_pkt_burst = fm10k_xmit_pkts; @@ -3237,9 +3283,7 @@ RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k); RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map); RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci"); -RTE_INIT(fm10k_init_log); -static void -fm10k_init_log(void) +RTE_INIT(fm10k_init_log) { fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init"); if (fm10k_logtype_init >= 0) diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c index 9320748c..4a5b46ec 100644 --- a/drivers/net/fm10k/fm10k_rxtx.c +++ b/drivers/net/fm10k/fm10k_rxtx.c @@ -389,6 +389,84 @@ fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset) return ret; } +int +fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + volatile union fm10k_rx_desc *rxdp; + struct fm10k_rx_queue *rxq = rx_queue; + uint16_t nb_hold, trigger_last; + uint16_t desc; + int ret; + + if (unlikely(offset >= rxq->nb_desc)) { + PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset); + return 0; + } + + if (rxq->next_trigger < rxq->alloc_thresh) + trigger_last = rxq->next_trigger + + rxq->nb_desc - rxq->alloc_thresh; + else + trigger_last = rxq->next_trigger - rxq->alloc_thresh; + + if (rxq->next_dd < trigger_last) + nb_hold = rxq->next_dd + rxq->nb_desc - trigger_last; + else + nb_hold = rxq->next_dd - trigger_last; + + if (offset >= rxq->nb_desc - nb_hold) + return RTE_ETH_RX_DESC_UNAVAIL; + + desc = rxq->next_dd + offset; + if (desc >= rxq->nb_desc) + desc -= rxq->nb_desc; + + rxdp = &rxq->hw_ring[desc]; + + ret = !!(rxdp->w.status & + rte_cpu_to_le_16(FM10K_RXD_STATUS_DD)); + + return ret; +} + +int +fm10k_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + volatile struct fm10k_tx_desc *txdp; + struct fm10k_tx_queue *txq = tx_queue; + uint16_t desc; + uint16_t next_rs = txq->nb_desc; + struct fifo rs_tracker = txq->rs_tracker; + struct fifo *r = &rs_tracker; + + if (unlikely(offset >= txq->nb_desc)) + return -EINVAL; + + desc = txq->next_free + offset; + /* go to next desc that has the RS bit */ + desc = (desc / txq->rs_thresh + 1) * + txq->rs_thresh - 1; + + if (desc >= txq->nb_desc) { + desc -= txq->nb_desc; + if (desc >= txq->nb_desc) + desc -= txq->nb_desc; + } + + r->head = r->list; + for ( ; r->head != r->endp; ) { + if (*r->head >= desc && *r->head < next_rs) + next_rs = *r->head; + ++r->head; + } + + txdp = &txq->hw_ring[next_rs]; + if (txdp->flags & FM10K_TXD_FLAG_DONE) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + /* * Free multiple TX mbuf at a time if they are in the same pool * diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c index 498a1781..005fda63 100644 --- a/drivers/net/fm10k/fm10k_rxtx_vec.c +++ b/drivers/net/fm10k/fm10k_rxtx_vec.c @@ -210,7 +210,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev) #ifndef RTE_FM10K_RX_OLFLAGS_ENABLE /* whithout rx ol_flags, no VP flag report */ - if (rxmode->hw_vlan_extend != 0) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) return -1; #endif @@ -219,7 +219,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev) return -1; /* no header split support */ - if (rxmode->header_split == 1) + if 
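fm10k_dev_rx_descriptor_status() and fm10k_dev_tx_descriptor_status() above plug fm10k into the generic descriptor-status API, so applications can gauge ring occupancy without reading NIC registers themselves. One possible use of the Rx side, with the probe depth left to the caller:

#include <rte_ethdev.h>

/* Count how many received packets are already waiting on the ring;
 * offset 0 is the next descriptor the application would consume. */
static uint16_t
rx_backlog_estimate(uint16_t port_id, uint16_t queue_id, uint16_t max_probe)
{
        uint16_t off;

        for (off = 0; off < max_probe; off++)
                if (rte_eth_rx_descriptor_status(port_id, queue_id, off) !=
                    RTE_ETH_RX_DESC_DONE)
                        break;
        return off;
}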
(rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) return -1; return 0; @@ -695,7 +695,7 @@ int __attribute__((cold)) fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq) { /* Vector TX can't offload any features yet */ - if ((txq->txq_flags & FM10K_SIMPLE_TX_FLAG) != FM10K_SIMPLE_TX_FLAG) + if (txq->offloads != 0) return -1; if (txq->tx_ftag_en) diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile index 5663f5b1..3f869a8d 100644 --- a/drivers/net/i40e/Makefile +++ b/drivers/net/i40e/Makefile @@ -11,6 +11,8 @@ LIB = librte_pmd_i40e.a CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) -DPF_DRIVER -DVF_DRIVER -DINTEGRATED_VF CFLAGS += -DX722_A0_SUPPORT +CFLAGS += -DALLOW_EXPERIMENTAL_API + LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash LDLIBS += -lrte_bus_pci @@ -24,7 +26,7 @@ LIBABIVER := 2 # to disable warnings # ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) -CFLAGS_BASE_DRIVER = -wd593 -wd188 +CFLAGS_BASE_DRIVER = -diag-disable 593 else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) CFLAGS_BASE_DRIVER += -Wno-sign-compare CFLAGS_BASE_DRIVER += -Wno-unused-value @@ -85,6 +87,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_tm.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_vf_representor.c ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2) CC_AVX2_SUPPORT=1 diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h index a482ab90..df66e76a 100644 --- a/drivers/net/i40e/base/i40e_register.h +++ b/drivers/net/i40e/base/i40e_register.h @@ -90,7 +90,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 #define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT) #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ #define I40E_PF_ARQT_ARQT_SHIFT 0 #define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) @@ -113,7 +113,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT) #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT) #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ #define I40E_PF_ATQT_ATQT_SHIFT 0 #define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) @@ -140,7 +140,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 #define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT) #define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ARQT_MAX_INDEX 127 #define I40E_VF_ARQT_ARQT_SHIFT 0 @@ -168,7 +168,7 @@ POSSIBILITY OF SUCH DAMAGE. 
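The long run of i40e_register.h edits above is one repeated one-character fix: every mask for bit 31 changes from 0x1 to 0x1u. I40E_MASK() expands, in essence, to (mask << shift), and left-shifting a signed int into the sign bit is undefined behaviour in C, which UBSan and some compilers flag. A standalone illustration; DEMO_MASK is a stand-in for the real macro:

#include <stdint.h>

#define DEMO_MASK(mask, shift) ((mask) << (shift))

/* uint32_t bad = DEMO_MASK(0x1, 31);  -- (int)1 << 31 lands in the sign
 * bit: undefined behaviour.  The unsigned literal is well defined: */
static const uint32_t good = DEMO_MASK(0x1u, 31); /* 0x80000000 */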
#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT) #define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ATQT_MAX_INDEX 127 #define I40E_VF_ATQT_ATQT_SHIFT 0 @@ -291,7 +291,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 -#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) +#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) #define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 #define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) @@ -535,7 +535,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 -#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) +#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MSRWD_MAX_INDEX 3 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 @@ -1274,14 +1274,14 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 -#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) #define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 #define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 #define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT) #define I40E_PFLAN_QALLOC_VALID_SHIFT 31 -#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT) +#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT) #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ #define I40E_QRX_ENA_MAX_INDEX 1535 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0 @@ -1692,7 +1692,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_GLNVM_SRCTL_START_SHIFT 30 #define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) #define I40E_GLNVM_SRCTL_DONE_SHIFT 31 -#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT) +#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT) #define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 #define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) @@ -3059,7 +3059,7 @@ POSSIBILITY OF SUCH DAMAGE. 
#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 #define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT) #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 -#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT) +#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT) #define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VP_MDET_RX_MAX_INDEX 127 #define I40E_VP_MDET_RX_VALID_SHIFT 0 @@ -3196,7 +3196,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 #define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) +#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ #define I40E_VF_ARQT1_ARQT_SHIFT 0 #define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) @@ -3219,7 +3219,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 #define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) +#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ #define I40E_VF_ATQT1_ATQT_SHIFT 0 #define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 508b4171..85a6a867 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -41,6 +42,8 @@ #define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb" #define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list" +#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver" +#define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf" #define I40E_CLEAR_PXE_WAIT_MS 200 @@ -213,7 +216,7 @@ /* Bit mask of Extended Tag enable/disable */ #define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT) -static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev); +static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params); static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev); static int i40e_dev_configure(struct rte_eth_dev *dev); static int i40e_dev_start(struct rte_eth_dev *dev); @@ -369,7 +372,12 @@ static int i40e_get_eeprom_length(struct rte_eth_dev *dev); static int i40e_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom); -static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, +static int i40e_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo); +static int i40e_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); + +static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); @@ -395,6 +403,13 @@ static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev); int i40e_logtype_init; int i40e_logtype_driver; +static const char *const valid_keys[] = { + ETH_I40E_FLOATING_VEB_ARG, + ETH_I40E_FLOATING_VEB_LIST_ARG, + ETH_I40E_SUPPORT_MULTI_DRIVER, + ETH_I40E_QUEUE_NUM_PER_VF_ARG, + NULL}; + static const struct 
rte_pci_id pci_id_i40e_map[] = { { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) }, { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) }, @@ -489,6 +504,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .get_reg = i40e_get_regs, .get_eeprom_length = i40e_get_eeprom_length, .get_eeprom = i40e_get_eeprom, + .get_module_info = i40e_get_module_info, + .get_module_eeprom = i40e_get_module_eeprom, .mac_addr_set = i40e_set_default_mac_addr, .mtu_set = i40e_dev_mtu_set, .tm_ops_get = i40e_tm_ops_get, @@ -607,16 +624,74 @@ static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = { #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \ sizeof(rte_i40e_txq_prio_strings[0])) -static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, +static int +eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { - return rte_eth_dev_pci_generic_probe(pci_dev, - sizeof(struct i40e_adapter), eth_i40e_dev_init); + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 }; + int i, retval; + + if (pci_dev->device.devargs) { + retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, + &eth_da); + if (retval) + return retval; + } + + retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, + sizeof(struct i40e_adapter), + eth_dev_pci_specific_init, pci_dev, + eth_i40e_dev_init, NULL); + + if (retval || eth_da.nb_representor_ports < 1) + return retval; + + /* probe VF representor ports */ + struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated( + pci_dev->device.name); + + if (pf_ethdev == NULL) + return -ENODEV; + + for (i = 0; i < eth_da.nb_representor_ports; i++) { + struct i40e_vf_representor representor = { + .vf_id = eth_da.representor_ports[i], + .switch_domain_id = I40E_DEV_PRIVATE_TO_PF( + pf_ethdev->data->dev_private)->switch_domain_id, + .adapter = I40E_DEV_PRIVATE_TO_ADAPTER( + pf_ethdev->data->dev_private) + }; + + /* representor port net_bdf_port */ + snprintf(name, sizeof(name), "net_%s_representor_%d", + pci_dev->device.name, eth_da.representor_ports[i]); + + retval = rte_eth_dev_create(&pci_dev->device, name, + sizeof(struct i40e_vf_representor), NULL, NULL, + i40e_vf_representor_init, &representor); + + if (retval) + PMD_DRV_LOG(ERR, "failed to create i40e vf " + "representor %s.", name); + } + + return 0; } static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev) { - return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit); + struct rte_eth_dev *ethdev; + + ethdev = rte_eth_dev_allocated(pci_dev->device.name); + if (!ethdev) + return -ENODEV; + + + if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) + return rte_eth_dev_destroy(ethdev, i40e_vf_representor_uninit); + else + return rte_eth_dev_destroy(ethdev, eth_i40e_dev_uninit); } static struct rte_pci_driver rte_i40e_pmd = { @@ -627,41 +702,21 @@ static struct rte_pci_driver rte_i40e_pmd = { .remove = eth_i40e_pci_remove, }; -static inline int -rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &(dev->data->dev_link); - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -static inline int -rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if
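eth_i40e_pci_probe() above is where 18.08 introduces VF representor creation: any "representor=..." device argument is parsed first, the PF ethdev is created through rte_eth_dev_create(), and one additional ethdev is then spawned per listed VF. From the operator's side this is driven purely by devargs, e.g. a whitelist such as "-w 0000:81:00.0,representor=[0-2]" (the PCI address is a made-up example). A sketch of the parsing step alone, assuming the experimental rte_eth_devargs_parse() helper from rte_ethdev_driver.h:

#include <rte_ethdev_driver.h>

static int
count_representors(const char *dargs)
{
        struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };

        if (rte_eth_devargs_parse(dargs, &eth_da) != 0)
                return -1;
        /* "representor=[0-2]" would yield 3 here. */
        return eth_da.nb_representor_ports;
}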
(rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - static inline void -i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) +i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr, + uint32_t reg_val) { + uint32_t ori_reg_val; + struct rte_eth_dev *dev; + + ori_reg_val = i40e_read_rx_ctl(hw, reg_addr); + dev = ((struct i40e_adapter *)hw->back)->eth_dev; i40e_write_rx_ctl(hw, reg_addr, reg_val); - PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified " - "with value 0x%08x", - reg_addr, reg_val); + if (ori_reg_val != reg_val) + PMD_DRV_LOG(WARNING, + "i40e device %s changed global register [0x%08x]." + " original: 0x%08x, new: 0x%08x", + dev->device->name, reg_addr, ori_reg_val, reg_val); } RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd); @@ -688,7 +743,6 @@ static inline void i40e_GLQF_reg_init(struct i40e_hw *hw) */ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029); I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420); - i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER); } static inline void i40e_config_automask(struct i40e_pf *pf) @@ -807,7 +861,7 @@ config_vf_floating_veb(struct rte_devargs *devargs, if (devargs == NULL) return; - kvlist = rte_kvargs_parse(devargs->args, NULL); + kvlist = rte_kvargs_parse(devargs->args, valid_keys); if (kvlist == NULL) return; @@ -848,7 +902,7 @@ is_floating_veb_supported(struct rte_devargs *devargs) if (devargs == NULL) return 0; - kvlist = rte_kvargs_parse(devargs->args, NULL); + kvlist = rte_kvargs_parse(devargs->args, valid_keys); if (kvlist == NULL) return 0; @@ -1055,8 +1109,6 @@ i40e_init_queue_region_conf(struct rte_eth_dev *dev) memset(info, 0, sizeof(struct i40e_queue_regions)); } -#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver" - static int i40e_parse_multi_drv_handler(__rte_unused const char *key, const char *value, @@ -1088,9 +1140,8 @@ static int i40e_support_multi_driver(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - static const char *const valid_keys[] = { - ETH_I40E_SUPPORT_MULTI_DRIVER, NULL}; struct rte_kvargs *kvlist; + int kvargs_count; /* Enable global configuration by default */ pf->support_multi_driver = false; @@ -1102,7 +1153,13 @@ i40e_support_multi_driver(struct rte_eth_dev *dev) if (!kvlist) return -EINVAL; - if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1) + kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER); + if (!kvargs_count) { + rte_kvargs_free(kvlist); + return 0; + } + + if (kvargs_count > 1) PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only " "the first invalid or last valid one is used !", ETH_I40E_SUPPORT_MULTI_DRIVER); @@ -1118,7 +1175,34 @@ i40e_support_multi_driver(struct rte_eth_dev *dev) } static int -eth_i40e_dev_init(struct rte_eth_dev *dev) +i40e_aq_debug_write_global_register(struct i40e_hw *hw, + uint32_t reg_addr, uint64_t reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + uint64_t ori_reg_val; + struct rte_eth_dev *dev; + int ret; + + ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Fail to debug read from 0x%08x", + reg_addr); + return -EIO; + } + dev = ((struct i40e_adapter *)hw->back)->eth_dev; + + if (ori_reg_val != reg_val) + PMD_DRV_LOG(WARNING, + "i40e device %s changed global register [0x%08x]." 
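The kvargs hunks above tighten i40e's devargs handling in two ways: rte_kvargs_parse() now receives the shared valid_keys list, so unknown keys are rejected up front, and an early rte_kvargs_count() == 0 exit skips processing (and still frees the list) when a key is absent. The general shape, with a hypothetical key and handler for illustration:

#include <errno.h>
#include <stdlib.h>
#include <rte_kvargs.h>

static int
handle_queue_num(const char *key, const char *value, void *extra_args)
{
        (void)key;
        *(int *)extra_args = atoi(value);
        return 0;
}

static int
parse_queue_num(const char *args, int *queue_num)
{
        static const char * const keys[] = { "queue-num-per-vf", NULL };
        struct rte_kvargs *kvlist = rte_kvargs_parse(args, keys);
        int ret = 0;

        if (kvlist == NULL)
                return -EINVAL;
        if (rte_kvargs_count(kvlist, "queue-num-per-vf") > 0)
                ret = rte_kvargs_process(kvlist, "queue-num-per-vf",
                                handle_queue_num, queue_num);
        rte_kvargs_free(kvlist);
        return ret;
}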
+ " original: 0x%"PRIx64", after: 0x%"PRIx64, + dev->device->name, reg_addr, ori_reg_val, reg_val); + + return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details); +} + +static int +eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) { struct rte_pci_device *pci_dev; struct rte_intr_handle *intr_handle; @@ -1170,6 +1254,13 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) hw->bus.func = pci_dev->addr.function; hw->adapter_stopped = 0; + /* + * Switch Tag value should not be identical to either the First Tag + * or Second Tag values. So set something other than common Ethertype + * for internal switching. + */ + hw->switch_tag = 0xffff; + /* Check if need to support multi-driver */ i40e_support_multi_driver(dev); @@ -1224,7 +1315,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* initialise the L3_MAP register */ if (!pf->support_multi_driver) { - ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40), + ret = i40e_aq_debug_write_global_register(hw, + I40E_GLQF_L3_MAP(40), 0x00000028, NULL); if (ret) PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", @@ -1232,7 +1324,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) PMD_INIT_LOG(DEBUG, "Global register 0x%08x is changed with 0x28", I40E_GLQF_L3_MAP(40)); - i40e_global_cfg_warning(I40E_WARNING_QINQ_CLOUD_FILTER); } /* Need the special FW version to support floating VEB */ @@ -1519,7 +1610,6 @@ void i40e_flex_payload_reg_set_default(struct i40e_hw *hw) I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000); I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000); I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000); - i40e_global_cfg_warning(I40E_WARNING_DIS_FLX_PLD); } static int @@ -1533,6 +1623,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) struct rte_flow *p_flow; int ret; uint8_t aq_fail = 0; + int retries = 0; PMD_INIT_FUNC_TRACE(); @@ -1544,6 +1635,10 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) pci_dev = RTE_ETH_DEV_TO_PCI(dev); intr_handle = &pci_dev->intr_handle; + ret = rte_eth_switch_domain_free(pf->switch_domain_id); + if (ret) + PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret); + if (hw->adapter_stopped == 0) i40e_dev_close(dev); @@ -1574,9 +1669,20 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) /* disable uio intr before callback unregister */ rte_intr_disable(intr_handle); - /* register callback func to eal lib */ - rte_intr_callback_unregister(intr_handle, - i40e_dev_interrupt_handler, dev); + /* unregister callback func to eal lib */ + do { + ret = rte_intr_callback_unregister(intr_handle, + i40e_dev_interrupt_handler, dev); + if (ret >= 0) { + break; + } else if (ret != -EAGAIN) { + PMD_INIT_LOG(ERR, + "intr callback unregister failed: %d", + ret); + return ret; + } + i40e_msec_delay(500); + } while (retries++ < 5); i40e_rm_ethtype_filter_list(pf); i40e_rm_tunnel_filter_list(pf); @@ -1746,8 +1852,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, /* Write first RX queue to Link list register as the head element */ if (vsi->type != I40E_VSI_SRIOV) { uint16_t interval = - i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1, - pf->support_multi_driver); + i40e_calc_itr_interval(1, pf->support_multi_driver); if (msix_vect == I40E_MISC_VEC_ID) { I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, @@ -1943,27 +2048,40 @@ i40e_phy_conf_link(struct i40e_hw *hw, struct i40e_aq_get_phy_abilities_resp phy_ab; struct i40e_aq_set_phy_config phy_conf; enum i40e_aq_phy_type cnt; + uint8_t avail_speed; uint32_t phy_type_mask = 0; const uint8_t mask = 
I40E_AQ_PHY_FLAG_PAUSE_TX | I40E_AQ_PHY_FLAG_PAUSE_RX | I40E_AQ_PHY_FLAG_LOW_POWER; - const uint8_t advt = I40E_LINK_SPEED_40GB | - I40E_LINK_SPEED_25GB | - I40E_LINK_SPEED_10GB | - I40E_LINK_SPEED_1GB | - I40E_LINK_SPEED_100MB; int ret = -ENOTSUP; + /* To get phy capabilities of available speeds. */ + status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab, + NULL); + if (status) { + PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n", + status); + return ret; + } + avail_speed = phy_ab.link_speed; + /* To get the current phy config. */ status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab, NULL); - if (status) + if (status) { + PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n", + status); return ret; + } - /* If link already up, no need to set up again */ - if (is_up && phy_ab.phy_type != 0) + /* If link needs to go up and it is in autoneg mode the speed is OK, + * no need to set up again. + */ + if (is_up && phy_ab.phy_type != 0 && + abilities & I40E_AQ_PHY_AN_ENABLED && + phy_ab.link_speed != 0) return I40E_SUCCESS; memset(&phy_conf, 0, sizeof(phy_conf)); @@ -1972,18 +2090,20 @@ i40e_phy_conf_link(struct i40e_hw *hw, abilities &= ~mask; abilities |= phy_ab.abilities & mask; - /* update ablities and speed */ - if (abilities & I40E_AQ_PHY_AN_ENABLED) - phy_conf.link_speed = advt; - else - phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed; - phy_conf.abilities = abilities; + /* If link needs to go up, but the force speed is not supported, + * Warn users and config the default available speeds. + */ + if (is_up && !(force_speed & avail_speed)) { + PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n"); + phy_conf.link_speed = avail_speed; + } else { + phy_conf.link_speed = is_up ?
force_speed : avail_speed; + } - - /* To enable link, phy_type mask needs to include each type */ - for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++) + /* PHY type mask needs to include each type except PHY type extension */ + for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++) phy_type_mask |= 1 << cnt; /* use get_phy_abilities_resp value for the rest */ @@ -2016,11 +2136,18 @@ i40e_apply_link_speed(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_conf *conf = &dev->data->dev_conf; + if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) { + conf->link_speeds = ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_20G | + ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_100M; + } speed = i40e_parse_link_speeds(conf->link_speeds); - abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; - if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED)) - abilities |= I40E_AQ_PHY_AN_ENABLED; - abilities |= I40E_AQ_PHY_LINK_ENABLED; + abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK | + I40E_AQ_PHY_AN_ENABLED | + I40E_AQ_PHY_LINK_ENABLED; return i40e_phy_conf_link(hw, abilities, speed, true); } @@ -2137,13 +2264,6 @@ i40e_dev_start(struct rte_eth_dev *dev) } /* Apply link configure */ - if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M | - ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | - ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | - ETH_LINK_SPEED_40G)) { - PMD_DRV_LOG(ERR, "Invalid link setting"); - goto err_up; - } ret = i40e_apply_link_speed(dev); if (I40E_SUCCESS != ret) { PMD_DRV_LOG(ERR, "Fail to apply link setting"); @@ -2286,6 +2406,8 @@ i40e_dev_close(struct rte_eth_dev *dev) i40e_pf_disable_irq0(hw); rte_intr_disable(intr_handle); + i40e_fdir_teardown(pf); + /* shutdown and destroy the HMC */ i40e_shutdown_lan_hmc(hw); @@ -2297,7 +2419,6 @@ i40e_dev_close(struct rte_eth_dev *dev) pf->vmdq = NULL; /* release all the existing VSIs and VEBs */ - i40e_fdir_teardown(pf); i40e_vsi_release(pf->main_vsi); /* shutdown the adminq */ @@ -2339,7 +2460,7 @@ i40e_dev_reset(struct rte_eth_dev *dev) if (ret) return ret; - ret = eth_i40e_dev_init(dev); + ret = eth_i40e_dev_init(dev, NULL); return ret; } @@ -2437,84 +2558,143 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev) return i40e_phy_conf_link(hw, abilities, speed, false); } -int -i40e_dev_link_update(struct rte_eth_dev *dev, - int wait_to_complete) +static __rte_always_inline void +update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link) +{ +/* Link status registers and values*/ +#define I40E_PRTMAC_LINKSTA 0x001E2420 +#define I40E_REG_LINK_UP 0x40000080 +#define I40E_PRTMAC_MACC 0x001E24E0 +#define I40E_REG_MACC_25GB 0x00020000 +#define I40E_REG_SPEED_MASK 0x38000000 +#define I40E_REG_SPEED_100MB 0x00000000 +#define I40E_REG_SPEED_1GB 0x08000000 +#define I40E_REG_SPEED_10GB 0x10000000 +#define I40E_REG_SPEED_20GB 0x20000000 +#define I40E_REG_SPEED_25_40GB 0x18000000 + uint32_t link_speed; + uint32_t reg_val; + + reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA); + link_speed = reg_val & I40E_REG_SPEED_MASK; + reg_val &= I40E_REG_LINK_UP; + link->link_status = (reg_val == I40E_REG_LINK_UP) ? 
1 : 0; + + if (unlikely(link->link_status == 0)) + return; + + /* Parse the link status */ + switch (link_speed) { + case I40E_REG_SPEED_100MB: + link->link_speed = ETH_SPEED_NUM_100M; + break; + case I40E_REG_SPEED_1GB: + link->link_speed = ETH_SPEED_NUM_1G; + break; + case I40E_REG_SPEED_10GB: + link->link_speed = ETH_SPEED_NUM_10G; + break; + case I40E_REG_SPEED_20GB: + link->link_speed = ETH_SPEED_NUM_20G; + break; + case I40E_REG_SPEED_25_40GB: + reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC); + + if (reg_val & I40E_REG_MACC_25GB) + link->link_speed = ETH_SPEED_NUM_25G; + else + link->link_speed = ETH_SPEED_NUM_40G; + + break; + default: + PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed); + break; + } +} + +static __rte_always_inline void +update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link, + bool enable_lse, int wait_to_complete) { -#define CHECK_INTERVAL 100 /* 100ms */ -#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ + uint32_t rep_cnt = MAX_REPEAT_TIME; struct i40e_link_status link_status; - struct rte_eth_link link, old; int status; - unsigned rep_cnt = MAX_REPEAT_TIME; - bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false; - memset(&link, 0, sizeof(link)); - memset(&old, 0, sizeof(old)); memset(&link_status, 0, sizeof(link_status)); - rte_i40e_dev_atomic_read_link_status(dev, &old); do { + memset(&link_status, 0, sizeof(link_status)); + /* Get link status information from hardware */ status = i40e_aq_get_link_info(hw, enable_lse, &link_status, NULL); - if (status != I40E_SUCCESS) { - link.link_speed = ETH_SPEED_NUM_100M; - link.link_duplex = ETH_LINK_FULL_DUPLEX; + if (unlikely(status != I40E_SUCCESS)) { + link->link_speed = ETH_SPEED_NUM_100M; + link->link_duplex = ETH_LINK_FULL_DUPLEX; PMD_DRV_LOG(ERR, "Failed to get link info"); - goto out; + return; } - link.link_status = link_status.link_info & I40E_AQ_LINK_UP; - if (!wait_to_complete || link.link_status) + link->link_status = link_status.link_info & I40E_AQ_LINK_UP; + if (!wait_to_complete || link->link_status) break; rte_delay_ms(CHECK_INTERVAL); } while (--rep_cnt); - if (!link.link_status) - goto out; - - /* i40e uses full duplex only */ - link.link_duplex = ETH_LINK_FULL_DUPLEX; - /* Parse the link status */ switch (link_status.link_speed) { case I40E_LINK_SPEED_100MB: - link.link_speed = ETH_SPEED_NUM_100M; + link->link_speed = ETH_SPEED_NUM_100M; break; case I40E_LINK_SPEED_1GB: - link.link_speed = ETH_SPEED_NUM_1G; + link->link_speed = ETH_SPEED_NUM_1G; break; case I40E_LINK_SPEED_10GB: - link.link_speed = ETH_SPEED_NUM_10G; + link->link_speed = ETH_SPEED_NUM_10G; break; case I40E_LINK_SPEED_20GB: - link.link_speed = ETH_SPEED_NUM_20G; + link->link_speed = ETH_SPEED_NUM_20G; break; case I40E_LINK_SPEED_25GB: - link.link_speed = ETH_SPEED_NUM_25G; + link->link_speed = ETH_SPEED_NUM_25G; break; case I40E_LINK_SPEED_40GB: - link.link_speed = ETH_SPEED_NUM_40G; + link->link_speed = ETH_SPEED_NUM_40G; break; default: - link.link_speed = ETH_SPEED_NUM_100M; + link->link_speed = ETH_SPEED_NUM_100M; break; } +} +int +i40e_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link; + bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? 
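The link-update split above matters for latency: update_link_reg() reads the PRTMAC_LINKSTA register directly, so a no-wait link query no longer costs an admin-queue round trip, while update_link_aq() keeps the firmware path for wait_to_complete and interrupt-driven (LSC) cases. Application code is unchanged either way; a query might look like:

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_link(uint16_t port_id)
{
        struct rte_eth_link link;

        /* Served from the register path after this patch. */
        rte_eth_link_get_nowait(port_id, &link);
        printf("port %u: %s, %u Mbps\n", (unsigned int)port_id,
               link.link_status ? "up" : "down", link.link_speed);
}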
true : false; + int ret; + + memset(&link, 0, sizeof(link)); + + /* i40e uses full duplex only */ + link.link_duplex = ETH_LINK_FULL_DUPLEX; link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); -out: - rte_i40e_dev_atomic_write_link_status(dev, &link); - if (link.link_status == old.link_status) - return -1; + if (!wait_to_complete && !enable_lse) + update_link_reg(hw, &link); + else + update_link_aq(hw, &link, enable_lse, wait_to_complete); + ret = rte_eth_linkstatus_set(dev, &link); i40e_notify_all_vfs_link_status(dev); - return 0; + return ret; } /* Get all the statistics of a VSI */ @@ -3169,13 +3349,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) struct i40e_vsi *vsi = pf->main_vsi; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = vsi->nb_qps; dev_info->max_tx_queues = vsi->nb_qps; dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN; dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX; dev_info->max_mac_addrs = vsi->max_macaddrs; dev_info->max_vfs = pci_dev->max_vfs; + dev_info->rx_queue_offload_capa = 0; dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_QINQ_STRIP | @@ -3183,7 +3363,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_RX_OFFLOAD_CRC_STRIP; + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_VLAN_EXTEND | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_JUMBO_FRAME; + + dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT | @@ -3196,7 +3382,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_IPIP_TNL_TSO | - DEV_TX_OFFLOAD_GENEVE_TNL_TSO; + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS | + dev_info->tx_queue_offload_capa; + dev_info->dev_capa = + RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; + dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); dev_info->reta_size = pf->hash_lut_size; @@ -3210,6 +3402,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -3220,8 +3413,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .offloads = 0, }; dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { @@ -3248,15 +3440,42 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_tx_queues += dev_info->vmdq_queue_num; } - if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) + if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) { /* For XL710 */ dev_info->speed_capa = ETH_LINK_SPEED_40G; - else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) + dev_info->default_rxportconf.nb_queues = 2; + dev_info->default_txportconf.nb_queues = 2; + if (dev->data->nb_rx_queues == 1) + dev_info->default_rxportconf.ring_size = 2048; + else + dev_info->default_rxportconf.ring_size = 1024; + if (dev->data->nb_tx_queues == 1) + 
dev_info->default_txportconf.ring_size = 1024; + else + dev_info->default_txportconf.ring_size = 512; + + } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) { /* For XXV710 */ dev_info->speed_capa = ETH_LINK_SPEED_25G; - else + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_rxportconf.ring_size = 256; + dev_info->default_txportconf.ring_size = 256; + } else { /* For X710 */ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) { + dev_info->default_rxportconf.ring_size = 512; + dev_info->default_txportconf.ring_size = 256; + } else { + dev_info->default_rxportconf.ring_size = 256; + dev_info->default_txportconf.ring_size = 256; + } + } + dev_info->default_rxportconf.burst_size = 32; + dev_info->default_txportconf.burst_size = 32; } static int @@ -3307,7 +3526,8 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev, return 0; } - ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), + ret = i40e_aq_debug_write_global_register(hw, + I40E_GL_SWT_L2TAGCTRL(reg_id), reg_w, NULL); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, @@ -3329,7 +3549,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend; + int qinq = dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND; int ret = 0; if ((vlan_type != ETH_VLAN_TYPE_INNER && @@ -3367,7 +3588,6 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, /* If NVM API < 1.7, keep the register setting */ ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type, tpid, qinq); - i40e_global_cfg_warning(I40E_WARNING_TPID); return ret; } @@ -3377,9 +3597,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_vsi *vsi = pf->main_vsi; + struct rte_eth_rxmode *rxmode; + rxmode = &dev->data->dev_conf.rxmode; if (mask & ETH_VLAN_FILTER_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) i40e_vsi_config_vlan_filter(vsi, TRUE); else i40e_vsi_config_vlan_filter(vsi, FALSE); @@ -3387,14 +3609,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) if (mask & ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping */ - if (dev->data->dev_conf.rxmode.hw_vlan_strip) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) i40e_vsi_config_vlan_stripping(vsi, TRUE); else i40e_vsi_config_vlan_stripping(vsi, FALSE); } if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_extend) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) { i40e_vsi_config_double_vlan(vsi, TRUE); /* Set global registers with default ethertype. 
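The default_rxportconf/default_txportconf assignments above feed the "preferred device parameters" mechanism: the PMD suggests queue counts, ring sizes and burst sizes known to perform well for each device family, and a zero field means no preference. A consumer-side sketch with a caller-chosen fallback:

#include <rte_ethdev.h>

static uint16_t
preferred_rx_ring_size(uint16_t port_id, uint16_t fallback)
{
        struct rte_eth_dev_info info;

        rte_eth_dev_info_get(port_id, &info);
        /* 0 means the PMD expressed no preference. */
        return info.default_rxportconf.ring_size != 0 ?
                        info.default_rxportconf.ring_size : fallback;
}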
*/ i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, @@ -3611,7 +3833,6 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW, pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] << I40E_KILOSHIFT); - i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL); } else { PMD_DRV_LOG(ERR, "Water marker configuration is not supported."); @@ -3641,6 +3862,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev, struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_mac_filter_info mac_filter; struct i40e_vsi *vsi; + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; int ret; /* If VMDQ not enabled or configured, return */ @@ -3659,7 +3881,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev, } rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN); - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; else mac_filter.filter_type = RTE_MAC_PERFECT_MATCH; @@ -4010,8 +4232,8 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, return I40E_ERR_PARAM; snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand()); - mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0, - alignment, RTE_PGSIZE_2M); + mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M); if (!mz) return I40E_ERR_NO_MEMORY; @@ -4147,7 +4369,6 @@ i40e_get_cap(struct i40e_hw *hw) } #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4 -#define QUEUE_NUM_PER_VF_ARG "queue-num-per-vf" static int i40e_pf_parse_vf_queue_number_handler(const char *key, const char *value, @@ -4181,9 +4402,9 @@ static int i40e_pf_parse_vf_queue_number_handler(const char *key, static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev) { - static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL}; struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct rte_kvargs *kvlist; + int kvargs_count; /* set default queue number per VF as 4 */ pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; @@ -4195,12 +4416,18 @@ static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev) if (kvlist == NULL) return -(EINVAL); - if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1) + kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG); + if (!kvargs_count) { + rte_kvargs_free(kvlist); + return 0; + } + + if (kvargs_count > 1) PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only " "the first invalid or last valid one is used !", - QUEUE_NUM_PER_VF_ARG); + ETH_I40E_QUEUE_NUM_PER_VF_ARG); - rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG, + rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG, i40e_pf_parse_vf_queue_number_handler, pf); rte_kvargs_free(kvlist); @@ -5669,6 +5896,12 @@ i40e_pf_setup(struct i40e_pf *pf) PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret); return ret; } + + ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id); + if (ret) + PMD_INIT_LOG(WARNING, + "failed to allocate switch domain for device %d", ret); + if (pf->flags & I40E_FLAG_FDIR) { /* make queue allocated first, let FDIR use queue pair 0*/ ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR); @@ -7372,6 +7605,7 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) struct i40e_aqc_replace_cloud_filters_cmd filter_replace; struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_dev 
*dev = ((struct i40e_adapter *)hw->back)->eth_dev; enum i40e_status_code status = I40E_SUCCESS; if (pf->support_multi_driver) { @@ -7415,13 +7649,14 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) status = i40e_aq_replace_cloud_filters(hw, &filter_replace, &filter_replace_buf); - if (!status) { - i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); - PMD_DRV_LOG(DEBUG, "Global configuration modification: " - "cloud l1 type is changed from 0x%x to 0x%x", + if (!status && (filter_replace.old_filter_type != + filter_replace.new_filter_type)) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type." + " original: 0x%x, new: 0x%x", + dev->device->name, filter_replace.old_filter_type, filter_replace.new_filter_type); - } + return status; } @@ -7431,6 +7666,7 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) struct i40e_aqc_replace_cloud_filters_cmd filter_replace; struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; enum i40e_status_code status = I40E_SUCCESS; if (pf->support_multi_driver) { @@ -7459,10 +7695,13 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) &filter_replace_buf); if (status < 0) return status; - PMD_DRV_LOG(DEBUG, "Global configuration modification: " - "cloud filter type is changed from 0x%x to 0x%x", - filter_replace.old_filter_type, - filter_replace.new_filter_type); + if (filter_replace.old_filter_type != + filter_replace.new_filter_type) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type." + " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); /* For MPLSoGRE */ memset(&filter_replace, 0, @@ -7485,13 +7724,14 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) status = i40e_aq_replace_cloud_filters(hw, &filter_replace, &filter_replace_buf); - if (!status) { - i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); - PMD_DRV_LOG(DEBUG, "Global configuration modification: " - "cloud filter type is changed from 0x%x to 0x%x", + if (!status && (filter_replace.old_filter_type != + filter_replace.new_filter_type)) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type." + " original: 0x%x, new: 0x%x", + dev->device->name, filter_replace.old_filter_type, filter_replace.new_filter_type); - } + return status; } @@ -7501,6 +7741,7 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf) struct i40e_aqc_replace_cloud_filters_cmd filter_replace; struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; enum i40e_status_code status = I40E_SUCCESS; if (pf->support_multi_driver) { @@ -7536,10 +7777,13 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf) &filter_replace_buf); if (status < 0) return status; - PMD_DRV_LOG(DEBUG, "Global configuration modification: " - "cloud l1 type is changed from 0x%x to 0x%x", - filter_replace.old_filter_type, - filter_replace.new_filter_type); + if (filter_replace.old_filter_type != + filter_replace.new_filter_type) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type." 
+ " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); /* for GTP-U */ memset(&filter_replace, 0, @@ -7568,13 +7812,14 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf) status = i40e_aq_replace_cloud_filters(hw, &filter_replace, &filter_replace_buf); - if (!status) { - i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); - PMD_DRV_LOG(DEBUG, "Global configuration modification: " - "cloud l1 type is changed from 0x%x to 0x%x", + if (!status && (filter_replace.old_filter_type != + filter_replace.new_filter_type)) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type." + " original: 0x%x, new: 0x%x", + dev->device->name, filter_replace.old_filter_type, filter_replace.new_filter_type); - } + return status; } @@ -7584,6 +7829,7 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf) struct i40e_aqc_replace_cloud_filters_cmd filter_replace; struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; enum i40e_status_code status = I40E_SUCCESS; if (pf->support_multi_driver) { @@ -7611,10 +7857,13 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf) &filter_replace_buf); if (status < 0) return status; - PMD_DRV_LOG(DEBUG, "Global configuration modification: " - "cloud filter type is changed from 0x%x to 0x%x", - filter_replace.old_filter_type, - filter_replace.new_filter_type); + if (filter_replace.old_filter_type != + filter_replace.new_filter_type) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type." + " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); /* for GTP-U */ memset(&filter_replace, 0, @@ -7636,13 +7885,14 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf) status = i40e_aq_replace_cloud_filters(hw, &filter_replace, &filter_replace_buf); - if (!status) { - i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); - PMD_DRV_LOG(DEBUG, "Global configuration modification: " - "cloud filter type is changed from 0x%x to 0x%x", + if (!status && (filter_replace.old_filter_type != + filter_replace.new_filter_type)) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type." 
+ " original: 0x%x, new: 0x%x", + dev->device->name, filter_replace.old_filter_type, filter_replace.new_filter_type); - } + return status; } @@ -8194,14 +8444,14 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) } if (reg != val) { - ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2), + ret = i40e_aq_debug_write_global_register(hw, + I40E_GL_PRS_FVBM(2), reg, NULL); if (ret != 0) return ret; PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed " "with value 0x%08x", I40E_GL_PRS_FVBM(2), reg); - i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN); } else { ret = 0; } @@ -8467,7 +8717,6 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw, I40E_GLQF_HSYM(j), reg); } - i40e_global_cfg_warning(I40E_WARNING_HSYM); } } @@ -8493,7 +8742,6 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw, goto out; i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg); - i40e_global_cfg_warning(I40E_WARNING_QF_CTL); out: I40E_WRITE_FLUSH(hw); @@ -9086,12 +9334,17 @@ void i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) { uint32_t reg = i40e_read_rx_ctl(hw, addr); + struct rte_eth_dev *dev; - PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg); - if (reg != val) - i40e_write_global_rx_ctl(hw, addr, val); - PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr, - (uint32_t)i40e_read_rx_ctl(hw, addr)); + dev = ((struct i40e_adapter *)hw->back)->eth_dev; + if (reg != val) { + i40e_write_rx_ctl(hw, addr, val); + PMD_DRV_LOG(WARNING, + "i40e device %s changed global register [0x%08x]." + " original: 0x%08x, new: 0x%08x", + dev->device->name, addr, reg, + (uint32_t)i40e_read_rx_ctl(hw, addr)); + } } static void @@ -9165,12 +9418,6 @@ i40e_filter_input_set_init(struct i40e_pf *pf) pf->hash_input_set[pctype] = input_set; pf->fdir.input_set[pctype] = input_set; } - - if (!pf->support_multi_driver) { - i40e_global_cfg_warning(I40E_WARNING_HASH_INSET); - i40e_global_cfg_warning(I40E_WARNING_FD_MSK); - i40e_global_cfg_warning(I40E_WARNING_HASH_MSK); - } } int @@ -9236,7 +9483,6 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) & UINT32_MAX)); - i40e_global_cfg_warning(I40E_WARNING_HASH_INSET); for (i = 0; i < num; i++) i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), @@ -9245,7 +9491,6 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), 0); - i40e_global_cfg_warning(I40E_WARNING_HASH_MSK); I40E_WRITE_FLUSH(hw); pf->hash_input_set[pctype] = input_set; @@ -9326,7 +9571,6 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf, i40e_check_write_global_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0); - i40e_global_cfg_warning(I40E_WARNING_FD_MSK); } else { PMD_DRV_LOG(ERR, "FDIR bit mask is not supported."); } @@ -9809,6 +10053,60 @@ i40e_pctype_to_flowtype(const struct i40e_adapter *adapter, #define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606 #define I40E_GL_SWR_PM_UP_THR 0x269FBC +/* + * GL_SWR_PM_UP_THR: + * The value is not impacted from the link speed, its value is set according + * to the total number of ports for a better pipe-monitor configuration. 
+ */ +static bool +i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value) +{ +#define I40E_GL_SWR_PM_EF_DEVICE(dev) \ + .device_id = (dev), \ + .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE + +#define I40E_GL_SWR_PM_SF_DEVICE(dev) \ + .device_id = (dev), \ + .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE + + static const struct { + uint16_t device_id; + uint32_t val; + } swr_pm_table[] = { + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) }, + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) }, + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) }, + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) }, + + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) }, + }; + uint32_t i; + + if (value == NULL) { + PMD_DRV_LOG(ERR, "value is NULL"); + return false; + } + + for (i = 0; i < RTE_DIM(swr_pm_table); i++) { + if (hw->device_id == swr_pm_table[i].device_id) { + *value = swr_pm_table[i].val; + + PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR " + "value - 0x%08x", + hw->device_id, *value); + return true; + } + } + + return false; +} + static int i40e_dev_sync_phy_type(struct i40e_hw *hw) { @@ -9873,13 +10171,16 @@ i40e_configure_registers(struct i40e_hw *hw) } if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) { - if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */ - I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */ - reg_table[i].val = - I40E_GL_SWR_PM_UP_THR_SF_VALUE; - else /* For X710 */ - reg_table[i].val = - I40E_GL_SWR_PM_UP_THR_EF_VALUE; + uint32_t cfg_val; + + if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) { + PMD_DRV_LOG(DEBUG, "Device 0x%x skips " + "GL_SWR_PM_UP_THR value fixup", + hw->device_id); + continue; + } + + reg_table[i].val = cfg_val; } ret = i40e_aq_debug_read_register(hw, reg_table[i].addr, @@ -10321,9 +10622,8 @@ i40e_start_timecounters(struct rte_eth_dev *dev) uint32_t tsync_inc_h; /* Get current link speed. */ - memset(&link, 0, sizeof(link)); i40e_dev_link_update(dev, 1); - rte_i40e_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); switch (link.link_speed) { case ETH_SPEED_NUM_40G: @@ -11249,8 +11549,148 @@ static int i40e_get_eeprom(struct rte_eth_dev *dev, return 0; } -static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *mac_addr) +static int i40e_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t sff8472_comp = 0; + uint32_t sff8472_swap = 0; + uint32_t sff8636_rev = 0; + i40e_status status; + uint32_t type = 0; + + /* Check if firmware supports reading module EEPROM. */ + if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) { + PMD_DRV_LOG(ERR, + "Module EEPROM memory read not supported. " + "Please update the NVM image.\n"); + return -EINVAL; + } + + status = i40e_update_link_info(hw); + if (status) + return -EIO; + + if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) { + PMD_DRV_LOG(ERR, + "Cannot read module EEPROM memory. 
" + "No module connected.\n"); + return -EINVAL; + } + + type = hw->phy.link_info.module_type[0]; + + switch (type) { + case I40E_MODULE_TYPE_SFP: + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + I40E_I2C_EEPROM_DEV_ADDR, + I40E_MODULE_SFF_8472_COMP, + &sff8472_comp, NULL); + if (status) + return -EIO; + + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + I40E_I2C_EEPROM_DEV_ADDR, + I40E_MODULE_SFF_8472_SWAP, + &sff8472_swap, NULL); + if (status) + return -EIO; + + /* Check if the module requires address swap to access + * the other EEPROM memory page. + */ + if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) { + PMD_DRV_LOG(WARNING, + "Module address swap to access " + "page 0xA2 is not supported.\n"); + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else if (sff8472_comp == 0x00) { + /* Module is not SFF-8472 compliant */ + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = RTE_ETH_MODULE_SFF_8472; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; + } + break; + case I40E_MODULE_TYPE_QSFP_PLUS: + /* Read from memory page 0. */ + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + 0, + I40E_MODULE_REVISION_ADDR, + &sff8636_rev, NULL); + if (status) + return -EIO; + /* Determine revision compliance byte */ + if (sff8636_rev > 0x02) { + /* Module is SFF-8636 compliant */ + modinfo->type = RTE_ETH_MODULE_SFF_8636; + modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; + } else { + modinfo->type = RTE_ETH_MODULE_SFF_8436; + modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; + } + break; + case I40E_MODULE_TYPE_QSFP28: + modinfo->type = RTE_ETH_MODULE_SFF_8636; + modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; + break; + default: + PMD_DRV_LOG(ERR, "Module type unrecognized\n"); + return -EINVAL; + } + return 0; +} + +static int i40e_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + bool is_sfp = false; + i40e_status status; + uint8_t *data = info->data; + uint32_t value = 0; + uint32_t i; + + if (!info || !info->length || !data) + return -EINVAL; + + if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP) + is_sfp = true; + + for (i = 0; i < info->length; i++) { + u32 offset = i + info->offset; + u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0; + + /* Check if we need to access the other memory page */ + if (is_sfp) { + if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) { + offset -= RTE_ETH_MODULE_SFF_8079_LEN; + addr = I40E_I2C_EEPROM_DEV_ADDR2; + } + } else { + while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) { + /* Compute memory page number and offset. 
*/ + offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2; + addr++; + } + } + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + addr, offset, &value, NULL); + if (status) + return -EIO; + data[i] = (uint8_t)value; + } + return 0; +} + +static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *mac_addr) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -11261,7 +11701,7 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, if (!is_valid_assigned_ether_addr(mac_addr)) { PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); - return; + return -EINVAL; } TAILQ_FOREACH(f, &vsi->mac_list, next) { @@ -11271,25 +11711,31 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, if (f == NULL) { PMD_DRV_LOG(ERR, "Failed to find filter for default mac"); - return; + return -EIO; } mac_filter = f->mac_info; ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to delete mac filter"); - return; + return -EIO; } memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN); ret = i40e_vsi_add_mac(vsi, &mac_filter); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to add mac filter"); - return; + return -EIO; } memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN); - i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, - mac_addr->addr_bytes, NULL); + ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, + mac_addr->addr_bytes, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to change mac"); + return -EIO; + } + + return 0; } static int @@ -11312,9 +11758,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) } if (frame_size > ETHER_MAX_LEN) - dev_data->dev_conf.rxmode.jumbo_frame = 1; + dev_data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - dev_data->dev_conf.rxmode.jumbo_frame = 0; + dev_data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size; @@ -11413,7 +11861,7 @@ i40e_rss_filter_restore(struct i40e_pf *pf) { struct i40e_rte_flow_rss_conf *conf = &pf->rss_info; - if (conf->num) + if (conf->conf.queue_num) i40e_config_rss_filter(pf, conf, TRUE); } @@ -11456,7 +11904,8 @@ i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index) static int i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, uint32_t pkg_size, uint32_t proto_num, - struct rte_pmd_i40e_proto_info *proto) + struct rte_pmd_i40e_proto_info *proto, + enum rte_pmd_i40e_package_op op) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); uint32_t pctype_num; @@ -11469,6 +11918,12 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, uint32_t i, j, n; int ret; + if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && + op != RTE_PMD_I40E_PKG_OP_WR_DEL) { + PMD_DRV_LOG(ERR, "Unsupported operation."); + return -1; + } + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, (uint8_t *)&pctype_num, sizeof(pctype_num), RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM); @@ -11531,8 +11986,13 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, i40e_find_customized_pctype(pf, I40E_CUSTOMIZED_GTPU); if (new_pctype) { - new_pctype->pctype = pctype_value; - new_pctype->valid = true; + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) { + new_pctype->pctype = pctype_value; + new_pctype->valid = true; + } else { + new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID; + new_pctype->valid = false; + } } } @@ -11542,8 
+12002,9 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, static int i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, - uint32_t pkg_size, uint32_t proto_num, - struct rte_pmd_i40e_proto_info *proto) + uint32_t pkg_size, uint32_t proto_num, + struct rte_pmd_i40e_proto_info *proto, + enum rte_pmd_i40e_package_op op) { struct rte_pmd_i40e_ptype_mapping *ptype_mapping; uint16_t port_id = dev->data->port_id; @@ -11556,6 +12017,17 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, bool in_tunnel; int ret; + if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && + op != RTE_PMD_I40E_PKG_OP_WR_DEL) { + PMD_DRV_LOG(ERR, "Unsupported operation."); + return -1; + } + + if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) { + rte_pmd_i40e_ptype_mapping_reset(port_id); + return 0; + } + /* get information about new ptype num */ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, (uint8_t *)&ptype_num, sizeof(ptype_num), @@ -11705,7 +12177,8 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, ptype_mapping[i].sw_ptype |= RTE_PTYPE_TUNNEL_GRENAT; in_tunnel = true; - } else if (!strncasecmp(name, "L2TPV2CTL", 9)) { + } else if (!strncasecmp(name, "L2TPV2CTL", 9) || + !strncasecmp(name, "L2TPV2", 6)) { ptype_mapping[i].sw_ptype |= RTE_PTYPE_TUNNEL_L2TP; in_tunnel = true; @@ -11728,7 +12201,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, - uint32_t pkg_size) + uint32_t pkg_size, enum rte_pmd_i40e_package_op op) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); uint32_t proto_num; @@ -11737,6 +12210,12 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, uint32_t i; int ret; + if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && + op != RTE_PMD_I40E_PKG_OP_WR_DEL) { + PMD_DRV_LOG(ERR, "Unsupported operation."); + return; + } + /* get information about protocol number */ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, (uint8_t *)&proto_num, sizeof(proto_num), @@ -11770,20 +12249,23 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, /* Check if GTP is supported. 
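A caller-side sketch, assuming pkg/pkg_size already hold a DDP profile image read from disk: the new op parameter is threaded down from rte_pmd_i40e_process_ddp_package(), so deleting a profile now unwinds the pctype/ptype state that adding it registered.

    #include <rte_pmd_i40e.h>

    static int
    add_ddp_profile(uint16_t port_id, uint8_t *pkg, uint32_t pkg_size)
    {
        /* Ends up in i40e_update_customized_info(..., WR_ADD). */
        return rte_pmd_i40e_process_ddp_package(port_id, pkg, pkg_size,
                RTE_PMD_I40E_PKG_OP_WR_ADD);
    }

    static int
    del_ddp_profile(uint16_t port_id, uint8_t *pkg, uint32_t pkg_size)
    {
        /* Ends up in i40e_update_customized_info(..., WR_DEL). */
        return rte_pmd_i40e_process_ddp_package(port_id, pkg, pkg_size,
                RTE_PMD_I40E_PKG_OP_WR_DEL);
    }
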
*/ for (i = 0; i < proto_num; i++) { if (!strncmp(proto[i].name, "GTP", 3)) { - pf->gtp_support = true; + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) + pf->gtp_support = true; + else + pf->gtp_support = false; break; } } /* Update customized pctype info */ ret = i40e_update_customized_pctype(dev, pkg, pkg_size, - proto_num, proto); + proto_num, proto, op); if (ret) PMD_DRV_LOG(INFO, "No pctype is updated."); /* Update customized ptype info */ ret = i40e_update_customized_ptype(dev, pkg, pkg_size, - proto_num, proto); + proto_num, proto, op); if (ret) PMD_DRV_LOG(INFO, "No ptype is updated."); @@ -11840,6 +12322,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) struct i40e_aqc_replace_cloud_filters_cmd filter_replace; struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; if (pf->support_multi_driver) { PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); @@ -11876,10 +12359,14 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) &filter_replace_buf); if (ret != I40E_SUCCESS) return ret; - PMD_DRV_LOG(DEBUG, "Global configuration modification: " - "cloud l1 type is changed from 0x%x to 0x%x", - filter_replace.old_filter_type, - filter_replace.new_filter_type); + + if (filter_replace.old_filter_type != + filter_replace.new_filter_type) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type." + " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); /* Apply the second L2 cloud filter */ memset(&filter_replace, 0, @@ -11901,16 +12388,51 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; ret = i40e_aq_replace_cloud_filters(hw, &filter_replace, &filter_replace_buf); - if (!ret) { - i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); - PMD_DRV_LOG(DEBUG, "Global configuration modification: " - "cloud filter type is changed from 0x%x to 0x%x", + if (!ret && (filter_replace.old_filter_type != + filter_replace.new_filter_type)) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type." 
+ " original: 0x%x, new: 0x%x", + dev->device->name, filter_replace.old_filter_type, filter_replace.new_filter_type); - } + return ret; } +int +i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in) +{ + if (in->key_len > RTE_DIM(out->key) || + in->queue_num > RTE_DIM(out->queue)) + return -EINVAL; + out->conf = (struct rte_flow_action_rss){ + .func = in->func, + .level = in->level, + .types = in->types, + .key_len = in->key_len, + .queue_num = in->queue_num, + .key = memcpy(out->key, in->key, in->key_len), + .queue = memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num), + }; + return 0; +} + +int +i40e_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with) +{ + return (comp->func == with->func && + comp->level == with->level && + comp->types == with->types && + comp->key_len == with->key_len && + comp->queue_num == with->queue_num && + !memcmp(comp->key, with->key, with->key_len) && + !memcmp(comp->queue, with->queue, + sizeof(*with->queue) * with->queue_num)); +} + int i40e_config_rss_filter(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *conf, bool add) @@ -11918,12 +12440,16 @@ i40e_config_rss_filter(struct i40e_pf *pf, struct i40e_hw *hw = I40E_PF_TO_HW(pf); uint32_t i, lut = 0; uint16_t j, num; - struct rte_eth_rss_conf rss_conf = conf->rss_conf; + struct rte_eth_rss_conf rss_conf = { + .rss_key = conf->conf.key_len ? + (void *)(uintptr_t)conf->conf.key : NULL, + .rss_key_len = conf->conf.key_len, + .rss_hf = conf->conf.types, + }; struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; if (!add) { - if (memcmp(conf, rss_info, - sizeof(struct i40e_rte_flow_rss_conf)) == 0) { + if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) { i40e_pf_disable_rss(pf); memset(rss_info, 0, sizeof(struct i40e_rte_flow_rss_conf)); @@ -11932,7 +12458,7 @@ i40e_config_rss_filter(struct i40e_pf *pf, return -EINVAL; } - if (rss_info->num) + if (rss_info->conf.queue_num) return -EINVAL; /* If both VMDQ and RSS enabled, not all of PF queues are configured. 
@@ -11943,7 +12469,7 @@ i40e_config_rss_filter(struct i40e_pf *pf, else num = pf->dev_data->nb_rx_queues; - num = RTE_MIN(num, conf->num); + num = RTE_MIN(num, conf->conf.queue_num); PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured", num); @@ -11956,7 +12482,7 @@ i40e_config_rss_filter(struct i40e_pf *pf, for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { if (j == num) j = 0; - lut = (lut << 8) | (conf->queue[j] & ((0x1 << + lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 << hw->func_caps.rss_table_entry_width) - 1)); if ((i & 3) == 3) I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut); @@ -11981,15 +12507,13 @@ i40e_config_rss_filter(struct i40e_pf *pf, i40e_hw_rss_hash_set(pf, &rss_conf); - rte_memcpy(rss_info, - conf, sizeof(struct i40e_rte_flow_rss_conf)); + if (i40e_rss_conf_init(rss_info, &conf->conf)) + return -EINVAL; return 0; } -RTE_INIT(i40e_init_log); -static void -i40e_init_log(void) +RTE_INIT(i40e_init_log) { i40e_logtype_init = rte_log_register("pmd.net.i40e.init"); if (i40e_logtype_init >= 0) @@ -12000,5 +12524,7 @@ i40e_init_log(void) } RTE_PMD_REGISTER_PARAM_STRING(net_i40e, - QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16" + ETH_I40E_FLOATING_VEB_ARG "=1" + ETH_I40E_FLOATING_VEB_LIST_ARG "=" + ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16" ETH_I40E_SUPPORT_MULTI_DRIVER "=1"); diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index 99efb670..3fffe5a5 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -5,12 +5,18 @@ #ifndef _I40E_ETHDEV_H_ #define _I40E_ETHDEV_H_ +#include + #include #include #include #include +#include #include #include +#include "rte_pmd_i40e.h" + +#include "base/i40e_register.h" #define I40E_VLAN_TAG_SIZE 4 @@ -22,6 +28,7 @@ #define I40E_NUM_DESC_ALIGN 32 #define I40E_BUF_SIZE_MIN 1024 #define I40E_FRAME_SIZE_MAX 9728 +#define I40E_TSO_FRAME_SIZE_MAX 262144 #define I40E_QUEUE_BASE_ADDR_UNIT 128 /* number of VSIs and queue default setting */ #define I40E_MAX_QP_NUM_PER_VF 16 @@ -80,11 +87,19 @@ #define I40E_WRITE_GLB_REG(hw, reg, value) \ do { \ + uint32_t ori_val; \ + struct rte_eth_dev *dev; \ + ori_val = I40E_READ_REG((hw), (reg)); \ + dev = ((struct i40e_adapter *)hw->back)->eth_dev; \ I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), \ (reg)), (value)); \ - PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified " \ - "with value 0x%08x", \ - (reg), (value)); \ + if (ori_val != value) \ + PMD_DRV_LOG(WARNING, \ + "i40e device %s changed global " \ + "register [0x%08x]. original: 0x%08x, " \ + "new: 0x%08x ", \ + (dev->device->name), (reg), \ + (ori_val), (value)); \ } while (0) /* index flex payload per layer */ @@ -170,7 +185,7 @@ enum i40e_flxpld_layer_idx { #define I40E_ITR_INDEX_NONE 3 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */ #define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */ -#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 8160 /* 8160 us */ +#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */ /* Special FW support this floating VEB feature */ #define FLOATING_VEB_SUPPORTED_FW_MAJ 5 #define FLOATING_VEB_SUPPORTED_FW_MIN 0 @@ -877,9 +892,11 @@ struct i40e_customized_pctype { }; struct i40e_rte_flow_rss_conf { - struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */ + struct rte_flow_action_rss conf; /**< RSS parameters. */ uint16_t queue_region_conf; /**< Queue region config flag */ - uint16_t num; /**< Number of entries in queue[]. */ + uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ? 
I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t)]; /* Hash key. */ uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */ }; @@ -956,6 +973,8 @@ struct i40e_pf { bool gtp_support; /* 1 - support GTP-C and GTP-U */ /* customer customized pctype */ struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX]; + /* Switch Domain Id */ + uint16_t switch_domain_id; }; enum pending_msg { @@ -1005,6 +1024,9 @@ struct i40e_vf { uint16_t promisc_flags; /* Promiscuous setting */ uint32_t vlan[I40E_VFTA_SIZE]; /* VLAN bit map */ + struct ether_addr mc_addrs[I40E_NUM_MACADDR_MAX]; /* Multicast addrs */ + uint16_t mc_addrs_num; /* Multicast mac addresses number */ + /* Event from pf */ bool dev_closed; bool link_up; @@ -1058,6 +1080,20 @@ struct i40e_adapter { uint64_t pctypes_mask; }; +/** + * Structure to store private data for each VF representor instance + */ +struct i40e_vf_representor { + uint16_t switch_domain_id; + /**< Switch domain ID */ + uint16_t vf_id; + /**< Virtual Function ID */ + struct i40e_adapter *adapter; + /**< Private data store of associated physical function */ + struct i40e_eth_stats stats_offset; + /**< Zero-point of VF statistics */ +}; + extern const struct rte_flow_ops i40e_flow_ops; union i40e_filter_t { @@ -1079,22 +1115,6 @@ struct i40e_valid_pattern { parse_filter_t parse_filter; }; -enum I40E_WARNING_IDX { - I40E_WARNING_DIS_FLX_PLD, - I40E_WARNING_ENA_FLX_PLD, - I40E_WARNING_QINQ_PARSER, - I40E_WARNING_QINQ_CLOUD_FILTER, - I40E_WARNING_TPID, - I40E_WARNING_FLOW_CTL, - I40E_WARNING_GRE_KEY_LEN, - I40E_WARNING_QF_CTL, - I40E_WARNING_HASH_INSET, - I40E_WARNING_HSYM, - I40E_WARNING_HASH_MSK, - I40E_WARNING_FD_MSK, - I40E_WARNING_RPL_CLD_FILTER, -}; - int i40e_dev_switch_queues(struct i40e_pf *pf, bool on); int i40e_vsi_release(struct i40e_vsi *vsi); struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, @@ -1206,7 +1226,8 @@ void i40e_tm_conf_uninit(struct rte_eth_dev *dev); struct i40e_customized_pctype* i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index); void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, - uint32_t pkg_size); + uint32_t pkg_size, + enum rte_pmd_i40e_package_op op); int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb); int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev, struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on); @@ -1214,8 +1235,14 @@ void i40e_init_queue_region_conf(struct rte_eth_dev *dev); void i40e_flex_payload_reg_set_default(struct i40e_hw *hw); int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len); int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size); +int i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in); +int i40e_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with); int i40e_config_rss_filter(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *conf, bool add); +int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params); +int i40e_vf_representor_uninit(struct rte_eth_dev *ethdev); #define I40E_DEV_TO_PCI(eth_dev) \ RTE_DEV_TO_PCI((eth_dev)->device) @@ -1292,50 +1319,23 @@ i40e_align_floor(int n) } static inline uint16_t -i40e_calc_itr_interval(int16_t interval, bool is_pf, bool is_multi_drv) +i40e_calc_itr_interval(bool is_pf, bool is_multi_drv) { - if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) { - if (is_multi_drv) { - interval = I40E_QUEUE_ITR_INTERVAL_MAX; - } else {
- if (is_pf) - interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; - else - interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT; - } + uint16_t interval = 0; + + if (is_multi_drv) { + interval = I40E_QUEUE_ITR_INTERVAL_MAX; + } else { + if (is_pf) + interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; + else + interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT; } /* Convert to hardware count, as writing each 1 represents 2 us */ return interval / 2; } -static inline void -i40e_global_cfg_warning(enum I40E_WARNING_IDX idx) -{ - const char *warning; - static const char *const warning_list[] = { - [I40E_WARNING_DIS_FLX_PLD] = "disable FDIR flexible payload", - [I40E_WARNING_ENA_FLX_PLD] = "enable FDIR flexible payload", - [I40E_WARNING_QINQ_PARSER] = "support QinQ parser", - [I40E_WARNING_QINQ_CLOUD_FILTER] = "support QinQ cloud filter", - [I40E_WARNING_TPID] = "support TPID configuration", - [I40E_WARNING_FLOW_CTL] = "configure water marker", - [I40E_WARNING_GRE_KEY_LEN] = "support GRE key length setting", - [I40E_WARNING_QF_CTL] = "support hash function setting", - [I40E_WARNING_HASH_INSET] = "configure hash input set", - [I40E_WARNING_HSYM] = "set symmetric hash", - [I40E_WARNING_HASH_MSK] = "configure hash mask", - [I40E_WARNING_FD_MSK] = "configure fdir mask", - [I40E_WARNING_RPL_CLD_FILTER] = "replace cloud filter", - }; - - warning = warning_list[idx]; - - RTE_LOG(WARNING, PMD, - "Global register is changed during %s\n", - warning); -} - #define I40E_VALID_FLOW(flow_type) \ ((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \ diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index fd003fe0..001c301b 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -44,6 +44,8 @@ #define I40EVF_BUSY_WAIT_COUNT 50 #define MAX_RESET_WAIT_CNT 20 +#define I40EVF_ALARM_INTERVAL 50000 /* us */ + struct i40evf_arq_msg_info { enum virtchnl_ops ops; enum i40e_status_code result; @@ -120,7 +122,7 @@ static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev, static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); -static void i40evf_set_default_mac_addr(struct rte_eth_dev *dev, +static int i40evf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static int i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); @@ -130,6 +132,14 @@ static void i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg, uint16_t msglen); +static int +i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addr_set, + uint32_t nb_mc_addr, bool add); +static int +i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set, + uint32_t nb_mc_addr); + /* Default hash key buffer for RSS */ static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1]; @@ -195,6 +205,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = { .txq_info_get = i40e_txq_info_get, .mac_addr_add = i40evf_add_mac_addr, .mac_addr_remove = i40evf_del_mac_addr, + .set_mc_addr_list = i40evf_set_mc_addr_list, .reta_update = i40evf_dev_rss_reta_update, .reta_query = i40evf_dev_rss_reta_query, .rss_hash_update = i40evf_dev_rss_hash_update, @@ -1036,20 +1047,6 @@ static const struct rte_pci_id pci_id_i40evf_map[] = { { .vendor_id = 0, /* sentinel */ }, }; -static inline int -i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct 
rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - /* Disable IRQ0 */ static inline void i40evf_disable_irq0(struct i40e_hw *hw) @@ -1138,7 +1135,7 @@ i40evf_init_vf(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); uint16_t interval = - i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0); + i40e_calc_itr_interval(0, 0); vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); vf->dev_data = dev->data; @@ -1375,7 +1372,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev) * void */ static void -i40evf_dev_interrupt_handler(void *param) +i40evf_dev_alarm_handler(void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1404,6 +1401,8 @@ i40evf_dev_interrupt_handler(void *param) done: i40evf_enable_irq0(hw); + rte_eal_alarm_set(I40EVF_ALARM_INTERVAL, + i40evf_dev_alarm_handler, dev); } static int @@ -1447,12 +1446,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev) return -1; } - /* register callback func to eal lib */ - rte_intr_callback_register(&pci_dev->intr_handle, - i40evf_dev_interrupt_handler, (void *)eth_dev); - - /* enable uio intr after callback register */ - rte_intr_enable(&pci_dev->intr_handle); + rte_eal_alarm_set(I40EVF_ALARM_INTERVAL, + i40evf_dev_alarm_handler, eth_dev); /* configure and enable device interrupt */ i40evf_enable_irq0(hw); @@ -1541,7 +1536,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev) /* For non-DPDK PF drivers, VF has no ability to disable HW * CRC strip, and is implicitly enabled by the PF. */ - if (!conf->rxmode.hw_strip_crc) { + if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) { vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) && (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) { @@ -1575,7 +1570,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask) /* Vlan stripping setting */ if (mask & ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping */ - if (dev_conf->rxmode.hw_vlan_strip) + if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) i40evf_enable_vlan_strip(dev); else i40evf_disable_vlan_strip(dev); @@ -1588,37 +1583,35 @@ static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct i40e_rx_queue *rxq; - int err = 0; + int err; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); - if (rx_queue_id < dev->data->nb_rx_queues) { - rxq = dev->data->rx_queues[rx_queue_id]; - - err = i40e_alloc_rx_queue_mbufs(rxq); - if (err) { - PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf"); - return err; - } + rxq = dev->data->rx_queues[rx_queue_id]; - rte_wmb(); + err = i40e_alloc_rx_queue_mbufs(rxq); + if (err) { + PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf"); + return err; + } - /* Init the RX tail register. */ - I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); - I40EVF_WRITE_FLUSH(hw); + rte_wmb(); - /* Ready to switch the queue on */ - err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE); + /* Init the RX tail register. 
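The VF driver above trades its admin-queue interrupt callback for a self-rearming EAL alarm. Because rte_eal_alarm_set() is one-shot, the handler reschedules itself, exactly as i40evf_dev_alarm_handler() now does every I40EVF_ALARM_INTERVAL (50 ms). A minimal sketch; poll_admin_queue() is a hypothetical stand-in for the handler body.

    #include <rte_alarm.h>

    #define POLL_INTERVAL_US 50000  /* mirrors I40EVF_ALARM_INTERVAL */

    static void
    poll_admin_queue(void *dev)     /* hypothetical: drain pending AQ events */
    {
        (void)dev;
    }

    static void
    alarm_handler(void *param)
    {
        poll_admin_queue(param);
        /* One-shot timer: re-arm before returning. */
        rte_eal_alarm_set(POLL_INTERVAL_US, alarm_handler, param);
    }

    /* Armed once at init, cancelled at close:
     *     rte_eal_alarm_set(POLL_INTERVAL_US, alarm_handler, dev);
     *     rte_eal_alarm_cancel(alarm_handler, dev);
     */
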
*/ + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + I40EVF_WRITE_FLUSH(hw); - if (err) - PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", - rx_queue_id); - else - dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + /* Ready to switch the queue on */ + err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", + rx_queue_id); + return err; } + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; - return err; + return 0; } static int @@ -1627,45 +1620,39 @@ i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct i40e_rx_queue *rxq; int err; - if (rx_queue_id < dev->data->nb_rx_queues) { - rxq = dev->data->rx_queues[rx_queue_id]; - - err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE); + rxq = dev->data->rx_queues[rx_queue_id]; - if (err) { - PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", - rx_queue_id); - return err; - } - - i40e_rx_queue_release_mbufs(rxq); - i40e_reset_rx_queue(rxq); - dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", + rx_queue_id); + return err; } + i40e_rx_queue_release_mbufs(rxq); + i40e_reset_rx_queue(rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) { - int err = 0; + int err; PMD_INIT_FUNC_TRACE(); - if (tx_queue_id < dev->data->nb_tx_queues) { - - /* Ready to switch the queue on */ - err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE); - - if (err) - PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on", - tx_queue_id); - else - dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + /* Ready to switch the queue on */ + err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on", + tx_queue_id); + return err; } + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; - return err; + return 0; } static int @@ -1674,22 +1661,19 @@ i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) struct i40e_tx_queue *txq; int err; - if (tx_queue_id < dev->data->nb_tx_queues) { - txq = dev->data->tx_queues[tx_queue_id]; - - err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE); + txq = dev->data->tx_queues[tx_queue_id]; - if (err) { - PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off", - tx_queue_id); - return err; - } - - i40e_tx_queue_release_mbufs(txq); - i40e_reset_tx_queue(txq); - dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off", + tx_queue_id); + return err; } + i40e_tx_queue_release_mbufs(txq); + i40e_reset_tx_queue(txq); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } @@ -1732,7 +1716,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq) /** * Check if the jumbo frame and maximum packet length are set correctly */ - if (dev_data->dev_conf.rxmode.jumbo_frame == 1) { + if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { if (rxq->max_pkt_len <= ETHER_MAX_LEN || rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) { PMD_DRV_LOG(ERR, "maximum packet length must be " @@ -1752,7 +1736,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct 
i40e_rx_queue *rxq) } } - if (dev_data->dev_conf.rxmode.enable_scatter || + if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) || (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) { dev_data->scattered_rx = 1; } @@ -1841,7 +1825,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint16_t interval = - i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0); + i40e_calc_itr_interval(0, 0); uint16_t msix_intr; msix_intr = intr_handle->intr_vec[queue_id]; @@ -1864,8 +1848,6 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) I40EVF_WRITE_FLUSH(hw); - rte_intr_enable(&pci_dev->intr_handle); - return 0; } @@ -2012,23 +1994,18 @@ i40evf_dev_start(struct rte_eth_dev *dev) /* Set all mac addrs */ i40evf_add_del_all_mac_addr(dev, TRUE); + /* Set all multicast addresses */ + i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num, + TRUE); if (i40evf_start_queues(dev) != 0) { PMD_DRV_LOG(ERR, "enable queues failed"); goto err_mac; } - /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt - * is mapped to VFIO vector 0 in i40evf_dev_init( ). - * If previous VFIO interrupt mapping set in i40evf_dev_init( ) is - * not cleared, it will fail when rte_intr_enable( ) tries to map Rx - * queue interrupt to other VFIO vectors. - * So clear uio/vfio intr/evevnfd first to avoid failure. - */ - if (dev->data->dev_conf.intr_conf.rxq != 0) { - rte_intr_disable(intr_handle); + /* only enable interrupt in rx interrupt mode */ + if (dev->data->dev_conf.intr_conf.rxq != 0) rte_intr_enable(intr_handle); - } i40evf_enable_queues_intr(dev); @@ -2036,6 +2013,8 @@ i40evf_dev_start(struct rte_eth_dev *dev) err_mac: i40evf_add_del_all_mac_addr(dev, FALSE); + i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num, + FALSE); err_queue: return -1; } @@ -2046,9 +2025,13 @@ i40evf_dev_stop(struct rte_eth_dev *dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); + if (dev->data->dev_conf.intr_conf.rxq != 0) + rte_intr_disable(intr_handle); + if (hw->adapter_stopped == 1) return; i40evf_stop_queues(dev); @@ -2063,6 +2046,9 @@ i40evf_dev_stop(struct rte_eth_dev *dev) } /* remove all mac addrs */ i40evf_add_del_all_mac_addr(dev, FALSE); + /* remove all multicast addresses */ + i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num, + FALSE); hw->adapter_stopped = 1; } @@ -2078,6 +2064,7 @@ i40evf_dev_link_update(struct rte_eth_dev *dev, * while Linux driver does not */ + memset(&new_link, 0, sizeof(new_link)); /* Linux driver PF host */ switch (vf->link_speed) { case I40E_LINK_SPEED_100MB: @@ -2107,11 +2094,9 @@ i40evf_dev_link_update(struct rte_eth_dev *dev, new_link.link_status = vf->link_up ? 
ETH_LINK_UP : ETH_LINK_DOWN; new_link.link_autoneg = - dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED; - - i40evf_dev_atomic_write_link_status(dev, &new_link); + !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); - return 0; + return rte_eth_linkstatus_set(dev, &new_link); } static void @@ -2179,8 +2164,6 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - memset(dev_info, 0, sizeof(*dev_info)); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs; dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs; dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN; @@ -2189,6 +2172,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->reta_size = ETH_RSS_RETA_SIZE_64; dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask; dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX; + dev_info->rx_queue_offload_capa = 0; dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_QINQ_STRIP | @@ -2196,7 +2180,13 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_RX_OFFLOAD_CRC_STRIP; + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_VLAN_FILTER; + + dev_info->tx_queue_offload_capa = 0; dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT | @@ -2209,7 +2199,8 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_IPIP_TNL_TSO | - DEV_TX_OFFLOAD_GENEVE_TNL_TSO; + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -2219,6 +2210,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -2229,8 +2221,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .offloads = 0, }; dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { @@ -2276,19 +2267,20 @@ static void i40evf_dev_close(struct rte_eth_dev *dev) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev); i40evf_dev_stop(dev); i40e_dev_free_queues(dev); + /* + * disable promiscuous mode before reset vf + * it is a workaround solution when work with kernel driver + * and it is not the normal way + */ + i40evf_dev_promiscuous_disable(dev); + i40evf_dev_allmulticast_disable(dev); + i40evf_reset_vf(hw); i40e_shutdown_adminq(hw); - /* disable uio intr before callback unregister */ - rte_intr_disable(intr_handle); - - /* unregister callback func from eal lib */ - rte_intr_callback_unregister(intr_handle, - i40evf_dev_interrupt_handler, dev); i40evf_disable_irq0(hw); } @@ -2649,16 +2641,17 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) } if (frame_size > ETHER_MAX_LEN) - 
dev_data->dev_conf.rxmode.jumbo_frame = 1; + dev_data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - dev_data->dev_conf.rxmode.jumbo_frame = 0; - + dev_data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size; return ret; } -static void +static int i40evf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { @@ -2667,15 +2660,99 @@ i40evf_set_default_mac_addr(struct rte_eth_dev *dev, if (!is_valid_assigned_ether_addr(mac_addr)) { PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); - return; + return -EINVAL; } if (vf->flags & I40E_FLAG_VF_MAC_BY_PF) - return; + return -EPERM; i40evf_del_mac_addr_by_addr(dev, (struct ether_addr *)hw->mac.addr); - i40evf_add_mac_addr(dev, mac_addr, 0, 0); + if (i40evf_add_mac_addr(dev, mac_addr, 0, 0) != 0) + return -EIO; ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr); + return 0; +} + +static int +i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addrs, + uint32_t mc_addrs_num, bool add) +{ + struct virtchnl_ether_addr_list *list; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + + (I40E_NUM_MACADDR_MAX * sizeof(struct virtchnl_ether_addr))]; + uint32_t i; + int err; + struct vf_cmd_info args; + + if (mc_addrs == NULL || mc_addrs_num == 0) + return 0; + + if (mc_addrs_num > I40E_NUM_MACADDR_MAX) + return -EINVAL; + + list = (struct virtchnl_ether_addr_list *)cmd_buffer; + list->vsi_id = vf->vsi_res->vsi_id; + list->num_elements = mc_addrs_num; + + for (i = 0; i < mc_addrs_num; i++) { + if (!I40E_IS_MULTICAST(mc_addrs[i].addr_bytes)) { + PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x", + mc_addrs[i].addr_bytes[0], + mc_addrs[i].addr_bytes[1], + mc_addrs[i].addr_bytes[2], + mc_addrs[i].addr_bytes[3], + mc_addrs[i].addr_bytes[4], + mc_addrs[i].addr_bytes[5]); + return -EINVAL; + } + + memcpy(list->list[i].addr, mc_addrs[i].addr_bytes, + sizeof(list->list[i].addr)); + } + + args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR; + args.in_args = cmd_buffer; + args.in_args_size = sizeof(struct virtchnl_ether_addr_list) + + i * sizeof(struct virtchnl_ether_addr); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? 
"OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR"); + return err; + } + + return 0; +} + +static int +i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addrs, + uint32_t mc_addrs_num) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int err; + + /* flush previous addresses */ + err = i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num, + FALSE); + if (err) + return err; + + vf->mc_addrs_num = 0; + + /* add new ones */ + err = i40evf_add_del_mc_addr_list(dev, mc_addrs, mc_addrs_num, + TRUE); + if (err) + return err; + + vf->mc_addrs_num = mc_addrs_num; + memcpy(vf->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs)); + + return 0; } diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c index b83a0cff..d41601a1 100644 --- a/drivers/net/i40e/i40e_fdir.c +++ b/drivers/net/i40e/i40e_fdir.c @@ -525,8 +525,7 @@ i40e_set_flx_pld_cfg(struct i40e_pf *pf, flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) | (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) | (layer_idx * I40E_MAX_FLXPLD_FIED); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort); - i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort); } for (i = 0; i < num; i++) { diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index 16c47cf7..c67b264d 100644 --- a/drivers/net/i40e/i40e_flow.c +++ b/drivers/net/i40e/i40e_flow.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -53,6 +54,7 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev, struct rte_flow_error *error, struct rte_eth_ethertype_filter *filter); static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item *pattern, struct rte_flow_error *error, struct i40e_fdir_filter_conf *filter); @@ -1939,7 +1941,8 @@ static uint16_t i40e_get_outer_vlan(struct rte_eth_dev *dev) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend; + int qinq = dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND; uint64_t reg_r = 0; uint16_t reg_id; uint16_t tpid; @@ -2259,8 +2262,7 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf, flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) | (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) | (layer_idx * I40E_MAX_FLXPLD_FIED); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort); - i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort); } /* Set flex pit */ @@ -2400,7 +2402,7 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf, break; } - if (cus_pctype) + if (cus_pctype && cus_pctype->valid) return cus_pctype->pctype; return I40E_FILTER_PCTYPE_INVALID; @@ -2419,6 +2421,7 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf, */ static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item *pattern, struct rte_flow_error *error, struct i40e_fdir_filter_conf *filter) @@ -2490,16 +2493,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, "Invalid MAC_addr mask."); return -rte_errno; } + } + if (eth_spec && eth_mask && eth_mask->type) { + enum rte_flow_item_type next = (item + 1)->type; - if ((eth_mask->type & UINT16_MAX) == - UINT16_MAX) { - input_set |= I40E_INSET_LAST_ETHER_TYPE; - filter->input.flow.l2_flow.ether_type = - eth_spec->type; + if (eth_mask->type != RTE_BE16(0xffff)) { + 
rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid type mask."); + return -rte_errno; } ether_type = rte_be_to_cpu_16(eth_spec->type); - if (ether_type == ETHER_TYPE_IPv4 || + + if (next == RTE_FLOW_ITEM_TYPE_VLAN || + ether_type == ETHER_TYPE_IPv4 || ether_type == ETHER_TYPE_IPv6 || ether_type == ETHER_TYPE_ARP || ether_type == outer_tpid) { @@ -2509,6 +2518,9 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, "Unsupported ether_type."); return -rte_errno; } + input_set |= I40E_INSET_LAST_ETHER_TYPE; + filter->input.flow.l2_flow.ether_type = + eth_spec->type; } pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD; @@ -2518,6 +2530,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_VLAN: vlan_spec = item->spec; vlan_mask = item->mask; + + RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE)); if (vlan_spec && vlan_mask) { if (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) { @@ -2526,6 +2540,33 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, vlan_spec->tci; } } + if (vlan_spec && vlan_mask && vlan_mask->inner_type) { + if (vlan_mask->inner_type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid inner_type" + " mask."); + return -rte_errno; + } + + ether_type = + rte_be_to_cpu_16(vlan_spec->inner_type); + + if (ether_type == ETHER_TYPE_IPv4 || + ether_type == ETHER_TYPE_IPv6 || + ether_type == ETHER_TYPE_ARP || + ether_type == outer_tpid) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported inner_type."); + return -rte_errno; + } + input_set |= I40E_INSET_LAST_ETHER_TYPE; + filter->input.flow.l2_flow.ether_type = + vlan_spec->inner_type; + } pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD; layer_idx = I40E_FLXPLD_L2_IDX; @@ -2918,6 +2959,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_VF: vf_spec = item->spec; + if (!attr->transfer) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Matching VF traffic" + " without affecting it" + " (transfer attribute)" + " is unsupported"); + return -rte_errno; + } filter->input.flow_ext.is_vf = 1; filter->input.flow_ext.dst_id = vf_spec->id; if (filter->input.flow_ext.is_vf && @@ -3080,7 +3131,8 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev, &filter->fdir_filter; int ret; - ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter); + ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error, + fdir_filter); if (ret) return ret; @@ -3284,7 +3336,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_VLAN: vlan_spec = item->spec; vlan_mask = item->mask; - if (!(vlan_spec && vlan_mask)) { + if (!(vlan_spec && vlan_mask) || + vlan_mask->inner_type) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -3514,7 +3567,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_VLAN: vlan_spec = item->spec; vlan_mask = item->mask; - if (!(vlan_spec && vlan_mask)) { + if (!(vlan_spec && vlan_mask) || + vlan_mask->inner_type) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -4022,7 +4076,8 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev, vlan_spec = item->spec; vlan_mask = item->mask; - if (!(vlan_spec && vlan_mask)) { + if (!(vlan_spec && vlan_mask) || + vlan_mask->inner_type) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -4150,7 +4205,8 @@ 
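The i40evf_set_mc_addr_list handler added in the hunk above replaces the VF's multicast filter list as a whole: the previously programmed addresses are flushed before the new set is written, up to I40E_NUM_MACADDR_MAX entries per virtchnl message. A minimal application-side sketch of driving it through the generic ethdev call (the port id and addresses are placeholders; the 18.08 API still uses struct ether_addr):

#include <rte_ethdev.h>
#include <rte_ether.h>

/* Sketch: program the complete multicast filter list of a port.
 * The i40evf callback flushes the old list first, so the array
 * passed here is always the full desired set; nb_mc_addr == 0
 * clears all multicast filters.
 */
static int
set_port_mc_filters(uint16_t port_id)
{
	/* two IPv4 multicast MAC addresses (01:00:5e:xx:xx:xx) */
	struct ether_addr mc_addrs[2] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 } },
	};

	return rte_eth_dev_set_mc_addr_list(port_id, mc_addrs, 2);
}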
i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
 			if (vlan_mask->tci ==
 			    rte_cpu_to_be_16(I40E_TCI_MASK)) {
 				info->region[0].user_priority[0] =
-					(vlan_spec->tci >> 13) & 0x7;
+					(rte_be_to_cpu_16(
+					vlan_spec->tci) >> 13) & 0x7;
 				info->region[0].user_priority_num = 1;
 				info->queue_region_number = 1;
 				*action_flag = 0;
@@ -4169,6 +4225,19 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
 	return 0;
 }
 
+/**
+ * This function parses the RSS queue indices, the total queue number and
+ * the hash functions. If this configuration is meant for queue region
+ * configuration, it sets the queue_region_conf flag to TRUE, otherwise to
+ * FALSE. For queue region configuration it also needs to parse the
+ * hardware flowtype and user_priority from the configuration, and it
+ * checks the validity of these parameters. For example, the queue region
+ * size must be one of the following values: 1, 2, 4, 8, 16, 32, 64; the
+ * maximum hw_flowtype or PCTYPE index is 63; the maximum user priority
+ * index is 7; and so on. Also, the queue indices must form a continuous
+ * sequence, and the queue region indices must be a subset of the RSS
+ * queue indices for this port.
+ */
 static int
 i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
 			   const struct rte_flow_action *actions,
@@ -4205,7 +4274,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
 	if (action_flag) {
 		for (n = 0; n < 64; n++) {
-			if (rss->rss_conf->rss_hf & (hf_bit << n)) {
+			if (rss->types & (hf_bit << n)) {
 				conf_info->region[0].hw_flowtype[0] = n;
 				conf_info->region[0].flowtype_num = 1;
 				conf_info->queue_region_number = 1;
@@ -4214,41 +4283,73 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
 		}
 	}
 
+	/**
+	 * Check the queue region related parameters so that the
+	 * queue indices for a queue region form a continuous
+	 * sequence and are also a subset of the RSS queue
+	 * indices for this port.
+ */ + if (conf_info->queue_region_number) { + for (i = 0; i < rss->queue_num; i++) { + for (j = 0; j < rss_info->conf.queue_num; j++) { + if (rss->queue[i] == rss_info->conf.queue[j]) + break; + } + if (j == rss_info->conf.queue_num) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "no valid queues"); + return -rte_errno; + } + } + + for (i = 0; i < rss->queue_num - 1; i++) { + if (rss->queue[i + 1] != rss->queue[i] + 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "no valid queues"); + return -rte_errno; + } + } + } + + /* Parse queue region related parameters from configuration */ for (n = 0; n < conf_info->queue_region_number; n++) { if (conf_info->region[n].user_priority_num || conf_info->region[n].flowtype_num) { - if (!((rte_is_power_of_2(rss->num)) && - rss->num <= 64)) { - PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the " - "total number of queues do not exceed the VSI allocation"); + if (!((rte_is_power_of_2(rss->queue_num)) && + rss->queue_num <= 64)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the " + "total number of queues do not exceed the VSI allocation"); return -rte_errno; } if (conf_info->region[n].user_priority[n] >= I40E_MAX_USER_PRIORITY) { - PMD_DRV_LOG(ERR, "the user priority max index is 7"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "the user priority max index is 7"); return -rte_errno; } if (conf_info->region[n].hw_flowtype[n] >= I40E_FILTER_PCTYPE_MAX) { - PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63"); - return -rte_errno; - } - - if (rss_info->num < rss->num || - rss_info->queue[0] < rss->queue[0] || - (rss->queue[0] + rss->num > - rss_info->num + rss_info->queue[0])) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, - "no valid queues"); + "the hw_flowtype or PCTYPE max index is 63"); return -rte_errno; } for (i = 0; i < info->queue_region_number; i++) { - if (info->region[i].queue_num == rss->num && + if (info->region[i].queue_num == + rss->queue_num && info->region[i].queue_start_index == rss->queue[0]) break; @@ -4256,12 +4357,15 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev, if (i == info->queue_region_number) { if (i > I40E_REGION_MAX_INDEX) { - PMD_DRV_LOG(ERR, "the queue region max index is 7"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "the queue region max index is 7"); return -rte_errno; } info->region[i].queue_num = - rss->num; + rss->queue_num; info->region[i].queue_start_index = rss->queue[0]; info->region[i].region_id = @@ -4301,10 +4405,13 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev, rss_config->queue_region_conf = TRUE; } + /** + * Return function if this flow is used for queue region configuration + */ if (rss_config->queue_region_conf) return 0; - if (!rss || !rss->num) { + if (!rss || !rss->queue_num) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, @@ -4312,7 +4419,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev, return -rte_errno; } - for (n = 0; n < rss->num; n++) { + for (n = 0; n < rss->queue_num; n++) { if (rss->queue[n] >= dev->data->nb_rx_queues) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -4321,15 +4428,29 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev, return -rte_errno; } } - if (rss->rss_conf) - 
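The two new loops above enforce the queue-region constraints directly in the parser: every queue named by the action must already belong to the port's RSS queue set, and the queues must be consecutive. The same rules in a standalone sketch (function and parameter names are illustrative, not part of the driver):

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the validity rules enforced above: region queues must be
 * a subset of the RSS queues and must form a continuous sequence,
 * e.g. 8,9,10,11 but not 8,10,11,12.
 */
static bool
region_queues_valid(const uint16_t *region, uint16_t region_num,
		    const uint16_t *rss, uint16_t rss_num)
{
	uint16_t i, j;

	for (i = 0; i < region_num; i++) {
		for (j = 0; j < rss_num; j++)
			if (region[i] == rss[j])
				break;
		if (j == rss_num)
			return false; /* not part of the RSS queue set */
	}

	for (i = 0; i + 1 < region_num; i++)
		if (region[i + 1] != region[i] + 1)
			return false; /* not consecutive */

	return true;
}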
rss_config->rss_conf = *rss->rss_conf; - else - rss_config->rss_conf.rss_hf = - pf->adapter->flow_types_mask; - for (n = 0; n < rss->num; ++n) - rss_config->queue[n] = rss->queue[n]; - rss_config->num = rss->num; + /* Parse RSS related parameters from configuration */ + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "non-default RSS hash functions are not supported"); + if (rss->level) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "a nonzero RSS encapsulation level is not supported"); + if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS hash key too large"); + if (rss->queue_num > RTE_DIM(rss_config->queue)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "too many queues for RSS context"); + if (i40e_rss_conf_init(rss_config, rss)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS context initialization failure"); + index++; /* check if the next not void action is END */ @@ -4385,14 +4506,15 @@ i40e_config_rss_filter_set(struct rte_eth_dev *dev, { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; if (conf->queue_region_conf) { - i40e_flush_queue_region_all_conf(dev, hw, pf, 1); + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1); conf->queue_region_conf = 0; } else { - i40e_config_rss_filter(pf, conf, 1); + ret = i40e_config_rss_filter(pf, conf, 1); } - return 0; + return ret; } static int @@ -4545,6 +4667,8 @@ i40e_flow_create(struct rte_eth_dev *dev, case RTE_ETH_FILTER_HASH: ret = i40e_config_rss_filter_set(dev, &cons_filter.rss_conf); + if (ret) + goto free_flow; flow->rule = &pf->rss_info; break; default: @@ -4846,7 +4970,7 @@ i40e_flow_flush_rss_filter(struct rte_eth_dev *dev) ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0); - if (rss_info->num) + if (rss_info->conf.queue_num) ret = i40e_config_rss_filter(pf, rss_info, FALSE); return ret; } diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index 1217e5a6..2a28ee34 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -40,9 +40,6 @@ /* Base address of the HW descriptor ring should be 128B aligned. */ #define I40E_RING_BASE_ALIGN 128 -#define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ - ETH_TXQ_FLAGS_NOOFFLOADS) - #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) #ifdef RTE_LIBRTE_IEEE1588 @@ -1240,7 +1237,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq) for (i = 0; i < txq->tx_rs_thresh; i++) rte_prefetch0((txep + i)->mbuf); - if (txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) { + if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) { for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { rte_mempool_put(txep->mbuf->pool, txep->mbuf); txep->mbuf = NULL; @@ -1442,13 +1439,15 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, /* Check for m->nb_segs to not exceed the limits. 
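The new checks on rss->func, rss->level and rss->key_len reflect the reworked rte_flow RSS action, where the hash configuration is embedded in struct rte_flow_action_rss rather than referenced through rte_eth_rss_conf. A sketch of an action this parser accepts (queue numbers and hash type are arbitrary examples):

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Sketch: an RSS action spreading matching traffic over four
 * consecutive queues, with the defaults the i40e parser allows:
 * default hash function, outermost encapsulation level, no
 * explicit key (the device key is kept).
 */
static const uint16_t rss_queues[] = { 0, 1, 2, 3 };

static const struct rte_flow_action_rss rss_action = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.level = 0,
	.types = ETH_RSS_NONFRAG_IPV4_TCP,
	.key_len = 0,
	.key = NULL,
	.queue_num = 4,
	.queue = rss_queues,
};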
 */
 	if (!(ol_flags & PKT_TX_TCP_SEG)) {
-		if (m->nb_segs > I40E_TX_MAX_SEG ||
-		    m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+		if (m->nb_segs > I40E_TX_MAX_MTU_SEG ||
+		    m->pkt_len > I40E_FRAME_SIZE_MAX) {
 			rte_errno = -EINVAL;
 			return i;
 		}
-	} else if ((m->tso_segsz < I40E_MIN_TSO_MSS) ||
-		(m->tso_segsz > I40E_MAX_TSO_MSS)) {
+	} else if (m->nb_segs > I40E_TX_MAX_SEG ||
+		   m->tso_segsz < I40E_MIN_TSO_MSS ||
+		   m->tso_segsz > I40E_MAX_TSO_MSS ||
+		   m->pkt_len > I40E_TSO_FRAME_SIZE_MAX) {
 		/* MSS outside the range (256B - 9674B) are considered
 		 * malicious
 		 */
@@ -1461,6 +1460,12 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 			return i;
 		}
 
+		/* check the packet size */
+		if (m->pkt_len < I40E_TX_MIN_PKT_LEN) {
+			rte_errno = -EINVAL;
+			return i;
+		}
+
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		ret = rte_validate_tx_offload(m);
 		if (ret != 0) {
@@ -1527,38 +1532,36 @@ int
 i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 	struct i40e_rx_queue *rxq;
-	int err = -1;
+	int err;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (rx_queue_id < dev->data->nb_rx_queues) {
-		rxq = dev->data->rx_queues[rx_queue_id];
-
-		err = i40e_alloc_rx_queue_mbufs(rxq);
-		if (err) {
-			PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
-			return err;
-		}
+	rxq = dev->data->rx_queues[rx_queue_id];
 
-		rte_wmb();
+	err = i40e_alloc_rx_queue_mbufs(rxq);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+		return err;
+	}
 
-		/* Init the RX tail regieter. */
-		I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+	rte_wmb();
 
-		err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+	/* Init the RX tail register. */
+	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 
-		if (err) {
-			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
-				    rx_queue_id);
+	err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+			    rx_queue_id);
 
-			i40e_rx_queue_release_mbufs(rxq);
-			i40e_reset_rx_queue(rxq);
-		} else
-			dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+		i40e_rx_queue_release_mbufs(rxq);
+		i40e_reset_rx_queue(rxq);
+		return err;
 	}
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 
-	return err;
+	return 0;
 }
 
 int
@@ -1568,24 +1571,21 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	int err;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (rx_queue_id < dev->data->nb_rx_queues) {
-		rxq = dev->data->rx_queues[rx_queue_id];
+	rxq = dev->data->rx_queues[rx_queue_id];
 
-		/*
-		 * rx_queue_id is queue id application refers to, while
-		 * rxq->reg_idx is the real queue index.
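i40e_prep_pkts() now also rejects undersized frames (shorter than I40E_TX_MIN_PKT_LEN, 17 bytes) and over-long TSO bursts. Applications reach these checks through rte_eth_tx_prepare(); a minimal usage sketch (port, queue and the diagnostic output are placeholders):

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

/* Sketch: validate a burst before transmitting it. tx_prepare runs
 * the PMD checks (segment count, TSO MSS range, minimum/maximum
 * packet length) and returns the index of the first offending mbuf.
 */
static uint16_t
send_checked_burst(uint16_t port, uint16_t queue,
		   struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t nb_ok = rte_eth_tx_prepare(port, queue, pkts, n);

	if (nb_ok != n)
		/* pkts[nb_ok] was rejected; rte_errno says why */
		printf("tx_prepare stopped at %u (rte_errno %d)\n",
		       nb_ok, rte_errno);

	return rte_eth_tx_burst(port, queue, pkts, nb_ok);
}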
+ */ + err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", + rx_queue_id); + return err; } + i40e_rx_queue_release_mbufs(rxq); + i40e_reset_rx_queue(rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; return 0; } @@ -1593,28 +1593,27 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) int i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) { - int err = -1; + int err; struct i40e_tx_queue *txq; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); - if (tx_queue_id < dev->data->nb_tx_queues) { - txq = dev->data->tx_queues[tx_queue_id]; + txq = dev->data->tx_queues[tx_queue_id]; - /* - * tx_queue_id is queue id application refers to, while - * rxq->reg_idx is the real queue index. - */ - err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE); - if (err) - PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on", - tx_queue_id); - else - dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + /* + * tx_queue_id is queue id application refers to, while + * rxq->reg_idx is the real queue index. + */ + err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on", + tx_queue_id); + return err; } + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; - return err; + return 0; } int @@ -1624,26 +1623,23 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) int err; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (tx_queue_id < dev->data->nb_tx_queues) { - txq = dev->data->tx_queues[tx_queue_id]; + txq = dev->data->tx_queues[tx_queue_id]; - /* - * tx_queue_id is queue id application refers to, while - * txq->reg_idx is the real queue index. - */ - err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE); - - if (err) { - PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of", - tx_queue_id); - return err; - } - - i40e_tx_queue_release_mbufs(txq); - i40e_reset_tx_queue(txq); - dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + /* + * tx_queue_id is queue id application refers to, while + * txq->reg_idx is the real queue index. 
+	 */
+	err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+			    tx_queue_id);
+		return err;
 	}
+	i40e_tx_queue_release_mbufs(txq);
+	i40e_reset_tx_queue(txq);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
 	return 0;
 }
@@ -1692,6 +1688,75 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	return NULL;
 }
 
+static int
+i40e_dev_first_queue(uint16_t idx, void **queues, int num)
+{
+	uint16_t i;
+
+	for (i = 0; i < num; i++) {
+		if (i != idx && queues[i])
+			return 0;
+	}
+
+	return 1;
+}
+
+static int
+i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
+				struct i40e_rx_queue *rxq)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int use_def_burst_func =
+		check_rx_burst_bulk_alloc_preconditions(rxq);
+	uint16_t buf_size =
+		(uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+			   RTE_PKTMBUF_HEADROOM);
+	int use_scattered_rx =
+		((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size);
+
+	if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to do RX queue initialization");
+		return -EINVAL;
+	}
+
+	if (i40e_dev_first_queue(rxq->queue_id,
+				 dev->data->rx_queues,
+				 dev->data->nb_rx_queues)) {
+		/**
+		 * If it is the first queue to setup,
+		 * set all flags to default and call
+		 * i40e_set_rx_function.
+		 */
+		ad->rx_bulk_alloc_allowed = true;
+		ad->rx_vec_allowed = true;
+		dev->data->scattered_rx = use_scattered_rx;
+		if (use_def_burst_func)
+			ad->rx_bulk_alloc_allowed = false;
+		i40e_set_rx_function(dev);
+		return 0;
+	}
+
+	/* check bulk alloc conflict */
+	if (ad->rx_bulk_alloc_allowed && use_def_burst_func) {
+		PMD_DRV_LOG(ERR, "Can't use default burst.");
+		return -EINVAL;
+	}
+	/* check scattered conflict */
+	if (!dev->data->scattered_rx && use_scattered_rx) {
+		PMD_DRV_LOG(ERR, "Scattered rx is required.");
+		return -EINVAL;
+	}
+	/* check vector conflict */
+	if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) {
+		PMD_DRV_LOG(ERR, "Failed vector rx setup.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int
 i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -1712,6 +1777,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len, i;
 	uint16_t reg_idx, base, bsf, tc_mapping;
 	int q_offset, use_def_burst_func = 1;
+	uint64_t offloads;
+
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,11 +1828,14 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->queue_id = queue_idx;
 	rxq->reg_idx = reg_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-		0 : ETHER_CRC_LEN);
+	if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+		rxq->crc_len = ETHER_CRC_LEN;
+	else
+		rxq->crc_len = 0;
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->vsi = vsi;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+	rxq->offloads = offloads;
 
 	/* Allocate the maximum number of RX ring hardware descriptors. */
 	len = I40E_MAX_RING_DESC;
@@ -1808,25 +1879,6 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	i40e_reset_rx_queue(rxq);
 	rxq->q_set = TRUE;
-	dev->data->rx_queues[queue_idx] = rxq;
-
-	use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
-
-	if (!use_def_burst_func) {
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-			     "satisfied.
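i40e_dev_rx_queue_setup_runtime() is what allows a single Rx queue to be reconfigured while the port is running, as long as the new queue does not conflict with the Rx burst mode chosen when the first queue was set up (bulk-alloc, scattered, vector). A hedged sketch of the application-side sequence (descriptor count and mempool are placeholders, and the PMD must support runtime queue setup):

#include <rte_ethdev.h>

/* Sketch: re-create one Rx queue at runtime. Only valid when the
 * driver supports runtime setup and the new configuration matches
 * the already-selected Rx path.
 */
static int
resize_rx_queue(uint16_t port, uint16_t qid, uint16_t nb_desc,
		struct rte_mempool *mp)
{
	int ret = rte_eth_dev_rx_queue_stop(port, qid);

	if (ret != 0)
		return ret;

	/* NULL rx_conf keeps the port's default queue configuration */
	ret = rte_eth_rx_queue_setup(port, qid, nb_desc,
				     rte_eth_dev_socket_id(port), NULL, mp);
	if (ret != 0)
		return ret;

	return rte_eth_dev_rx_queue_start(port, qid);
}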
Rx Burst Bulk Alloc function will be " - "used on port=%d, queue=%d.", - rxq->port_id, rxq->queue_id); -#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ - } else { - PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " - "not satisfied, Scattered Rx is requested, " - "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is " - "not enabled on port=%d, queue=%d.", - rxq->port_id, rxq->queue_id); - ad->rx_bulk_alloc_allowed = false; - } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (!(vsi->enabled_tc & (1 << i))) @@ -1841,6 +1893,34 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq->dcb_tc = i; } + if (dev->data->dev_started) { + if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) { + i40e_dev_rx_queue_release(rxq); + return -EINVAL; + } + } else { + use_def_burst_func = + check_rx_burst_bulk_alloc_preconditions(rxq); + if (!use_def_burst_func) { +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + PMD_INIT_LOG(DEBUG, + "Rx Burst Bulk Alloc Preconditions are " + "satisfied. Rx Burst Bulk Alloc function will be " + "used on port=%d, queue=%d.", + rxq->port_id, rxq->queue_id); +#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ + } else { + PMD_INIT_LOG(DEBUG, + "Rx Burst Bulk Alloc Preconditions are " + "not satisfied, Scattered Rx is requested, " + "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is " + "not enabled on port=%d, queue=%d.", + rxq->port_id, rxq->queue_id); + ad->rx_bulk_alloc_allowed = false; + } + } + + dev->data->rx_queues[queue_idx] = rxq; return 0; } @@ -1972,6 +2052,52 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) return RTE_ETH_TX_DESC_FULL; } +static int +i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev, + struct i40e_tx_queue *txq) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + if (i40e_tx_queue_init(txq) != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to do TX queue initialization"); + return -EINVAL; + } + + if (i40e_dev_first_queue(txq->queue_id, + dev->data->tx_queues, + dev->data->nb_tx_queues)) { + /** + * If it is the first queue to setup, + * set all flags and call + * i40e_set_tx_function. 
+	 */
+		i40e_set_tx_function_flag(dev, txq);
+		i40e_set_tx_function(dev);
+		return 0;
+	}
+
+	/* check vector conflict */
+	if (ad->tx_vec_allowed) {
+		if (txq->tx_rs_thresh > RTE_I40E_TX_MAX_FREE_BUF_SZ ||
+		    i40e_txq_vec_setup(txq)) {
+			PMD_DRV_LOG(ERR, "Failed vector tx setup.");
+			return -EINVAL;
+		}
+	}
+	/* check simple tx conflict */
+	if (ad->tx_simple_allowed) {
+		if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
+		    txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
+			PMD_DRV_LOG(ERR, "Non-simple Tx is required.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 int
 i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -1989,6 +2115,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint16_t reg_idx, i, base, bsf, tc_mapping;
 	int q_offset;
+	uint64_t offloads;
+
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -2123,7 +2252,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->queue_id = queue_idx;
 	txq->reg_idx = reg_idx;
 	txq->port_id = dev->data->port_id;
-	txq->txq_flags = tx_conf->txq_flags;
+	txq->offloads = offloads;
 	txq->vsi = vsi;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
@@ -2144,10 +2273,6 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	i40e_reset_tx_queue(txq);
 	txq->q_set = TRUE;
-	dev->data->tx_queues[queue_idx] = txq;
-
-	/* Use a simple TX queue without offloads or multi segs if possible */
-	i40e_set_tx_function_flag(dev, txq);
 
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		if (!(vsi->enabled_tc & (1 << i)))
@@ -2162,6 +2287,20 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		txq->dcb_tc = i;
 	}
 
+	if (dev->data->dev_started) {
+		if (i40e_dev_tx_queue_setup_runtime(dev, txq)) {
+			i40e_dev_tx_queue_release(txq);
+			return -EINVAL;
+		}
+	} else {
+		/**
+		 * Use a simple TX queue without offloads or
+		 * multi segs if possible
+		 */
+		i40e_set_tx_function_flag(dev, txq);
+	}
+	dev->data->tx_queues[queue_idx] = txq;
+
 	return 0;
 }
 
@@ -2189,8 +2328,8 @@ i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
 	if (mz)
 		return mz;
 
-	mz = rte_memzone_reserve_aligned(name, len,
-					 socket_id, 0, I40E_RING_BASE_ALIGN);
+	mz = rte_memzone_reserve_aligned(name, len, socket_id,
+			RTE_MEMZONE_IOVA_CONTIG, I40E_RING_BASE_ALIGN);
 	return mz;
 }
 
@@ -2469,7 +2608,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
 	len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
 	rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
-	if (data->dev_conf.rxmode.jumbo_frame == 1) {
+	if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
 		    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2886,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
@@ -2765,8 +2905,8 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
-	qinfo->conf.txq_flags = txq->txq_flags;
 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+	qinfo->conf.offloads = txq->offloads;
 }
 
 void __attribute__((cold))
@@ -2889,19 +3029,24 @@ i40e_set_tx_function_flag(struct
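With txq_flags gone, the simple and vector Tx paths are selected purely from the per-queue offload set: any offload other than DEV_TX_OFFLOAD_MBUF_FAST_FREE forces the full-featured path. A sketch of a queue setup that keeps the vector path eligible (thresholds mirror the driver limits quoted above; treat the values as assumptions for any particular deployment):

#include <rte_ethdev.h>

/* Sketch: request only MBUF_FAST_FREE so the PMD can keep the
 * simple/vector Tx path; tx_rs_thresh stays within the window
 * RTE_PMD_I40E_TX_MAX_BURST (32) .. RTE_I40E_TX_MAX_FREE_BUF_SZ (64).
 */
static int
setup_fast_tx_queue(uint16_t port, uint16_t qid, uint16_t nb_desc)
{
	struct rte_eth_txconf txconf = {
		.txq_flags = ETH_TXQ_FLAGS_IGNORE, /* honor .offloads (18.08) */
		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
		.tx_rs_thresh = 32,
		.tx_free_thresh = 32,
	};

	return rte_eth_tx_queue_setup(port, qid, nb_desc,
				      rte_eth_dev_socket_id(port), &txconf);
}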
rte_eth_dev *dev, struct i40e_tx_queue *txq) struct i40e_adapter *ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); - /* Use a simple Tx queue (no offloads, no multi segs) if possible */ - if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) - && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) { - if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) { - PMD_INIT_LOG(DEBUG, "Vector tx" - " can be enabled on this txq."); - - } else { - ad->tx_vec_allowed = false; - } - } else { - ad->tx_simple_allowed = false; - } + /* Use a simple Tx queue if possible (only fast free is allowed) */ + ad->tx_simple_allowed = + (txq->offloads == + (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) && + txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST); + ad->tx_vec_allowed = (ad->tx_simple_allowed && + txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ); + + if (ad->tx_vec_allowed) + PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on Tx queue %u.", + txq->queue_id); + else if (ad->tx_simple_allowed) + PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.", + txq->queue_id); + else + PMD_INIT_LOG(DEBUG, + "Neither simple nor vector Tx enabled on Tx queue %u\n", + txq->queue_id); } void __attribute__((cold)) diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h index 34cd7923..3fc619af 100644 --- a/drivers/net/i40e/i40e_rxtx.h +++ b/drivers/net/i40e/i40e_rxtx.h @@ -30,6 +30,8 @@ #define I40E_TX_MAX_SEG UINT8_MAX #define I40E_TX_MAX_MTU_SEG 8 +#define I40E_TX_MIN_PKT_LEN 17 + #undef container_of #define container_of(ptr, type, member) ({ \ typeof(((type *)0)->member)(*__mptr) = (ptr); \ @@ -107,6 +109,7 @@ struct i40e_rx_queue { bool rx_deferred_start; /**< don't start this queue in dev start */ uint16_t rx_using_sse; /**rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH) + if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH) i40e_rxq_rearm(rxq); /* Before we start moving massive data around, check to see if diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h index 3ffedcb9..63cb1774 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_common.h +++ b/drivers/net/i40e/i40e_rxtx_vec_common.h @@ -202,11 +202,11 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev) /* - no csum error report support * - no header split support */ - if (rxmode->header_split == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) return -1; /* no QinQ support */ - if (rxmode->hw_vlan_extend == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) return -1; return 0; diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c index e549d1e8..83572ef8 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_neon.c +++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c @@ -1,35 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. - * Copyright(c) 2016, Linaro Limited - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation. + * Copyright(c) 2016-2018, Linaro Limited. */ #include diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c new file mode 100644 index 00000000..f9f13161 --- /dev/null +++ b/drivers/net/i40e/i40e_vf_representor.c @@ -0,0 +1,531 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation. + */ + +#include +#include +#include +#include + +#include "base/i40e_type.h" +#include "base/virtchnl.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" +#include "rte_pmd_i40e.h" + +static int +i40e_vf_representor_link_update(struct rte_eth_dev *ethdev, + int wait_to_complete) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + return i40e_dev_link_update(representor->adapter->eth_dev, + wait_to_complete); +} +static void +i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev, + struct rte_eth_dev_info *dev_info) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + /* get dev info for the vdev */ + dev_info->device = ethdev->device; + + dev_info->max_rx_queues = ethdev->data->nb_rx_queues; + dev_info->max_tx_queues = ethdev->data->nb_tx_queues; + + dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN; + dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX; + dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + dev_info->reta_size = ETH_RSS_RETA_SIZE_64; + dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL; + dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX; + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = I40E_DEFAULT_RX_PTHRESH, + .hthresh = I40E_DEFAULT_RX_HTHRESH, + .wthresh = I40E_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = 
I40E_DEFAULT_TX_PTHRESH, + .hthresh = I40E_DEFAULT_TX_HTHRESH, + .wthresh = I40E_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH, + .offloads = 0, + }; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = I40E_MAX_RING_DESC, + .nb_min = I40E_MIN_RING_DESC, + .nb_align = I40E_ALIGN_RING_DESC, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = I40E_MAX_RING_DESC, + .nb_min = I40E_MIN_RING_DESC, + .nb_align = I40E_ALIGN_RING_DESC, + }; + + dev_info->switch_info.name = + representor->adapter->eth_dev->device->name; + dev_info->switch_info.domain_id = representor->switch_domain_id; + dev_info->switch_info.port_id = representor->vf_id; +} + +static int +i40e_vf_representor_dev_configure(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static int +i40e_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static void +i40e_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev) +{ +} + +static int +i40e_vf_representor_rx_queue_setup(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t rx_queue_id, + __rte_unused uint16_t nb_rx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + __rte_unused struct rte_mempool *mb_pool) +{ + return 0; +} + +static int +i40e_vf_representor_tx_queue_setup(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t rx_queue_id, + __rte_unused uint16_t nb_rx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_txconf *tx_conf) +{ + return 0; +} + +static void +i40evf_stat_update_48(uint64_t *offset, + uint64_t *stat) +{ + if (*stat >= *offset) + *stat = *stat - *offset; + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset); + + *stat &= I40E_48_BIT_MASK; +} + +static void +i40evf_stat_update_32(uint64_t *offset, + uint64_t *stat) +{ + if (*stat >= *offset) + *stat = (uint64_t)(*stat - *offset); + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset); +} + +static int +rte_pmd_i40e_get_vf_native_stats(uint16_t port, + uint16_t vf_id, + struct i40e_eth_stats *stats) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + i40e_update_vsi_stats(vsi); + memcpy(stats, &vsi->eth_stats, sizeof(vsi->eth_stats)); + + return 0; +} + +static int +i40e_vf_representor_stats_get(struct rte_eth_dev *ethdev, + struct rte_eth_stats *stats) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + struct i40e_eth_stats native_stats; + int ret; + + ret = rte_pmd_i40e_get_vf_native_stats( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, &native_stats); + if (ret == 0) { + i40evf_stat_update_48( + &representor->stats_offset.rx_bytes, + &native_stats.rx_bytes); + i40evf_stat_update_48( + &representor->stats_offset.rx_unicast, + &native_stats.rx_unicast); + i40evf_stat_update_48( + &representor->stats_offset.rx_multicast, + &native_stats.rx_multicast); + i40evf_stat_update_48( + &representor->stats_offset.rx_broadcast, + 
&native_stats.rx_broadcast); + i40evf_stat_update_32( + &representor->stats_offset.rx_discards, + &native_stats.rx_discards); + i40evf_stat_update_32( + &representor->stats_offset.rx_unknown_protocol, + &native_stats.rx_unknown_protocol); + i40evf_stat_update_48( + &representor->stats_offset.tx_bytes, + &native_stats.tx_bytes); + i40evf_stat_update_48( + &representor->stats_offset.tx_unicast, + &native_stats.tx_unicast); + i40evf_stat_update_48( + &representor->stats_offset.tx_multicast, + &native_stats.tx_multicast); + i40evf_stat_update_48( + &representor->stats_offset.tx_broadcast, + &native_stats.tx_broadcast); + i40evf_stat_update_32( + &representor->stats_offset.tx_errors, + &native_stats.tx_errors); + i40evf_stat_update_32( + &representor->stats_offset.tx_discards, + &native_stats.tx_discards); + + stats->ipackets = native_stats.rx_unicast + + native_stats.rx_multicast + + native_stats.rx_broadcast; + stats->opackets = native_stats.tx_unicast + + native_stats.tx_multicast + + native_stats.tx_broadcast; + stats->ibytes = native_stats.rx_bytes; + stats->obytes = native_stats.tx_bytes; + stats->ierrors = native_stats.rx_discards; + stats->oerrors = native_stats.tx_errors + native_stats.tx_discards; + } + return ret; +} + +static void +i40e_vf_representor_stats_reset(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_get_vf_native_stats( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, &representor->stats_offset); +} + +static void +i40e_vf_representor_promiscuous_enable(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_set_vf_unicast_promisc( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, 1); +} + +static void +i40e_vf_representor_promiscuous_disable(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_set_vf_unicast_promisc( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, 0); +} + +static void +i40e_vf_representor_allmulticast_enable(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_set_vf_multicast_promisc( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, 1); +} + +static void +i40e_vf_representor_allmulticast_disable(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_set_vf_multicast_promisc( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, 0); +} + +static void +i40e_vf_representor_mac_addr_remove(struct rte_eth_dev *ethdev, uint32_t index) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_remove_vf_mac_addr( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, ðdev->data->mac_addrs[index]); +} + +static int +i40e_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev, + struct ether_addr *mac_addr) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + return rte_pmd_i40e_set_vf_mac_addr( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, mac_addr); +} + +static int +i40e_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev, + uint16_t vlan_id, int on) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + uint64_t vf_mask = 1ULL << representor->vf_id; + + return rte_pmd_i40e_set_vf_vlan_filter( + 
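Each representor callback simply forwards to the matching rte_pmd_i40e_* PF-side control, keyed by the stored vf_id, so the representor behaves like an ordinary ethdev port from the application's side. A sketch (the representor's port id is whatever it was probed as):

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Sketch: control and observe a VF through its representor port.
 * These generic calls land in the rte_pmd_i40e_* helpers on the PF.
 */
static void
poke_vf_via_representor(uint16_t repr_port_id)
{
	struct rte_eth_stats stats;

	/* unicast promiscuous mode on the represented VF */
	rte_eth_promiscuous_enable(repr_port_id);

	/* VF counters, read from the PF with rollover handling */
	if (rte_eth_stats_get(repr_port_id, &stats) == 0)
		printf("VF rx packets: %" PRIu64 "\n", stats.ipackets);
}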
representor->adapter->eth_dev->data->port_id, + vlan_id, vf_mask, on); +} + +static int +i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + struct rte_eth_dev *pdev; + struct i40e_pf_vf *vf; + struct i40e_vsi *vsi; + struct i40e_pf *pf; + uint32_t vfid; + + pdev = representor->adapter->eth_dev; + vfid = representor->vf_id; + + if (!is_i40e_supported(pdev)) { + PMD_DRV_LOG(ERR, "Invalid PF dev."); + return -EINVAL; + } + + pf = I40E_DEV_PRIVATE_TO_PF(pdev->data->dev_private); + + if (vfid >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vf = &pf->vfs[vfid]; + vsi = vf->vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + if (mask & ETH_VLAN_FILTER_MASK) { + /* Enable or disable VLAN filtering offload */ + if (ethdev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) + return i40e_vsi_config_vlan_filter(vsi, TRUE); + else + return i40e_vsi_config_vlan_filter(vsi, FALSE); + } + + if (mask & ETH_VLAN_STRIP_MASK) { + /* Enable or disable VLAN stripping offload */ + if (ethdev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP) + return i40e_vsi_config_vlan_stripping(vsi, TRUE); + else + return i40e_vsi_config_vlan_stripping(vsi, FALSE); + } + + return -EINVAL; +} + +static void +i40e_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev, + __rte_unused uint16_t rx_queue_id, int on) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_set_vf_vlan_stripq( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, on); +} + +static int +i40e_vf_representor_vlan_pvid_set(struct rte_eth_dev *ethdev, uint16_t vlan_id, + __rte_unused int on) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + return rte_pmd_i40e_set_vf_vlan_insert( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, vlan_id); +} + +struct eth_dev_ops i40e_representor_dev_ops = { + .dev_infos_get = i40e_vf_representor_dev_infos_get, + + .dev_start = i40e_vf_representor_dev_start, + .dev_configure = i40e_vf_representor_dev_configure, + .dev_stop = i40e_vf_representor_dev_stop, + + .rx_queue_setup = i40e_vf_representor_rx_queue_setup, + .tx_queue_setup = i40e_vf_representor_tx_queue_setup, + + .link_update = i40e_vf_representor_link_update, + + .stats_get = i40e_vf_representor_stats_get, + .stats_reset = i40e_vf_representor_stats_reset, + + .promiscuous_enable = i40e_vf_representor_promiscuous_enable, + .promiscuous_disable = i40e_vf_representor_promiscuous_disable, + + .allmulticast_enable = i40e_vf_representor_allmulticast_enable, + .allmulticast_disable = i40e_vf_representor_allmulticast_disable, + + .mac_addr_remove = i40e_vf_representor_mac_addr_remove, + .mac_addr_set = i40e_vf_representor_mac_addr_set, + + .vlan_filter_set = i40e_vf_representor_vlan_filter_set, + .vlan_offload_set = i40e_vf_representor_vlan_offload_set, + .vlan_strip_queue_set = i40e_vf_representor_vlan_strip_queue_set, + .vlan_pvid_set = i40e_vf_representor_vlan_pvid_set + +}; + +static uint16_t +i40e_vf_representor_rx_burst(__rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +static uint16_t +i40e_vf_representor_tx_burst(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +int +i40e_vf_representor_init(struct rte_eth_dev *ethdev, 
			void *init_params)
+{
+	struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+	struct i40e_pf *pf;
+	struct i40e_pf_vf *vf;
+	struct rte_eth_link *link;
+
+	representor->vf_id =
+		((struct i40e_vf_representor *)init_params)->vf_id;
+	representor->switch_domain_id =
+		((struct i40e_vf_representor *)init_params)->switch_domain_id;
+	representor->adapter =
+		((struct i40e_vf_representor *)init_params)->adapter;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(
+		representor->adapter->eth_dev->data->dev_private);
+
+	if (representor->vf_id >= pf->vf_num)
+		return -ENODEV;
+
+	/** representor shares the same driver as its PF device */
+	ethdev->device->driver = representor->adapter->eth_dev->device->driver;
+
+	/* Set representor device ops */
+	ethdev->dev_ops = &i40e_representor_dev_ops;
+
+	/* No data-path, but need stub Rx/Tx functions to avoid crash
+	 * when testing with the likes of testpmd.
+	 */
+	ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst;
+	ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst;
+
+	vf = &pf->vfs[representor->vf_id];
+
+	if (!vf->vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -ENODEV;
+	}
+
+	ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	/* Set the number of queues allocated to the VF */
+	ethdev->data->nb_rx_queues = vf->vsi->nb_qps;
+	ethdev->data->nb_tx_queues = vf->vsi->nb_qps;
+
+	ethdev->data->mac_addrs = &vf->mac_addr;
+
+	/* Link state. Inherited from PF */
+	link = &representor->adapter->eth_dev->data->dev_link;
+
+	ethdev->data->dev_link.link_speed = link->link_speed;
+	ethdev->data->dev_link.link_duplex = link->link_duplex;
+	ethdev->data->dev_link.link_status = link->link_status;
+	ethdev->data->dev_link.link_autoneg = link->link_autoneg;
+
+	return 0;
+}
+
+int
+i40e_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused)
+{
+	return 0;
+}
diff --git a/drivers/net/i40e/meson.build b/drivers/net/i40e/meson.build
index 8764b0e5..d783f362 100644
--- a/drivers/net/i40e/meson.build
+++ b/drivers/net/i40e/meson.build
@@ -1,10 +1,13 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2017 Intel Corporation
 
+version = 2
+
 cflags += ['-DPF_DRIVER',
 	'-DVF_DRIVER', '-DINTEGRATED_VF',
-	'-DX722_A0_SUPPORT']
+	'-DX722_A0_SUPPORT',
+	'-DALLOW_EXPERIMENTAL_API']
 
 subdir('base')
 objs = [base_objs]
@@ -17,10 +20,12 @@ sources = files(
 	'i40e_fdir.c',
 	'i40e_flow.c',
 	'i40e_tm.c',
+	'i40e_vf_representor.c',
 	'rte_pmd_i40e.c'
 	)
 
 deps += ['hash']
+includes += include_directories('base')
 
 if arch_subdir == 'x86'
 	dpdk_conf.set('RTE_LIBRTE_I40E_INC_VECTOR', 1)
@@ -36,11 +41,10 @@ if arch_subdir == 'x86'
 			'i40e_rxtx_vec_avx2.c',
 			dependencies: [static_rte_ethdev,
 				static_rte_kvargs, static_rte_hash],
-			c_args: '-mavx2')
+			include_directories: includes,
+			c_args: [cflags, '-mavx2'])
 		objs += i40e_avx2_lib.extract_objects('i40e_rxtx_vec_avx2.c')
 	endif
 endif
 
-includes += include_directories('base')
-
 install_headers('rte_pmd_i40e.h')
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index dae59e6d..bba62b1c 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -570,6 +570,49 @@ rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
 	return 0;
 }
 
+static const struct ether_addr null_mac_addr;
+
+int
+rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id,
+			struct ether_addr *mac_addr)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf_vf *vf;
+	struct i40e_vsi *vsi;
+	struct i40e_pf *pf;
+
+	if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+		return -EINVAL;
+
+
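Representor ports are not instantiated by the application directly; they are requested with the 'representor' devarg on the PF's PCI device (for example -w 0000:81:00.0,representor=[0-3] on the 18.08 EAL command line, an illustrative address) and then appear as additional ethdev ports. A sketch that locates them afterwards; reading dev_flags through rte_eth_devices is a shortcut for illustration:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: walk all ports and report the VF representors by their
 * switch info, which i40e_vf_representor_init() fills in
 * (switch_info.port_id carries the vf_id).
 */
static void
list_vf_representors(void)
{
	uint16_t pid;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info info;

		rte_eth_dev_info_get(pid, &info);
		if (rte_eth_devices[pid].data->dev_flags &
		    RTE_ETH_DEV_REPRESENTOR)
			printf("port %u represents VF %u on %s\n", pid,
			       info.switch_info.port_id,
			       info.switch_info.name);
	}
}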
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) + return -EINVAL; + + vf = &pf->vfs[vf_id]; + vsi = vf->vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + if (is_same_ether_addr(mac_addr, &vf->mac_addr)) + /* Reset the mac with NULL address */ + ether_addr_copy(&null_mac_addr, &vf->mac_addr); + + /* Remove the mac */ + i40e_vsi_delete_mac(vsi, mac_addr); + + return 0; +} + /* Set vlan strip on/off for specific VF from host */ int rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on) @@ -1530,6 +1573,11 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec) return 1; } } + /* profile with group id 0xff is compatible with any other profile */ + if ((pinfo->track_id & group_mask) == group_mask) { + rte_free(buff); + return 0; + } for (i = 0; i < p_list->p_count; i++) { p = &p_list->p_info[i]; if ((p->track_id & group_mask) == 0) { @@ -1540,6 +1588,8 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec) } for (i = 0; i < p_list->p_count; i++) { p = &p_list->p_info[i]; + if ((p->track_id & group_mask) == group_mask) + continue; if ((pinfo->track_id & group_mask) != (p->track_id & group_mask)) { PMD_DRV_LOG(INFO, "Profile of different group exists."); @@ -1603,8 +1653,6 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff, return -EINVAL; } - i40e_update_customized_info(dev, buff, size); - /* Find metadata segment */ metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, pkg_hdr); @@ -1661,6 +1709,7 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff, PMD_DRV_LOG(ERR, "Profile of group 0 already exists."); else if (is_exist == 3) PMD_DRV_LOG(ERR, "Profile of different group already exists"); + i40e_update_customized_info(dev, buff, size, op); rte_free(profile_info_sec); return -EEXIST; } @@ -1708,6 +1757,10 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff, } } + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD || + op == RTE_PMD_I40E_PKG_OP_WR_DEL) + i40e_update_customized_info(dev, buff, size, op); + rte_free(profile_info_sec); return status; } @@ -3071,6 +3124,7 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype, { struct rte_eth_dev *dev; struct i40e_hw *hw; + struct i40e_pf *pf; uint64_t inset_reg; uint32_t mask_reg[2]; int i; @@ -3086,10 +3140,12 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype, return -EINVAL; hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - /* Clear mask first */ - for (i = 0; i < 2; i++) - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0); + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Input set configuration is not supported."); + return -ENOTSUP; + } inset_reg = inset->inset; for (i = 0; i < 2; i++) @@ -3098,14 +3154,15 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype, switch (inset_type) { case INSET_HASH: - i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), - (uint32_t)(inset_reg & UINT32_MAX)); - i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), - (uint32_t)((inset_reg >> - I40E_32_BIT_WIDTH) & UINT32_MAX)); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); for (i = 0; i < 
2; i++) - i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), - mask_reg[i]); + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_MSK(i, pctype), + mask_reg[i]); break; case INSET_FDIR: i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), @@ -3114,8 +3171,9 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype, (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) & UINT32_MAX)); for (i = 0; i < 2; i++) - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), - mask_reg[i]); + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); break; case INSET_FDIR_FLX: i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype), diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h index d248adb1..be4a6024 100644 --- a/drivers/net/i40e/rte_pmd_i40e.h +++ b/drivers/net/i40e/rte_pmd_i40e.h @@ -455,6 +455,24 @@ int rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, int rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id, struct ether_addr *mac_addr); +/** + * Remove the VF MAC address. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * VF id. + * @param mac_addr + * VF MAC address. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if *vf* or *mac_addr* is invalid. + */ +int +rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id, + struct ether_addr *mac_addr); + /** * Enable/Disable vf vlan strip for all queues in a pool * diff --git a/drivers/net/ifc/Makefile b/drivers/net/ifc/Makefile new file mode 100644 index 00000000..39b36ae5 --- /dev/null +++ b/drivers/net/ifc/Makefile @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_ifc.a + +LDLIBS += -lpthread +LDLIBS += -lrte_eal -lrte_pci -lrte_vhost -lrte_bus_pci + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -DALLOW_EXPERIMENTAL_API + +# +# Add extra flags for base driver source files to disable warnings in them +# +BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))) + +VPATH += $(SRCDIR)/base + +EXPORT_MAP := rte_pmd_ifc_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_IFC_PMD) += ifcvf_vdpa.c +SRCS-$(CONFIG_RTE_LIBRTE_IFC_PMD) += ifcvf.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/ifc/base/ifcvf.c b/drivers/net/ifc/base/ifcvf.c new file mode 100644 index 00000000..4b22d9ed --- /dev/null +++ b/drivers/net/ifc/base/ifcvf.c @@ -0,0 +1,298 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include "ifcvf.h" +#include "ifcvf_osdep.h" + +STATIC void * +get_cap_addr(struct ifcvf_hw *hw, struct ifcvf_pci_cap *cap) +{ + u8 bar = cap->bar; + u32 length = cap->length; + u32 offset = cap->offset; + + if (bar > IFCVF_PCI_MAX_RESOURCE - 1) { + DEBUGOUT("invalid bar: %u\n", bar); + return NULL; + } + + if (offset + length < offset) { + DEBUGOUT("offset(%u) + length(%u) overflows\n", + offset, length); + return NULL; + } + + if (offset + length > hw->mem_resource[cap->bar].len) { + DEBUGOUT("offset(%u) + length(%u) overflows bar length(%u)", + offset, length, (u32)hw->mem_resource[cap->bar].len); + return NULL; + } + + return hw->mem_resource[bar].addr + offset; +} + +int +ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev) +{ + int ret; + u8 pos; + struct ifcvf_pci_cap cap; + + ret = PCI_READ_CONFIG_BYTE(dev, &pos, PCI_CAPABILITY_LIST); + if (ret < 0) { + 
DEBUGOUT("failed to read pci capability list\n"); + return -1; + } + + while (pos) { + ret = PCI_READ_CONFIG_RANGE(dev, (u32 *)&cap, + sizeof(cap), pos); + if (ret < 0) { + DEBUGOUT("failed to read cap at pos: %x", pos); + break; + } + + if (cap.cap_vndr != PCI_CAP_ID_VNDR) + goto next; + + DEBUGOUT("cfg type: %u, bar: %u, offset: %u, " + "len: %u\n", cap.cfg_type, cap.bar, + cap.offset, cap.length); + + switch (cap.cfg_type) { + case IFCVF_PCI_CAP_COMMON_CFG: + hw->common_cfg = get_cap_addr(hw, &cap); + break; + case IFCVF_PCI_CAP_NOTIFY_CFG: + PCI_READ_CONFIG_DWORD(dev, &hw->notify_off_multiplier, + pos + sizeof(cap)); + hw->notify_base = get_cap_addr(hw, &cap); + hw->notify_region = cap.bar; + break; + case IFCVF_PCI_CAP_ISR_CFG: + hw->isr = get_cap_addr(hw, &cap); + break; + case IFCVF_PCI_CAP_DEVICE_CFG: + hw->dev_cfg = get_cap_addr(hw, &cap); + break; + } +next: + pos = cap.cap_next; + } + + hw->lm_cfg = hw->mem_resource[4].addr; + + if (hw->common_cfg == NULL || hw->notify_base == NULL || + hw->isr == NULL || hw->dev_cfg == NULL) { + DEBUGOUT("capability incomplete\n"); + return -1; + } + + DEBUGOUT("capability mapping:\ncommon cfg: %p\n" + "notify base: %p\nisr cfg: %p\ndevice cfg: %p\n" + "multiplier: %u\n", + hw->common_cfg, hw->dev_cfg, + hw->isr, hw->notify_base, + hw->notify_off_multiplier); + + return 0; +} + +STATIC u8 +ifcvf_get_status(struct ifcvf_hw *hw) +{ + return IFCVF_READ_REG8(&hw->common_cfg->device_status); +} + +STATIC void +ifcvf_set_status(struct ifcvf_hw *hw, u8 status) +{ + IFCVF_WRITE_REG8(status, &hw->common_cfg->device_status); +} + +STATIC void +ifcvf_reset(struct ifcvf_hw *hw) +{ + ifcvf_set_status(hw, 0); + + /* flush status write */ + while (ifcvf_get_status(hw)) + msec_delay(1); +} + +STATIC void +ifcvf_add_status(struct ifcvf_hw *hw, u8 status) +{ + if (status != 0) + status |= ifcvf_get_status(hw); + + ifcvf_set_status(hw, status); + ifcvf_get_status(hw); +} + +u64 +ifcvf_get_features(struct ifcvf_hw *hw) +{ + u32 features_lo, features_hi; + struct ifcvf_pci_common_cfg *cfg = hw->common_cfg; + + IFCVF_WRITE_REG32(0, &cfg->device_feature_select); + features_lo = IFCVF_READ_REG32(&cfg->device_feature); + + IFCVF_WRITE_REG32(1, &cfg->device_feature_select); + features_hi = IFCVF_READ_REG32(&cfg->device_feature); + + return ((u64)features_hi << 32) | features_lo; +} + +STATIC void +ifcvf_set_features(struct ifcvf_hw *hw, u64 features) +{ + struct ifcvf_pci_common_cfg *cfg = hw->common_cfg; + + IFCVF_WRITE_REG32(0, &cfg->guest_feature_select); + IFCVF_WRITE_REG32(features & ((1ULL << 32) - 1), &cfg->guest_feature); + + IFCVF_WRITE_REG32(1, &cfg->guest_feature_select); + IFCVF_WRITE_REG32(features >> 32, &cfg->guest_feature); +} + +STATIC int +ifcvf_config_features(struct ifcvf_hw *hw) +{ + u64 host_features; + + host_features = ifcvf_get_features(hw); + hw->req_features &= host_features; + + ifcvf_set_features(hw, hw->req_features); + ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_FEATURES_OK); + + if (!(ifcvf_get_status(hw) & IFCVF_CONFIG_STATUS_FEATURES_OK)) { + DEBUGOUT("failed to set FEATURES_OK status\n"); + return -1; + } + + return 0; +} + +STATIC void +io_write64_twopart(u64 val, u32 *lo, u32 *hi) +{ + IFCVF_WRITE_REG32(val & ((1ULL << 32) - 1), lo); + IFCVF_WRITE_REG32(val >> 32, hi); +} + +STATIC int +ifcvf_hw_enable(struct ifcvf_hw *hw) +{ + struct ifcvf_pci_common_cfg *cfg; + u8 *lm_cfg; + u32 i; + u16 notify_off; + + cfg = hw->common_cfg; + lm_cfg = hw->lm_cfg; + + IFCVF_WRITE_REG16(0, &cfg->msix_config); + if (IFCVF_READ_REG16(&cfg->msix_config) 
== IFCVF_MSI_NO_VECTOR) { + DEBUGOUT("msix vec alloc failed for device config\n"); + return -1; + } + + for (i = 0; i < hw->nr_vring; i++) { + IFCVF_WRITE_REG16(i, &cfg->queue_select); + io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo, + &cfg->queue_desc_hi); + io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo, + &cfg->queue_avail_hi); + io_write64_twopart(hw->vring[i].used, &cfg->queue_used_lo, + &cfg->queue_used_hi); + IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size); + + *(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET + + (i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4) = + (u32)hw->vring[i].last_avail_idx | + ((u32)hw->vring[i].last_used_idx << 16); + + IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector); + if (IFCVF_READ_REG16(&cfg->queue_msix_vector) == + IFCVF_MSI_NO_VECTOR) { + DEBUGOUT("queue %u, msix vec alloc failed\n", + i); + return -1; + } + + notify_off = IFCVF_READ_REG16(&cfg->queue_notify_off); + hw->notify_addr[i] = (void *)((u8 *)hw->notify_base + + notify_off * hw->notify_off_multiplier); + IFCVF_WRITE_REG16(1, &cfg->queue_enable); + } + + return 0; +} + +STATIC void +ifcvf_hw_disable(struct ifcvf_hw *hw) +{ + u32 i; + struct ifcvf_pci_common_cfg *cfg; + u32 ring_state; + + cfg = hw->common_cfg; + + IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->msix_config); + for (i = 0; i < hw->nr_vring; i++) { + IFCVF_WRITE_REG16(i, &cfg->queue_select); + IFCVF_WRITE_REG16(0, &cfg->queue_enable); + IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->queue_msix_vector); + ring_state = *(u32 *)(hw->lm_cfg + IFCVF_LM_RING_STATE_OFFSET + + (i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4); + hw->vring[i].last_avail_idx = (u16)ring_state; + hw->vring[i].last_used_idx = (u16)(ring_state >> 16); + } +} + +int +ifcvf_start_hw(struct ifcvf_hw *hw) +{ + ifcvf_reset(hw); + ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_ACK); + ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER); + + if (ifcvf_config_features(hw) < 0) + return -1; + + if (ifcvf_hw_enable(hw) < 0) + return -1; + + ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER_OK); + return 0; +} + +void +ifcvf_stop_hw(struct ifcvf_hw *hw) +{ + ifcvf_hw_disable(hw); + ifcvf_reset(hw); +} + +void +ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid) +{ + IFCVF_WRITE_REG16(qid, hw->notify_addr[qid]); +} + +u8 +ifcvf_get_notify_region(struct ifcvf_hw *hw) +{ + return hw->notify_region; +} + +u64 +ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid) +{ + return (u8 *)hw->notify_addr[qid] - + (u8 *)hw->mem_resource[hw->notify_region].addr; +} diff --git a/drivers/net/ifc/base/ifcvf.h b/drivers/net/ifc/base/ifcvf.h new file mode 100644 index 00000000..badacb61 --- /dev/null +++ b/drivers/net/ifc/base/ifcvf.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#ifndef _IFCVF_H_ +#define _IFCVF_H_ + +#include "ifcvf_osdep.h" + +#define IFCVF_VENDOR_ID 0x1AF4 +#define IFCVF_DEVICE_ID 0x1041 +#define IFCVF_SUBSYS_VENDOR_ID 0x8086 +#define IFCVF_SUBSYS_DEVICE_ID 0x001A + +#define IFCVF_MAX_QUEUES 1 +#define VIRTIO_F_IOMMU_PLATFORM 33 + +/* Common configuration */ +#define IFCVF_PCI_CAP_COMMON_CFG 1 +/* Notifications */ +#define IFCVF_PCI_CAP_NOTIFY_CFG 2 +/* ISR Status */ +#define IFCVF_PCI_CAP_ISR_CFG 3 +/* Device specific configuration */ +#define IFCVF_PCI_CAP_DEVICE_CFG 4 +/* PCI configuration access */ +#define IFCVF_PCI_CAP_PCI_CFG 5 + +#define IFCVF_CONFIG_STATUS_RESET 0x00 +#define IFCVF_CONFIG_STATUS_ACK 0x01 +#define IFCVF_CONFIG_STATUS_DRIVER 0x02 +#define IFCVF_CONFIG_STATUS_DRIVER_OK 0x04 +#define 
IFCVF_CONFIG_STATUS_FEATURES_OK 0x08
+#define IFCVF_CONFIG_STATUS_FAILED 0x80
+
+#define IFCVF_MSI_NO_VECTOR 0xffff
+#define IFCVF_PCI_MAX_RESOURCE 6
+
+#define IFCVF_LM_CFG_SIZE 0x40
+#define IFCVF_LM_RING_STATE_OFFSET 0x20
+
+#define IFCVF_LM_LOGGING_CTRL 0x0
+
+#define IFCVF_LM_BASE_ADDR_LOW 0x10
+#define IFCVF_LM_BASE_ADDR_HIGH 0x14
+#define IFCVF_LM_END_ADDR_LOW 0x18
+#define IFCVF_LM_END_ADDR_HIGH 0x1c
+
+#define IFCVF_LM_DISABLE 0x0
+#define IFCVF_LM_ENABLE_VF 0x1
+#define IFCVF_LM_ENABLE_PF 0x3
+
+#define IFCVF_32_BIT_MASK 0xffffffff
+
+
+struct ifcvf_pci_cap {
+ u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ u8 cap_next; /* Generic PCI field: next ptr. */
+ u8 cap_len; /* Generic PCI field: capability length */
+ u8 cfg_type; /* Identifies the structure. */
+ u8 bar; /* Where to find it. */
+ u8 padding[3]; /* Pad to full dword. */
+ u32 offset; /* Offset within bar. */
+ u32 length; /* Length of the structure, in bytes. */
+};
+
+struct ifcvf_pci_notify_cap {
+ struct ifcvf_pci_cap cap;
+ u32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+struct ifcvf_pci_common_cfg {
+ /* About the whole device. */
+ u32 device_feature_select;
+ u32 device_feature;
+ u32 guest_feature_select;
+ u32 guest_feature;
+ u16 msix_config;
+ u16 num_queues;
+ u8 device_status;
+ u8 config_generation;
+
+ /* About a specific virtqueue. */
+ u16 queue_select;
+ u16 queue_size;
+ u16 queue_msix_vector;
+ u16 queue_enable;
+ u16 queue_notify_off;
+ u32 queue_desc_lo;
+ u32 queue_desc_hi;
+ u32 queue_avail_lo;
+ u32 queue_avail_hi;
+ u32 queue_used_lo;
+ u32 queue_used_hi;
+};
+
+struct ifcvf_net_config {
+ u8 mac[6];
+ u16 status;
+ u16 max_virtqueue_pairs;
+} __attribute__((packed));
+
+struct ifcvf_pci_mem_resource {
+ u64 phys_addr; /**< Physical address, 0 if not resource. */
+ u64 len; /**< Length of the resource. */
+ u8 *addr; /**< Virtual address, NULL when not mapped. */
+};
+
+struct vring_info {
+ u64 desc;
+ u64 avail;
+ u64 used;
+ u16 size;
+ u16 last_avail_idx;
+ u16 last_used_idx;
+};
+
+struct ifcvf_hw {
+ u64 req_features;
+ u8 notify_region;
+ u32 notify_off_multiplier;
+ struct ifcvf_pci_common_cfg *common_cfg;
+ struct ifcvf_net_config *dev_cfg;
+ u8 *isr;
+ u16 *notify_base;
+ u16 *notify_addr[IFCVF_MAX_QUEUES * 2];
+ u8 *lm_cfg;
+ struct vring_info vring[IFCVF_MAX_QUEUES * 2];
+ u8 nr_vring;
+ struct ifcvf_pci_mem_resource mem_resource[IFCVF_PCI_MAX_RESOURCE];
+};
+
+int
+ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev);
+
+u64
+ifcvf_get_features(struct ifcvf_hw *hw);
+
+int
+ifcvf_start_hw(struct ifcvf_hw *hw);
+
+void
+ifcvf_stop_hw(struct ifcvf_hw *hw);
+
+void
+ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
+
+u8
+ifcvf_get_notify_region(struct ifcvf_hw *hw);
+
+u64
+ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid);
+
+#endif /* _IFCVF_H_ */
diff --git a/drivers/net/ifc/base/ifcvf_osdep.h b/drivers/net/ifc/base/ifcvf_osdep.h
new file mode 100644
index 00000000..cf151ef5
--- /dev/null
+++ b/drivers/net/ifc/base/ifcvf_osdep.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _IFCVF_OSDEP_H_
+#define _IFCVF_OSDEP_H_
+
+#include <stdint.h>
+#include <linux/pci_regs.h>
+
+#include <rte_cycles.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#define DEBUGOUT(S, args...)
RTE_LOG(DEBUG, PMD, S, ##args)
+#define STATIC static
+
+#define msec_delay rte_delay_ms
+
+#define IFCVF_READ_REG8(reg) rte_read8(reg)
+#define IFCVF_WRITE_REG8(val, reg) rte_write8((val), (reg))
+#define IFCVF_READ_REG16(reg) rte_read16(reg)
+#define IFCVF_WRITE_REG16(val, reg) rte_write16((val), (reg))
+#define IFCVF_READ_REG32(reg) rte_read32(reg)
+#define IFCVF_WRITE_REG32(val, reg) rte_write32((val), (reg))
+
+typedef struct rte_pci_device PCI_DEV;
+
+#define PCI_READ_CONFIG_BYTE(dev, val, where) \
+ rte_pci_read_config(dev, val, 1, where)
+
+#define PCI_READ_CONFIG_DWORD(dev, val, where) \
+ rte_pci_read_config(dev, val, 4, where)
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef int64_t s64;
+typedef uint64_t u64;
+
+static inline int
+PCI_READ_CONFIG_RANGE(PCI_DEV *dev, uint32_t *val, int size, int where)
+{
+ return rte_pci_read_config(dev, val, size, where);
+}
+
+#endif /* _IFCVF_OSDEP_H_ */
diff --git a/drivers/net/ifc/ifcvf_vdpa.c b/drivers/net/ifc/ifcvf_vdpa.c
new file mode 100644
index 00000000..88d81403
--- /dev/null
+++ b/drivers/net/ifc/ifcvf_vdpa.c
@@ -0,0 +1,793 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <unistd.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "base/ifcvf.h"
+
+#define DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+static int ifcvf_vdpa_logtype;
+
+struct ifcvf_internal {
+ struct rte_vdpa_dev_addr dev_addr;
+ struct rte_pci_device *pdev;
+ struct ifcvf_hw hw;
+ int vfio_container_fd;
+ int vfio_group_fd;
+ int vfio_dev_fd;
+ pthread_t tid; /* thread for notify relay */
+ int epfd;
+ int vid;
+ int did;
+ uint16_t max_queues;
+ uint64_t features;
+ rte_atomic32_t started;
+ rte_atomic32_t dev_attached;
+ rte_atomic32_t running;
+ rte_spinlock_t lock;
+};
+
+struct internal_list {
+ TAILQ_ENTRY(internal_list) next;
+ struct ifcvf_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+static struct internal_list_head internal_list =
+ TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static struct internal_list *
+find_internal_resource_by_did(int did)
+{
+ int found = 0;
+ struct internal_list *list;
+
+ pthread_mutex_lock(&internal_list_lock);
+
+ TAILQ_FOREACH(list, &internal_list, next) {
+ if (did == list->internal->did) {
+ found = 1;
+ break;
+ }
+ }
+
+ pthread_mutex_unlock(&internal_list_lock);
+
+ if (!found)
+ return NULL;
+
+ return list;
+}
+
+static struct internal_list *
+find_internal_resource_by_dev(struct rte_pci_device *pdev)
+{
+ int found = 0;
+ struct internal_list *list;
+
+ pthread_mutex_lock(&internal_list_lock);
+
+ TAILQ_FOREACH(list, &internal_list, next) {
+ if (pdev == list->internal->pdev) {
+ found = 1;
+ break;
+ }
+ }
+
+ pthread_mutex_unlock(&internal_list_lock);
+
+ if (!found)
+ return NULL;
+
+ return list;
+}
+
+static int
+ifcvf_vfio_setup(struct ifcvf_internal *internal)
+{
+ struct rte_pci_device *dev = internal->pdev;
+ char devname[RTE_DEV_NAME_MAX_LEN] = {0};
+ int iommu_group_num;
+ int ret = 0;
+ int i;
+
+ internal->vfio_dev_fd = -1;
+ internal->vfio_group_fd = -1;
+ internal->vfio_container_fd = -1;
+
+ rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
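+
+ /*
+ * Look up the device's IOMMU group in sysfs and bind it into a
+ * freshly created VFIO container, so this vDPA device gets its
+ * own DMA mapping domain.
+ */
+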
rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname, + &iommu_group_num); + + internal->vfio_container_fd = rte_vfio_container_create(); + if (internal->vfio_container_fd < 0) + return -1; + + internal->vfio_group_fd = rte_vfio_container_group_bind( + internal->vfio_container_fd, iommu_group_num); + if (internal->vfio_group_fd < 0) + goto err; + + if (rte_pci_map_device(dev)) + goto err; + + internal->vfio_dev_fd = dev->intr_handle.vfio_dev_fd; + + for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE); + i++) { + internal->hw.mem_resource[i].addr = + internal->pdev->mem_resource[i].addr; + internal->hw.mem_resource[i].phys_addr = + internal->pdev->mem_resource[i].phys_addr; + internal->hw.mem_resource[i].len = + internal->pdev->mem_resource[i].len; + } + ret = ifcvf_init_hw(&internal->hw, internal->pdev); + + return ret; + +err: + rte_vfio_container_destroy(internal->vfio_container_fd); + return -1; +} + +static int +ifcvf_dma_map(struct ifcvf_internal *internal, int do_map) +{ + uint32_t i; + int ret; + struct rte_vhost_memory *mem = NULL; + int vfio_container_fd; + + ret = rte_vhost_get_mem_table(internal->vid, &mem); + if (ret < 0) { + DRV_LOG(ERR, "failed to get VM memory layout."); + goto exit; + } + + vfio_container_fd = internal->vfio_container_fd; + + for (i = 0; i < mem->nregions; i++) { + struct rte_vhost_mem_region *reg; + + reg = &mem->regions[i]; + DRV_LOG(INFO, "%s, region %u: HVA 0x%" PRIx64 ", " + "GPA 0x%" PRIx64 ", size 0x%" PRIx64 ".", + do_map ? "DMA map" : "DMA unmap", i, + reg->host_user_addr, reg->guest_phys_addr, reg->size); + + if (do_map) { + ret = rte_vfio_container_dma_map(vfio_container_fd, + reg->host_user_addr, reg->guest_phys_addr, + reg->size); + if (ret < 0) { + DRV_LOG(ERR, "DMA map failed."); + goto exit; + } + } else { + ret = rte_vfio_container_dma_unmap(vfio_container_fd, + reg->host_user_addr, reg->guest_phys_addr, + reg->size); + if (ret < 0) { + DRV_LOG(ERR, "DMA unmap failed."); + goto exit; + } + } + } + +exit: + if (mem) + free(mem); + return ret; +} + +static uint64_t +qva_to_gpa(int vid, uint64_t qva) +{ + struct rte_vhost_memory *mem = NULL; + struct rte_vhost_mem_region *reg; + uint32_t i; + uint64_t gpa = 0; + + if (rte_vhost_get_mem_table(vid, &mem) < 0) + goto exit; + + for (i = 0; i < mem->nregions; i++) { + reg = &mem->regions[i]; + + if (qva >= reg->host_user_addr && + qva < reg->host_user_addr + reg->size) { + gpa = qva - reg->host_user_addr + reg->guest_phys_addr; + break; + } + } + +exit: + if (mem) + free(mem); + return gpa; +} + +static int +vdpa_ifcvf_start(struct ifcvf_internal *internal) +{ + struct ifcvf_hw *hw = &internal->hw; + int i, nr_vring; + int vid; + struct rte_vhost_vring vq; + uint64_t gpa; + + vid = internal->vid; + nr_vring = rte_vhost_get_vring_num(vid); + rte_vhost_get_negotiated_features(vid, &hw->req_features); + + for (i = 0; i < nr_vring; i++) { + rte_vhost_get_vhost_vring(vid, i, &vq); + gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc); + if (gpa == 0) { + DRV_LOG(ERR, "Fail to get GPA for descriptor ring."); + return -1; + } + hw->vring[i].desc = gpa; + + gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail); + if (gpa == 0) { + DRV_LOG(ERR, "Fail to get GPA for available ring."); + return -1; + } + hw->vring[i].avail = gpa; + + gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used); + if (gpa == 0) { + DRV_LOG(ERR, "Fail to get GPA for used ring."); + return -1; + } + hw->vring[i].used = gpa; + + hw->vring[i].size = vq.size; + rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx, 
+ &hw->vring[i].last_used_idx);
+ }
+ hw->nr_vring = i;
+
+ return ifcvf_start_hw(&internal->hw);
+}
+
+static void
+vdpa_ifcvf_stop(struct ifcvf_internal *internal)
+{
+ struct ifcvf_hw *hw = &internal->hw;
+ uint32_t i;
+ int vid;
+
+ vid = internal->vid;
+ ifcvf_stop_hw(hw);
+
+ for (i = 0; i < hw->nr_vring; i++)
+ rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+ hw->vring[i].last_used_idx);
+}
+
+#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
+ sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1))
+static int
+vdpa_enable_vfio_intr(struct ifcvf_internal *internal)
+{
+ int ret;
+ uint32_t i, nr_vring;
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int *fd_ptr;
+ struct rte_vhost_vring vring;
+
+ nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = sizeof(irq_set_buf);
+ irq_set->count = nr_vring + 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ irq_set->start = 0;
+ fd_ptr = (int *)&irq_set->data;
+ fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle.fd;
+
+ for (i = 0; i < nr_vring; i++) {
+ rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+ fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+ }
+
+ ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret) {
+ DRV_LOG(ERR, "Error enabling MSI-X interrupts: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+vdpa_disable_vfio_intr(struct ifcvf_internal *internal)
+{
+ int ret;
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = sizeof(irq_set_buf);
+ irq_set->count = 0;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ irq_set->start = 0;
+
+ ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret) {
+ DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static void *
+notify_relay(void *arg)
+{
+ int i, kickfd, epfd, nfds = 0;
+ uint32_t qid, q_num;
+ struct epoll_event events[IFCVF_MAX_QUEUES * 2];
+ struct epoll_event ev;
+ uint64_t buf;
+ int nbytes;
+ struct rte_vhost_vring vring;
+ struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
+ struct ifcvf_hw *hw = &internal->hw;
+
+ q_num = rte_vhost_get_vring_num(internal->vid);
+
+ epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
+ if (epfd < 0) {
+ DRV_LOG(ERR, "failed to create epoll instance.");
+ return NULL;
+ }
+ internal->epfd = epfd;
+
+ for (qid = 0; qid < q_num; qid++) {
+ ev.events = EPOLLIN | EPOLLPRI;
+ rte_vhost_get_vhost_vring(internal->vid, qid, &vring);
+ ev.data.u64 = qid | (uint64_t)vring.kickfd << 32;
+ if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
+ DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
+ return NULL;
+ }
+ }
+
+ for (;;) {
+ nfds = epoll_wait(epfd, events, q_num, -1);
+ if (nfds < 0) {
+ if (errno == EINTR)
+ continue;
+ DRV_LOG(ERR, "epoll_wait failed: %s", strerror(errno));
+ return NULL;
+ }
+
+ for (i = 0; i < nfds; i++) {
+ qid = events[i].data.u32;
+ kickfd = (uint32_t)(events[i].data.u64 >> 32);
+ do {
+ nbytes = read(kickfd, &buf, 8);
+ if (nbytes < 0) {
+ if (errno == EINTR ||
+ errno == EWOULDBLOCK ||
+ errno == EAGAIN)
+ continue;
+ DRV_LOG(INFO, "Error reading "
+ "kickfd: %s",
+ strerror(errno));
+ }
+ break;
+ } while (1);
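+
+ /*
+ * The guest's kick has been drained from the eventfd
+ * above; relay it to the device by writing the queue
+ * id to its notify (doorbell) register.
+ */
+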
ifcvf_notify_queue(hw, qid); + } + } + + return NULL; +} + +static int +setup_notify_relay(struct ifcvf_internal *internal) +{ + int ret; + + ret = pthread_create(&internal->tid, NULL, notify_relay, + (void *)internal); + if (ret) { + DRV_LOG(ERR, "failed to create notify relay pthread."); + return -1; + } + return 0; +} + +static int +unset_notify_relay(struct ifcvf_internal *internal) +{ + void *status; + + if (internal->tid) { + pthread_cancel(internal->tid); + pthread_join(internal->tid, &status); + } + internal->tid = 0; + + if (internal->epfd >= 0) + close(internal->epfd); + internal->epfd = -1; + + return 0; +} + +static int +update_datapath(struct ifcvf_internal *internal) +{ + int ret; + + rte_spinlock_lock(&internal->lock); + + if (!rte_atomic32_read(&internal->running) && + (rte_atomic32_read(&internal->started) && + rte_atomic32_read(&internal->dev_attached))) { + ret = ifcvf_dma_map(internal, 1); + if (ret) + goto err; + + ret = vdpa_enable_vfio_intr(internal); + if (ret) + goto err; + + ret = setup_notify_relay(internal); + if (ret) + goto err; + + ret = vdpa_ifcvf_start(internal); + if (ret) + goto err; + + rte_atomic32_set(&internal->running, 1); + } else if (rte_atomic32_read(&internal->running) && + (!rte_atomic32_read(&internal->started) || + !rte_atomic32_read(&internal->dev_attached))) { + vdpa_ifcvf_stop(internal); + + ret = unset_notify_relay(internal); + if (ret) + goto err; + + ret = vdpa_disable_vfio_intr(internal); + if (ret) + goto err; + + ret = ifcvf_dma_map(internal, 0); + if (ret) + goto err; + + rte_atomic32_set(&internal->running, 0); + } + + rte_spinlock_unlock(&internal->lock); + return 0; +err: + rte_spinlock_unlock(&internal->lock); + return ret; +} + +static int +ifcvf_dev_config(int vid) +{ + int did; + struct internal_list *list; + struct ifcvf_internal *internal; + + did = rte_vhost_get_vdpa_device_id(vid); + list = find_internal_resource_by_did(did); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + internal = list->internal; + internal->vid = vid; + rte_atomic32_set(&internal->dev_attached, 1); + update_datapath(internal); + + return 0; +} + +static int +ifcvf_dev_close(int vid) +{ + int did; + struct internal_list *list; + struct ifcvf_internal *internal; + + did = rte_vhost_get_vdpa_device_id(vid); + list = find_internal_resource_by_did(did); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + internal = list->internal; + rte_atomic32_set(&internal->dev_attached, 0); + update_datapath(internal); + + return 0; +} + +static int +ifcvf_get_vfio_group_fd(int vid) +{ + int did; + struct internal_list *list; + + did = rte_vhost_get_vdpa_device_id(vid); + list = find_internal_resource_by_did(did); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + return list->internal->vfio_group_fd; +} + +static int +ifcvf_get_vfio_device_fd(int vid) +{ + int did; + struct internal_list *list; + + did = rte_vhost_get_vdpa_device_id(vid); + list = find_internal_resource_by_did(did); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + return list->internal->vfio_dev_fd; +} + +static int +ifcvf_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size) +{ + int did; + struct internal_list *list; + struct ifcvf_internal *internal; + struct vfio_region_info reg = { .argsz = sizeof(reg) }; + int ret; + + did = rte_vhost_get_vdpa_device_id(vid); + list = find_internal_resource_by_did(did); + if (list == NULL) { + 
DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + internal = list->internal; + + reg.index = ifcvf_get_notify_region(&internal->hw); + ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, ®); + if (ret) { + DRV_LOG(ERR, "Get not get device region info: %s", + strerror(errno)); + return -1; + } + + *offset = ifcvf_get_queue_notify_off(&internal->hw, qid) + reg.offset; + *size = 0x1000; + + return 0; +} + +static int +ifcvf_get_queue_num(int did, uint32_t *queue_num) +{ + struct internal_list *list; + + list = find_internal_resource_by_did(did); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + *queue_num = list->internal->max_queues; + + return 0; +} + +static int +ifcvf_get_vdpa_features(int did, uint64_t *features) +{ + struct internal_list *list; + + list = find_internal_resource_by_did(did); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + *features = list->internal->features; + + return 0; +} + +#define VDPA_SUPPORTED_PROTOCOL_FEATURES \ + (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \ + 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \ + 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \ + 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \ + 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) +static int +ifcvf_get_protocol_features(int did __rte_unused, uint64_t *features) +{ + *features = VDPA_SUPPORTED_PROTOCOL_FEATURES; + return 0; +} + +struct rte_vdpa_dev_ops ifcvf_ops = { + .get_queue_num = ifcvf_get_queue_num, + .get_features = ifcvf_get_vdpa_features, + .get_protocol_features = ifcvf_get_protocol_features, + .dev_conf = ifcvf_dev_config, + .dev_close = ifcvf_dev_close, + .set_vring_state = NULL, + .set_features = NULL, + .migration_done = NULL, + .get_vfio_group_fd = ifcvf_get_vfio_group_fd, + .get_vfio_device_fd = ifcvf_get_vfio_device_fd, + .get_notify_area = ifcvf_get_notify_area, +}; + +static int +ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + uint64_t features; + struct ifcvf_internal *internal = NULL; + struct internal_list *list = NULL; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + list = rte_zmalloc("ifcvf", sizeof(*list), 0); + if (list == NULL) + goto error; + + internal = rte_zmalloc("ifcvf", sizeof(*internal), 0); + if (internal == NULL) + goto error; + + internal->pdev = pci_dev; + rte_spinlock_init(&internal->lock); + if (ifcvf_vfio_setup(internal) < 0) + return -1; + + internal->max_queues = IFCVF_MAX_QUEUES; + features = ifcvf_get_features(&internal->hw); + internal->features = (features & + ~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) | + (1ULL << VHOST_USER_F_PROTOCOL_FEATURES); + + internal->dev_addr.pci_addr = pci_dev->addr; + internal->dev_addr.type = PCI_ADDR; + list->internal = internal; + + pthread_mutex_lock(&internal_list_lock); + TAILQ_INSERT_TAIL(&internal_list, list, next); + pthread_mutex_unlock(&internal_list_lock); + + internal->did = rte_vdpa_register_device(&internal->dev_addr, + &ifcvf_ops); + if (internal->did < 0) + goto error; + + rte_atomic32_set(&internal->started, 1); + update_datapath(internal); + + return 0; + +error: + rte_free(list); + rte_free(internal); + return -1; +} + +static int +ifcvf_pci_remove(struct rte_pci_device *pci_dev) +{ + struct ifcvf_internal *internal; + struct internal_list *list; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + list = find_internal_resource_by_dev(pci_dev); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device: %s", pci_dev->name); 
+ return -1; + } + + internal = list->internal; + rte_atomic32_set(&internal->started, 0); + update_datapath(internal); + + rte_pci_unmap_device(internal->pdev); + rte_vfio_container_destroy(internal->vfio_container_fd); + rte_vdpa_unregister_device(internal->did); + + pthread_mutex_lock(&internal_list_lock); + TAILQ_REMOVE(&internal_list, list, next); + pthread_mutex_unlock(&internal_list_lock); + + rte_free(list); + rte_free(internal); + + return 0; +} + +/* + * IFCVF has the same vendor ID and device ID as virtio net PCI + * device, with its specific subsystem vendor ID and device ID. + */ +static const struct rte_pci_id pci_id_ifcvf_map[] = { + { .class_id = RTE_CLASS_ANY_ID, + .vendor_id = IFCVF_VENDOR_ID, + .device_id = IFCVF_DEVICE_ID, + .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID, + .subsystem_device_id = IFCVF_SUBSYS_DEVICE_ID, + }, + + { .vendor_id = 0, /* sentinel */ + }, +}; + +static struct rte_pci_driver rte_ifcvf_vdpa = { + .id_table = pci_id_ifcvf_map, + .drv_flags = 0, + .probe = ifcvf_pci_probe, + .remove = ifcvf_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_ifcvf, rte_ifcvf_vdpa); +RTE_PMD_REGISTER_PCI_TABLE(net_ifcvf, pci_id_ifcvf_map); +RTE_PMD_REGISTER_KMOD_DEP(net_ifcvf, "* vfio-pci"); + +RTE_INIT(ifcvf_vdpa_init_log) +{ + ifcvf_vdpa_logtype = rte_log_register("pmd.net.ifcvf_vdpa"); + if (ifcvf_vdpa_logtype >= 0) + rte_log_set_level(ifcvf_vdpa_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/ifc/meson.build b/drivers/net/ifc/meson.build new file mode 100644 index 00000000..72df070a --- /dev/null +++ b/drivers/net/ifc/meson.build @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +build = dpdk_conf.has('RTE_LIBRTE_VHOST') +allow_experimental_apis = true +sources = files('ifcvf_vdpa.c', 'base/ifcvf.c') +includes += include_directories('base') +deps += 'vhost' diff --git a/drivers/net/ifc/rte_pmd_ifc_version.map b/drivers/net/ifc/rte_pmd_ifc_version.map new file mode 100644 index 00000000..9b9ab1a4 --- /dev/null +++ b/drivers/net/ifc/rte_pmd_ifc_version.map @@ -0,0 +1,4 @@ +DPDK_18.05 { + + local: *; +}; diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile index d0804fc5..7b6af353 100644 --- a/drivers/net/ixgbe/Makefile +++ b/drivers/net/ixgbe/Makefile @@ -20,9 +20,10 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # -CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259 +CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869 +CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259 -CFLAGS_ixgbe_rxtx.o += -wd3656 +CFLAGS_ixgbe_rxtx.o += -diag-disable 3656 else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) # @@ -103,6 +104,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c endif SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf_representor.c # install this header file SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index 44832585..26b19273 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -60,9 +59,6 @@ */ #define IXGBE_FC_LO 0x40 -/* Default minimum inter-interrupt interval for EITR configuration */ -#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT 0x79E - /* Timer value included in XOFF frames. 
*/ #define IXGBE_FC_PAUSE 0x680 @@ -101,8 +97,6 @@ #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0])) -#define IXGBE_HKEY_MAX_INDEX 10 - /* Additional timesync values. */ #define NSEC_PER_SEC 1000000000L #define IXGBE_INCVAL_10GB 0x66666666 @@ -118,7 +112,6 @@ #define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000 #define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000 -#define DEFAULT_ETAG_ETYPE 0x893f #define IXGBE_ETAG_ETYPE 0x00005084 #define IXGBE_ETAG_ETYPE_MASK 0x0000ffff #define IXGBE_ETAG_ETYPE_VALID 0x80000000 @@ -133,7 +126,7 @@ #define IXGBE_EXVET_VET_EXT_SHIFT 16 #define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000 -static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev); +static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params); static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev); static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev); @@ -196,6 +189,9 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on); static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); +static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, + int mask); +static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask); static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue); @@ -228,7 +224,7 @@ static void ixgbe_dev_interrupt_delayed_handler(void *param); static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t pool); static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); -static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, +static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config); static bool is_device_supported(struct rte_eth_dev *dev, @@ -244,8 +240,8 @@ static int ixgbevf_dev_link_update(struct rte_eth_dev *dev, static void ixgbevf_dev_stop(struct rte_eth_dev *dev); static void ixgbevf_dev_close(struct rte_eth_dev *dev); static int ixgbevf_dev_reset(struct rte_eth_dev *dev); -static void ixgbevf_intr_disable(struct ixgbe_hw *hw); -static void ixgbevf_intr_enable(struct ixgbe_hw *hw); +static void ixgbevf_intr_disable(struct rte_eth_dev *dev); +static void ixgbevf_intr_enable(struct rte_eth_dev *dev); static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); @@ -253,6 +249,7 @@ static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); +static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask); static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, @@ -286,7 +283,7 @@ static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t pool); static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); -static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, +static int 
ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static int ixgbe_syn_filter_get(struct rte_eth_dev *dev, struct rte_eth_syn_filter *filter); @@ -328,6 +325,11 @@ static int ixgbe_get_eeprom(struct rte_eth_dev *dev, static int ixgbe_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom); +static int ixgbe_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo); +static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); + static int ixgbevf_get_reg_length(struct rte_eth_dev *dev); static int ixgbevf_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs); @@ -565,6 +567,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .get_eeprom_length = ixgbe_get_eeprom_length, .get_eeprom = ixgbe_get_eeprom, .set_eeprom = ixgbe_set_eeprom, + .get_module_info = ixgbe_get_module_info, + .get_module_eeprom = ixgbe_get_module_eeprom, .get_dcb_info = ixgbe_dev_get_dcb_info, .timesync_adjust_time = ixgbe_timesync_adjust_time, .timesync_read_time = ixgbe_timesync_read_time, @@ -787,58 +791,6 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \ sizeof(rte_ixgbevf_stats_strings[0])) -/** - * Atomically reads the link status information from global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &(dev->data->dev_link); - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/** - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - /* * This function is the same as ixgbe_is_sfp() in base/ixgbe.h. */ @@ -1096,7 +1048,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) * It returns 0 on success. 
 */
 static int
-eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
+eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 {
 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -1339,6 +1291,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 struct ixgbe_hw *hw;
+ int retries = 0;
+ int ret;
 
 PMD_INIT_FUNC_TRACE();
 
@@ -1359,8 +1313,20 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 /* disable uio intr before callback unregister */
 rte_intr_disable(intr_handle);
- rte_intr_callback_unregister(intr_handle,
- ixgbe_dev_interrupt_handler, eth_dev);
+
+ do {
+ ret = rte_intr_callback_unregister(intr_handle,
+ ixgbe_dev_interrupt_handler, eth_dev);
+ if (ret >= 0) {
+ break;
+ } else if (ret != -EAGAIN) {
+ PMD_INIT_LOG(ERR,
+ "intr callback unregister failed: %d",
+ ret);
+ return ret;
+ }
+ rte_delay_ms(100);
+ } while (retries++ < (10 + IXGBE_LINK_UP_TIME));
 
 /* uninitialize PF if max_vfs not zero */
 ixgbe_pf_host_uninit(eth_dev);
@@ -1522,7 +1488,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
 }
 l2_tn_info->e_tag_en = FALSE;
 l2_tn_info->e_tag_fwd_en = FALSE;
- l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
+ l2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG;
 
 return 0;
 }
@@ -1641,7 +1607,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 ixgbevf_dev_stats_reset(eth_dev);
 
 /* Disable the interrupts for VF */
- ixgbevf_intr_disable(hw);
+ ixgbevf_intr_disable(eth_dev);
 
 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
 diag = hw->mac.ops.reset_hw(hw);
@@ -1710,7 +1676,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 rte_intr_callback_register(intr_handle,
 ixgbevf_dev_interrupt_handler, eth_dev);
 rte_intr_enable(intr_handle);
- ixgbevf_intr_enable(hw);
+ ixgbevf_intr_enable(eth_dev);
 
 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
 eth_dev->data->port_id, pci_dev->id.vendor_id,
@@ -1743,7 +1709,7 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
 eth_dev->tx_pkt_burst = NULL;
 
 /* Disable the interrupts for VF */
- ixgbevf_intr_disable(hw);
+ ixgbevf_intr_disable(eth_dev);
 
 rte_free(eth_dev->data->mac_addrs);
 eth_dev->data->mac_addrs = NULL;
@@ -1755,16 +1721,81 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
 return 0;
 }
 
-static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
- struct rte_pci_device *pci_dev)
-{
- return rte_eth_dev_pci_generic_probe(pci_dev,
- sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
+static int
+eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *pf_ethdev;
+ struct rte_eth_devargs eth_da;
+ int i, retval;
+
+ if (pci_dev->device.devargs) {
+ retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+ &eth_da);
+ if (retval)
+ return retval;
+ } else
+ memset(&eth_da, 0, sizeof(eth_da));
+
+ retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+ sizeof(struct ixgbe_adapter),
+ eth_dev_pci_specific_init, pci_dev,
+ eth_ixgbe_dev_init, NULL);
+
+ if (retval || eth_da.nb_representor_ports < 1)
+ return retval;
+
+ pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (pf_ethdev == NULL)
+ return -ENODEV;
+
+ /* probe VF representor ports */
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ struct ixgbe_vf_info *vfinfo;
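+ /*
+ * Each requested representor id becomes its own ethdev named
+ * "net_<pci>_representor_<id>", backed by this PF's switch
+ * domain.
+ */
+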
struct ixgbe_vf_representor representor; + + vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( + pf_ethdev->data->dev_private); + if (vfinfo == NULL) { + PMD_DRV_LOG(ERR, + "no virtual functions supported by PF"); + break; + } + + representor.vf_id = eth_da.representor_ports[i]; + representor.switch_domain_id = vfinfo->switch_domain_id; + representor.pf_ethdev = pf_ethdev; + + /* representor port net_bdf_port */ + snprintf(name, sizeof(name), "net_%s_representor_%d", + pci_dev->device.name, + eth_da.representor_ports[i]); + + retval = rte_eth_dev_create(&pci_dev->device, name, + sizeof(struct ixgbe_vf_representor), NULL, NULL, + ixgbe_vf_representor_init, &representor); + + if (retval) + PMD_DRV_LOG(ERR, "failed to create ixgbe vf " + "representor %s.", name); + } + + return 0; } static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) { - return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit); + struct rte_eth_dev *ethdev; + + ethdev = rte_eth_dev_allocated(pci_dev->device.name); + if (!ethdev) + return -ENODEV; + + if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) + return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit); + else + return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit); } static struct rte_pci_driver rte_ixgbe_pmd = { @@ -1947,10 +1978,13 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) rxq = dev->data->rx_queues[queue]; - if (on) + if (on) { rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; - else + rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } else { rxq->vlan_flags = PKT_RX_VLAN; + rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } } static void @@ -2001,64 +2035,6 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); } -void -ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev) -{ - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t ctrl; - uint16_t i; - struct ixgbe_rx_queue *rxq; - - PMD_INIT_FUNC_TRACE(); - - if (hw->mac.type == ixgbe_mac_82598EB) { - ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - ctrl &= ~IXGBE_VLNCTRL_VME; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); - } else { - /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ - for (i = 0; i < dev->data->nb_rx_queues; i++) { - rxq = dev->data->rx_queues[i]; - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); - ctrl &= ~IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); - - /* record those setting for HW strip per queue */ - ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0); - } - } -} - -void -ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev) -{ - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t ctrl; - uint16_t i; - struct ixgbe_rx_queue *rxq; - - PMD_INIT_FUNC_TRACE(); - - if (hw->mac.type == ixgbe_mac_82598EB) { - ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - ctrl |= IXGBE_VLNCTRL_VME; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); - } else { - /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ - for (i = 0; i < dev->data->nb_rx_queues; i++) { - rxq = dev->data->rx_queues[i]; - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); - ctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); - - /* record those setting for HW strip per queue */ - ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1); - } - } -} - static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) { @@ -2114,25 +2090,93 @@ ixgbe_vlan_hw_extend_enable(struct 
rte_eth_dev *dev) */ } -static int -ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) +void +ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + uint32_t ctrl; + uint16_t i; + struct ixgbe_rx_queue *rxq; + bool on; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type == ixgbe_mac_82598EB) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + ctrl |= IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); + } else { + ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + ctrl &= ~IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); + } + } else { + /* + * Other 10G NIC, the VLAN strip can be setup + * per queue in RXDCTL + */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + ctrl |= IXGBE_RXDCTL_VME; + on = TRUE; + } else { + ctrl &= ~IXGBE_RXDCTL_VME; + on = FALSE; + } + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); + + /* record those setting for HW strip per queue */ + ixgbe_vlan_hw_strip_bitmap_set(dev, i, on); + } + } +} + +static void +ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) { + uint16_t i; + struct rte_eth_rxmode *rxmode; + struct ixgbe_rx_queue *rxq; + if (mask & ETH_VLAN_STRIP_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_strip) - ixgbe_vlan_hw_strip_enable_all(dev); + rxmode = &dev->data->dev_conf.rxmode; + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } else - ixgbe_vlan_hw_strip_disable_all(dev); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } + } +} + +static int +ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) +{ + struct rte_eth_rxmode *rxmode; + rxmode = &dev->data->dev_conf.rxmode; + + if (mask & ETH_VLAN_STRIP_MASK) { + ixgbe_vlan_hw_strip_config(dev); } if (mask & ETH_VLAN_FILTER_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ixgbe_vlan_hw_filter_enable(dev); else ixgbe_vlan_hw_filter_disable(dev); } if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_extend) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) ixgbe_vlan_hw_extend_enable(dev); else ixgbe_vlan_hw_extend_disable(dev); @@ -2141,6 +2185,16 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) return 0; } +static int +ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + ixgbe_config_vlan_strip_on_all_queues(dev, mask); + + ixgbe_vlan_offload_config(dev, mask); + + return 0; +} + static void ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) { @@ -2289,11 +2343,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { const struct rte_eth_dcb_rx_conf *conf; - if (nb_rx_q != IXGBE_DCB_NB_QUEUES) { - PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.", - IXGBE_DCB_NB_QUEUES); - return -EINVAL; - } conf = &dev_conf->rx_adv_conf.dcb_rx_conf; if (!(conf->nb_tcs == ETH_4_TCS || conf->nb_tcs == ETH_8_TCS)) { @@ -2307,11 +2356,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { const struct rte_eth_dcb_tx_conf 
*conf; - if (nb_tx_q != IXGBE_DCB_NB_QUEUES) { - PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.", - IXGBE_DCB_NB_QUEUES); - return -EINVAL; - } conf = &dev_conf->tx_adv_conf.dcb_tx_conf; if (!(conf->nb_tcs == ETH_4_TCS || conf->nb_tcs == ETH_8_TCS)) { @@ -2480,6 +2524,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) uint32_t intr_vector = 0; int err, link_up = 0, negotiate = 0; uint32_t speed = 0; + uint32_t allowed_speeds = 0; int mask = 0; int status; uint16_t vf, idx; @@ -2561,7 +2606,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; - err = ixgbe_vlan_offload_set(dev, mask); + err = ixgbe_vlan_offload_config(dev, mask); if (err) { PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); goto error; @@ -2628,9 +2673,21 @@ ixgbe_dev_start(struct rte_eth_dev *dev) if (err) goto error; + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G | + ETH_LINK_SPEED_10G; + break; + default: + allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G; + } + link_speeds = &dev->data->dev_conf.link_speeds; - if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | - ETH_LINK_SPEED_10G)) { + if (*link_speeds & ~allowed_speeds) { PMD_INIT_LOG(ERR, "Invalid link setting"); goto error; } @@ -2656,6 +2713,10 @@ ixgbe_dev_start(struct rte_eth_dev *dev) } else { if (*link_speeds & ETH_LINK_SPEED_10G) speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_5G) + speed |= IXGBE_LINK_SPEED_5GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_2_5G) + speed |= IXGBE_LINK_SPEED_2_5GB_FULL; if (*link_speeds & ETH_LINK_SPEED_1G) speed |= IXGBE_LINK_SPEED_1GB_FULL; if (*link_speeds & ETH_LINK_SPEED_100M) @@ -2666,6 +2727,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev) if (err) goto error; + ixgbe_dev_link_update(dev, 0); + skip_link_setup: if (rte_intr_allow_others(intr_handle)) { @@ -2757,7 +2820,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) /* Clear recorded link status */ memset(&link, 0, sizeof(link)); - rte_ixgbe_dev_atomic_write_link_status(dev, &link); + rte_eth_linkstatus_set(dev, &link); if (!rte_intr_allow_others(intr_handle)) /* resume to the default handler */ @@ -2881,7 +2944,7 @@ ixgbe_dev_reset(struct rte_eth_dev *dev) if (ret) return ret; - ret = eth_ixgbe_dev_init(dev); + ret = eth_ixgbe_dev_init(dev, NULL); return ret; } @@ -3057,9 +3120,18 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, } /* Flow Director Stats registers */ - hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); - hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); - + if (hw->mac.type != ixgbe_mac_82598EB) { + hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); + hw_stats->fdirustat_add += IXGBE_READ_REG(hw, + IXGBE_FDIRUSTAT) & 0xFFFF; + hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw, + IXGBE_FDIRUSTAT) >> 16) & 0xFFFF; + hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw, + IXGBE_FDIRFSTAT) & 0xFFFF; + hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw, + IXGBE_FDIRFSTAT) >> 16) & 0xFFFF; + } /* MACsec Stats registers */ macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); macsec_stats->out_pkts_encrypted += @@ -3625,7 +3697,6 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_conf 
*dev_conf = &dev->data->dev_conf; - dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; if (RTE_ETH_DEV_SRIOV(dev).active == 0) { @@ -3647,54 +3718,11 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) else dev_info->max_vmdq_pools = ETH_64_POOLS; dev_info->vmdq_queue_num = dev_info->max_rx_queues; - dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_CRC_STRIP; - - /* - * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV - * mode. - */ - if ((hw->mac.type == ixgbe_mac_82599EB || - hw->mac.type == ixgbe_mac_X540) && - !RTE_ETH_DEV_SRIOV(dev).active) - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO; - - if (hw->mac.type == ixgbe_mac_82599EB || - hw->mac.type == ixgbe_mac_X540) - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP; - - if (hw->mac.type == ixgbe_mac_X550 || - hw->mac.type == ixgbe_mac_X550EM_x || - hw->mac.type == ixgbe_mac_X550EM_a) - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; - - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; - - if (hw->mac.type == ixgbe_mac_82599EB || - hw->mac.type == ixgbe_mac_X540) - dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT; - - if (hw->mac.type == ixgbe_mac_X550 || - hw->mac.type == ixgbe_mac_X550EM_x || - hw->mac.type == ixgbe_mac_X550EM_a) - dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; - -#ifdef RTE_LIBRTE_SECURITY - if (dev->security_ctx) { - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY; - dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; - } -#endif + dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); + dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | + dev_info->rx_queue_offload_capa); + dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); + dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -3704,6 +3732,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -3714,8 +3743,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .offloads = 0, }; dev_info->rx_desc_lim = rx_desc_lim; @@ -3736,6 +3764,14 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->speed_capa |= ETH_LINK_SPEED_2_5G; dev_info->speed_capa |= ETH_LINK_SPEED_5G; } + + /* Driver-preferred Rx/Tx parameters */ + dev_info->default_rxportconf.burst_size = 32; + dev_info->default_txportconf.burst_size = 32; + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_rxportconf.ring_size = 256; + dev_info->default_txportconf.ring_size = 256; } static const uint32_t * @@ -3784,7 +3820,6 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct ixgbe_hw *hw = 
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ @@ -3796,17 +3831,11 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, dev_info->max_vmdq_pools = ETH_16_POOLS; else dev_info->max_vmdq_pools = ETH_64_POOLS; - dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_CRC_STRIP; - dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); + dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | + dev_info->rx_queue_offload_capa); + dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); + dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -3816,6 +3845,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, }, .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -3826,8 +3856,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, }, .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .offloads = 0, }; dev_info->rx_desc_lim = rx_desc_lim; @@ -3863,7 +3892,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs * before the link status is correct */ - if (mac->type == ixgbe_mac_82599_vf) { + if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) { int i; for (i = 0; i < 5; i++) { @@ -3941,12 +3970,12 @@ out: } /* return 0 means link status changed, -1 means not changed */ -static int +int ixgbe_dev_link_update_share(struct rte_eth_dev *dev, int wait_to_complete, int vf) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_eth_link link, old; + struct rte_eth_link link; ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); @@ -3956,12 +3985,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, int wait = 1; bool autoneg = false; + memset(&link, 0, sizeof(link)); link.link_status = ETH_LINK_DOWN; - link.link_speed = 0; + link.link_speed = ETH_SPEED_NUM_NONE; link.link_duplex = ETH_LINK_HALF_DUPLEX; link.link_autoneg = ETH_LINK_AUTONEG; - memset(&old, 0, sizeof(old)); - rte_ixgbe_dev_atomic_read_link_status(dev, &old); hw->mac.get_link_status = true; @@ -3985,19 +4013,14 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, if (diag != 0) { link.link_speed = ETH_SPEED_NUM_100M; link.link_duplex = ETH_LINK_FULL_DUPLEX; - rte_ixgbe_dev_atomic_write_link_status(dev, &link); - if (link.link_status == old.link_status) - return -1; - return 0; + return rte_eth_linkstatus_set(dev, &link); } if (link_up == 0) { - rte_ixgbe_dev_atomic_write_link_status(dev, &link); intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; - if (link.link_status == old.link_status) - return -1; - return 0; + return rte_eth_linkstatus_set(dev, &link); } + intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; link.link_status = 
ETH_LINK_UP; link.link_duplex = ETH_LINK_FULL_DUPLEX; @@ -4029,12 +4052,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, link.link_speed = ETH_SPEED_NUM_10G; break; } - rte_ixgbe_dev_atomic_write_link_status(dev, &link); - - if (link.link_status == old.link_status) - return -1; - return 0; + return rte_eth_linkstatus_set(dev, &link); } static int @@ -4233,8 +4252,8 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_eth_link link; - memset(&link, 0, sizeof(link)); - rte_ixgbe_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); + if (link.link_status) { PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", (int)(dev->data->port_id), @@ -4269,7 +4288,6 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev, struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); int64_t timeout; - struct rte_eth_link link; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -4286,9 +4304,10 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev, } if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { + struct rte_eth_link link; + /* get the link status before link update, for predicting later */ - memset(&link, 0, sizeof(link)); - rte_ixgbe_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); ixgbe_dev_link_update(dev, 0); @@ -4853,14 +4872,15 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) ixgbe_clear_rar(hw, index); } -static void +static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); ixgbe_remove_rar(dev, 0); - ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); + + return 0; } static bool @@ -4909,10 +4929,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) /* switch to jumbo mode if needed */ if (frame_size > ETHER_MAX_LEN) { - dev->data->dev_conf.rxmode.jumbo_frame = 1; + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; hlreg0 |= IXGBE_HLREG0_JUMBOEN; } else { - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; } IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); @@ -4932,19 +4954,32 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) * Virtual Function operations */ static void -ixgbevf_intr_disable(struct ixgbe_hw *hw) +ixgbevf_intr_disable(struct rte_eth_dev *dev) { + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + PMD_INIT_FUNC_TRACE(); /* Clear interrupt mask to stop from interrupts being generated */ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); IXGBE_WRITE_FLUSH(hw); + + /* Clear mask value. */ + intr->mask = 0; } static void -ixgbevf_intr_enable(struct ixgbe_hw *hw) +ixgbevf_intr_enable(struct rte_eth_dev *dev) { + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + PMD_INIT_FUNC_TRACE(); /* VF enable interrupt autoclean */ @@ -4953,6 +4988,9 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); IXGBE_WRITE_FLUSH(hw); + + /* Save IXGBE_VTEIMS value to mask. 
*/ + intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; } static int @@ -4970,14 +5008,14 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev) * Keep the persistent behavior the same as Host PF */ #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC - if (!conf->rxmode.hw_strip_crc) { + if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) { PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); - conf->rxmode.hw_strip_crc = 1; + conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP; } #else - if (conf->rxmode.hw_strip_crc) { + if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) { PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); - conf->rxmode.hw_strip_crc = 0; + conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP; } #endif @@ -5030,7 +5068,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) /* Set HW strip */ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; - err = ixgbevf_vlan_offload_set(dev, mask); + err = ixgbevf_vlan_offload_config(dev, mask); if (err) { PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err); ixgbe_dev_clear_queues(dev); @@ -5039,6 +5077,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) ixgbevf_dev_rxtx_start(dev); + ixgbevf_dev_link_update(dev, 0); + /* check and configure queue intr-vector mapping */ if (rte_intr_cap_multiple(intr_handle) && dev->data->dev_conf.intr_conf.rxq) { @@ -5074,7 +5114,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) rte_intr_enable(intr_handle); /* Re-enable interrupt for VF */ - ixgbevf_intr_enable(hw); + ixgbevf_intr_enable(dev); return 0; } @@ -5088,7 +5128,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); - ixgbevf_intr_disable(hw); + ixgbevf_intr_disable(dev); hw->adapter_stopped = 1; ixgbe_stop_adapter(hw); @@ -5226,24 +5266,34 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) } static int -ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) +ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) { - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_rx_queue *rxq; uint16_t i; int on = 0; /* VF function only support hw strip feature, others are not support */ if (mask & ETH_VLAN_STRIP_MASK) { - on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip); - - for (i = 0; i < hw->mac.max_rx_queues; i++) + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); ixgbevf_vlan_strip_queue_set(dev, i, on); + } } return 0; } +static int +ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + ixgbe_config_vlan_strip_on_all_queues(dev, mask); + + ixgbevf_vlan_offload_config(dev, mask); + + return 0; +} + int ixgbe_vt_check(struct ixgbe_hw *hw) { @@ -5580,17 +5630,17 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; - uint32_t mask; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t vec = IXGBE_MISC_VEC_ID; - mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); if (rte_intr_allow_others(intr_handle)) vec = IXGBE_RX_VEC_START; - mask |= (1 << vec); + intr->mask |= (1 << vec); RTE_SET_USED(queue_id); - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); rte_intr_enable(intr_handle); @@ -5600,19 +5650,19 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t 
queue_id) static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) { - uint32_t mask; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t vec = IXGBE_MISC_VEC_ID; - mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); if (rte_intr_allow_others(intr_handle)) vec = IXGBE_RX_VEC_START; - mask &= ~(1 << vec); + intr->mask &= ~(1 << vec); RTE_SET_USED(queue_id); - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); return 0; } @@ -5778,6 +5828,13 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) if (vector_idx < base + intr_handle->nb_efd - 1) vector_idx++; } + + /* As the RX queue settings above show, all queues use vector 0. + * Set only the ITR value of IXGBE_MISC_VEC_ID. + */ + IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID), + IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) + | IXGBE_EITR_CNT_WDIS); } /** @@ -5799,8 +5856,12 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) /* won't configure msix register if no mapping is done * between intr vector and event fd + but if msix has been enabled already, need to configure + * auto clean, auto mask and throttling. */ - if (!rte_intr_dp_is_en(intr_handle)) + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + if (!rte_intr_dp_is_en(intr_handle) && + !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) return; if (rte_intr_allow_others(intr_handle)) @@ -5824,30 +5885,34 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) /* Populate the IVAR table and set the ITR values to the * corresponding register. */ - for (queue_id = 0; queue_id < dev->data->nb_rx_queues; - queue_id++) { - /* by default, 1:1 mapping */ - ixgbe_set_ivar_map(hw, 0, queue_id, vec); - intr_handle->intr_vec[queue_id] = vec; - if (vec < base + intr_handle->nb_efd - 1) - vec++; - } + if (rte_intr_dp_is_en(intr_handle)) { + for (queue_id = 0; queue_id < dev->data->nb_rx_queues; + queue_id++) { + /* by default, 1:1 mapping */ + ixgbe_set_ivar_map(hw, 0, queue_id, vec); + intr_handle->intr_vec[queue_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, - IXGBE_MISC_VEC_ID); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); - break; - default: - break; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_set_ivar_map(hw, -1, + IXGBE_IVAR_OTHER_CAUSES_INDEX, + IXGBE_MISC_VEC_ID); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); + break; + default: + break; + } } IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), - IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF); + IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) + | IXGBE_EITR_CNT_WDIS); /* set up to autoclear timer, and the vectors */ mask = IXGBE_EIMS_ENABLE_MASK; @@ -5863,6 +5928,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t tx_rate) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_rxmode *rxmode; uint32_t rf_dec, rf_int; uint32_t bcnrc_val; uint16_t link_speed = dev->data->dev_link.link_speed; @@ -5884,14 +5950,14 @@ ixgbe_set_queue_rate_limit(struct 
rte_eth_dev *dev, bcnrc_val = 0; } + rxmode = &dev->data->dev_conf.rxmode; /* * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise * set as 0x4. */ - if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) && - (dev->data->dev_conf.rxmode.max_rx_pkt_len >= - IXGBE_MAX_JUMBO_FRAME_SIZE)) + if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) && + (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE)) IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_JUMBO_FRAME); else @@ -5983,12 +6049,14 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) } } -static void +static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); + + return 0; } int @@ -6238,7 +6306,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) /* refuse mtu that requires the support of scattered packets when this * feature has not been enabled before. */ - if (!rx_conf->enable_scatter && + if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) && (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) return -EINVAL; @@ -6812,9 +6880,8 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev) uint32_t shift = 0; /* Get current link speed. */ - memset(&link, 0, sizeof(link)); ixgbe_dev_link_update(dev, 1); - rte_ixgbe_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); switch (link.link_speed) { case ETH_SPEED_NUM_100M: @@ -7166,6 +7233,78 @@ ixgbe_set_eeprom(struct rte_eth_dev *dev, return eeprom->ops.write_buffer(hw, first, length, data); } +static int +ixgbe_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t status; + uint8_t sff8472_rev, addr_mode; + bool page_swap = false; + + /* Check whether we support SFF-8472 or not */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status != 0) + return -EIO; + + /* addressing mode is not supported */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if (status != 0) + return -EIO; + + if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { + PMD_DRV_LOG(ERR, + "Address change required to access page 0xA2, " + "but not supported. Please report the module " + "type to the driver maintainers."); + page_swap = true; + } + + if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { + /* We have a SFP, but it does not support SFF-8472 */ + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. 
*/ + modinfo->type = RTE_ETH_MODULE_SFF_8472; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int +ixgbe_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID; + uint8_t databyte = 0xFF; + uint8_t *data = info->data; + uint32_t i = 0; + + if (info->length == 0) + return -EINVAL; + + for (i = info->offset; i < info->offset + info->length; i++) { + if (i < RTE_ETH_MODULE_SFF_8079_LEN) + status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); + else + status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); + + if (status != 0) + return -EIO; + + data[i - info->offset] = databyte; + } + + return 0; +} + uint16_t ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { switch (mac_type) { @@ -8169,7 +8308,7 @@ ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); - ixgbevf_intr_disable(hw); + ixgbevf_intr_disable(dev); /* read-on-clear nic registers here */ eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); @@ -8186,7 +8325,6 @@ ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) static int ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); @@ -8195,7 +8333,7 @@ ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) intr->flags &= ~IXGBE_FLAG_MAILBOX; } - ixgbevf_intr_enable(hw); + ixgbevf_intr_enable(dev); return 0; } @@ -8334,7 +8472,7 @@ ixgbe_rss_filter_restore(struct rte_eth_dev *dev) struct ixgbe_filter_info *filter_info = IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - if (filter_info->rss_info.num) + if (filter_info->rss_info.conf.queue_num) ixgbe_config_rss_filter(dev, &filter_info->rss_info, TRUE); } @@ -8446,9 +8584,7 @@ RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map); RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci"); -RTE_INIT(ixgbe_init_log); -static void -ixgbe_init_log(void) +RTE_INIT(ixgbe_init_log) { ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init"); if (ixgbe_logtype_init >= 0) diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h index c56d6524..d0b93968 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.h +++ b/drivers/net/ixgbe/ixgbe_ethdev.h @@ -4,6 +4,9 @@ #ifndef _IXGBE_ETHDEV_H_ #define _IXGBE_ETHDEV_H_ + +#include + #include "base/ixgbe_type.h" #include "base/ixgbe_dcb.h" #include "base/ixgbe_dcb_82599.h" @@ -12,6 +15,7 @@ #ifdef RTE_LIBRTE_SECURITY #include "ixgbe_ipsec.h" #endif +#include #include #include #include @@ -39,6 +43,7 @@ #define IXGBE_EXTENDED_VLAN (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */ #define IXGBE_VFTA_SIZE 128 #define IXGBE_VLAN_TAG_SIZE 4 +#define IXGBE_HKEY_MAX_INDEX 10 #define IXGBE_MAX_RX_QUEUE_NUM 128 #define IXGBE_MAX_INTR_QUEUE_NUM 15 #define IXGBE_VMDQ_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM @@ -57,6 +62,7 @@ (((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \ IXGBE_EITR_ITR_INT_MASK) +#define IXGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */ /* Loopback operation modes */ /* 82599 specific loopback operation types */ @@ -94,6 +100,11 @@ #define IXGBE_5TUPLE_MAX_PRI 7 #define IXGBE_5TUPLE_MIN_PRI 1 +/* bit of 
VXLAN tunnel type | 7 bits of zeros | 8 bits of zeros*/ +#define IXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000 +/* bit of NVGRE tunnel type | 7 bits of zeros | 8 bits of zeros*/ +#define IXGBE_FDIR_NVGRE_TUNNEL_TYPE 0x0 + #define IXGBE_RSS_OFFLOAD_ALL ( \ ETH_RSS_IPV4 | \ ETH_RSS_NONFRAG_IPV4_TCP | \ @@ -196,8 +207,8 @@ struct ixgbe_hw_fdir_info { }; struct ixgbe_rte_flow_rss_conf { - struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */ - uint16_t num; /**< Number of entries in queue[]. */ + struct rte_flow_action_rss conf; /**< RSS parameters. */ + uint8_t key[IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */ uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */ }; @@ -253,6 +264,7 @@ struct ixgbe_vf_info { uint16_t vlan_count; uint8_t spoofchk_enabled; uint8_t api_version; + uint16_t switch_domain_id; }; /* @@ -480,6 +492,15 @@ struct ixgbe_adapter { struct ixgbe_tm_conf tm_conf; }; +struct ixgbe_vf_representor { + uint16_t vf_id; + uint16_t switch_domain_id; + struct rte_eth_dev *pf_ethdev; +}; + +int ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params); +int ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev); + #define IXGBE_DEV_PRIVATE_TO_HW(adapter)\ (&((struct ixgbe_adapter *)adapter)->hw) @@ -652,6 +673,10 @@ int ixgbe_fdir_filter_program(struct rte_eth_dev *dev, void ixgbe_configure_dcb(struct rte_eth_dev *dev); +int +ixgbe_dev_link_update_share(struct rte_eth_dev *dev, + int wait_to_complete, int vf); + /* * misc function prototypes */ @@ -659,9 +684,7 @@ void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev); void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev); -void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev); - -void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev); +void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev); void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev); @@ -698,6 +721,10 @@ void ixgbe_tm_conf_init(struct rte_eth_dev *dev); void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev); int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t tx_rate); +int ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in); +int ixgbe_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with); int ixgbe_config_rss_filter(struct rte_eth_dev *dev, struct ixgbe_rte_flow_rss_conf *conf, bool add); diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c index d5e51797..e559f0fa 100644 --- a/drivers/net/ixgbe/ixgbe_fdir.c +++ b/drivers/net/ixgbe/ixgbe_fdir.c @@ -394,9 +394,12 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev) IXGBE_FDIRIP6M_TNI_VNI; if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) { - mac_mask = info->mask.mac_addr_byte_mask; - fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) - & IXGBE_FDIRIP6M_INNER_MAC; + fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC; + mac_mask = info->mask.mac_addr_byte_mask & + (IXGBE_FDIRIP6M_INNER_MAC >> + IXGBE_FDIRIP6M_INNER_MAC_SHIFT); + fdiripv6m &= ~((mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) & + IXGBE_FDIRIP6M_INNER_MAC); switch (info->mask.tunnel_type_mask) { case 0: @@ -771,10 +774,19 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter, input->formatted.inner_mac, fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes, sizeof(input->formatted.inner_mac)); - input->formatted.tunnel_type = - fdir_filter->input.flow.tunnel_flow.tunnel_type; + if (fdir_filter->input.flow.tunnel_flow.tunnel_type == + 
RTE_FDIR_TUNNEL_TYPE_VXLAN) + input->formatted.tunnel_type = + IXGBE_FDIR_VXLAN_TUNNEL_TYPE; + else if (fdir_filter->input.flow.tunnel_flow.tunnel_type == + RTE_FDIR_TUNNEL_TYPE_NVGRE) + input->formatted.tunnel_type = + IXGBE_FDIR_NVGRE_TUNNEL_TYPE; + else + PMD_DRV_LOG(ERR, " invalid tunnel type arguments."); + input->formatted.tni_vni = - fdir_filter->input.flow.tunnel_flow.tunnel_id; + fdir_filter->input.flow.tunnel_flow.tunnel_id >> 8; } return 0; @@ -1001,8 +1013,7 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0); } else { /* tunnel mode */ - if (input->formatted.tunnel_type != - RTE_FDIR_TUNNEL_TYPE_NVGRE) + if (input->formatted.tunnel_type) tunnel_type = 0x80000000; tunnel_type |= addr_high; IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low); @@ -1010,6 +1021,9 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni); } + IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0); + IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0); + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0); } /* record vlan (little-endian) and flex_bytes(big-endian) */ diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c index dcbfb38b..1adf1b80 100644 --- a/drivers/net/ixgbe/ixgbe_flow.c +++ b/drivers/net/ixgbe/ixgbe_flow.c @@ -264,8 +264,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } /* Skip Ethernet */ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; /*Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, @@ -298,8 +298,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { - vlan_spec = (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = (const struct rte_flow_item_vlan *)item->mask; + vlan_spec = item->spec; + vlan_mask = item->mask; /*Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, @@ -346,7 +346,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask; + ipv4_mask = item->mask; /** * Only support src & dst addresses, protocol, * others should be masked. 
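The ixgbe_flow.c hunks above and below drop the explicit casts around item->spec/item->mask (both are const void pointers, so the plain assignments are already well typed) and tighten what the ntuple parser accepts. For reference, a rule of the shape this parser handles could be built as in the sketch below. This is illustrative only, not part of the patch: the helper name, addresses and queue index are assumptions, and error handling is reduced to the return code.

/*
 * Illustrative sketch: an ntuple-style rte_flow rule for the parser above.
 * Assumes the port is configured and Rx queue 1 exists.
 */
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

static int
ntuple_flow_example(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 }; /* egress/transfer stay 0 */
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = RTE_BE32(0xc0a80001),	/* 192.168.0.1 */
			.dst_addr = RTE_BE32(0xc0a80002),	/* 192.168.0.2 */
			.next_proto_id = IPPROTO_UDP,
		},
	};
	/* Only src/dst address and protocol may be masked; any other
	 * non-zero IPv4 mask field makes the parser bail out.
	 */
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.next_proto_id = 0xff,
		},
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* empty spec/mask: skipped */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_validate(port_id, &attr, pattern, actions, &error);
}

With the attr->transfer checks introduced below, the same rule is rejected with RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER if the attribute sets transfer, since ixgbe has no switch-offload path for these filters.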
@@ -368,7 +368,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->src_ip_mask = ipv4_mask->hdr.src_addr; filter->proto_mask = ipv4_mask->hdr.next_proto_id; - ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec; + ipv4_spec = item->spec; filter->dst_ip = ipv4_spec->hdr.dst_addr; filter->src_ip = ipv4_spec->hdr.src_addr; filter->proto = ipv4_spec->hdr.next_proto_id; @@ -413,7 +413,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_mask = item->mask; /** * Only support src & dst ports, tcp flags, @@ -447,12 +447,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; + tcp_spec = item->spec; filter->dst_port = tcp_spec->hdr.dst_port; filter->src_port = tcp_spec->hdr.src_port; filter->tcp_flags = tcp_spec->hdr.tcp_flags; } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { - udp_mask = (const struct rte_flow_item_udp *)item->mask; + udp_mask = item->mask; /** * Only support src & dst ports, @@ -471,11 +471,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->dst_port_mask = udp_mask->hdr.dst_port; filter->src_port_mask = udp_mask->hdr.src_port; - udp_spec = (const struct rte_flow_item_udp *)item->spec; + udp_spec = item->spec; filter->dst_port = udp_spec->hdr.dst_port; filter->src_port = udp_spec->hdr.src_port; } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) { - sctp_mask = (const struct rte_flow_item_sctp *)item->mask; + sctp_mask = item->mask; /** * Only support src & dst ports, @@ -494,7 +494,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->dst_port_mask = sctp_mask->hdr.dst_port; filter->src_port_mask = sctp_mask->hdr.src_port; - sctp_spec = (const struct rte_flow_item_sctp *)item->spec; + sctp_spec = item->spec; filter->dst_port = sctp_spec->hdr.dst_port; filter->src_port = sctp_spec->hdr.src_port; } else { @@ -557,6 +557,15 @@ action: return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + if (attr->priority > 0xFFFF) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -699,8 +708,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; /* Mask bits of source MAC address must be full of 0. 
* Mask bits of destination MAC address must be full @@ -786,6 +795,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* Not supported */ + if (attr->transfer) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* Not supported */ if (attr->priority) { rte_flow_error_set(error, EINVAL, @@ -1000,8 +1017,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_spec = item->spec; + tcp_mask = item->mask; if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) || tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port || @@ -1078,6 +1095,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* Support 2 priorities, the lowest or highest. */ if (!attr->priority) { filter->hig_pri = 0; @@ -1198,8 +1224,8 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, return -rte_errno; } - e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec; - e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask; + e_tag_spec = item->spec; + e_tag_mask = item->mask; /* Only care about GRP and E cid base. */ if (e_tag_mask->epcp_edei_in_ecid_b || @@ -1249,6 +1275,15 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* not supported */ if (attr->priority) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); @@ -1353,6 +1388,15 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* not supported */ if (attr->priority) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1447,12 +1491,9 @@ static inline uint8_t signature_match(const struct rte_flow_item pattern[]) break; if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) { - spec = - (const struct rte_flow_item_fuzzy *)item->spec; - last = - (const struct rte_flow_item_fuzzy *)item->last; - mask = - (const struct rte_flow_item_fuzzy *)item->mask; + spec = item->spec; + last = item->last; + mask = item->mask; if (!spec || !mask) return 0; @@ -1632,7 +1673,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, if (item->spec) { rule->b_spec = TRUE; - eth_spec = (const struct rte_flow_item_eth *)item->spec; + eth_spec = item->spec; /* Get the dst MAC. */ for (j = 0; j < ETHER_ADDR_LEN; j++) { @@ -1645,7 +1686,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, if (item->mask) { rule->b_mask = TRUE; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_mask = item->mask; /* Ether type should be masked. 
*/ if (eth_mask->type || @@ -1698,7 +1739,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } } else { - if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_VLAN) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1725,8 +1767,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } - vlan_spec = (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = (const struct rte_flow_item_vlan *)item->mask; + vlan_spec = item->spec; + vlan_mask = item->mask; rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci; @@ -1772,8 +1814,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } rule->b_mask = TRUE; - ipv4_mask = - (const struct rte_flow_item_ipv4 *)item->mask; + ipv4_mask = item->mask; if (ipv4_mask->hdr.version_ihl || ipv4_mask->hdr.type_of_service || ipv4_mask->hdr.total_length || @@ -1793,8 +1834,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, if (item->spec) { rule->b_spec = TRUE; - ipv4_spec = - (const struct rte_flow_item_ipv4 *)item->spec; + ipv4_spec = item->spec; rule->ixgbe_fdir.formatted.dst_ip[0] = ipv4_spec->hdr.dst_addr; rule->ixgbe_fdir.formatted.src_ip[0] = @@ -1844,8 +1884,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, } rule->b_mask = TRUE; - ipv6_mask = - (const struct rte_flow_item_ipv6 *)item->mask; + ipv6_mask = item->mask; if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len || ipv6_mask->hdr.proto || @@ -1885,8 +1924,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, if (item->spec) { rule->b_spec = TRUE; - ipv6_spec = - (const struct rte_flow_item_ipv6 *)item->spec; + ipv6_spec = item->spec; rte_memcpy(rule->ixgbe_fdir.formatted.src_ip, ipv6_spec->hdr.src_addr, 16); rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip, @@ -1938,7 +1976,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } rule->b_mask = TRUE; - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_mask = item->mask; if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack || tcp_mask->hdr.data_off || @@ -1957,7 +1995,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, if (item->spec) { rule->b_spec = TRUE; - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; + tcp_spec = item->spec; rule->ixgbe_fdir.formatted.src_port = tcp_spec->hdr.src_port; rule->ixgbe_fdir.formatted.dst_port = @@ -2003,7 +2041,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } rule->b_mask = TRUE; - udp_mask = (const struct rte_flow_item_udp *)item->mask; + udp_mask = item->mask; if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -2017,7 +2055,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, if (item->spec) { rule->b_spec = TRUE; - udp_spec = (const struct rte_flow_item_udp *)item->spec; + udp_spec = item->spec; rule->ixgbe_fdir.formatted.src_port = udp_spec->hdr.src_port; rule->ixgbe_fdir.formatted.dst_port = @@ -2068,8 +2106,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } rule->b_mask = TRUE; - sctp_mask = - (const struct rte_flow_item_sctp *)item->mask; + sctp_mask = item->mask; if (sctp_mask->hdr.tag || sctp_mask->hdr.cksum) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -2083,8 +2120,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, if (item->spec) { rule->b_spec = TRUE; - 
sctp_spec = - (const struct rte_flow_item_sctp *)item->spec; + sctp_spec = item->spec; rule->ixgbe_fdir.formatted.src_port = sctp_spec->hdr.src_port; rule->ixgbe_fdir.formatted.dst_port = @@ -2092,8 +2128,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, } /* others even sctp port is not supported */ } else { - sctp_mask = - (const struct rte_flow_item_sctp *)item->mask; + sctp_mask = item->mask; if (sctp_mask && (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port || @@ -2136,7 +2171,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } - raw_mask = (const struct rte_flow_item_raw *)item->mask; + raw_mask = item->mask; /* check mask */ if (raw_mask->relative != 0x1 || @@ -2152,7 +2187,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } - raw_spec = (const struct rte_flow_item_raw *)item->spec; + raw_spec = item->spec; /* check spec */ if (raw_spec->relative != 0 || @@ -2403,7 +2438,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, /* Get the VxLAN info */ if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) { rule->ixgbe_fdir.formatted.tunnel_type = - RTE_FDIR_TUNNEL_TYPE_VXLAN; + IXGBE_FDIR_VXLAN_TUNNEL_TYPE; /* Only care about VNI, others should be masked. */ if (!item->mask) { @@ -2425,8 +2460,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, /* Tunnel type is always meaningful. */ rule->mask.tunnel_type_mask = 1; - vxlan_mask = - (const struct rte_flow_item_vxlan *)item->mask; + vxlan_mask = item->mask; if (vxlan_mask->flags) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2452,20 +2486,17 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, if (item->spec) { rule->b_spec = TRUE; - vxlan_spec = (const struct rte_flow_item_vxlan *) - item->spec; + vxlan_spec = item->spec; rte_memcpy(((uint8_t *) - &rule->ixgbe_fdir.formatted.tni_vni + 1), + &rule->ixgbe_fdir.formatted.tni_vni), vxlan_spec->vni, RTE_DIM(vxlan_spec->vni)); - rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32( - rule->ixgbe_fdir.formatted.tni_vni); } } /* Get the NVGRE info */ if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) { rule->ixgbe_fdir.formatted.tunnel_type = - RTE_FDIR_TUNNEL_TYPE_NVGRE; + IXGBE_FDIR_NVGRE_TUNNEL_TYPE; /** * Only care about flags0, flags1, protocol and TNI, @@ -2490,8 +2521,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, /* Tunnel type is always meaningful. */ rule->mask.tunnel_type_mask = 1; - nvgre_mask = - (const struct rte_flow_item_nvgre *)item->mask; + nvgre_mask = item->mask; if (nvgre_mask->flow_id) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2534,8 +2564,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, if (item->spec) { rule->b_spec = TRUE; - nvgre_spec = - (const struct rte_flow_item_nvgre *)item->spec; + nvgre_spec = item->spec; if (nvgre_spec->c_k_s_rsvd0_ver != rte_cpu_to_be_16(0x2000) && nvgre_mask->c_k_s_rsvd0_ver) { @@ -2557,7 +2586,6 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, /* tni is a 24-bits bit field */ rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni, nvgre_spec->tni, RTE_DIM(nvgre_spec->tni)); - rule->ixgbe_fdir.formatted.tni_vni <<= 8; } } @@ -2591,7 +2619,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, return -rte_errno; } rule->b_mask = TRUE; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_mask = item->mask; /* Ether type should be masked. 
*/ if (eth_mask->type) { @@ -2632,7 +2660,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, if (item->spec) { rule->b_spec = TRUE; - eth_spec = (const struct rte_flow_item_eth *)item->spec; + eth_spec = item->spec; /* Get the dst MAC. */ for (j = 0; j < ETHER_ADDR_LEN; j++) { @@ -2671,8 +2699,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, return -rte_errno; } - vlan_spec = (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = (const struct rte_flow_item_vlan *)item->mask; + vlan_spec = item->spec; + vlan_mask = item->mask; rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci; @@ -2775,7 +2803,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev, rss = (const struct rte_flow_action_rss *)act->conf; - if (!rss || !rss->num) { + if (!rss || !rss->queue_num) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, @@ -2783,7 +2811,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev, return -rte_errno; } - for (n = 0; n < rss->num; n++) { + for (n = 0; n < rss->queue_num; n++) { if (rss->queue[n] >= dev->data->nb_rx_queues) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -2792,14 +2820,27 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev, return -rte_errno; } } - if (rss->rss_conf) - rss_conf->rss_conf = *rss->rss_conf; - else - rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL; - for (n = 0; n < rss->num; ++n) - rss_conf->queue[n] = rss->queue[n]; - rss_conf->num = rss->num; + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "non-default RSS hash functions are not supported"); + if (rss->level) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "a nonzero RSS encapsulation level is not supported"); + if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS hash key must be exactly 40 bytes"); + if (rss->queue_num > RTE_DIM(rss_conf->queue)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "too many queues for RSS context"); + if (ixgbe_rss_conf_init(rss_conf, rss)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS context initialization failure"); /* check if the next not void item is END */ act = next_no_void_action(actions, act); @@ -2830,6 +2871,15 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + if (attr->priority > 0xFFFF) { memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); rte_flow_error_set(error, EINVAL, @@ -2848,7 +2898,7 @@ ixgbe_clear_rss_filter(struct rte_eth_dev *dev) struct ixgbe_filter_info *filter_info = IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - if (filter_info->rss_info.num) + if (filter_info->rss_info.conf.queue_num) ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE); } @@ -3167,9 +3217,8 @@ ixgbe_flow_create(struct rte_eth_dev *dev, PMD_DRV_LOG(ERR, "failed to allocate memory"); goto out; } - rte_memcpy(&rss_filter_ptr->filter_info, - &rss_conf, - sizeof(struct ixgbe_rte_flow_rss_conf)); + ixgbe_rss_conf_init(&rss_filter_ptr->filter_info, + &rss_conf.conf); TAILQ_INSERT_TAIL(&filter_rss_list, rss_filter_ptr, entries); flow->rule = 
rss_filter_ptr; diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c index 176ec0fd..08405f1e 100644 --- a/drivers/net/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ixgbe/ixgbe_ipsec.c @@ -598,13 +598,18 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t reg; + uint64_t rx_offloads; + uint64_t tx_offloads; + + rx_offloads = dev->data->dev_conf.rxmode.offloads; + tx_offloads = dev->data->dev_conf.txmode.offloads; /* sanity checks */ - if (dev->data->dev_conf.rxmode.enable_lro) { + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) { PMD_DRV_LOG(ERR, "RSC and IPsec not supported"); return -1; } - if (!dev->data->dev_conf.rxmode.hw_strip_crc) { + if (rte_eth_dev_must_keep_crc(rx_offloads)) { PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec"); return -1; } @@ -624,7 +629,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev) reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg); - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) { + if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) { IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0); reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); if (reg != 0) { @@ -632,7 +637,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev) return -1; } } - if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) { + if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) { IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD); reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c index ea997371..4b833ffa 100644 --- a/drivers/net/ixgbe/ixgbe_pf.c +++ b/drivers/net/ixgbe/ixgbe_pf.c @@ -90,6 +90,8 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev) if (*vfinfo == NULL) rte_panic("Cannot allocate memory for private VF data\n"); + rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id); + memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info)); memset(uta_info, 0, sizeof(struct ixgbe_uta_info)); hw->mac.mc_filter_type = 0; @@ -122,11 +124,10 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev) { struct ixgbe_vf_info **vfinfo; uint16_t vf_num; + int ret; PMD_INIT_FUNC_TRACE(); - vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private); - RTE_ETH_DEV_SRIOV(eth_dev).active = 0; RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0; RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0; @@ -136,6 +137,14 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev) if (vf_num == 0) return; + vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private); + if (*vfinfo == NULL) + return; + + ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id); + if (ret) + PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret); + rte_free(*vfinfo); *vfinfo = NULL; } @@ -329,10 +338,7 @@ set_rx_mode(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - if (dev->data->dev_conf.rxmode.hw_vlan_strip) - ixgbe_vlan_hw_strip_enable_all(dev); - else - ixgbe_vlan_hw_strip_disable_all(dev); + ixgbe_vlan_hw_strip_config(dev); } static inline void diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index 6c582b4b..f82b74a9 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -1420,7 +1420,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags) /* * Check if VLAN present only. 
* Do not check whether L3/L4 rx checksum done by NIC or not, - * That can be found from rte_eth_rxmode.hw_ip_checksum flag + * That can be found from rte_eth_rxmode.offloads flag */ pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0; @@ -2379,7 +2379,7 @@ void __attribute__((cold)) ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) { /* Use a simple Tx queue (no offloads, no multi segs) if possible */ - if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) && + if ((txq->offloads == 0) && #ifdef RTE_LIBRTE_SECURITY !(txq->using_ipsec) && #endif @@ -2398,9 +2398,8 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) } else { PMD_INIT_LOG(DEBUG, "Using full-featured tx code path"); PMD_INIT_LOG(DEBUG, - " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]", - (unsigned long)txq->txq_flags, - (unsigned long)IXGBE_SIMPLE_FLAGS); + " - offloads = 0x%" PRIx64, + txq->offloads); PMD_INIT_LOG(DEBUG, " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]", (unsigned long)txq->tx_rs_thresh, @@ -2410,6 +2409,45 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) } } +uint64_t +ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return 0; +} + +uint64_t +ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev) +{ + uint64_t tx_offload_capa; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; + + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) + tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT; + + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + +#ifdef RTE_LIBRTE_SECURITY + if (dev->security_ctx) + tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; +#endif + return tx_offload_capa; +} + int __attribute__((cold)) ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -2421,10 +2459,13 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq; struct ixgbe_hw *hw; uint16_t tx_rs_thresh, tx_free_thresh; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + /* * Validate number of transmit descriptors. * It must not exceed hardware maximum, and must be multiple @@ -2550,7 +2591,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? 
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); txq->port_id = dev->data->port_id; - txq->txq_flags = tx_conf->txq_flags; + txq->offloads = offloads; txq->ops = &def_txq_ops; txq->tx_deferred_start = tx_conf->tx_deferred_start; #ifdef RTE_LIBRTE_SECURITY @@ -2769,6 +2810,82 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) #endif } +static int +ixgbe_is_vf(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + switch (hw->mac.type) { + case ixgbe_mac_82599_vf: + case ixgbe_mac_X540_vf: + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return 1; + default: + return 0; + } +} + +uint64_t +ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev) +{ + uint64_t offloads = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != ixgbe_mac_82598EB) + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + + return offloads; +} + +uint64_t +ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev) +{ + uint64_t offloads; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + offloads = DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_SCATTER; + + if (hw->mac.type == ixgbe_mac_82598EB) + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + + if (ixgbe_is_vf(dev) == 0) + offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_EXTEND); + + /* + * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV + * mode. + */ + if ((hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) && + !RTE_ETH_DEV_SRIOV(dev).active) + offloads |= DEV_RX_OFFLOAD_TCP_LRO; + + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) + offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP; + + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; + +#ifdef RTE_LIBRTE_SECURITY + if (dev->security_ctx) + offloads |= DEV_RX_OFFLOAD_SECURITY; +#endif + + return offloads; +} + int __attribute__((cold)) ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -2783,10 +2900,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t len; struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)dev->data->dev_private; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; + /* * Validate number of receive descriptors. * It must not exceed hardware maximum, and must be multiple @@ -2816,10 +2936,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); rxq->port_id = dev->data->port_id; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? - 0 : ETHER_CRC_LEN); + if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) + rxq->crc_len = ETHER_CRC_LEN; + else + rxq->crc_len = 0; rxq->drop_en = rx_conf->rx_drop_en; rxq->rx_deferred_start = rx_conf->rx_deferred_start; + rxq->offloads = offloads; /* * The packet type in RX descriptor is different for different NICs. 
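With the queue setup paths now folding the device configuration into per-queue offloads (offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads, and likewise for Rx), an application drives everything through rte_eth_conf plus the queue configs. The sketch below shows that call sequence as it might look on 18.08; the helper name, descriptor counts and offload selection are assumptions, and most return codes are ignored for brevity.

/*
 * Application-side sketch of the 18.08 port/queue offload split.
 * Assumes port_id is bound to a PMD and mp is an initialized mbuf pool.
 */
#include <string.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>

static int
setup_port_offloads(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Request port-level offloads only if the PMD advertises them. */
	memset(&conf, 0, sizeof(conf));
	if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM) ==
			DEV_RX_OFFLOAD_CHECKSUM)
		conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MULTI_SEGS)
		conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
	rte_eth_dev_configure(port_id, 1, 1, &conf);

	/* Per-queue offloads: the port-level set plus anything listed in
	 * rx_queue_offload_capa, e.g. VLAN strip on non-82598 ixgbe.
	 */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = conf.rxmode.offloads;
	if (dev_info.rx_queue_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP)
		rxq_conf.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
			&rxq_conf, mp);

	txq_conf = dev_info.default_txconf;
	/* In 18.08 ethdev still converts the legacy txq_flags unless told
	 * to honor the offloads field instead.
	 */
	txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txq_conf.offloads = conf.txmode.offloads;
	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txq_conf);

	return rte_eth_dev_start(port_id);
}

Splitting the capabilities into rx_offload_capa and rx_queue_offload_capa lets the PMD advertise VLAN stripping per queue while checksum, LRO and the like remain port-wide, which is exactly how ixgbe_get_rx_queue_offloads() and ixgbe_get_rx_port_offloads() above partition them.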
@@ -4574,7 +4697,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) rsc_capable = true; - if (!rsc_capable && rx_conf->enable_lro) { + if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) { PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't " "support it"); return -EINVAL; @@ -4582,7 +4705,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */ - if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) { + if (rte_eth_dev_must_keep_crc(rx_conf->offloads) && + (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) { /* * According to chapter of 4.6.7.2.1 of the Spec Rev. * 3.0 RSC configuration requires HW CRC stripping being @@ -4596,7 +4720,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) /* RFCTL configuration */ rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); - if ((rsc_capable) && (rx_conf->enable_lro)) + if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) /* * Since NFS packets coalescing is not supported - clear * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is @@ -4609,7 +4733,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); /* If LRO hasn't been requested - we are done here. */ - if (!rx_conf->enable_lro) + if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) return 0; /* Set RDRXCTL.RSCACKC bit */ @@ -4666,7 +4790,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) * at most 500us latency for a single RSC aggregation. */ eitr &= ~IXGBE_EITR_ITR_INT_MASK; - eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS; + eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT); + eitr |= IXGBE_EITR_CNT_WDIS; IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl); IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl); @@ -4729,15 +4854,15 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) * Configure CRC stripping, if any. */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - if (rx_conf->hw_strip_crc) - hlreg0 |= IXGBE_HLREG0_RXCRCSTRP; - else + if (rte_eth_dev_must_keep_crc(rx_conf->offloads)) hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP; + else + hlreg0 |= IXGBE_HLREG0_RXCRCSTRP; /* * Configure jumbo frame support, if any. */ - if (rx_conf->jumbo_frame == 1) { + if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { hlreg0 |= IXGBE_HLREG0_JUMBOEN; maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); maxfrs &= 0x0000FFFF; @@ -4757,6 +4882,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + /* + * Assume no header split and no VLAN strip support + * on any Rx queue first . + */ + rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; /* Setup RX queues */ for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; @@ -4765,7 +4895,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) * Reset crc_len in case it was changed after queue setup by a * call to configure. */ - rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN; + rxq->crc_len = rte_eth_dev_must_keep_crc(rx_conf->offloads) ? 
+ ETHER_CRC_LEN : 0; /* Setup the Base and Length of the Rx Descriptor Rings */ bus_addr = rxq->rx_ring_phys_addr; @@ -4779,28 +4910,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0); /* Configure the SRRCTL register */ -#ifdef RTE_HEADER_SPLIT_ENABLE - /* - * Configure Header Split - */ - if (rx_conf->header_split) { - if (hw->mac.type == ixgbe_mac_82599EB) { - /* Must setup the PSRTYPE register */ - uint32_t psrtype; - - psrtype = IXGBE_PSRTYPE_TCPHDR | - IXGBE_PSRTYPE_UDPHDR | - IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_IPV6HDR; - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype); - } - srrctl = ((rx_conf->split_hdr_size << - IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & - IXGBE_SRRCTL_BSIZEHDR_MASK); - srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - } else -#endif - srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; /* Set if packets are dropped when no descriptors available */ if (rxq->drop_en) @@ -4826,9 +4936,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) if (dev->data->dev_conf.rxmode.max_rx_pkt_len + 2 * IXGBE_VLAN_TAG_SIZE > buf_size) dev->data->scattered_rx = 1; + if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; } - if (rx_conf->enable_scatter) + if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) dev->data->scattered_rx = 1; /* @@ -4843,7 +4955,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) */ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); rxcsum |= IXGBE_RXCSUM_PCSD; - if (rx_conf->hw_ip_checksum) + if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) rxcsum |= IXGBE_RXCSUM_IPPCSE; else rxcsum &= ~IXGBE_RXCSUM_IPPCSE; @@ -4853,10 +4965,10 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) if (hw->mac.type == ixgbe_mac_82599EB || hw->mac.type == ixgbe_mac_X540) { rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); - if (rx_conf->hw_strip_crc) - rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; - else + if (rte_eth_dev_must_keep_crc(rx_conf->offloads)) rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP; + else + rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); } @@ -5064,34 +5176,30 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (rx_queue_id < dev->data->nb_rx_queues) { - rxq = dev->data->rx_queues[rx_queue_id]; - - /* Allocate buffers for descriptor rings */ - if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) { - PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d", - rx_queue_id); - return -1; - } - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); - rxdctl |= IXGBE_RXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); + rxq = dev->data->rx_queues[rx_queue_id]; - /* Wait until RX Enable ready */ - poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; - do { - rte_delay_ms(1); - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); - } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); - if (!poll_ms) - PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", - rx_queue_id); - rte_wmb(); - IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0); - IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1); - dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; - } else + /* Allocate buffers for descriptor rings */ + if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) { + PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d", + rx_queue_id); return -1; + } + rxdctl = IXGBE_READ_REG(hw, 
IXGBE_RXDCTL(rxq->reg_idx)); + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); + + /* Wait until RX Enable ready */ + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id); + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; return 0; } @@ -5112,30 +5220,26 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (rx_queue_id < dev->data->nb_rx_queues) { - rxq = dev->data->rx_queues[rx_queue_id]; + rxq = dev->data->rx_queues[rx_queue_id]; - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); - rxdctl &= ~IXGBE_RXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); - /* Wait until RX Enable bit clear */ - poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; - do { - rte_delay_ms(1); - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); - } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE)); - if (!poll_ms) - PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", - rx_queue_id); + /* Wait until RX Enable bit clear */ + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id); - rte_delay_us(RTE_IXGBE_WAIT_100_US); + rte_delay_us(RTE_IXGBE_WAIT_100_US); - ixgbe_rx_queue_release_mbufs(rxq); - ixgbe_reset_rx_queue(adapter, rxq); - dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; - } else - return -1; + ixgbe_rx_queue_release_mbufs(rxq); + ixgbe_reset_rx_queue(adapter, rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; return 0; } @@ -5155,30 +5259,27 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (tx_queue_id < dev->data->nb_tx_queues) { - txq = dev->data->tx_queues[tx_queue_id]; - txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); - txdctl |= IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); + txq = dev->data->tx_queues[tx_queue_id]; + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); - /* Wait until TX Enable ready */ - if (hw->mac.type == ixgbe_mac_82599EB) { - poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; - do { - rte_delay_ms(1); - txdctl = IXGBE_READ_REG(hw, - IXGBE_TXDCTL(txq->reg_idx)); - } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); - if (!poll_ms) - PMD_INIT_LOG(ERR, "Could not enable " - "Tx Queue %d", tx_queue_id); - } - rte_wmb(); - IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0); - IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0); - dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; - } else - return -1; + /* Wait until TX Enable ready */ + if (hw->mac.type == ixgbe_mac_82599EB) { + 
poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + txdctl = IXGBE_READ_REG(hw, + IXGBE_TXDCTL(txq->reg_idx)); + } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", + tx_queue_id); + } + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; return 0; } @@ -5198,9 +5299,6 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (tx_queue_id >= dev->data->nb_tx_queues) - return -1; - txq = dev->data->tx_queues[tx_queue_id]; /* Wait until TX queue is empty */ @@ -5214,8 +5312,9 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) IXGBE_TDT(txq->reg_idx)); } while (--poll_ms && (txtdh != txtdt)); if (!poll_ms) - PMD_INIT_LOG(ERR, "Tx Queue %d is not empty " - "when stopping.", tx_queue_id); + PMD_INIT_LOG(ERR, + "Tx Queue %d is not empty when stopping.", + tx_queue_id); } txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); @@ -5231,8 +5330,8 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) IXGBE_TXDCTL(txq->reg_idx)); } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE)); if (!poll_ms) - PMD_INIT_LOG(ERR, "Could not disable " - "Tx Queue %d", tx_queue_id); + PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d", + tx_queue_id); } if (txq->ops != NULL) { @@ -5259,6 +5358,7 @@ ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; qinfo->conf.rx_drop_en = rxq->drop_en; qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; + qinfo->conf.offloads = rxq->offloads; } void @@ -5277,7 +5377,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_free_thresh = txq->tx_free_thresh; qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; - qinfo->conf.txq_flags = txq->txq_flags; + qinfo->conf.offloads = txq->offloads; qinfo->conf.tx_deferred_start = txq->tx_deferred_start; } @@ -5289,6 +5389,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; struct ixgbe_rx_queue *rxq; + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; uint64_t bus_addr; uint32_t srrctl, psrtype = 0; uint16_t buf_size; @@ -5328,6 +5429,11 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) ixgbevf_rlpml_set_vf(hw, (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len); + /* + * Assume no header split and no VLAN strip support + * on any Rx queue first . 
+ */ + rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; /* Setup RX queues */ for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; @@ -5351,18 +5457,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) /* Configure the SRRCTL register */ -#ifdef RTE_HEADER_SPLIT_ENABLE - /* - * Configure Header Split - */ - if (dev->data->dev_conf.rxmode.header_split) { - srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size << - IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & - IXGBE_SRRCTL_BSIZEHDR_MASK); - srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - } else -#endif - srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; /* Set if packets are dropped when no descriptors available */ if (rxq->drop_en) @@ -5387,24 +5482,18 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) << IXGBE_SRRCTL_BSIZEPKT_SHIFT); - if (dev->data->dev_conf.rxmode.enable_scatter || + if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER || /* It adds dual VLAN length for supporting dual VLAN */ - (dev->data->dev_conf.rxmode.max_rx_pkt_len + + (rxmode->max_rx_pkt_len + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->data->scattered_rx = 1; } - } -#ifdef RTE_HEADER_SPLIT_ENABLE - if (dev->data->dev_conf.rxmode.header_split) - /* Must setup the PSRTYPE register */ - psrtype = IXGBE_PSRTYPE_TCPHDR | - IXGBE_PSRTYPE_UDPHDR | - IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_IPV6HDR; -#endif + if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } /* Set RQPL for VF RSS according to max Rx queue */ psrtype |= (dev->data->nb_rx_queues >> 1) << @@ -5521,6 +5610,40 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } } +int +ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in) +{ + if (in->key_len > RTE_DIM(out->key) || + in->queue_num > RTE_DIM(out->queue)) + return -EINVAL; + out->conf = (struct rte_flow_action_rss){ + .func = in->func, + .level = in->level, + .types = in->types, + .key_len = in->key_len, + .queue_num = in->queue_num, + .key = memcpy(out->key, in->key, in->key_len), + .queue = memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num), + }; + return 0; +} + +int +ixgbe_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with) +{ + return (comp->func == with->func && + comp->level == with->level && + comp->types == with->types && + comp->key_len == with->key_len && + comp->queue_num == with->queue_num && + !memcmp(comp->key, with->key, with->key_len) && + !memcmp(comp->queue, with->queue, + sizeof(*with->queue) * with->queue_num)); +} + int ixgbe_config_rss_filter(struct rte_eth_dev *dev, struct ixgbe_rte_flow_rss_conf *conf, bool add) @@ -5531,7 +5654,12 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev, uint16_t j; uint16_t sp_reta_size; uint32_t reta_reg; - struct rte_eth_rss_conf rss_conf = conf->rss_conf; + struct rte_eth_rss_conf rss_conf = { + .rss_key = conf->conf.key_len ? 
+ (void *)(uintptr_t)conf->conf.key : NULL, + .rss_key_len = conf->conf.key_len, + .rss_hf = conf->conf.types, + }; struct ixgbe_filter_info *filter_info = IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); @@ -5541,8 +5669,8 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev, sp_reta_size = ixgbe_reta_size_get(hw->mac.type); if (!add) { - if (memcmp(conf, &filter_info->rss_info, - sizeof(struct ixgbe_rte_flow_rss_conf)) == 0) { + if (ixgbe_action_rss_same(&filter_info->rss_info.conf, + &conf->conf)) { ixgbe_rss_disable(dev); memset(&filter_info->rss_info, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); @@ -5551,7 +5679,7 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev, return -EINVAL; } - if (filter_info->rss_info.num) + if (filter_info->rss_info.conf.queue_num) return -EINVAL; /* Fill in redirection table * The byte-swap is needed because NIC registers are in @@ -5561,9 +5689,9 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev, for (i = 0, j = 0; i < sp_reta_size; i++, j++) { reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); - if (j == conf->num) + if (j == conf->conf.queue_num) j = 0; - reta = (reta << 8) | conf->queue[j]; + reta = (reta << 8) | conf->conf.queue[j]; if ((i & 3) == 3) IXGBE_WRITE_REG(hw, reta_reg, rte_bswap32(reta)); @@ -5580,8 +5708,8 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev, rss_conf.rss_key = rss_intel_key; /* Default hash key */ ixgbe_hw_rss_hash_set(hw, &rss_conf); - rte_memcpy(&filter_info->rss_info, - conf, sizeof(struct ixgbe_rte_flow_rss_conf)); + if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf)) + return -EINVAL; return 0; } diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h index 69c718bc..39378f75 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.h +++ b/drivers/net/ixgbe/ixgbe_rxtx.h @@ -129,6 +129,7 @@ struct ixgbe_rx_queue { uint8_t rx_deferred_start; /**< not in global dev start. */ /** flags to set in mbuf when a vlan is detected. */ uint64_t vlan_flags; + uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */ /** need to alloc dummy mbuf, for wraparound when scanning hw ring */ struct rte_mbuf fake_mbuf; /** hold packets to return to application */ @@ -221,7 +222,7 @@ struct ixgbe_tx_queue { uint8_t pthresh; /**< Prefetch threshold register. */ uint8_t hthresh; /**< Host threshold register. */ uint8_t wthresh; /**< Write-back threshold reg. */ - uint32_t txq_flags; /**< Holds flags for this TXq */ + uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */ uint32_t ctx_curr; /**< Hardware context states. */ /** Hardware context0 history. */ struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM]; @@ -239,20 +240,6 @@ struct ixgbe_txq_ops { void (*reset)(struct ixgbe_tx_queue *txq); }; -/* - * The "simple" TX queue functions require that the following - * flags are set when the TX queue is configured: - * - ETH_TXQ_FLAGS_NOMULTSEGS - * - ETH_TXQ_FLAGS_NOVLANOFFL - * - ETH_TXQ_FLAGS_NOXSUMSCTP - * - ETH_TXQ_FLAGS_NOXSUMUDP - * - ETH_TXQ_FLAGS_NOXSUMTCP - * and that the RS bit threshold (tx_rs_thresh) is at least equal to - * RTE_PMD_IXGBE_TX_MAX_BURST. - */ -#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ - ETH_TXQ_FLAGS_NOOFFLOADS) - /* * Populate descriptors with the following info: * 1.) 
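The ixgbe_rss_conf_init()/ixgbe_action_rss_same() pair above replaces the old whole-struct rte_memcpy()/memcmp(): rte_flow_action_rss now carries pointers to its key and queue list, so the driver must deep-copy into its own storage and compare field by field. A hedged usage sketch, assuming the driver's internal header for struct ixgbe_rte_flow_rss_conf; the wrapper name is illustrative:

    #include <errno.h>
    #include <stdbool.h>
    /* #include "ixgbe_ethdev.h"  -- driver-internal types assumed */

    /* Persist an RSS action into driver-owned storage on add, or
     * match a later request against it on delete; error codes mirror
     * ixgbe_config_rss_filter() above. */
    static int
    rss_store_or_match(struct ixgbe_rte_flow_rss_conf *stored,
                       const struct rte_flow_action_rss *in, bool add)
    {
        if (add)
            /* Deep copy: the caller's key/queue pointers may dangle. */
            return ixgbe_rss_conf_init(stored, in) ? -EINVAL : 0;
        return ixgbe_action_rss_same(&stored->conf, in) ? 0 : -EINVAL;
    }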
buffer_addr = phys_addr + headroom @@ -305,6 +292,11 @@ extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX]; uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq); - #endif /* RTE_IXGBE_INC_VECTOR */ + +uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev); +uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev); +uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev); +uint64_t ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev); + #endif /* _IXGBE_RXTX_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h index 414840a2..a97c2718 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h @@ -278,17 +278,12 @@ static inline int ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev) { #ifndef RTE_LIBRTE_IEEE1588 - struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf; /* no fdir support */ if (fconf->mode != RTE_FDIR_MODE_NONE) return -1; - /* no header split support */ - if (rxmode->header_split == 1) - return -1; - return 0; #else RTE_SET_USED(dev); diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c index e0f9998f..edb13835 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c @@ -515,7 +515,7 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; /* no csum error report support */ - if (rxmode->hw_ip_checksum == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) return -1; return ixgbe_rx_vec_dev_conf_condition_check_default(dev); diff --git a/drivers/net/ixgbe/ixgbe_vf_representor.c b/drivers/net/ixgbe/ixgbe_vf_representor.c new file mode 100644 index 00000000..db516d99 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_vf_representor.c @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation. + */ + +#include +#include +#include + +#include "base/ixgbe_type.h" +#include "base/ixgbe_vf.h" +#include "ixgbe_ethdev.h" +#include "ixgbe_rxtx.h" +#include "rte_pmd_ixgbe.h" + + +static int +ixgbe_vf_representor_link_update(struct rte_eth_dev *ethdev, + int wait_to_complete) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + + return ixgbe_dev_link_update_share(representor->pf_ethdev, + wait_to_complete, 0); +} + +static int +ixgbe_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev, + struct ether_addr *mac_addr) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + + return rte_pmd_ixgbe_set_vf_mac_addr( + representor->pf_ethdev->data->port_id, + representor->vf_id, mac_addr); +} + +static void +ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev, + struct rte_eth_dev_info *dev_info) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW( + representor->pf_ethdev->data->dev_private); + + dev_info->device = representor->pf_ethdev->device; + + dev_info->min_rx_bufsize = 1024; + /**< Minimum size of RX buffer. */ + dev_info->max_rx_pktlen = 9728; + /**< Maximum configurable length of RX pkt. */ + dev_info->max_rx_queues = IXGBE_VF_MAX_RX_QUEUES; + /**< Maximum number of RX queues. 
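The rxtx.h hunk retires txq_flags in favour of a 64-bit offloads field and exposes per-port and per-queue capability getters. The way such getters are meant to be consumed follows the 18.xx ethdev convention (this sketch is not a verbatim excerpt of the driver): every offload requested for a queue must appear in the queue-level or port-level capability mask.

    /* Hedged validation sketch using the getters declared above. */
    static int
    rx_queue_offloads_valid(struct rte_eth_dev *dev, uint64_t requested)
    {
        uint64_t supported = ixgbe_get_rx_queue_offloads(dev) |
                             ixgbe_get_rx_port_offloads(dev);

        return (requested & ~supported) == 0;
    }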
*/ + dev_info->max_tx_queues = IXGBE_VF_MAX_TX_QUEUES; + /**< Maximum number of TX queues. */ + + dev_info->max_mac_addrs = hw->mac.num_rar_entries; + /**< Maximum number of MAC addresses. */ + + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + /**< Device RX offload capabilities. */ + + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + /**< Device TX offload capabilities. */ + + dev_info->speed_capa = + representor->pf_ethdev->data->dev_link.link_speed; + /**< Supported speeds bitmap (ETH_LINK_SPEED_). */ + + dev_info->switch_info.name = + representor->pf_ethdev->device->name; + dev_info->switch_info.domain_id = representor->switch_domain_id; + dev_info->switch_info.port_id = representor->vf_id; +} + +static int ixgbe_vf_representor_dev_configure( + __rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static int ixgbe_vf_representor_rx_queue_setup( + __rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t rx_queue_id, + __rte_unused uint16_t nb_rx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + __rte_unused struct rte_mempool *mb_pool) +{ + return 0; +} + +static int ixgbe_vf_representor_tx_queue_setup( + __rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t rx_queue_id, + __rte_unused uint16_t nb_rx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_txconf *tx_conf) +{ + return 0; +} + +static int ixgbe_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static void ixgbe_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev) +{ +} + +static int +ixgbe_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev, + uint16_t vlan_id, int on) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + uint64_t vf_mask = 1ULL << representor->vf_id; + + return rte_pmd_ixgbe_set_vf_vlan_filter( + representor->pf_ethdev->data->port_id, vlan_id, vf_mask, on); +} + +static void +ixgbe_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev, + __rte_unused uint16_t rx_queue_id, int on) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_ixgbe_set_vf_vlan_stripq(representor->pf_ethdev->data->port_id, + representor->vf_id, on); +} + +struct eth_dev_ops ixgbe_vf_representor_dev_ops = { + .dev_infos_get = ixgbe_vf_representor_dev_infos_get, + + .dev_start = ixgbe_vf_representor_dev_start, + .dev_configure = ixgbe_vf_representor_dev_configure, + .dev_stop = ixgbe_vf_representor_dev_stop, + + .rx_queue_setup = ixgbe_vf_representor_rx_queue_setup, + .tx_queue_setup = ixgbe_vf_representor_tx_queue_setup, + + .link_update = ixgbe_vf_representor_link_update, + + .vlan_filter_set = ixgbe_vf_representor_vlan_filter_set, + .vlan_strip_queue_set = ixgbe_vf_representor_vlan_strip_queue_set, + + .mac_addr_set = ixgbe_vf_representor_mac_addr_set, +}; + +static uint16_t +ixgbe_vf_representor_rx_burst(__rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +static uint16_t +ixgbe_vf_representor_tx_burst(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +int +ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params) +{ + struct 
ixgbe_vf_representor *representor = ethdev->data->dev_private; + + struct ixgbe_vf_info *vf_data; + struct rte_pci_device *pci_dev; + struct rte_eth_link *link; + + if (!representor) + return -ENOMEM; + + representor->vf_id = + ((struct ixgbe_vf_representor *)init_params)->vf_id; + representor->switch_domain_id = + ((struct ixgbe_vf_representor *)init_params)->switch_domain_id; + representor->pf_ethdev = + ((struct ixgbe_vf_representor *)init_params)->pf_ethdev; + + pci_dev = RTE_ETH_DEV_TO_PCI(representor->pf_ethdev); + + if (representor->vf_id >= pci_dev->max_vfs) + return -ENODEV; + + ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; + + /* Set representor device ops */ + ethdev->dev_ops = &ixgbe_vf_representor_dev_ops; + + /* No data-path, but need stub Rx/Tx functions to avoid crash + * when testing with the likes of testpmd. + */ + ethdev->rx_pkt_burst = ixgbe_vf_representor_rx_burst; + ethdev->tx_pkt_burst = ixgbe_vf_representor_tx_burst; + + /* Setting the number queues allocated to the VF */ + ethdev->data->nb_rx_queues = IXGBE_VF_MAX_RX_QUEUES; + ethdev->data->nb_tx_queues = IXGBE_VF_MAX_RX_QUEUES; + + /* Reference VF mac address from PF data structure */ + vf_data = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( + representor->pf_ethdev->data->dev_private); + + ethdev->data->mac_addrs = (struct ether_addr *) + vf_data[representor->vf_id].vf_mac_addresses; + + /* Link state. Inherited from PF */ + link = &representor->pf_ethdev->data->dev_link; + + ethdev->data->dev_link.link_speed = link->link_speed; + ethdev->data->dev_link.link_duplex = link->link_duplex; + ethdev->data->dev_link.link_status = link->link_status; + ethdev->data->dev_link.link_autoneg = link->link_autoneg; + + return 0; +} + +int +ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused) +{ + return 0; +} diff --git a/drivers/net/ixgbe/meson.build b/drivers/net/ixgbe/meson.build index 60af0bae..02d5ef5e 100644 --- a/drivers/net/ixgbe/meson.build +++ b/drivers/net/ixgbe/meson.build @@ -1,6 +1,8 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation +version = 2 + cflags += ['-DRTE_LIBRTE_IXGBE_BYPASS'] subdir('base') @@ -17,6 +19,7 @@ sources = files( 'ixgbe_pf.c', 'ixgbe_rxtx.c', 'ixgbe_tm.c', + 'ixgbe_vf_representor.c', 'rte_pmd_ixgbe.c' ) diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c index d8ca8ca3..3a874f9a 100644 --- a/drivers/net/ixgbe/rte_pmd_ixgbe.c +++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c @@ -5,6 +5,7 @@ #include #include "base/ixgbe_api.h" +#include "base/ixgbe_x550.h" #include "ixgbe_ethdev.h" #include "rte_pmd_ixgbe.h" @@ -880,6 +881,34 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port, return 0; } +int __rte_experimental +rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable) +{ + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + uint32_t fctrl; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + + /* If 'enable' set the SBP bit else clear it */ + if (enable) + fctrl |= IXGBE_FCTRL_SBP; + else + fctrl &= ~(IXGBE_FCTRL_SBP); + + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + return 0; +} + #ifdef RTE_LIBRTE_IXGBE_BYPASS int rte_pmd_ixgbe_bypass_init(uint16_t port_id) @@ -1012,3 +1041,204 @@ rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id) return ixgbe_bypass_wd_reset(dev); } #endif + +/** + * rte_pmd_ixgbe_acquire_swfw - Acquire SWFW 
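rte_pmd_ixgbe_upd_fctrl_sbp() above toggles the SBP (store bad packets) bit in FCTRL, which makes the MAC deliver every frame, including those with CRC errors. A usage sketch; the port number is illustrative and the function is ixgbe-specific and experimental:

    #include <rte_pmd_ixgbe.h>

    static int
    receive_all_frames(uint16_t port)
    {
        int ret = rte_pmd_ixgbe_upd_fctrl_sbp(port, 1 /* enable */);

        if (ret != 0)
            return ret; /* -ENODEV or -ENOTSUP per the API contract */
        return 0;
    }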
semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore and get the shared phy token as needed + */ +STATIC s32 rte_pmd_ixgbe_acquire_swfw(struct ixgbe_hw *hw, u32 mask) +{ + int retries = FW_PHY_TOKEN_RETRIES; + s32 status = IXGBE_SUCCESS; + + while (--retries) { + status = ixgbe_acquire_swfw_semaphore(hw, mask); + if (status) { + PMD_DRV_LOG(ERR, "Get SWFW sem failed, Status = %d\n", + status); + return status; + } + status = ixgbe_get_phy_token(hw); + if (status == IXGBE_SUCCESS) + return IXGBE_SUCCESS; + + if (status == IXGBE_ERR_TOKEN_RETRY) + PMD_DRV_LOG(ERR, "Get PHY token failed, Status = %d\n", + status); + + ixgbe_release_swfw_semaphore(hw, mask); + if (status != IXGBE_ERR_TOKEN_RETRY) { + PMD_DRV_LOG(ERR, + "Retry get PHY token failed, Status=%d\n", + status); + return status; + } + } + PMD_DRV_LOG(ERR, "swfw acquisition retries failed!: PHY ID = 0x%08X\n", + hw->phy.id); + return status; +} + +/** + * rte_pmd_ixgbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore and puts the shared phy token as needed + */ +STATIC void rte_pmd_ixgbe_release_swfw(struct ixgbe_hw *hw, u32 mask) +{ + ixgbe_put_phy_token(hw); + ixgbe_release_swfw_semaphore(hw, mask); +} + +int __rte_experimental +rte_pmd_ixgbe_mdio_lock(uint16_t port) +{ + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + u32 swfw_mask; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + if (hw->bus.lan_id) + swfw_mask = IXGBE_GSSR_PHY1_SM; + else + swfw_mask = IXGBE_GSSR_PHY0_SM; + + if (rte_pmd_ixgbe_acquire_swfw(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + + return IXGBE_SUCCESS; +} + +int __rte_experimental +rte_pmd_ixgbe_mdio_unlock(uint16_t port) +{ + struct rte_eth_dev *dev; + struct ixgbe_hw *hw; + u32 swfw_mask; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + if (hw->bus.lan_id) + swfw_mask = IXGBE_GSSR_PHY1_SM; + else + swfw_mask = IXGBE_GSSR_PHY0_SM; + + rte_pmd_ixgbe_release_swfw(hw, swfw_mask); + + return IXGBE_SUCCESS; +} + +int __rte_experimental +rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr, + uint32_t dev_type, uint16_t *phy_data) +{ + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + u32 i, data, command; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + /* Setup and write the read command */ + command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) | + IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC | + IXGBE_MSCA_MDI_COMMAND; + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the access completed. 
+ * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if (!(command & IXGBE_MSCA_MDI_COMMAND)) + break; + } + if (command & IXGBE_MSCA_MDI_COMMAND) + return IXGBE_ERR_PHY; + + /* Read operation is complete. Get the data from MSRWD */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)data; + + return 0; +} + +int __rte_experimental +rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr, + uint32_t dev_type, uint16_t phy_data) +{ + struct ixgbe_hw *hw; + u32 i, command; + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + + /* Setup and write the write command */ + command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) | + IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | + IXGBE_MSCA_MDI_COMMAND; + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the access completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if (!(command & IXGBE_MSCA_MDI_COMMAND)) + break; + } + if (command & IXGBE_MSCA_MDI_COMMAND) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PHY write cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + return 0; +} diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h index 11a9f334..72a941f9 100644 --- a/drivers/net/ixgbe/rte_pmd_ixgbe.h +++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h @@ -573,6 +573,77 @@ int rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port, uint32_t *wd_timeout); */ int rte_pmd_ixgbe_bypass_wd_reset(uint16_t port); +/** + * Acquire swfw semaphore lock for MDIO access + * + * @param port + * The port identifier of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port* invalid. + * - (IXGBE_ERR_SWFW_SYNC) If sw/fw semaphore acquisition failed + */ +int __rte_experimental +rte_pmd_ixgbe_mdio_lock(uint16_t port); + +/** + * Release swfw semaphore lock used for MDIO access + * + * @param port + * The port identifier of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port* invalid. + */ +int __rte_experimental +rte_pmd_ixgbe_mdio_unlock(uint16_t port); + +/** + * Read PHY register using MDIO without MDIO lock + * The lock must be taken separately before calling this + * API + * @param port + * The port identifier of the Ethernet device. + * @param reg_addr + * 32 bit PHY Register + * @param dev_type + * Used to define device base address + * @param phy_data + * Pointer for reading PHY register data + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port* invalid. 
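The MDIO calls documented above are designed to be combined: the SWFW semaphore is taken once, any number of unlocked register accesses follow, then the semaphore is released. A hedged usage sketch; the register and device-type values are placeholders:

    #include <rte_pmd_ixgbe.h>

    static int
    mdio_read_one(uint16_t port, uint32_t reg, uint32_t dev_type,
                  uint16_t *val)
    {
        int ret = rte_pmd_ixgbe_mdio_lock(port);

        if (ret != 0)
            return ret; /* -ENODEV, -ENOTSUP or IXGBE_ERR_SWFW_SYNC */
        ret = rte_pmd_ixgbe_mdio_unlocked_read(port, reg, dev_type, val);
        rte_pmd_ixgbe_mdio_unlock(port);
        return ret;
    }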
+ * - (IXGBE_ERR_PHY) If PHY read command failed +*/ +int __rte_experimental +rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr, + uint32_t dev_type, uint16_t *phy_data); + +/** + * Write data to PHY register without MDIO lock + * The lock must be taken separately before calling this + * API + * + * @param port + * The port identifier of the Ethernet device. + * @param reg_addr + * 32 bit PHY Register + * @param dev_type + * Used to define device base address + * @param phy_data + * Data to write to PHY register + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port* invalid. + * - (IXGBE_ERR_PHY) If PHY write command failed + */ +int __rte_experimental +rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr, + uint32_t dev_type, uint16_t phy_data); /** * Response sent back to ixgbe driver from user app after callback @@ -637,4 +708,17 @@ enum { ((x) > RTE_PMD_IXGBE_BYPASS_TMT_OFF && \ (x) < RTE_PMD_IXGBE_BYPASS_TMT_NUM)) +/** + * @param port + * The port identifier of the Ethernet device. + * @param enable + * 0 to disable and nonzero to enable 'SBP' bit in FCTRL register + * to receive all packets + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. + */ +int __rte_experimental +rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable); #endif /* _PMD_IXGBE_H_ */ diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map index bf776742..c814f96d 100644 --- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map +++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map @@ -52,3 +52,13 @@ DPDK_17.08 { rte_pmd_ixgbe_bypass_wd_timeout_show; rte_pmd_ixgbe_bypass_wd_timeout_store; } DPDK_17.05; + +EXPERIMENTAL { + global: + + rte_pmd_ixgbe_mdio_lock; + rte_pmd_ixgbe_mdio_unlock; + rte_pmd_ixgbe_mdio_unlocked_read; + rte_pmd_ixgbe_mdio_unlocked_write; + rte_pmd_ixgbe_upd_fctrl_sbp; +}; diff --git a/drivers/net/kni/Makefile b/drivers/net/kni/Makefile index 01eaef05..562e8d2d 100644 --- a/drivers/net/kni/Makefile +++ b/drivers/net/kni/Makefile @@ -10,6 +10,7 @@ LIB = librte_pmd_kni.a CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) +CFLAGS += -DALLOW_EXPERIMENTAL_API LDLIBS += -lpthread LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_kni diff --git a/drivers/net/kni/meson.build b/drivers/net/kni/meson.build new file mode 100644 index 00000000..0f784c6d --- /dev/null +++ b/drivers/net/kni/meson.build @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +# this driver can be built if-and-only-if KNI library is buildable +build = dpdk_conf.has('RTE_LIBRTE_KNI') +allow_experimental_apis = true +sources = files('rte_eth_kni.c') +deps += 'kni' diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c index dc4e65f5..085bb845 100644 --- a/drivers/net/kni/rte_eth_kni.c +++ b/drivers/net/kni/rte_eth_kni.c @@ -61,10 +61,15 @@ static const struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_AUTONEG, + .link_autoneg = ETH_LINK_FIXED, }; static int is_kni_initialized; +static int eth_kni_logtype; + +#define PMD_LOG(level, fmt, args...)
\ + rte_log(RTE_LOG_ ## level, eth_kni_logtype, \ + "%s(): " fmt "\n", __func__, ##args) static uint16_t eth_kni_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) { @@ -126,8 +131,8 @@ eth_kni_start(struct rte_eth_dev *dev) internals->kni = rte_kni_alloc(mb_pool, &conf, NULL); if (internals->kni == NULL) { - RTE_LOG(ERR, PMD, - "Fail to create kni interface for port: %d\n", + PMD_LOG(ERR, + "Fail to create kni interface for port: %d", port_id); return -1; } @@ -149,11 +154,12 @@ eth_kni_dev_start(struct rte_eth_dev *dev) } if (internals->no_request_thread == 0) { - ret = pthread_create(&internals->thread, NULL, + ret = rte_ctrl_thread_create(&internals->thread, + "kni_handle_req", NULL, kni_handle_request, internals); if (ret) { - RTE_LOG(ERR, PMD, - "Fail to create kni request thread\n"); + PMD_LOG(ERR, + "Fail to create kni request thread"); return -1; } } @@ -174,11 +180,11 @@ eth_kni_dev_stop(struct rte_eth_dev *dev) ret = pthread_cancel(internals->thread); if (ret) - RTE_LOG(ERR, PMD, "Can't cancel the thread\n"); + PMD_LOG(ERR, "Can't cancel the thread"); ret = pthread_join(internals->thread, NULL); if (ret) - RTE_LOG(ERR, PMD, "Can't join the thread\n"); + PMD_LOG(ERR, "Can't join the thread"); internals->stop_thread = 0; } @@ -201,7 +207,7 @@ eth_kni_dev_info(struct rte_eth_dev *dev __rte_unused, dev_info->max_rx_queues = KNI_MAX_QUEUE_PER_PORT; dev_info->max_tx_queues = KNI_MAX_QUEUE_PER_PORT; dev_info->min_rx_bufsize = 0; - dev_info->pci_dev = NULL; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP; } static int @@ -337,25 +343,17 @@ eth_kni_create(struct rte_vdev_device *vdev, struct pmd_internals *internals; struct rte_eth_dev_data *data; struct rte_eth_dev *eth_dev; - const char *name; - RTE_LOG(INFO, PMD, "Creating kni ethdev on numa socket %u\n", + PMD_LOG(INFO, "Creating kni ethdev on numa socket %u", numa_node); - name = rte_vdev_device_name(vdev); - data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); - if (data == NULL) - return NULL; - /* reserve an ethdev entry */ eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*internals)); - if (eth_dev == NULL) { - rte_free(data); + if (!eth_dev) return NULL; - } internals = eth_dev->data->dev_private; - rte_memcpy(data, eth_dev->data, sizeof(*data)); + data = eth_dev->data; data->nb_rx_queues = 1; data->nb_tx_queues = 1; data->dev_link = pmd_link; @@ -363,7 +361,6 @@ eth_kni_create(struct rte_vdev_device *vdev, eth_random_addr(internals->eth_addr.addr_bytes); - eth_dev->data = data; eth_dev->dev_ops = &eth_kni_ops; internals->no_request_thread = args->no_request_thread; @@ -412,7 +409,21 @@ eth_kni_probe(struct rte_vdev_device *vdev) name = rte_vdev_device_name(vdev); params = rte_vdev_device_args(vdev); - RTE_LOG(INFO, PMD, "Initializing eth_kni for %s\n", name); + PMD_LOG(INFO, "Initializing eth_kni for %s", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(params) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + PMD_LOG(ERR, "Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &eth_kni_ops; + eth_dev->device = &vdev->device; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } ret = eth_kni_kvargs_process(&args, params); if (ret < 0) @@ -429,6 +440,7 @@ eth_kni_probe(struct rte_vdev_device *vdev) eth_dev->rx_pkt_burst = eth_kni_rx; eth_dev->tx_pkt_burst = eth_kni_tx; + rte_eth_dev_probing_finish(eth_dev); return 0; kni_uninit: @@ -446,7 +458,7 @@ eth_kni_remove(struct rte_vdev_device *vdev) const
char *name; name = rte_vdev_device_name(vdev); - RTE_LOG(INFO, PMD, "Un-Initializing eth_kni for %s\n", name); + PMD_LOG(INFO, "Un-Initializing eth_kni for %s", name); /* find the ethdev entry */ eth_dev = rte_eth_dev_allocated(name); @@ -459,7 +471,6 @@ eth_kni_remove(struct rte_vdev_device *vdev) rte_kni_release(internals->kni); rte_free(internals); - rte_free(eth_dev->data); rte_eth_dev_release_port(eth_dev); @@ -477,3 +488,10 @@ static struct rte_vdev_driver eth_kni_drv = { RTE_PMD_REGISTER_VDEV(net_kni, eth_kni_drv); RTE_PMD_REGISTER_PARAM_STRING(net_kni, ETH_KNI_NO_REQUEST_THREAD_ARG "="); + +RTE_INIT(eth_kni_init_log) +{ + eth_kni_logtype = rte_log_register("pmd.net.kni"); + if (eth_kni_logtype >= 0) + rte_log_set_level(eth_kni_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/liquidio/Makefile b/drivers/net/liquidio/Makefile index fc5f18ad..f1092851 100644 --- a/drivers/net/liquidio/Makefile +++ b/drivers/net/liquidio/Makefile @@ -15,7 +15,7 @@ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs LDLIBS += -lrte_bus_pci -EXPORT_MAP := rte_pmd_lio_version.map +EXPORT_MAP := rte_pmd_liquidio_version.map LIBABIVER := 1 diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c index e1a20cd6..93e89007 100644 --- a/drivers/net/liquidio/lio_ethdev.c +++ b/drivers/net/liquidio/lio_ethdev.c @@ -373,8 +373,6 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev, struct lio_device *lio_dev = LIO_DEV(eth_dev); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); - devinfo->pci_dev = pci_dev; - switch (pci_dev->id.subsystem_device_id) { /* CN23xx 10G cards */ case PCI_SUBSYS_DEV_ID_CN2350_210: @@ -478,9 +476,11 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) } if (frame_len > ETHER_MAX_LEN) - eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; + eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len; eth_dev->data->mtu = mtu; @@ -904,32 +904,6 @@ lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on) return 0; } -/** - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param eth_dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. 
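The KNI hunks above replace static RTE_LOG(..., PMD, ...) calls with a driver-private dynamic logtype registered at constructor time; the same three-part pattern (logtype variable, wrapper macro, RTE_INIT registration) recurs across drivers in this release. As a standalone sketch, with a placeholder component name:

    #include <rte_log.h>

    static int example_logtype;

    #define EXAMPLE_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, example_logtype, \
            "%s(): " fmt "\n", __func__, ##args)

    RTE_INIT(example_init_log)
    {
        example_logtype = rte_log_register("pmd.net.example");
        if (example_logtype >= 0)
            rte_log_set_level(example_logtype, RTE_LOG_NOTICE);
    }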
- */ -static inline int -lio_dev_atomic_write_link_status(struct rte_eth_dev *eth_dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &eth_dev->data->dev_link; - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - static uint64_t lio_hweight64(uint64_t w) { @@ -949,23 +923,19 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete __rte_unused) { struct lio_device *lio_dev = LIO_DEV(eth_dev); - struct rte_eth_link link, old; + struct rte_eth_link link; /* Initialize */ + memset(&link, 0, sizeof(link)); link.link_status = ETH_LINK_DOWN; link.link_speed = ETH_SPEED_NUM_NONE; link.link_duplex = ETH_LINK_HALF_DUPLEX; link.link_autoneg = ETH_LINK_AUTONEG; - memset(&old, 0, sizeof(old)); /* Return what we found */ if (lio_dev->linfo.link.s.link_up == 0) { /* Interface is down */ - if (lio_dev_atomic_write_link_status(eth_dev, &link)) - return -1; - if (link.link_status == old.link_status) - return -1; - return 0; + return rte_eth_linkstatus_set(eth_dev, &link); } link.link_status = ETH_LINK_UP; /* Interface is up */ @@ -982,13 +952,7 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev, link.link_duplex = ETH_LINK_HALF_DUPLEX; } - if (lio_dev_atomic_write_link_status(eth_dev, &link)) - return -1; - - if (link.link_status == old.link_status) - return -1; - - return 0; + return rte_eth_linkstatus_set(eth_dev, &link); } /** @@ -1441,6 +1405,11 @@ lio_dev_start(struct rte_eth_dev *eth_dev) /* Configure RSS if device configured with multiple RX queues. */ lio_dev_mq_rx_configure(eth_dev); + /* Before updating the link info, + * linfo.link.link_status64 must be set to 0. + */ + lio_dev->linfo.link.link_status64 = 0; + /* start polling for lsc */ ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check, @@ -2146,19 +2115,8 @@ static int lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { - struct rte_eth_dev *eth_dev; - int ret; - - eth_dev = rte_eth_dev_pci_allocate(pci_dev, - sizeof(struct lio_device)); - if (eth_dev == NULL) - return -ENOMEM; - - ret = lio_eth_dev_init(eth_dev); - if (ret) - rte_eth_dev_pci_release(eth_dev); - - return ret; + return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device), + lio_eth_dev_init); } static int @@ -2185,9 +2143,7 @@ RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map); RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci"); -RTE_INIT(lio_init_log); -static void -lio_init_log(void) +RTE_INIT(lio_init_log) { lio_logtype_init = rte_log_register("pmd.net.liquidio.init"); if (lio_logtype_init >= 0) diff --git a/drivers/net/liquidio/meson.build b/drivers/net/liquidio/meson.build new file mode 100644 index 00000000..9ae48e21 --- /dev/null +++ b/drivers/net/liquidio/meson.build @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +sources = files('base/lio_23xx_vf.c', + 'base/lio_mbox.c', + 'lio_ethdev.c', + 'lio_rxtx.c') +includes += include_directories('base') diff --git a/drivers/net/liquidio/rte_pmd_lio_version.map b/drivers/net/liquidio/rte_pmd_lio_version.map deleted file mode 100644 index 8591cc0b..00000000 --- a/drivers/net/liquidio/rte_pmd_lio_version.map +++ /dev/null @@ -1,4 +0,0 @@ -DPDK_17.05 { - - local: *; -}; diff --git a/drivers/net/liquidio/rte_pmd_liquidio_version.map b/drivers/net/liquidio/rte_pmd_liquidio_version.map new file mode 100644 index
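The liquidio hunk above drops the hand-rolled rte_atomic64_cmpset() link writer in favour of the generic rte_eth_linkstatus_set() helper, which publishes the whole rte_eth_link atomically and reports whether the status changed (0 when it changed, -1 when it did not, per the 18.08 helper), so the driver can return its result directly. A minimal link_update built on it, with illustrative values:

    #include <string.h>
    #include <rte_ethdev_driver.h>

    static int
    example_link_update(struct rte_eth_dev *dev,
                        int wait_to_complete __rte_unused)
    {
        struct rte_eth_link link;

        memset(&link, 0, sizeof(link));
        link.link_status = ETH_LINK_UP;          /* illustrative */
        link.link_speed = ETH_SPEED_NUM_10G;
        link.link_duplex = ETH_LINK_FULL_DUPLEX;
        link.link_autoneg = ETH_LINK_AUTONEG;
        /* Atomic publish; also reports whether the status changed. */
        return rte_eth_linkstatus_set(dev, &link);
    }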
00000000..8591cc0b --- /dev/null +++ b/drivers/net/liquidio/rte_pmd_liquidio_version.map @@ -0,0 +1,4 @@ +DPDK_17.05 { + + local: *; +}; diff --git a/drivers/net/meson.build b/drivers/net/meson.build index 704cbe3c..9c28ed4d 100644 --- a/drivers/net/meson.build +++ b/drivers/net/meson.build @@ -1,10 +1,33 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation -drivers = ['af_packet', 'bonding', - 'e1000', 'fm10k', 'i40e', 'ixgbe', +drivers = ['af_packet', + 'ark', + 'avp', + 'axgbe', 'bonding', + 'bnx2x', + 'bnxt', + 'cxgbe', + 'dpaa', 'dpaa2', + 'e1000', + 'ena', + 'enic', + 'failsafe', + 'fm10k', 'i40e', + 'ifc', + 'ixgbe', + 'kni', + 'liquidio', + 'mvpp2', + 'netvsc', + 'nfp', 'null', 'octeontx', 'pcap', 'ring', - 'sfc', 'thunderx'] + 'sfc', + 'softnic', + 'szedata2', + 'thunderx', + 'vhost', + 'virtio'] std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc std_deps += ['bus_pci'] # very many PMDs depend on PCI, so make std std_deps += ['bus_vdev'] # same with vdev bus diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile index cc800493..92e93225 100644 --- a/drivers/net/mlx4/Makefile +++ b/drivers/net/mlx4/Makefile @@ -1,33 +1,6 @@ -# BSD LICENSE -# +# SPDX-License-Identifier: BSD-3-Clause # Copyright 2012 6WIND S.A. -# Copyright 2012 Mellanox -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND S.A. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# Copyright 2012 Mellanox Technologies, Ltd include $(RTE_SDK)/mk/rte.vars.mk @@ -64,6 +37,7 @@ CFLAGS += -D_BSD_SOURCE CFLAGS += -D_DEFAULT_SOURCE CFLAGS += -D_XOPEN_SOURCE=600 CFLAGS += $(WERROR_FLAGS) +CFLAGS += -DALLOW_EXPERIMENTAL_API ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y) CFLAGS += -DMLX4_GLUE='"$(LIB_GLUE)"' CFLAGS += -DMLX4_GLUE_VERSION='"$(LIB_GLUE_VERSION)"' @@ -95,10 +69,6 @@ else CFLAGS += -DNDEBUG -UPEDANTIC endif -ifdef CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE -CFLAGS += -DMLX4_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE) -endif - include $(RTE_SDK)/mk/rte.lib.mk # Generate and clean-up mlx4_autoconf.h. 
@@ -115,6 +85,11 @@ mlx4_autoconf.h.new: FORCE mlx4_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh $Q $(RM) -f -- '$@' $Q : > '$@' + $Q sh -- '$<' '$@' \ + HAVE_IBV_MLX4_WQE_LSO_SEG \ + infiniband/mlx4dv.h \ + type 'struct mlx4_wqe_lso_seg' \ + $(AUTOCONF_OUTPUT) # Create mlx4_autoconf.h or update it in case it differs from the new one. @@ -132,10 +107,15 @@ ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y) $(LIB): $(LIB_GLUE) +ifeq ($(LINK_USING_CC),1) +GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS)) +else +GLUE_LDFLAGS := $(LDFLAGS) +endif $(LIB_GLUE): mlx4_glue.o - $Q $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) \ + $Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \ -Wl,-h,$(LIB_GLUE) \ - -s -shared -o $@ $< -libverbs -lmlx4 + -shared -o $@ $< -libverbs -lmlx4 mlx4_glue.o: mlx4_autoconf.h diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c index ee93dafe..defc0d4b 100644 --- a/drivers/net/mlx4/mlx4.c +++ b/drivers/net/mlx4/mlx4.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2012 6WIND S.A. - * Copyright 2012 Mellanox + * Copyright 2012 Mellanox Technologies, Ltd */ /** @@ -44,9 +44,15 @@ #include "mlx4.h" #include "mlx4_glue.h" #include "mlx4_flow.h" +#include "mlx4_mr.h" #include "mlx4_rxtx.h" #include "mlx4_utils.h" +struct mlx4_dev_list mlx4_mem_event_cb_list = + LIST_HEAD_INITIALIZER(mlx4_mem_event_cb_list); + +rte_rwlock_t mlx4_mem_event_rwlock = RTE_RWLOCK_INITIALIZER; + /** Configuration structure for device arguments. */ struct mlx4_conf { struct { @@ -61,6 +67,8 @@ const char *pmd_mlx4_init_params[] = { NULL, }; +static void mlx4_dev_stop(struct rte_eth_dev *dev); + /** * DPDK callback for Ethernet device configuration. * @@ -123,6 +131,9 @@ mlx4_dev_start(struct rte_eth_dev *dev) (void *)dev, strerror(-ret)); goto err; } +#ifndef NDEBUG + mlx4_mr_dump_dev(dev); +#endif ret = mlx4_rxq_intr_enable(priv); if (ret) { ERROR("%p: interrupt handler installation failed", @@ -143,8 +154,7 @@ mlx4_dev_start(struct rte_eth_dev *dev) dev->rx_pkt_burst = mlx4_rx_burst; return 0; err: - /* Rollback. */ - priv->started = 0; + mlx4_dev_stop(dev); return ret; } @@ -194,10 +204,12 @@ mlx4_dev_close(struct rte_eth_dev *dev) dev->tx_pkt_burst = mlx4_tx_burst_removed; rte_wmb(); mlx4_flow_clean(priv); + mlx4_rss_deinit(priv); for (i = 0; i != dev->data->nb_rx_queues; ++i) mlx4_rx_queue_release(dev->data->rx_queues[i]); for (i = 0; i != dev->data->nb_tx_queues; ++i) mlx4_tx_queue_release(dev->data->tx_queues[i]); + mlx4_mr_release(dev); if (priv->pd != NULL) { assert(priv->ctx != NULL); claim_zero(mlx4_glue->dealloc_pd(priv->pd)); @@ -385,6 +397,99 @@ free_kvlist: return ret; } +/** + * Interpret RSS capabilities reported by device. + * + * This function returns the set of usable Verbs RSS hash fields, kernel + * quirks taken into account. + * + * @param ctx + * Verbs context. + * @param pd + * Verbs protection domain. + * @param device_attr_ex + * Extended device attributes to interpret. + * + * @return + * Usable RSS hash fields mask in Verbs format. 
+ */ +static uint64_t +mlx4_hw_rss_sup(struct ibv_context *ctx, struct ibv_pd *pd, + struct ibv_device_attr_ex *device_attr_ex) +{ + uint64_t hw_rss_sup = device_attr_ex->rss_caps.rx_hash_fields_mask; + struct ibv_cq *cq = NULL; + struct ibv_wq *wq = NULL; + struct ibv_rwq_ind_table *ind = NULL; + struct ibv_qp *qp = NULL; + + if (!hw_rss_sup) { + WARN("no RSS capabilities reported; disabling support for UDP" + " RSS and inner VXLAN RSS"); + return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 | + IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 | + IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP; + } + if (!(hw_rss_sup & IBV_RX_HASH_INNER)) + return hw_rss_sup; + /* + * Although reported as supported, missing code in some Linux + * versions (v4.15, v4.16) prevents the creation of hash QPs with + * inner capability. + * + * There is no choice but to attempt to instantiate a temporary RSS + * context in order to confirm its support. + */ + cq = mlx4_glue->create_cq(ctx, 1, NULL, NULL, 0); + wq = cq ? mlx4_glue->create_wq + (ctx, + &(struct ibv_wq_init_attr){ + .wq_type = IBV_WQT_RQ, + .max_wr = 1, + .max_sge = 1, + .pd = pd, + .cq = cq, + }) : NULL; + ind = wq ? mlx4_glue->create_rwq_ind_table + (ctx, + &(struct ibv_rwq_ind_table_init_attr){ + .log_ind_tbl_size = 0, + .ind_tbl = &wq, + .comp_mask = 0, + }) : NULL; + qp = ind ? mlx4_glue->create_qp_ex + (ctx, + &(struct ibv_qp_init_attr_ex){ + .comp_mask = + (IBV_QP_INIT_ATTR_PD | + IBV_QP_INIT_ATTR_RX_HASH | + IBV_QP_INIT_ATTR_IND_TABLE), + .qp_type = IBV_QPT_RAW_PACKET, + .pd = pd, + .rwq_ind_tbl = ind, + .rx_hash_conf = { + .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE, + .rx_hash_key = mlx4_rss_hash_key_default, + .rx_hash_fields_mask = hw_rss_sup, + }, + }) : NULL; + if (!qp) { + WARN("disabling unusable inner RSS capability due to kernel" + " quirk"); + hw_rss_sup &= ~IBV_RX_HASH_INNER; + } else { + claim_zero(mlx4_glue->destroy_qp(qp)); + } + if (ind) + claim_zero(mlx4_glue->destroy_rwq_ind_table(ind)); + if (wq) + claim_zero(mlx4_glue->destroy_wq(wq)); + if (cq) + claim_zero(mlx4_glue->destroy_cq(cq)); + return hw_rss_sup; +} + static struct rte_pci_driver mlx4_driver; /** @@ -470,14 +575,14 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) ibv_dev = list[i]; DEBUG("device opened"); if (mlx4_glue->query_device(attr_ctx, &device_attr)) { - rte_errno = ENODEV; + err = ENODEV; goto error; } INFO("%u port(s) detected", device_attr.phys_port_cnt); conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1; if (mlx4_args(pci_dev->device.devargs, &conf)) { ERROR("failed to process device arguments"); - rte_errno = EINVAL; + err = EINVAL; goto error; } /* Use all ports when none are defined */ @@ -485,7 +590,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) conf.ports.enabled = conf.ports.present; /* Retrieve extended device attributes. */ if (mlx4_glue->query_device_ex(attr_ctx, NULL, &device_attr_ex)) { - rte_errno = ENODEV; + err = ENODEV; goto error; } assert(device_attr.max_sge >= MLX4_MAX_SGE); @@ -504,18 +609,18 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) DEBUG("using port %u", port); ctx = mlx4_glue->open_device(ibv_dev); if (ctx == NULL) { - rte_errno = ENODEV; + err = ENODEV; goto port_error; } /* Check port status. 
*/ err = mlx4_glue->query_port(ctx, port, &port_attr); if (err) { - rte_errno = err; - ERROR("port query failed: %s", strerror(rte_errno)); + err = ENODEV; + ERROR("port query failed: %s", strerror(err)); goto port_error; } if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { - rte_errno = ENOTSUP; + err = ENOTSUP; ERROR("port %d is not configured in Ethernet mode", port); goto port_error; @@ -525,15 +630,16 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) port, mlx4_glue->port_state_str(port_attr.state), port_attr.state); /* Make asynchronous FD non-blocking to handle interrupts. */ - if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) { + err = mlx4_fd_set_non_blocking(ctx->async_fd); + if (err) { ERROR("cannot make asynchronous FD non-blocking: %s", - strerror(rte_errno)); + strerror(err)); goto port_error; } /* Allocate protection domain. */ pd = mlx4_glue->alloc_pd(ctx); if (pd == NULL) { - rte_errno = ENOMEM; + err = ENOMEM; ERROR("PD allocation failure"); goto port_error; } @@ -542,7 +648,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) sizeof(*priv), RTE_CACHE_LINE_SIZE); if (priv == NULL) { - rte_errno = ENOMEM; + err = ENOMEM; ERROR("priv allocation failure"); goto port_error; } @@ -562,26 +668,32 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) (device_attr.vendor_part_id == PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO); DEBUG("L2 tunnel checksum offloads are %ssupported", - (priv->hw_csum_l2tun ? "" : "not ")); - priv->hw_rss_sup = device_attr_ex.rss_caps.rx_hash_fields_mask; - if (!priv->hw_rss_sup) { - WARN("no RSS capabilities reported; disabling support" - " for UDP RSS and inner VXLAN RSS"); - /* Fake support for all possible RSS hash fields. */ - priv->hw_rss_sup = ~UINT64_C(0); - priv->hw_rss_sup = mlx4_conv_rss_hf(priv, -1); - /* Filter out known unsupported fields. */ - priv->hw_rss_sup &= - ~(uint64_t)(IBV_RX_HASH_SRC_PORT_UDP | - IBV_RX_HASH_DST_PORT_UDP | - IBV_RX_HASH_INNER); - } + priv->hw_csum_l2tun ? "" : "not "); + priv->hw_rss_sup = mlx4_hw_rss_sup(priv->ctx, priv->pd, + &device_attr_ex); DEBUG("supported RSS hash fields mask: %016" PRIx64, priv->hw_rss_sup); + priv->hw_rss_max_qps = + device_attr_ex.rss_caps.max_rwq_indirection_table_size; + DEBUG("MAX RSS queues %d", priv->hw_rss_max_qps); + priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps & + IBV_RAW_PACKET_CAP_SCATTER_FCS); + DEBUG("FCS stripping toggling is %ssupported", + priv->hw_fcs_strip ? "" : "not "); + priv->tso = + ((device_attr_ex.tso_caps.max_tso > 0) && + (device_attr_ex.tso_caps.supported_qpts & + (1 << IBV_QPT_RAW_PACKET))); + if (priv->tso) + priv->tso_max_payload_sz = + device_attr_ex.tso_caps.max_tso; + DEBUG("TSO is %ssupported", + priv->tso ? "" : "not "); /* Configure the first MAC address by default. */ - if (mlx4_get_mac(priv, &mac.addr_bytes)) { + err = mlx4_get_mac(priv, &mac.addr_bytes); + if (err) { ERROR("cannot get MAC address, is mlx4_en loaded?" 
- " (rte_errno: %s)", strerror(rte_errno)); + " (error: %s)", strerror(err)); goto port_error; } INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", @@ -614,8 +726,8 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) eth_dev = rte_eth_dev_allocate(name); } if (eth_dev == NULL) { + err = ENOMEM; ERROR("can not allocate rte ethdev"); - rte_errno = ENOMEM; goto port_error; } eth_dev->data->dev_private = priv; @@ -649,6 +761,24 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) /* Update link status once if waiting for LSC. */ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) mlx4_link_update(eth_dev, 0); + /* + * Once the device is added to the list of memory event + * callback, its global MR cache table cannot be expanded + * on the fly because of deadlock. If it overflows, lookup + * should be done by searching MR list linearly, which is slow. + */ + err = mlx4_mr_btree_init(&priv->mr.cache, + MLX4_MR_BTREE_CACHE_N * 2, + eth_dev->device->numa_node); + if (err) { + /* rte_errno is already set. */ + goto port_error; + } + /* Add device to memory callback list. */ + rte_rwlock_write_lock(&mlx4_mem_event_rwlock); + LIST_INSERT_HEAD(&mlx4_mem_event_cb_list, priv, mem_event_cb); + rte_rwlock_write_unlock(&mlx4_mem_event_rwlock); + rte_eth_dev_probing_finish(eth_dev); continue; port_error: rte_free(priv); @@ -660,8 +790,6 @@ port_error: rte_eth_dev_release_port(eth_dev); break; } - if (i == device_attr.phys_port_cnt) - return 0; /* * XXX if something went wrong in the loop above, there is a resource * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as @@ -673,8 +801,9 @@ error: claim_zero(mlx4_glue->close_device(attr_ctx)); if (list) mlx4_glue->free_device_list(list); - assert(rte_errno >= 0); - return -rte_errno; + if (err) + rte_errno = err; + return -err; } static const struct rte_pci_id mlx4_pci_id_map[] = { @@ -707,12 +836,54 @@ static struct rte_pci_driver mlx4_driver = { #ifdef RTE_LIBRTE_MLX4_DLOPEN_DEPS +/** + * Suffix RTE_EAL_PMD_PATH with "-glue". + * + * This function performs a sanity check on RTE_EAL_PMD_PATH before + * suffixing its last component. + * + * @param buf[out] + * Output buffer, should be large enough otherwise NULL is returned. + * @param size + * Size of @p out. + * + * @return + * Pointer to @p buf or @p NULL in case suffix cannot be appended. + */ +static char * +mlx4_glue_path(char *buf, size_t size) +{ + static const char *const bad[] = { "/", ".", "..", NULL }; + const char *path = RTE_EAL_PMD_PATH; + size_t len = strlen(path); + size_t off; + int i; + + while (len && path[len - 1] == '/') + --len; + for (off = len; off && path[off - 1] != '/'; --off) + ; + for (i = 0; bad[i]; ++i) + if (!strncmp(path + off, bad[i], (int)(len - off))) + goto error; + i = snprintf(buf, size, "%.*s-glue", (int)len, path); + if (i == -1 || (size_t)i >= size) + goto error; + return buf; +error: + ERROR("unable to append \"-glue\" to last component of" + " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\")," + " please re-configure DPDK"); + return NULL; +} + /** * Initialization routine for run-time dependency on rdma-core. */ static int mlx4_glue_init(void) { + char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")]; const char *path[] = { /* * A basic security check is necessary before trusting @@ -720,7 +891,13 @@ mlx4_glue_init(void) */ (geteuid() == getuid() && getegid() == getgid() ? 
getenv("MLX4_GLUE_PATH") : NULL), - RTE_EAL_PMD_PATH, + /* + * When RTE_EAL_PMD_PATH is set, use its glue-suffixed + * variant, otherwise let dlopen() look up libraries on its + * own. + */ + (*RTE_EAL_PMD_PATH ? + mlx4_glue_path(glue_path, sizeof(glue_path)) : ""), }; unsigned int i = 0; void *handle = NULL; @@ -790,9 +967,7 @@ glue_error: /** * Driver initialization routine. */ -RTE_INIT(rte_mlx4_pmd_init); -static void -rte_mlx4_pmd_init(void) +RTE_INIT(rte_mlx4_pmd_init) { /* * MLX4_DEVICE_FATAL_CLEANUP tells ibv_destroy functions we @@ -828,6 +1003,8 @@ rte_mlx4_pmd_init(void) } mlx4_glue->fork_init(); rte_pci_register(&mlx4_driver); + rte_mem_event_callback_register("MLX4_MEM_EVENT_CB", + mlx4_mr_mem_event_cb, NULL); } RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__); diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 19c8a223..e6fb934f 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2012 6WIND S.A. - * Copyright 2012 Mellanox + * Copyright 2012 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX4_H_ @@ -23,7 +23,9 @@ #include #include #include -#include +#include + +#include "mlx4_mr.h" #ifndef IBV_RX_HASH_INNER /** This is not necessarily defined by supported RDMA core versions. */ @@ -42,20 +44,12 @@ /** Fixed RSS hash key size in bytes. Cannot be modified. */ #define MLX4_RSS_HASH_KEY_SIZE 40 -/** - * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP - * from which buffers are to be transmitted will have to be mapped by this - * driver to their own Memory Region (MR). This is a slow operation. - * - * This value is always 1 for RX queues. - */ -#ifndef MLX4_PMD_TX_MP_CACHE -#define MLX4_PMD_TX_MP_CACHE 8 -#endif - /** Interrupt alarm timeout value in microseconds. */ #define MLX4_INTR_ALARM_TIMEOUT 100000 +/* Maximum packet headers size (L2+L3+L4) for TSO. */ +#define MLX4_MAX_TSO_HEADER 192 + /** Port parameter. */ #define MLX4_PMD_PORT_KVARG "port" @@ -78,20 +72,12 @@ struct rxq; struct txq; struct rte_flow; -/** Memory region descriptor. */ -struct mlx4_mr { - LIST_ENTRY(mlx4_mr) next; /**< Next entry in list. */ - uintptr_t start; /**< Base address for memory region. */ - uintptr_t end; /**< End address for memory region. */ - uint32_t lkey; /**< L_Key extracted from @p mr. */ - uint32_t refcnt; /**< Reference count for this object. */ - struct priv *priv; /**< Back pointer to private data. */ - struct ibv_mr *mr; /**< Memory region associated with @p mp. */ - struct rte_mempool *mp; /**< Target memory pool (mempool). */ -}; +LIST_HEAD(mlx4_dev_list, priv); +LIST_HEAD(mlx4_mr_list, mlx4_mr); /** Private data structure. */ struct priv { + LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */ struct rte_eth_dev *dev; /**< Ethernet device. */ struct ibv_context *ctx; /**< Verbs context. */ struct ibv_device_attr device_attr; /**< Device properties. */ @@ -103,15 +89,25 @@ struct priv { uint32_t vf:1; /**< This is a VF device. */ uint32_t intr_alarm:1; /**< An interrupt alarm is scheduled. */ uint32_t isolated:1; /**< Toggle isolated mode. */ + uint32_t rss_init:1; /**< Common RSS context is initialized. */ uint32_t hw_csum:1; /**< Checksum offload is supported. */ uint32_t hw_csum_l2tun:1; /**< Checksum support for L2 tunnels. */ + uint32_t hw_fcs_strip:1; /**< FCS stripping toggling is supported. */ + uint32_t tso:1; /**< Transmit segmentation offload is supported. */ + uint32_t tso_max_payload_sz; /**< Max supported TSO payload size. 
*/ + uint32_t hw_rss_max_qps; /**< Max Rx Queues supported by RSS. */ uint64_t hw_rss_sup; /**< Supported RSS hash fields (Verbs format). */ struct rte_intr_handle intr_handle; /**< Port interrupt handle. */ struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */ + struct { + uint32_t dev_gen; /* Generation number to flush local caches. */ + rte_rwlock_t rwlock; /* MR Lock. */ + struct mlx4_mr_btree cache; /* Global MR cache table. */ + struct mlx4_mr_list mr_list; /* Registered MR list. */ + struct mlx4_mr_list mr_free_list; /* Freed MR list. */ + } mr; LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */ LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */ - LIST_HEAD(, mlx4_mr) mr; /**< Registered memory regions. */ - rte_spinlock_t mr_lock; /**< Lock for @p mr access. */ struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES]; /**< Configured MAC addresses. Unused entries are zeroed. */ }; @@ -131,7 +127,7 @@ void mlx4_allmulticast_disable(struct rte_eth_dev *dev); void mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); int mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t vmdq); -void mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr); +int mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr); int mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); int mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); void mlx4_stats_reset(struct rte_eth_dev *dev); @@ -154,11 +150,4 @@ void mlx4_rxq_intr_disable(struct priv *priv); int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx); int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx); -/* mlx4_mr.c */ - -struct mlx4_mr *mlx4_mr_get(struct priv *priv, struct rte_mempool *mp); -void mlx4_mr_put(struct mlx4_mr *mr); -uint32_t mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp, - uint32_t i); - #endif /* RTE_PMD_MLX4_H_ */ diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c index 3bc69273..30deb3ef 100644 --- a/drivers/net/mlx4/mlx4_ethdev.c +++ b/drivers/net/mlx4/mlx4_ethdev.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** @@ -39,6 +39,7 @@ #include #include #include +#include #include "mlx4.h" #include "mlx4_flow.h" @@ -120,7 +121,7 @@ try_dev_id: goto try_dev_id; dev_port_prev = dev_port; if (dev_port == (priv->port - 1u)) - snprintf(match, sizeof(match), "%s", name); + strlcpy(match, name, sizeof(match)); } closedir(dir); if (match[0] == '\0') { @@ -131,167 +132,6 @@ try_dev_id: return 0; } -/** - * Read from sysfs entry. - * - * @param[in] priv - * Pointer to private structure. - * @param[in] entry - * Entry name relative to sysfs path. - * @param[out] buf - * Data output buffer. - * @param size - * Buffer size. - * - * @return - * Number of bytes read on success, negative errno value otherwise and - * rte_errno is set. 
- */ -static int -mlx4_sysfs_read(const struct priv *priv, const char *entry, - char *buf, size_t size) -{ - char ifname[IF_NAMESIZE]; - FILE *file; - int ret; - - ret = mlx4_get_ifname(priv, &ifname); - if (ret) - return ret; - - MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path, - ifname, entry); - - file = fopen(path, "rb"); - if (file == NULL) { - rte_errno = errno; - return -rte_errno; - } - ret = fread(buf, 1, size, file); - if ((size_t)ret < size && ferror(file)) { - rte_errno = EIO; - ret = -rte_errno; - } else { - ret = size; - } - fclose(file); - return ret; -} - -/** - * Write to sysfs entry. - * - * @param[in] priv - * Pointer to private structure. - * @param[in] entry - * Entry name relative to sysfs path. - * @param[in] buf - * Data buffer. - * @param size - * Buffer size. - * - * @return - * Number of bytes written on success, negative errno value otherwise and - * rte_errno is set. - */ -static int -mlx4_sysfs_write(const struct priv *priv, const char *entry, - char *buf, size_t size) -{ - char ifname[IF_NAMESIZE]; - FILE *file; - int ret; - - ret = mlx4_get_ifname(priv, &ifname); - if (ret) - return ret; - - MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path, - ifname, entry); - - file = fopen(path, "wb"); - if (file == NULL) { - rte_errno = errno; - return -rte_errno; - } - ret = fwrite(buf, 1, size, file); - if ((size_t)ret < size || ferror(file)) { - rte_errno = EIO; - ret = -rte_errno; - } else { - ret = size; - } - fclose(file); - return ret; -} - -/** - * Get unsigned long sysfs property. - * - * @param priv - * Pointer to private structure. - * @param[in] name - * Entry name relative to sysfs path. - * @param[out] value - * Value output buffer. - * - * @return - * 0 on success, negative errno value otherwise and rte_errno is set. - */ -static int -mlx4_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value) -{ - int ret; - unsigned long value_ret; - char value_str[32]; - - ret = mlx4_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1)); - if (ret < 0) { - DEBUG("cannot read %s value from sysfs: %s", - name, strerror(rte_errno)); - return ret; - } - value_str[ret] = '\0'; - errno = 0; - value_ret = strtoul(value_str, NULL, 0); - if (errno) { - rte_errno = errno; - DEBUG("invalid %s value `%s': %s", name, value_str, - strerror(rte_errno)); - return -rte_errno; - } - *value = value_ret; - return 0; -} - -/** - * Set unsigned long sysfs property. - * - * @param priv - * Pointer to private structure. - * @param[in] name - * Entry name relative to sysfs path. - * @param value - * Value to set. - * - * @return - * 0 on success, negative errno value otherwise and rte_errno is set. - */ -static int -mlx4_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value) -{ - int ret; - MKSTR(value_str, "%lu", value); - - ret = mlx4_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1)); - if (ret < 0) { - DEBUG("cannot write %s `%s' (%lu) to sysfs: %s", - name, value_str, value, strerror(rte_errno)); - return ret; - } - return 0; -} - /** * Perform ifreq ioctl() on associated Ethernet device. 
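The sysfs helpers removed above are superseded by ifreq-based ioctl() calls: the reworked mlx4_mtu_get(), mlx4_mtu_set() and mlx4_set_flags() further down all funnel through the driver's mlx4_ifreq() wrapper. A minimal sketch of such a wrapper, assuming a plain interface name in place of the driver's priv-based name lookup:

#include <errno.h>
#include <string.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Hedged sketch of an ifreq ioctl helper; the real mlx4_ifreq() resolves
 * the name from priv, so "ifname" here is a simplifying assumption. */
static int
ifreq_ioctl(const char *ifname, unsigned long req, struct ifreq *ifr)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int ret = 0;

        if (fd == -1)
                return -errno;
        strncpy(ifr->ifr_name, ifname, sizeof(ifr->ifr_name) - 1);
        ifr->ifr_name[sizeof(ifr->ifr_name) - 1] = '\0';
        if (ioctl(fd, req, ifr) == -1)
                ret = -errno;
        close(fd);
        return ret;
}

With SIOCGIFMTU the kernel fills ifr_mtu directly and SIOCSIFMTU needs no read-back, which is why the MTU rework below can drop the strtoul() parsing and the verification step of the sysfs version.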
* @@ -361,12 +201,12 @@ mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]) int mlx4_mtu_get(struct priv *priv, uint16_t *mtu) { - unsigned long ulong_mtu = 0; - int ret = mlx4_get_sysfs_ulong(priv, "mtu", &ulong_mtu); + struct ifreq request; + int ret = mlx4_ifreq(priv, SIOCGIFMTU, &request); if (ret) return ret; - *mtu = ulong_mtu; + *mtu = request.ifr_mtu; return 0; } @@ -385,20 +225,13 @@ int mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { struct priv *priv = dev->data->dev_private; - uint16_t new_mtu; - int ret = mlx4_set_sysfs_ulong(priv, "mtu", mtu); + struct ifreq request = { .ifr_mtu = mtu, }; + int ret = mlx4_ifreq(priv, SIOCSIFMTU, &request); if (ret) return ret; - ret = mlx4_mtu_get(priv, &new_mtu); - if (ret) - return ret; - if (new_mtu == mtu) { - priv->mtu = mtu; - return 0; - } - rte_errno = EINVAL; - return -rte_errno; + priv->mtu = mtu; + return 0; } /** @@ -417,14 +250,14 @@ mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) static int mlx4_set_flags(struct priv *priv, unsigned int keep, unsigned int flags) { - unsigned long tmp = 0; - int ret = mlx4_get_sysfs_ulong(priv, "flags", &tmp); + struct ifreq request; + int ret = mlx4_ifreq(priv, SIOCGIFFLAGS, &request); if (ret) return ret; - tmp &= keep; - tmp |= (flags & (~keep)); - return mlx4_set_sysfs_ulong(priv, "flags", tmp); + request.ifr_flags &= keep; + request.ifr_flags |= flags & ~keep; + return mlx4_ifreq(priv, SIOCSIFFLAGS, &request); } /** @@ -701,11 +534,14 @@ mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) * Pointer to Ethernet device structure. * @param mac_addr * MAC address to register. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. */ -void +int mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { - mlx4_mac_addr_add(dev, mac_addr, 0, 0); + return mlx4_mac_addr_add(dev, mac_addr, 0, 0); } /** @@ -723,7 +559,6 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) unsigned int max; char ifname[IF_NAMESIZE]; - info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); /* FIXME: we should ask the device for these values. */ info->min_rx_bufsize = 32; info->max_rx_pktlen = 65536; @@ -752,6 +587,7 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) ETH_LINK_SPEED_20G | ETH_LINK_SPEED_40G | ETH_LINK_SPEED_56G; + info->flow_type_rss_offloads = mlx4_conv_rss_types(priv, 0, 1); } /** @@ -878,7 +714,7 @@ mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete) } link_speed = ethtool_cmd_speed(&edata); if (link_speed == -1) - dev_link.link_speed = 0; + dev_link.link_speed = ETH_SPEED_NUM_NONE; else dev_link.link_speed = link_speed; dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ? diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c index 2d55bfe0..b40e7e5c 100644 --- a/drivers/net/mlx4/mlx4_flow.c +++ b/drivers/net/mlx4/mlx4_flow.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** @@ -76,69 +76,94 @@ struct mlx4_drop { }; /** - * Convert DPDK RSS hash fields to their Verbs equivalent. + * Convert supported RSS hash field types between DPDK and Verbs formats. * - * This function returns the supported (default) set when @p rss_hf has - * special value (uint64_t)-1. + * This function returns the supported (default) set when @p types has + * special value 0. * * @param priv * Pointer to private structure. 
- * @param rss_hf - * Hash fields in DPDK format (see struct rte_eth_rss_conf). + * @param types + * Depending on @p verbs_to_dpdk, hash types in either DPDK (see struct + * rte_eth_rss_conf) or Verbs format. + * @param verbs_to_dpdk + * A zero value converts @p types from DPDK to Verbs, a nonzero value + * performs the reverse operation. * * @return - * A valid Verbs RSS hash fields mask for mlx4 on success, (uint64_t)-1 - * otherwise and rte_errno is set. + * Converted RSS hash fields on success, (uint64_t)-1 otherwise and + * rte_errno is set. */ uint64_t -mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf) +mlx4_conv_rss_types(struct priv *priv, uint64_t types, int verbs_to_dpdk) { - enum { IPV4, IPV6, TCP, UDP, }; - const uint64_t in[] = { - [IPV4] = (ETH_RSS_IPV4 | - ETH_RSS_FRAG_IPV4 | - ETH_RSS_NONFRAG_IPV4_TCP | - ETH_RSS_NONFRAG_IPV4_UDP | - ETH_RSS_NONFRAG_IPV4_OTHER), - [IPV6] = (ETH_RSS_IPV6 | - ETH_RSS_FRAG_IPV6 | - ETH_RSS_NONFRAG_IPV6_TCP | - ETH_RSS_NONFRAG_IPV6_UDP | - ETH_RSS_NONFRAG_IPV6_OTHER | - ETH_RSS_IPV6_EX | - ETH_RSS_IPV6_TCP_EX | - ETH_RSS_IPV6_UDP_EX), - [TCP] = (ETH_RSS_NONFRAG_IPV4_TCP | - ETH_RSS_NONFRAG_IPV6_TCP | - ETH_RSS_IPV6_TCP_EX), - [UDP] = (ETH_RSS_NONFRAG_IPV4_UDP | - ETH_RSS_NONFRAG_IPV6_UDP | - ETH_RSS_IPV6_UDP_EX), + enum { + INNER, + IPV4, IPV4_1, IPV4_2, IPV6, IPV6_1, IPV6_2, IPV6_3, + TCP, UDP, + IPV4_TCP, IPV4_UDP, IPV6_TCP, IPV6_TCP_1, IPV6_UDP, IPV6_UDP_1, }; - const uint64_t out[RTE_DIM(in)] = { - [IPV4] = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4, - [IPV6] = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6, - [TCP] = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP, - [UDP] = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP, + enum { + VERBS_IPV4 = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4, + VERBS_IPV6 = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6, + VERBS_TCP = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP, + VERBS_UDP = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP, }; + static const uint64_t dpdk[] = { + [INNER] = 0, + [IPV4] = ETH_RSS_IPV4, + [IPV4_1] = ETH_RSS_FRAG_IPV4, + [IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER, + [IPV6] = ETH_RSS_IPV6, + [IPV6_1] = ETH_RSS_FRAG_IPV6, + [IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER, + [IPV6_3] = ETH_RSS_IPV6_EX, + [TCP] = 0, + [UDP] = 0, + [IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP, + [IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP, + [IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP, + [IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX, + [IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP, + [IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX, + }; + static const uint64_t verbs[RTE_DIM(dpdk)] = { + [INNER] = IBV_RX_HASH_INNER, + [IPV4] = VERBS_IPV4, + [IPV4_1] = VERBS_IPV4, + [IPV4_2] = VERBS_IPV4, + [IPV6] = VERBS_IPV6, + [IPV6_1] = VERBS_IPV6, + [IPV6_2] = VERBS_IPV6, + [IPV6_3] = VERBS_IPV6, + [TCP] = VERBS_TCP, + [UDP] = VERBS_UDP, + [IPV4_TCP] = VERBS_IPV4 | VERBS_TCP, + [IPV4_UDP] = VERBS_IPV4 | VERBS_UDP, + [IPV6_TCP] = VERBS_IPV6 | VERBS_TCP, + [IPV6_TCP_1] = VERBS_IPV6 | VERBS_TCP, + [IPV6_UDP] = VERBS_IPV6 | VERBS_UDP, + [IPV6_UDP_1] = VERBS_IPV6 | VERBS_UDP, + }; + const uint64_t *in = verbs_to_dpdk ? verbs : dpdk; + const uint64_t *out = verbs_to_dpdk ? 
dpdk : verbs; uint64_t seen = 0; uint64_t conv = 0; unsigned int i; - for (i = 0; i != RTE_DIM(in); ++i) - if (rss_hf & in[i]) { - seen |= rss_hf & in[i]; + if (!types) { + if (!verbs_to_dpdk) + return priv->hw_rss_sup; + types = priv->hw_rss_sup; + } + for (i = 0; i != RTE_DIM(dpdk); ++i) + if (in[i] && (types & in[i]) == in[i]) { + seen |= types & in[i]; conv |= out[i]; } - if ((conv & priv->hw_rss_sup) == conv) { - if (rss_hf == (uint64_t)-1) { - /* Include inner RSS by default if supported. */ - conv |= priv->hw_rss_sup & IBV_RX_HASH_INNER; - return conv; - } - if (!(rss_hf & ~seen)) - return conv; - } + if ((verbs_to_dpdk || (conv & priv->hw_rss_sup) == conv) && + !(types & ~seen)) + return conv; rte_errno = ENOTSUP; return (uint64_t)-1; } @@ -362,6 +387,9 @@ error: * Additional mlx4-specific constraints on supported fields: * * - No support for partial masks. + * - Due to HW/FW limitation, flow rule priority is not taken into account + * when matching UDP destination ports, doing is therefore only supported + * at the highest priority level (0). * * @param[in, out] flow * Flow rule handle to update. @@ -393,6 +421,11 @@ mlx4_flow_merge_udp(struct rte_flow *flow, msg = "mlx4 does not support matching partial UDP fields"; goto error; } + if (mask && mask->hdr.dst_port && flow->priority) { + msg = "combining UDP destination port matching with a nonzero" + " priority level is not supported"; + goto error; + } if (!flow->ibv_attr) return 0; ++flow->ibv_attr->num_of_specs; @@ -637,6 +670,7 @@ mlx4_flow_prepare(struct priv *priv, struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) }; struct rte_flow *flow = &temp; const char *msg = NULL; + int overlap; if (attr->group) return rte_flow_error_set @@ -651,12 +685,18 @@ mlx4_flow_prepare(struct priv *priv, return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, "egress is not supported"); + if (attr->transfer) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, "transfer is not supported"); if (!attr->ingress) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL, "only ingress is supported"); fill: + overlap = 0; proc = mlx4_flow_proc_item_list; + flow->priority = attr->priority; /* Go over pattern. */ for (item = pattern; item->type; ++item) { const struct mlx4_flow_proc_item *next = NULL; @@ -702,14 +742,24 @@ fill: } /* Go over actions list. */ for (action = actions; action->type; ++action) { + /* This one may appear anywhere multiple times. */ + if (action->type == RTE_FLOW_ACTION_TYPE_VOID) + continue; + /* Fate-deciding actions may appear exactly once. */ + if (overlap) { + msg = "cannot combine several fate-deciding actions," + " choose between DROP, QUEUE or RSS"; + goto exit_action_not_supported; + } + overlap = 1; switch (action->type) { const struct rte_flow_action_queue *queue; const struct rte_flow_action_rss *rss; - const struct rte_eth_rss_conf *rss_conf; + const uint8_t *rss_key; + uint32_t rss_key_len; + uint64_t fields; unsigned int i; - case RTE_FLOW_ACTION_TYPE_VOID: - continue; case RTE_FLOW_ACTION_TYPE_DROP: flow->drop = 1; break; @@ -736,54 +786,68 @@ fill: break; rss = action->conf; /* Default RSS configuration if none is provided. */ - rss_conf = - rss->rss_conf ? 
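The rewritten mlx4_conv_rss_types() drives both translation directions from the two parallel tables above, indexed by the same enum, and only emits an output entry when every bit of the corresponding input entry is present. A self-contained sketch of that parallel-table pattern, with invented flag values standing in for the real DPDK and Verbs constants and without the special cases (types == 0 as the supported default, the hardware support mask):

#include <stdint.h>

enum { F_IPV4, F_UDP, F_NUM };
/* Hypothetical bit values, for illustration only. */
static const uint64_t fmt_a[F_NUM] = { [F_IPV4] = 0x1, [F_UDP] = 0x2 };
static const uint64_t fmt_b[F_NUM] = { [F_IPV4] = 0x10, [F_UDP] = 0x300 };

static uint64_t
conv(uint64_t types, int b_to_a)
{
        const uint64_t *in = b_to_a ? fmt_b : fmt_a;
        const uint64_t *out = b_to_a ? fmt_a : fmt_b;
        uint64_t seen = 0;
        uint64_t ret = 0;
        unsigned int i;

        for (i = 0; i != F_NUM; ++i)
                if (in[i] && (types & in[i]) == in[i]) {
                        /* Composite entries require all their bits. */
                        seen |= types & in[i];
                        ret |= out[i];
                }
        /* Leftover bits mean an untranslatable request. */
        return (types & ~seen) ? (uint64_t)-1 : ret;
}

Here conv(0x2, 0) yields 0x300, conv(0x300, 1) yields 0x2 back, and conv(0x4, 0) fails with (uint64_t)-1, matching the rte_errno = ENOTSUP path above.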
- rss->rss_conf : - &(struct rte_eth_rss_conf){ - .rss_key = mlx4_rss_hash_key_default, - .rss_key_len = MLX4_RSS_HASH_KEY_SIZE, - .rss_hf = -1, - }; + if (rss->key_len) { + rss_key = rss->key; + rss_key_len = rss->key_len; + } else { + rss_key = mlx4_rss_hash_key_default; + rss_key_len = MLX4_RSS_HASH_KEY_SIZE; + } /* Sanity checks. */ - for (i = 0; i < rss->num; ++i) + for (i = 0; i < rss->queue_num; ++i) if (rss->queue[i] >= priv->dev->data->nb_rx_queues) break; - if (i != rss->num) { + if (i != rss->queue_num) { msg = "queue index target beyond number of" " configured Rx queues"; goto exit_action_not_supported; } - if (!rte_is_power_of_2(rss->num)) { + if (!rte_is_power_of_2(rss->queue_num)) { msg = "for RSS, mlx4 requires the number of" " queues to be a power of two"; goto exit_action_not_supported; } - if (rss_conf->rss_key_len != - sizeof(flow->rss->key)) { + if (rss_key_len != sizeof(flow->rss->key)) { msg = "mlx4 supports exactly one RSS hash key" " length: " MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE); goto exit_action_not_supported; } - for (i = 1; i < rss->num; ++i) + for (i = 1; i < rss->queue_num; ++i) if (rss->queue[i] - rss->queue[i - 1] != 1) break; - if (i != rss->num) { + if (i != rss->queue_num) { msg = "mlx4 requires RSS contexts to use" " consecutive queue indices only"; goto exit_action_not_supported; } - if (rss->queue[0] % rss->num) { + if (rss->queue[0] % rss->queue_num) { msg = "mlx4 requires the first queue of a RSS" " context to be aligned on a multiple" " of the context size"; goto exit_action_not_supported; } + if (rss->func && + rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) { + msg = "the only supported RSS hash function" + " is Toeplitz"; + goto exit_action_not_supported; + } + if (rss->level) { + msg = "a nonzero RSS encapsulation level is" + " not supported"; + goto exit_action_not_supported; + } + rte_errno = 0; + fields = mlx4_conv_rss_types(priv, rss->types, 0); + if (fields == (uint64_t)-1 && rte_errno) { + msg = "unsupported RSS hash type requested"; + goto exit_action_not_supported; + } flow->rss = mlx4_rss_get - (priv, - mlx4_conv_rss_hf(priv, rss_conf->rss_hf), - rss_conf->rss_key, rss->num, rss->queue); + (priv, fields, rss_key, rss->queue_num, + rss->queue); if (!flow->rss) { msg = "either invalid parameters or not enough" " resources for additional multi-queue" @@ -795,10 +859,9 @@ fill: goto exit_action_not_supported; } } - if (!flow->rss && !flow->drop) - return rte_flow_error_set - (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "no valid action"); + /* When fate is unknown, drop traffic. */ + if (!overlap) + flow->drop = 1; /* Validation ends here. */ if (!addr) { if (flow->rss) @@ -820,11 +883,14 @@ fill: }, }; - if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) + if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) { + if (temp.rss) + mlx4_rss_put(temp.rss); return rte_flow_error_set (error, -rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "flow rule handle allocation failure"); + } /* Most fields will be updated by second pass. 
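Taken together, the checks above define what an acceptable RSS queue list looks like on this hardware. A compact standalone predicate mirroring the same three constraints (power-of-two count, consecutive indices, first index aligned on the context size):

#include <stdint.h>

/* Hedged distillation of the mlx4 RSS queue-list constraints; the real
 * validation above also bounds each index against nb_rx_queues. */
static int
rss_queues_valid(const uint16_t *queue, uint32_t n)
{
        uint32_t i;

        if (!n || (n & (n - 1)))
                return 0; /* Count must be a power of two. */
        for (i = 1; i < n; ++i)
                if (queue[i] - queue[i - 1] != 1)
                        return 0; /* Indices must be consecutive. */
        if (queue[0] % n)
                return 0; /* First queue aligned on the context size. */
        return 1;
}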
*/ *flow = (struct rte_flow){ .ibv_attr = temp.ibv_attr, @@ -1264,14 +1330,20 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error) */ uint32_t queues = rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1; - alignas(struct rte_flow_action_rss) uint8_t rss_conf_data - [offsetof(struct rte_flow_action_rss, queue) + - sizeof(((struct rte_flow_action_rss *)0)->queue[0]) * queues]; - struct rte_flow_action_rss *rss_conf = (void *)rss_conf_data; + uint16_t queue[queues]; + struct rte_flow_action_rss action_rss = { + .func = RTE_ETH_HASH_FUNCTION_DEFAULT, + .level = 0, + .types = 0, + .key_len = MLX4_RSS_HASH_KEY_SIZE, + .queue_num = queues, + .key = mlx4_rss_hash_key_default, + .queue = queue, + }; struct rte_flow_action actions[] = { { .type = RTE_FLOW_ACTION_TYPE_RSS, - .conf = rss_conf, + .conf = &action_rss, }, { .type = RTE_FLOW_ACTION_TYPE_END, @@ -1293,12 +1365,8 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error) if (!queues) goto error; /* Prepare default RSS configuration. */ - *rss_conf = (struct rte_flow_action_rss){ - .rss_conf = NULL, /* Rely on default fallback settings. */ - .num = queues, - }; for (i = 0; i != queues; ++i) - rss_conf->queue[i] = i; + queue[i] = i; /* * Set up VLAN item if filtering is enabled and at least one VLAN * filter is configured. @@ -1357,7 +1425,7 @@ next_vlan: if (j != sizeof(mac->addr_bytes)) continue; if (flow->rss->queues != queues || - memcmp(flow->rss->queue_id, rss_conf->queue, + memcmp(flow->rss->queue_id, action_rss.queue, queues * sizeof(flow->rss->queue_id[0]))) continue; break; @@ -1397,7 +1465,7 @@ next_vlan: if (flow && flow->internal) { assert(flow->rss); if (flow->rss->queues != queues || - memcmp(flow->rss->queue_id, rss_conf->queue, + memcmp(flow->rss->queue_id, action_rss.queue, queues * sizeof(flow->rss->queue_id[0]))) flow = NULL; } diff --git a/drivers/net/mlx4/mlx4_flow.h b/drivers/net/mlx4/mlx4_flow.h index 00188a65..2917ebe9 100644 --- a/drivers/net/mlx4/mlx4_flow.h +++ b/drivers/net/mlx4/mlx4_flow.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX4_FLOW_H_ @@ -42,12 +42,14 @@ struct rte_flow { uint32_t promisc:1; /**< This rule matches everything. */ uint32_t allmulti:1; /**< This rule matches all multicast traffic. */ uint32_t drop:1; /**< This rule drops packets. */ + uint32_t priority; /**< Flow rule priority. */ struct mlx4_rss *rss; /**< Rx target. */ }; /* mlx4_flow.c */ -uint64_t mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf); +uint64_t mlx4_conv_rss_types(struct priv *priv, uint64_t types, + int verbs_to_dpdk); int mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error); void mlx4_flow_clean(struct priv *priv); int mlx4_filter_ctrl(struct rte_eth_dev *dev, diff --git a/drivers/net/mlx4/mlx4_glue.c b/drivers/net/mlx4/mlx4_glue.c index 3b79d320..67b3bfac 100644 --- a/drivers/net/mlx4/mlx4_glue.c +++ b/drivers/net/mlx4/mlx4_glue.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2018 6WIND S.A. - * Copyright 2018 Mellanox + * Copyright 2018 Mellanox Technologies, Ltd */ #include diff --git a/drivers/net/mlx4/mlx4_glue.h b/drivers/net/mlx4/mlx4_glue.h index 368f906b..668ca867 100644 --- a/drivers/net/mlx4/mlx4_glue.h +++ b/drivers/net/mlx4/mlx4_glue.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2018 6WIND S.A. 
- * Copyright 2018 Mellanox + * Copyright 2018 Mellanox Technologies, Ltd */ #ifndef MLX4_GLUE_H_ diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c index 2141992e..eeb982a0 100644 --- a/drivers/net/mlx4/mlx4_intr.c +++ b/drivers/net/mlx4/mlx4_intr.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c index 9a1e4de3..d23d3c61 100644 --- a/drivers/net/mlx4/mlx4_mr.c +++ b/drivers/net/mlx4/mlx4_mr.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** @@ -30,237 +30,1152 @@ #include #include #include -#include +#include #include "mlx4_glue.h" +#include "mlx4_mr.h" #include "mlx4_rxtx.h" #include "mlx4_utils.h" -struct mlx4_check_mempool_data { +struct mr_find_contig_memsegs_data { + uintptr_t addr; + uintptr_t start; + uintptr_t end; + const struct rte_memseg_list *msl; +}; + +struct mr_update_mp_data { + struct rte_eth_dev *dev; + struct mlx4_mr_ctrl *mr_ctrl; int ret; - char *start; - char *end; }; /** - * Called by mlx4_check_mempool() when iterating the memory chunks. - * - * @param[in] mp - * Pointer to memory pool (unused). - * @param[in, out] data - * Pointer to shared buffer with mlx4_check_mempool(). - * @param[in] memhdr - * Pointer to mempool chunk header. - * @param mem_idx - * Mempool element index (unused). + * Expand B-tree table to a given size. Can't be called with holding + * memory_hotplug_lock or priv->mr.rwlock due to rte_realloc(). + * + * @param bt + * Pointer to B-tree structure. + * @param n + * Number of entries for expansion. + * + * @return + * 0 on success, -1 on failure. */ -static void -mlx4_check_mempool_cb(struct rte_mempool *mp, void *opaque, - struct rte_mempool_memhdr *memhdr, - unsigned int mem_idx) +static int +mr_btree_expand(struct mlx4_mr_btree *bt, int n) +{ + void *mem; + int ret = 0; + + if (n <= bt->size) + return ret; + /* + * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is + * used inside if there's no room to expand. Because this is a quite + * rare case and a part of very slow path, it is very acceptable. + * Initially cache_bh[] will be given practically enough space and once + * it is expanded, expansion wouldn't be needed again ever. + */ + mem = rte_realloc(bt->table, n * sizeof(struct mlx4_mr_cache), 0); + if (mem == NULL) { + /* Not an error, B-tree search will be skipped. */ + WARN("failed to expand MR B-tree (%p) table", (void *)bt); + ret = -1; + } else { + DEBUG("expanded MR B-tree table (size=%u)", n); + bt->table = mem; + bt->size = n; + } + return ret; +} + +/** + * Look up LKey from given B-tree lookup table, store the last index and return + * searched LKey. + * + * @param bt + * Pointer to B-tree structure. + * @param[out] idx + * Pointer to index. Even on search failure, returns index where it stops + * searching so that index can be used when inserting a new entry. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static uint32_t +mr_btree_lookup(struct mlx4_mr_btree *bt, uint16_t *idx, uintptr_t addr) +{ + struct mlx4_mr_cache *lkp_tbl; + uint16_t n; + uint16_t base = 0; + + assert(bt != NULL); + lkp_tbl = *bt->table; + n = bt->len; + /* First entry must be NULL for comparison. 
*/ + assert(bt->len > 0 || (lkp_tbl[0].start == 0 && + lkp_tbl[0].lkey == UINT32_MAX)); + /* Binary search. */ + do { + register uint16_t delta = n >> 1; + + if (addr < lkp_tbl[base + delta].start) { + n = delta; + } else { + base += delta; + n -= delta; + } + } while (n > 1); + assert(addr >= lkp_tbl[base].start); + *idx = base; + if (addr < lkp_tbl[base].end) + return lkp_tbl[base].lkey; + /* Not found. */ + return UINT32_MAX; +} + +/** + * Insert an entry to B-tree lookup table. + * + * @param bt + * Pointer to B-tree structure. + * @param entry + * Pointer to new entry to insert. + * + * @return + * 0 on success, -1 on failure. + */ +static int +mr_btree_insert(struct mlx4_mr_btree *bt, struct mlx4_mr_cache *entry) { - struct mlx4_check_mempool_data *data = opaque; + struct mlx4_mr_cache *lkp_tbl; + uint16_t idx = 0; + size_t shift; - (void)mp; - (void)mem_idx; - /* It already failed, skip the next chunks. */ - if (data->ret != 0) + assert(bt != NULL); + assert(bt->len <= bt->size); + assert(bt->len > 0); + lkp_tbl = *bt->table; + /* Find out the slot for insertion. */ + if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) { + DEBUG("abort insertion to B-tree(%p): already exist at" + " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); + /* Already exist, return. */ + return 0; + } + /* If table is full, return error. */ + if (unlikely(bt->len == bt->size)) { + bt->overflow = 1; + return -1; + } + /* Insert entry. */ + ++idx; + shift = (bt->len - idx) * sizeof(struct mlx4_mr_cache); + if (shift) + memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift); + lkp_tbl[idx] = *entry; + bt->len++; + DEBUG("inserted B-tree(%p)[%u]," + " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); + return 0; +} + +/** + * Initialize B-tree and allocate memory for lookup table. + * + * @param bt + * Pointer to B-tree structure. + * @param n + * Number of entries to allocate. + * @param socket + * NUMA socket on which memory must be allocated. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx4_mr_btree_init(struct mlx4_mr_btree *bt, int n, int socket) +{ + if (bt == NULL) { + rte_errno = EINVAL; + return -rte_errno; + } + memset(bt, 0, sizeof(*bt)); + bt->table = rte_calloc_socket("B-tree table", + n, sizeof(struct mlx4_mr_cache), + 0, socket); + if (bt->table == NULL) { + rte_errno = ENOMEM; + ERROR("failed to allocate memory for btree cache on socket %d", + socket); + return -rte_errno; + } + bt->size = n; + /* First entry must be NULL for binary search. */ + (*bt->table)[bt->len++] = (struct mlx4_mr_cache) { + .lkey = UINT32_MAX, + }; + DEBUG("initialized B-tree %p with table %p", + (void *)bt, (void *)bt->table); + return 0; +} + +/** + * Free B-tree resources. + * + * @param bt + * Pointer to B-tree structure. + */ +void +mlx4_mr_btree_free(struct mlx4_mr_btree *bt) +{ + if (bt == NULL) return; - /* It is the first chunk. */ - if (data->start == NULL && data->end == NULL) { - data->start = memhdr->addr; - data->end = data->start + memhdr->len; + DEBUG("freeing B-tree %p with table %p", (void *)bt, (void *)bt->table); + rte_free(bt->table); + memset(bt, 0, sizeof(*bt)); +} + +#ifndef NDEBUG +/** + * Dump all the entries in a B-tree + * + * @param bt + * Pointer to B-tree structure. 
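mr_btree_lookup() above leans on the sentinel entry planted by mlx4_mr_btree_init(): index 0 always holds { start = 0, lkey = UINT32_MAX }, so the base/delta loop needs no emptiness or underflow checks. The same search in isolation, assuming a sorted table of non-overlapping [start, end) ranges with that sentinel in place:

#include <stdint.h>

struct range {
        uintptr_t start;
        uintptr_t end;
        uint32_t lkey;
};

/* Hedged sketch of the sentinel-based binary search; tbl[0] must be the
 * { .start = 0, .lkey = UINT32_MAX } sentinel and len must be >= 1. */
static uint32_t
range_lookup(const struct range *tbl, uint16_t len, uintptr_t addr)
{
        uint16_t n = len;
        uint16_t base = 0;

        do {
                uint16_t delta = n >> 1;

                if (addr < tbl[base + delta].start)
                        n = delta;
                else {
                        base += delta;
                        n -= delta;
                }
        } while (n > 1);
        /* base now indexes the last entry with start <= addr. */
        return addr < tbl[base].end ? tbl[base].lkey : UINT32_MAX;
}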
+ */ +void +mlx4_mr_btree_dump(struct mlx4_mr_btree *bt) +{ + int idx; + struct mlx4_mr_cache *lkp_tbl; + + if (bt == NULL) return; + lkp_tbl = *bt->table; + for (idx = 0; idx < bt->len; ++idx) { + struct mlx4_mr_cache *entry = &lkp_tbl[idx]; + + DEBUG("B-tree(%p)[%u]," + " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); } - if (data->end == memhdr->addr) { - data->end += memhdr->len; - return; +} +#endif + +/** + * Find virtually contiguous memory chunk in a given MR. + * + * @param dev + * Pointer to MR structure. + * @param[out] entry + * Pointer to returning MR cache entry. If not found, this will not be + * updated. + * @param start_idx + * Start index of the memseg bitmap. + * + * @return + * Next index to go on lookup. + */ +static int +mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry, + int base_idx) +{ + uintptr_t start = 0; + uintptr_t end = 0; + uint32_t idx = 0; + + for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) { + if (rte_bitmap_get(mr->ms_bmp, idx)) { + const struct rte_memseg_list *msl; + const struct rte_memseg *ms; + + msl = mr->msl; + ms = rte_fbarray_get(&msl->memseg_arr, + mr->ms_base_idx + idx); + assert(msl->page_sz == ms->hugepage_sz); + if (!start) + start = ms->addr_64; + end = ms->addr_64 + ms->hugepage_sz; + } else if (start) { + /* Passed the end of a fragment. */ + break; + } } - if (data->start == (char *)memhdr->addr + memhdr->len) { - data->start -= memhdr->len; - return; + if (start) { + /* Found one chunk. */ + entry->start = start; + entry->end = end; + entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey); } - /* Error, mempool is not virtually contiguous. */ - data->ret = -1; + return idx; } /** - * Check if a mempool can be used: it must be virtually contiguous. + * Insert a MR to the global B-tree cache. It may fail due to low-on-memory. + * Then, this entry will have to be searched by mr_lookup_dev_list() in + * mlx4_mr_create() on miss. * - * @param[in] mp - * Pointer to memory pool. - * @param[out] start - * Pointer to the start address of the mempool virtual memory area. - * @param[out] end - * Pointer to the end address of the mempool virtual memory area. + * @param dev + * Pointer to Ethernet device. + * @param mr + * Pointer to MR to insert. * * @return - * 0 on success (mempool is virtually contiguous), -1 on error. + * 0 on success, -1 on failure. */ static int -mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start, uintptr_t *end) +mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx4_mr *mr) { - struct mlx4_check_mempool_data data; + struct priv *priv = dev->data->dev_private; + unsigned int n; - memset(&data, 0, sizeof(data)); - rte_mempool_mem_iter(mp, mlx4_check_mempool_cb, &data); - *start = (uintptr_t)data.start; - *end = (uintptr_t)data.end; - return data.ret; + DEBUG("port %u inserting MR(%p) to global cache", + dev->data->port_id, (void *)mr); + for (n = 0; n < mr->ms_bmp_n; ) { + struct mlx4_mr_cache entry = { 0, }; + + /* Find a contiguous chunk and advance the index. */ + n = mr_find_next_chunk(mr, &entry, n); + if (!entry.end) + break; + if (mr_btree_insert(&priv->mr.cache, &entry) < 0) { + /* + * Overflowed, but the global table cannot be expanded + * because of deadlock. + */ + return -1; + } + } + return 0; } /** - * Obtain a memory region from a memory pool. + * Look up address in the original global MR list. 
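mr_find_next_chunk() above turns runs of set bits in the per-MR memseg bitmap back into virtually contiguous [start, end) address ranges. A reduced sketch of the same scan over a plain byte-array bitmap, assuming a single fixed page size instead of rte_fbarray lookups:

#include <stddef.h>
#include <stdint.h>

/* Hedged sketch: emit the next contiguous run of set bits starting at
 * *idx as an address range. Returns 1 if a run was found, 0 otherwise. */
static int
next_chunk(const uint8_t *bmp, unsigned int nbits, uintptr_t base,
           size_t page_sz, unsigned int *idx,
           uintptr_t *start, uintptr_t *end)
{
        unsigned int i = *idx;

        while (i < nbits && !(bmp[i / 8] & (1u << (i % 8))))
                ++i; /* Skip cleared bits before the run. */
        if (i == nbits)
                return 0;
        *start = base + (uintptr_t)i * page_sz;
        while (i < nbits && (bmp[i / 8] & (1u << (i % 8))))
                ++i; /* Extend while bits stay set. */
        *end = base + (uintptr_t)i * page_sz;
        *idx = i;
        return 1;
}

mr_insert_dev_cache() above iterates exactly this way, feeding each recovered range into the global B-tree.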
* - * If a matching memory region already exists, it is returned with its - * reference count incremented, otherwise a new one is registered. + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry. If no match, this will not be updated. + * @param addr + * Search key. * - * @param priv - * Pointer to private structure. - * @param mp - * Pointer to memory pool. + * @return + * Found MR on match, NULL otherwise. + */ +static struct mlx4_mr * +mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry, + uintptr_t addr) +{ + struct priv *priv = dev->data->dev_private; + struct mlx4_mr *mr; + + /* Iterate all the existing MRs. */ + LIST_FOREACH(mr, &priv->mr.mr_list, mr) { + unsigned int n; + + if (mr->ms_n == 0) + continue; + for (n = 0; n < mr->ms_bmp_n; ) { + struct mlx4_mr_cache ret = { 0, }; + + n = mr_find_next_chunk(mr, &ret, n); + if (addr >= ret.start && addr < ret.end) { + /* Found. */ + *entry = ret; + return mr; + } + } + } + return NULL; +} + +/** + * Look up address on device. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry. If no match, this will not be updated. + * @param addr + * Search key. * * @return - * Memory region pointer, NULL in case of error and rte_errno is set. + * Searched LKey on success, UINT32_MAX on failure and rte_errno is set. */ -struct mlx4_mr * -mlx4_mr_get(struct priv *priv, struct rte_mempool *mp) +static uint32_t +mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry, + uintptr_t addr) { - const struct rte_memseg *ms = rte_eal_get_physmem_layout(); - uintptr_t start; - uintptr_t end; - unsigned int i; + struct priv *priv = dev->data->dev_private; + uint16_t idx; + uint32_t lkey = UINT32_MAX; struct mlx4_mr *mr; - if (mlx4_check_mempool(mp, &start, &end) != 0) { - rte_errno = EINVAL; - ERROR("mempool %p: not virtually contiguous", - (void *)mp); - return NULL; + /* + * If the global cache has overflowed since it failed to expand the + * B-tree table, it can't have all the existing MRs. Then, the address + * has to be searched by traversing the original MR list instead, which + * is very slow path. Otherwise, the global cache is all inclusive. + */ + if (!unlikely(priv->mr.cache.overflow)) { + lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr); + if (lkey != UINT32_MAX) + *entry = (*priv->mr.cache.table)[idx]; + } else { + /* Falling back to the slowest path. */ + mr = mr_lookup_dev_list(dev, entry, addr); + if (mr != NULL) + lkey = entry->lkey; } - DEBUG("mempool %p area start=%p end=%p size=%zu", - (void *)mp, (void *)start, (void *)end, - (size_t)(end - start)); - /* Round start and end to page boundary if found in memory segments. */ - for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) { - uintptr_t addr = (uintptr_t)ms[i].addr; - size_t len = ms[i].len; - unsigned int align = ms[i].hugepage_sz; - - if ((start > addr) && (start < addr + len)) - start = RTE_ALIGN_FLOOR(start, align); - if ((end > addr) && (end < addr + len)) - end = RTE_ALIGN_CEIL(end, align); + assert(lkey == UINT32_MAX || (addr >= entry->start && + addr < entry->end)); + return lkey; +} + +/** + * Free MR resources. MR lock must not be held to avoid a deadlock. rte_free() + * can raise memory free event and the callback function will spin on the lock. + * + * @param mr + * Pointer to MR to free. 
+ */ +static void +mr_free(struct mlx4_mr *mr) +{ + if (mr == NULL) + return; + DEBUG("freeing MR(%p):", (void *)mr); + if (mr->ibv_mr != NULL) + claim_zero(mlx4_glue->dereg_mr(mr->ibv_mr)); + if (mr->ms_bmp != NULL) + rte_bitmap_free(mr->ms_bmp); + rte_free(mr); +} + +/** + * Release resources of detached MR having no online entry. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +mlx4_mr_garbage_collect(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx4_mr *mr_next; + struct mlx4_mr_list free_list = LIST_HEAD_INITIALIZER(free_list); + + /* + * MR can't be freed with holding the lock because rte_free() could call + * memory free callback function. This will be a deadlock situation. + */ + rte_rwlock_write_lock(&priv->mr.rwlock); + /* Detach the whole free list and release it after unlocking. */ + free_list = priv->mr.mr_free_list; + LIST_INIT(&priv->mr.mr_free_list); + rte_rwlock_write_unlock(&priv->mr.rwlock); + /* Release resources. */ + mr_next = LIST_FIRST(&free_list); + while (mr_next != NULL) { + struct mlx4_mr *mr = mr_next; + + mr_next = LIST_NEXT(mr, mr); + mr_free(mr); + } +} + +/* Called during rte_memseg_contig_walk() by mlx4_mr_create(). */ +static int +mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl, + const struct rte_memseg *ms, size_t len, void *arg) +{ + struct mr_find_contig_memsegs_data *data = arg; + + if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len) + return 0; + /* Found, save it and stop walking. */ + data->start = ms->addr_64; + data->end = ms->addr_64 + len; + data->msl = msl; + return 1; +} + +/** + * Create a new global Memory Region (MR) for a missing virtual address. + * Register entire virtually contiguous memory chunk around the address. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry, found in the global cache or newly + * created. If failed to create one, this will not be updated. + * @param addr + * Target virtual address to register. + * + * @return + * Searched LKey on success, UINT32_MAX on failure and rte_errno is set. + */ +static uint32_t +mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry, + uintptr_t addr) +{ + struct priv *priv = dev->data->dev_private; + struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; + const struct rte_memseg_list *msl; + const struct rte_memseg *ms; + struct mlx4_mr *mr = NULL; + size_t len; + uint32_t ms_n; + uint32_t bmp_size; + void *bmp_mem; + int ms_idx_shift = -1; + unsigned int n; + struct mr_find_contig_memsegs_data data = { + .addr = addr, + }; + struct mr_find_contig_memsegs_data data_re; + + DEBUG("port %u creating a MR using address (%p)", + dev->data->port_id, (void *)addr); + /* + * Release detached MRs if any. This can't be called with holding either + * memory_hotplug_lock or priv->mr.rwlock. MRs on the free list have + * been detached by the memory free event but it couldn't be released + * inside the callback due to deadlock. As a result, releasing resources + * is quite opportunistic.
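mlx4_mr_garbage_collect() above is careful never to call rte_free() while holding priv->mr.rwlock, because freeing can fire the memory event callback, which takes the same lock. A generic rendering of that detach-then-free idiom, with pthreads standing in for DPDK locks:

#include <pthread.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node {
        LIST_ENTRY(node) link;
};
LIST_HEAD(node_list, node);

static struct node_list free_list = LIST_HEAD_INITIALIZER(free_list);
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

/* Hedged sketch: steal the whole list under the lock, free the nodes
 * only after dropping it so a free() callback needing the same lock
 * cannot deadlock. */
static void
collect(void)
{
        struct node_list tmp = LIST_HEAD_INITIALIZER(tmp);
        struct node *n;
        struct node *next;

        pthread_rwlock_wrlock(&lock);
        tmp = free_list;        /* Detach everything at once... */
        LIST_INIT(&free_list);  /* ...and leave an empty list behind. */
        pthread_rwlock_unlock(&lock);
        for (n = LIST_FIRST(&tmp); n != NULL; n = next) {
                next = LIST_NEXT(n, link);
                free(n);        /* Safe: no lock held here. */
        }
}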
+ */ + mlx4_mr_garbage_collect(dev); + /* + * Find out a contiguous virtual address chunk in use, to which the + * given address belongs, in order to register maximum range. In the + * best case where mempools are not dynamically recreated and + * '--socket-mem' is specified as an EAL option, it is very likely to + * have only one MR(LKey) per a socket and per a hugepage-size even + * though the system memory is highly fragmented. + */ + if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) { + WARN("port %u unable to find virtually contiguous" + " chunk for address (%p)." + " rte_memseg_contig_walk() failed.", + dev->data->port_id, (void *)addr); + rte_errno = ENXIO; + goto err_nolock; + } +alloc_resources: + /* Addresses must be page-aligned. */ + assert(rte_is_aligned((void *)data.start, data.msl->page_sz)); + assert(rte_is_aligned((void *)data.end, data.msl->page_sz)); + msl = data.msl; + ms = rte_mem_virt2memseg((void *)data.start, msl); + len = data.end - data.start; + assert(msl->page_sz == ms->hugepage_sz); + /* Number of memsegs in the range. */ + ms_n = len / msl->page_sz; + DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR ")," + " page_sz=0x%" PRIx64 ", ms_n=%u", + dev->data->port_id, (void *)addr, + data.start, data.end, msl->page_sz, ms_n); + /* Size of memory for bitmap. */ + bmp_size = rte_bitmap_get_memory_footprint(ms_n); + mr = rte_zmalloc_socket(NULL, + RTE_ALIGN_CEIL(sizeof(*mr), + RTE_CACHE_LINE_SIZE) + + bmp_size, + RTE_CACHE_LINE_SIZE, msl->socket_id); + if (mr == NULL) { + WARN("port %u unable to allocate memory for a new MR of" + " address (%p).", + dev->data->port_id, (void *)addr); + rte_errno = ENOMEM; + goto err_nolock; + } + mr->msl = msl; + /* + * Save the index of the first memseg and initialize memseg bitmap. To + * see if a memseg of ms_idx in the memseg-list is still valid, check: + * rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx) + */ + mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms); + bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE); + mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size); + if (mr->ms_bmp == NULL) { + WARN("port %u unable to initialize bitmap for a new MR of" + " address (%p).", + dev->data->port_id, (void *)addr); + rte_errno = EINVAL; + goto err_nolock; + } + /* + * Should recheck whether the extended contiguous chunk is still valid. + * Because memory_hotplug_lock can't be held if there's any memory + * related calls in a critical path, resource allocation above can't be + * locked. If the memory has been changed at this point, try again with + * just single page. If not, go on with the big chunk atomically from + * here. + */ + rte_rwlock_read_lock(&mcfg->memory_hotplug_lock); + data_re = data; + if (len > msl->page_sz && + !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) { + WARN("port %u unable to find virtually contiguous" + " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.", + dev->data->port_id, (void *)addr); + rte_errno = ENXIO; + goto err_memlock; + } + if (data.start != data_re.start || data.end != data_re.end) { + /* + * The extended contiguous chunk has been changed. Try again + * with single memseg instead. + */ + data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz); + data.end = data.start + msl->page_sz; + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); + mr_free(mr); + goto alloc_resources; } -release: - rte_spinlock_unlock(&priv->mr_lock); - return mr; + assert(data.msl == data_re.msl); + rte_rwlock_write_lock(&priv->mr.rwlock); + /* + * Check the address is really missing. If other thread already created + * one or it is not found due to overflow, abort and return. + */ + if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) { + /* + * Insert to the global cache table. It may fail due to + * low-on-memory. Then, this entry will have to be searched + * here again. + */ + mr_btree_insert(&priv->mr.cache, entry); + DEBUG("port %u found MR for %p on final lookup, abort", + dev->data->port_id, (void *)addr); + rte_rwlock_write_unlock(&priv->mr.rwlock); + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); + /* + * Must be unlocked before calling rte_free() because + * mlx4_mr_mem_event_free_cb() can be called inside. + */ + mr_free(mr); + return entry->lkey; + } + /* + * Trim start and end addresses for verbs MR. Set bits for registering + * memsegs but exclude already registered ones. Bitmap can be + * fragmented. + */ + for (n = 0; n < ms_n; ++n) { + uintptr_t start; + struct mlx4_mr_cache ret = { 0, }; + + start = data_re.start + n * msl->page_sz; + /* Exclude memsegs already registered by other MRs. */ + if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) { + /* + * Start from the first unregistered memseg in the + * extended range. + */ + if (ms_idx_shift == -1) { + mr->ms_base_idx += n; + data.start = start; + ms_idx_shift = n; + } + data.end = start + msl->page_sz; + rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift); + ++mr->ms_n; + } + } + len = data.end - data.start; + mr->ms_bmp_n = len / msl->page_sz; + assert(ms_idx_shift + mr->ms_bmp_n <= ms_n); + /* + * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be + * called with holding the memory lock because it doesn't use + * mlx4_alloc_buf_extern() which eventually calls rte_malloc_socket() + * through mlx4_alloc_verbs_buf(). + */ + mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)data.start, len, + IBV_ACCESS_LOCAL_WRITE); + if (mr->ibv_mr == NULL) { + WARN("port %u fail to create a verbs MR for address (%p)", + dev->data->port_id, (void *)addr); + rte_errno = EINVAL; + goto err_mrlock; + } + assert((uintptr_t)mr->ibv_mr->addr == data.start); + assert(mr->ibv_mr->length == len); + LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr); + DEBUG("port %u MR CREATED (%p) for %p:\n" + " [0x%" PRIxPTR ", 0x%" PRIxPTR ")," + " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u", + dev->data->port_id, (void *)mr, (void *)addr, + data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey), + mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n); + /* Insert to the global cache table. */ + mr_insert_dev_cache(dev, mr); + /* Fill in output data. */ + mr_lookup_dev(dev, entry, addr); + /* Lookup can't fail. 
*/ + assert(entry->lkey != UINT32_MAX); + rte_rwlock_write_unlock(&priv->mr.rwlock); + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); + return entry->lkey; +err_mrlock: + rte_rwlock_write_unlock(&priv->mr.rwlock); +err_memlock: + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); +err_nolock: + /* + * In case of error, as this can be called in a datapath, a warning + * message per an error is preferable instead. Must be unlocked before + * calling rte_free() because mlx4_mr_mem_event_free_cb() can be called + * inside. + */ + mr_free(mr); + return UINT32_MAX; } /** - * Release a memory region. + * Rebuild the global B-tree cache of device from the original MR list. * - * This function decrements its reference count and destroys it after - * reaching 0. + * @param dev + * Pointer to Ethernet device. + */ +static void +mr_rebuild_dev_cache(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx4_mr *mr; + + DEBUG("port %u rebuild dev cache[]", dev->data->port_id); + /* Flush cache to rebuild. */ + priv->mr.cache.len = 1; + priv->mr.cache.overflow = 0; + /* Iterate all the existing MRs. */ + LIST_FOREACH(mr, &priv->mr.mr_list, mr) + if (mr_insert_dev_cache(dev, mr) < 0) + return; +} + +/** + * Callback for memory free event. Iterate freed memsegs and check whether it + * belongs to an existing MR. If found, clear the bit from bitmap of MR. As a + * result, the MR would be fragmented. If it becomes empty, the MR will be freed + * later by mlx4_mr_garbage_collect(). * - * Note to avoid race conditions given this function may be used from the - * data plane, it's extremely important that each user holds its own - * reference. + * The global cache must be rebuilt if there's any change and this event has to + * be propagated to dataplane threads to flush the local caches. * - * @param mr - * Memory region to release. + * @param dev + * Pointer to Ethernet device. + * @param addr + * Address of freed memory. + * @param len + * Size of freed memory. + */ +static void +mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len) +{ + struct priv *priv = dev->data->dev_private; + const struct rte_memseg_list *msl; + struct mlx4_mr *mr; + int ms_n; + int i; + int rebuild = 0; + + DEBUG("port %u free callback: addr=%p, len=%zu", + dev->data->port_id, addr, len); + msl = rte_mem_virt2memseg_list(addr); + /* addr and len must be page-aligned. */ + assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz)); + assert(len == RTE_ALIGN(len, msl->page_sz)); + ms_n = len / msl->page_sz; + rte_rwlock_write_lock(&priv->mr.rwlock); + /* Clear bits of freed memsegs from MR. */ + for (i = 0; i < ms_n; ++i) { + const struct rte_memseg *ms; + struct mlx4_mr_cache entry; + uintptr_t start; + int ms_idx; + uint32_t pos; + + /* Find MR having this memseg. 
*/ + start = (uintptr_t)addr + i * msl->page_sz; + mr = mr_lookup_dev_list(dev, &entry, start); + if (mr == NULL) + continue; + ms = rte_mem_virt2memseg((void *)start, msl); + assert(ms != NULL); + assert(msl->page_sz == ms->hugepage_sz); + ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms); + pos = ms_idx - mr->ms_base_idx; + assert(rte_bitmap_get(mr->ms_bmp, pos)); + assert(pos < mr->ms_bmp_n); + DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p", + dev->data->port_id, (void *)mr, pos, (void *)start); + rte_bitmap_clear(mr->ms_bmp, pos); + if (--mr->ms_n == 0) { + LIST_REMOVE(mr, mr); + LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr); + DEBUG("port %u remove MR(%p) from list", + dev->data->port_id, (void *)mr); + } + /* + * MR is fragmented or will be freed. the global cache must be + * rebuilt. + */ + rebuild = 1; + } + if (rebuild) { + mr_rebuild_dev_cache(dev); + /* + * Flush local caches by propagating invalidation across cores. + * rte_smp_wmb() is enough to synchronize this event. If one of + * freed memsegs is seen by other core, that means the memseg + * has been allocated by allocator, which will come after this + * free call. Therefore, this store instruction (incrementing + * generation below) will be guaranteed to be seen by other core + * before the core sees the newly allocated memory. + */ + ++priv->mr.dev_gen; + DEBUG("broadcasting local cache flush, gen=%d", + priv->mr.dev_gen); + rte_smp_wmb(); + } + rte_rwlock_write_unlock(&priv->mr.rwlock); +#ifndef NDEBUG + if (rebuild) + mlx4_mr_dump_dev(dev); +#endif +} + +/** + * Callback for memory event. + * + * @param event_type + * Memory event type. + * @param addr + * Address of memory. + * @param len + * Size of memory. */ void -mlx4_mr_put(struct mlx4_mr *mr) +mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, + size_t len, void *arg __rte_unused) { - struct priv *priv = mr->priv; - - rte_spinlock_lock(&priv->mr_lock); - assert(mr->refcnt); - if (--mr->refcnt) - goto release; - LIST_REMOVE(mr, next); - claim_zero(mlx4_glue->dereg_mr(mr->mr)); - rte_free(mr); -release: - rte_spinlock_unlock(&priv->mr_lock); + struct priv *priv; + + switch (event_type) { + case RTE_MEM_EVENT_FREE: + rte_rwlock_read_lock(&mlx4_mem_event_rwlock); + /* Iterate all the existing mlx4 devices. */ + LIST_FOREACH(priv, &mlx4_mem_event_cb_list, mem_event_cb) + mlx4_mr_mem_event_free_cb(priv->dev, addr, len); + rte_rwlock_read_unlock(&mlx4_mem_event_rwlock); + break; + case RTE_MEM_EVENT_ALLOC: + default: + break; + } } /** - * Add memory region (MR) <-> memory pool (MP) association to txq->mp2mr[]. - * If mp2mr[] is full, remove an entry first. + * Look up address in the global MR cache table. If not found, create a new MR. + * Insert the found/created entry to local bottom-half cache table. + * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param[out] entry + * Pointer to returning MR cache entry, found in the global cache or newly + * created. If failed to create one, this is not written. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static uint32_t +mlx4_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, + struct mlx4_mr_cache *entry, uintptr_t addr) +{ + struct priv *priv = dev->data->dev_private; + struct mlx4_mr_btree *bt = &mr_ctrl->cache_bh; + uint16_t idx; + uint32_t lkey; + + /* If local cache table is full, try to double it. 
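The free callback above invalidates every datapath cache with nothing more than a counter increment and a write barrier. Both halves of that protocol, sketched with the mr_ctrl fields this patch introduces (mlx4_mr_flush_local_cache() is defined further down in mlx4_mr.c; where the datapath performs this check is an assumption here, the real hook lives in the Tx/Rx code):

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include "mlx4_mr.h"

/* Defined above in mlx4_mr.c; redeclared here for a self-contained sketch. */
void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl);

/* Control path, under priv->mr.rwlock: publish the new generation
 * before any freed page can be handed out again. */
static inline void
mr_invalidate(uint32_t *dev_gen)
{
        ++*dev_gen;
        rte_smp_wmb();
}

/* Datapath, lock-free: flush the per-queue caches once the device
 * generation has moved on. */
static inline void
mr_ctrl_check_gen(struct mlx4_mr_ctrl *mr_ctrl)
{
        if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
                mlx4_mr_flush_local_cache(mr_ctrl);
}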
*/ + if (unlikely(bt->len == bt->size)) + mr_btree_expand(bt, bt->size << 1); + /* Look up in the global cache. */ + rte_rwlock_read_lock(&priv->mr.rwlock); + lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr); + if (lkey != UINT32_MAX) { + /* Found. */ + *entry = (*priv->mr.cache.table)[idx]; + rte_rwlock_read_unlock(&priv->mr.rwlock); + /* + * Update local cache. Even if it fails, return the found entry + * to update top-half cache. Next time, this entry will be found + * in the global cache. + */ + mr_btree_insert(bt, entry); + return lkey; + } + rte_rwlock_read_unlock(&priv->mr.rwlock); + /* First time to see the address? Create a new MR. */ + lkey = mlx4_mr_create(dev, entry, addr); + /* + * Update the local cache if successfully created a new global MR. Even + * if failed to create one, there's no action to take in this datapath + * code. As returning LKey is invalid, this will eventually make HW + * fail. + */ + if (lkey != UINT32_MAX) + mr_btree_insert(bt, entry); + return lkey; +} + +/** + * Bottom-half of LKey search on datapath. Firstly search in cache_bh[] and if + * misses, search in the global MR cache table and update the new entry to + * per-queue local caches. + * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static uint32_t +mlx4_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, + uintptr_t addr) +{ + uint32_t lkey; + uint16_t bh_idx = 0; + /* Victim in top-half cache to replace with new entry. */ + struct mlx4_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head]; + + /* Binary-search MR translation table. */ + lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr); + /* Update top-half cache. */ + if (likely(lkey != UINT32_MAX)) { + *repl = (*mr_ctrl->cache_bh.table)[bh_idx]; + } else { + /* + * If missed in local lookup table, search in the global cache + * and local cache_bh[] will be updated inside if possible. + * Top-half cache entry will also be updated. + */ + lkey = mlx4_mr_lookup_dev(dev, mr_ctrl, repl, addr); + if (unlikely(lkey == UINT32_MAX)) + return UINT32_MAX; + } + /* Update the most recently used entry. */ + mr_ctrl->mru = mr_ctrl->head; + /* Point to the next victim, the oldest. */ + mr_ctrl->head = (mr_ctrl->head + 1) % MLX4_MR_CACHE_N; + return lkey; +} + +/** + * Bottom-half of LKey search on Rx. + * + * @param rxq + * Pointer to Rx queue structure. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +uint32_t +mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr) +{ + struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl; + struct priv *priv = rxq->priv; + + DEBUG("Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p", + rxq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr); + return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr); +} + +/** + * Bottom-half of LKey search on Tx. * * @param txq * Pointer to Tx queue structure. - * @param[in] mp - * Memory pool for which a memory region lkey must be added. - * @param[in] i - * Index in memory pool (MP) where to add memory region (MR). + * @param addr + * Search key. * * @return - * Added mr->lkey on success, (uint32_t)-1 on failure. + * Searched LKey on success, UINT32_MAX on no match. 
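mlx4_mr_addr2mr_bh() above implements the top-half replacement policy: a hit promotes the matching slot to most-recently-used, while a miss overwrites the oldest slot with whatever the bottom-half returns. The bookkeeping in isolation:

#include <stdint.h>

#define CACHE_N 8 /* Mirrors MLX4_MR_CACHE_N. */

struct entry {
        uintptr_t start;
        uintptr_t end;
        uint32_t lkey;
};

/* Hedged sketch of the mru/head round-robin used by the top-half. */
struct top_half {
        uint16_t mru;  /* Index of the last hit. */
        uint16_t head; /* Next victim, i.e. the oldest slot. */
        struct entry cache[CACHE_N];
};

static void
top_half_insert(struct top_half *th, const struct entry *e)
{
        th->cache[th->head] = *e;          /* Replace the oldest entry. */
        th->mru = th->head;                /* It is now the most recent. */
        th->head = (th->head + 1) % CACHE_N;
}

Linear search then starts at mru (see mlx4_mr_lookup_cache() in the new mlx4_mr.h below), so a stream of buffers from one mempool keeps hitting the same slot.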
*/ uint32_t -mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp, uint32_t i) +mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr) { + struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl; + struct priv *priv = txq->priv; + + DEBUG("Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p", + txq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr); + return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr); +} + +/** + * Flush all of the local cache entries. + * + * @param mr_ctrl + * Pointer to per-queue MR control structure. + */ +void +mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl) +{ + /* Reset the most-recently-used index. */ + mr_ctrl->mru = 0; + /* Reset the linear search array. */ + mr_ctrl->head = 0; + memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache)); + /* Reset the B-tree table. */ + mr_ctrl->cache_bh.len = 1; + mr_ctrl->cache_bh.overflow = 0; + /* Update the generation number. */ + mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr; + DEBUG("mr_ctrl(%p): flushed, cur_gen=%d", + (void *)mr_ctrl, mr_ctrl->cur_gen); +} + +/* Called during rte_mempool_mem_iter() by mlx4_mr_update_mp(). */ +static void +mlx4_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque, + struct rte_mempool_memhdr *memhdr, + unsigned mem_idx __rte_unused) +{ + struct mr_update_mp_data *data = opaque; + uint32_t lkey; + + /* Stop iteration if failed in the previous walk. */ + if (data->ret < 0) + return; + /* Register address of the chunk and update local caches. */ + lkey = mlx4_mr_addr2mr_bh(data->dev, data->mr_ctrl, + (uintptr_t)memhdr->addr); + if (lkey == UINT32_MAX) + data->ret = -1; +} + +/** + * Register entire memory chunks in a Mempool. + * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param mp + * Pointer to registering Mempool. + * + * @return + * 0 on success, -1 on failure. + */ +int +mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, + struct rte_mempool *mp) +{ + struct mr_update_mp_data data = { + .dev = dev, + .mr_ctrl = mr_ctrl, + .ret = 0, + }; + + rte_mempool_mem_iter(mp, mlx4_mr_update_mp_cb, &data); + return data.ret; +} + +#ifndef NDEBUG +/** + * Dump all the created MRs and the global cache entries. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx4_mr_dump_dev(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; struct mlx4_mr *mr; + int mr_n = 0; + int chunk_n = 0; + + rte_rwlock_read_lock(&priv->mr.rwlock); + /* Iterate all the existing MRs. */ + LIST_FOREACH(mr, &priv->mr.mr_list, mr) { + unsigned int n; + + DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u", + dev->data->port_id, mr_n++, + rte_cpu_to_be_32(mr->ibv_mr->lkey), + mr->ms_n, mr->ms_bmp_n); + if (mr->ms_n == 0) + continue; + for (n = 0; n < mr->ms_bmp_n; ) { + struct mlx4_mr_cache ret = { 0, }; - /* Add a new entry, register MR first. */ - DEBUG("%p: discovered new memory pool \"%s\" (%p)", - (void *)txq, mp->name, (void *)mp); - mr = mlx4_mr_get(txq->priv, mp); - if (unlikely(mr == NULL)) { - DEBUG("%p: unable to configure MR, mlx4_mr_get() failed", - (void *)txq); - return (uint32_t)-1; + n = mr_find_next_chunk(mr, &ret, n); + if (!ret.end) + break; + DEBUG(" chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")", + chunk_n++, ret.start, ret.end); + } } - if (unlikely(i == RTE_DIM(txq->mp2mr))) { - /* Table is full, remove oldest entry. 
*/ - DEBUG("%p: MR <-> MP table full, dropping oldest entry.", - (void *)txq); - --i; - mlx4_mr_put(txq->mp2mr[0].mr); - memmove(&txq->mp2mr[0], &txq->mp2mr[1], - (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0]))); + DEBUG("port %u dumping global cache", dev->data->port_id); + mlx4_mr_btree_dump(&priv->mr.cache); + rte_rwlock_read_unlock(&priv->mr.rwlock); +} +#endif + +/** + * Release all the created MRs and resources. Remove device from memory callback + * list. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx4_mr_release(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx4_mr *mr_next = LIST_FIRST(&priv->mr.mr_list); + + /* Remove from memory callback device list. */ + rte_rwlock_write_lock(&mlx4_mem_event_rwlock); + LIST_REMOVE(priv, mem_event_cb); + rte_rwlock_write_unlock(&mlx4_mem_event_rwlock); +#ifndef NDEBUG + mlx4_mr_dump_dev(dev); +#endif + rte_rwlock_write_lock(&priv->mr.rwlock); + /* Detach from MR list and move to free list. */ + while (mr_next != NULL) { + struct mlx4_mr *mr = mr_next; + + mr_next = LIST_NEXT(mr, mr); + LIST_REMOVE(mr, mr); + LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr); } - /* Store the new entry. */ - txq->mp2mr[i].mp = mp; - txq->mp2mr[i].mr = mr; - txq->mp2mr[i].lkey = mr->lkey; - DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, - (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey); - return txq->mp2mr[i].lkey; + LIST_INIT(&priv->mr.mr_list); + /* Free global cache. */ + mlx4_mr_btree_free(&priv->mr.cache); + rte_rwlock_write_unlock(&priv->mr.rwlock); + /* Free all remaining MRs. */ + mlx4_mr_garbage_collect(dev); } diff --git a/drivers/net/mlx4/mlx4_mr.h b/drivers/net/mlx4/mlx4_mr.h new file mode 100644 index 00000000..37a365a8 --- /dev/null +++ b/drivers/net/mlx4/mlx4_mr.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 6WIND S.A. + * Copyright 2018 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX4_MR_H_ +#define RTE_PMD_MLX4_MR_H_ + +#include +#include +#include + +/* Verbs headers do not support -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include + +/* Size of per-queue MR cache array for linear search. */ +#define MLX4_MR_CACHE_N 8 + +/* Size of MR cache table for binary search. */ +#define MLX4_MR_BTREE_CACHE_N 256 + +/* Memory Region object. */ +struct mlx4_mr { + LIST_ENTRY(mlx4_mr) mr; /**< Pointer to the prev/next entry. */ + struct ibv_mr *ibv_mr; /* Verbs Memory Region. */ + const struct rte_memseg_list *msl; + int ms_base_idx; /* Start index of msl->memseg_arr[]. */ + int ms_n; /* Number of memsegs in use. */ + uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */ + struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonged to MR. */ +}; + +/* Cache entry for Memory Region. */ +struct mlx4_mr_cache { + uintptr_t start; /* Start address of MR. */ + uintptr_t end; /* End address of MR. */ + uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */ +} __rte_packed; + +/* MR Cache table for Binary search. */ +struct mlx4_mr_btree { + uint16_t len; /* Number of entries. */ + uint16_t size; /* Total number of entries. */ + int overflow; /* Mark failure of table expansion. */ + struct mlx4_mr_cache (*table)[]; +} __rte_packed; + +/* Per-queue MR control descriptor. */ +struct mlx4_mr_ctrl { + uint32_t *dev_gen_ptr; /* Generation number of device to poll. 
*/ + uint32_t cur_gen; /* Generation number saved to flush caches. */ + uint16_t mru; /* Index of last hit entry in top-half cache. */ + uint16_t head; /* Index of the oldest entry in top-half cache. */ + struct mlx4_mr_cache cache[MLX4_MR_CACHE_N]; /* Cache for top-half. */ + struct mlx4_mr_btree cache_bh; /* Cache for bottom-half. */ +} __rte_packed; + +extern struct mlx4_dev_list mlx4_mem_event_cb_list; +extern rte_rwlock_t mlx4_mem_event_rwlock; + +/* First entry must be NULL for comparison. */ +#define mlx4_mr_btree_len(bt) ((bt)->len - 1) + +int mlx4_mr_btree_init(struct mlx4_mr_btree *bt, int n, int socket); +void mlx4_mr_btree_free(struct mlx4_mr_btree *bt); +void mlx4_mr_btree_dump(struct mlx4_mr_btree *bt); +void mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, + size_t len, void *arg); +int mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, + struct rte_mempool *mp); +void mlx4_mr_dump_dev(struct rte_eth_dev *dev); +void mlx4_mr_release(struct rte_eth_dev *dev); + +/** + * Look up LKey from given lookup table by linear search. First look up the + * last-hit entry; on a miss, the entire array is searched. If found, update the + * last-hit index and return the LKey. + * + * @param lkp_tbl + * Pointer to lookup table. + * @param[in,out] cached_idx + * Pointer to last-hit index. + * @param n + * Size of lookup table. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static __rte_always_inline uint32_t +mlx4_mr_lookup_cache(struct mlx4_mr_cache *lkp_tbl, uint16_t *cached_idx, + uint16_t n, uintptr_t addr) +{ + uint16_t idx; + + if (likely(addr >= lkp_tbl[*cached_idx].start && + addr < lkp_tbl[*cached_idx].end)) + return lkp_tbl[*cached_idx].lkey; + for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) { + if (addr >= lkp_tbl[idx].start && + addr < lkp_tbl[idx].end) { + /* Found. */ + *cached_idx = idx; + return lkp_tbl[idx].lkey; + } + } + return UINT32_MAX; +} + +#endif /* RTE_PMD_MLX4_MR_H_ */ diff --git a/drivers/net/mlx4/mlx4_prm.h b/drivers/net/mlx4/mlx4_prm.h index 153dda52..aef77ba0 100644 --- a/drivers/net/mlx4/mlx4_prm.h +++ b/drivers/net/mlx4/mlx4_prm.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef MLX4_PRM_H_ @@ -19,6 +19,7 @@ #ifdef PEDANTIC #pragma GCC diagnostic error "-Wpedantic" #endif +#include "mlx4_autoconf.h" /* ConnectX-3 Tx queue basic block. */ #define MLX4_TXBB_SHIFT 6 @@ -40,6 +41,7 @@ /* Work queue element (WQE) flags. */ #define MLX4_WQE_CTRL_IIP_HDR_CSUM (1 << 28) #define MLX4_WQE_CTRL_IL4_HDR_CSUM (1 << 27) +#define MLX4_WQE_CTRL_RR (1 << 6) /* CQE checksum flags. */ enum { @@ -51,6 +53,7 @@ enum { }; /* CQE status flags. */ +#define MLX4_CQE_STATUS_IPV6F (1 << 12) #define MLX4_CQE_STATUS_IPV4 (1 << 22) #define MLX4_CQE_STATUS_IPV4F (1 << 23) #define MLX4_CQE_STATUS_IPV6 (1 << 24) @@ -97,6 +100,19 @@ struct mlx4_cq { int arm_sn; /**< Rx event counter. */ }; +#ifndef HAVE_IBV_MLX4_WQE_LSO_SEG +/* + * WQE LSO segment structure. + * Defined here for backward compatibility with rdma-core v17 and below. + * A similar definition is found in infiniband/mlx4dv.h in rdma-core v18 + * and above. + */ +struct mlx4_wqe_lso_seg { + rte_be32_t mss_hdr_size; + rte_be32_t header[]; +}; +#endif + /** * Retrieve a CQE entry from a CQ. 
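The top-half lookup just defined is deliberately branch-cheap: one likely() comparison against the most-recently-used entry, then a bounded linear scan that stops at the first zeroed slot, so entries must be kept compact from index 0. A self-contained toy showing the same cache discipline, with made-up addresses and no driver types:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct cache_ent {
	uintptr_t start; /* Range start; 0 terminates the scan. */
	uintptr_t end;   /* Range end, exclusive. */
	uint32_t lkey;   /* Value returned on a hit. */
};

static uint32_t
lookup(const struct cache_ent *tbl, uint16_t *mru, uint16_t n, uintptr_t addr)
{
	uint16_t i;

	/* Fast path: most-recently-used entry. */
	if (addr >= tbl[*mru].start && addr < tbl[*mru].end)
		return tbl[*mru].lkey;
	/* Slow path: bounded linear scan; remember the hit for next time. */
	for (i = 0; i < n && tbl[i].start != 0; ++i) {
		if (addr >= tbl[i].start && addr < tbl[i].end) {
			*mru = i;
			return tbl[i].lkey;
		}
	}
	return UINT32_MAX; /* Miss; a real caller falls back to the B-tree. */
}

int
main(void)
{
	struct cache_ent tbl[8] = {
		{ 0x1000, 0x2000, 0xaa }, { 0x2000, 0x3000, 0xbb },
	};
	uint16_t mru = 0;

	printf("0x%" PRIx32 "\n", lookup(tbl, &mru, 8, 0x2800)); /* 0xbb */
	printf("0x%" PRIx32 "\n", lookup(tbl, &mru, 8, 0x2801)); /* MRU hit */
	printf("0x%" PRIx32 "\n", lookup(tbl, &mru, 8, 0x9000)); /* miss */
	return 0;
}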
* diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c index 7a036ed8..9737da2e 100644 --- a/drivers/net/mlx4/mlx4_rxq.c +++ b/drivers/net/mlx4/mlx4_rxq.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** @@ -88,7 +88,7 @@ mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = { */ struct mlx4_rss * mlx4_rss_get(struct priv *priv, uint64_t fields, - uint8_t key[MLX4_RSS_HASH_KEY_SIZE], + const uint8_t key[MLX4_RSS_HASH_KEY_SIZE], uint16_t queues, const uint16_t queue_id[]) { struct mlx4_rss *rss; @@ -336,6 +336,14 @@ mlx4_rss_init(struct priv *priv) unsigned int i; int ret; + if (priv->rss_init) + return 0; + if (priv->dev->data->nb_rx_queues > priv->hw_rss_max_qps) { + ERROR("RSS does not support more than %d queues", + priv->hw_rss_max_qps); + rte_errno = EINVAL; + return -rte_errno; + } /* Prepare range for RSS contexts before creating the first WQ. */ ret = mlx4_glue->dv_set_context_attr (priv->ctx, @@ -418,6 +426,7 @@ wq_num_check: } wq_num_prev = wq_num; } + priv->rss_init = 1; return 0; error: ERROR("cannot initialize common RSS resources (queue %u): %s: %s", @@ -446,6 +455,8 @@ mlx4_rss_deinit(struct priv *priv) { unsigned int i; + if (!priv->rss_init) + return; for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) { struct rxq *rxq = priv->dev->data->rx_queues[i]; @@ -454,6 +465,7 @@ mlx4_rss_deinit(struct priv *priv) mlx4_rxq_detach(rxq); } } + priv->rss_init = 0; } /** @@ -482,6 +494,7 @@ mlx4_rxq_attach(struct rxq *rxq) } struct priv *priv = rxq->priv; + struct rte_eth_dev *dev = priv->dev; const uint32_t elts_n = 1 << rxq->elts_n; const uint32_t sges_n = 1 << rxq->sges_n; struct rte_mbuf *(*elts)[elts_n] = rxq->elts; @@ -491,6 +504,8 @@ mlx4_rxq_attach(struct rxq *rxq) const char *msg; struct ibv_cq *cq = NULL; struct ibv_wq *wq = NULL; + uint32_t create_flags = 0; + uint32_t comp_mask = 0; volatile struct mlx4_wqe_data_seg (*wqes)[]; unsigned int i; int ret; @@ -503,6 +518,11 @@ mlx4_rxq_attach(struct rxq *rxq) msg = "CQ creation failure"; goto error; } + /* By default, FCS (CRC) is stripped by hardware. */ + if (rxq->crc_present) { + create_flags |= IBV_WQ_FLAGS_SCATTER_FCS; + comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; + } wq = mlx4_glue->create_wq (priv->ctx, &(struct ibv_wq_init_attr){ @@ -511,6 +531,8 @@ mlx4_rxq_attach(struct rxq *rxq) .max_sge = sges_n, .pd = priv->pd, .cq = cq, + .comp_mask = comp_mask, + .create_flags = create_flags, }); if (!wq) { ret = errno ? errno : EINVAL; @@ -537,6 +559,11 @@ mlx4_rxq_attach(struct rxq *rxq) msg = "failed to obtain device information from WQ/CQ objects"; goto error; } + /* Pre-register Rx mempool. 
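Keeping the FCS has to be requested from Verbs at WQ creation time, which is what the create_flags/comp_mask pair above does; the create flag is only honoured when the matching comp_mask bit is set. A reduced sketch of that attribute setup, with error handling elided and assuming a ready protection domain and CQ:

#include <infiniband/verbs.h>

static struct ibv_wq *
create_rx_wq(struct ibv_context *ctx, struct ibv_pd *pd, struct ibv_cq *cq,
	     uint32_t max_wr, uint32_t max_sge, int keep_fcs)
{
	struct ibv_wq_init_attr attr = {
		.wq_type = IBV_WQT_RQ,
		.max_wr = max_wr,
		.max_sge = max_sge,
		.pd = pd,
		.cq = cq,
	};

	if (keep_fcs) {
		/* Both fields must be set together or the flag is ignored. */
		attr.comp_mask = IBV_WQ_INIT_ATTR_FLAGS;
		attr.create_flags = IBV_WQ_FLAGS_SCATTER_FCS;
	}
	return ibv_create_wq(ctx, &attr);
}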
*/ + DEBUG("port %u Rx queue %u registering mp %s having %u chunks", + priv->dev->data->port_id, rxq->stats.idx, + rxq->mp->name, rxq->mp->nb_mem_chunks); + mlx4_mr_update_mp(dev, &rxq->mr_ctrl, rxq->mp); wqes = (volatile struct mlx4_wqe_data_seg (*)[]) ((uintptr_t)dv_rwq.buf.buf + dv_rwq.rq.offset); for (i = 0; i != RTE_DIM(*elts); ++i) { @@ -568,7 +595,7 @@ mlx4_rxq_attach(struct rxq *rxq) .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)), .byte_count = rte_cpu_to_be_32(buf->data_len), - .lkey = rte_cpu_to_be_32(rxq->mr->lkey), + .lkey = mlx4_rx_mb2mr(rxq, buf), }; (*elts)[i] = buf; } @@ -597,6 +624,7 @@ error: claim_zero(mlx4_glue->destroy_wq(wq)); if (cq) claim_zero(mlx4_glue->destroy_cq(cq)); + --rxq->usecnt; rte_errno = ret; ERROR("error while attaching Rx queue %p: %s: %s", (void *)rxq, msg, strerror(ret)); @@ -650,7 +678,9 @@ uint64_t mlx4_get_rx_queue_offloads(struct priv *priv) { uint64_t offloads = DEV_RX_OFFLOAD_SCATTER | - DEV_RX_OFFLOAD_CRC_STRIP; + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_JUMBO_FRAME; if (priv->hw_csum) offloads |= DEV_RX_OFFLOAD_CHECKSUM; @@ -675,26 +705,6 @@ mlx4_get_rx_port_offloads(struct priv *priv) return offloads; } -/** - * Checks if the per-queue offload configuration is valid. - * - * @param priv - * Pointer to private structure. - * @param requested - * Per-queue offloads configuration. - * - * @return - * Nonzero when configuration is valid. - */ -static int -mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested) -{ - uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads; - uint64_t supported = mlx4_get_rx_port_offloads(priv); - - return !((mandatory ^ requested) & supported); -} - /** * DPDK callback to configure a Rx queue. * @@ -736,20 +746,14 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, }, }; int ret; + uint32_t crc_present; + uint64_t offloads; + + offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; - (void)conf; /* Thresholds configuration (ignored). */ DEBUG("%p: configuring queue %u for %u descriptors", (void *)dev, idx, desc); - if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) { - rte_errno = ENOTSUP; - ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port " - "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, - (void *)dev, conf->offloads, - dev->data->dev_conf.rxmode.offloads, - (mlx4_get_rx_port_offloads(priv) | - mlx4_get_rx_queue_offloads(priv))); - return -rte_errno; - } + if (idx >= dev->data->nb_rx_queues) { rte_errno = EOVERFLOW; ERROR("%p: queue index out of range (%u >= %u)", @@ -774,6 +778,23 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, " to the next power of two (%u)", (void *)dev, idx, desc); } + /* By default, FCS (CRC) is stripped by hardware. */ + crc_present = 0; + if (rte_eth_dev_must_keep_crc(offloads)) { + if (priv->hw_fcs_strip) { + crc_present = 1; + } else { + WARN("%p: CRC stripping has been disabled but will still" + " be performed by hardware, make sure MLNX_OFED and" + " firmware are up to date", + (void *)dev); + } + } + DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from" + " incoming frames to hide it", + (void *)dev, + crc_present ? "disabled" : "enabled", + crc_present << 2); /* Allocate and initialize Rx queue. 
*/ mlx4_zmallocv_socket("RXQ", vec, RTE_DIM(vec), socket); if (!rxq) { @@ -790,9 +811,10 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, .elts = elts, /* Toggle Rx checksum offload if hardware supports it. */ .csum = priv->hw_csum && - (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM), + (offloads & DEV_RX_OFFLOAD_CHECKSUM), .csum_l2tun = priv->hw_csum_l2tun && - (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM), + (offloads & DEV_RX_OFFLOAD_CHECKSUM), + .crc_present = crc_present, .l2tun_offload = priv->hw_csum_l2tun, .stats = { .idx = idx, @@ -804,7 +826,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) { ; - } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) { + } else if (offloads & DEV_RX_OFFLOAD_SCATTER) { uint32_t size = RTE_PKTMBUF_HEADROOM + dev->data->dev_conf.rxmode.max_rx_pkt_len; @@ -847,11 +869,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, 1 << rxq->sges_n); goto error; } - /* Use the entire Rx mempool as the memory region. */ - rxq->mr = mlx4_mr_get(priv, mp); - if (!rxq->mr) { - ERROR("%p: MR creation failure: %s", - (void *)dev, strerror(rte_errno)); + if (mlx4_mr_btree_init(&rxq->mr_ctrl.cache_bh, + MLX4_MR_BTREE_CACHE_N, socket)) { + /* rte_errno is already set. */ goto error; } if (dev->data->dev_conf.intr_conf.rxq) { @@ -911,7 +931,6 @@ mlx4_rx_queue_release(void *dpdk_rxq) assert(!rxq->rq_db); if (rxq->channel) claim_zero(mlx4_glue->destroy_comp_channel(rxq->channel)); - if (rxq->mr) - mlx4_mr_put(rxq->mr); + mlx4_mr_btree_free(&rxq->mr_ctrl.cache_bh); rte_free(rxq); } diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c index 8ca8b77c..8c88effc 100644 --- a/drivers/net/mlx4/mlx4_rxtx.c +++ b/drivers/net/mlx4/mlx4_rxtx.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** @@ -38,10 +38,29 @@ * DWORD (32 byte) of a TXBB. */ struct pv { - volatile struct mlx4_wqe_data_seg *dseg; + union { + volatile struct mlx4_wqe_data_seg *dseg; + volatile uint32_t *dst; + }; uint32_t val; }; +/** A helper structure for TSO packet handling. */ +struct tso_info { + /** Pointer to the array of saved first DWORD (32 byte) of a TXBB. */ + struct pv *pv; + /** Current entry in the pv array. */ + int pv_counter; + /** Total size of the WQE including padding. */ + uint32_t wqe_size; + /** Size of TSO header to prepend to each packet to send. */ + uint16_t tso_header_size; + /** Total size of the TSO segment in the WQE. */ + uint16_t wqe_tso_seg_size; + /** Raw WQE size in units of 16 Bytes and without padding. */ + uint8_t fence_size; +}; + /** A table to translate Rx completion flags to packet type. */ uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = { /* @@ -52,49 +71,58 @@ uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = { * bit[4] - MLX4_CQE_STATUS_TCP * bit[3] - MLX4_CQE_STATUS_IPV4OPT * bit[2] - MLX4_CQE_STATUS_IPV6 - * bit[1] - MLX4_CQE_STATUS_IPV4F + * bit[1] - MLX4_CQE_STATUS_IPF * bit[0] - MLX4_CQE_STATUS_IPV4 * giving a total of up to 256 entries. 
*/ + /* L2 */ [0x00] = RTE_PTYPE_L2_ETHER, + /* L3 */ [0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG, [0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, [0x03] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, - [0x04] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, - [0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT, + [0x04] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [0x08] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_L4_NONFRAG, + [0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_L4_NONFRAG, [0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG, + [0x0b] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_L4_FRAG, + /* TCP */ [0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP, - [0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_TCP, [0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP, + [0x16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, [0x18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP, [0x19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP, - [0x1a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | - RTE_PTYPE_L4_TCP, + /* UDP */ [0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP, - [0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_UDP, [0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP, + [0x26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, [0x28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP, [0x29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP, - [0x2a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | - RTE_PTYPE_L4_UDP, /* Tunneled - L3 IPV6 */ [0x80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, [0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, [0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_FRAG, @@ -102,65 +130,58 @@ uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = { RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_FRAG, [0x84] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, [0x88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT, + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_NONFRAG, [0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT, + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_NONFRAG, [0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG, + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_FRAG, + [0x8b] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_FRAG, /* Tunneled - L3 IPV6, TCP */ [0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP, - [0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG | - RTE_PTYPE_INNER_L4_TCP, - [0x93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG | - RTE_PTYPE_INNER_L4_TCP, [0x94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP, + [0x96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, [0x98] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT | - RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP, [0x99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT | - RTE_PTYPE_INNER_L4_TCP, - [0x9a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG | - RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP, /* Tunneled - L3 IPV6, UDP */ - [0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - [0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + [0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG | RTE_PTYPE_INNER_L4_UDP, - [0xa3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG | - RTE_PTYPE_INNER_L4_UDP, - [0xa4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + [0xa4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP, - [0xa8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + [0xa6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0xa8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP, - [0xa9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + [0xa9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP, - [0xaa] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG | - RTE_PTYPE_INNER_L4_UDP, /* Tunneled - L3 IPV4 */ [0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, [0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, [0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_FRAG, @@ -168,65 +189,54 @@ uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = { RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_FRAG, [0xc4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, [0xc8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT, + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_NONFRAG, [0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - 
RTE_PTYPE_INNER_L3_IPV4_EXT, + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_NONFRAG, [0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG, + [0xcb] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_FRAG, /* Tunneled - L3 IPV4, TCP */ - [0xd0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, [0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP, - [0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG | - RTE_PTYPE_INNER_L4_TCP, - [0xd3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG | - RTE_PTYPE_INNER_L4_TCP, [0xd4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP, + [0xd6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, [0xd8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP, [0xd9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP, - [0xda] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG | - RTE_PTYPE_INNER_L4_TCP, /* Tunneled - L3 IPV4, UDP */ - [0xe0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, [0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP, - [0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG | - RTE_PTYPE_INNER_L4_UDP, - [0xe3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG | - RTE_PTYPE_INNER_L4_UDP, [0xe4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP, + [0xe6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, [0xe8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_UDP, [0xe9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP, - [0xea] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP, }; @@ -263,7 +273,7 @@ mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, volatile uint32_t *start, } while (start != (volatile uint32_t *)sq->eob); start = (volatile uint32_t *)sq->buf; /* Flip invalid stamping ownership. */ - stamp ^= RTE_BE32(0x1 << MLX4_SQ_OWNER_BIT); + stamp ^= RTE_BE32(1u << MLX4_SQ_OWNER_BIT); sq->stamp = stamp; if (start == end) return size; @@ -343,24 +353,6 @@ mlx4_txq_complete(struct txq *txq, const unsigned int elts_m, txq->elts_tail = elts_tail; } -/** - * Get memory pool (MP) from mbuf. If mbuf is indirect, the pool from which - * the cloned mbuf is allocated is returned instead. - * - * @param buf - * Pointer to mbuf. 
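The rewritten table above is indexed by an 8-bit value packed straight out of the CQE status word; the Rx path shown later folds the new IPV6F bit (bit 12) down into the IPF position (bit 1) only for IPv6 packets. A worked sketch of that index computation; the PTYPE mask value and the TCP bit position are assumptions for illustration, the real definitions live in mlx4_prm.h:

#include <stdint.h>
#include <stdio.h>

#define CQE_STATUS_IPV6F (1u << 12) /* From the mlx4_prm.h hunk above. */
#define CQE_STATUS_IPV4  (1u << 22)
#define CQE_STATUS_IPV6  (1u << 24)
#define CQE_STATUS_TCP   (1u << 26) /* assumption: position per table comment */
#define CQE_STATUS_PTYPE_MASK 0x3fc00000u /* assumption: bits 22-29 */

static uint8_t
ptype_idx(uint32_t status)
{
	uint8_t idx = 0;

	/* Pack CQE status bits 22..29 into table bits 0..7. */
	idx |= (status & CQE_STATUS_PTYPE_MASK) >> 22;
	/* Fold the IPv6 fragment flag (bit 12) into bit 1 (IPF). */
	if (status & CQE_STATUS_IPV6)
		idx |= (status & CQE_STATUS_IPV6F) >> 11;
	return idx;
}

int
main(void)
{
	/* IPv4 + TCP -> 0x11; fragmented IPv6 -> 0x06. */
	printf("0x%02x\n", ptype_idx(CQE_STATUS_IPV4 | CQE_STATUS_TCP));
	printf("0x%02x\n", ptype_idx(CQE_STATUS_IPV6 | CQE_STATUS_IPV6F));
	return 0;
}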
- * - * @return - * Memory pool where data is located for given mbuf. - */ -static struct rte_mempool * -mlx4_txq_mb2mp(struct rte_mbuf *buf) -{ - if (unlikely(RTE_MBUF_INDIRECT(buf))) - return rte_mbuf_from_indirect(buf)->pool; - return buf->pool; -} - /** * Write Tx data segment to the SQ. * @@ -378,7 +370,7 @@ mlx4_fill_tx_data_seg(volatile struct mlx4_wqe_data_seg *dseg, uint32_t lkey, uintptr_t addr, rte_be32_t byte_count) { dseg->addr = rte_cpu_to_be_64(addr); - dseg->lkey = rte_cpu_to_be_32(lkey); + dseg->lkey = lkey; #if RTE_CACHE_LINE_SIZE < 64 /* * Need a barrier here before writing the byte_count * @@ -394,6 +386,342 @@ mlx4_fill_tx_data_seg(volatile struct mlx4_wqe_data_seg *dseg, dseg->byte_count = byte_count; } +/** + * Obtain and calculate TSO information needed for assembling a TSO WQE. + * + * @param buf + * Pointer to the first packet mbuf. + * @param txq + * Pointer to Tx queue structure. + * @param tinfo + * Pointer to a structure to fill the info with. + * + * @return + * 0 on success, negative value upon error. + */ +static inline int +mlx4_tx_burst_tso_get_params(struct rte_mbuf *buf, + struct txq *txq, + struct tso_info *tinfo) +{ + struct mlx4_sq *sq = &txq->msq; + const uint8_t tunneled = txq->priv->hw_csum_l2tun && + (buf->ol_flags & PKT_TX_TUNNEL_MASK); + + tinfo->tso_header_size = buf->l2_len + buf->l3_len + buf->l4_len; + if (tunneled) + tinfo->tso_header_size += + buf->outer_l2_len + buf->outer_l3_len; + if (unlikely(buf->tso_segsz == 0 || + tinfo->tso_header_size == 0 || + tinfo->tso_header_size > MLX4_MAX_TSO_HEADER || + tinfo->tso_header_size > buf->data_len)) + return -EINVAL; + /* + * Calculate the WQE TSO segment size. + * Note: + * 1. An LSO segment must be padded such that the subsequent data + * segment is 16-byte aligned. + * 2. The start address of the TSO segment is always 16-byte aligned. + */ + tinfo->wqe_tso_seg_size = RTE_ALIGN(sizeof(struct mlx4_wqe_lso_seg) + + tinfo->tso_header_size, + sizeof(struct mlx4_wqe_data_seg)); + tinfo->fence_size = ((sizeof(struct mlx4_wqe_ctrl_seg) + + tinfo->wqe_tso_seg_size) >> MLX4_SEG_SHIFT) + + buf->nb_segs; + tinfo->wqe_size = + RTE_ALIGN((uint32_t)(tinfo->fence_size << MLX4_SEG_SHIFT), + MLX4_TXBB_SIZE); + /* Validate WQE size and WQE space in the send queue. */ + if (sq->remain_size < tinfo->wqe_size || + tinfo->wqe_size > MLX4_MAX_WQE_SIZE) + return -ENOMEM; + /* Init pv. */ + tinfo->pv = (struct pv *)txq->bounce_buf; + tinfo->pv_counter = 0; + return 0; +} + +/** + * Fill the TSO WQE data segments with info on buffers to transmit. + * + * @param buf + * Pointer to the first packet mbuf. + * @param txq + * Pointer to Tx queue structure. + * @param tinfo + * Pointer to TSO info to use. + * @param dseg + * Pointer to the first data segment in the TSO WQE. + * @param ctrl + * Pointer to the control segment in the TSO WQE. + * + * @return + * Pointer to the next WQE control segment on success, NULL otherwise. 
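The sizing arithmetic in mlx4_tx_burst_tso_get_params() is easier to follow with numbers. A worked sketch with plain constants (16-byte WQE segments, 64-byte TXBBs; MLX4_SEG_SHIFT is assumed to be 4, matching those sizes) for a 54-byte Ethernet/IPv4/TCP header and a 3-segment mbuf chain:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int
main(void)
{
	const uint32_t seg = 16;     /* WQE segment granularity. */
	const uint32_t txbb = 64;    /* Tx basic block size. */
	const uint32_t lso_hdr = 4;  /* sizeof the mss_hdr_size field. */
	const uint32_t tso_header_size = 54; /* Eth 14 + IPv4 20 + TCP 20. */
	const uint32_t nb_segs = 3;  /* Example mbuf chain length. */

	/* LSO segment padded so the next data segment is 16-byte aligned. */
	uint32_t wqe_tso_seg_size = ALIGN_UP(lso_hdr + tso_header_size, seg);
	/* fence_size counts 16-byte units: ctrl (16 B) + LSO segment, plus
	 * one data segment per mbuf. */
	uint32_t fence_size = ((16 + wqe_tso_seg_size) >> 4) + nb_segs;
	/* Whole WQE rounded up to full TXBBs. */
	uint32_t wqe_size = ALIGN_UP(fence_size << 4, txbb);

	printf("lso seg %u, fence %u, wqe %u\n",
	       wqe_tso_seg_size, fence_size, wqe_size); /* 64, 8, 128 */
	return 0;
}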
+ */ +static inline volatile struct mlx4_wqe_ctrl_seg * +mlx4_tx_burst_fill_tso_dsegs(struct rte_mbuf *buf, + struct txq *txq, + struct tso_info *tinfo, + volatile struct mlx4_wqe_data_seg *dseg, + volatile struct mlx4_wqe_ctrl_seg *ctrl) +{ + uint32_t lkey; + int nb_segs = buf->nb_segs; + int nb_segs_txbb; + struct mlx4_sq *sq = &txq->msq; + struct rte_mbuf *sbuf = buf; + struct pv *pv = tinfo->pv; + int *pv_counter = &tinfo->pv_counter; + volatile struct mlx4_wqe_ctrl_seg *ctrl_next = + (volatile struct mlx4_wqe_ctrl_seg *) + ((volatile uint8_t *)ctrl + tinfo->wqe_size); + uint16_t data_len = sbuf->data_len - tinfo->tso_header_size; + uintptr_t data_addr = rte_pktmbuf_mtod_offset(sbuf, uintptr_t, + tinfo->tso_header_size); + + do { + /* how many dseg entries do we have in the current TXBB ? */ + nb_segs_txbb = (MLX4_TXBB_SIZE - + ((uintptr_t)dseg & (MLX4_TXBB_SIZE - 1))) >> + MLX4_SEG_SHIFT; + switch (nb_segs_txbb) { +#ifndef NDEBUG + default: + /* Should never happen. */ + rte_panic("%p: Invalid number of SGEs(%d) for a TXBB", + (void *)txq, nb_segs_txbb); + /* rte_panic never returns. */ + break; +#endif /* NDEBUG */ + case 4: + /* Memory region key for this memory pool. */ + lkey = mlx4_tx_mb2mr(txq, sbuf); + if (unlikely(lkey == (uint32_t)-1)) + goto err; + dseg->addr = rte_cpu_to_be_64(data_addr); + dseg->lkey = lkey; + /* + * This data segment starts at the beginning of a new + * TXBB, so we need to postpone its byte_count writing + * for later. + */ + pv[*pv_counter].dseg = dseg; + /* + * Zero length segment is treated as inline segment + * with zero data. + */ + pv[(*pv_counter)++].val = + rte_cpu_to_be_32(data_len ? + data_len : + 0x80000000); + if (--nb_segs == 0) + return ctrl_next; + /* Prepare next buf info */ + sbuf = sbuf->next; + dseg++; + data_len = sbuf->data_len; + data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t); + /* fallthrough */ + case 3: + lkey = mlx4_tx_mb2mr(txq, sbuf); + if (unlikely(lkey == (uint32_t)-1)) + goto err; + mlx4_fill_tx_data_seg(dseg, lkey, data_addr, + rte_cpu_to_be_32(data_len ? + data_len : + 0x80000000)); + if (--nb_segs == 0) + return ctrl_next; + /* Prepare next buf info */ + sbuf = sbuf->next; + dseg++; + data_len = sbuf->data_len; + data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t); + /* fallthrough */ + case 2: + lkey = mlx4_tx_mb2mr(txq, sbuf); + if (unlikely(lkey == (uint32_t)-1)) + goto err; + mlx4_fill_tx_data_seg(dseg, lkey, data_addr, + rte_cpu_to_be_32(data_len ? + data_len : + 0x80000000)); + if (--nb_segs == 0) + return ctrl_next; + /* Prepare next buf info */ + sbuf = sbuf->next; + dseg++; + data_len = sbuf->data_len; + data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t); + /* fallthrough */ + case 1: + lkey = mlx4_tx_mb2mr(txq, sbuf); + if (unlikely(lkey == (uint32_t)-1)) + goto err; + mlx4_fill_tx_data_seg(dseg, lkey, data_addr, + rte_cpu_to_be_32(data_len ? + data_len : + 0x80000000)); + if (--nb_segs == 0) + return ctrl_next; + /* Prepare next buf info */ + sbuf = sbuf->next; + dseg++; + data_len = sbuf->data_len; + data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t); + /* fallthrough */ + } + /* Wrap dseg if it points at the end of the queue. */ + if ((volatile uint8_t *)dseg >= sq->eob) + dseg = (volatile struct mlx4_wqe_data_seg *) + ((volatile uint8_t *)dseg - sq->size); + } while (true); +err: + return NULL; +} + +/** + * Fill the packet's l2, l3 and l4 headers to the WQE. + * + * This will be used as the header for each TSO segment that is transmitted. + * + * @param buf + * Pointer to the first packet mbuf. 
+ * @param txq + * Pointer to Tx queue structure. + * @param tinfo + * Pointer to TSO info to use. + * @param ctrl + * Pointer to the control segment in the TSO WQE. + * + * @return + * Pointer to the first data segment of the TSO WQE. + */ +static inline volatile struct mlx4_wqe_data_seg * +mlx4_tx_burst_fill_tso_hdr(struct rte_mbuf *buf, + struct txq *txq, + struct tso_info *tinfo, + volatile struct mlx4_wqe_ctrl_seg *ctrl) +{ + volatile struct mlx4_wqe_lso_seg *tseg = + (volatile struct mlx4_wqe_lso_seg *)(ctrl + 1); + struct mlx4_sq *sq = &txq->msq; + struct pv *pv = tinfo->pv; + int *pv_counter = &tinfo->pv_counter; + int remain_size = tinfo->tso_header_size; + char *from = rte_pktmbuf_mtod(buf, char *); + uint16_t txbb_avail_space; + /* Union to overcome volatile constraints when copying TSO header. */ + union { + volatile uint8_t *vto; + uint8_t *to; + } thdr = { .vto = (volatile uint8_t *)tseg->header, }; + + /* + * TSO data always starts at offset 20 from the beginning of the TXBB + * (16-byte ctrl + 4-byte TSO desc). Since each TXBB is 64-byte aligned, + * we can write the first 44 TSO header bytes without worrying about TxQ + * wrapping or overwriting the first 32-bit word of the TXBB. + */ + txbb_avail_space = MLX4_TXBB_SIZE - + (sizeof(struct mlx4_wqe_ctrl_seg) + + sizeof(struct mlx4_wqe_lso_seg)); + while (remain_size >= (int)(txbb_avail_space + sizeof(uint32_t))) { + /* Copy to end of txbb. */ + rte_memcpy(thdr.to, from, txbb_avail_space); + from += txbb_avail_space; + thdr.to += txbb_avail_space; + /* New TXBB, check for TxQ wrap. */ + if (thdr.to >= sq->eob) + thdr.vto = sq->buf; + /* New TXBB, stash the first 32 bits for later use. */ + pv[*pv_counter].dst = (volatile uint32_t *)thdr.to; + pv[(*pv_counter)++].val = *(uint32_t *)from; + from += sizeof(uint32_t); + thdr.to += sizeof(uint32_t); + remain_size -= txbb_avail_space + sizeof(uint32_t); + /* Available space in a new TXBB is TXBB size - 4. */ + txbb_avail_space = MLX4_TXBB_SIZE - sizeof(uint32_t); + } + if (remain_size > txbb_avail_space) { + rte_memcpy(thdr.to, from, txbb_avail_space); + from += txbb_avail_space; + thdr.to += txbb_avail_space; + remain_size -= txbb_avail_space; + /* New TXBB, check for TxQ wrap. */ + if (thdr.to >= sq->eob) + thdr.vto = sq->buf; + pv[*pv_counter].dst = (volatile uint32_t *)thdr.to; + rte_memcpy(&pv[*pv_counter].val, from, remain_size); + (*pv_counter)++; + } else if (remain_size) { + rte_memcpy(thdr.to, from, remain_size); + } + tseg->mss_hdr_size = rte_cpu_to_be_32((buf->tso_segsz << 16) | + tinfo->tso_header_size); + /* Calculate the data segment location. */ + return (volatile struct mlx4_wqe_data_seg *) + ((uintptr_t)tseg + tinfo->wqe_tso_seg_size); +} + +/** + * Write data segments and header for a TSO uni/multi-segment packet. + * + * @param buf + * Pointer to the first packet mbuf. + * @param txq + * Pointer to Tx queue structure. + * @param ctrl + * Pointer to the WQE control segment. + * + * @return + * Pointer to the next WQE control segment on success, NULL otherwise. 
+ */ +static volatile struct mlx4_wqe_ctrl_seg * +mlx4_tx_burst_tso(struct rte_mbuf *buf, struct txq *txq, + volatile struct mlx4_wqe_ctrl_seg *ctrl) +{ + volatile struct mlx4_wqe_data_seg *dseg; + volatile struct mlx4_wqe_ctrl_seg *ctrl_next; + struct mlx4_sq *sq = &txq->msq; + struct tso_info tinfo; + struct pv *pv; + int pv_counter; + int ret; + + ret = mlx4_tx_burst_tso_get_params(buf, txq, &tinfo); + if (unlikely(ret)) + goto error; + dseg = mlx4_tx_burst_fill_tso_hdr(buf, txq, &tinfo, ctrl); + if (unlikely(dseg == NULL)) + goto error; + if ((uintptr_t)dseg >= (uintptr_t)sq->eob) + dseg = (volatile struct mlx4_wqe_data_seg *) + ((uintptr_t)dseg - sq->size); + ctrl_next = mlx4_tx_burst_fill_tso_dsegs(buf, txq, &tinfo, dseg, ctrl); + if (unlikely(ctrl_next == NULL)) + goto error; + /* Write the first DWORD of each TXBB saved earlier. */ + if (likely(tinfo.pv_counter)) { + pv = tinfo.pv; + pv_counter = tinfo.pv_counter; + /* Need a barrier here before writing the first TXBB word. */ + rte_io_wmb(); + do { + --pv_counter; + *pv[pv_counter].dst = pv[pv_counter].val; + } while (pv_counter > 0); + } + ctrl->fence_size = tinfo.fence_size; + sq->remain_size -= tinfo.wqe_size; + return ctrl_next; +error: + txq->stats.odropped++; + return NULL; +} + /** * Write data segments of multi-segment packet. * @@ -437,7 +765,7 @@ mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq, goto txbb_tail_segs; txbb_head_seg: /* Memory region key (big endian) for this memory pool. */ - lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf)); + lkey = mlx4_tx_mb2mr(txq, sbuf); if (unlikely(lkey == (uint32_t)-1)) { DEBUG("%p: unable to get MP <-> MR association", (void *)txq); @@ -449,7 +777,7 @@ txbb_head_seg: dseg = (volatile struct mlx4_wqe_data_seg *) sq->buf; dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(sbuf, uintptr_t)); - dseg->lkey = rte_cpu_to_be_32(lkey); + dseg->lkey = lkey; /* * This data segment starts at the beginning of a new * TXBB, so we need to postpone its byte_count writing * @@ -469,7 +797,7 @@ txbb_tail_segs: /* Jump to default if there are more than two segments remaining. */ switch (nb_segs) { default: - lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf)); + lkey = mlx4_tx_mb2mr(txq, sbuf); if (unlikely(lkey == (uint32_t)-1)) { DEBUG("%p: unable to get MP <-> MR association", (void *)txq); @@ -485,7 +813,7 @@ txbb_tail_segs: nb_segs--; /* fallthrough */ case 2: - lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf)); + lkey = mlx4_tx_mb2mr(txq, sbuf); if (unlikely(lkey == (uint32_t)-1)) { DEBUG("%p: unable to get MP <-> MR association", (void *)txq); @@ -501,7 +829,7 @@ txbb_tail_segs: nb_segs--; /* fallthrough */ case 1: - lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf)); + lkey = mlx4_tx_mb2mr(txq, sbuf); if (unlikely(lkey == (uint32_t)-1)) { DEBUG("%p: unable to get MP <-> MR association", (void *)txq); @@ -587,6 +915,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) uint16_t flags16[2]; } srcrb; uint32_t lkey; + bool tso = txq->priv->tso && (buf->ol_flags & PKT_TX_TCP_SEG); /* Clean up old buffer. 
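The bounce array (pv) logic above is the ownership handshake with the HCA: the first DWORD of every TXBB is what hardware polls, so it must be published only after every other byte of the WQE is globally visible, in reverse order of saving. A generic sketch of the discipline, with toy types; io_wmb() stands in for rte_io_wmb():

#include <stdint.h>

/* Stand-in for rte_io_wmb(); a compiler barrier suffices for this sketch. */
#define io_wmb() __asm__ volatile("" ::: "memory")

struct saved_word {
	volatile uint32_t *dst; /* First DWORD of a TXBB. */
	uint32_t val;           /* Value to publish last. */
};

static void
publish_wqe(struct saved_word *pv, int pv_counter)
{
	/* All non-first DWORDs were already written by the caller. */
	io_wmb();
	/* Now hand each TXBB to hardware, last-saved first, exactly as
	 * mlx4_tx_burst_tso() does above. */
	while (pv_counter > 0) {
		--pv_counter;
		*pv[pv_counter].dst = pv[pv_counter].val;
	}
}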
*/ if (likely(elt->buf != NULL)) { @@ -605,13 +934,22 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) } while (tmp != NULL); } RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf); - if (buf->nb_segs == 1) { + if (tso) { + /* Change opcode to TSO */ + owner_opcode &= ~MLX4_OPCODE_CONFIG_CMD; + owner_opcode |= MLX4_OPCODE_LSO | MLX4_WQE_CTRL_RR; + ctrl_next = mlx4_tx_burst_tso(buf, txq, ctrl); + if (!ctrl_next) { + elt->buf = NULL; + break; + } + } else if (buf->nb_segs == 1) { /* Validate WQE space in the send queue. */ if (sq->remain_size < MLX4_TXBB_SIZE) { elt->buf = NULL; break; } - lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf)); + lkey = mlx4_tx_mb2mr(txq, buf); if (unlikely(lkey == (uint32_t)-1)) { /* MR does not exist. */ DEBUG("%p: unable to get MP <-> MR association", @@ -639,7 +977,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) ctrl_next = (volatile struct mlx4_wqe_ctrl_seg *) ((volatile uint8_t *)ctrl_next - sq->size); /* Flip HW valid ownership. */ - sq->owner_opcode ^= 0x1 << MLX4_SQ_OWNER_BIT; + sq->owner_opcode ^= 1u << MLX4_SQ_OWNER_BIT; } /* * For raw Ethernet, the SOLICIT flag is used to indicate @@ -746,11 +1084,13 @@ rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe, * bit[4] - MLX4_CQE_STATUS_TCP * bit[3] - MLX4_CQE_STATUS_IPV4OPT * bit[2] - MLX4_CQE_STATUS_IPV6 - * bit[1] - MLX4_CQE_STATUS_IPV4F + * bit[1] - MLX4_CQE_STATUS_IPF * bit[0] - MLX4_CQE_STATUS_IPV4 * giving a total of up to 256 entries. */ idx |= ((status & MLX4_CQE_STATUS_PTYPE_MASK) >> 22); + if (status & MLX4_CQE_STATUS_IPV6) + idx |= ((status & MLX4_CQE_STATUS_IPV6F) >> 11); return mlx4_ptype_table[idx]; } @@ -934,11 +1274,14 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) goto skip; } pkt = seg; + assert(len >= (rxq->crc_present << 2)); /* Update packet information. */ pkt->packet_type = rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload); pkt->ol_flags = PKT_RX_RSS_HASH; pkt->hash.rss = cqe->immed_rss_invalid; + if (rxq->crc_present) + len -= ETHER_CRC_LEN; pkt->pkt_len = len; if (rxq->csum | rxq->csum_l2tun) { uint32_t flags = @@ -963,6 +1306,9 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) * changes. */ scat->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t)); + /* If there's only one MR, no need to replace LKey in WQE. */ + if (unlikely(mlx4_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) + scat->lkey = mlx4_rx_mb2mr(rxq, rep); if (len > seg->data_len) { len -= seg->data_len; ++pkt->nb_segs; diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h index c12bd39a..ffa8abfc 100644 --- a/drivers/net/mlx4/mlx4_rxtx.h +++ b/drivers/net/mlx4/mlx4_rxtx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef MLX4_RXTX_H_ @@ -25,6 +25,7 @@ #include "mlx4.h" #include "mlx4_prm.h" +#include "mlx4_mr.h" /** Rx queue counters. */ struct mlx4_rxq_stats { @@ -39,7 +40,6 @@ struct mlx4_rxq_stats { struct rxq { struct priv *priv; /**< Back pointer to private data. */ struct rte_mempool *mp; /**< Memory pool for allocations. */ - struct mlx4_mr *mr; /**< Memory region. */ struct ibv_cq *cq; /**< Completion queue. */ struct ibv_wq *wq; /**< Work queue. */ struct ibv_comp_channel *channel; /**< Rx completion channel. */ @@ -47,11 +47,13 @@ struct rxq { uint16_t port_id; /**< Port ID for incoming packets. */ uint16_t sges_n; /**< Number of segments per packet (log2 value). 
*/ uint16_t elts_n; /**< Mbuf queue size (log2 value). */ + struct mlx4_mr_ctrl mr_ctrl; /* MR control descriptor. */ struct rte_mbuf *(*elts)[]; /**< Rx elements. */ volatile struct mlx4_wqe_data_seg (*wqes)[]; /**< HW queue entries. */ volatile uint32_t *rq_db; /**< RQ doorbell record. */ uint32_t csum:1; /**< Enable checksum offloading. */ uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */ + uint32_t crc_present:1; /**< CRC must be subtracted. */ uint32_t l2tun_offload:1; /**< L2 tunnel offload is enabled. */ struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */ struct mlx4_rxq_stats stats; /**< Rx queue counters. */ @@ -83,12 +85,12 @@ struct txq_elt { }; }; -/** Rx queue counters. */ +/** Tx queue counters. */ struct mlx4_txq_stats { unsigned int idx; /**< Mapping index. */ uint64_t opackets; /**< Total of successfully sent packets. */ uint64_t obytes; /**< Total of successfully sent bytes. */ - uint64_t odropped; /**< Total of packets not sent when Tx ring full. */ + uint64_t odropped; /**< Total number of packets failed to transmit. */ }; /** Tx queue descriptor. */ @@ -100,6 +102,7 @@ struct txq { int elts_comp_cd; /**< Countdown for next completion. */ unsigned int elts_comp_cd_init; /**< Initial value for countdown. */ unsigned int elts_n; /**< (*elts)[] length. */ + struct mlx4_mr_ctrl mr_ctrl; /* MR control descriptor. */ struct txq_elt (*elts)[]; /**< Tx elements. */ struct mlx4_txq_stats stats; /**< Tx queue counters. */ uint32_t max_inline; /**< Max inline send size. */ @@ -108,11 +111,6 @@ struct txq { uint32_t lb:1; /**< Whether packets should be looped back by eSwitch. */ uint8_t *bounce_buf; /**< Memory used for storing the first DWORD of data TXBBs. */ - struct { - const struct rte_mempool *mp; /**< Cached memory pool. */ - struct mlx4_mr *mr; /**< Memory region (for mp). */ - uint32_t lkey; /**< mr->lkey copy. */ - } mp2mr[MLX4_PMD_TX_MP_CACHE]; /**< MP to MR translation table. */ struct priv *priv; /**< Back pointer to private data. */ unsigned int socket; /**< CPU socket ID for allocations. */ struct ibv_cq *cq; /**< Completion queue. */ @@ -126,7 +124,7 @@ uint8_t mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE]; int mlx4_rss_init(struct priv *priv); void mlx4_rss_deinit(struct priv *priv); struct mlx4_rss *mlx4_rss_get(struct priv *priv, uint64_t fields, - uint8_t key[MLX4_RSS_HASH_KEY_SIZE], + const uint8_t key[MLX4_RSS_HASH_KEY_SIZE], uint16_t queues, const uint16_t queue_id[]); void mlx4_rss_put(struct mlx4_rss *rss); int mlx4_rss_attach(struct mlx4_rss *rss); @@ -160,34 +158,70 @@ int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, const struct rte_eth_txconf *conf); void mlx4_tx_queue_release(void *dpdk_txq); +/* mlx4_mr.c */ + +void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl); +uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr); +uint32_t mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr); + /** - * Get memory region (MR) <-> memory pool (MP) association from txq->mp2mr[]. - * Call mlx4_txq_add_mr() if MP is not registered yet. + * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx + * as mempool is pre-configured and static. + * + * @param rxq + * Pointer to Rx queue structure. + * @param addr + * Address to search. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static __rte_always_inline uint32_t +mlx4_rx_addr2mr(struct rxq *rxq, uintptr_t addr) +{ + struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl; + uint32_t lkey; + + /* Linear search on MR cache array. 
*/ + lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru, + MLX4_MR_CACHE_N, addr); + if (likely(lkey != UINT32_MAX)) + return lkey; + /* Take slower bottom-half (binary search) on miss. */ + return mlx4_rx_addr2mr_bh(rxq, addr); +} + +#define mlx4_rx_mb2mr(rxq, mb) mlx4_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr)) + +/** + * Query LKey from a packet buffer for Tx. If not found, add the mempool. * * @param txq * Pointer to Tx queue structure. - * @param[in] mp - * Memory pool for which a memory region lkey must be returned. + * @param addr + * Address to search. * * @return - * mr->lkey on success, (uint32_t)-1 on failure. + * Searched LKey on success, UINT32_MAX on no match. */ -static inline uint32_t -mlx4_txq_mp2mr(struct txq *txq, struct rte_mempool *mp) +static __rte_always_inline uint32_t +mlx4_tx_addr2mr(struct txq *txq, uintptr_t addr) { - unsigned int i; - - for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) { - if (unlikely(txq->mp2mr[i].mp == NULL)) { - /* Unknown MP, add a new MR for it. */ - break; - } - if (txq->mp2mr[i].mp == mp) { - /* MP found MP. */ - return txq->mp2mr[i].lkey; - } - } - return mlx4_txq_add_mr(txq, mp, i); + struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl; + uint32_t lkey; + + /* Check the generation number to see if there's any change on existing MRs. */ + if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen)) + mlx4_mr_flush_local_cache(mr_ctrl); + /* Linear search on MR cache array. */ + lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru, + MLX4_MR_CACHE_N, addr); + if (likely(lkey != UINT32_MAX)) + return lkey; + /* Take slower bottom-half (binary search) on miss. */ + return mlx4_tx_addr2mr_bh(txq, addr); } +#define mlx4_tx_mb2mr(txq, mb) mlx4_tx_addr2mr(txq, (uintptr_t)((mb)->buf_addr)) + #endif /* MLX4_RXTX_H_ */ diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c index 071b2d5d..9aa7440d 100644 --- a/drivers/net/mlx4/mlx4_txq.c +++ b/drivers/net/mlx4/mlx4_txq.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** @@ -63,64 +63,6 @@ mlx4_txq_free_elts(struct txq *txq) txq->elts_tail = txq->elts_head; } -struct txq_mp2mr_mbuf_check_data { - int ret; -}; - -/** - * Callback function for rte_mempool_obj_iter() to check whether a given - * mempool object looks like a mbuf. - * - * @param[in] mp - * The mempool pointer - * @param[in] arg - * Context data (struct mlx4_txq_mp2mr_mbuf_check_data). Contains the - * return value. - * @param[in] obj - * Object address. - * @param index - * Object index, unused. - */ -static void -mlx4_txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj, - uint32_t index) -{ - struct txq_mp2mr_mbuf_check_data *data = arg; - struct rte_mbuf *buf = obj; - - (void)index; - /* - * Check whether mbuf structure fits element size and whether mempool - * pointer is valid. - */ - if (sizeof(*buf) > mp->elt_size || buf->pool != mp) - data->ret = -1; -} - -/** - * Iterator function for rte_mempool_walk() to register existing mempools and - * fill the MP to MR cache of a Tx queue. - * - * @param[in] mp - * Memory Pool to register. - * @param *arg - * Pointer to Tx queue structure. - */ -static void -mlx4_txq_mp2mr_iter(struct rte_mempool *mp, void *arg) -{ - struct txq *txq = arg; - struct txq_mp2mr_mbuf_check_data data = { - .ret = 0, - }; - - /* Register mempool only if the first element looks like a mbuf. 
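The dev_gen_ptr/cur_gen pair turns a global "memory layout changed" event into a cheap per-queue check: the control path bumps one shared counter, and each datapath queue lazily flushes its private cache the next time the numbers disagree. A minimal sketch of the protocol with toy types; no locking is shown on the reader side because the flush merely drops cached entries:

#include <stdint.h>
#include <string.h>

struct dev_state {
	uint32_t gen; /* Bumped by the control path on MR create/free. */
};

struct queue_cache {
	uint32_t *dev_gen_ptr; /* Points at dev_state.gen. */
	uint32_t cur_gen;      /* Generation the cache was built against. */
	uint32_t lkeys[8];     /* Toy stand-in for the MR cache entries. */
};

/* Control path: invalidate every queue's cache in O(1). */
static void
dev_gen_bump(struct dev_state *dev)
{
	dev->gen++;
}

/* Datapath: flush lazily when the generation moved. */
static void
cache_maybe_flush(struct queue_cache *qc)
{
	if (*qc->dev_gen_ptr != qc->cur_gen) {
		memset(qc->lkeys, 0, sizeof(qc->lkeys));
		qc->cur_gen = *qc->dev_gen_ptr;
	}
}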
*/ - if (rte_mempool_obj_iter(mp, mlx4_txq_mp2mr_mbuf_check, &data) == 0 || - data.ret == -1) - return; - mlx4_txq_mp2mr(txq, mp); -} - /** * Retrieves information needed in order to directly access the Tx queue. * @@ -144,9 +86,9 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv) uint32_t headroom_size = 2048 + (1 << dqp->sq.wqe_shift); /* Continuous headroom size bytes must always stay freed. */ sq->remain_size = sq->size - headroom_size; - sq->owner_opcode = MLX4_OPCODE_SEND | (0 << MLX4_SQ_OWNER_BIT); + sq->owner_opcode = MLX4_OPCODE_SEND | (0u << MLX4_SQ_OWNER_BIT); sq->stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL | - (0 << MLX4_SQ_OWNER_BIT)); + (0u << MLX4_SQ_OWNER_BIT)); sq->db = dqp->sdb; sq->doorbell_qpn = dqp->doorbell_qpn; cq->buf = dcq->buf.buf; @@ -174,31 +116,17 @@ mlx4_get_tx_port_offloads(struct priv *priv) DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM); } - if (priv->hw_csum_l2tun) + if (priv->tso) + offloads |= DEV_TX_OFFLOAD_TCP_TSO; + if (priv->hw_csum_l2tun) { offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + if (priv->tso) + offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO); + } return offloads; } -/** - * Checks if the per-queue offload configuration is valid. - * - * @param priv - * Pointer to private structure. - * @param requested - * Per-queue offloads configuration. - * - * @return - * Nonzero when configuration is valid. - */ -static int -mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested) -{ - uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads; - uint64_t supported = mlx4_get_tx_port_offloads(priv); - - return !((mandatory ^ requested) & supported); -} - /** * DPDK callback to configure a Tx queue. * @@ -246,23 +174,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, }, }; int ret; + uint64_t offloads; + + offloads = conf->offloads | dev->data->dev_conf.txmode.offloads; DEBUG("%p: configuring queue %u for %u descriptors", (void *)dev, idx, desc); - /* - * Don't verify port offloads for application which - * use the old API. - */ - if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) && - !mlx4_check_tx_queue_offloads(priv, conf->offloads)) { - rte_errno = ENOTSUP; - ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port " - "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, - (void *)dev, conf->offloads, - dev->data->dev_conf.txmode.offloads, - mlx4_get_tx_port_offloads(priv)); - return -rte_errno; - } + if (idx >= dev->data->nb_tx_queues) { rte_errno = EOVERFLOW; ERROR("%p: queue index out of range (%u >= %u)", @@ -313,11 +231,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, .elts_comp_cd_init = RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4), .csum = priv->hw_csum && - (conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM | + (offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM)), .csum_l2tun = priv->hw_csum_l2tun && - (conf->offloads & + (offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM), /* Enable Tx loopback for VF devices. */ .lb = !!priv->vf, @@ -404,8 +322,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, /* Save first wqe pointer in the first element. */ (&(*txq->elts)[0])->wqe = (volatile struct mlx4_wqe_ctrl_seg *)txq->msq.buf; - /* Pre-register known mempools. */ - rte_mempool_walk(mlx4_txq_mp2mr_iter, txq); + if (mlx4_mr_btree_init(&txq->mr_ctrl.cache_bh, + MLX4_MR_BTREE_CACHE_N, socket)) { + /* rte_errno is already set. 
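With the 18.05+ offload API the removed per-queue validation becomes unnecessary: ethdev validates offloads itself, and a queue's effective set is simply the union of the port-wide and per-queue flags, which is why both mlx4_rx_queue_setup() and mlx4_tx_queue_setup() now start with the same OR. In sketch form; the DEV_TX_OFFLOAD_* checksum bits shown are the standard ethdev values:

#include <stdint.h>

#define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002ULL
#define DEV_TX_OFFLOAD_UDP_CKSUM  0x00000004ULL
#define DEV_TX_OFFLOAD_TCP_CKSUM  0x00000008ULL

/* Effective offloads for one queue: port-wide flags always apply,
 * per-queue flags may only add to them. */
static uint64_t
effective_offloads(uint64_t port_offloads, uint64_t queue_offloads)
{
	return port_offloads | queue_offloads;
}

/* Mirrors the .csum initializer in mlx4_tx_queue_setup() above. */
static int
txq_csum_enabled(uint64_t offloads, int hw_csum)
{
	return hw_csum && (offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
				       DEV_TX_OFFLOAD_UDP_CKSUM |
				       DEV_TX_OFFLOAD_TCP_CKSUM));
}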
*/ + goto error; + } + /* Save pointer of global generation number to check memory event. */ + txq->mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen; DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq); dev->data->tx_queues[idx] = txq; return 0; @@ -446,11 +369,6 @@ mlx4_tx_queue_release(void *dpdk_txq) claim_zero(mlx4_glue->destroy_qp(txq->qp)); if (txq->cq) claim_zero(mlx4_glue->destroy_cq(txq->cq)); - for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) { - if (!txq->mp2mr[i].mp) - break; - assert(txq->mp2mr[i].mr); - mlx4_mr_put(txq->mp2mr[i].mr); - } + mlx4_mr_btree_free(&txq->mr_ctrl.cache_bh); rte_free(txq); } diff --git a/drivers/net/mlx4/mlx4_utils.c b/drivers/net/mlx4/mlx4_utils.c index d10812ec..a727d703 100644 --- a/drivers/net/mlx4/mlx4_utils.c +++ b/drivers/net/mlx4/mlx4_utils.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** diff --git a/drivers/net/mlx4/mlx4_utils.h b/drivers/net/mlx4/mlx4_utils.h index 9fdbacad..86abb3b7 100644 --- a/drivers/net/mlx4/mlx4_utils.h +++ b/drivers/net/mlx4/mlx4_utils.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef MLX4_UTILS_H_ diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile index 3bc9736c..2e70dec5 100644 --- a/drivers/net/mlx5/Makefile +++ b/drivers/net/mlx5/Makefile @@ -1,33 +1,6 @@ -# BSD LICENSE -# +# SPDX-License-Identifier: BSD-3-Clause # Copyright 2015 6WIND S.A. -# Copyright 2015 Mellanox. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND S.A. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# Copyright 2015 Mellanox Technologies, Ltd include $(RTE_SDK)/mk/rte.vars.mk @@ -35,7 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk LIB = librte_pmd_mlx5.a LIB_GLUE = $(LIB_GLUE_BASE).$(LIB_GLUE_VERSION) LIB_GLUE_BASE = librte_pmd_mlx5_glue.so -LIB_GLUE_VERSION = 18.02.0 +LIB_GLUE_VERSION = 18.05.0 # Sources. 
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5.c @@ -59,6 +32,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl_flow.c ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y) INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE) @@ -82,6 +57,7 @@ LDLIBS += -ldl else LDLIBS += -libverbs -lmlx5 endif +LDLIBS += -lmnl LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs LDLIBS += -lrte_bus_pci @@ -92,6 +68,9 @@ CFLAGS += -Wno-error=cast-qual EXPORT_MAP := rte_pmd_mlx5_version.map LIBABIVER := 1 +# memseg walk is not part of stable API +CFLAGS += -DALLOW_EXPERIMENTAL_API + # DEBUG which is usually provided on the command-line may enable # CONFIG_RTE_LIBRTE_MLX5_DEBUG. ifeq ($(DEBUG),1) @@ -105,10 +84,6 @@ else CFLAGS += -DNDEBUG -UPEDANTIC endif -ifdef CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE -CFLAGS += -DMLX5_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE) -endif - include $(RTE_SDK)/mk/rte.lib.mk # Generate and clean-up mlx5_autoconf.h. @@ -125,15 +100,30 @@ mlx5_autoconf.h.new: FORCE mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh $Q $(RM) -f -- '$@' $Q sh -- '$<' '$@' \ - HAVE_IBV_DEVICE_VXLAN_SUPPORT \ + HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT \ + infiniband/mlx5dv.h \ + enum MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_IBV_DEVICE_TUNNEL_SUPPORT \ + infiniband/mlx5dv.h \ + enum MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_IBV_DEVICE_MPLS_SUPPORT \ infiniband/verbs.h \ - enum IBV_DEVICE_VXLAN_SUPPORT \ + enum IBV_FLOW_SPEC_MPLS \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_WQ_FLAG_RX_END_PADDING \ infiniband/verbs.h \ enum IBV_WQ_FLAG_RX_END_PADDING \ $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_IBV_MLX5_MOD_SWP \ + infiniband/mlx5dv.h \ + type 'struct mlx5dv_sw_parsing_caps' \ + $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_MLX5_MOD_MPW \ infiniband/mlx5dv.h \ @@ -162,7 +152,237 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh $Q sh -- '$<' '$@' \ HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT \ infiniband/verbs.h \ - enum IBV_FLOW_SPEC_ACTION_COUNT \ + type 'struct ibv_counter_set_init_attr' \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_RDMA_NL_NLDEV \ + rdma/rdma_netlink.h \ + enum RDMA_NL_NLDEV \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_RDMA_NLDEV_CMD_GET \ + rdma/rdma_netlink.h \ + enum RDMA_NLDEV_CMD_GET \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_RDMA_NLDEV_CMD_PORT_GET \ + rdma/rdma_netlink.h \ + enum RDMA_NLDEV_CMD_PORT_GET \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_RDMA_NLDEV_ATTR_DEV_INDEX \ + rdma/rdma_netlink.h \ + enum RDMA_NLDEV_ATTR_DEV_INDEX \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_RDMA_NLDEV_ATTR_DEV_NAME \ + rdma/rdma_netlink.h \ + enum RDMA_NLDEV_ATTR_DEV_NAME \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_RDMA_NLDEV_ATTR_PORT_INDEX \ + rdma/rdma_netlink.h \ + enum RDMA_NLDEV_ATTR_PORT_INDEX \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX \ + rdma/rdma_netlink.h \ + enum RDMA_NLDEV_ATTR_NDEV_INDEX \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_IFLA_PHYS_SWITCH_ID \ + linux/if_link.h \ + enum IFLA_PHYS_SWITCH_ID \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_IFLA_PHYS_PORT_NAME \ + 
linux/if_link.h \ + enum IFLA_PHYS_PORT_NAME \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_ACT \ + linux/pkt_cls.h \ + enum TCA_FLOWER_ACT \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_FLAGS \ + linux/pkt_cls.h \ + enum TCA_FLOWER_FLAGS \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_ETH_TYPE \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_ETH_TYPE \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_ETH_DST \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_ETH_DST \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_ETH_DST_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_ETH_DST_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_ETH_SRC \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_ETH_SRC \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_ETH_SRC_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IP_PROTO \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IP_PROTO \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV4_SRC \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV4_SRC \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV4_SRC_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV4_DST \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV4_DST \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV4_DST_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV6_SRC \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV6_SRC \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV6_SRC_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV6_DST \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV6_DST \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV6_DST_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_TCP_SRC \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_TCP_SRC \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_TCP_SRC_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_TCP_DST \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_TCP_DST \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_TCP_DST_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_TCP_DST_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_UDP_SRC \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_UDP_SRC \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_UDP_SRC_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_UDP_DST \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_UDP_DST \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_UDP_DST_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_UDP_DST_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_VLAN_ID \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_VLAN_ID \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_VLAN_PRIO \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_VLAN_PRIO \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE \ + 
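The long run of TCA_FLOWER_* probes through this hunk exists because the new mlx5_nl_flow.c translates rte_flow rules into kernel flower classifier attributes over libmnl (hence the -lmnl addition earlier in the Makefile). A hedged sketch of what one translation step can look like; the surrounding function and its arguments are illustrative, not lifted from the PMD:

    #include <stdint.h>
    #include <libmnl/libmnl.h>
    #include <linux/pkt_cls.h>

    /* Append a VLAN ID match to a netlink message under construction,
     * but only when the build-time probe found the attribute. */
    static void
    put_vlan_match(struct nlmsghdr *nlh, uint16_t vlan_id)
    {
    #ifdef HAVE_TCA_FLOWER_KEY_VLAN_ID
        mnl_attr_put_u16(nlh, TCA_FLOWER_KEY_VLAN_ID, vlan_id);
    #else
        (void)nlh;
        (void)vlan_id; /* kernel headers too old to express this match */
    #endif
    }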
linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_VLAN_ETH_TYPE \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TC_ACT_VLAN \ + linux/tc_act/tc_vlan.h \ + enum TCA_VLAN_PUSH_VLAN_PRIORITY \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_40000baseKR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_40000baseKR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_40000baseCR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_40000baseCR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_40000baseSR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_40000baseSR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_40000baseLR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_40000baseLR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_56000baseKR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_56000baseKR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_56000baseCR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_56000baseCR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_56000baseSR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_56000baseSR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_56000baseLR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_56000baseLR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_STATIC_ASSERT \ + /usr/include/assert.h \ + define static_assert \ $(AUTOCONF_OUTPUT) # Create mlx5_autoconf.h or update it in case it differs from the new one. @@ -181,10 +401,15 @@ ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y) $(LIB): $(LIB_GLUE) +ifeq ($(LINK_USING_CC),1) +GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS)) +else +GLUE_LDFLAGS := $(LDFLAGS) +endif $(LIB_GLUE): mlx5_glue.o - $Q $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) \ + $Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \ -Wl,-h,$(LIB_GLUE) \ - -s -shared -o $@ $< -libverbs -lmlx5 + -shared -o $@ $< -libverbs -lmlx5 mlx5_glue.o: mlx5_autoconf.h diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 6c0985bd..ec63bc6e 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #include @@ -13,6 +13,8 @@ #include #include #include +#include +#include /* Verbs header. */ /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ @@ -33,6 +35,9 @@ #include #include #include +#include +#include +#include #include "mlx5.h" #include "mlx5_utils.h" @@ -40,10 +45,23 @@ #include "mlx5_autoconf.h" #include "mlx5_defs.h" #include "mlx5_glue.h" +#include "mlx5_mr.h" /* Device parameter to enable RX completion queue compression. */ #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en" +/* Device parameter to enable Multi-Packet Rx queue. */ +#define MLX5_RX_MPRQ_EN "mprq_en" + +/* Device parameter to configure log 2 of the number of strides for MPRQ. */ +#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num" + +/* Device parameter to limit the size of memcpy'd packet for MPRQ. */ +#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len" + +/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */ +#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq" + /* Device parameter to configure inline send. */ #define MLX5_TXQ_INLINE "txq_inline" @@ -68,6 +86,15 @@ /* Device parameter to enable hardware Rx vector. 
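The MLX5_RX_MPRQ_* parameters defined above are ordinary kvargs attached to the PCI device, so Multi-Packet RQ can be tuned from the EAL command line. A hypothetical testpmd invocation (the PCI address is illustrative):

    testpmd -w 0000:05:00.0,mprq_en=1,mprq_log_stride_num=6,rxqs_min_mprq=2 -- -i

mprq_max_memcpy_len additionally bounds how large a packet may be memcpy'd out of a stride instead of being attached as an external buffer.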
*/ #define MLX5_RX_VEC_EN "rx_vec_en" +/* Allow L3 VXLAN flow creation. */ +#define MLX5_L3_VXLAN_EN "l3_vxlan_en" + +/* Activate Netlink support in VF mode. */ +#define MLX5_VF_NL_EN "vf_nl_en" + +/* Select port representors to instantiate. */ +#define MLX5_REPRESENTOR "representor" + #ifndef HAVE_IBV_MLX5_MOD_MPW #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2) #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3) @@ -77,6 +104,50 @@ #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4) #endif +static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data"; + +/* Shared memory between primary and secondary processes. */ +struct mlx5_shared_data *mlx5_shared_data; + +/* Spinlock for mlx5_shared_data allocation. */ +static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER; + +/** Driver-specific log messages type. */ +int mlx5_logtype; + +/** + * Prepare shared data between primary and secondary process. + */ +static void +mlx5_prepare_shared_data(void) +{ + const struct rte_memzone *mz; + + rte_spinlock_lock(&mlx5_shared_data_lock); + if (mlx5_shared_data == NULL) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + /* Allocate shared memory. */ + mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA, + sizeof(*mlx5_shared_data), + SOCKET_ID_ANY, 0); + } else { + /* Lookup allocated shared memory. */ + mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA); + } + if (mz == NULL) + rte_panic("Cannot allocate mlx5 shared data\n"); + mlx5_shared_data = mz->addr; + /* Initialize shared data. */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + LIST_INIT(&mlx5_shared_data->mem_event_cb_list); + rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock); + } + rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", + mlx5_mr_mem_event_cb, NULL); + } + rte_spinlock_unlock(&mlx5_shared_data_lock); +} + /** * Retrieve integer value from environment variable. * @@ -108,7 +179,7 @@ mlx5_getenv_int(const char *name) * A pointer to the callback data. * * @return - * a pointer to the allocate space. + * Allocated buffer, NULL otherwise and rte_errno is set. */ static void * mlx5_alloc_verbs_buf(size_t size, void *data) @@ -130,7 +201,8 @@ mlx5_alloc_verbs_buf(size_t size, void *data) } assert(data != NULL); ret = rte_malloc_socket(__func__, size, alignment, socket); - DEBUG("Extern alloc size: %lu, align: %lu: %p", size, alignment, ret); + if (!ret && size) + rte_errno = ENOMEM; return ret; } @@ -146,7 +218,6 @@ static void mlx5_free_verbs_buf(void *ptr, void *data __rte_unused) { assert(data != NULL); - DEBUG("Extern free request: %p", ptr); rte_free(ptr); } @@ -165,13 +236,13 @@ mlx5_dev_close(struct rte_eth_dev *dev) unsigned int i; int ret; - priv_lock(priv); - DEBUG("%p: closing device \"%s\"", - (void *)dev, - ((priv->ctx != NULL) ? priv->ctx->device->name : "")); + DRV_LOG(DEBUG, "port %u closing device \"%s\"", + dev->data->port_id, + ((priv->ctx != NULL) ? priv->ctx->device->name : "")); /* In case mlx5_dev_stop() has not been called. */ - priv_dev_interrupt_handler_uninstall(priv, dev); - priv_dev_traffic_disable(priv, dev); + mlx5_dev_interrupt_handler_uninstall(dev); + mlx5_traffic_disable(dev); + mlx5_flow_flush(dev, NULL); /* Prevent crashes when queues are still in use. */ dev->rx_pkt_burst = removed_rx_burst; dev->tx_pkt_burst = removed_tx_burst; @@ -179,7 +250,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) /* XXX race condition if mlx5_rx_burst() is still running. 
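mlx5_prepare_shared_data() above follows the canonical DPDK pattern for state shared between primary and secondary processes: the primary reserves a named memzone, secondaries find the same memory by looking the name up, and a spinlock guards one-time initialization. A condensed, self-contained sketch of the pattern (all names hypothetical):

    #include <rte_eal.h>
    #include <rte_memzone.h>

    #define MZ_NAME "example_pmd_shared_data"

    struct shared_data {
        int initialized;
    };

    static struct shared_data *
    get_shared_data(void)
    {
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
            mz = rte_memzone_reserve(MZ_NAME, sizeof(struct shared_data),
                                     SOCKET_ID_ANY, 0);
        else
            mz = rte_memzone_lookup(MZ_NAME);
        return mz == NULL ? NULL : mz->addr;
    }

Note the patch chooses rte_panic() on failure here because the memory event callback registered immediately afterwards cannot work without the shared list.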
*/ usleep(1000); for (i = 0; (i != priv->rxqs_n); ++i) - mlx5_priv_rxq_release(priv, i); + mlx5_rxq_release(dev, i); priv->rxqs_n = 0; priv->rxqs = NULL; } @@ -187,10 +258,12 @@ mlx5_dev_close(struct rte_eth_dev *dev) /* XXX race condition if mlx5_tx_burst() is still running. */ usleep(1000); for (i = 0; (i != priv->txqs_n); ++i) - mlx5_priv_txq_release(priv, i); + mlx5_txq_release(dev, i); priv->txqs_n = 0; priv->txqs = NULL; } + mlx5_mprq_free_mp(dev); + mlx5_mr_release(dev); if (priv->pd != NULL) { assert(priv->ctx != NULL); claim_zero(mlx5_glue->dealloc_pd(priv->pd)); @@ -202,33 +275,64 @@ mlx5_dev_close(struct rte_eth_dev *dev) if (priv->reta_idx != NULL) rte_free(priv->reta_idx); if (priv->primary_socket) - priv_socket_uninit(priv); - ret = mlx5_priv_hrxq_ibv_verify(priv); + mlx5_socket_uninit(dev); + if (priv->config.vf) + mlx5_nl_mac_addr_flush(dev); + if (priv->nl_socket_route >= 0) + close(priv->nl_socket_route); + if (priv->nl_socket_rdma >= 0) + close(priv->nl_socket_rdma); + if (priv->mnl_socket) + mlx5_nl_flow_socket_destroy(priv->mnl_socket); + ret = mlx5_hrxq_ibv_verify(dev); if (ret) - WARN("%p: some Hash Rx queue still remain", (void *)priv); - ret = mlx5_priv_ind_table_ibv_verify(priv); + DRV_LOG(WARNING, "port %u some hash Rx queue still remain", + dev->data->port_id); + ret = mlx5_ind_table_ibv_verify(dev); if (ret) - WARN("%p: some Indirection table still remain", (void *)priv); - ret = mlx5_priv_rxq_ibv_verify(priv); + DRV_LOG(WARNING, "port %u some indirection table still remain", + dev->data->port_id); + ret = mlx5_rxq_ibv_verify(dev); if (ret) - WARN("%p: some Verbs Rx queue still remain", (void *)priv); - ret = mlx5_priv_rxq_verify(priv); + DRV_LOG(WARNING, "port %u some Verbs Rx queue still remain", + dev->data->port_id); + ret = mlx5_rxq_verify(dev); if (ret) - WARN("%p: some Rx Queues still remain", (void *)priv); - ret = mlx5_priv_txq_ibv_verify(priv); + DRV_LOG(WARNING, "port %u some Rx queues still remain", + dev->data->port_id); + ret = mlx5_txq_ibv_verify(dev); if (ret) - WARN("%p: some Verbs Tx queue still remain", (void *)priv); - ret = mlx5_priv_txq_verify(priv); + DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain", + dev->data->port_id); + ret = mlx5_txq_verify(dev); if (ret) - WARN("%p: some Tx Queues still remain", (void *)priv); - ret = priv_flow_verify(priv); + DRV_LOG(WARNING, "port %u some Tx queues still remain", + dev->data->port_id); + ret = mlx5_flow_verify(dev); if (ret) - WARN("%p: some flows still remain", (void *)priv); - ret = priv_mr_verify(priv); - if (ret) - WARN("%p: some Memory Region still remain", (void *)priv); - priv_unlock(priv); + DRV_LOG(WARNING, "port %u some flows still remain", + dev->data->port_id); + if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { + unsigned int c = 0; + unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0); + uint16_t port_id[i]; + + i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i); + while (i--) { + struct priv *opriv = + rte_eth_devices[port_id[i]].data->dev_private; + + if (!opriv || + opriv->domain_id != priv->domain_id || + &rte_eth_devices[port_id[i]] == dev) + continue; + ++c; + } + if (!c) + claim_zero(rte_eth_switch_domain_free(priv->domain_id)); + } memset(priv, 0, sizeof(*priv)); + priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; } const struct eth_dev_ops mlx5_dev_ops = { @@ -260,6 +364,7 @@ const struct eth_dev_ops mlx5_dev_ops = { .mac_addr_remove = mlx5_mac_addr_remove, .mac_addr_add = mlx5_mac_addr_add, .mac_addr_set = mlx5_mac_addr_set, + 
.set_mc_addr_list = mlx5_set_mc_addr_list, .mtu_set = mlx5_dev_set_mtu, .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, .vlan_offload_set = mlx5_vlan_offload_set, @@ -294,6 +399,10 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = { .dev_set_link_down = mlx5_set_link_down, .dev_set_link_up = mlx5_set_link_up, .dev_close = mlx5_dev_close, + .promiscuous_enable = mlx5_promiscuous_enable, + .promiscuous_disable = mlx5_promiscuous_disable, + .allmulticast_enable = mlx5_allmulticast_enable, + .allmulticast_disable = mlx5_allmulticast_disable, .link_update = mlx5_link_update, .stats_get = mlx5_stats_get, .stats_reset = mlx5_stats_reset, @@ -312,6 +421,7 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = { .mac_addr_remove = mlx5_mac_addr_remove, .mac_addr_add = mlx5_mac_addr_add, .mac_addr_set = mlx5_mac_addr_set, + .set_mc_addr_list = mlx5_set_mc_addr_list, .mtu_set = mlx5_dev_set_mtu, .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, .vlan_offload_set = mlx5_vlan_offload_set, @@ -323,39 +433,6 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = { .is_removed = mlx5_is_removed, }; -static struct { - struct rte_pci_addr pci_addr; /* associated PCI address */ - uint32_t ports; /* physical ports bitfield. */ -} mlx5_dev[32]; - -/** - * Get device index in mlx5_dev[] from PCI bus address. - * - * @param[in] pci_addr - * PCI bus address to look for. - * - * @return - * mlx5_dev[] index on success, -1 on failure. - */ -static int -mlx5_dev_idx(struct rte_pci_addr *pci_addr) -{ - unsigned int i; - int ret = -1; - - assert(pci_addr != NULL); - for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) { - if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) && - (mlx5_dev[i].pci_addr.bus == pci_addr->bus) && - (mlx5_dev[i].pci_addr.devid == pci_addr->devid) && - (mlx5_dev[i].pci_addr.function == pci_addr->function)) - return i; - if ((mlx5_dev[i].ports == 0) && (ret == -1)) - ret = i; - } - return ret; -} - /** * Verify and store value for device argument. * @@ -367,7 +444,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr) * User data. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_args_check(const char *key, const char *val, void *opaque) @@ -375,14 +452,26 @@ mlx5_args_check(const char *key, const char *val, void *opaque) struct mlx5_dev_config *config = opaque; unsigned long tmp; + /* No-op, port representors are processed in mlx5_dev_spawn(). 
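mlx5_args_check(), which begins above, is a rte_kvargs_process() callback: it is invoked once per key/value pair, converts the value with strtoul() and maps the key onto a field of mlx5_dev_config, now reporting failures through rte_errno. A minimal sketch of the same shape with hypothetical key and config names:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>
    #include <rte_kvargs.h>

    struct cfg {
        int feature_en;
    };

    static int
    args_check(const char *key, const char *val, void *opaque)
    {
        struct cfg *cfg = opaque;
        unsigned long tmp;

        errno = 0;
        tmp = strtoul(val, NULL, 0);
        if (errno)
            return -errno;
        if (strcmp(key, "feature_en") == 0)
            cfg->feature_en = !!tmp;
        else
            return -EINVAL; /* unknown key */
        return 0;
    }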
*/ + if (!strcmp(MLX5_REPRESENTOR, key)) + return 0; errno = 0; tmp = strtoul(val, NULL, 0); if (errno) { - WARN("%s: \"%s\" is not a valid integer", key, val); - return errno; + rte_errno = errno; + DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val); + return -rte_errno; } if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) { config->cqe_comp = !!tmp; + } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) { + config->mprq.enabled = !!tmp; + } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) { + config->mprq.stride_num_n = tmp; + } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) { + config->mprq.max_memcpy_len = tmp; + } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) { + config->mprq.min_rxqs_num = tmp; } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) { config->txq_inline = tmp; } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) { @@ -397,9 +486,14 @@ mlx5_args_check(const char *key, const char *val, void *opaque) config->tx_vec_en = !!tmp; } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) { config->rx_vec_en = !!tmp; + } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) { + config->l3_vxlan_en = !!tmp; + } else if (strcmp(MLX5_VF_NL_EN, key) == 0) { + config->vf_nl_en = !!tmp; } else { - WARN("%s: unknown parameter", key); - return -EINVAL; + DRV_LOG(WARNING, "%s: unknown parameter", key); + rte_errno = EINVAL; + return -rte_errno; } return 0; } @@ -413,13 +507,17 @@ mlx5_args_check(const char *key, const char *val, void *opaque) * Device arguments structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) { const char **params = (const char *[]){ MLX5_RXQ_CQE_COMP_EN, + MLX5_RX_MPRQ_EN, + MLX5_RX_MPRQ_LOG_STRIDE_NUM, + MLX5_RX_MPRQ_MAX_MEMCPY_LEN, + MLX5_RXQS_MIN_MPRQ, MLX5_TXQ_INLINE, MLX5_TXQS_MIN_INLINE, MLX5_TXQ_MPW_EN, @@ -427,6 +525,9 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) MLX5_TXQ_MAX_INLINE_LEN, MLX5_TX_VEC_EN, MLX5_RX_VEC_EN, + MLX5_L3_VXLAN_EN, + MLX5_VF_NL_EN, + MLX5_REPRESENTOR, NULL, }; struct rte_kvargs *kvlist; @@ -444,9 +545,10 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) if (rte_kvargs_count(kvlist, params[i])) { ret = rte_kvargs_process(kvlist, params[i], mlx5_args_check, config); - if (ret != 0) { + if (ret) { + rte_errno = EINVAL; rte_kvargs_free(kvlist); - return ret; + return -rte_errno; } } } @@ -465,50 +567,60 @@ static struct rte_pci_driver mlx5_driver; */ static void *uar_base; +static int +find_lower_va_bound(const struct rte_memseg_list *msl __rte_unused, + const struct rte_memseg *ms, void *arg) +{ + void **addr = arg; + + if (*addr == NULL) + *addr = ms->addr; + else + *addr = RTE_MIN(*addr, ms->addr); + + return 0; +} + /** * Reserve UAR address space for primary process. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_uar_init_primary(struct priv *priv) +mlx5_uar_init_primary(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; void *addr = (void *)0; - int i; - const struct rte_mem_config *mcfg; - int ret; if (uar_base) { /* UAR address space mapped. 
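find_lower_va_bound(), defined just above, replaces the old direct walk over mcfg->memseg[]: since EAL memory became dynamically allocated, segments can only be enumerated through the rte_memseg_walk() visitor API, which is also why the Makefile now sets ALLOW_EXPERIMENTAL_API. Usage boils down to:

    #include <rte_common.h>
    #include <rte_memory.h>

    /* Called once per memseg; returning non-zero stops the walk early. */
    static int
    lowest_va(const struct rte_memseg_list *msl __rte_unused,
              const struct rte_memseg *ms, void *arg)
    {
        void **addr = arg;

        if (*addr == NULL)
            *addr = ms->addr;
        else
            *addr = RTE_MIN(*addr, ms->addr);
        return 0;
    }

    static void *
    find_lowest_va(void)
    {
        void *low = NULL;

        rte_memseg_walk(lowest_va, &low);
        return low;
    }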
*/ priv->uar_base = uar_base; return 0; } /* find out lower bound of hugepage segments */ - mcfg = rte_eal_get_configuration()->mem_config; - for (i = 0; i < RTE_MAX_MEMSEG && mcfg->memseg[i].addr; i++) { - if (addr) - addr = RTE_MIN(addr, mcfg->memseg[i].addr); - else - addr = mcfg->memseg[i].addr; - } + rte_memseg_walk(find_lower_va_bound, &addr); + /* keep distance to hugepages to minimize potential conflicts. */ - addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE); + addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX5_UAR_OFFSET + MLX5_UAR_SIZE)); /* anonymous mmap, no real memory consumption. */ addr = mmap(addr, MLX5_UAR_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (addr == MAP_FAILED) { - ERROR("Failed to reserve UAR address space, please adjust " - "MLX5_UAR_SIZE or try --base-virtaddr"); - ret = ENOMEM; - return ret; + DRV_LOG(ERR, + "port %u failed to reserve UAR address space, please" + " adjust MLX5_UAR_SIZE or try --base-virtaddr", + dev->data->port_id); + rte_errno = ENOMEM; + return -rte_errno; } /* Accept either same addr or a new addr returned from mmap if target * range occupied. */ - INFO("Reserved UAR address space: %p", addr); + DRV_LOG(INFO, "port %u reserved UAR address space: %p", + dev->data->port_id, addr); priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */ uar_base = addr; /* process local, don't reserve again. */ return 0; @@ -518,17 +630,17 @@ priv_uar_init_primary(struct priv *priv) * Reserve UAR address space for secondary process, align with * primary process. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_uar_init_secondary(struct priv *priv) +mlx5_uar_init_secondary(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; void *addr; - int ret; assert(priv->uar_base); if (uar_base) { /* already reserved. */ @@ -539,467 +651,802 @@ priv_uar_init_secondary(struct priv *priv) addr = mmap(priv->uar_base, MLX5_UAR_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (addr == MAP_FAILED) { - ERROR("UAR mmap failed: %p size: %llu", - priv->uar_base, MLX5_UAR_SIZE); - ret = ENXIO; - return ret; + DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu", + dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE); + rte_errno = ENXIO; + return -rte_errno; } if (priv->uar_base != addr) { - ERROR("UAR address %p size %llu occupied, please adjust " - "MLX5_UAR_OFFSET or try EAL parameter --base-virtaddr", - priv->uar_base, MLX5_UAR_SIZE); - ret = ENXIO; - return ret; + DRV_LOG(ERR, + "port %u UAR address %p size %llu occupied, please" + " adjust MLX5_UAR_OFFSET or try EAL parameter" + " --base-virtaddr", + dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE); + rte_errno = ENXIO; + return -rte_errno; } uar_base = addr; /* process local, don't reserve again */ - INFO("Reserved UAR address space: %p", addr); + DRV_LOG(INFO, "port %u reserved UAR address space: %p", + dev->data->port_id, addr); return 0; } /** - * DPDK callback to register a PCI device. + * Spawn an Ethernet device from Verbs information. * - * This function creates an Ethernet device for each port of a given - * PCI device. - * - * @param[in] pci_drv - * PCI driver structure (mlx5_driver). - * @param[in] pci_dev - * PCI device information. + * @param dpdk_dev + * Backing DPDK device. + * @param ibv_dev + * Verbs device. 
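Both UAR helpers above hinge on the same trick: an anonymous PROT_NONE mmap() reserves a virtual address range without backing it with physical memory, so primary and secondary processes can agree in advance on where UAR pages will later be remapped. The core of the reservation, as a sketch:

    #include <stddef.h>
    #include <sys/mman.h>

    /* Reserve (not allocate) an address range near a hint address. */
    static void *
    reserve_va_range(void *hint, size_t size)
    {
        void *addr = mmap(hint, size, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        return addr == MAP_FAILED ? NULL : addr;
    }

The secondary passes the primary's priv->uar_base as the hint and treats any other returned address as a fatal mismatch (ENXIO), since Tx doorbells must map at identical addresses in every process.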
+ * @param vf + * If nonzero, enable VF-specific features. + * @param[in] switch_info + * Switch properties of Ethernet device. * * @return - * 0 on success, negative errno value on failure. + * A valid Ethernet device object on success, NULL otherwise and rte_errno + * is set. The following error is defined: + * + * EBUSY: device is not supposed to be spawned. */ -static int -mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +static struct rte_eth_dev * +mlx5_dev_spawn(struct rte_device *dpdk_dev, + struct ibv_device *ibv_dev, + int vf, + const struct mlx5_switch_info *switch_info) { - struct ibv_device **list; - struct ibv_device *ibv_dev; + struct ibv_context *ctx; + struct ibv_device_attr_ex attr; + struct ibv_port_attr port_attr; + struct ibv_pd *pd = NULL; + struct mlx5dv_context dv_attr = { .comp_mask = 0 }; + struct mlx5_dev_config config = { + .vf = !!vf, + .tx_vec_en = 1, + .rx_vec_en = 1, + .mpw_hdr_dseg = 0, + .txq_inline = MLX5_ARG_UNSET, + .txqs_inline = MLX5_ARG_UNSET, + .inline_max_packet_sz = MLX5_ARG_UNSET, + .vf_nl_en = 1, + .mprq = { + .enabled = 0, + .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N, + .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN, + .min_rxqs_num = MLX5_MPRQ_MIN_RXQS, + }, + }; + struct rte_eth_dev *eth_dev = NULL; + struct priv *priv = NULL; int err = 0; - struct ibv_context *attr_ctx = NULL; - struct ibv_device_attr_ex device_attr; - unsigned int sriov; unsigned int mps; unsigned int cqe_comp; unsigned int tunnel_en = 0; - int idx; - int i; - struct mlx5dv_context attrs_out; + unsigned int mpls_en = 0; + unsigned int swp = 0; + unsigned int mprq = 0; + unsigned int mprq_min_stride_size_n = 0; + unsigned int mprq_max_stride_size_n = 0; + unsigned int mprq_min_stride_num_n = 0; + unsigned int mprq_max_stride_num_n = 0; #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT - struct ibv_counter_set_description cs_desc; + struct ibv_counter_set_description cs_desc = { .counter_type = 0 }; #endif + struct ether_addr mac; + char name[RTE_ETH_NAME_MAX_LEN]; + int own_domain_id = 0; + unsigned int i; - (void)pci_drv; - assert(pci_drv == &mlx5_driver); - /* Get mlx5_dev[] index. */ - idx = mlx5_dev_idx(&pci_dev->addr); - if (idx == -1) { - ERROR("this driver cannot support any more adapters"); - return -ENOMEM; - } - DEBUG("using driver device index %d", idx); - - /* Save PCI address. */ - mlx5_dev[idx].pci_addr = pci_dev->addr; - list = mlx5_glue->get_device_list(&i); - if (list == NULL) { - assert(errno); - if (errno == ENOSYS) - ERROR("cannot list devices, is ib_uverbs loaded?"); - return -errno; - } - assert(i >= 0); - /* - * For each listed device, check related sysfs entry against - * the provided PCI ID. - */ - while (i != 0) { - struct rte_pci_addr pci_addr; + /* Determine if this port representor is supposed to be spawned. 
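The check described in the comment above (and implemented just below) lets users pick which port representors get an ethdev at probe time. Assuming the standard representor devargs syntax handled by rte_eth_devargs_parse(), a hypothetical invocation would be:

    testpmd -w 0000:05:00.0,representor=[0-2] -- -i

Representors absent from the list make mlx5_dev_spawn() return NULL with rte_errno set to EBUSY, which the probe loop later skips silently.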
*/ + if (switch_info->representor && dpdk_dev->devargs) { + struct rte_eth_devargs eth_da; - --i; - DEBUG("checking device \"%s\"", list[i]->name); - if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr)) - continue; - if ((pci_dev->addr.domain != pci_addr.domain) || - (pci_dev->addr.bus != pci_addr.bus) || - (pci_dev->addr.devid != pci_addr.devid) || - (pci_dev->addr.function != pci_addr.function)) - continue; - sriov = ((pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) || - (pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) || - (pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) || - (pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)); - switch (pci_dev->id.device_id) { - case PCI_DEVICE_ID_MELLANOX_CONNECTX4: - tunnel_en = 1; - break; - case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: - case PCI_DEVICE_ID_MELLANOX_CONNECTX5: - case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: - case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX: - case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: - tunnel_en = 1; - break; - default: - break; + err = rte_eth_devargs_parse(dpdk_dev->devargs->args, ð_da); + if (err) { + rte_errno = -err; + DRV_LOG(ERR, "failed to process device arguments: %s", + strerror(rte_errno)); + return NULL; } - INFO("PCI information matches, using device \"%s\"" - " (SR-IOV: %s)", - list[i]->name, - sriov ? "true" : "false"); - attr_ctx = mlx5_glue->open_device(list[i]); - err = errno; - break; - } - if (attr_ctx == NULL) { - mlx5_glue->free_device_list(list); - switch (err) { - case 0: - ERROR("cannot access device, is mlx5_ib loaded?"); - return -ENODEV; - case EINVAL: - ERROR("cannot use device, are drivers up to date?"); - return -EINVAL; + for (i = 0; i < eth_da.nb_representor_ports; ++i) + if (eth_da.representor_ports[i] == + (uint16_t)switch_info->port_name) + break; + if (i == eth_da.nb_representor_ports) { + rte_errno = EBUSY; + return NULL; } - assert(err > 0); - return -err; } - ibv_dev = list[i]; - - DEBUG("device opened"); + /* Prepare shared data between primary and secondary process. */ + mlx5_prepare_shared_data(); + errno = 0; + ctx = mlx5_glue->open_device(ibv_dev); + if (!ctx) { + rte_errno = errno ? errno : ENODEV; + return NULL; + } +#ifdef HAVE_IBV_MLX5_MOD_SWP + dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP; +#endif /* * Multi-packet send is supported by ConnectX-4 Lx PF as well * as all ConnectX-5 devices. 
*/ - mlx5_glue->dv_query_device(attr_ctx, &attrs_out); - if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) { - if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) { - DEBUG("Enhanced MPW is supported"); +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS; +#endif +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ; +#endif + mlx5_glue->dv_query_device(ctx, &dv_attr); + if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) { + if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) { + DRV_LOG(DEBUG, "enhanced MPW is supported"); mps = MLX5_MPW_ENHANCED; } else { - DEBUG("MPW is supported"); + DRV_LOG(DEBUG, "MPW is supported"); mps = MLX5_MPW; } } else { - DEBUG("MPW isn't supported"); + DRV_LOG(DEBUG, "MPW isn't supported"); mps = MLX5_MPW_DISABLED; } + config.mps = mps; +#ifdef HAVE_IBV_MLX5_MOD_SWP + if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP) + swp = dv_attr.sw_parsing_caps.sw_parsing_offloads; + DRV_LOG(DEBUG, "SWP support: %u", swp); +#endif + config.swp = !!swp; +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) { + struct mlx5dv_striding_rq_caps mprq_caps = + dv_attr.striding_rq_caps; + + DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d", + mprq_caps.min_single_stride_log_num_of_bytes); + DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d", + mprq_caps.max_single_stride_log_num_of_bytes); + DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d", + mprq_caps.min_single_wqe_log_num_of_strides); + DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d", + mprq_caps.max_single_wqe_log_num_of_strides); + DRV_LOG(DEBUG, "\tsupported_qpts: %d", + mprq_caps.supported_qpts); + DRV_LOG(DEBUG, "device supports Multi-Packet RQ"); + mprq = 1; + mprq_min_stride_size_n = + mprq_caps.min_single_stride_log_num_of_bytes; + mprq_max_stride_size_n = + mprq_caps.max_single_stride_log_num_of_bytes; + mprq_min_stride_num_n = + mprq_caps.min_single_wqe_log_num_of_strides; + mprq_max_stride_num_n = + mprq_caps.max_single_wqe_log_num_of_strides; + config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, + mprq_min_stride_num_n); + } +#endif if (RTE_CACHE_LINE_SIZE == 128 && - !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) + !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) cqe_comp = 0; else cqe_comp = 1; - if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) + config.cqe_comp = cqe_comp; +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) { + tunnel_en = ((dv_attr.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) && + (dv_attr.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE)); + } + DRV_LOG(DEBUG, "tunnel offloading is %ssupported", + tunnel_en ? "" : "not "); +#else + DRV_LOG(WARNING, + "tunnel offloading disabled due to old OFED/rdma-core version"); +#endif + config.tunnel_en = tunnel_en; +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + mpls_en = ((dv_attr.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) && + (dv_attr.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP)); + DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported", + mpls_en ? 
"" : "not "); +#else + DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to" + " old OFED/rdma-core version or firmware configuration"); +#endif + config.mpls_en = mpls_en; + err = mlx5_glue->query_device_ex(ctx, NULL, &attr); + if (err) { + DEBUG("ibv_query_device_ex() failed"); goto error; - INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt); - - for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) { - char name[RTE_ETH_NAME_MAX_LEN]; - int len; - uint32_t port = i + 1; /* ports are indexed from one */ - uint32_t test = (1 << i); - struct ibv_context *ctx = NULL; - struct ibv_port_attr port_attr; - struct ibv_pd *pd = NULL; - struct priv *priv = NULL; - struct rte_eth_dev *eth_dev; - struct ibv_device_attr_ex device_attr_ex; - struct ether_addr mac; - uint16_t num_vfs = 0; - struct ibv_device_attr_ex device_attr; - struct mlx5_dev_config config = { - .cqe_comp = cqe_comp, - .mps = mps, - .tunnel_en = tunnel_en, - .tx_vec_en = 1, - .rx_vec_en = 1, - .mpw_hdr_dseg = 0, - .txq_inline = MLX5_ARG_UNSET, - .txqs_inline = MLX5_ARG_UNSET, - .inline_max_packet_sz = MLX5_ARG_UNSET, - }; - - len = snprintf(name, sizeof(name), PCI_PRI_FMT, - pci_dev->addr.domain, pci_dev->addr.bus, - pci_dev->addr.devid, pci_dev->addr.function); - if (device_attr.orig_attr.phys_port_cnt > 1) - snprintf(name + len, sizeof(name), " port %u", i); - - mlx5_dev[idx].ports |= test; - - if (rte_eal_process_type() == RTE_PROC_SECONDARY) { - eth_dev = rte_eth_dev_attach_secondary(name); - if (eth_dev == NULL) { - ERROR("can not attach rte ethdev"); - err = ENOMEM; - goto error; - } - eth_dev->device = &pci_dev->device; - eth_dev->dev_ops = &mlx5_dev_sec_ops; - priv = eth_dev->data->dev_private; - err = priv_uar_init_secondary(priv); - if (err < 0) { - err = -err; - goto error; - } - /* Receive command fd from primary process */ - err = priv_socket_connect(priv); - if (err < 0) { - err = -err; - goto error; - } - /* Remap UAR for Tx queues. */ - err = priv_tx_uar_remap(priv, err); - if (err) - goto error; - /* - * Ethdev pointer is still required as input since - * the primary device is not accessible from the - * secondary process. - */ - eth_dev->rx_pkt_burst = - priv_select_rx_function(priv, eth_dev); - eth_dev->tx_pkt_burst = - priv_select_tx_function(priv, eth_dev); - continue; - } - - DEBUG("using port %u (%08" PRIx32 ")", port, test); - - ctx = mlx5_glue->open_device(ibv_dev); - if (ctx == NULL) { - err = ENODEV; - goto port_error; + } + if (!switch_info->representor) + rte_strlcpy(name, dpdk_dev->name, sizeof(name)); + else + snprintf(name, sizeof(name), "%s_representor_%u", + dpdk_dev->name, switch_info->port_name); + DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (eth_dev == NULL) { + DRV_LOG(ERR, "can not attach rte ethdev"); + rte_errno = ENOMEM; + err = rte_errno; + goto error; } - - mlx5_glue->query_device_ex(ctx, NULL, &device_attr); - /* Check port status. 
*/ - err = mlx5_glue->query_port(ctx, port, &port_attr); + eth_dev->device = dpdk_dev; + eth_dev->dev_ops = &mlx5_dev_sec_ops; + err = mlx5_uar_init_secondary(eth_dev); if (err) { - ERROR("port query failed: %s", strerror(err)); - goto port_error; + err = rte_errno; + goto error; } - - if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { - ERROR("port %d is not configured in Ethernet mode", - port); - err = EINVAL; - goto port_error; + /* Receive command fd from primary process */ + err = mlx5_socket_connect(eth_dev); + if (err < 0) { + err = rte_errno; + goto error; } - - if (port_attr.state != IBV_PORT_ACTIVE) - DEBUG("port %d is not active: \"%s\" (%d)", - port, mlx5_glue->port_state_str(port_attr.state), - port_attr.state); - - /* Allocate protection domain. */ - pd = mlx5_glue->alloc_pd(ctx); - if (pd == NULL) { - ERROR("PD allocation failure"); - err = ENOMEM; - goto port_error; + /* Remap UAR for Tx queues. */ + err = mlx5_tx_uar_remap(eth_dev, err); + if (err) { + err = rte_errno; + goto error; } - - mlx5_dev[idx].ports |= test; - - /* from rte_ethdev.c */ - priv = rte_zmalloc("ethdev private structure", - sizeof(*priv), - RTE_CACHE_LINE_SIZE); - if (priv == NULL) { - ERROR("priv allocation failure"); - err = ENOMEM; - goto port_error; + /* + * Ethdev pointer is still required as input since + * the primary device is not accessible from the + * secondary process. + */ + eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev); + eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev); + claim_zero(mlx5_glue->close_device(ctx)); + return eth_dev; + } + /* Check port status. */ + err = mlx5_glue->query_port(ctx, 1, &port_attr); + if (err) { + DRV_LOG(ERR, "port query failed: %s", strerror(err)); + goto error; + } + if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { + DRV_LOG(ERR, "port is not configured in Ethernet mode"); + err = EINVAL; + goto error; + } + if (port_attr.state != IBV_PORT_ACTIVE) + DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)", + mlx5_glue->port_state_str(port_attr.state), + port_attr.state); + /* Allocate protection domain. */ + pd = mlx5_glue->alloc_pd(ctx); + if (pd == NULL) { + DRV_LOG(ERR, "PD allocation failure"); + err = ENOMEM; + goto error; + } + priv = rte_zmalloc("ethdev private structure", + sizeof(*priv), + RTE_CACHE_LINE_SIZE); + if (priv == NULL) { + DRV_LOG(ERR, "priv allocation failure"); + err = ENOMEM; + goto error; + } + priv->ctx = ctx; + strncpy(priv->ibdev_name, priv->ctx->device->name, + sizeof(priv->ibdev_name)); + strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path, + sizeof(priv->ibdev_path)); + priv->device_attr = attr; + priv->pd = pd; + priv->mtu = ETHER_MTU; +#ifndef RTE_ARCH_64 + /* Initialize UAR access locks for 32bit implementations. */ + rte_spinlock_init(&priv->uar_lock_cq); + for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++) + rte_spinlock_init(&priv->uar_lock[i]); +#endif + /* Some internal functions rely on Netlink sockets, open them now. */ + priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA); + priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE); + priv->nl_sn = 0; + priv->representor = !!switch_info->representor; + priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; + priv->representor_id = + switch_info->representor ? switch_info->port_name : -1; + /* + * Look for sibling devices in order to reuse their switch domain + * if any, otherwise allocate one. 
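Every port hanging off the same physical switch must report one shared switch domain ID so applications can correlate the master with its representors. The sibling scan that follows the comment above reuses an existing domain where possible; only when it comes up empty does the code allocate a fresh one. A minimal sketch of the allocation side, assuming the experimental API is declared in rte_ethdev_driver.h:

    #include <rte_ethdev_driver.h> /* assumed header for this API */

    static int
    acquire_switch_domain(uint16_t *domain_id)
    {
        /* A real driver first tries to inherit a sibling's ID. */
        return rte_eth_switch_domain_alloc(domain_id);
    }

On teardown the reference counting is manual: mlx5_dev_close() earlier in this patch frees the domain only once no other live port still uses it.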
+ */ + i = mlx5_dev_to_port_id(dpdk_dev, NULL, 0); + if (i > 0) { + uint16_t port_id[i]; + + i = RTE_MIN(mlx5_dev_to_port_id(dpdk_dev, port_id, i), i); + while (i--) { + const struct priv *opriv = + rte_eth_devices[port_id[i]].data->dev_private; + + if (!opriv || + opriv->domain_id == + RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) + continue; + priv->domain_id = opriv->domain_id; + break; } - - priv->ctx = ctx; - strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path, - sizeof(priv->ibdev_path)); - priv->device_attr = device_attr; - priv->port = port; - priv->pd = pd; - priv->mtu = ETHER_MTU; - err = mlx5_args(&config, pci_dev->device.devargs); + } + if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { + err = rte_eth_switch_domain_alloc(&priv->domain_id); if (err) { - ERROR("failed to process device arguments: %s", - strerror(err)); - goto port_error; + err = rte_errno; + DRV_LOG(ERR, "unable to allocate switch domain: %s", + strerror(rte_errno)); + goto error; } - if (mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex)) { - ERROR("ibv_query_device_ex() failed"); - goto port_error; - } - - config.hw_csum = !!(device_attr_ex.device_cap_flags_ex & - IBV_DEVICE_RAW_IP_CSUM); - DEBUG("checksum offloading is %ssupported", - (config.hw_csum ? "" : "not ")); - -#ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT - config.hw_csum_l2tun = - !!(exp_device_attr.exp_device_cap_flags & - IBV_DEVICE_VXLAN_SUPPORT); -#endif - DEBUG("Rx L2 tunnel checksum offloads are %ssupported", - (config.hw_csum_l2tun ? "" : "not ")); - + own_domain_id = 1; + } + err = mlx5_args(&config, dpdk_dev->devargs); + if (err) { + err = rte_errno; + DRV_LOG(ERR, "failed to process device arguments: %s", + strerror(rte_errno)); + goto error; + } + config.hw_csum = !!(attr.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM); + DRV_LOG(DEBUG, "checksum offloading is %ssupported", + (config.hw_csum ? "" : "not ")); #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT - config.flow_counter_en = !!(device_attr.max_counter_sets); - mlx5_glue->describe_counter_set(ctx, 0, &cs_desc); - DEBUG("counter type = %d, num of cs = %ld, attributes = %d", - cs_desc.counter_type, cs_desc.num_of_cs, - cs_desc.attributes); + config.flow_counter_en = !!attr.max_counter_sets; + mlx5_glue->describe_counter_set(ctx, 0, &cs_desc); + DRV_LOG(DEBUG, "counter type = %d, num of cs = %ld, attributes = %d", + cs_desc.counter_type, cs_desc.num_of_cs, + cs_desc.attributes); #endif - config.ind_table_max_size = - device_attr_ex.rss_caps.max_rwq_indirection_table_size; - /* Remove this check once DPDK supports larger/variable - * indirection tables. */ - if (config.ind_table_max_size > - (unsigned int)ETH_RSS_RETA_SIZE_512) - config.ind_table_max_size = ETH_RSS_RETA_SIZE_512; - DEBUG("maximum RX indirection table size is %u", - config.ind_table_max_size); - config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps & - IBV_RAW_PACKET_CAP_CVLAN_STRIPPING); - DEBUG("VLAN stripping is %ssupported", - (config.hw_vlan_strip ? "" : "not ")); - - config.hw_fcs_strip = !!(device_attr_ex.raw_packet_caps & - IBV_RAW_PACKET_CAP_SCATTER_FCS); - DEBUG("FCS stripping configuration is %ssupported", - (config.hw_fcs_strip ? "" : "not ")); - + config.ind_table_max_size = + attr.rss_caps.max_rwq_indirection_table_size; + /* + * Remove this check once DPDK supports larger/variable + * indirection tables. 
+ */ + if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512) + config.ind_table_max_size = ETH_RSS_RETA_SIZE_512; + DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", + config.ind_table_max_size); + config.hw_vlan_strip = !!(attr.raw_packet_caps & + IBV_RAW_PACKET_CAP_CVLAN_STRIPPING); + DRV_LOG(DEBUG, "VLAN stripping is %ssupported", + (config.hw_vlan_strip ? "" : "not ")); + config.hw_fcs_strip = !!(attr.raw_packet_caps & + IBV_RAW_PACKET_CAP_SCATTER_FCS); + DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", + (config.hw_fcs_strip ? "" : "not ")); #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING - config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align; + config.hw_padding = !!attr.rx_pad_end_addr_align; #endif - DEBUG("hardware RX end alignment padding is %ssupported", - (config.hw_padding ? "" : "not ")); - - priv_get_num_vfs(priv, &num_vfs); - config.sriov = (num_vfs || sriov); - config.tso = ((device_attr_ex.tso_caps.max_tso > 0) && - (device_attr_ex.tso_caps.supported_qpts & - (1 << IBV_QPT_RAW_PACKET))); - if (config.tso) - config.tso_max_payload_sz = - device_attr_ex.tso_caps.max_tso; - if (config.mps && !mps) { - ERROR("multi-packet send not supported on this device" - " (" MLX5_TXQ_MPW_EN ")"); - err = ENOTSUP; - goto port_error; - } - INFO("%sMPS is %s", - config.mps == MLX5_MPW_ENHANCED ? "Enhanced " : "", - config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled"); - if (config.cqe_comp && !cqe_comp) { - WARN("Rx CQE compression isn't supported"); - config.cqe_comp = 0; - } - err = priv_uar_init_primary(priv); - if (err) - goto port_error; - /* Configure the first MAC address by default. */ - if (priv_get_mac(priv, &mac.addr_bytes)) { - ERROR("cannot get MAC address, is mlx5_en loaded?" - " (errno: %s)", strerror(errno)); - err = ENODEV; - goto port_error; + DRV_LOG(DEBUG, "hardware Rx end alignment padding is %ssupported", + (config.hw_padding ? "" : "not ")); + config.tso = (attr.tso_caps.max_tso > 0 && + (attr.tso_caps.supported_qpts & + (1 << IBV_QPT_RAW_PACKET))); + if (config.tso) + config.tso_max_payload_sz = attr.tso_caps.max_tso; + if (config.mps && !mps) { + DRV_LOG(ERR, + "multi-packet send not supported on this device" + " (" MLX5_TXQ_MPW_EN ")"); + err = ENOTSUP; + goto error; + } + DRV_LOG(INFO, "%sMPS is %s", + config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "", + config.mps != MLX5_MPW_DISABLED ? 
"enabled" : "disabled"); + if (config.cqe_comp && !cqe_comp) { + DRV_LOG(WARNING, "Rx CQE compression isn't supported"); + config.cqe_comp = 0; + } + if (config.mprq.enabled && mprq) { + if (config.mprq.stride_num_n > mprq_max_stride_num_n || + config.mprq.stride_num_n < mprq_min_stride_num_n) { + config.mprq.stride_num_n = + RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, + mprq_min_stride_num_n); + DRV_LOG(WARNING, + "the number of strides" + " for Multi-Packet RQ is out of range," + " setting default value (%u)", + 1 << config.mprq.stride_num_n); } - INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", - priv->port, - mac.addr_bytes[0], mac.addr_bytes[1], - mac.addr_bytes[2], mac.addr_bytes[3], - mac.addr_bytes[4], mac.addr_bytes[5]); + config.mprq.min_stride_size_n = mprq_min_stride_size_n; + config.mprq.max_stride_size_n = mprq_max_stride_size_n; + } else if (config.mprq.enabled && !mprq) { + DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); + config.mprq.enabled = 0; + } + eth_dev = rte_eth_dev_allocate(name); + if (eth_dev == NULL) { + DRV_LOG(ERR, "can not allocate rte ethdev"); + err = ENOMEM; + goto error; + } + if (priv->representor) + eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; + eth_dev->data->dev_private = priv; + priv->dev_data = eth_dev->data; + eth_dev->data->mac_addrs = priv->mac; + eth_dev->device = dpdk_dev; + eth_dev->device->driver = &mlx5_driver.driver; + err = mlx5_uar_init_primary(eth_dev); + if (err) { + err = rte_errno; + goto error; + } + /* Configure the first MAC address by default. */ + if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { + DRV_LOG(ERR, + "port %u cannot get MAC address, is mlx5_en" + " loaded? (errno: %s)", + eth_dev->data->port_id, strerror(rte_errno)); + err = ENODEV; + goto error; + } + DRV_LOG(INFO, + "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", + eth_dev->data->port_id, + mac.addr_bytes[0], mac.addr_bytes[1], + mac.addr_bytes[2], mac.addr_bytes[3], + mac.addr_bytes[4], mac.addr_bytes[5]); #ifndef NDEBUG - { - char ifname[IF_NAMESIZE]; - - if (priv_get_ifname(priv, &ifname) == 0) - DEBUG("port %u ifname is \"%s\"", - priv->port, ifname); - else - DEBUG("port %u ifname is unknown", priv->port); - } + { + char ifname[IF_NAMESIZE]; + + if (mlx5_get_ifname(eth_dev, &ifname) == 0) + DRV_LOG(DEBUG, "port %u ifname is \"%s\"", + eth_dev->data->port_id, ifname); + else + DRV_LOG(DEBUG, "port %u ifname is unknown", + eth_dev->data->port_id); + } #endif - /* Get actual MTU if possible. */ - priv_get_mtu(priv, &priv->mtu); - DEBUG("port %u MTU is %u", priv->port, priv->mtu); + /* Get actual MTU if possible. */ + err = mlx5_get_mtu(eth_dev, &priv->mtu); + if (err) { + err = rte_errno; + goto error; + } + DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, + priv->mtu); + /* Initialize burst functions to prevent crashes before link-up. */ + eth_dev->rx_pkt_burst = removed_rx_burst; + eth_dev->tx_pkt_burst = removed_tx_burst; + eth_dev->dev_ops = &mlx5_dev_ops; + /* Register MAC address. 
*/ + claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); + if (vf && config.vf_nl_en) + mlx5_nl_mac_addr_sync(eth_dev); + priv->mnl_socket = mlx5_nl_flow_socket_create(); + if (!priv->mnl_socket) { + err = -rte_errno; + DRV_LOG(WARNING, + "flow rules relying on switch offloads will not be" + " supported: cannot open libmnl socket: %s", + strerror(rte_errno)); + } else { + struct rte_flow_error error; + unsigned int ifindex = mlx5_ifindex(eth_dev); - eth_dev = rte_eth_dev_allocate(name); - if (eth_dev == NULL) { - ERROR("can not allocate rte ethdev"); - err = ENOMEM; - goto port_error; + if (!ifindex) { + err = -rte_errno; + error.message = + "cannot retrieve network interface index"; + } else { + err = mlx5_nl_flow_init(priv->mnl_socket, ifindex, + &error); + } + if (err) { + DRV_LOG(WARNING, + "flow rules relying on switch offloads will" + " not be supported: %s: %s", + error.message, strerror(rte_errno)); + mlx5_nl_flow_socket_destroy(priv->mnl_socket); + priv->mnl_socket = NULL; } - eth_dev->data->dev_private = priv; - eth_dev->data->mac_addrs = priv->mac; - eth_dev->device = &pci_dev->device; - rte_eth_copy_pci_info(eth_dev, pci_dev); - eth_dev->device->driver = &mlx5_driver.driver; - /* - * Initialize burst functions to prevent crashes before link-up. - */ - eth_dev->rx_pkt_burst = removed_rx_burst; - eth_dev->tx_pkt_burst = removed_tx_burst; - priv->dev = eth_dev; - eth_dev->dev_ops = &mlx5_dev_ops; - /* Register MAC address. */ - claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); - TAILQ_INIT(&priv->flows); - TAILQ_INIT(&priv->ctrl_flows); - - /* Hint libmlx5 to use PMD allocator for data plane resources */ - struct mlx5dv_ctx_allocators alctr = { - .alloc = &mlx5_alloc_verbs_buf, - .free = &mlx5_free_verbs_buf, - .data = priv, - }; - mlx5_glue->dv_set_context_attr(ctx, - MLX5DV_CTX_ATTR_BUF_ALLOCATORS, - (void *)((uintptr_t)&alctr)); - - /* Bring Ethernet device up. */ - DEBUG("forcing Ethernet interface up"); - priv_set_flags(priv, ~IFF_UP, IFF_UP); - /* Store device configuration on private structure. */ - priv->config = config; - continue; - -port_error: - if (priv) - rte_free(priv); - if (pd) - claim_zero(mlx5_glue->dealloc_pd(pd)); - if (ctx) - claim_zero(mlx5_glue->close_device(ctx)); - break; } - + TAILQ_INIT(&priv->flows); + TAILQ_INIT(&priv->ctrl_flows); + /* Hint libmlx5 to use PMD allocator for data plane resources */ + struct mlx5dv_ctx_allocators alctr = { + .alloc = &mlx5_alloc_verbs_buf, + .free = &mlx5_free_verbs_buf, + .data = priv, + }; + mlx5_glue->dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS, + (void *)((uintptr_t)&alctr)); + /* Bring Ethernet device up. */ + DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", + eth_dev->data->port_id); + mlx5_set_link_up(eth_dev); /* - * XXX if something went wrong in the loop above, there is a resource - * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as - * long as the dpdk does not provide a way to deallocate a ethdev and a - * way to enumerate the registered ethdevs to free the previous ones. + * Even though the interrupt handler is not installed yet, + * interrupts will still trigger on the asyn_fd from + * Verbs context returned by ibv_open_device(). */ - - /* no port found, complain */ - if (!mlx5_dev[idx].ports) { - err = ENODEV; + mlx5_link_update(eth_dev, 0); + /* Store device configuration on private structure. */ + priv->config = config; + /* Supported Verbs flow priority number detection. 
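The MLX5DV_CTX_ATTR_BUF_ALLOCATORS hint registered above redirects libmlx5's data-plane buffer allocations (queue and CQ rings) into the PMD, so they land in DPDK-managed, NUMA-correct memory via rte_malloc_socket(). Stripped of the glue indirection the patch goes through, the underlying rdma-core call is sketched as:

    #include <stddef.h>
    #include <infiniband/mlx5dv.h>

    static void *my_alloc(size_t size, void *data); /* hypothetical */
    static void my_free(void *ptr, void *data);     /* hypothetical */

    static void
    hint_allocators(struct ibv_context *ctx, void *priv)
    {
        struct mlx5dv_ctx_allocators alctr = {
            .alloc = my_alloc, /* e.g. an rte_malloc_socket() wrapper */
            .free = my_free,   /* e.g. an rte_free() wrapper */
            .data = priv,      /* handed back to both callbacks */
        };

        mlx5dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
                                (void *)&alctr);
    }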
*/ + err = mlx5_flow_discover_priorities(eth_dev); + if (err < 0) + goto error; + priv->config.flow_prio = err; + /* + * Once the device is added to the list of memory event + * callback, its global MR cache table cannot be expanded + * on the fly because of deadlock. If it overflows, lookup + * should be done by searching MR list linearly, which is slow. + */ + err = mlx5_mr_btree_init(&priv->mr.cache, + MLX5_MR_BTREE_CACHE_N * 2, + eth_dev->device->numa_node); + if (err) { + err = rte_errno; goto error; } - + /* Add device to memory callback list. */ + rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); + LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list, + priv, mem_event_cb); + rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); + return eth_dev; error: - if (attr_ctx) - claim_zero(mlx5_glue->close_device(attr_ctx)); - if (list) - mlx5_glue->free_device_list(list); - assert(err >= 0); - return -err; + if (priv) { + if (priv->nl_socket_route >= 0) + close(priv->nl_socket_route); + if (priv->nl_socket_rdma >= 0) + close(priv->nl_socket_rdma); + if (priv->mnl_socket) + mlx5_nl_flow_socket_destroy(priv->mnl_socket); + if (own_domain_id) + claim_zero(rte_eth_switch_domain_free(priv->domain_id)); + rte_free(priv); + } + if (pd) + claim_zero(mlx5_glue->dealloc_pd(pd)); + if (eth_dev) + rte_eth_dev_release_port(eth_dev); + if (ctx) + claim_zero(mlx5_glue->close_device(ctx)); + assert(err > 0); + rte_errno = err; + return NULL; +} + +/** Data associated with devices to spawn. */ +struct mlx5_dev_spawn_data { + unsigned int ifindex; /**< Network interface index. */ + struct mlx5_switch_info info; /**< Switch information. */ + struct ibv_device *ibv_dev; /**< Associated IB device. */ + struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */ +}; + +/** + * Comparison callback to sort device data. + * + * This is meant to be used with qsort(). + * + * @param a[in] + * Pointer to pointer to first data object. + * @param b[in] + * Pointer to pointer to second data object. + * + * @return + * 0 if both objects are equal, less than 0 if the first argument is less + * than the second, greater than 0 otherwise. + */ +static int +mlx5_dev_spawn_data_cmp(const void *a, const void *b) +{ + const struct mlx5_switch_info *si_a = + &((const struct mlx5_dev_spawn_data *)a)->info; + const struct mlx5_switch_info *si_b = + &((const struct mlx5_dev_spawn_data *)b)->info; + int ret; + + /* Master device first. */ + ret = si_b->master - si_a->master; + if (ret) + return ret; + /* Then representor devices. */ + ret = si_b->representor - si_a->representor; + if (ret) + return ret; + /* Unidentified devices come last in no specific order. */ + if (!si_a->representor) + return 0; + /* Order representors by name. */ + return si_a->port_name - si_b->port_name; +} + +/** + * DPDK callback to register a PCI device. + * + * This function spawns Ethernet devices out of a given PCI device. + * + * @param[in] pci_drv + * PCI driver structure (mlx5_driver). + * @param[in] pci_dev + * PCI device information. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + struct ibv_device **ibv_list; + unsigned int n = 0; + int vf; + int ret; + + assert(pci_drv == &mlx5_driver); + errno = 0; + ibv_list = mlx5_glue->get_device_list(&ret); + if (!ibv_list) { + rte_errno = errno ? 
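mlx5_dev_spawn_data_cmp() above is a plain qsort() comparator; the subtraction idiom is safe here because master, representor and port_name are small non-negative values that cannot overflow an int. For fields where that does not hold, the overflow-proof shape is:

    #include <stdlib.h>

    struct item {
        int rank;
    };

    static int
    cmp_by_rank(const void *a, const void *b)
    {
        int ra = ((const struct item *)a)->rank;
        int rb = ((const struct item *)b)->rank;

        return (ra > rb) - (ra < rb); /* -1, 0 or 1, never overflows */
    }

With the comparator in place, the probe path simply calls qsort(list, n, sizeof(*list), mlx5_dev_spawn_data_cmp) so masters are spawned before their representors.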
errno : ENOSYS; + DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?"); + return -rte_errno; + } + + struct ibv_device *ibv_match[ret + 1]; + + while (ret-- > 0) { + struct rte_pci_addr pci_addr; + + DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name); + if (mlx5_ibv_device_to_pci_addr(ibv_list[ret], &pci_addr)) + continue; + if (pci_dev->addr.domain != pci_addr.domain || + pci_dev->addr.bus != pci_addr.bus || + pci_dev->addr.devid != pci_addr.devid || + pci_dev->addr.function != pci_addr.function) + continue; + DRV_LOG(INFO, "PCI information matches for device \"%s\"", + ibv_list[ret]->name); + ibv_match[n++] = ibv_list[ret]; + } + ibv_match[n] = NULL; + + struct mlx5_dev_spawn_data list[n]; + int nl_route = n ? mlx5_nl_init(NETLINK_ROUTE) : -1; + int nl_rdma = n ? mlx5_nl_init(NETLINK_RDMA) : -1; + unsigned int i; + unsigned int u; + + /* + * The existence of several matching entries (n > 1) means port + * representors have been instantiated. No existing Verbs call nor + * /sys entries can tell them apart, this can only be done through + * Netlink calls assuming kernel drivers are recent enough to + * support them. + * + * In the event of identification failure through Netlink, try again + * through sysfs, then either: + * + * 1. No device matches (n == 0), complain and bail out. + * 2. A single IB device matches (n == 1) and is not a representor, + * assume no switch support. + * 3. Otherwise no safe assumptions can be made; complain louder and + * bail out. + */ + for (i = 0; i != n; ++i) { + list[i].ibv_dev = ibv_match[i]; + list[i].eth_dev = NULL; + if (nl_rdma < 0) + list[i].ifindex = 0; + else + list[i].ifindex = mlx5_nl_ifindex + (nl_rdma, list[i].ibv_dev->name); + if (nl_route < 0 || + !list[i].ifindex || + mlx5_nl_switch_info(nl_route, list[i].ifindex, + &list[i].info) || + ((!list[i].info.representor && !list[i].info.master) && + mlx5_sysfs_switch_info(list[i].ifindex, &list[i].info))) { + list[i].ifindex = 0; + memset(&list[i].info, 0, sizeof(list[i].info)); + continue; + } + } + if (nl_rdma >= 0) + close(nl_rdma); + if (nl_route >= 0) + close(nl_route); + /* Count unidentified devices. */ + for (u = 0, i = 0; i != n; ++i) + if (!list[i].info.master && !list[i].info.representor) + ++u; + if (u) { + if (n == 1 && u == 1) { + /* Case #2. */ + DRV_LOG(INFO, "no switch support detected"); + } else { + /* Case #3. */ + DRV_LOG(ERR, + "unable to tell which of the matching devices" + " is the master (lack of kernel support?)"); + n = 0; + } + } + /* + * Sort list to probe devices in natural order for users convenience + * (i.e. master first, then representors from lowest to highest ID). + */ + if (n) + qsort(list, n, sizeof(*list), mlx5_dev_spawn_data_cmp); + switch (pci_dev->id.device_id) { + case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: + vf = 1; + break; + default: + vf = 0; + } + for (i = 0; i != n; ++i) { + uint32_t restore; + + list[i].eth_dev = mlx5_dev_spawn + (&pci_dev->device, list[i].ibv_dev, vf, &list[i].info); + if (!list[i].eth_dev) { + if (rte_errno != EBUSY) + break; + /* Device is disabled, ignore it. */ + continue; + } + restore = list[i].eth_dev->data->dev_flags; + rte_eth_copy_pci_info(list[i].eth_dev, pci_dev); + /* Restore non-PCI flags cleared by the above call. 
*/ + list[i].eth_dev->data->dev_flags |= restore; + rte_eth_dev_probing_finish(list[i].eth_dev); + } + mlx5_glue->free_device_list(ibv_list); + if (!n) { + DRV_LOG(WARNING, + "no Verbs device matches PCI device " PCI_PRI_FMT "," + " are kernel drivers loaded?", + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function); + rte_errno = ENOENT; + ret = -rte_errno; + } else if (i != n) { + DRV_LOG(ERR, + "probe of PCI device " PCI_PRI_FMT " aborted after" + " encountering an error: %s", + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function, + strerror(rte_errno)); + ret = -rte_errno; + /* Roll back. */ + while (i--) { + if (!list[i].eth_dev) + continue; + mlx5_dev_close(list[i].eth_dev); + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + rte_free(list[i].eth_dev->data->dev_private); + claim_zero(rte_eth_dev_release_port(list[i].eth_dev)); + } + /* Restore original error. */ + rte_errno = -ret; + } else { + ret = 0; + } + return ret; } static const struct rte_pci_id mlx5_pci_id_map[] = { @@ -1035,6 +1482,10 @@ static const struct rte_pci_id mlx5_pci_id_map[] = { RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF) }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) + }, { .vendor_id = 0 } @@ -1051,12 +1502,55 @@ static struct rte_pci_driver mlx5_driver = { #ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS +/** + * Suffix RTE_EAL_PMD_PATH with "-glue". + * + * This function performs a sanity check on RTE_EAL_PMD_PATH before + * suffixing its last component. + * + * @param buf[out] + * Output buffer, should be large enough otherwise NULL is returned. + * @param size + * Size of @p out. + * + * @return + * Pointer to @p buf or @p NULL in case suffix cannot be appended. + */ +static char * +mlx5_glue_path(char *buf, size_t size) +{ + static const char *const bad[] = { "/", ".", "..", NULL }; + const char *path = RTE_EAL_PMD_PATH; + size_t len = strlen(path); + size_t off; + int i; + + while (len && path[len - 1] == '/') + --len; + for (off = len; off && path[off - 1] != '/'; --off) + ; + for (i = 0; bad[i]; ++i) + if (!strncmp(path + off, bad[i], (int)(len - off))) + goto error; + i = snprintf(buf, size, "%.*s-glue", (int)len, path); + if (i == -1 || (size_t)i >= size) + goto error; + return buf; +error: + DRV_LOG(ERR, + "unable to append \"-glue\" to last component of" + " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\")," + " please re-configure DPDK"); + return NULL; +} + /** * Initialization routine for run-time dependency on rdma-core. */ static int mlx5_glue_init(void) { + char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")]; const char *path[] = { /* * A basic security check is necessary before trusting @@ -1064,7 +1558,13 @@ mlx5_glue_init(void) */ (geteuid() == getuid() && getegid() == getgid() ? getenv("MLX5_GLUE_PATH") : NULL), - RTE_EAL_PMD_PATH, + /* + * When RTE_EAL_PMD_PATH is set, use its glue-suffixed + * variant, otherwise let dlopen() look up libraries on its + * own. + */ + (*RTE_EAL_PMD_PATH ? 
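mlx5_glue_path() above trims trailing slashes, isolates the last path component, refuses "/", "." and "..", then appends "-glue". An equivalent standalone sketch with the same algorithm; the input path in main() is a hypothetical example:

#include <stdio.h>
#include <string.h>

static char *
glue_path(const char *path, char *buf, size_t size)
{
	static const char *const bad[] = { "/", ".", "..", NULL };
	size_t len = strlen(path);
	size_t off;
	int i;

	while (len && path[len - 1] == '/')	/* drop trailing slashes */
		--len;
	for (off = len; off && path[off - 1] != '/'; --off)
		;				/* find last component */
	for (i = 0; bad[i]; ++i)
		if (!strncmp(path + off, bad[i], len - off))
			return NULL;		/* refuse unsafe names */
	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
	if (i < 0 || (size_t)i >= size)
		return NULL;			/* buffer too small */
	return buf;
}

int
main(void)
{
	char buf[64];
	const char *p = glue_path("/usr/lib/dpdk-pmds/", buf, sizeof(buf));

	puts(p ? p : "(rejected)"); /* prints "/usr/lib/dpdk-pmds-glue" */
	return 0;
}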
+ mlx5_glue_path(glue_path, sizeof(glue_path)) : ""), }; unsigned int i = 0; void *handle = NULL; @@ -1095,7 +1595,8 @@ mlx5_glue_init(void) break; if (sizeof(name) != (size_t)ret + 1) continue; - DEBUG("looking for rdma-core glue as \"%s\"", name); + DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"", + name); handle = dlopen(name, RTLD_LAZY); break; } while (1); @@ -1107,7 +1608,7 @@ mlx5_glue_init(void) rte_errno = EINVAL; dlmsg = dlerror(); if (dlmsg) - WARN("cannot load glue library: %s", dlmsg); + DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg); goto glue_error; } sym = dlsym(handle, "mlx5_glue"); @@ -1115,7 +1616,7 @@ mlx5_glue_init(void) rte_errno = EINVAL; dlmsg = dlerror(); if (dlmsg) - ERROR("cannot resolve glue symbol: %s", dlmsg); + DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg); goto glue_error; } mlx5_glue = *sym; @@ -1123,9 +1624,9 @@ mlx5_glue_init(void) glue_error: if (handle) dlclose(handle); - WARN("cannot initialize PMD due to missing run-time" - " dependency on rdma-core libraries (libibverbs," - " libmlx5)"); + DRV_LOG(WARNING, + "cannot initialize PMD due to missing run-time dependency on" + " rdma-core libraries (libibverbs, libmlx5)"); return -rte_errno; } @@ -1134,12 +1635,17 @@ glue_error: /** * Driver initialization routine. */ -RTE_INIT(rte_mlx5_pmd_init); -static void -rte_mlx5_pmd_init(void) +RTE_INIT(rte_mlx5_pmd_init) { - /* Build the static table for ptype conversion. */ + /* Initialize driver log type. */ + mlx5_logtype = rte_log_register("pmd.net.mlx5"); + if (mlx5_logtype >= 0) + rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE); + + /* Build the static tables for Verbs conversion. */ mlx5_set_ptype_table(); + mlx5_set_cksum_table(); + mlx5_set_swp_types_table(); /* * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use * huge pages. Calling ibv_fork_init() during init allows @@ -1150,6 +1656,11 @@ rte_mlx5_pmd_init(void) /* Match the size of Rx completion entry to the size of a cacheline. */ if (RTE_CACHE_LINE_SIZE == 128) setenv("MLX5_CQE_SIZE", "128", 0); + /* + * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to + * cleanup all the Verbs resources even when the device was removed. + */ + setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1); #ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS if (mlx5_glue_init()) return; @@ -1165,8 +1676,9 @@ rte_mlx5_pmd_init(void) } #endif if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) { - ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required", - mlx5_glue->version, MLX5_GLUE_VERSION); + DRV_LOG(ERR, + "rdma-core glue \"%s\" mismatch: \"%s\" is required", + mlx5_glue->version, MLX5_GLUE_VERSION); return; } mlx5_glue->fork_init(); diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 965c19f2..a3a34cff 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_H_ @@ -26,12 +26,13 @@ #include #include #include -#include +#include #include #include #include #include "mlx5_utils.h" +#include "mlx5_mr.h" #include "mlx5_rxtx.h" #include "mlx5_autoconf.h" #include "mlx5_defs.h" @@ -49,8 +50,27 @@ enum { PCI_DEVICE_ID_MELLANOX_CONNECTX5VF = 0x1018, PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019, PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a, + PCI_DEVICE_ID_MELLANOX_CONNECTX5BF = 0xa2d2, }; +/** Switch information returned by mlx5_nl_switch_info(). 
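mlx5_glue_init() above resolves one well-known symbol from a dlopen()ed library and validates its ABI version string before using any function pointer. A compact version of that pattern; struct glue_ops, load_glue(), the library name and the version literal are all assumed names for illustration (link with -ldl):

#include <dlfcn.h>
#include <stdio.h>
#include <string.h>

struct glue_ops { const char *version; /* ... function pointers ... */ };

static const struct glue_ops *
load_glue(const char *soname, const char *want_version)
{
	void *handle = dlopen(soname, RTLD_LAZY);
	const struct glue_ops **sym;

	if (!handle) {
		fprintf(stderr, "cannot load %s: %s\n", soname, dlerror());
		return NULL;
	}
	sym = dlsym(handle, "mlx5_glue");
	if (!sym || !*sym || strcmp((*sym)->version, want_version)) {
		fprintf(stderr, "missing or mismatched glue ABI\n");
		dlclose(handle);
		return NULL;
	}
	return *sym; /* handle intentionally kept open while in use */
}

int
main(void)
{
	return load_glue("librte_pmd_mlx5_glue.so", "18.05") ? 0 : 1;
}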
*/ +struct mlx5_switch_info { + uint32_t master:1; /**< Master device. */ + uint32_t representor:1; /**< Representor device. */ + int32_t port_name; /**< Representor port name. */ + uint64_t switch_id; /**< Switch identifier. */ +}; + +LIST_HEAD(mlx5_dev_list, priv); + +/* Shared memory between primary and secondary processes. */ +struct mlx5_shared_data { + struct mlx5_dev_list mem_event_cb_list; + rte_rwlock_t mem_event_rwlock; +}; + +extern struct mlx5_shared_data *mlx5_shared_data; + struct mlx5_xstats_ctrl { /* Number of device stats. */ uint16_t stats_n; @@ -75,19 +95,34 @@ TAILQ_HEAD(mlx5_flows, rte_flow); */ struct mlx5_dev_config { unsigned int hw_csum:1; /* Checksum offload is supported. */ - unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */ unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */ unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */ unsigned int hw_padding:1; /* End alignment padding is supported. */ - unsigned int sriov:1; /* This is a VF or PF with VF devices. */ + unsigned int vf:1; /* This is a VF. */ unsigned int mps:2; /* Multi-packet send supported mode. */ - unsigned int tunnel_en:1; /* Whether tunnel is supported. */ + unsigned int tunnel_en:1; + /* Whether tunnel stateless offloads are supported. */ + unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */ unsigned int flow_counter_en:1; /* Whether flow counter is supported. */ unsigned int cqe_comp:1; /* CQE compression is enabled. */ unsigned int tso:1; /* Whether TSO is supported. */ unsigned int tx_vec_en:1; /* Tx vector is enabled. */ unsigned int rx_vec_en:1; /* Rx vector is enabled. */ unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */ + unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */ + unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */ + unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */ + struct { + unsigned int enabled:1; /* Whether MPRQ is enabled. */ + unsigned int stride_num_n; /* Number of strides. */ + unsigned int min_stride_size_n; /* Min size of a stride. */ + unsigned int max_stride_size_n; /* Max size of a stride. */ + unsigned int max_memcpy_len; + /* Maximum packet size to memcpy Rx packets. */ + unsigned int min_rxqs_num; + /* Rx queue count threshold to enable MPRQ. */ + } mprq; /* Configurations for Multi-Packet RQ. */ + unsigned int flow_prio; /* Number of flow priorities. */ unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */ unsigned int ind_table_max_size; /* Maximum indirection table size. */ int txq_inline; /* Maximum packet size for inlining. */ @@ -113,33 +148,63 @@ struct mlx5_verbs_alloc_ctx { const void *obj; /* Pointer to the DPDK object. */ }; +LIST_HEAD(mlx5_mr_list, mlx5_mr); + +/* Flow drop context necessary due to Verbs API. */ +struct mlx5_drop { + struct mlx5_hrxq *hrxq; /* Hash Rx queue queue. */ + struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */ +}; + +/** DPDK port to network interface index (ifindex) conversion. */ +struct mlx5_nl_flow_ptoi { + uint16_t port_id; /**< DPDK port ID. */ + unsigned int ifindex; /**< Network interface index. */ +}; + +struct mnl_socket; + struct priv { - struct rte_eth_dev *dev; /* Ethernet device of master process. */ + LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */ + struct rte_eth_dev_data *dev_data; /* Pointer to device data. */ struct ibv_context *ctx; /* Verbs context. */ struct ibv_device_attr_ex device_attr; /* Device properties. */ struct ibv_pd *pd; /* Protection Domain. 
*/ + char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */ char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */ struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */ + BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES); + /* Bit-field of MAC addresses owned by the PMD. */ uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */ unsigned int vlan_filter_n; /* Number of configured VLAN filters. */ /* Device properties. */ uint16_t mtu; /* Configured MTU. */ - uint8_t port; /* Physical port number. */ - unsigned int pending_alarm:1; /* An alarm is pending. */ unsigned int isolated:1; /* Whether isolated mode is enabled. */ + unsigned int representor:1; /* Device is a port representor. */ + uint16_t domain_id; /* Switch domain identifier. */ + int32_t representor_id; /* Port representor identifier. */ /* RX/TX queues. */ unsigned int rxqs_n; /* RX queues array size. */ unsigned int txqs_n; /* TX queues array size. */ struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */ struct mlx5_txq_data *(*txqs)[]; /* TX queues. */ + struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */ struct rte_eth_rss_conf rss_conf; /* RSS configuration. */ struct rte_intr_handle intr_handle; /* Interrupt handler. */ unsigned int (*reta_idx)[]; /* RETA index table. */ unsigned int reta_idx_n; /* RETA index size. */ - struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */ + struct mlx5_drop drop_queue; /* Flow drop queues. */ struct mlx5_flows flows; /* RTE Flow rules. */ struct mlx5_flows ctrl_flows; /* Control flow rules. */ - LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */ + LIST_HEAD(counters, mlx5_flow_counter) flow_counters; + /* Flow counters. */ + struct { + uint32_t dev_gen; /* Generation number to flush local caches. */ + rte_rwlock_t rwlock; /* MR Lock. */ + struct mlx5_mr_btree cache; /* Global MR cache table. */ + struct mlx5_mr_list mr_list; /* Registered MR list. */ + struct mlx5_mr_list mr_free_list; /* Freed MR list. */ + } mr; LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */ LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */ LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */ @@ -149,55 +214,25 @@ struct priv { LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls; uint32_t link_speed_capa; /* Link speed capabilities. */ struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */ - rte_spinlock_t lock; /* Lock for control functions. */ int primary_socket; /* Unix socket for primary process. */ void *uar_base; /* Reserved address space for UAR mapping */ struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */ struct mlx5_dev_config config; /* Device configuration. */ struct mlx5_verbs_alloc_ctx verbs_alloc_ctx; /* Context for Verbs allocator. */ + int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */ + int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */ + uint32_t nl_sn; /* Netlink message sequence number. */ +#ifndef RTE_ARCH_64 + rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR */ + rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX]; + /* UAR same-page access control required in 32bit implementations. */ +#endif + struct mnl_socket *mnl_socket; /* Libmnl socket. */ }; -/** - * Lock private structure to protect it from concurrent access in the - * control path. - * - * @param priv - * Pointer to private structure. 
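The new per-port mr block pairs an rwlock-protected global cache with a dev_gen counter so datapath lookups can detect that their local per-queue caches went stale. A library-agnostic sketch of that generation scheme using POSIX rwlocks; the driver itself uses rte_rwlock and a B-tree, and all names here are illustrative:

#include <pthread.h>
#include <stdint.h>

struct mr_cache {
	pthread_rwlock_t rwlock;  /* protects the global table */
	uint32_t dev_gen;         /* bumped whenever entries are dropped */
	/* ... lookup table ... */
};

/* Datapath: remember the generation seen when the local cache was
 * filled; if the global generation moved, flush and refill first. */
static uint32_t
lookup(struct mr_cache *c, uint32_t *local_gen)
{
	uint32_t ret;

	pthread_rwlock_rdlock(&c->rwlock);
	if (*local_gen != c->dev_gen) {
		/* flush local (per-queue) cache here */
		*local_gen = c->dev_gen;
	}
	ret = 0; /* ... search the global table ... */
	pthread_rwlock_unlock(&c->rwlock);
	return ret;
}

/* Control path: invalidate under the write lock, bump the generation. */
static void
invalidate(struct mr_cache *c)
{
	pthread_rwlock_wrlock(&c->rwlock);
	/* ... remove entries ... */
	c->dev_gen++;
	pthread_rwlock_unlock(&c->rwlock);
}

int
main(void)
{
	struct mr_cache c = { .rwlock = PTHREAD_RWLOCK_INITIALIZER };
	uint32_t local_gen = 0;

	(void)lookup(&c, &local_gen);
	invalidate(&c);
	return 0;
}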
- */ -static inline void -priv_lock(struct priv *priv) -{ - rte_spinlock_lock(&priv->lock); -} - -/** - * Try to lock private structure to protect it from concurrent access in the - * control path. - * - * @param priv - * Pointer to private structure. - * - * @return - * 1 if the lock is successfully taken; 0 otherwise. - */ -static inline int -priv_trylock(struct priv *priv) -{ - return rte_spinlock_trylock(&priv->lock); -} - -/** - * Unlock private structure. - * - * @param priv - * Pointer to private structure. - */ -static inline void -priv_unlock(struct priv *priv) -{ - rte_spinlock_unlock(&priv->lock); -} +#define PORT_ID(priv) ((priv)->dev_data->port_id) +#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)]) /* mlx5.c */ @@ -205,131 +240,179 @@ int mlx5_getenv_int(const char *); /* mlx5_ethdev.c */ -struct priv *mlx5_get_priv(struct rte_eth_dev *dev); -int mlx5_is_secondary(void); -int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]); -int priv_ifreq(const struct priv *, int req, struct ifreq *); -int priv_is_ib_cntr(const char *); -int priv_get_cntr_sysfs(struct priv *, const char *, uint64_t *); -int priv_get_num_vfs(struct priv *, uint16_t *); -int priv_get_mtu(struct priv *, uint16_t *); -int priv_set_flags(struct priv *, unsigned int, unsigned int); -int mlx5_dev_configure(struct rte_eth_dev *); -void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *); +int mlx5_get_master_ifname(const struct rte_eth_dev *dev, + char (*ifname)[IF_NAMESIZE]); +int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]); +unsigned int mlx5_ifindex(const struct rte_eth_dev *dev); +int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr, + int master); +int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu); +int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, + unsigned int flags); +int mlx5_dev_configure(struct rte_eth_dev *dev); +void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info); const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev); -int priv_link_update(struct priv *, int); -int priv_force_link_status_change(struct priv *, int); -int mlx5_link_update(struct rte_eth_dev *, int); -int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t); -int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *); -int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *); -int mlx5_ibv_device_to_pci_addr(const struct ibv_device *, - struct rte_pci_addr *); -void mlx5_dev_link_status_handler(void *); -void mlx5_dev_interrupt_handler(void *); -void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *); -void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *); +int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete); +int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status); +int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); +int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, + struct rte_pci_addr *pci_addr); +void mlx5_dev_link_status_handler(void *arg); +void mlx5_dev_interrupt_handler(void *arg); +void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev); +void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev); int mlx5_set_link_down(struct rte_eth_dev *dev); int 
mlx5_set_link_up(struct rte_eth_dev *dev); int mlx5_is_removed(struct rte_eth_dev *dev); -eth_tx_burst_t priv_select_tx_function(struct priv *, struct rte_eth_dev *); -eth_rx_burst_t priv_select_rx_function(struct priv *, struct rte_eth_dev *); +eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev); +eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev); +unsigned int mlx5_dev_to_port_id(const struct rte_device *dev, + uint16_t *port_list, + unsigned int port_list_n); +int mlx5_sysfs_switch_info(unsigned int ifindex, + struct mlx5_switch_info *info); /* mlx5_mac.c */ -int priv_get_mac(struct priv *, uint8_t (*)[ETHER_ADDR_LEN]); -void mlx5_mac_addr_remove(struct rte_eth_dev *, uint32_t); -int mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t, - uint32_t); -void mlx5_mac_addr_set(struct rte_eth_dev *, struct ether_addr *); +int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]); +void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); +int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index, uint32_t vmdq); +int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr); +int mlx5_set_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addr_set, uint32_t nb_mc_addr); /* mlx5_rss.c */ -int mlx5_rss_hash_update(struct rte_eth_dev *, struct rte_eth_rss_conf *); -int mlx5_rss_hash_conf_get(struct rte_eth_dev *, struct rte_eth_rss_conf *); -int priv_rss_reta_index_resize(struct priv *, unsigned int); -int mlx5_dev_rss_reta_query(struct rte_eth_dev *, - struct rte_eth_rss_reta_entry64 *, uint16_t); -int mlx5_dev_rss_reta_update(struct rte_eth_dev *, - struct rte_eth_rss_reta_entry64 *, uint16_t); +int mlx5_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size); +int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); /* mlx5_rxmode.c */ -void mlx5_promiscuous_enable(struct rte_eth_dev *); -void mlx5_promiscuous_disable(struct rte_eth_dev *); -void mlx5_allmulticast_enable(struct rte_eth_dev *); -void mlx5_allmulticast_disable(struct rte_eth_dev *); +void mlx5_promiscuous_enable(struct rte_eth_dev *dev); +void mlx5_promiscuous_disable(struct rte_eth_dev *dev); +void mlx5_allmulticast_enable(struct rte_eth_dev *dev); +void mlx5_allmulticast_disable(struct rte_eth_dev *dev); /* mlx5_stats.c */ -void priv_xstats_init(struct priv *); -int mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *); -void mlx5_stats_reset(struct rte_eth_dev *); -int mlx5_xstats_get(struct rte_eth_dev *, - struct rte_eth_xstat *, unsigned int); -void mlx5_xstats_reset(struct rte_eth_dev *); -int mlx5_xstats_get_names(struct rte_eth_dev *, - struct rte_eth_xstat_name *, unsigned int); +void mlx5_xstats_init(struct rte_eth_dev *dev); +int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); +void mlx5_stats_reset(struct rte_eth_dev *dev); +int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, + unsigned int n); +void mlx5_xstats_reset(struct rte_eth_dev *dev); +int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_xstat_name *xstats_names, + unsigned int 
n); /* mlx5_vlan.c */ -int mlx5_vlan_filter_set(struct rte_eth_dev *, uint16_t, int); -int mlx5_vlan_offload_set(struct rte_eth_dev *, int); -void mlx5_vlan_strip_queue_set(struct rte_eth_dev *, uint16_t, int); +int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); +void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); +int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask); /* mlx5_trigger.c */ -int mlx5_dev_start(struct rte_eth_dev *); -void mlx5_dev_stop(struct rte_eth_dev *); -int priv_dev_traffic_enable(struct priv *, struct rte_eth_dev *); -int priv_dev_traffic_disable(struct priv *, struct rte_eth_dev *); -int priv_dev_traffic_restart(struct priv *, struct rte_eth_dev *); -int mlx5_traffic_restart(struct rte_eth_dev *); +int mlx5_dev_start(struct rte_eth_dev *dev); +void mlx5_dev_stop(struct rte_eth_dev *dev); +int mlx5_traffic_enable(struct rte_eth_dev *dev); +void mlx5_traffic_disable(struct rte_eth_dev *dev); +int mlx5_traffic_restart(struct rte_eth_dev *dev); /* mlx5_flow.c */ -int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type, - enum rte_filter_op, void *); -int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *, - const struct rte_flow_item [], - const struct rte_flow_action [], - struct rte_flow_error *); -struct rte_flow *mlx5_flow_create(struct rte_eth_dev *, - const struct rte_flow_attr *, - const struct rte_flow_item [], - const struct rte_flow_action [], - struct rte_flow_error *); -int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *, - struct rte_flow_error *); -void priv_flow_flush(struct priv *, struct mlx5_flows *); -int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *); -int mlx5_flow_query(struct rte_eth_dev *, struct rte_flow *, - enum rte_flow_action_type, void *, - struct rte_flow_error *); -int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *); -int priv_flow_start(struct priv *, struct mlx5_flows *); -void priv_flow_stop(struct priv *, struct mlx5_flows *); -int priv_flow_verify(struct priv *); -int mlx5_ctrl_flow_vlan(struct rte_eth_dev *, struct rte_flow_item_eth *, - struct rte_flow_item_eth *, struct rte_flow_item_vlan *, - struct rte_flow_item_vlan *); -int mlx5_ctrl_flow(struct rte_eth_dev *, struct rte_flow_item_eth *, - struct rte_flow_item_eth *); -int priv_flow_create_drop_queue(struct priv *); -void priv_flow_delete_drop_queue(struct priv *); +int mlx5_flow_discover_priorities(struct rte_eth_dev *dev); +void mlx5_flow_print(struct rte_flow *flow); +int mlx5_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error); +void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list); +int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error); +int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, + const struct rte_flow_action *action, void *data, + struct rte_flow_error *error); +int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable, + struct rte_flow_error *error); +int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type 
filter_type, + enum rte_filter_op filter_op, + void *arg); +int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list); +void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list); +int mlx5_flow_verify(struct rte_eth_dev *dev); +int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask, + struct rte_flow_item_vlan *vlan_spec, + struct rte_flow_item_vlan *vlan_mask); +int mlx5_ctrl_flow(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask); +int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev); +void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev); /* mlx5_socket.c */ -int priv_socket_init(struct priv *priv); -int priv_socket_uninit(struct priv *priv); -void priv_socket_handle(struct priv *priv); -int priv_socket_connect(struct priv *priv); - -/* mlx5_mr.c */ - -struct mlx5_mr *priv_mr_new(struct priv *, struct rte_mempool *); -struct mlx5_mr *priv_mr_get(struct priv *, struct rte_mempool *); -int priv_mr_release(struct priv *, struct mlx5_mr *); -int priv_mr_verify(struct priv *); +int mlx5_socket_init(struct rte_eth_dev *priv); +void mlx5_socket_uninit(struct rte_eth_dev *priv); +void mlx5_socket_handle(struct rte_eth_dev *priv); +int mlx5_socket_connect(struct rte_eth_dev *priv); + +/* mlx5_nl.c */ + +int mlx5_nl_init(int protocol); +int mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index); +int mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index); +void mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev); +void mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev); +int mlx5_nl_promisc(struct rte_eth_dev *dev, int enable); +int mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable); +unsigned int mlx5_nl_ifindex(int nl, const char *name); +int mlx5_nl_switch_info(int nl, unsigned int ifindex, + struct mlx5_switch_info *info); + +/* mlx5_nl_flow.c */ + +int mlx5_nl_flow_transpose(void *buf, + size_t size, + const struct mlx5_nl_flow_ptoi *ptoi, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + struct rte_flow_error *error); +void mlx5_nl_flow_brand(void *buf, uint32_t handle); +int mlx5_nl_flow_create(struct mnl_socket *nl, void *buf, + struct rte_flow_error *error); +int mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf, + struct rte_flow_error *error); +int mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex, + struct rte_flow_error *error); +struct mnl_socket *mlx5_nl_flow_socket_create(void); +void mlx5_nl_flow_socket_destroy(struct mnl_socket *nl); #endif /* RTE_PMD_MLX5_H_ */ diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h index c3334ca3..f2a16795 100644 --- a/drivers/net/mlx5/mlx5_defs.h +++ b/drivers/net/mlx5/mlx5_defs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_DEFS_H_ @@ -13,8 +13,13 @@ /* Reported driver name. */ #define MLX5_DRIVER_NAME "net_mlx5" +/* Maximum number of simultaneous unicast MAC addresses. */ +#define MLX5_MAX_UC_MAC_ADDRESSES 128 +/* Maximum number of simultaneous Multicast MAC addresses. */ +#define MLX5_MAX_MC_MAC_ADDRESSES 128 /* Maximum number of simultaneous MAC addresses. 
*/ -#define MLX5_MAX_MAC_ADDRESSES 128 +#define MLX5_MAX_MAC_ADDRESSES \ + (MLX5_MAX_UC_MAC_ADDRESSES + MLX5_MAX_MC_MAC_ADDRESSES) /* Maximum number of simultaneous VLAN filters. */ #define MLX5_MAX_VLAN_IDS 128 @@ -32,16 +37,11 @@ */ #define MLX5_TX_COMP_THRESH_INLINE_DIV (1 << 3) -/* - * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP - * from which buffers are to be transmitted will have to be mapped by this - * driver to their own Memory Region (MR). This is a slow operation. - * - * This value is always 1 for RX queues. - */ -#ifndef MLX5_PMD_TX_MP_CACHE -#define MLX5_PMD_TX_MP_CACHE 8 -#endif +/* Size of per-queue MR cache array for linear search. */ +#define MLX5_MR_CACHE_N 8 + +/* Size of MR cache table for binary search. */ +#define MLX5_MR_BTREE_CACHE_N 256 /* * If defined, only use software counters. The PMD will never ask the hardware @@ -58,16 +58,17 @@ #define MLX5_MAX_XSTATS 32 /* Maximum Packet headers size (L2+L3+L4) for TSO. */ -#define MLX5_MAX_TSO_HEADER 128 +#define MLX5_MAX_TSO_HEADER 192 /* Default minimum number of Tx queues for vectorized Tx. */ #define MLX5_VPMD_MIN_TXQS 4 /* Threshold of buffer replenishment for vectorized Rx. */ -#define MLX5_VPMD_RXQ_RPLNSH_THRESH 64U +#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \ + (RTE_MIN(MLX5_VPMD_RX_MAX_BURST, (unsigned int)(n) >> 2)) /* Maximum size of burst for vectorized Rx. */ -#define MLX5_VPMD_RX_MAX_BURST MLX5_VPMD_RXQ_RPLNSH_THRESH +#define MLX5_VPMD_RX_MAX_BURST 64U /* * Maximum size of burst for vectorized Tx. This is related to the maximum size @@ -82,17 +83,54 @@ /* Supported RSS */ #define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP)) -/* Maximum number of attempts to query link status before giving up. */ -#define MLX5_MAX_LINK_QUERY_ATTEMPTS 5 +/* Timeout in seconds to get a valid link status. */ +#define MLX5_LINK_STATUS_TIMEOUT 10 /* Reserved address space for UAR mapping. */ -#define MLX5_UAR_SIZE (1ULL << 32) +#define MLX5_UAR_SIZE (1ULL << (sizeof(uintptr_t) * 4)) /* Offset of reserved UAR address space to hugepage memory. Offset is used here * to minimize possibility of address next to hugepage being used by other code * in either primary or secondary process, failing to map TX UAR would make TX * packets invisible to HW. */ -#define MLX5_UAR_OFFSET (1ULL << 32) +#define MLX5_UAR_OFFSET (1ULL << (sizeof(uintptr_t) * 4)) + +/* Maximum number of UAR pages used by a port, + * These are the size and mask for an array of mutexes used to synchronize + * the access to port's UARs on platforms that do not support 64 bit writes. + * In such systems it is possible to issue the 64 bits DoorBells through two + * consecutive writes, each write 32 bits. The access to a UAR page (which can + * be accessible by all threads in the process) must be synchronized + * (for example, using a semaphore). Such a synchronization is not required + * when ringing DoorBells on different UAR pages. + * A port with 512 Tx queues uses 8, 4kBytes, UAR pages which are shared + * among the ports. + */ +#define MLX5_UAR_PAGE_NUM_MAX 64 +#define MLX5_UAR_PAGE_NUM_MASK ((MLX5_UAR_PAGE_NUM_MAX) - 1) + +/* Log 2 of the default number of strides per WQE for Multi-Packet RQ. */ +#define MLX5_MPRQ_STRIDE_NUM_N 6U + +/* Two-byte shift is disabled for Multi-Packet RQ. */ +#define MLX5_MPRQ_TWO_BYTE_SHIFT 0 + +/* + * Minimum size of packet to be memcpy'd instead of being attached as an + * external buffer. + */ +#define MLX5_MPRQ_MEMCPY_DEFAULT_LEN 128 + +/* Minimum number Rx queues to enable Multi-Packet RQ. 
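The MLX5_UAR_PAGE_NUM_MAX comment above explains why 32-bit builds serialize doorbell writes: a 64-bit doorbell becomes two 32-bit stores that must not interleave with another thread's pair on the same UAR page. A simplified sketch of that rule, using a single pthread mutex instead of the driver's per-page rte_spinlock array and omitting MMIO barriers:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t uar_lock = PTHREAD_MUTEX_INITIALIZER;

/* Issue a 64-bit doorbell as two 32-bit stores; the lock keeps another
 * thread's pair of stores from interleaving with ours. */
static void
ring_doorbell32(volatile uint32_t *db, uint64_t value)
{
	pthread_mutex_lock(&uar_lock);
	db[0] = (uint32_t)value;         /* low half first */
	db[1] = (uint32_t)(value >> 32); /* then high half */
	pthread_mutex_unlock(&uar_lock);
}

int
main(void)
{
	uint32_t db[2]; /* stand-in for a mapped UAR register */

	ring_doorbell32(db, 0x1122334455667788ULL);
	return 0;
}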
*/ +#define MLX5_MPRQ_MIN_RXQS 12 + +/* Cache size of mempool for Multi-Packet RQ. */ +#define MLX5_MPRQ_MP_CACHE_SZ 32U + +/* Definition of static_assert found in /usr/include/assert.h */ +#ifndef HAVE_STATIC_ASSERT +#define static_assert _Static_assert +#endif #endif /* RTE_PMD_MLX5_DEFS_H_ */ diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 66650769..34c5b95e 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -1,12 +1,13 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #define _GNU_SOURCE #include #include +#include #include #include #include @@ -17,14 +18,13 @@ #include #include #include -#include #include #include #include -#include #include #include #include +#include #include #include @@ -32,14 +32,41 @@ #include #include #include -#include #include +#include +#include #include "mlx5.h" #include "mlx5_glue.h" #include "mlx5_rxtx.h" #include "mlx5_utils.h" +/* Supported speed values found in /usr/include/linux/ethtool.h */ +#ifndef HAVE_SUPPORTED_40000baseKR4_Full +#define SUPPORTED_40000baseKR4_Full (1 << 23) +#endif +#ifndef HAVE_SUPPORTED_40000baseCR4_Full +#define SUPPORTED_40000baseCR4_Full (1 << 24) +#endif +#ifndef HAVE_SUPPORTED_40000baseSR4_Full +#define SUPPORTED_40000baseSR4_Full (1 << 25) +#endif +#ifndef HAVE_SUPPORTED_40000baseLR4_Full +#define SUPPORTED_40000baseLR4_Full (1 << 26) +#endif +#ifndef HAVE_SUPPORTED_56000baseKR4_Full +#define SUPPORTED_56000baseKR4_Full (1 << 27) +#endif +#ifndef HAVE_SUPPORTED_56000baseCR4_Full +#define SUPPORTED_56000baseCR4_Full (1 << 28) +#endif +#ifndef HAVE_SUPPORTED_56000baseSR4_Full +#define SUPPORTED_56000baseSR4_Full (1 << 29) +#endif +#ifndef HAVE_SUPPORTED_56000baseLR4_Full +#define SUPPORTED_56000baseLR4_Full (1 << 30) +#endif + /* Add defines in case the running kernel is not the same as user headers. */ #ifndef ETHTOOL_GLINKSETTINGS struct ethtool_link_settings { @@ -92,19 +119,21 @@ struct ethtool_link_settings { #endif /** - * Get interface name from private structure. + * Get master interface name from private structure. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * @param[out] ifname * Interface name output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
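mlx5_defs.h falls back to the C11 keyword when static_assert is unavailable; its HAVE_STATIC_ASSERT guard comes from DPDK's build-time feature detection. A standalone equivalent keyed on the <assert.h> macro instead of a generated symbol:

#include <assert.h>
#include <stdint.h>

/* <assert.h> defines static_assert as a macro in C11; provide the same
 * fallback as mlx5_defs.h when it is missing. */
#ifndef static_assert
#define static_assert _Static_assert
#endif

/* Compile-time sanity check, usable at file scope. */
static_assert(sizeof(uint64_t) == 8, "uint64_t must be 8 bytes");

int
main(void)
{
	return 0;
}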
*/ int -priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]) +mlx5_get_master_ifname(const struct rte_eth_dev *dev, + char (*ifname)[IF_NAMESIZE]) { + struct priv *priv = dev->data->dev_private; DIR *dir; struct dirent *dent; unsigned int dev_type = 0; @@ -115,8 +144,10 @@ priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]) MKSTR(path, "%s/device/net", priv->ibdev_path); dir = opendir(path); - if (dir == NULL) - return -1; + if (dir == NULL) { + rte_errno = errno; + return -rte_errno; + } } while ((dent = readdir(dir)) != NULL) { char *name = dent->d_name; @@ -162,359 +193,198 @@ try_dev_id: if (dev_port == dev_port_prev) goto try_dev_id; dev_port_prev = dev_port; - if (dev_port == (priv->port - 1u)) - snprintf(match, sizeof(match), "%s", name); + if (dev_port == 0) + strlcpy(match, name, sizeof(match)); } closedir(dir); - if (match[0] == '\0') - return -1; + if (match[0] == '\0') { + rte_errno = ENOENT; + return -rte_errno; + } strncpy(*ifname, match, sizeof(*ifname)); return 0; } /** - * Check if the counter is located on ib counters file. + * Get interface name from private structure. * - * @param[in] cntr - * Counter name. + * This is a port representor-aware version of mlx5_get_master_ifname(). * - * @return - * 1 if counter is located on ib counters file , 0 otherwise. - */ -int -priv_is_ib_cntr(const char *cntr) -{ - if (!strcmp(cntr, "out_of_buffer")) - return 1; - return 0; -} - -/** - * Read from sysfs entry. - * - * @param[in] priv - * Pointer to private structure. - * @param[in] entry - * Entry name relative to sysfs path. - * @param[out] buf - * Data output buffer. - * @param size - * Buffer size. + * @param[in] dev + * Pointer to Ethernet device. + * @param[out] ifname + * Interface name output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int -priv_sysfs_read(const struct priv *priv, const char *entry, - char *buf, size_t size) +int +mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) { - char ifname[IF_NAMESIZE]; - FILE *file; - int ret; - int err; - - if (priv_get_ifname(priv, &ifname)) - return -1; - - if (priv_is_ib_cntr(entry)) { - MKSTR(path, "%s/ports/1/hw_counters/%s", - priv->ibdev_path, entry); - file = fopen(path, "rb"); - } else { - MKSTR(path, "%s/device/net/%s/%s", - priv->ibdev_path, ifname, entry); - file = fopen(path, "rb"); + struct priv *priv = dev->data->dev_private; + unsigned int ifindex = + priv->nl_socket_rdma >= 0 ? + mlx5_nl_ifindex(priv->nl_socket_rdma, priv->ibdev_name) : 0; + + if (!ifindex) { + if (!priv->representor) + return mlx5_get_master_ifname(dev, ifname); + rte_errno = ENXIO; + return -rte_errno; } - if (file == NULL) - return -1; - ret = fread(buf, 1, size, file); - err = errno; - if (((size_t)ret < size) && (ferror(file))) - ret = -1; - else - ret = size; - fclose(file); - errno = err; - return ret; + if (if_indextoname(ifindex, &(*ifname)[0])) + return 0; + rte_errno = errno; + return -rte_errno; } /** - * Write to sysfs entry. + * Get the interface index from device name. * - * @param[in] priv - * Pointer to private structure. - * @param[in] entry - * Entry name relative to sysfs path. - * @param[in] buf - * Data buffer. - * @param size - * Buffer size. + * @param[in] dev + * Pointer to Ethernet device. * * @return - * 0 on success, -1 on failure and errno is set. + * Nonzero interface index on success, zero otherwise and rte_errno is set. 
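mlx5_get_master_ifname() above walks "<ibdev_path>/device/net" in sysfs to find the netdev sharing the PCI device. A runnable scan of the same layout; the mlx5_0 path is an example, not a value taken from the patch:

#include <dirent.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *path = "/sys/class/infiniband/mlx5_0/device/net";
	DIR *dir = opendir(path);
	struct dirent *dent;

	if (!dir) {
		perror(path);
		return 1;
	}
	while ((dent = readdir(dir)) != NULL) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		printf("candidate ifname: %s\n", dent->d_name);
	}
	closedir(dir);
	return 0;
}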
*/ -static int -priv_sysfs_write(const struct priv *priv, const char *entry, - char *buf, size_t size) +unsigned int +mlx5_ifindex(const struct rte_eth_dev *dev) { char ifname[IF_NAMESIZE]; - FILE *file; - int ret; - int err; - - if (priv_get_ifname(priv, &ifname)) - return -1; - - MKSTR(path, "%s/device/net/%s/%s", priv->ibdev_path, ifname, entry); - - file = fopen(path, "wb"); - if (file == NULL) - return -1; - ret = fwrite(buf, 1, size, file); - err = errno; - if (((size_t)ret < size) || (ferror(file))) - ret = -1; - else - ret = size; - fclose(file); - errno = err; - return ret; -} - -/** - * Get unsigned long sysfs property. - * - * @param priv - * Pointer to private structure. - * @param[in] name - * Entry name relative to sysfs path. - * @param[out] value - * Value output buffer. - * - * @return - * 0 on success, -1 on failure and errno is set. - */ -static int -priv_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value) -{ - int ret; - unsigned long value_ret; - char value_str[32]; + unsigned int ifindex; - ret = priv_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1)); - if (ret == -1) { - DEBUG("cannot read %s value from sysfs: %s", - name, strerror(errno)); - return -1; - } - value_str[ret] = '\0'; - errno = 0; - value_ret = strtoul(value_str, NULL, 0); - if (errno) { - DEBUG("invalid %s value `%s': %s", name, value_str, - strerror(errno)); - return -1; - } - *value = value_ret; - return 0; -} - -/** - * Set unsigned long sysfs property. - * - * @param priv - * Pointer to private structure. - * @param[in] name - * Entry name relative to sysfs path. - * @param value - * Value to set. - * - * @return - * 0 on success, -1 on failure and errno is set. - */ -static int -priv_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value) -{ - int ret; - MKSTR(value_str, "%lu", value); - - ret = priv_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1)); - if (ret == -1) { - DEBUG("cannot write %s `%s' (%lu) to sysfs: %s", - name, value_str, value, strerror(errno)); - return -1; - } - return 0; + if (mlx5_get_ifname(dev, &ifname)) + return 0; + ifindex = if_nametoindex(ifname); + if (!ifindex) + rte_errno = errno; + return ifindex; } /** * Perform ifreq ioctl() on associated Ethernet device. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * @param req * Request number to pass to ioctl(). * @param[out] ifr * Interface request structure output buffer. + * @param master + * When device is a port representor, perform request on master device + * instead. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr) +mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr, + int master) { int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); - int ret = -1; + int ret = 0; - if (sock == -1) - return ret; - if (priv_get_ifname(priv, &ifr->ifr_name) == 0) - ret = ioctl(sock, req, ifr); + if (sock == -1) { + rte_errno = errno; + return -rte_errno; + } + if (master) + ret = mlx5_get_master_ifname(dev, &ifr->ifr_name); + else + ret = mlx5_get_ifname(dev, &ifr->ifr_name); + if (ret) + goto error; + ret = ioctl(sock, req, ifr); + if (ret == -1) { + rte_errno = errno; + goto error; + } close(sock); - return ret; -} - -/** - * Return the number of active VFs for the current device. - * - * @param[in] priv - * Pointer to private structure. 
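mlx5_get_ifname() and mlx5_ifindex() convert between a Netlink-provided interface index and a name with if_indextoname()/if_nametoindex(). A self-contained round trip, using "lo" as an example interface:

#include <net/if.h>
#include <stdio.h>

int
main(void)
{
	char name[IF_NAMESIZE];
	unsigned int idx = if_nametoindex("lo"); /* any known interface */

	if (!idx || !if_indextoname(idx, name)) {
		perror("interface lookup");
		return 1;
	}
	printf("index %u <-> name %s\n", idx, name);
	return 0;
}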
- * @param[out] num_vfs - * Number of active VFs. - * - * @return - * 0 on success, -1 on failure and errno is set. - */ -int -priv_get_num_vfs(struct priv *priv, uint16_t *num_vfs) -{ - /* The sysfs entry name depends on the operating system. */ - const char **name = (const char *[]){ - "device/sriov_numvfs", - "device/mlx5_num_vfs", - NULL, - }; - int ret; - - do { - unsigned long ulong_num_vfs; - - ret = priv_get_sysfs_ulong(priv, *name, &ulong_num_vfs); - if (!ret) - *num_vfs = ulong_num_vfs; - } while (*(++name) && ret); - return ret; + return 0; +error: + close(sock); + return -rte_errno; } /** * Get device MTU. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] mtu * MTU value output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_get_mtu(struct priv *priv, uint16_t *mtu) +mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu) { - unsigned long ulong_mtu; + struct ifreq request; + int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request, 0); - if (priv_get_sysfs_ulong(priv, "mtu", &ulong_mtu) == -1) - return -1; - *mtu = ulong_mtu; - return 0; -} - -/** - * Read device counter from sysfs. - * - * @param priv - * Pointer to private structure. - * @param name - * Counter name. - * @param[out] cntr - * Counter output buffer. - * - * @return - * 0 on success, -1 on failure and errno is set. - */ -int -priv_get_cntr_sysfs(struct priv *priv, const char *name, uint64_t *cntr) -{ - unsigned long ulong_ctr; - - if (priv_get_sysfs_ulong(priv, name, &ulong_ctr) == -1) - return -1; - *cntr = ulong_ctr; + if (ret) + return ret; + *mtu = request.ifr_mtu; return 0; } /** * Set device MTU. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param mtu * MTU value to set. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_set_mtu(struct priv *priv, uint16_t mtu) +mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { - uint16_t new_mtu; + struct ifreq request = { .ifr_mtu = mtu, }; - if (priv_set_sysfs_ulong(priv, "mtu", mtu) || - priv_get_mtu(priv, &new_mtu)) - return -1; - if (new_mtu == mtu) - return 0; - errno = EINVAL; - return -1; + return mlx5_ifreq(dev, SIOCSIFMTU, &request, 0); } /** * Set device flags. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param keep * Bitmask for flags that must remain untouched. * @param flags * Bitmask for flags to modify. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags) +mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags) { - unsigned long tmp; + struct ifreq request; + int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request, 0); - if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1) - return -1; - tmp &= keep; - tmp |= (flags & (~keep)); - return priv_set_sysfs_ulong(priv, "flags", tmp); + if (ret) + return ret; + request.ifr_flags &= keep; + request.ifr_flags |= flags & ~keep; + return mlx5_ifreq(dev, SIOCSIFFLAGS, &request, 0); } /** - * Ethernet device configuration. - * - * Prepare the driver for a given number of TX and RX queues. + * DPDK callback for Ethernet device configuration. 
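mlx5_get_mtu()/mlx5_set_mtu() replace the old sysfs reads with SIOCGIFMTU/SIOCSIFMTU ioctls on a throwaway datagram socket. A standalone getter following the same steps; get_mtu() and the "lo" interface are illustrative:

#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Read an interface MTU through the same ioctl mlx5_get_mtu() uses. */
static int
get_mtu(const char *ifname, int *mtu)
{
	struct ifreq ifr;
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	int ret;

	if (sock == -1)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
	ret = ioctl(sock, SIOCGIFMTU, &ifr);
	close(sock);
	if (ret == -1)
		return -1;
	*mtu = ifr.ifr_mtu;
	return 0;
}

int
main(void)
{
	int mtu;

	if (!get_mtu("lo", &mtu))
		printf("lo MTU: %d\n", mtu);
	return 0;
}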
* * @param dev * Pointer to Ethernet device structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int -dev_configure(struct rte_eth_dev *dev) +int +mlx5_dev_configure(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; unsigned int rxqs_n = dev->data->nb_rx_queues; @@ -524,60 +394,49 @@ dev_configure(struct rte_eth_dev *dev) unsigned int reta_idx_n; const uint8_t use_app_rss_key = !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key; - uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv); - uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; - uint64_t supp_rx_offloads = - (mlx5_priv_get_rx_port_offloads(priv) | - mlx5_priv_get_rx_queue_offloads(priv)); - uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; - - if ((tx_offloads & supp_tx_offloads) != tx_offloads) { - ERROR("Some Tx offloads are not supported " - "requested 0x%" PRIx64 " supported 0x%" PRIx64, - tx_offloads, supp_tx_offloads); - return ENOTSUP; - } - if ((rx_offloads & supp_rx_offloads) != rx_offloads) { - ERROR("Some Rx offloads are not supported " - "requested 0x%" PRIx64 " supported 0x%" PRIx64, - rx_offloads, supp_rx_offloads); - return ENOTSUP; - } + int ret = 0; + if (use_app_rss_key && (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len != - rss_hash_default_key_len)) { - /* MLX5 RSS only support 40bytes key. */ - return EINVAL; + MLX5_RSS_HASH_KEY_LEN)) { + DRV_LOG(ERR, "port %u RSS key len must be %s Bytes long", + dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN)); + rte_errno = EINVAL; + return -rte_errno; } priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key, - rss_hash_default_key_len, 0); + MLX5_RSS_HASH_KEY_LEN, 0); if (!priv->rss_conf.rss_key) { - ERROR("cannot allocate RSS hash key memory (%u)", rxqs_n); - return ENOMEM; + DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)", + dev->data->port_id, rxqs_n); + rte_errno = ENOMEM; + return -rte_errno; } memcpy(priv->rss_conf.rss_key, use_app_rss_key ? dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key : rss_hash_default_key, - rss_hash_default_key_len); - priv->rss_conf.rss_key_len = rss_hash_default_key_len; + MLX5_RSS_HASH_KEY_LEN); + priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN; priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; priv->rxqs = (void *)dev->data->rx_queues; priv->txqs = (void *)dev->data->tx_queues; if (txqs_n != priv->txqs_n) { - INFO("%p: TX queues number update: %u -> %u", - (void *)dev, priv->txqs_n, txqs_n); + DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u", + dev->data->port_id, priv->txqs_n, txqs_n); priv->txqs_n = txqs_n; } if (rxqs_n > priv->config.ind_table_max_size) { - ERROR("cannot handle this many RX queues (%u)", rxqs_n); - return EINVAL; + DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)", + dev->data->port_id, rxqs_n); + rte_errno = EINVAL; + return -rte_errno; } if (rxqs_n == priv->rxqs_n) return 0; - INFO("%p: RX queues number update: %u -> %u", - (void *)dev, priv->rxqs_n, rxqs_n); + DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u", + dev->data->port_id, priv->rxqs_n, rxqs_n); priv->rxqs_n = rxqs_n; /* If the requested number of RX queues is not a power of two, use the * maximum indirection table size for better balancing. @@ -585,8 +444,9 @@ dev_configure(struct rte_eth_dev *dev) reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ? 
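The RETA sizing in mlx5_dev_configure() maps a power-of-two Rx queue count 1:1 and otherwise uses the device maximum so hashes spread evenly across padded entries. A worked version with a local ceiling-log2; log2above() is reimplemented here and max_size stands in for ind_table_max_size:

#include <stdio.h>

/* Ceiling log2, like DPDK's log2above(). */
static unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

/* (rxqs_n & (rxqs_n - 1)) is nonzero exactly when rxqs_n is not a
 * power of two, in which case the device maximum is used instead. */
static unsigned int
reta_size(unsigned int rxqs_n, unsigned int max_size)
{
	return 1 << log2above((rxqs_n & (rxqs_n - 1)) ? max_size : rxqs_n);
}

int
main(void)
{
	printf("%u %u\n", reta_size(8, 512), reta_size(6, 512));
	/* prints "8 512" */
	return 0;
}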
priv->config.ind_table_max_size : rxqs_n)); - if (priv_rss_reta_index_resize(priv, reta_idx_n)) - return ENOMEM; + ret = mlx5_rss_reta_index_resize(dev, reta_idx_n); + if (ret) + return ret; /* When the number of RX queues is not a power of two, the remaining * table entries are padded with reused WQs and hashes are not spread * uniformly. */ @@ -599,25 +459,42 @@ dev_configure(struct rte_eth_dev *dev) } /** - * DPDK callback for Ethernet device configuration. + * Sets default tuning parameters. * * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success, negative errno value on failure. + * Pointer to Ethernet device. + * @param[out] info + * Info structure output buffer. */ -int -mlx5_dev_configure(struct rte_eth_dev *dev) +static void +mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) { struct priv *priv = dev->data->dev_private; - int ret; - priv_lock(priv); - ret = dev_configure(dev); - assert(ret >= 0); - priv_unlock(priv); - return -ret; + /* Minimum CPU utilization. */ + info->default_rxportconf.ring_size = 256; + info->default_txportconf.ring_size = 256; + info->default_rxportconf.burst_size = 64; + info->default_txportconf.burst_size = 64; + if (priv->link_speed_capa & ETH_LINK_SPEED_100G) { + info->default_rxportconf.nb_queues = 16; + info->default_txportconf.nb_queues = 16; + if (dev->data->nb_rx_queues > 2 || + dev->data->nb_tx_queues > 2) { + /* Max Throughput. */ + info->default_rxportconf.ring_size = 2048; + info->default_txportconf.ring_size = 2048; + } + } else { + info->default_rxportconf.nb_queues = 8; + info->default_txportconf.nb_queues = 8; + if (dev->data->nb_rx_queues > 2 || + dev->data->nb_tx_queues > 2) { + /* Max Throughput. */ + info->default_rxportconf.ring_size = 4096; + info->default_txportconf.ring_size = 4096; + } + } } /** @@ -636,9 +513,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) unsigned int max; char ifname[IF_NAMESIZE]; - info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); - - priv_lock(priv); /* FIXME: we should ask the device for these values. */ info->min_rx_bufsize = 32; info->max_rx_pktlen = 65536; @@ -653,22 +527,54 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) max = 65535; info->max_rx_queues = max; info->max_tx_queues = max; - info->max_mac_addrs = RTE_DIM(priv->mac); - info->rx_queue_offload_capa = - mlx5_priv_get_rx_queue_offloads(priv); - info->rx_offload_capa = (mlx5_priv_get_rx_port_offloads(priv) | + info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES; + info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev); + info->rx_offload_capa = (mlx5_get_rx_port_offloads() | info->rx_queue_offload_capa); - info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv); - if (priv_get_ifname(priv, &ifname) == 0) + info->tx_offload_capa = mlx5_get_tx_port_offloads(dev); + if (mlx5_get_ifname(dev, &ifname) == 0) info->if_index = if_nametoindex(ifname); info->reta_size = priv->reta_idx_n ? 
priv->reta_idx_n : config->ind_table_max_size; - info->hash_key_size = priv->rss_conf.rss_key_len; + info->hash_key_size = MLX5_RSS_HASH_KEY_LEN; info->speed_capa = priv->link_speed_capa; info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK; - priv_unlock(priv); + mlx5_set_default_params(dev, info); + info->switch_info.name = dev->data->name; + info->switch_info.domain_id = priv->domain_id; + info->switch_info.port_id = priv->representor_id; + if (priv->representor) { + unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0); + uint16_t port_id[i]; + + i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i); + while (i--) { + struct priv *opriv = + rte_eth_devices[port_id[i]].data->dev_private; + + if (!opriv || + opriv->representor || + opriv->domain_id != priv->domain_id) + continue; + /* + * Override switch name with that of the master + * device. + */ + info->switch_info.name = opriv->dev_data->name; + break; + } + } } +/** + * Get supported packet types. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * A pointer to the supported Packet types array. + */ const uint32_t * mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) { @@ -691,6 +597,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) }; if (dev->rx_pkt_burst == mlx5_rx_burst || + dev->rx_pkt_burst == mlx5_rx_burst_mprq || dev->rx_pkt_burst == mlx5_rx_burst_vec) return ptypes; return NULL; @@ -701,11 +608,15 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device structure. - * @param wait_to_complete - * Wait for request completion (ignored). + * @param[out] link + * Storage for current link status. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) +mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, + struct rte_eth_link *link) { struct priv *priv = dev->data->dev_private; struct ethtool_cmd edata = { @@ -714,26 +625,28 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) struct ifreq ifr; struct rte_eth_link dev_link; int link_speed = 0; + int ret; - /* priv_lock() is not taken to allow concurrent calls. 
*/ - - (void)wait_to_complete; - if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { - WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); - return -1; + ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr, 1); + if (ret) { + DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; } memset(&dev_link, 0, sizeof(dev_link)); dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); ifr.ifr_data = (void *)&edata; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { - WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", - strerror(errno)); - return -1; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); + if (ret) { + DRV_LOG(WARNING, + "port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; } link_speed = ethtool_cmd_speed(&edata); if (link_speed == -1) - dev_link.link_speed = 0; + dev_link.link_speed = ETH_SPEED_NUM_NONE; else dev_link.link_speed = link_speed; priv->link_speed_capa = 0; @@ -753,13 +666,13 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX); dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); - if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) { - /* Link status changed. */ - dev->data->dev_link = dev_link; - return 0; + if ((dev_link.link_speed && !dev_link.link_status) || + (!dev_link.link_speed && dev_link.link_status)) { + rte_errno = EAGAIN; + return -rte_errno; } - /* Link status is still the same. */ - return -1; + *link = dev_link; + return 0; } /** @@ -767,31 +680,41 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) * * @param dev * Pointer to Ethernet device structure. - * @param wait_to_complete - * Wait for request completion (ignored). + * @param[out] link + * Storage for current link status. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
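mlx5_link_update_unlocked_gset() is the legacy fallback path: ETHTOOL_GSET through SIOCETHTOOL, then ethtool_cmd_speed() to decode the rate. A standalone equivalent; "eth0" is an example interface name:

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int
main(void)
{
	struct ethtool_cmd edata = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1); /* example */
	ifr.ifr_data = (void *)&edata;
	if (sock == -1 || ioctl(sock, SIOCETHTOOL, &ifr) == -1) {
		perror("ETHTOOL_GSET");
		return 1;
	}
	printf("speed: %u Mb/s\n", ethtool_cmd_speed(&edata));
	close(sock);
	return 0;
}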
*/ static int -mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) +mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, + struct rte_eth_link *link) + { struct priv *priv = dev->data->dev_private; struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS }; struct ifreq ifr; struct rte_eth_link dev_link; uint64_t sc; + int ret; - (void)wait_to_complete; - if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { - WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); - return -1; + ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr, 1); + if (ret) { + DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; } memset(&dev_link, 0, sizeof(dev_link)); dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); ifr.ifr_data = (void *)&gcmd; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { - DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", - strerror(errno)); - return -1; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); + if (ret) { + DRV_LOG(DEBUG, + "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)" + " failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; } gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords; @@ -802,10 +725,13 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) *ecmd = gcmd; ifr.ifr_data = (void *)ecmd; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { - DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", - strerror(errno)); - return -1; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); + if (ret) { + DRV_LOG(DEBUG, + "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)" + " failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; } dev_link.link_speed = ecmd->speed; sc = ecmd->link_mode_masks[0] | @@ -849,121 +775,13 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX); dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); - if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) { - /* Link status changed. */ - dev->data->dev_link = dev_link; - return 0; - } - /* Link status is still the same. */ - return -1; -} - -/** - * Enable receiving and transmitting traffic. - * - * @param priv - * Pointer to private structure. - */ -static void -priv_link_start(struct priv *priv) -{ - struct rte_eth_dev *dev = priv->dev; - int err; - - dev->tx_pkt_burst = priv_select_tx_function(priv, dev); - dev->rx_pkt_burst = priv_select_rx_function(priv, dev); - err = priv_dev_traffic_enable(priv, dev); - if (err) - ERROR("%p: error occurred while configuring control flows: %s", - (void *)priv, strerror(err)); - err = priv_flow_start(priv, &priv->flows); - if (err) - ERROR("%p: error occurred while configuring flows: %s", - (void *)priv, strerror(err)); -} - -/** - * Disable receiving and transmitting traffic. - * - * @param priv - * Pointer to private structure. - */ -static void -priv_link_stop(struct priv *priv) -{ - struct rte_eth_dev *dev = priv->dev; - - priv_flow_stop(priv, &priv->flows); - priv_dev_traffic_disable(priv, dev); - dev->rx_pkt_burst = removed_rx_burst; - dev->tx_pkt_burst = removed_tx_burst; -} - -/** - * Retrieve physical link information and update rx/tx_pkt_burst callbacks - * accordingly. - * - * @param priv - * Pointer to private structure. - * @param wait_to_complete - * Wait for request completion (ignored). 
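The negation of link_mode_masks_nwords above is the ETHTOOL_GLINKSETTINGS handshake: the kernel answers the first request with a negative word count advertising its link-mode mask size, and real data arrives only on a second, correctly sized request. A sketch of both calls; "eth0" is illustrative and error handling is minimal:

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

int
main(void)
{
	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
	struct ethtool_link_settings *ecmd;
	struct ifreq ifr;
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1); /* example */
	ifr.ifr_data = (void *)&gcmd;
	/* First call: kernel replies with a NEGATIVE nwords advertising
	 * how many 32-bit link-mode words it supports. */
	if (sock == -1 || ioctl(sock, SIOCETHTOOL, &ifr) == -1 ||
	    gcmd.link_mode_masks_nwords >= 0) {
		perror("ETHTOOL_GLINKSETTINGS probe");
		return 1;
	}
	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
	/* Second call: resend with the right size; the reply carries the
	 * supported/advertising/peer masks after the fixed header. */
	ecmd = calloc(1, sizeof(*ecmd) +
		      3 * sizeof(uint32_t) * gcmd.link_mode_masks_nwords);
	if (!ecmd)
		return 1;
	*ecmd = gcmd;
	ifr.ifr_data = (void *)ecmd;
	if (ioctl(sock, SIOCETHTOOL, &ifr) == -1) {
		perror("ETHTOOL_GLINKSETTINGS");
		return 1;
	}
	printf("speed: %u Mb/s\n", ecmd->speed);
	free(ecmd);
	return 0;
}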
- */ -int -priv_link_update(struct priv *priv, int wait_to_complete) -{ - struct rte_eth_dev *dev = priv->dev; - struct utsname utsname; - int ver[3]; - int ret; - struct rte_eth_link dev_link = dev->data->dev_link; - - if (uname(&utsname) == -1 || - sscanf(utsname.release, "%d.%d.%d", - &ver[0], &ver[1], &ver[2]) != 3 || - KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0)) - ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete); - else - ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete); - /* If lsc interrupt is disabled, should always be ready for traffic. */ - if (!dev->data->dev_conf.intr_conf.lsc) { - priv_link_start(priv); - return ret; - } - /* Re-select burst callbacks only if link status has been changed. */ - if (!ret && dev_link.link_status != dev->data->dev_link.link_status) { - if (dev->data->dev_link.link_status == ETH_LINK_UP) - priv_link_start(priv); - else - priv_link_stop(priv); - } - return ret; -} - -/** - * Querying the link status till it changes to the desired state. - * Number of query attempts is bounded by MLX5_MAX_LINK_QUERY_ATTEMPTS. - * - * @param priv - * Pointer to private structure. - * @param status - * Link desired status. - * - * @return - * 0 on success, negative errno value on failure. - */ -int -priv_force_link_status_change(struct priv *priv, int status) -{ - int try = 0; - - while (try < MLX5_MAX_LINK_QUERY_ATTEMPTS) { - priv_link_update(priv, 0); - if (priv->dev->data->dev_link.link_status == status) - return 0; - try++; - sleep(1); + if ((dev_link.link_speed && !dev_link.link_status) || + (!dev_link.link_speed && dev_link.link_status)) { + rte_errno = EAGAIN; + return -rte_errno; } - return -EAGAIN; + *link = dev_link; + return 0; } /** @@ -972,17 +790,42 @@ priv_force_link_status_change(struct priv *priv, int status) * @param dev * Pointer to Ethernet device structure. * @param wait_to_complete - * Wait for request completion (ignored). + * Wait for request completion. + * + * @return + * 0 if link status was not updated, positive if it was, a negative errno + * value otherwise and rte_errno is set. */ int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete) { - struct priv *priv = dev->data->dev_private; int ret; + struct rte_eth_link dev_link; + time_t start_time = time(NULL); - priv_lock(priv); - ret = priv_link_update(priv, wait_to_complete); - priv_unlock(priv); + do { + ret = mlx5_link_update_unlocked_gs(dev, &dev_link); + if (ret) + ret = mlx5_link_update_unlocked_gset(dev, &dev_link); + if (ret == 0) + break; + /* Handle wait to complete situation. */ + if (wait_to_complete && ret == -EAGAIN) { + if (abs((int)difftime(time(NULL), start_time)) < + MLX5_LINK_STATUS_TIMEOUT) { + usleep(0); + continue; + } else { + rte_errno = EBUSY; + return -rte_errno; + } + } else if (ret < 0) { + return ret; + } + } while (wait_to_complete); + ret = !!memcmp(&dev->data->dev_link, &dev_link, + sizeof(struct rte_eth_link)); + dev->data->dev_link = dev_link; return ret; } @@ -995,39 +838,33 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete) * New MTU. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
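mlx5_link_update() above prefers the extended GLINKSETTINGS query, falls back to the legacy GSET one, and retries transient readings until MLX5_LINK_STATUS_TIMEOUT expires. The retry policy on its own, with illustrative names:

#include <errno.h>
#include <time.h>
#include <unistd.h>

/* Re-run "query" while it reports a transient state (-EAGAIN); give up
 * with -EBUSY once "timeout" seconds have elapsed. */
static int
poll_until_consistent(int (*query)(void *ctx), void *ctx, double timeout)
{
	time_t start = time(NULL);
	int ret;

	while ((ret = query(ctx)) == -EAGAIN) {
		if (difftime(time(NULL), start) >= timeout)
			return -EBUSY;
		usleep(0); /* yield to the kernel, as the driver does */
	}
	return ret;
}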
*/ int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { struct priv *priv = dev->data->dev_private; - uint16_t kern_mtu; - int ret = 0; + uint16_t kern_mtu = 0; + int ret; - priv_lock(priv); - ret = priv_get_mtu(priv, &kern_mtu); + ret = mlx5_get_mtu(dev, &kern_mtu); if (ret) - goto out; + return ret; /* Set kernel interface MTU first. */ - ret = priv_set_mtu(priv, mtu); + ret = mlx5_set_mtu(dev, mtu); if (ret) - goto out; - ret = priv_get_mtu(priv, &kern_mtu); + return ret; + ret = mlx5_get_mtu(dev, &kern_mtu); if (ret) - goto out; + return ret; if (kern_mtu == mtu) { priv->mtu = mtu; - DEBUG("adapter port %u MTU set to %u", priv->port, mtu); + DRV_LOG(DEBUG, "port %u adapter MTU set to %u", + dev->data->port_id, mtu); + return 0; } - priv_unlock(priv); - return 0; -out: - ret = errno; - WARN("cannot set port %u MTU to %u: %s", priv->port, mtu, - strerror(ret)); - priv_unlock(priv); - assert(ret >= 0); - return -ret; + rte_errno = EAGAIN; + return -rte_errno; } /** @@ -1039,12 +876,11 @@ out: * Flow control output buffer. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) { - struct priv *priv = dev->data->dev_private; struct ifreq ifr; struct ethtool_pauseparam ethpause = { .cmd = ETHTOOL_GPAUSEPARAM @@ -1052,15 +888,14 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) int ret; ifr.ifr_data = (void *)ðpause; - priv_lock(priv); - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { - ret = errno; - WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)" - " failed: %s", - strerror(ret)); - goto out; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); + if (ret) { + DRV_LOG(WARNING, + "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:" + " %s", + dev->data->port_id, strerror(rte_errno)); + return ret; } - fc_conf->autoneg = ethpause.autoneg; if (ethpause.rx_pause && ethpause.tx_pause) fc_conf->mode = RTE_FC_FULL; @@ -1070,12 +905,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) fc_conf->mode = RTE_FC_TX_PAUSE; else fc_conf->mode = RTE_FC_NONE; - ret = 0; - -out: - priv_unlock(priv); - assert(ret >= 0); - return -ret; + return 0; } /** @@ -1087,12 +917,11 @@ out: * Flow control parameters. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) { - struct priv *priv = dev->data->dev_private; struct ifreq ifr; struct ethtool_pauseparam ethpause = { .cmd = ETHTOOL_SPAUSEPARAM @@ -1112,21 +941,15 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) ethpause.tx_pause = 1; else ethpause.tx_pause = 0; - - priv_lock(priv); - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { - ret = errno; - WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" - " failed: %s", - strerror(ret)); - goto out; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 0); + if (ret) { + DRV_LOG(WARNING, + "port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" + " failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; } - ret = 0; - -out: - priv_unlock(priv); - assert(ret >= 0); - return -ret; + return 0; } /** @@ -1138,7 +961,7 @@ out: * PCI bus address output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
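Setting the MTU above is a write-then-verify sequence: the kernel may silently clamp the value, so it is read back and only accepted when it matches. A condensed sketch; set_mtu()/get_mtu() are hypothetical stand-ins for the driver's ifreq helpers:

#include <errno.h>
#include <stdint.h>

static int set_mtu(int fd, const char *ifname, uint16_t mtu);  /* stand-in */
static int get_mtu(int fd, const char *ifname, uint16_t *mtu); /* stand-in */

static int
set_mtu_checked(int fd, const char *ifname, uint16_t mtu)
{
	uint16_t kern_mtu = 0;

	if (set_mtu(fd, ifname, mtu) || get_mtu(fd, ifname, &kern_mtu))
		return -1;
	/* EAGAIN mirrors the driver: the kernel did not take the value. */
	return kern_mtu == mtu ? 0 : -EAGAIN;
}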
*/ int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, @@ -1149,8 +972,10 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, MKSTR(path, "%s/device/uevent", device->ibdev_path); file = fopen(path, "rb"); - if (file == NULL) - return -1; + if (file == NULL) { + rte_errno = errno; + return -rte_errno; + } while (fgets(line, sizeof(line), file) == line) { size_t len = strlen(line); int ret; @@ -1179,47 +1004,11 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, return 0; } -/** - * Update the link status. - * - * @param priv - * Pointer to private structure. - * - * @return - * Zero if the callback process can be called immediately. - */ -static int -priv_link_status_update(struct priv *priv) -{ - struct rte_eth_link *link = &priv->dev->data->dev_link; - - priv_link_update(priv, 0); - if (((link->link_speed == 0) && link->link_status) || - ((link->link_speed != 0) && !link->link_status)) { - /* - * Inconsistent status. Event likely occurred before the - * kernel netdevice exposes the new status. - */ - if (!priv->pending_alarm) { - priv->pending_alarm = 1; - rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US, - mlx5_dev_link_status_handler, - priv->dev); - } - return 1; - } else if (unlikely(priv->pending_alarm)) { - /* Link interrupt occurred while alarm is already scheduled. */ - priv->pending_alarm = 0; - rte_eal_alarm_cancel(mlx5_dev_link_status_handler, priv->dev); - } - return 0; -} - /** * Device status handler. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param events * Pointer to event flags holder. * @@ -1227,59 +1016,36 @@ priv_link_status_update(struct priv *priv) * Events bitmap of callback process which can be called immediately. */ static uint32_t -priv_dev_status_handler(struct priv *priv) +mlx5_dev_status_handler(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct ibv_async_event event; uint32_t ret = 0; + if (mlx5_link_update(dev, 0) == -EAGAIN) { + usleep(0); + return 0; + } /* Read all message and acknowledge them. */ for (;;) { if (mlx5_glue->get_async_event(priv->ctx, &event)) break; if ((event.event_type == IBV_EVENT_PORT_ACTIVE || event.event_type == IBV_EVENT_PORT_ERR) && - (priv->dev->data->dev_conf.intr_conf.lsc == 1)) + (dev->data->dev_conf.intr_conf.lsc == 1)) ret |= (1 << RTE_ETH_EVENT_INTR_LSC); else if (event.event_type == IBV_EVENT_DEVICE_FATAL && - priv->dev->data->dev_conf.intr_conf.rmv == 1) + dev->data->dev_conf.intr_conf.rmv == 1) ret |= (1 << RTE_ETH_EVENT_INTR_RMV); else - DEBUG("event type %d on port %d not handled", - event.event_type, event.element.port_num); + DRV_LOG(DEBUG, + "port %u event type %d on not handled", + dev->data->port_id, event.event_type); mlx5_glue->ack_async_event(&event); } - if (ret & (1 << RTE_ETH_EVENT_INTR_LSC)) - if (priv_link_status_update(priv)) - ret &= ~(1 << RTE_ETH_EVENT_INTR_LSC); return ret; } -/** - * Handle delayed link status event. - * - * @param arg - * Registered argument. - */ -void -mlx5_dev_link_status_handler(void *arg) -{ - struct rte_eth_dev *dev = arg; - struct priv *priv = dev->data->dev_private; - int ret; - - while (!priv_trylock(priv)) { - /* Alarm is being canceled. */ - if (priv->pending_alarm == 0) - return; - rte_pause(); - } - priv->pending_alarm = 0; - ret = priv_link_status_update(priv); - priv_unlock(priv); - if (!ret) - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); -} - /** * Handle interrupts from the NIC. 
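The PCI address lookup above walks <ibdev_path>/device/uevent, which holds one KEY=value pair per line. The parsing step in isolation (helper name illustrative):

#include <stdio.h>

/* Scan a uevent file for e.g. "PCI_SLOT_NAME=0000:3b:00.0" and split
 * it into domain/bus/device/function. */
static int
uevent_to_pci_addr(FILE *file, unsigned int *dom, unsigned int *bus,
		   unsigned int *devid, unsigned int *func)
{
	char line[256];

	while (fgets(line, sizeof(line), file) == line)
		if (sscanf(line, "PCI_SLOT_NAME=%x:%x:%x.%x",
			   dom, bus, devid, func) == 4)
			return 0;
	return -1;
}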
* @@ -1292,12 +1058,9 @@ void mlx5_dev_interrupt_handler(void *cb_arg) { struct rte_eth_dev *dev = cb_arg; - struct priv *priv = dev->data->dev_private; uint32_t events; - priv_lock(priv); - events = priv_dev_status_handler(priv); - priv_unlock(priv); + events = mlx5_dev_status_handler(dev); if (events & (1 << RTE_ETH_EVENT_INTR_LSC)) _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); if (events & (1 << RTE_ETH_EVENT_INTR_RMV)) @@ -1314,24 +1077,21 @@ static void mlx5_dev_handler_socket(void *cb_arg) { struct rte_eth_dev *dev = cb_arg; - struct priv *priv = dev->data->dev_private; - priv_lock(priv); - priv_socket_handle(priv); - priv_unlock(priv); + mlx5_socket_handle(dev); } /** * Uninstall interrupt handler. * - * @param priv - * Pointer to private structure. * @param dev - * Pointer to the rte_eth_dev structure. + * Pointer to Ethernet device. */ void -priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) +mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; + if (dev->data->dev_conf.intr_conf.lsc || dev->data->dev_conf.intr_conf.rmv) rte_intr_callback_unregister(&priv->intr_handle, @@ -1339,10 +1099,6 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) if (priv->primary_socket) rte_intr_callback_unregister(&priv->intr_handle_socket, mlx5_dev_handler_socket, dev); - if (priv->pending_alarm) { - priv->pending_alarm = 0; - rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev); - } priv->intr_handle.fd = 0; priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; priv->intr_handle_socket.fd = 0; @@ -1352,21 +1108,24 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) /** * Install interrupt handler. * - * @param priv - * Pointer to private structure. * @param dev - * Pointer to the rte_eth_dev structure. + * Pointer to Ethernet device. */ void -priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) +mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) { - int rc, flags; + struct priv *priv = dev->data->dev_private; + int ret; + int flags; assert(priv->ctx->async_fd > 0); flags = fcntl(priv->ctx->async_fd, F_GETFL); - rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); - if (rc < 0) { - INFO("failed to change file descriptor async event queue"); + ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); + if (ret) { + DRV_LOG(INFO, + "port %u failed to change file descriptor async event" + " queue", + dev->data->port_id); dev->data->dev_conf.intr_conf.lsc = 0; dev->data->dev_conf.intr_conf.rmv = 0; } @@ -1377,9 +1136,11 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) rte_intr_callback_register(&priv->intr_handle, mlx5_dev_interrupt_handler, dev); } - - rc = priv_socket_init(priv); - if (!rc && priv->primary_socket) { + ret = mlx5_socket_init(dev); + if (ret) + DRV_LOG(ERR, "port %u cannot initialise socket: %s", + dev->data->port_id, strerror(rte_errno)); + else if (priv->primary_socket) { priv->intr_handle_socket.fd = priv->primary_socket; priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT; rte_intr_callback_register(&priv->intr_handle_socket, @@ -1387,23 +1148,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) } } -/** - * Change the link state (UP / DOWN). - * - * @param priv - * Pointer to private data structure. - * @param up - * Nonzero for link up, otherwise link down. 
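The fcntl() step in the install path above makes the Verbs async event descriptor non-blocking, so the handler can drain every queued event without stalling the EAL interrupt thread. The same switch as a tiny free-standing helper:

#include <fcntl.h>

static int
set_nonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags < 0)
		return -1;
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}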
- * - * @return - * 0 on success, errno value on failure. - */ -static int -priv_dev_set_link(struct priv *priv, int up) -{ - return priv_set_flags(priv, ~IFF_UP, up ? IFF_UP : ~IFF_UP); -} - /** * DPDK callback to bring the link DOWN. * @@ -1411,18 +1155,12 @@ priv_dev_set_link(struct priv *priv, int up) * Pointer to Ethernet device structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_set_link_down(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; - int err; - - priv_lock(priv); - err = priv_dev_set_link(priv, 0); - priv_unlock(priv); - return err; + return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP); } /** @@ -1432,63 +1170,68 @@ mlx5_set_link_down(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_set_link_up(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; - int err; - - priv_lock(priv); - err = priv_dev_set_link(priv, 1); - priv_unlock(priv); - return err; + return mlx5_set_flags(dev, ~IFF_UP, IFF_UP); } /** * Configure the TX function to use. * - * @param priv - * Pointer to private data structure. * @param dev - * Pointer to rte_eth_dev structure. + * Pointer to private data structure. * * @return * Pointer to selected Tx burst function. */ eth_tx_burst_t -priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev) +mlx5_select_tx_function(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst; struct mlx5_dev_config *config = &priv->config; uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | - DEV_TX_OFFLOAD_GRE_TNL_TSO)); + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO)); + int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)); int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT); assert(priv != NULL); /* Select appropriate TX function. */ - if (vlan_insert || tso) + if (vlan_insert || tso || swp) return tx_pkt_burst; if (config->mps == MLX5_MPW_ENHANCED) { - if (priv_check_vec_tx_support(priv, dev) > 0) { - if (priv_check_raw_vec_tx_support(priv, dev) > 0) + if (mlx5_check_vec_tx_support(dev) > 0) { + if (mlx5_check_raw_vec_tx_support(dev) > 0) tx_pkt_burst = mlx5_tx_burst_raw_vec; else tx_pkt_burst = mlx5_tx_burst_vec; - DEBUG("selected Enhanced MPW TX vectorized function"); + DRV_LOG(DEBUG, + "port %u selected enhanced MPW Tx vectorized" + " function", + dev->data->port_id); } else { tx_pkt_burst = mlx5_tx_burst_empw; - DEBUG("selected Enhanced MPW TX function"); + DRV_LOG(DEBUG, + "port %u selected enhanced MPW Tx function", + dev->data->port_id); } } else if (config->mps && (config->txq_inline > 0)) { tx_pkt_burst = mlx5_tx_burst_mpw_inline; - DEBUG("selected MPW inline TX function"); + DRV_LOG(DEBUG, "port %u selected MPW inline Tx function", + dev->data->port_id); } else if (config->mps) { tx_pkt_burst = mlx5_tx_burst_mpw; - DEBUG("selected MPW TX function"); + DRV_LOG(DEBUG, "port %u selected MPW Tx function", + dev->data->port_id); } return tx_pkt_burst; } @@ -1496,23 +1239,24 @@ priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev) /** * Configure the RX function to use. 
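mlx5_set_link_down()/mlx5_set_link_up() above encode both directions through the (keep, flags) convention of mlx5_set_flags(): bits selected by "keep" survive from the current interface flags, the rest come from "flags". Assuming that convention, the combination step reduces to:

#include <net/if.h>

/* (~IFF_UP, IFF_UP) raises the link, (~IFF_UP, ~IFF_UP) drops it;
 * every other interface flag is preserved. */
static unsigned int
apply_flags(unsigned int cur, unsigned int keep, unsigned int flags)
{
	return (cur & keep) | (flags & ~keep);
}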
* - * @param priv - * Pointer to private data structure. * @param dev - * Pointer to rte_eth_dev structure. + * Pointer to private data structure. * * @return * Pointer to selected Rx burst function. */ eth_rx_burst_t -priv_select_rx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev) +mlx5_select_rx_function(struct rte_eth_dev *dev) { eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst; - assert(priv != NULL); - if (priv_check_vec_rx_support(priv) > 0) { + assert(dev != NULL); + if (mlx5_check_vec_rx_support(dev) > 0) { rx_pkt_burst = mlx5_rx_burst_vec; - DEBUG("selected RX vectorized function"); + DRV_LOG(DEBUG, "port %u selected Rx vectorized function", + dev->data->port_id); + } else if (mlx5_mprq_enabled(dev)) { + rx_pkt_burst = mlx5_rx_burst_mprq; } return rx_pkt_burst; } @@ -1536,3 +1280,93 @@ mlx5_is_removed(struct rte_eth_dev *dev) return 1; return 0; } + +/** + * Get port ID list of mlx5 instances sharing a common device. + * + * @param[in] dev + * Device to look for. + * @param[out] port_list + * Result buffer for collected port IDs. + * @param port_list_n + * Maximum number of entries in result buffer. If 0, @p port_list can be + * NULL. + * + * @return + * Number of matching instances regardless of the @p port_list_n + * parameter, 0 if none were found. + */ +unsigned int +mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list, + unsigned int port_list_n) +{ + uint16_t id; + unsigned int n = 0; + + RTE_ETH_FOREACH_DEV(id) { + struct rte_eth_dev *ldev = &rte_eth_devices[id]; + + if (!ldev->device || + !ldev->device->driver || + strcmp(ldev->device->driver->name, MLX5_DRIVER_NAME) || + ldev->device != dev) + continue; + if (n < port_list_n) + port_list[n] = id; + n++; + } + return n; +} + +/** + * Get switch information associated with network interface. + * + * @param ifindex + * Network interface index. + * @param[out] info + * Switch information object, populated in case of success. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info) +{ + char ifname[IF_NAMESIZE]; + FILE *file; + struct mlx5_switch_info data = { .master = 0, }; + bool port_name_set = false; + bool port_switch_id_set = false; + char c; + + if (!if_indextoname(ifindex, ifname)) { + rte_errno = errno; + return -rte_errno; + } + + MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name", + ifname); + MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id", + ifname); + + file = fopen(phys_port_name, "rb"); + if (file != NULL) { + port_name_set = + fscanf(file, "%d%c", &data.port_name, &c) == 2 && + c == '\n'; + fclose(file); + } + file = fopen(phys_switch_id, "rb"); + if (file == NULL) { + rte_errno = errno; + return -rte_errno; + } + port_switch_id_set = + fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 && + c == '\n'; + fclose(file); + data.master = port_switch_id_set && !port_name_set; + data.representor = port_switch_id_set && port_name_set; + *info = data; + return 0; +} diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 26002c4b..ca4625b6 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -1,9 +1,11 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2016 6WIND S.A. - * Copyright 2016 Mellanox. + * Copyright 2016 Mellanox Technologies, Ltd */ #include +#include +#include #include /* Verbs header. 
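mlx5_dev_to_port_id() above returns the total match count even when the result buffer is too small, which allows a two-pass call pattern. One plausible use, where "dev" is an rte_eth_dev pointer:

unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);

if (n) {
	uint16_t port_id[n];

	n = mlx5_dev_to_port_id(dev->device, port_id, n);
	/* port_id[0..n-1] now holds the sibling port IDs. */
}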
*/ @@ -16,6 +18,9 @@ #pragma GCC diagnostic error "-Wpedantic" #endif +#include +#include +#include #include #include #include @@ -27,388 +32,289 @@ #include "mlx5_prm.h" #include "mlx5_glue.h" -/* Define minimal priority for control plane flows. */ -#define MLX5_CTRL_FLOW_PRIORITY 4 - -/* Internet Protocol versions. */ -#define MLX5_IPV4 4 -#define MLX5_IPV6 6 - -#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT -struct ibv_flow_spec_counter_action { - int dummy; -}; -#endif - /* Dev ops structure defined in mlx5.c */ extern const struct eth_dev_ops mlx5_dev_ops; extern const struct eth_dev_ops mlx5_dev_ops_isolate; -static int -mlx5_flow_create_eth(const struct rte_flow_item *item, - const void *default_mask, - void *data); - -static int -mlx5_flow_create_vlan(const struct rte_flow_item *item, - const void *default_mask, - void *data); - -static int -mlx5_flow_create_ipv4(const struct rte_flow_item *item, - const void *default_mask, - void *data); - -static int -mlx5_flow_create_ipv6(const struct rte_flow_item *item, - const void *default_mask, - void *data); - -static int -mlx5_flow_create_udp(const struct rte_flow_item *item, - const void *default_mask, - void *data); - -static int -mlx5_flow_create_tcp(const struct rte_flow_item *item, - const void *default_mask, - void *data); - -static int -mlx5_flow_create_vxlan(const struct rte_flow_item *item, - const void *default_mask, - void *data); - -struct mlx5_flow_parse; - -static void -mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src, - unsigned int size); - -static int -mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id); - -static int -mlx5_flow_create_count(struct priv *priv, struct mlx5_flow_parse *parser); - -/* Hash RX queue types. */ -enum hash_rxq_type { - HASH_RXQ_TCPV4, - HASH_RXQ_UDPV4, - HASH_RXQ_IPV4, - HASH_RXQ_TCPV6, - HASH_RXQ_UDPV6, - HASH_RXQ_IPV6, - HASH_RXQ_ETH, -}; - -/* Initialization data for hash RX queue. */ -struct hash_rxq_init { - uint64_t hash_fields; /* Fields that participate in the hash. */ - uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */ - unsigned int flow_priority; /* Flow priority to use. */ - unsigned int ip_version; /* Internet protocol. */ +/* Pattern outer Layer bits. */ +#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0) +#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1) +#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2) +#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3) +#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4) +#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5) + +/* Pattern inner Layer bits. */ +#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6) +#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7) +#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8) +#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9) +#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10) +#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11) + +/* Pattern tunnel Layer bits. */ +#define MLX5_FLOW_LAYER_VXLAN (1u << 12) +#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13) +#define MLX5_FLOW_LAYER_GRE (1u << 14) +#define MLX5_FLOW_LAYER_MPLS (1u << 15) + +/* Outer Masks. */ +#define MLX5_FLOW_LAYER_OUTER_L3 \ + (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6) +#define MLX5_FLOW_LAYER_OUTER_L4 \ + (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP) +#define MLX5_FLOW_LAYER_OUTER \ + (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \ + MLX5_FLOW_LAYER_OUTER_L4) + +/* Tunnel Masks. 
*/ +#define MLX5_FLOW_LAYER_TUNNEL \ + (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \ + MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS) + +/* Inner Masks. */ +#define MLX5_FLOW_LAYER_INNER_L3 \ + (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6) +#define MLX5_FLOW_LAYER_INNER_L4 \ + (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP) +#define MLX5_FLOW_LAYER_INNER \ + (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \ + MLX5_FLOW_LAYER_INNER_L4) + +/* Actions that modify the fate of matching traffic. */ +#define MLX5_FLOW_FATE_DROP (1u << 0) +#define MLX5_FLOW_FATE_QUEUE (1u << 1) +#define MLX5_FLOW_FATE_RSS (1u << 2) + +/* Modify a packet. */ +#define MLX5_FLOW_MOD_FLAG (1u << 0) +#define MLX5_FLOW_MOD_MARK (1u << 1) +#define MLX5_FLOW_MOD_COUNT (1u << 2) + +/* possible L3 layers protocols filtering. */ +#define MLX5_IP_PROTOCOL_TCP 6 +#define MLX5_IP_PROTOCOL_UDP 17 +#define MLX5_IP_PROTOCOL_GRE 47 +#define MLX5_IP_PROTOCOL_MPLS 147 + +/* Priority reserved for default flows. */ +#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1) + +enum mlx5_expansion { + MLX5_EXPANSION_ROOT, + MLX5_EXPANSION_ROOT_OUTER, + MLX5_EXPANSION_ROOT_ETH_VLAN, + MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN, + MLX5_EXPANSION_OUTER_ETH, + MLX5_EXPANSION_OUTER_ETH_VLAN, + MLX5_EXPANSION_OUTER_VLAN, + MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV4_UDP, + MLX5_EXPANSION_OUTER_IPV4_TCP, + MLX5_EXPANSION_OUTER_IPV6, + MLX5_EXPANSION_OUTER_IPV6_UDP, + MLX5_EXPANSION_OUTER_IPV6_TCP, + MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_VXLAN_GPE, + MLX5_EXPANSION_GRE, + MLX5_EXPANSION_MPLS, + MLX5_EXPANSION_ETH, + MLX5_EXPANSION_ETH_VLAN, + MLX5_EXPANSION_VLAN, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV4_UDP, + MLX5_EXPANSION_IPV4_TCP, + MLX5_EXPANSION_IPV6, + MLX5_EXPANSION_IPV6_UDP, + MLX5_EXPANSION_IPV6_TCP, }; -/* Initialization data for hash RX queues. */ -const struct hash_rxq_init hash_rxq_init[] = { - [HASH_RXQ_TCPV4] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV4 | - IBV_RX_HASH_DST_IPV4 | - IBV_RX_HASH_SRC_PORT_TCP | - IBV_RX_HASH_DST_PORT_TCP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP, - .flow_priority = 0, - .ip_version = MLX5_IPV4, +/** Supported expansion of items. 
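Each pattern item detected during flow parsing sets exactly one of the bits above, and the grouped masks test whole classes at once. For example, an eth / ipv4 / udp / vxlan pattern accumulates:

uint32_t layers = MLX5_FLOW_LAYER_OUTER_L2 |
		  MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
		  MLX5_FLOW_LAYER_OUTER_L4_UDP |
		  MLX5_FLOW_LAYER_VXLAN;
int tunneled = !!(layers & MLX5_FLOW_LAYER_TUNNEL);   /* 1 */
int outer_l4 = !!(layers & MLX5_FLOW_LAYER_OUTER_L4); /* 1 */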
*/ +static const struct rte_flow_expand_node mlx5_support_expansion[] = { + [MLX5_EXPANSION_ROOT] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_END, }, - [HASH_RXQ_UDPV4] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV4 | - IBV_RX_HASH_DST_IPV4 | - IBV_RX_HASH_SRC_PORT_UDP | - IBV_RX_HASH_DST_PORT_UDP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP, - .flow_priority = 0, - .ip_version = MLX5_IPV4, + [MLX5_EXPANSION_ROOT_OUTER] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH, + MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6), + .type = RTE_FLOW_ITEM_TYPE_END, }, - [HASH_RXQ_IPV4] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV4 | - IBV_RX_HASH_DST_IPV4), - .dpdk_rss_hf = (ETH_RSS_IPV4 | - ETH_RSS_FRAG_IPV4), - .flow_priority = 1, - .ip_version = MLX5_IPV4, + [MLX5_EXPANSION_ROOT_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), + .type = RTE_FLOW_ITEM_TYPE_END, }, - [HASH_RXQ_TCPV6] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV6 | - IBV_RX_HASH_DST_IPV6 | - IBV_RX_HASH_SRC_PORT_TCP | - IBV_RX_HASH_DST_PORT_TCP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP, - .flow_priority = 0, - .ip_version = MLX5_IPV6, + [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN), + .type = RTE_FLOW_ITEM_TYPE_END, }, - [HASH_RXQ_UDPV6] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV6 | - IBV_RX_HASH_DST_IPV6 | - IBV_RX_HASH_SRC_PORT_UDP | - IBV_RX_HASH_DST_PORT_UDP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP, - .flow_priority = 0, - .ip_version = MLX5_IPV6, + [MLX5_EXPANSION_OUTER_ETH] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6, + MLX5_EXPANSION_MPLS), + .type = RTE_FLOW_ITEM_TYPE_ETH, + .rss_types = 0, }, - [HASH_RXQ_IPV6] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV6 | - IBV_RX_HASH_DST_IPV6), - .dpdk_rss_hf = (ETH_RSS_IPV6 | - ETH_RSS_FRAG_IPV6), - .flow_priority = 1, - .ip_version = MLX5_IPV6, + [MLX5_EXPANSION_OUTER_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), + .type = RTE_FLOW_ITEM_TYPE_ETH, + .rss_types = 0, }, - [HASH_RXQ_ETH] = { - .hash_fields = 0, - .dpdk_rss_hf = 0, - .flow_priority = 2, + [MLX5_EXPANSION_OUTER_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VLAN, }, -}; - -/* Number of entries in hash_rxq_init[]. */ -const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init); - -/** Structure for holding counter stats. */ -struct mlx5_flow_counter_stats { - uint64_t hits; /**< Number of packets matched by the rule. */ - uint64_t bytes; /**< Number of bytes matched by the rule. */ -}; - -/** Structure for Drop queue. */ -struct mlx5_hrxq_drop { - struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */ - struct ibv_qp *qp; /**< Verbs queue pair. */ - struct ibv_wq *wq; /**< Verbs work queue. */ - struct ibv_cq *cq; /**< Verbs completion queue. */ -}; - -/* Flows structures. */ -struct mlx5_flow { - uint64_t hash_fields; /**< Fields that participate in the hash. */ - struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */ - struct ibv_flow *ibv_flow; /**< Verbs flow. */ - struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */ -}; - -/* Drop flows structures. */ -struct mlx5_flow_drop { - struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */ - struct ibv_flow *ibv_flow; /**< Verbs flow. 
*/ -}; - -struct rte_flow { - TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ - uint32_t mark:1; /**< Set if the flow is marked. */ - uint32_t drop:1; /**< Drop queue. */ - uint16_t queues_n; /**< Number of entries in queue[]. */ - uint16_t (*queues)[]; /**< Queues indexes to use. */ - struct rte_eth_rss_conf rss_conf; /**< RSS configuration */ - uint8_t rss_key[40]; /**< copy of the RSS key. */ - struct ibv_counter_set *cs; /**< Holds the counters for the rule. */ - struct mlx5_flow_counter_stats counter_stats;/**mask is not provided. When - * \default_mask is also NULL, the full supported bit-mask (\mask) is - * used instead. - */ - const void *default_mask; - /** Bit-masks size in bytes. */ - const unsigned int mask_sz; - /** - * Conversion function from rte_flow to NIC specific flow. - * - * @param item - * rte_flow item to convert. - * @param default_mask - * Default bit-masks to use when item->mask is not provided. - * @param data - * Internal structure to store the conversion. - * - * @return - * 0 on success, negative value otherwise. - */ - int (*convert)(const struct rte_flow_item *item, - const void *default_mask, - void *data); - /** Size in bytes of the destination structure. */ - const unsigned int dst_sz; - /** List of possible following items. */ - const enum rte_flow_item_type *const items; -}; - -/** Valid action for this PMD. */ -static const enum rte_flow_action_type valid_actions[] = { - RTE_FLOW_ACTION_TYPE_DROP, - RTE_FLOW_ACTION_TYPE_QUEUE, - RTE_FLOW_ACTION_TYPE_MARK, - RTE_FLOW_ACTION_TYPE_FLAG, -#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT - RTE_FLOW_ACTION_TYPE_COUNT, -#endif - RTE_FLOW_ACTION_TYPE_END, -}; - -/** Graph of supported items and associated actions. */ -static const struct mlx5_flow_items mlx5_flow_items[] = { - [RTE_FLOW_ITEM_TYPE_END] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_VXLAN), + [MLX5_EXPANSION_OUTER_IPV4] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT + (MLX5_EXPANSION_OUTER_IPV4_UDP, + MLX5_EXPANSION_OUTER_IPV4_TCP, + MLX5_EXPANSION_GRE), + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_OTHER, }, - [RTE_FLOW_ITEM_TYPE_ETH] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_IPV6), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_eth){ - .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", - .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", - .type = -1, - }, - .default_mask = &rte_flow_item_eth_mask, - .mask_sz = sizeof(struct rte_flow_item_eth), - .convert = mlx5_flow_create_eth, - .dst_sz = sizeof(struct ibv_flow_spec_eth), + [MLX5_EXPANSION_OUTER_IPV4_UDP] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_VXLAN_GPE), + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, }, - [RTE_FLOW_ITEM_TYPE_VLAN] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_IPV6), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_vlan){ - .tci = -1, - }, - .default_mask = &rte_flow_item_vlan_mask, - .mask_sz = sizeof(struct rte_flow_item_vlan), - .convert = mlx5_flow_create_vlan, - .dst_sz = 0, + [MLX5_EXPANSION_OUTER_IPV4_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, }, - [RTE_FLOW_ITEM_TYPE_IPV4] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_TCP), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_ipv4){ - .hdr = { - .src_addr = -1, - .dst_addr = -1, - 
.type_of_service = -1, - .next_proto_id = -1, - }, - }, - .default_mask = &rte_flow_item_ipv4_mask, - .mask_sz = sizeof(struct rte_flow_item_ipv4), - .convert = mlx5_flow_create_ipv4, - .dst_sz = sizeof(struct ibv_flow_spec_ipv4_ext), + [MLX5_EXPANSION_OUTER_IPV6] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT + (MLX5_EXPANSION_OUTER_IPV6_UDP, + MLX5_EXPANSION_OUTER_IPV6_TCP), + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | + ETH_RSS_NONFRAG_IPV6_OTHER, }, - [RTE_FLOW_ITEM_TYPE_IPV6] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_TCP), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_ipv6){ - .hdr = { - .src_addr = { - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - }, - .dst_addr = { - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - }, - .vtc_flow = -1, - .proto = -1, - .hop_limits = -1, - }, - }, - .default_mask = &rte_flow_item_ipv6_mask, - .mask_sz = sizeof(struct rte_flow_item_ipv6), - .convert = mlx5_flow_create_ipv6, - .dst_sz = sizeof(struct ibv_flow_spec_ipv6), + [MLX5_EXPANSION_OUTER_IPV6_UDP] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_VXLAN_GPE), + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, }, - [RTE_FLOW_ITEM_TYPE_UDP] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_udp){ - .hdr = { - .src_port = -1, - .dst_port = -1, - }, - }, - .default_mask = &rte_flow_item_udp_mask, - .mask_sz = sizeof(struct rte_flow_item_udp), - .convert = mlx5_flow_create_udp, - .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp), + [MLX5_EXPANSION_OUTER_IPV6_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, }, - [RTE_FLOW_ITEM_TYPE_TCP] = { - .actions = valid_actions, - .mask = &(const struct rte_flow_item_tcp){ - .hdr = { - .src_port = -1, - .dst_port = -1, - }, - }, - .default_mask = &rte_flow_item_tcp_mask, - .mask_sz = sizeof(struct rte_flow_item_tcp), - .convert = mlx5_flow_create_tcp, - .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp), + [MLX5_EXPANSION_VXLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH), + .type = RTE_FLOW_ITEM_TYPE_VXLAN, }, - [RTE_FLOW_ITEM_TYPE_VXLAN] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_vxlan){ - .vni = "\xff\xff\xff", - }, - .default_mask = &rte_flow_item_vxlan_mask, - .mask_sz = sizeof(struct rte_flow_item_vxlan), - .convert = mlx5_flow_create_vxlan, - .dst_sz = sizeof(struct ibv_flow_spec_tunnel), + [MLX5_EXPANSION_VXLAN_GPE] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, + }, + [MLX5_EXPANSION_GRE] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4), + .type = RTE_FLOW_ITEM_TYPE_GRE, + }, + [MLX5_EXPANSION_MPLS] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_MPLS, + }, + [MLX5_EXPANSION_ETH] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_ETH, + }, + [MLX5_EXPANSION_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), + .type = RTE_FLOW_ITEM_TYPE_ETH, + }, + [MLX5_EXPANSION_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VLAN, + }, 
+ [MLX5_EXPANSION_IPV4] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, + MLX5_EXPANSION_IPV4_TCP), + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_OTHER, + }, + [MLX5_EXPANSION_IPV4_UDP] = { + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, + }, + [MLX5_EXPANSION_IPV4_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, + }, + [MLX5_EXPANSION_IPV6] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, + MLX5_EXPANSION_IPV6_TCP), + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | + ETH_RSS_NONFRAG_IPV6_OTHER, + }, + [MLX5_EXPANSION_IPV6_UDP] = { + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, + }, + [MLX5_EXPANSION_IPV6_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, }, }; -/** Structure to pass to the conversion function. */ -struct mlx5_flow_parse { - uint32_t inner; /**< Set once VXLAN is encountered. */ - uint32_t create:1; - /**< Whether resources should remain after a validate. */ - uint32_t drop:1; /**< Target is a drop queue. */ - uint32_t mark:1; /**< Mark is present in the flow. */ - uint32_t count:1; /**< Count is present in the flow. */ - uint32_t mark_id; /**< Mark identifier. */ - uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */ - uint16_t queues_n; /**< Number of entries in queue[]. */ - struct rte_eth_rss_conf rss_conf; /**< RSS configuration */ - uint8_t rss_key[40]; /**< copy of the RSS key. */ - enum hash_rxq_type layer; /**< Last pattern layer detected. */ - struct ibv_counter_set *cs; /**< Holds the counter set for the rule */ +/** Handles information leading to a drop fate. */ +struct mlx5_flow_verbs { + LIST_ENTRY(mlx5_flow_verbs) next; + unsigned int size; /**< Size of the attribute. */ struct { - struct ibv_flow_attr *ibv_attr; - /**< Pointer to Verbs attributes. */ - unsigned int offset; - /**< Current position or total size of the attribute. */ - } queue[RTE_DIM(hash_rxq_init)]; + struct ibv_flow_attr *attr; + /**< Pointer to the Specification buffer. */ + uint8_t *specs; /**< Pointer to the specifications. */ + }; + struct ibv_flow *flow; /**< Verbs flow pointer. */ + struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */ + uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */ +}; + +/* Counters information. */ +struct mlx5_flow_counter { + LIST_ENTRY(mlx5_flow_counter) next; /**< Pointer to the next counter. */ + uint32_t shared:1; /**< Share counter ID with other flow rules. */ + uint32_t ref_cnt:31; /**< Reference counter. */ + uint32_t id; /**< Counter ID. */ + struct ibv_counter_set *cs; /**< Holds the counters for the rule. */ + uint64_t hits; /**< Number of packets matched by the rule. */ + uint64_t bytes; /**< Number of bytes matched by the rule. */ +}; + +/* Flow structure. */ +struct rte_flow { + TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ + struct rte_flow_attr attributes; /**< User flow attribute. */ + uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */ + uint32_t layers; + /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */ + uint32_t modifier; + /**< Bit-fields of present modifier see MLX5_FLOW_MOD_*. */ + uint32_t fate; + /**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */ + uint8_t l3_protocol; /**< valid when l3_protocol_en is set. */ + LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. 
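The table above is a graph: each node names the item it stands for, the RSS types that justify expanding to it, and its possible successors. Assuming the rte_flow_expand_rss() helper this table is written for, consumption would look roughly like the following, with "pattern" and "rss_types" taken from the user's flow request:

union {
	struct rte_flow_expand_rss buf;
	uint8_t buffer[2048];
} expand;
int ret;

ret = rte_flow_expand_rss(&expand.buf, sizeof(expand.buffer), pattern,
			  rss_types, mlx5_support_expansion,
			  MLX5_EXPANSION_ROOT);
if (ret > 0) {
	/* expand.buf.entries patterns, one per reachable leaf whose
	 * RSS types intersect the request, are in expand.buf.entry[]. */
}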
*/ + struct mlx5_flow_verbs *cur_verbs; + /**< Current Verbs flow structure being filled. */ + struct mlx5_flow_counter *counter; /**< Holds Verbs flow counter. */ + struct rte_flow_action_rss rss;/**< RSS context. */ + uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */ + uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */ + void *nl_flow; /**< Netlink flow buffer if relevant. */ }; static const struct rte_flow_ops mlx5_flow_ops = { @@ -416,12 +322,8 @@ static const struct rte_flow_ops mlx5_flow_ops = { .create = mlx5_flow_create, .destroy = mlx5_flow_destroy, .flush = mlx5_flow_flush, -#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT - .query = mlx5_flow_query, -#else - .query = NULL, -#endif .isolate = mlx5_flow_isolate, + .query = mlx5_flow_query, }; /* Convert FDIR request to Generic flow. */ @@ -435,10 +337,18 @@ struct mlx5_fdir { struct rte_flow_item_ipv4 ipv4; struct rte_flow_item_ipv6 ipv6; } l3; + union { + struct rte_flow_item_ipv4 ipv4; + struct rte_flow_item_ipv6 ipv6; + } l3_mask; union { struct rte_flow_item_udp udp; struct rte_flow_item_tcp tcp; } l4; + union { + struct rte_flow_item_udp udp; + struct rte_flow_item_tcp tcp; + } l4_mask; struct rte_flow_action_queue queue; }; @@ -448,778 +358,459 @@ struct ibv_spec_header { uint16_t size; }; +/* + * Number of sub priorities. + * For each kind of pattern matching i.e. L2, L3, L4 to have a correct + * matching on the NIC (firmware dependent) L4 most have the higher priority + * followed by L3 and ending with L2. + */ +#define MLX5_PRIORITY_MAP_L2 2 +#define MLX5_PRIORITY_MAP_L3 1 +#define MLX5_PRIORITY_MAP_L4 0 +#define MLX5_PRIORITY_MAP_MAX 3 + +/* Map of Verbs to Flow priority with 8 Verbs priorities. */ +static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { + { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, +}; + +/* Map of Verbs to Flow priority with 16 Verbs priorities. */ +static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { + { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, + { 9, 10, 11 }, { 12, 13, 14 }, +}; + +/* Tunnel information. */ +struct mlx5_flow_tunnel_info { + uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */ + uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */ +}; + +static struct mlx5_flow_tunnel_info tunnels_info[] = { + { + .tunnel = MLX5_FLOW_LAYER_VXLAN, + .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP, + }, + { + .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE, + .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP, + }, + { + .tunnel = MLX5_FLOW_LAYER_GRE, + .ptype = RTE_PTYPE_TUNNEL_GRE, + }, + { + .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP, + .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP, + }, + { + .tunnel = MLX5_FLOW_LAYER_MPLS, + .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE, + }, +}; + /** - * Check support for a given item. + * Discover the maximum number of priority available. * - * @param item[in] - * Item specification. - * @param mask[in] - * Bit-masks covering supported fields to compare with spec, last and mask in - * \item. - * @param size - * Bit-Mask size in bytes. + * @param[in] dev + * Pointer to Ethernet device. * * @return - * 0 on success. + * number of supported flow priority on success, a negative errno + * value otherwise and rte_errno is set. 
*/ -static int -mlx5_flow_item_validate(const struct rte_flow_item *item, - const uint8_t *mask, unsigned int size) +int +mlx5_flow_discover_priorities(struct rte_eth_dev *dev) { - int ret = 0; - - if (!item->spec && (item->mask || item->last)) - return -1; - if (item->spec && !item->mask) { - unsigned int i; - const uint8_t *spec = item->spec; - - for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; - } - if (item->last && !item->mask) { - unsigned int i; - const uint8_t *spec = item->last; - - for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; + struct { + struct ibv_flow_attr attr; + struct ibv_flow_spec_eth eth; + struct ibv_flow_spec_action_drop drop; + } flow_attr = { + .attr = { + .num_of_specs = 2, + }, + .eth = { + .type = IBV_FLOW_SPEC_ETH, + .size = sizeof(struct ibv_flow_spec_eth), + }, + .drop = { + .size = sizeof(struct ibv_flow_spec_action_drop), + .type = IBV_FLOW_SPEC_ACTION_DROP, + }, + }; + struct ibv_flow *flow; + struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev); + uint16_t vprio[] = { 8, 16 }; + int i; + int priority = 0; + + if (!drop) { + rte_errno = ENOTSUP; + return -rte_errno; } - if (item->mask) { - unsigned int i; - const uint8_t *spec = item->spec; - - for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; + for (i = 0; i != RTE_DIM(vprio); i++) { + flow_attr.attr.priority = vprio[i] - 1; + flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr); + if (!flow) + break; + claim_zero(mlx5_glue->destroy_flow(flow)); + priority = vprio[i]; } - if (item->spec && item->last) { - uint8_t spec[size]; - uint8_t last[size]; - const uint8_t *apply = mask; - unsigned int i; - - if (item->mask) - apply = item->mask; - for (i = 0; i < size; ++i) { - spec[i] = ((const uint8_t *)item->spec)[i] & apply[i]; - last[i] = ((const uint8_t *)item->last)[i] & apply[i]; - } - ret = memcmp(spec, last, size); + switch (priority) { + case 8: + priority = RTE_DIM(priority_map_3); + break; + case 16: + priority = RTE_DIM(priority_map_5); + break; + default: + rte_errno = ENOTSUP; + DRV_LOG(ERR, + "port %u verbs maximum priority: %d expected 8/16", + dev->data->port_id, vprio[i]); + return -rte_errno; } - return ret; + mlx5_hrxq_drop_release(dev); + DRV_LOG(INFO, "port %u flow maximum priority: %d", + dev->data->port_id, priority); + return priority; } /** - * Copy the RSS configuration from the user ones, of the rss_conf is null, - * uses the driver one. - * - * @param priv - * Pointer to private structure. - * @param parser - * Internal parser structure. - * @param rss_conf - * User RSS configuration to save. + * Adjust flow priority. * - * @return - * 0 on success, errno value on failure. + * @param dev + * Pointer to Ethernet device. + * @param flow + * Pointer to an rte flow. */ -static int -priv_flow_convert_rss_conf(struct priv *priv, - struct mlx5_flow_parse *parser, - const struct rte_eth_rss_conf *rss_conf) +static void +mlx5_flow_adjust_priority(struct rte_eth_dev *dev, struct rte_flow *flow) { - /* - * This function is also called at the beginning of - * priv_flow_convert_actions() to initialize the parser with the - * device default RSS configuration. 
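Putting the two pieces above together, a worked example: with 16 Verbs priorities discovered, flow_prio is RTE_DIM(priority_map_5) == 5; a flow created at attribute priority 1 whose deepest matched layer is L4 then lands at:

uint32_t verbs_prio = priority_map_5[1][MLX5_PRIORITY_MAP_L4]; /* == 3 */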
- */ - (void)priv; - if (rss_conf) { - if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) - return EINVAL; - if (rss_conf->rss_key_len != 40) - return EINVAL; - if (rss_conf->rss_key_len && rss_conf->rss_key) { - parser->rss_conf.rss_key_len = rss_conf->rss_key_len; - memcpy(parser->rss_key, rss_conf->rss_key, - rss_conf->rss_key_len); - parser->rss_conf.rss_key = parser->rss_key; - } - parser->rss_conf.rss_hf = rss_conf->rss_hf; + struct priv *priv = dev->data->dev_private; + uint32_t priority = flow->attributes.priority; + uint32_t subpriority = flow->cur_verbs->attr->priority; + + switch (priv->config.flow_prio) { + case RTE_DIM(priority_map_3): + priority = priority_map_3[priority][subpriority]; + break; + case RTE_DIM(priority_map_5): + priority = priority_map_5[priority][subpriority]; + break; } - return 0; + flow->cur_verbs->attr->priority = priority; } /** - * Extract attribute to the parser. + * Get a flow counter. * - * @param priv - * Pointer to private structure. - * @param[in] attr - * Flow rule attributes. - * @param[out] error - * Perform verbose error reporting if not NULL. - * @param[in, out] parser - * Internal parser structure. + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] shared + * Indicate if this counter is shared with other flows. + * @param[in] id + * Counter identifier. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * A pointer to the counter, NULL otherwise and rte_errno is set. */ -static int -priv_flow_convert_attributes(struct priv *priv, - const struct rte_flow_attr *attr, - struct rte_flow_error *error, - struct mlx5_flow_parse *parser) +static struct mlx5_flow_counter * +mlx5_flow_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) { - (void)priv; - (void)parser; - if (attr->group) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_GROUP, - NULL, - "groups are not supported"); - return -rte_errno; + struct priv *priv = dev->data->dev_private; + struct mlx5_flow_counter *cnt; + + LIST_FOREACH(cnt, &priv->flow_counters, next) { + if (!cnt->shared || cnt->shared != shared) + continue; + if (cnt->id != id) + continue; + cnt->ref_cnt++; + return cnt; } - if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, - NULL, - "priorities are not supported"); - return -rte_errno; +#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT + + struct mlx5_flow_counter tmpl = { + .shared = shared, + .id = id, + .cs = mlx5_glue->create_counter_set + (priv->ctx, + &(struct ibv_counter_set_init_attr){ + .counter_set_id = id, + }), + .hits = 0, + .bytes = 0, + }; + + if (!tmpl.cs) { + rte_errno = errno; + return NULL; } - if (attr->egress) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, - NULL, - "egress is not supported"); - return -rte_errno; + cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0); + if (!cnt) { + rte_errno = ENOMEM; + return NULL; } - if (!attr->ingress) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, - NULL, - "only ingress is supported"); - return -rte_errno; + *cnt = tmpl; + LIST_INSERT_HEAD(&priv->flow_counters, cnt, next); + return cnt; +#endif + rte_errno = ENOTSUP; + return NULL; +} + +/** + * Release a flow counter. + * + * @param[in] counter + * Pointer to the counter handler. 
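Counters are shared by (shared, id) pair: a second flow asking for the same pair receives the existing object with its reference count bumped, and the Verbs counter set is meant to be destroyed only when the last user calls mlx5_flow_counter_release(), defined just below. Sketch of the intended lifecycle:

struct mlx5_flow_counter *a = mlx5_flow_counter_new(dev, 1, 42);
struct mlx5_flow_counter *b = mlx5_flow_counter_new(dev, 1, 42); /* a == b */

mlx5_flow_counter_release(b); /* still referenced elsewhere, kept */
mlx5_flow_counter_release(a); /* last reference, counter set destroyed */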
+ */ +static void +mlx5_flow_counter_release(struct mlx5_flow_counter *counter) +{ + if (--counter->ref_cnt == 0) { + claim_zero(mlx5_glue->destroy_counter_set(counter->cs)); + LIST_REMOVE(counter, next); + rte_free(counter); } - return 0; } /** - * Extract actions request to the parser. + * Verify the @p attributes will be correctly understood by the NIC and store + * them in the @p flow if everything is correct. * - * @param priv - * Pointer to private structure. - * @param[in] actions - * Associated actions (list terminated by the END action). + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] attributes + * Pointer to flow attributes + * @param[in, out] flow + * Pointer to the rte_flow structure. * @param[out] error - * Perform verbose error reporting if not NULL. - * @param[in, out] parser - * Internal parser structure. + * Pointer to error structure. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert_actions(struct priv *priv, - const struct rte_flow_action actions[], - struct rte_flow_error *error, - struct mlx5_flow_parse *parser) +mlx5_flow_attributes(struct rte_eth_dev *dev, + const struct rte_flow_attr *attributes, + struct rte_flow *flow, + struct rte_flow_error *error) { - /* - * Add default RSS configuration necessary for Verbs to create QP even - * if no RSS is necessary. - */ - priv_flow_convert_rss_conf(priv, parser, - (const struct rte_eth_rss_conf *) - &priv->rss_conf); - for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { - if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) { - continue; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) { - parser->drop = 1; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) { - const struct rte_flow_action_queue *queue = - (const struct rte_flow_action_queue *) - actions->conf; - uint16_t n; - uint16_t found = 0; - - if (!queue || (queue->index > (priv->rxqs_n - 1))) - goto exit_action_not_supported; - for (n = 0; n < parser->queues_n; ++n) { - if (parser->queues[n] == queue->index) { - found = 1; - break; - } - } - if (parser->queues_n > 1 && !found) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "queue action not in RSS queues"); - return -rte_errno; - } - if (!found) { - parser->queues_n = 1; - parser->queues[0] = queue->index; - } - } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) { - const struct rte_flow_action_rss *rss = - (const struct rte_flow_action_rss *) - actions->conf; - uint16_t n; - - if (!rss || !rss->num) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "no valid queues"); - return -rte_errno; - } - if (parser->queues_n == 1) { - uint16_t found = 0; - - assert(parser->queues_n); - for (n = 0; n < rss->num; ++n) { - if (parser->queues[0] == - rss->queue[n]) { - found = 1; - break; - } - } - if (!found) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "queue action not in RSS" - " queues"); - return -rte_errno; - } - } - for (n = 0; n < rss->num; ++n) { - if (rss->queue[n] >= priv->rxqs_n) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "queue id > number of" - " queues"); - return -rte_errno; - } - } - for (n = 0; n < rss->num; ++n) - parser->queues[n] = rss->queue[n]; - parser->queues_n = rss->num; - if (priv_flow_convert_rss_conf(priv, parser, - rss->rss_conf)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "wrong RSS configuration"); - return 
-rte_errno; - } - } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) { - const struct rte_flow_action_mark *mark = - (const struct rte_flow_action_mark *) - actions->conf; - - if (!mark) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "mark must be defined"); - return -rte_errno; - } else if (mark->id >= MLX5_FLOW_MARK_MAX) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "mark must be between 0" - " and 16777199"); - return -rte_errno; - } - parser->mark = 1; - parser->mark_id = mark->id; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) { - parser->mark = 1; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT && - priv->config.flow_counter_en) { - parser->count = 1; - } else { - goto exit_action_not_supported; - } - } - if (parser->drop && parser->mark) - parser->mark = 0; - if (!parser->queues_n && !parser->drop) { - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "no valid action"); - return -rte_errno; - } + uint32_t priority_max = + ((struct priv *)dev->data->dev_private)->config.flow_prio - 1; + + if (attributes->group) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, + "groups is not supported"); + if (attributes->priority != MLX5_FLOW_PRIO_RSVD && + attributes->priority >= priority_max) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + NULL, + "priority out of range"); + if (attributes->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + NULL, + "egress is not supported"); + if (attributes->transfer) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, + "transfer is not supported"); + if (!attributes->ingress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + NULL, + "ingress attribute is mandatory"); + flow->attributes = *attributes; + if (attributes->priority == MLX5_FLOW_PRIO_RSVD) + flow->attributes.priority = priority_max; return 0; -exit_action_not_supported: - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, - actions, "action not supported"); - return -rte_errno; } /** - * Validate items. + * Verify the @p item specifications (spec, last, mask) are compatible with the + * NIC capabilities. * - * @param priv - * Pointer to private structure. - * @param[in] items - * Pattern specification (list terminated by the END pattern item). + * @param[in] item + * Item specification. + * @param[in] mask + * @p item->mask or flow default bit-masks. + * @param[in] nic_mask + * Bit-masks covering supported fields by the NIC to compare with user mask. + * @param[in] size + * Bit-masks size in bytes. * @param[out] error - * Perform verbose error reporting if not NULL. - * @param[in, out] parser - * Internal parser structure. + * Pointer to error structure. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert_items_validate(struct priv *priv, - const struct rte_flow_item items[], - struct rte_flow_error *error, - struct mlx5_flow_parse *parser) +mlx5_flow_item_acceptable(const struct rte_flow_item *item, + const uint8_t *mask, + const uint8_t *nic_mask, + unsigned int size, + struct rte_flow_error *error) { - const struct mlx5_flow_items *cur_item = mlx5_flow_items; unsigned int i; - (void)priv; - /* Initialise the offsets to start after verbs attribute. 
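One consequence of the attribute validation above: MLX5_FLOW_PRIO_RSVD ((uint32_t)-1) is the only value accepted at or beyond priority_max, and it is remapped to the lowest usable priority instead of being rejected. For instance:

struct rte_flow_attr attr = {
	.ingress = 1,
	.priority = MLX5_FLOW_PRIO_RSVD, /* becomes flow_prio - 1 */
};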
*/ - for (i = 0; i != hash_rxq_init_n; ++i) - parser->queue[i].offset = sizeof(struct ibv_flow_attr); - for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { - const struct mlx5_flow_items *token = NULL; - unsigned int n; - int err; + assert(nic_mask); + for (i = 0; i < size; ++i) + if ((nic_mask[i] | mask[i]) != nic_mask[i]) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "mask enables non supported" + " bits"); + if (!item->spec && (item->mask || item->last)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "mask/last without a spec is not" + " supported"); + if (item->spec && item->last) { + uint8_t spec[size]; + uint8_t last[size]; + unsigned int i; + int ret; - if (items->type == RTE_FLOW_ITEM_TYPE_VOID) - continue; - for (i = 0; - cur_item->items && - cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END; - ++i) { - if (cur_item->items[i] == items->type) { - token = &mlx5_flow_items[items->type]; - break; - } - } - if (!token) - goto exit_item_not_supported; - cur_item = token; - err = mlx5_flow_item_validate(items, - (const uint8_t *)cur_item->mask, - cur_item->mask_sz); - if (err) - goto exit_item_not_supported; - if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) { - if (parser->inner) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - items, - "cannot recognize multiple" - " VXLAN encapsulations"); - return -rte_errno; - } - parser->inner = IBV_FLOW_SPEC_INNER; - } - if (parser->drop) { - parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz; - } else { - for (n = 0; n != hash_rxq_init_n; ++n) - parser->queue[n].offset += cur_item->dst_sz; + for (i = 0; i < size; ++i) { + spec[i] = ((const uint8_t *)item->spec)[i] & mask[i]; + last[i] = ((const uint8_t *)item->last)[i] & mask[i]; } - } - if (parser->drop) { - parser->queue[HASH_RXQ_ETH].offset += - sizeof(struct ibv_flow_spec_action_drop); - } - if (parser->mark) { - for (i = 0; i != hash_rxq_init_n; ++i) - parser->queue[i].offset += - sizeof(struct ibv_flow_spec_action_tag); - } - if (parser->count) { - unsigned int size = sizeof(struct ibv_flow_spec_counter_action); - - for (i = 0; i != hash_rxq_init_n; ++i) - parser->queue[i].offset += size; + ret = memcmp(spec, last, size); + if (ret != 0) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "range is not supported"); } return 0; -exit_item_not_supported: - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, - items, "item not supported"); - return -rte_errno; } /** - * Allocate memory space to store verbs flow attributes. + * Add a verbs item specification into @p flow. * - * @param priv - * Pointer to private structure. - * @param[in] priority - * Flow priority. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] src + * Create specification. * @param[in] size - * Amount of byte to allocate. - * @param[out] error - * Perform verbose error reporting if not NULL. - * - * @return - * A verbs flow attribute on success, NULL otherwise. + * Size in bytes of the specification to copy. 
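The acceptability test above reduces to a byte-wise subset check: a user mask may only enable bits that the NIC mask also enables. A minimal restatement of that check, with a hypothetical helper name, follows:

    #include <stdint.h>
    #include <stddef.h>

    /* Returns 1 when `mask` is a subset of `nic_mask`, i.e. the user never
     * asks to match a bit the NIC cannot test (same test as above). */
    static int
    mask_is_subset(const uint8_t *mask, const uint8_t *nic_mask, size_t size)
    {
    	size_t i;

    	for (i = 0; i < size; ++i)
    		if ((nic_mask[i] | mask[i]) != nic_mask[i])
    			return 0;
    	return 1;
    }

For instance, with the VLAN nic_mask used further down (.tci = 0x0fff), a user TCI mask of 0xffff fails this check because the PCP/DEI bits lie outside what the NIC can match.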
*/ -static struct ibv_flow_attr* -priv_flow_convert_allocate(struct priv *priv, - unsigned int priority, - unsigned int size, - struct rte_flow_error *error) +static void +mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size) { - struct ibv_flow_attr *ibv_attr; + struct mlx5_flow_verbs *verbs = flow->cur_verbs; - (void)priv; - ibv_attr = rte_calloc(__func__, 1, size, 0); - if (!ibv_attr) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "cannot allocate verbs spec attributes."); - return NULL; - } - ibv_attr->priority = priority; - return ibv_attr; -} - -/** - * Finalise verbs flow attributes. - * - * @param priv - * Pointer to private structure. - * @param[in, out] parser - * Internal parser structure. - */ -static void -priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser) -{ - const unsigned int ipv4 = - hash_rxq_init[parser->layer].ip_version == MLX5_IPV4; - const enum hash_rxq_type hmin = ipv4 ? HASH_RXQ_TCPV4 : HASH_RXQ_TCPV6; - const enum hash_rxq_type hmax = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6; - const enum hash_rxq_type ohmin = ipv4 ? HASH_RXQ_TCPV6 : HASH_RXQ_TCPV4; - const enum hash_rxq_type ohmax = ipv4 ? HASH_RXQ_IPV6 : HASH_RXQ_IPV4; - const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6; - unsigned int i; - - (void)priv; - if (parser->layer == HASH_RXQ_ETH) { - goto fill; - } else { - /* - * This layer becomes useless as the pattern define under - * layers. - */ - rte_free(parser->queue[HASH_RXQ_ETH].ibv_attr); - parser->queue[HASH_RXQ_ETH].ibv_attr = NULL; - } - /* Remove opposite kind of layer e.g. IPv6 if the pattern is IPv4. */ - for (i = ohmin; i != (ohmax + 1); ++i) { - if (!parser->queue[i].ibv_attr) - continue; - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } - /* Remove impossible flow according to the RSS configuration. */ - if (hash_rxq_init[parser->layer].dpdk_rss_hf & - parser->rss_conf.rss_hf) { - /* Remove any other flow. */ - for (i = hmin; i != (hmax + 1); ++i) { - if ((i == parser->layer) || - (!parser->queue[i].ibv_attr)) - continue; - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } - } else if (!parser->queue[ip].ibv_attr) { - /* no RSS possible with the current configuration. */ - parser->queues_n = 1; - return; - } -fill: - /* - * Fill missing layers in verbs specifications, or compute the correct - * offset to allocate the memory space for the attributes and - * specifications. 
- */ - for (i = 0; i != hash_rxq_init_n - 1; ++i) { - union { - struct ibv_flow_spec_ipv4_ext ipv4; - struct ibv_flow_spec_ipv6 ipv6; - struct ibv_flow_spec_tcp_udp udp_tcp; - } specs; + if (verbs->specs) { void *dst; - uint16_t size; - if (i == parser->layer) - continue; - if (parser->layer == HASH_RXQ_ETH) { - if (hash_rxq_init[i].ip_version == MLX5_IPV4) { - size = sizeof(struct ibv_flow_spec_ipv4_ext); - specs.ipv4 = (struct ibv_flow_spec_ipv4_ext){ - .type = IBV_FLOW_SPEC_IPV4_EXT, - .size = size, - }; - } else { - size = sizeof(struct ibv_flow_spec_ipv6); - specs.ipv6 = (struct ibv_flow_spec_ipv6){ - .type = IBV_FLOW_SPEC_IPV6, - .size = size, - }; - } - if (parser->queue[i].ibv_attr) { - dst = (void *)((uintptr_t) - parser->queue[i].ibv_attr + - parser->queue[i].offset); - memcpy(dst, &specs, size); - ++parser->queue[i].ibv_attr->num_of_specs; - } - parser->queue[i].offset += size; - } - if ((i == HASH_RXQ_UDPV4) || (i == HASH_RXQ_TCPV4) || - (i == HASH_RXQ_UDPV6) || (i == HASH_RXQ_TCPV6)) { - size = sizeof(struct ibv_flow_spec_tcp_udp); - specs.udp_tcp = (struct ibv_flow_spec_tcp_udp) { - .type = ((i == HASH_RXQ_UDPV4 || - i == HASH_RXQ_UDPV6) ? - IBV_FLOW_SPEC_UDP : - IBV_FLOW_SPEC_TCP), - .size = size, - }; - if (parser->queue[i].ibv_attr) { - dst = (void *)((uintptr_t) - parser->queue[i].ibv_attr + - parser->queue[i].offset); - memcpy(dst, &specs, size); - ++parser->queue[i].ibv_attr->num_of_specs; - } - parser->queue[i].offset += size; - } + dst = (void *)(verbs->specs + verbs->size); + memcpy(dst, src, size); + ++verbs->attr->num_of_specs; } + verbs->size += size; } /** - * Validate and convert a flow supported by the NIC. - * - * @param priv - * Pointer to private structure. - * @param[in] attr - * Flow rule attributes. - * @param[in] pattern - * Pattern specification (list terminated by the END pattern item). - * @param[in] actions - * Associated actions (list terminated by the END action). - * @param[out] error - * Perform verbose error reporting if not NULL. - * @param[in, out] parser - * Internal parser structure. - * - * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. - */ -static int -priv_flow_convert(struct priv *priv, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error, - struct mlx5_flow_parse *parser) -{ - const struct mlx5_flow_items *cur_item = mlx5_flow_items; - unsigned int i; - int ret; - - /* First step. Validate the attributes, items and actions. */ - *parser = (struct mlx5_flow_parse){ - .create = parser->create, - .layer = HASH_RXQ_ETH, - .mark_id = MLX5_FLOW_MARK_DEFAULT, - }; - ret = priv_flow_convert_attributes(priv, attr, error, parser); - if (ret) - return ret; - ret = priv_flow_convert_actions(priv, actions, error, parser); - if (ret) - return ret; - ret = priv_flow_convert_items_validate(priv, items, error, parser); - if (ret) - return ret; - priv_flow_convert_finalise(priv, parser); - /* - * Second step. - * Allocate the memory space to store verbs specifications. 
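mlx5_flow_spec_verbs_add() appends into a spec area that sits directly after the ibv_flow_attr header; when no buffer is attached, only the byte count grows, which is what the measuring pass relies on. A sketch with stand-in types (struct verbs_buf is hypothetical; the patch uses struct mlx5_flow_verbs):

    #include <stdint.h>
    #include <string.h>

    struct verbs_buf {
    	uint8_t *specs;            /* spec area, NULL while measuring */
    	size_t size;               /* bytes appended so far */
    	unsigned int num_of_specs; /* mirrors attr->num_of_specs */
    };

    static void
    spec_add(struct verbs_buf *b, const void *src, size_t size)
    {
    	if (b->specs) {
    		memcpy(b->specs + b->size, src, size);
    		++b->num_of_specs;
    	}
    	b->size += size; /* grows even on the measuring pass */
    }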
- */ - if (parser->drop) { - unsigned int priority = - attr->priority + - hash_rxq_init[HASH_RXQ_ETH].flow_priority; - unsigned int offset = parser->queue[HASH_RXQ_ETH].offset; - - parser->queue[HASH_RXQ_ETH].ibv_attr = - priv_flow_convert_allocate(priv, priority, - offset, error); - if (!parser->queue[HASH_RXQ_ETH].ibv_attr) - return ENOMEM; - parser->queue[HASH_RXQ_ETH].offset = - sizeof(struct ibv_flow_attr); - } else { - for (i = 0; i != hash_rxq_init_n; ++i) { - unsigned int priority = - attr->priority + - hash_rxq_init[i].flow_priority; - unsigned int offset; - - if (!(parser->rss_conf.rss_hf & - hash_rxq_init[i].dpdk_rss_hf) && - (i != HASH_RXQ_ETH)) - continue; - offset = parser->queue[i].offset; - parser->queue[i].ibv_attr = - priv_flow_convert_allocate(priv, priority, - offset, error); - if (!parser->queue[i].ibv_attr) - goto exit_enomem; - parser->queue[i].offset = sizeof(struct ibv_flow_attr); - } - } - /* Third step. Conversion parse, fill the specifications. */ - parser->inner = 0; - for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { - if (items->type == RTE_FLOW_ITEM_TYPE_VOID) - continue; - cur_item = &mlx5_flow_items[items->type]; - ret = cur_item->convert(items, - (cur_item->default_mask ? - cur_item->default_mask : - cur_item->mask), - parser); - if (ret) { - rte_flow_error_set(error, ret, - RTE_FLOW_ERROR_TYPE_ITEM, - items, "item not supported"); - goto exit_free; - } - } - if (parser->mark) - mlx5_flow_create_flag_mark(parser, parser->mark_id); - if (parser->count && parser->create) { - mlx5_flow_create_count(priv, parser); - if (!parser->cs) - goto exit_count_error; - } - /* - * Last step. Complete missing specification to reach the RSS - * configuration. - */ - if (!parser->drop) { - priv_flow_convert_finalise(priv, parser); - } else { - parser->queue[HASH_RXQ_ETH].ibv_attr->priority = - attr->priority + - hash_rxq_init[parser->layer].flow_priority; - } -exit_free: - /* Only verification is expected, all resources should be released. */ - if (!parser->create) { - for (i = 0; i != hash_rxq_init_n; ++i) { - if (parser->queue[i].ibv_attr) { - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } - } - } - return ret; -exit_enomem: - for (i = 0; i != hash_rxq_init_n; ++i) { - if (parser->queue[i].ibv_attr) { - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } - } - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot allocate verbs spec attributes."); - return ret; -exit_count_error: - rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create counter."); - return rte_errno; -} - -/** - * Copy the specification created into the flow. - * - * @param parser - * Internal parser structure. - * @param src - * Create specification. - * @param size - * Size in bytes of the specification to copy. + * Adjust verbs hash fields according to the @p flow information. + * + * @param[in, out] flow. + * Pointer to flow structure. + * @param[in] tunnel + * 1 when the hash field is for a tunnel item. + * @param[in] layer_types + * ETH_RSS_* types. + * @param[in] hash_fields + * Item hash fields. 
*/ static void -mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src, - unsigned int size) +mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow, + int tunnel __rte_unused, + uint32_t layer_types, uint64_t hash_fields) { - unsigned int i; - void *dst; - - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!parser->queue[i].ibv_attr) - continue; - /* Specification must be the same l3 type or none. */ - if (parser->layer == HASH_RXQ_ETH || - (hash_rxq_init[parser->layer].ip_version == - hash_rxq_init[i].ip_version) || - (hash_rxq_init[i].ip_version == 0)) { - dst = (void *)((uintptr_t)parser->queue[i].ibv_attr + - parser->queue[i].offset); - memcpy(dst, src, size); - ++parser->queue[i].ibv_attr->num_of_specs; - parser->queue[i].offset += size; - } - } +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0); + if (flow->rss.level == 2 && !tunnel) + hash_fields = 0; + else if (flow->rss.level < 2 && tunnel) + hash_fields = 0; +#endif + if (!(flow->rss.types & layer_types)) + hash_fields = 0; + flow->cur_verbs->hash_fields |= hash_fields; } /** - * Convert Ethernet item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. */ static int -mlx5_flow_create_eth(const struct rte_flow_item *item, - const void *default_mask, - void *data) +mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size, struct rte_flow_error *error) { const struct rte_flow_item_eth *spec = item->spec; const struct rte_flow_item_eth *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; - const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth); + const struct rte_flow_item_eth nic_mask = { + .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .type = RTE_BE16(0xffff), + }; + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + const unsigned int size = sizeof(struct ibv_flow_spec_eth); struct ibv_flow_spec_eth eth = { - .type = parser->inner | IBV_FLOW_SPEC_ETH, - .size = eth_size, + .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, }; + int ret; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) - parser->layer = HASH_RXQ_ETH; + if (flow->layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L2 layers already configured"); + if (!mask) + mask = &rte_flow_item_eth_mask; + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_eth), + error); + if (ret) + return ret; + flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + if (size > flow_size) + return size; if (spec) { unsigned int i; - if (!mask) - mask = default_mask; memcpy(ð.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN); memcpy(ð.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN); eth.val.ether_type = spec->type; @@ -1233,80 +824,211 @@ mlx5_flow_create_eth(const struct rte_flow_item *item, } eth.val.ether_type &= eth.mask.ether_type; } - mlx5_flow_create_copy(parser, ð, eth_size); - return 0; + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + mlx5_flow_spec_verbs_add(flow, ð, size); + return size; +} + +/** + * Update the VLAN tag in the Verbs Ethernet specification. + * + * @param[in, out] attr + * Pointer to Verbs attributes structure. + * @param[in] eth + * Verbs structure containing the VLAN information to copy. + */ +static void +mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr, + struct ibv_flow_spec_eth *eth) +{ + unsigned int i; + const enum ibv_flow_spec_type search = eth->type; + struct ibv_spec_header *hdr = (struct ibv_spec_header *) + ((uint8_t *)attr + sizeof(struct ibv_flow_attr)); + + for (i = 0; i != attr->num_of_specs; ++i) { + if (hdr->type == search) { + struct ibv_flow_spec_eth *e = + (struct ibv_flow_spec_eth *)hdr; + + e->val.vlan_tag = eth->val.vlan_tag; + e->mask.vlan_tag = eth->mask.vlan_tag; + e->val.ether_type = eth->val.ether_type; + e->mask.ether_type = eth->mask.ether_type; + break; + } + hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size); + } } /** - * Convert VLAN item to Verbs specification. + * Convert the @p item into @p flow (or by updating the already present + * Ethernet Verbs) specification after ensuring the NIC will understand and + * process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. 
*/ static int -mlx5_flow_create_vlan(const struct rte_flow_item *item, - const void *default_mask, - void *data) +mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size, struct rte_flow_error *error) { const struct rte_flow_item_vlan *spec = item->spec; const struct rte_flow_item_vlan *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; - struct ibv_flow_spec_eth *eth; - const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth); - + const struct rte_flow_item_vlan nic_mask = { + .tci = RTE_BE16(0x0fff), + .inner_type = RTE_BE16(0xffff), + }; + unsigned int size = sizeof(struct ibv_flow_spec_eth); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + struct ibv_flow_spec_eth eth = { + .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, + }; + int ret; + const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | + MLX5_FLOW_LAYER_INNER_L4) : + (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4); + const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : + MLX5_FLOW_LAYER_OUTER_VLAN; + const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + + if (flow->layers & vlanm) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VLAN layer already configured"); + else if ((flow->layers & l34m) != 0) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L2 layer cannot follow L3/L4 layer"); + if (!mask) + mask = &rte_flow_item_vlan_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_vlan), error); + if (ret) + return ret; if (spec) { - unsigned int i; - if (!mask) - mask = default_mask; - - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!parser->queue[i].ibv_attr) - continue; - - eth = (void *)((uintptr_t)parser->queue[i].ibv_attr + - parser->queue[i].offset - eth_size); - eth->val.vlan_tag = spec->tci; - eth->mask.vlan_tag = mask->tci; - eth->val.vlan_tag &= eth->mask.vlan_tag; - } + eth.val.vlan_tag = spec->tci; + eth.mask.vlan_tag = mask->tci; + eth.val.vlan_tag &= eth.mask.vlan_tag; + eth.val.ether_type = spec->inner_type; + eth.mask.ether_type = mask->inner_type; + eth.val.ether_type &= eth.mask.ether_type; } - return 0; + /* + * From verbs perspective an empty VLAN is equivalent + * to a packet without VLAN layer. + */ + if (!eth.mask.vlan_tag) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + item->spec, + "VLAN cannot be empty"); + if (!(flow->layers & l2m)) { + if (size <= flow_size) { + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + mlx5_flow_spec_verbs_add(flow, ð, size); + } + } else { + if (flow->cur_verbs) + mlx5_flow_item_vlan_update(flow->cur_verbs->attr, + ð); + size = 0; /* Only an update is done in eth specification. */ + } + flow->layers |= tunnel ? + (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) : + (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN); + return size; } /** - * Convert IPv4 item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. 
- * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. */ static int -mlx5_flow_create_ipv4(const struct rte_flow_item *item, - const void *default_mask, - void *data) +mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size, struct rte_flow_error *error) { const struct rte_flow_item_ipv4 *spec = item->spec; const struct rte_flow_item_ipv4 *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; - unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext); + const struct rte_flow_item_ipv4 nic_mask = { + .hdr = { + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), + .type_of_service = 0xff, + .next_proto_id = 0xff, + }, + }; + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext); struct ibv_flow_spec_ipv4_ext ipv4 = { - .type = parser->inner | IBV_FLOW_SPEC_IPV4_EXT, - .size = ipv4_size, + .type = IBV_FLOW_SPEC_IPV4_EXT | + (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, }; + int ret; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) - parser->layer = HASH_RXQ_IPV4; + if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "multiple L3 layers not supported"); + else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 cannot follow an L4 layer."); + if (!mask) + mask = &rte_flow_item_ipv4_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_ipv4), error); + if (ret < 0) + return ret; + flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; if (spec) { - if (!mask) - mask = default_mask; ipv4.val = (struct ibv_flow_ipv4_ext_filter){ .src_ip = spec->hdr.src_addr, .dst_ip = spec->hdr.dst_addr, @@ -1325,44 +1047,108 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item, ipv4.val.proto &= ipv4.mask.proto; ipv4.val.tos &= ipv4.mask.tos; } - mlx5_flow_create_copy(parser, &ipv4, ipv4_size); - return 0; + flow->l3_protocol_en = !!ipv4.mask.proto; + flow->l3_protocol = ipv4.val.proto; + if (size <= flow_size) { + mlx5_flow_verbs_hashfields_adjust + (flow, tunnel, + (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_OTHER), + (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3; + mlx5_flow_spec_verbs_add(flow, &ipv4, size); + } + return size; } /** - * Convert IPv6 item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. 
* - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. */ static int -mlx5_flow_create_ipv6(const struct rte_flow_item *item, - const void *default_mask, - void *data) +mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size, struct rte_flow_error *error) { const struct rte_flow_item_ipv6 *spec = item->spec; const struct rte_flow_item_ipv6 *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; - unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6); + const struct rte_flow_item_ipv6 nic_mask = { + .hdr = { + .src_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .dst_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .vtc_flow = RTE_BE32(0xffffffff), + .proto = 0xff, + .hop_limits = 0xff, + }, + }; + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int size = sizeof(struct ibv_flow_spec_ipv6); struct ibv_flow_spec_ipv6 ipv6 = { - .type = parser->inner | IBV_FLOW_SPEC_IPV6, - .size = ipv6_size, + .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, }; + int ret; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) - parser->layer = HASH_RXQ_IPV6; + if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "multiple L3 layers not supported"); + else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 cannot follow an L4 layer."); + /* + * IPv6 is not recognised by the NIC inside a GRE tunnel. + * Such support has to be disabled as the rule will be + * accepted. Issue reproduced with Mellanox OFED 4.3-3.0.2.1 and + * Mellanox OFED 4.4-1.0.0.0. + */ + if (tunnel && flow->layers & MLX5_FLOW_LAYER_GRE) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "IPv6 inside a GRE tunnel is" + " not recognised."); + if (!mask) + mask = &rte_flow_item_ipv6_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_ipv6), error); + if (ret < 0) + return ret; + flow->layers |= tunnel ? 
MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; if (spec) { unsigned int i; uint32_t vtc_flow_val; uint32_t vtc_flow_mask; - if (!mask) - mask = default_mask; memcpy(&ipv6.val.src_ip, spec->hdr.src_addr, RTE_DIM(ipv6.val.src_ip)); memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr, @@ -1397,44 +1183,86 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item, ipv6.val.next_hdr &= ipv6.mask.next_hdr; ipv6.val.hop_limit &= ipv6.mask.hop_limit; } - mlx5_flow_create_copy(parser, &ipv6, ipv6_size); - return 0; + flow->l3_protocol_en = !!ipv6.mask.next_hdr; + flow->l3_protocol = ipv6.val.next_hdr; + if (size <= flow_size) { + mlx5_flow_verbs_hashfields_adjust + (flow, tunnel, + (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER), + (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3; + mlx5_flow_spec_verbs_add(flow, &ipv6, size); + } + return size; } /** - * Convert UDP item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. */ static int -mlx5_flow_create_udp(const struct rte_flow_item *item, - const void *default_mask, - void *data) +mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size, struct rte_flow_error *error) { const struct rte_flow_item_udp *spec = item->spec; const struct rte_flow_item_udp *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; - unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp); struct ibv_flow_spec_tcp_udp udp = { - .type = parser->inner | IBV_FLOW_SPEC_UDP, - .size = udp_size, + .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, }; + int ret; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) { - if (parser->layer == HASH_RXQ_IPV4) - parser->layer = HASH_RXQ_UDPV4; - else - parser->layer = HASH_RXQ_UDPV6; - } + if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "protocol filtering not compatible" + " with UDP layer"); + if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3))) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 is mandatory to filter" + " on L4"); + if (flow->layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L4 layer is already" + " present"); + if (!mask) + mask = &rte_flow_item_udp_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_udp_mask, + sizeof(struct rte_flow_item_udp), error); + if (ret < 0) + return ret; + flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : + MLX5_FLOW_LAYER_OUTER_L4_UDP; if (spec) { - if (!mask) - mask = default_mask; udp.val.dst_port = spec->hdr.dst_port; udp.val.src_port = spec->hdr.src_port; udp.mask.dst_port = mask->hdr.dst_port; @@ -1443,44 +1271,81 @@ mlx5_flow_create_udp(const struct rte_flow_item *item, udp.val.src_port &= udp.mask.src_port; udp.val.dst_port &= udp.mask.dst_port; } - mlx5_flow_create_copy(parser, &udp, udp_size); - return 0; + if (size <= flow_size) { + mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP, + (IBV_RX_HASH_SRC_PORT_UDP | + IBV_RX_HASH_DST_PORT_UDP)); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4; + mlx5_flow_spec_verbs_add(flow, &udp, size); + } + return size; } /** - * Convert TCP item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. */ static int -mlx5_flow_create_tcp(const struct rte_flow_item *item, - const void *default_mask, - void *data) +mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size, struct rte_flow_error *error) { const struct rte_flow_item_tcp *spec = item->spec; const struct rte_flow_item_tcp *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; - unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp); struct ibv_flow_spec_tcp_udp tcp = { - .type = parser->inner | IBV_FLOW_SPEC_TCP, - .size = tcp_size, + .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, }; + int ret; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) { - if (parser->layer == HASH_RXQ_IPV4) - parser->layer = HASH_RXQ_TCPV4; - else - parser->layer = HASH_RXQ_TCPV6; - } + if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_TCP) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "protocol filtering not compatible" + " with TCP layer"); + if (!(flow->layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3))) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 is mandatory to filter on L4"); + if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L4 layer is already present"); + if (!mask) + mask = &rte_flow_item_tcp_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_tcp_mask, + sizeof(struct rte_flow_item_tcp), error); + if (ret < 0) + return ret; + flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; if (spec) { - if (!mask) - mask = default_mask; tcp.val.dst_port = spec->hdr.dst_port; tcp.val.src_port = spec->hdr.src_port; tcp.mask.dst_port = mask->hdr.dst_port; @@ -1489,43 +1354,78 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item, tcp.val.src_port &= tcp.mask.src_port; tcp.val.dst_port &= tcp.mask.dst_port; } - mlx5_flow_create_copy(parser, &tcp, tcp_size); - return 0; + if (size <= flow_size) { + mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP, + (IBV_RX_HASH_SRC_PORT_TCP | + IBV_RX_HASH_DST_PORT_TCP)); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4; + mlx5_flow_spec_verbs_add(flow, &tcp, size); + } + return size; } /** - * Convert VXLAN item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. 
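The TCP/UDP converters above enforce two things: an L3 item must precede any L4 item, and if the L3 spec pinned the IP protocol, the L4 item must agree with it. A pattern the TCP converter would reject (values are illustrative; IPPROTO_UDP comes from <netinet/in.h>):

    struct rte_flow_item_ipv4 ip_spec = { .hdr.next_proto_id = IPPROTO_UDP };
    struct rte_flow_item_ipv4 ip_mask = { .hdr.next_proto_id = 0xff };
    const struct rte_flow_item bad[] = {
    	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
    	{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
    	  .spec = &ip_spec, .mask = &ip_mask }, /* pins L4 proto to UDP */
    	{ .type = RTE_FLOW_ITEM_TYPE_TCP },     /* rejected: proto mismatch */
    	{ .type = RTE_FLOW_ITEM_TYPE_END },
    };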
*/ static int -mlx5_flow_create_vxlan(const struct rte_flow_item *item, - const void *default_mask, - void *data) +mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size, struct rte_flow_error *error) { const struct rte_flow_item_vxlan *spec = item->spec; const struct rte_flow_item_vxlan *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; unsigned int size = sizeof(struct ibv_flow_spec_tunnel); struct ibv_flow_spec_tunnel vxlan = { - .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL, + .type = IBV_FLOW_SPEC_VXLAN_TUNNEL, .size = size, }; + int ret; union vni { uint32_t vlan_id; uint8_t vni[4]; - } id; + } id = { .vlan_id = 0, }; - id.vni[0] = 0; - parser->inner = IBV_FLOW_SPEC_INNER; + if (flow->layers & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "a tunnel is already present"); + /* + * Verify only UDPv4 is present as defined in + * https://tools.ietf.org/html/rfc7348 + */ + if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "no outer UDP layer found"); + if (!mask) + mask = &rte_flow_item_vxlan_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_vxlan_mask, + sizeof(struct rte_flow_item_vxlan), error); + if (ret < 0) + return ret; if (spec) { - if (!mask) - mask = default_mask; memcpy(&id.vni[1], spec->vni, 3); vxlan.val.tunnel_id = id.vlan_id; memcpy(&id.vni[1], mask->vni, 3); @@ -1533,298 +1433,1572 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item, /* Remove unwanted bits from values. */ vxlan.val.tunnel_id &= vxlan.mask.tunnel_id; } + /* + * Tunnel id 0 is equivalent as not adding a VXLAN layer, if + * only this layer is defined in the Verbs specification it is + * interpreted as wildcard and all packets will match this + * rule, if it follows a full stack layer (ex: eth / ipv4 / + * udp), all packets matching the layers before will also + * match this rule. To avoid such situation, VNI 0 is + * currently refused. + */ + if (!vxlan.val.tunnel_id) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VXLAN vni cannot be 0"); + if (!(flow->layers & MLX5_FLOW_LAYER_OUTER)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VXLAN tunnel must be fully defined"); + if (size <= flow_size) { + mlx5_flow_spec_verbs_add(flow, &vxlan, size); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + } + flow->layers |= MLX5_FLOW_LAYER_VXLAN; + return size; +} + +/** + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] item + * Item specification. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. 
+ * On error, a negative errno value is returned and rte_errno is set. + */ +static int +mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev, + const struct rte_flow_item *item, + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) +{ + const struct rte_flow_item_vxlan_gpe *spec = item->spec; + const struct rte_flow_item_vxlan_gpe *mask = item->mask; + unsigned int size = sizeof(struct ibv_flow_spec_tunnel); + struct ibv_flow_spec_tunnel vxlan_gpe = { + .type = IBV_FLOW_SPEC_VXLAN_TUNNEL, + .size = size, + }; + int ret; + union vni { + uint32_t vlan_id; + uint8_t vni[4]; + } id = { .vlan_id = 0, }; + + if (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 VXLAN is not enabled by device" + " parameter and/or not configured in" + " firmware"); + if (flow->layers & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "a tunnel is already present"); + /* + * Verify only UDPv4 is present as defined in + * https://tools.ietf.org/html/rfc7348 + */ + if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "no outer UDP layer found"); + if (!mask) + mask = &rte_flow_item_vxlan_gpe_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, + sizeof(struct rte_flow_item_vxlan_gpe), error); + if (ret < 0) + return ret; + if (spec) { + memcpy(&id.vni[1], spec->vni, 3); + vxlan_gpe.val.tunnel_id = id.vlan_id; + memcpy(&id.vni[1], mask->vni, 3); + vxlan_gpe.mask.tunnel_id = id.vlan_id; + if (spec->protocol) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VxLAN-GPE protocol not supported"); + /* Remove unwanted bits from values. */ + vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id; + } /* * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this * layer is defined in the Verbs specification it is interpreted as * wildcard and all packets will match this rule, if it follows a full * stack layer (ex: eth / ipv4 / udp), all packets matching the layers - * before will also match this rule. - * To avoid such situation, VNI 0 is currently refused. + * before will also match this rule. To avoid such situation, VNI 0 + * is currently refused. */ - if (!vxlan.val.tunnel_id) - return EINVAL; - mlx5_flow_create_copy(parser, &vxlan, size); + if (!vxlan_gpe.val.tunnel_id) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VXLAN-GPE vni cannot be 0"); + if (!(flow->layers & MLX5_FLOW_LAYER_OUTER)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VXLAN-GPE tunnel must be fully" + " defined"); + if (size <= flow_size) { + mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + } + flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE; + return size; +} + +/** + * Update the protocol in Verbs IPv4/IPv6 spec. + * + * @param[in, out] attr + * Pointer to Verbs attributes structure. + * @param[in] search + * Specification type to search in order to update the IP protocol. + * @param[in] protocol + * Protocol value to set if none is present in the specification. 
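Both VXLAN converters pack the 3-byte VNI into the 32-bit Verbs tunnel_id through a union, leaving byte 0 clear. The same packing as a small helper (the name and the byte-order interpretation are illustrative; the patch keeps the bytes as they appear on the wire):

    #include <stdint.h>
    #include <string.h>

    static uint32_t
    vni_to_tunnel_id(const uint8_t vni[3])
    {
    	union {
    		uint32_t tunnel_id;
    		uint8_t bytes[4];
    	} id = { .tunnel_id = 0 };

    	memcpy(&id.bytes[1], vni, 3); /* bytes 1..3 carry the 24-bit VNI */
    	return id.tunnel_id;
    }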
+ */
+static void
+mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
+				      enum ibv_flow_spec_type search,
+				      uint8_t protocol)
+{
+	unsigned int i;
+	struct ibv_spec_header *hdr;
+
+	if (!attr)
+		return;
+	hdr = (struct ibv_spec_header *)
+		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+	for (i = 0; i != attr->num_of_specs; ++i) {
+		if (hdr->type == search) {
+			union {
+				struct ibv_flow_spec_ipv4_ext *ipv4;
+				struct ibv_flow_spec_ipv6 *ipv6;
+			} ip;
+
+			switch (search) {
+			case IBV_FLOW_SPEC_IPV4_EXT:
+				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
+				if (!ip.ipv4->val.proto) {
+					ip.ipv4->val.proto = protocol;
+					ip.ipv4->mask.proto = 0xff;
+				}
+				break;
+			case IBV_FLOW_SPEC_IPV6:
+				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
+				if (!ip.ipv6->val.next_hdr) {
+					ip.ipv6->val.next_hdr = protocol;
+					ip.ipv6->mask.next_hdr = 0xff;
+				}
+				break;
+			default:
+				break;
+			}
+			break;
+		}
+		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+	}
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * It will also update the previous L3 layer with the IP protocol value
+ * matching GRE.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
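The update helper above walks the specifications that follow the ibv_flow_attr header, advancing by each spec's self-declared size. The traversal in isolation (find_spec is a hypothetical name; attr must be non-NULL):

    static struct ibv_spec_header *
    find_spec(struct ibv_flow_attr *attr, enum ibv_flow_spec_type type)
    {
    	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
    		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
    	unsigned int i;

    	for (i = 0; i != attr->num_of_specs; ++i) {
    		if (hdr->type == type)
    			return hdr;
    		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
    	}
    	return NULL;
    }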
+ */ +static int +mlx5_flow_item_gre(const struct rte_flow_item *item, + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) +{ + struct mlx5_flow_verbs *verbs = flow->cur_verbs; + const struct rte_flow_item_gre *spec = item->spec; + const struct rte_flow_item_gre *mask = item->mask; +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + unsigned int size = sizeof(struct ibv_flow_spec_gre); + struct ibv_flow_spec_gre tunnel = { + .type = IBV_FLOW_SPEC_GRE, + .size = size, + }; +#else + unsigned int size = sizeof(struct ibv_flow_spec_tunnel); + struct ibv_flow_spec_tunnel tunnel = { + .type = IBV_FLOW_SPEC_VXLAN_TUNNEL, + .size = size, + }; +#endif + int ret; + + if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_GRE) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "protocol filtering not compatible" + " with this GRE layer"); + if (flow->layers & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "a tunnel is already present"); + if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 Layer is missing"); + if (!mask) + mask = &rte_flow_item_gre_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_gre_mask, + sizeof(struct rte_flow_item_gre), error); + if (ret < 0) + return ret; +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + if (spec) { + tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver; + tunnel.val.protocol = spec->protocol; + tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver; + tunnel.mask.protocol = mask->protocol; + /* Remove unwanted bits from values. */ + tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver; + tunnel.val.protocol &= tunnel.mask.protocol; + tunnel.val.key &= tunnel.mask.key; + } +#else + if (spec && (spec->protocol & mask->protocol)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "without MPLS support the" + " specification cannot be used for" + " filtering"); +#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */ + if (size <= flow_size) { + if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4) + mlx5_flow_item_gre_ip_protocol_update + (verbs->attr, IBV_FLOW_SPEC_IPV4_EXT, + MLX5_IP_PROTOCOL_GRE); + else + mlx5_flow_item_gre_ip_protocol_update + (verbs->attr, IBV_FLOW_SPEC_IPV6, + MLX5_IP_PROTOCOL_GRE); + mlx5_flow_spec_verbs_add(flow, &tunnel, size); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + } + flow->layers |= MLX5_FLOW_LAYER_GRE; + return size; +} + +/** + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param[in] item + * Item specification. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. 
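A pattern the GRE converter accepts; when the outer IPv4 item left its protocol unmasked, mlx5_flow_item_gre_ip_protocol_update() retrofits IP protocol 47 (GRE) into the already-written L3 spec. Illustrative only; an inner IPv6 item here would be refused, per the OFED limitation noted in the IPv6 converter:

    const struct rte_flow_item gre_pattern[] = {
    	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
    	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },  /* proto retrofitted to GRE */
    	{ .type = RTE_FLOW_ITEM_TYPE_GRE },
    	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },  /* inner L3 */
    	{ .type = RTE_FLOW_ITEM_TYPE_END },
    };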
+ */
+static int
+mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
+		    struct rte_flow *flow __rte_unused,
+		    const size_t flow_size __rte_unused,
+		    struct rte_flow_error *error)
+{
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+	const struct rte_flow_item_mpls *spec = item->spec;
+	const struct rte_flow_item_mpls *mask = item->mask;
+	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
+	struct ibv_flow_spec_mpls mpls = {
+		.type = IBV_FLOW_SPEC_MPLS,
+		.size = size,
+	};
+	int ret;
+
+	if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_MPLS)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "protocol filtering not compatible"
+					  " with MPLS layer");
+	/* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
+	if (flow->layers & MLX5_FLOW_LAYER_TUNNEL &&
+	    (flow->layers & MLX5_FLOW_LAYER_GRE) != MLX5_FLOW_LAYER_GRE)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "a tunnel is already"
+					  " present");
+	if (!mask)
+		mask = &rte_flow_item_mpls_mask;
+	ret = mlx5_flow_item_acceptable
+		(item, (const uint8_t *)mask,
+		 (const uint8_t *)&rte_flow_item_mpls_mask,
+		 sizeof(struct rte_flow_item_mpls), error);
+	if (ret < 0)
+		return ret;
+	if (spec) {
+		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
+		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
+		/* Remove unwanted bits from values. */
+		mpls.val.label &= mpls.mask.label;
+	}
+	if (size <= flow_size) {
+		mlx5_flow_spec_verbs_add(flow, &mpls, size);
+		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+	}
+	flow->layers |= MLX5_FLOW_LAYER_MPLS;
+	return size;
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+	return rte_flow_error_set(error, ENOTSUP,
+				  RTE_FLOW_ERROR_TYPE_ITEM,
+				  item,
+				  "MPLS is not supported by Verbs, please"
+				  " update.");
+}
+
+/**
+ * Convert the @p pattern into Verbs specifications after ensuring the NIC
+ * will understand and process it correctly.
+ * The conversion is performed item by item; each one is written into
+ * the @p flow if its size is lesser or equal to @p flow_size.
+ * Validation and memory consumption computation are still performed until the
+ * end of @p pattern, unless an error is encountered.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] pattern
+ * Flow pattern.
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, some
+ * garbage may be present.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p pattern has fully been
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
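The multi-tunnel guard in the MPLS converter reduces to: allow no tunnel at all, or a tunnel stack whose tunnel bit is exactly GRE. Restated with the patch's layer macros (the helper name is mine):

    static int
    mpls_item_allowed(uint32_t layers)
    {
    	if (!(layers & MLX5_FLOW_LAYER_TUNNEL))
    		return 1; /* no tunnel yet */
    	/* MPLS over GRE is the single allowed combination. */
    	return (layers & MLX5_FLOW_LAYER_GRE) == MLX5_FLOW_LAYER_GRE;
    }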
+ */ +static int +mlx5_flow_items(struct rte_eth_dev *dev, + const struct rte_flow_item pattern[], + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) +{ + int remain = flow_size; + size_t size = 0; + + for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { + int ret = 0; + + switch (pattern->type) { + case RTE_FLOW_ITEM_TYPE_VOID: + break; + case RTE_FLOW_ITEM_TYPE_ETH: + ret = mlx5_flow_item_eth(pattern, flow, remain, error); + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + ret = mlx5_flow_item_vlan(pattern, flow, remain, error); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + ret = mlx5_flow_item_ipv4(pattern, flow, remain, error); + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + ret = mlx5_flow_item_ipv6(pattern, flow, remain, error); + break; + case RTE_FLOW_ITEM_TYPE_UDP: + ret = mlx5_flow_item_udp(pattern, flow, remain, error); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + ret = mlx5_flow_item_tcp(pattern, flow, remain, error); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + ret = mlx5_flow_item_vxlan(pattern, flow, remain, + error); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + ret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow, + remain, error); + break; + case RTE_FLOW_ITEM_TYPE_GRE: + ret = mlx5_flow_item_gre(pattern, flow, remain, error); + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + ret = mlx5_flow_item_mpls(pattern, flow, remain, error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "item not supported"); + } + if (ret < 0) + return ret; + if (remain > ret) + remain -= ret; + else + remain = 0; + size += ret; + } + if (!flow->layers) { + const struct rte_flow_item item = { + .type = RTE_FLOW_ITEM_TYPE_ETH, + }; + + return mlx5_flow_item_eth(&item, flow, flow_size, error); + } + return size; +} + +/** + * Convert the @p action into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param[in] action + * Action configuration. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p action has fully been + * converted, otherwise another call with this returned memory size should + * be done. + * On error, a negative errno value is returned and rte_errno is set. 
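mlx5_flow_items() is built for two-pass use following the size contract documented above: a first call with no space validates and measures, a second call fills. A simplified sketch of such a caller (the real one in this patch is the flow merge path; `flow` is assumed to be zero-initialized scratch state):

    /* inside a hypothetical helper: */
    struct rte_flow_error error;
    int needed = mlx5_flow_items(dev, pattern, flow, 0, &error);
    if (needed < 0)
    	return needed;      /* pattern rejected during measuring */
    /* ...ensure at least `needed` bytes of spec space behind `flow`... */
    int written = mlx5_flow_items(dev, pattern, flow, needed, &error);
    /* written == needed: every item fitted and was converted */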
+ */
+static int
+mlx5_flow_action_drop(const struct rte_flow_action *action,
+		      struct rte_flow *flow, const size_t flow_size,
+		      struct rte_flow_error *error)
+{
+	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
+	struct ibv_flow_spec_action_drop drop = {
+		.type = IBV_FLOW_SPEC_ACTION_DROP,
+		.size = size,
+	};
+
+	if (flow->fate)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  action,
+					  "multiple fate actions are not"
+					  " supported");
+	if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  action,
+					  "drop is not compatible with"
+					  " flag/mark action");
+	if (size <= flow_size)
+		mlx5_flow_spec_verbs_add(flow, &drop, size);
+	flow->fate |= MLX5_FLOW_FATE_DROP;
+	return size;
+}
+
+/**
+ * Convert the @p action into @p flow after ensuring the NIC will understand
+ * and process it correctly.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_action_queue(struct rte_eth_dev *dev,
+		       const struct rte_flow_action *action,
+		       struct rte_flow *flow,
+		       struct rte_flow_error *error)
+{
+	struct priv *priv = dev->data->dev_private;
+	const struct rte_flow_action_queue *queue = action->conf;
+
+	if (flow->fate)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  action,
+					  "multiple fate actions are not"
+					  " supported");
+	if (queue->index >= priv->rxqs_n)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &queue->index,
+					  "queue index out of range");
+	if (!(*priv->rxqs)[queue->index])
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &queue->index,
+					  "queue is not configured");
+	if (flow->queue)
+		(*flow->queue)[0] = queue->index;
+	flow->rss.queue_num = 1;
+	flow->fate |= MLX5_FLOW_FATE_QUEUE;
+	return 0;
+}
+
+/**
+ * Ensure the @p action will be understood and used correctly by the NIC.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Pointer to flow actions array.
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[in, out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success @p flow->queue array and @p flow->rss are filled and valid.
+ * On error, a negative errno value is returned and rte_errno is set.
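The `fate` bitmask makes terminal actions mutually exclusive: once DROP, QUEUE or RSS is recorded, any second one trips the "multiple fate actions" error. For example:

    const struct rte_flow_action_queue q = { .index = 0 };
    const struct rte_flow_action ok[] = {
    	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
    	{ .type = RTE_FLOW_ACTION_TYPE_END },
    };
    const struct rte_flow_action rejected[] = {
    	{ .type = RTE_FLOW_ACTION_TYPE_DROP },
    	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q }, /* second fate */
    	{ .type = RTE_FLOW_ACTION_TYPE_END },
    };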
+ */ +static int +mlx5_flow_action_rss(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct priv *priv = dev->data->dev_private; + const struct rte_flow_action_rss *rss = action->conf; + unsigned int i; + + if (flow->fate) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "multiple fate actions are not" + " supported"); + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && + rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->func, + "RSS hash function not supported"); +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + if (rss->level > 2) +#else + if (rss->level > 1) +#endif + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->level, + "tunnel RSS is not supported"); + if (rss->key_len < MLX5_RSS_HASH_KEY_LEN) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->key_len, + "RSS hash key too small"); + if (rss->key_len > MLX5_RSS_HASH_KEY_LEN) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->key_len, + "RSS hash key too large"); + if (!rss->queue_num) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + rss, + "no queues were provided for RSS"); + if (rss->queue_num > priv->config.ind_table_max_size) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue_num, + "number of queues too large"); + if (rss->types & MLX5_RSS_HF_MASK) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->types, + "some RSS protocols are not" + " supported"); + for (i = 0; i != rss->queue_num; ++i) { + if (rss->queue[i] >= priv->rxqs_n) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + rss, + "queue index out of range"); + if (!(*priv->rxqs)[rss->queue[i]]) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue[i], + "queue is not configured"); + } + if (flow->queue) + memcpy((*flow->queue), rss->queue, + rss->queue_num * sizeof(uint16_t)); + flow->rss.queue_num = rss->queue_num; + memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN); + flow->rss.types = rss->types; + flow->rss.level = rss->level; + flow->fate |= MLX5_FLOW_FATE_RSS; return 0; } /** - * Convert mark/flag action to Verbs specification. + * Convert the @p action into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param[in] action + * Action configuration. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p action has fully been + * converted, otherwise another call with this returned memory size should + * be done. + * On error, a negative errno value is returned and rte_errno is set. 
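[Editor's note: a sketch of the action configuration the RSS validator above expects; the queue indices and key bytes are assumed values. Note the key length must be exactly MLX5_RSS_HASH_KEY_LEN (40 bytes) and level 0/1 selects outer-header RSS.]

    #include <rte_ethdev.h>
    #include <rte_flow.h>

    /* Assumed values: Rx queues 0 and 1 are configured, and the key is
     * 40 bytes as mlx5_flow_action_rss() requires. */
    static const uint16_t rss_queues[] = { 0, 1 };
    static const uint8_t rss_key[40] = { 0x6d, 0x5a /* ... */ };

    static const struct rte_flow_action_rss rss_conf = {
        .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
        .level = 1,                 /* outer headers; >1 needs tunnel support */
        .types = ETH_RSS_IP | ETH_RSS_UDP,
        .key_len = sizeof(rss_key), /* must equal MLX5_RSS_HASH_KEY_LEN */
        .queue_num = 2,
        .key = rss_key,
        .queue = rss_queues,
    };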
+ */ +static int +mlx5_flow_action_flag(const struct rte_flow_action *action, + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) +{ + unsigned int size = sizeof(struct ibv_flow_spec_action_tag); + struct ibv_flow_spec_action_tag tag = { + .type = IBV_FLOW_SPEC_ACTION_TAG, + .size = size, + .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT), + }; + struct mlx5_flow_verbs *verbs = flow->cur_verbs; + + if (flow->modifier & MLX5_FLOW_MOD_FLAG) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "flag action already present"); + if (flow->fate & MLX5_FLOW_FATE_DROP) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "flag is not compatible with drop" + " action"); + if (flow->modifier & MLX5_FLOW_MOD_MARK) + size = 0; + else if (size <= flow_size && verbs) + mlx5_flow_spec_verbs_add(flow, &tag, size); + flow->modifier |= MLX5_FLOW_MOD_FLAG; + return size; +} + +/** + * Update verbs specification to modify the flag to mark. + * + * @param[in, out] verbs + * Pointer to the mlx5_flow_verbs structure. + * @param[in] mark_id + * Mark identifier to replace the flag. + */ +static void +mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id) +{ + struct ibv_spec_header *hdr; + int i; + + if (!verbs) + return; + /* Update Verbs specification. */ + hdr = (struct ibv_spec_header *)verbs->specs; + if (!hdr) + return; + for (i = 0; i != verbs->attr->num_of_specs; ++i) { + if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) { + struct ibv_flow_spec_action_tag *t = + (struct ibv_flow_spec_action_tag *)hdr; + + t->tag_id = mlx5_flow_mark_set(mark_id); + } + hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size); + } +} + +/** + * Convert the @p action into @p flow (or by updating the already present + * Flag Verbs specification) after ensuring the NIC will understand and + * process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param[in] action + * Action configuration. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p action has fully been + * converted, otherwise another call with this returned memory size should + * be done. + * On error, a negative errno value is returned and rte_errno is set. 
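[Editor's note: on the receive side, the FLAG/MARK modifiers handled above surface in the mbuf through the standard PKT_RX_FDIR* reporting path; a short sketch of the application-side read-back.]

    #include <stdio.h>
    #include <rte_mbuf.h>

    /* Sketch: report the FLAG/MARK state of a received packet. A MARK id
     * set by a rule comes back in hash.fdir.hi. */
    static void
    report_mark(const struct rte_mbuf *m)
    {
        if (m->ol_flags & PKT_RX_FDIR_ID)
            printf("mark: %u\n", m->hash.fdir.hi);
        else if (m->ol_flags & PKT_RX_FDIR)
            printf("flag set\n");
    }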
+ */ +static int +mlx5_flow_action_mark(const struct rte_flow_action *action, + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) +{ + const struct rte_flow_action_mark *mark = action->conf; + unsigned int size = sizeof(struct ibv_flow_spec_action_tag); + struct ibv_flow_spec_action_tag tag = { + .type = IBV_FLOW_SPEC_ACTION_TAG, + .size = size, + }; + struct mlx5_flow_verbs *verbs = flow->cur_verbs; + + if (!mark) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "configuration cannot be null"); + if (mark->id >= MLX5_FLOW_MARK_MAX) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &mark->id, + "mark id must be in 0 <= id < " + RTE_STR(MLX5_FLOW_MARK_MAX)); + if (flow->modifier & MLX5_FLOW_MOD_MARK) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "mark action already present"); + if (flow->fate & MLX5_FLOW_FATE_DROP) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "mark is not compatible with drop" + " action"); + if (flow->modifier & MLX5_FLOW_MOD_FLAG) { + mlx5_flow_verbs_mark_update(verbs, mark->id); + size = 0; + } else if (size <= flow_size) { + tag.tag_id = mlx5_flow_mark_set(mark->id); + mlx5_flow_spec_verbs_add(flow, &tag, size); + } + flow->modifier |= MLX5_FLOW_MOD_MARK; + return size; +} + +/** + * Convert the @p action into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param action[in] + * Action configuration. + * @param flow[in, out] + * Pointer to flow structure. + * @param flow_size[in] + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param error[in, out] + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p action has fully been + * converted, otherwise another call with this returned memory size should + * be done. + * On error, a negative errno value is returned and rte_errno is set.
+ */ +static int +mlx5_flow_action_count(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + struct rte_flow *flow, + const size_t flow_size __rte_unused, + struct rte_flow_error *error) +{ + const struct rte_flow_action_count *count = action->conf; +#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT + unsigned int size = sizeof(struct ibv_flow_spec_counter_action); + struct ibv_flow_spec_counter_action counter = { + .type = IBV_FLOW_SPEC_ACTION_COUNT, + .size = size, + }; +#endif + + if (!flow->counter) { + flow->counter = mlx5_flow_counter_new(dev, count->shared, + count->id); + if (!flow->counter) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "cannot get counter" + " context."); + } + if (!((struct priv *)dev->data->dev_private)->config.flow_counter_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "flow counters are not supported."); + flow->modifier |= MLX5_FLOW_MOD_COUNT; +#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT + counter.counter_set_handle = flow->counter->cs->handle; + if (size <= flow_size) + mlx5_flow_spec_verbs_add(flow, &counter, size); + return size; +#endif + return 0; +} + +/** + * Convert the @p action into @p flow after ensuring the NIC will understand + * and process it correctly. + * The conversion is performed action per action, each of them is written into + * the @p flow if its size is lesser or equal to @p flow_size. + * Validation and memory consumption computation are still performed until the + * end of @p action, unless an error is encountered. + * + * @param[in] dev + * Pointer to Ethernet device structure. + * @param[in] actions + * Pointer to flow actions array. + * @param[in, out] flow + * Pointer to the rte_flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small some + * garbage may be present. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p actions has fully been + * converted, otherwise another call with this returned memory size should + * be done. + * On error, a negative errno value is returned and rte_errno is set. 
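[Editor's note: a sketch of the action array a caller would pass to request the counter handled above; the queue index and counter id are assumed values.]

    #include <rte_flow.h>

    /* Assumed values for illustration only. */
    static const struct rte_flow_action_count count_conf = {
        .shared = 0,    /* private to this rule */
        .id = 0,
    };
    static const struct rte_flow_action_queue queue_conf = { .index = 0 };

    static const struct rte_flow_action counted_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };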
+ */ +static int +mlx5_flow_actions(struct rte_eth_dev *dev, + const struct rte_flow_action actions[], + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) +{ + size_t size = 0; + int remain = flow_size; + int ret = 0; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_FLAG: + ret = mlx5_flow_action_flag(actions, flow, remain, + error); + break; + case RTE_FLOW_ACTION_TYPE_MARK: + ret = mlx5_flow_action_mark(actions, flow, remain, + error); + break; + case RTE_FLOW_ACTION_TYPE_DROP: + ret = mlx5_flow_action_drop(actions, flow, remain, + error); + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + ret = mlx5_flow_action_queue(dev, actions, flow, error); + break; + case RTE_FLOW_ACTION_TYPE_RSS: + ret = mlx5_flow_action_rss(dev, actions, flow, error); + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = mlx5_flow_action_count(dev, actions, flow, remain, + error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + if (ret < 0) + return ret; + if (remain > ret) + remain -= ret; + else + remain = 0; + size += ret; + } + if (!flow->fate) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "no fate action found"); + return size; +} + +/** + * Validate flow rule and fill flow structure accordingly. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] flow + * Pointer to flow structure. + * @param flow_size + * Size of allocated space for @p flow. + * @param[in] attr + * Flow rule attributes. + * @param[in] pattern + * Pattern specification (list terminated by the END pattern item). + * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * A positive value representing the size of the flow object in bytes + * regardless of @p flow_size on success, a negative errno value otherwise + * and rte_errno is set. + */ +static int +mlx5_flow_merge_switch(struct rte_eth_dev *dev, + struct rte_flow *flow, + size_t flow_size, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0); + uint16_t port_id[!n + n]; + struct mlx5_nl_flow_ptoi ptoi[!n + n + 1]; + size_t off = RTE_ALIGN_CEIL(sizeof(*flow), alignof(max_align_t)); + unsigned int i; + unsigned int own = 0; + int ret; + + /* At least one port is needed when no switch domain is present. */ + if (!n) { + n = 1; + port_id[0] = dev->data->port_id; + } else { + n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n); + } + for (i = 0; i != n; ++i) { + struct rte_eth_dev_info dev_info; + + rte_eth_dev_info_get(port_id[i], &dev_info); + if (port_id[i] == dev->data->port_id) + own = i; + ptoi[i].port_id = port_id[i]; + ptoi[i].ifindex = dev_info.if_index; + } + /* Ensure first entry of ptoi[] is the current device. */ + if (own) { + ptoi[n] = ptoi[0]; + ptoi[0] = ptoi[own]; + ptoi[own] = ptoi[n]; + } + /* An entry with zero ifindex terminates ptoi[]. */ + ptoi[n].port_id = 0; + ptoi[n].ifindex = 0; + if (flow_size < off) + flow_size = 0; + ret = mlx5_nl_flow_transpose((uint8_t *)flow + off, + flow_size ? 
flow_size - off : 0, + ptoi, attr, pattern, actions, error); + if (ret < 0) + return ret; + if (flow_size) { + *flow = (struct rte_flow){ + .attributes = *attr, + .nl_flow = (uint8_t *)flow + off, + }; + /* + * Generate a reasonably unique handle based on the address + * of the target buffer. + * + * This is straightforward on 32-bit systems where the flow + * pointer can be used directly. Otherwise, its least + * significant part is taken after shifting it by the + * previous power of two of the pointed buffer size. + */ + if (sizeof(flow) <= 4) + mlx5_nl_flow_brand(flow->nl_flow, (uintptr_t)flow); + else + mlx5_nl_flow_brand + (flow->nl_flow, + (uintptr_t)flow >> + rte_log2_u32(rte_align32prevpow2(flow_size))); + } + return off + ret; +} + +static unsigned int +mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) +{ + const struct rte_flow_item *item; + unsigned int has_vlan = 0; + + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + has_vlan = 1; + break; + } + } + if (has_vlan) + return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : + MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; + return rss_level < 2 ? MLX5_EXPANSION_ROOT : + MLX5_EXPANSION_ROOT_OUTER; +} + +/** + * Convert the @p attributes, @p pattern, @p actions, into a flow for the NIC + * after ensuring the NIC will understand and process it correctly. + * The conversion is only performed item/action per item/action, each of + * them is written into the @p flow if its size is lesser or equal to @p + * flow_size. + * Validation and memory consumption computation are still performed until the + * end, unless an error is encountered. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small some + * garbage may be present. + * @param[in] attributes + * Flow rule attributes. + * @param[in] pattern + * Pattern specification (list terminated by the END pattern item). + * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the flow has fully been converted and + * can be applied, otherwise another call with this returned memory size + * should be done. + * On error, a negative errno value is returned and rte_errno is set.
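[Editor's note: the handle-branding arithmetic in the 64-bit branch above deserves a worked example; the address and size are assumed values for illustration only.]

    /* Worked example of the 64-bit branch above, with assumed values:
     *   flow = 0x7f8a12345e00, flow_size = 400
     *   rte_align32prevpow2(400) = 256
     *   rte_log2_u32(256)        = 8
     *   handle = 0x7f8a12345e00 >> 8 = 0x7f8a12345e
     * Distinct live flows of this size occupy disjoint regions of at
     * least that many bytes, so the shifted addresses are reasonably
     * unique. */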
+ */ +static int +mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow, + const size_t flow_size, + const struct rte_flow_attr *attributes, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_flow local_flow = { .layers = 0, }; + size_t size = sizeof(*flow); + union { + struct rte_flow_expand_rss buf; + uint8_t buffer[2048]; + } expand_buffer; + struct rte_flow_expand_rss *buf = &expand_buffer.buf; + struct mlx5_flow_verbs *original_verbs = NULL; + size_t original_verbs_size = 0; + uint32_t original_layers = 0; + int expanded_pattern_idx = 0; + int ret; + uint32_t i; + + if (attributes->transfer) + return mlx5_flow_merge_switch(dev, flow, flow_size, + attributes, pattern, + actions, error); + if (size > flow_size) + flow = &local_flow; + ret = mlx5_flow_attributes(dev, attributes, flow, error); + if (ret < 0) + return ret; + ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error); + if (ret < 0) + return ret; + if (local_flow.rss.types) { + unsigned int graph_root; + + graph_root = mlx5_find_graph_root(pattern, + local_flow.rss.level); + ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), + pattern, local_flow.rss.types, + mlx5_support_expansion, + graph_root); + assert(ret > 0 && + (unsigned int)ret < sizeof(expand_buffer.buffer)); + } else { + buf->entries = 1; + buf->entry[0].pattern = (void *)(uintptr_t)pattern; + } + size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t), + sizeof(void *)); + if (size <= flow_size) + flow->queue = (void *)(flow + 1); + LIST_INIT(&flow->verbs); + flow->layers = 0; + flow->modifier = 0; + flow->fate = 0; + for (i = 0; i != buf->entries; ++i) { + size_t off = size; + size_t off2; + + flow->layers = original_layers; + size += sizeof(struct ibv_flow_attr) + + sizeof(struct mlx5_flow_verbs); + off2 = size; + if (size < flow_size) { + flow->cur_verbs = (void *)((uintptr_t)flow + off); + flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1); + flow->cur_verbs->specs = + (void *)(flow->cur_verbs->attr + 1); + } + /* The first iteration converts the pattern into Verbs. */ + if (i == 0) { + /* Actions don't need to be converted several times. */ + ret = mlx5_flow_actions(dev, actions, flow, + (size < flow_size) ? + flow_size - size : 0, + error); + if (ret < 0) + return ret; + size += ret; + } else { + /* + * Next iteration means the pattern has already been + * converted and an expansion is necessary to match + * the user RSS request. For that only the expanded + * items will be converted, the common part with the + * user pattern is just copied into the next buffer + * zone. + */ + size += original_verbs_size; + if (size < flow_size) { + rte_memcpy(flow->cur_verbs->attr, + original_verbs->attr, + original_verbs_size + + sizeof(struct ibv_flow_attr)); + flow->cur_verbs->size = original_verbs_size; + } + } + ret = mlx5_flow_items + (dev, + (const struct rte_flow_item *) + &buf->entry[i].pattern[expanded_pattern_idx], + flow, + (size < flow_size) ? flow_size - size : 0, error); + if (ret < 0) + return ret; + size += ret; + if (size <= flow_size) { + mlx5_flow_adjust_priority(dev, flow); + LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next); + } + /* + * Keep a pointer to the first Verbs conversion and the layers + * it has encountered. + */ + if (i == 0) { + original_verbs = flow->cur_verbs; + original_verbs_size = size - off2; + original_layers = flow->layers; + /* + * Move the index of the expanded pattern to the + * first item not addressed yet.
+ */ + if (pattern->type == RTE_FLOW_ITEM_TYPE_END) { + expanded_pattern_idx++; + } else { + const struct rte_flow_item *item = pattern; + + for (item = pattern; + item->type != RTE_FLOW_ITEM_TYPE_END; + ++item) + expanded_pattern_idx++; + } + } + } + /* Restore the origin layers in the flow. */ + flow->layers = original_layers; + return size; +} + +/** + * Lookup and set the ptype in the data Rx part. A single Ptype can be used, + * if several tunnel rules are used on this queue, the tunnel ptype will be + * cleared. * - * @param parser - * Internal parser structure. - * @param mark_id - * Mark identifier. + * @param rxq_ctrl + * Rx queue to update. */ -static int -mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id) +static void +mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) { - unsigned int size = sizeof(struct ibv_flow_spec_action_tag); - struct ibv_flow_spec_action_tag tag = { - .type = IBV_FLOW_SPEC_ACTION_TAG, - .size = size, - .tag_id = mlx5_flow_mark_set(mark_id), - }; + unsigned int i; + uint32_t tunnel_ptype = 0; - assert(parser->mark); - mlx5_flow_create_copy(parser, &tag, size); - return 0; + /* Look up for the ptype to use. */ + for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) { + if (!rxq_ctrl->flow_tunnels_n[i]) + continue; + if (!tunnel_ptype) { + tunnel_ptype = tunnels_info[i].ptype; + } else { + tunnel_ptype = 0; + break; + } + } + rxq_ctrl->rxq.tunnel = tunnel_ptype; } /** - * Convert count action to Verbs specification. - * - * @param priv - * Pointer to private structure. - * @param parser - * Pointer to MLX5 flow parser structure. + * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow. * - * @return - * 0 on success, errno value on failure. + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] flow + * Pointer to flow structure. */ -static int -mlx5_flow_create_count(struct priv *priv __rte_unused, - struct mlx5_flow_parse *parser __rte_unused) +static void +mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) { -#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT - unsigned int size = sizeof(struct ibv_flow_spec_counter_action); - struct ibv_counter_set_init_attr init_attr = {0}; - struct ibv_flow_spec_counter_action counter = { - .type = IBV_FLOW_SPEC_ACTION_COUNT, - .size = size, - .counter_set_handle = 0, - }; + struct priv *priv = dev->data->dev_private; + const int mark = !!(flow->modifier & + (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK)); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int i; - init_attr.counter_set_id = 0; - parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr); - if (!parser->cs) - return EINVAL; - counter.counter_set_handle = parser->cs->handle; - mlx5_flow_create_copy(parser, &counter, size); -#endif - return 0; + for (i = 0; i != flow->rss.queue_num; ++i) { + int idx = (*flow->queue)[i]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of((*priv->rxqs)[idx], + struct mlx5_rxq_ctrl, rxq); + + if (mark) { + rxq_ctrl->rxq.mark = 1; + rxq_ctrl->flow_mark_n++; + } + if (tunnel) { + unsigned int j; + + /* Increase the counter matching the flow. */ + for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { + if ((tunnels_info[j].tunnel & flow->layers) == + tunnels_info[j].tunnel) { + rxq_ctrl->flow_tunnels_n[j]++; + break; + } + } + mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl); + } + } } /** - * Complete flow rule creation with a drop queue. 
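[Editor's note: to make the expansion bookkeeping above concrete, an illustration with assumed inputs; the exact entries depend on the support graph.]

    /* Illustration (assumed inputs): user pattern ETH / IPV4 / END with
     * RSS types covering UDP and TCP expands to
     *   entry[0]: ETH / IPV4 / END           (original pattern)
     *   entry[1]: ETH / IPV4 / UDP / END
     *   entry[2]: ETH / IPV4 / TCP / END
     * expanded_pattern_idx ends up at 2 (past ETH and IPV4), so on
     * iterations i > 0 only the trailing UDP/TCP items are converted;
     * the common prefix is copied from the first Verbs translation. */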
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the + * @p flow if no other flow uses it with the same kind of request. * - * @param priv - * Pointer to private structure. - * @param parser - * Internal parser structure. - * @param flow - * Pointer to the rte_flow. - * @param[out] error - * Perform verbose error reporting if not NULL. - * - * @return - * 0 on success, errno value on failure. + * @param dev + * Pointer to Ethernet device. + * @param[in] flow + * Pointer to the flow. */ -static int -priv_flow_create_action_queue_drop(struct priv *priv, - struct mlx5_flow_parse *parser, - struct rte_flow *flow, - struct rte_flow_error *error) +static void +mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct ibv_flow_spec_action_drop *drop; - unsigned int size = sizeof(struct ibv_flow_spec_action_drop); - int err = 0; - - assert(priv->pd); - assert(priv->ctx); - flow->drop = 1; - drop = (void *)((uintptr_t)parser->queue[HASH_RXQ_ETH].ibv_attr + - parser->queue[HASH_RXQ_ETH].offset); - *drop = (struct ibv_flow_spec_action_drop){ - .type = IBV_FLOW_SPEC_ACTION_DROP, - .size = size, - }; - ++parser->queue[HASH_RXQ_ETH].ibv_attr->num_of_specs; - parser->queue[HASH_RXQ_ETH].offset += size; - flow->frxq[HASH_RXQ_ETH].ibv_attr = - parser->queue[HASH_RXQ_ETH].ibv_attr; - if (parser->count) - flow->cs = parser->cs; - if (!priv->dev->data->dev_started) - return 0; - parser->queue[HASH_RXQ_ETH].ibv_attr = NULL; - flow->frxq[HASH_RXQ_ETH].ibv_flow = - mlx5_glue->create_flow(priv->flow_drop_queue->qp, - flow->frxq[HASH_RXQ_ETH].ibv_attr); - if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) { - rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "flow rule creation failure"); - err = ENOMEM; - goto error; + struct priv *priv = dev->data->dev_private; + const int mark = !!(flow->modifier & + (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK)); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int i; + + assert(dev->data->dev_started); + for (i = 0; i != flow->rss.queue_num; ++i) { + int idx = (*flow->queue)[i]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of((*priv->rxqs)[idx], + struct mlx5_rxq_ctrl, rxq); + + if (mark) { + rxq_ctrl->flow_mark_n--; + rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n; + } + if (tunnel) { + unsigned int j; + + /* Decrease the counter matching the flow. */ + for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { + if ((tunnels_info[j].tunnel & flow->layers) == + tunnels_info[j].tunnel) { + rxq_ctrl->flow_tunnels_n[j]--; + break; + } + } + mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl); + } } - return 0; -error: - assert(flow); - if (flow->frxq[HASH_RXQ_ETH].ibv_flow) { - claim_zero(mlx5_glue->destroy_flow - (flow->frxq[HASH_RXQ_ETH].ibv_flow)); - flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL; - } - if (flow->frxq[HASH_RXQ_ETH].ibv_attr) { - rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr); - flow->frxq[HASH_RXQ_ETH].ibv_attr = NULL; - } - if (flow->cs) { - claim_zero(mlx5_glue->destroy_counter_set(flow->cs)); - flow->cs = NULL; - parser->cs = NULL; - } - return err; } /** - * Create hash Rx queues when RSS is enabled. - * - * @param priv - * Pointer to private structure. - * @param parser - * Internal parser structure. - * @param flow - * Pointer to the rte_flow. - * @param[out] error - * Perform verbose error reporting if not NULL. + * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. * - * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * @param dev + * Pointer to Ethernet device. 
*/ -static int -priv_flow_create_action_queue_rss(struct priv *priv, - struct mlx5_flow_parse *parser, - struct rte_flow *flow, - struct rte_flow_error *error) +static void +mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; - for (i = 0; i != hash_rxq_init_n; ++i) { - uint64_t hash_fields; + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_ctrl *rxq_ctrl; + unsigned int j; - if (!parser->queue[i].ibv_attr) - continue; - flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr; - parser->queue[i].ibv_attr = NULL; - hash_fields = hash_rxq_init[i].hash_fields; - if (!priv->dev->data->dev_started) + if (!(*priv->rxqs)[i]) continue; - flow->frxq[i].hrxq = - mlx5_priv_hrxq_get(priv, - parser->rss_conf.rss_key, - parser->rss_conf.rss_key_len, - hash_fields, - parser->queues, - parser->queues_n); - if (flow->frxq[i].hrxq) - continue; - flow->frxq[i].hrxq = - mlx5_priv_hrxq_new(priv, - parser->rss_conf.rss_key, - parser->rss_conf.rss_key_len, - hash_fields, - parser->queues, - parser->queues_n); - if (!flow->frxq[i].hrxq) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "cannot create hash rxq"); - return ENOMEM; - } + rxq_ctrl = container_of((*priv->rxqs)[i], + struct mlx5_rxq_ctrl, rxq); + rxq_ctrl->flow_mark_n = 0; + rxq_ctrl->rxq.mark = 0; + for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) + rxq_ctrl->flow_tunnels_n[j] = 0; + rxq_ctrl->rxq.tunnel = 0; } +} + +/** + * Validate a flow supported by the NIC. + * + * @see rte_flow_validate() + * @see rte_flow_ops + */ +int +mlx5_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error); + + if (ret < 0) + return ret; return 0; } /** - * Complete flow rule creation. + * Remove the flow. * - * @param priv - * Pointer to private structure. - * @param parser - * Internal parser structure. - * @param flow - * Pointer to the rte_flow. + * @param[in] dev + * Pointer to Ethernet device. + * @param[in, out] flow + * Pointer to flow structure. + */ +static void +mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_flow_verbs *verbs; + + if (flow->nl_flow && priv->mnl_socket) + mlx5_nl_flow_destroy(priv->mnl_socket, flow->nl_flow, NULL); + LIST_FOREACH(verbs, &flow->verbs, next) { + if (verbs->flow) { + claim_zero(mlx5_glue->destroy_flow(verbs->flow)); + verbs->flow = NULL; + } + if (verbs->hrxq) { + if (flow->fate & MLX5_FLOW_FATE_DROP) + mlx5_hrxq_drop_release(dev); + else + mlx5_hrxq_release(dev, verbs->hrxq); + verbs->hrxq = NULL; + } + } + if (flow->counter) { + mlx5_flow_counter_release(flow->counter); + flow->counter = NULL; + } +} + +/** + * Apply the flow. + * + * @param[in] dev + * Pointer to Ethernet device structure. + * @param[in, out] flow + * Pointer to flow structure. * @param[out] error - * Perform verbose error reporting if not NULL. + * Pointer to error structure. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
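[Editor's note: mlx5_flow_validate() above backs the public entry point; a sketch of application-side use, with the port and rule variables assumed to be defined as in the earlier examples.]

    #include <stdio.h>
    #include <rte_flow.h>

    /* Sketch: dry-run a rule before creating it. */
    static int
    check_rule(uint16_t port_id, const struct rte_flow_attr *attr,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[])
    {
        struct rte_flow_error err;
        int ret = rte_flow_validate(port_id, attr, pattern, actions, &err);

        if (ret)
            printf("not supported (%d): %s\n", ret,
                   err.message ? err.message : "unknown");
        return ret;
    }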
*/ static int -priv_flow_create_action_queue(struct priv *priv, - struct mlx5_flow_parse *parser, - struct rte_flow *flow, - struct rte_flow_error *error) +mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error) { - int err = 0; - unsigned int i; + struct priv *priv = dev->data->dev_private; + struct mlx5_flow_verbs *verbs; + int err; - assert(priv->pd); - assert(priv->ctx); - assert(!parser->drop); - err = priv_flow_create_action_queue_rss(priv, parser, flow, error); - if (err) - goto error; - if (parser->count) - flow->cs = parser->cs; - if (!priv->dev->data->dev_started) - return 0; - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!flow->frxq[i].hrxq) - continue; - flow->frxq[i].ibv_flow = - mlx5_glue->create_flow(flow->frxq[i].hrxq->qp, - flow->frxq[i].ibv_attr); - if (!flow->frxq[i].ibv_flow) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "flow rule creation failure"); - err = ENOMEM; + LIST_FOREACH(verbs, &flow->verbs, next) { + if (flow->fate & MLX5_FLOW_FATE_DROP) { + verbs->hrxq = mlx5_hrxq_drop_new(dev); + if (!verbs->hrxq) { + rte_flow_error_set + (error, errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot get drop hash queue"); + goto error; + } + } else { + struct mlx5_hrxq *hrxq; + + hrxq = mlx5_hrxq_get(dev, flow->key, + MLX5_RSS_HASH_KEY_LEN, + verbs->hash_fields, + (*flow->queue), + flow->rss.queue_num); + if (!hrxq) + hrxq = mlx5_hrxq_new(dev, flow->key, + MLX5_RSS_HASH_KEY_LEN, + verbs->hash_fields, + (*flow->queue), + flow->rss.queue_num, + !!(flow->layers & + MLX5_FLOW_LAYER_TUNNEL)); + if (!hrxq) { + rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot get hash queue"); + goto error; + } + verbs->hrxq = hrxq; + } + verbs->flow = + mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr); + if (!verbs->flow) { + rte_flow_error_set(error, errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "hardware refuses to create flow"); goto error; } - DEBUG("%p type %d QP %p ibv_flow %p", - (void *)flow, i, - (void *)flow->frxq[i].hrxq, - (void *)flow->frxq[i].ibv_flow); - } - for (i = 0; i != parser->queues_n; ++i) { - struct mlx5_rxq_data *q = - (*priv->rxqs)[parser->queues[i]]; - - q->mark |= parser->mark; } + if (flow->nl_flow && + priv->mnl_socket && + mlx5_nl_flow_create(priv->mnl_socket, flow->nl_flow, error)) + goto error; return 0; error: - assert(flow); - for (i = 0; i != hash_rxq_init_n; ++i) { - if (flow->frxq[i].ibv_flow) { - struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow; - - claim_zero(mlx5_glue->destroy_flow(ibv_flow)); + err = rte_errno; /* Save rte_errno before cleanup. */ + LIST_FOREACH(verbs, &flow->verbs, next) { + if (verbs->hrxq) { + if (flow->fate & MLX5_FLOW_FATE_DROP) + mlx5_hrxq_drop_release(dev); + else + mlx5_hrxq_release(dev, verbs->hrxq); + verbs->hrxq = NULL; } - if (flow->frxq[i].hrxq) - mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq); - if (flow->frxq[i].ibv_attr) - rte_free(flow->frxq[i].ibv_attr); - } - if (flow->cs) { - claim_zero(mlx5_glue->destroy_counter_set(flow->cs)); - flow->cs = NULL; - parser->cs = NULL; } - return err; + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; } /** - * Convert a flow. + * Create a flow and add it to @p list. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * @param[in] attr * Flow rule attributes. 
- * @param[in] pattern + * @param[in] items * Pattern specification (list terminated by the END pattern item). * @param[in] actions * Associated actions (list terminated by the END action). @@ -1832,85 +3006,53 @@ error: * Perform verbose error reporting if not NULL. * * @return - * A flow on success, NULL otherwise. + * A flow on success, NULL otherwise and rte_errno is set. */ static struct rte_flow * -priv_flow_create(struct priv *priv, - struct mlx5_flows *list, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +mlx5_flow_list_create(struct rte_eth_dev *dev, + struct mlx5_flows *list, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { - struct mlx5_flow_parse parser = { .create = 1, }; struct rte_flow *flow = NULL; - unsigned int i; - int err; + size_t size = 0; + int ret; - err = priv_flow_convert(priv, attr, items, actions, error, &parser); - if (err) - goto exit; - flow = rte_calloc(__func__, 1, - sizeof(*flow) + parser.queues_n * sizeof(uint16_t), - 0); + ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error); + if (ret < 0) + return NULL; + size = ret; + flow = rte_calloc(__func__, 1, size, 0); if (!flow) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate flow memory"); + "not enough memory to create flow"); return NULL; } - /* Copy queues configuration. */ - flow->queues = (uint16_t (*)[])(flow + 1); - memcpy(flow->queues, parser.queues, parser.queues_n * sizeof(uint16_t)); - flow->queues_n = parser.queues_n; - flow->mark = parser.mark; - /* Copy RSS configuration. */ - flow->rss_conf = parser.rss_conf; - flow->rss_conf.rss_key = flow->rss_key; - memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len); - /* finalise the flow. */ - if (parser.drop) - err = priv_flow_create_action_queue_drop(priv, &parser, flow, - error); - else - err = priv_flow_create_action_queue(priv, &parser, flow, error); - if (err) - goto exit; + ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error); + if (ret < 0) { + rte_free(flow); + return NULL; + } + assert((size_t)ret == size); + if (dev->data->dev_started) { + ret = mlx5_flow_apply(dev, flow, error); + if (ret < 0) { + ret = rte_errno; /* Save rte_errno before cleanup. */ + if (flow) { + mlx5_flow_remove(dev, flow); + rte_free(flow); + } + rte_errno = ret; /* Restore rte_errno. */ + return NULL; + } + } TAILQ_INSERT_TAIL(list, flow, next); - DEBUG("Flow created %p", (void *)flow); + mlx5_flow_rxq_flags_set(dev, flow); return flow; -exit: - ERROR("flow creation error: %s", error->message); - for (i = 0; i != hash_rxq_init_n; ++i) { - if (parser.queue[i].ibv_attr) - rte_free(parser.queue[i].ibv_attr); - } - rte_free(flow); - return NULL; -} - -/** - * Validate a flow supported by the NIC. 
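[Editor's note: the allocation dance in mlx5_flow_list_create() above is the generic size-query convention used throughout this file; reduced to its essentials, with error handling elided.]

    /* First pass: no buffer, returns the size the flow object needs. */
    int size = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
    /* Second pass: convert for real into a buffer of exactly that size. */
    struct rte_flow *flow = (size < 0) ? NULL :
        rte_calloc(__func__, 1, size, 0);
    if (flow)
        (void)mlx5_flow_merge(dev, flow, size, attr, items, actions, error);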
- * - * @see rte_flow_validate() - * @see rte_flow_ops - */ -int -mlx5_flow_validate(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) -{ - struct priv *priv = dev->data->dev_private; - int ret; - struct mlx5_flow_parse parser = { .create = 0, }; - - priv_lock(priv); - ret = priv_flow_convert(priv, attr, items, actions, error, &parser); - priv_unlock(priv); - return ret; } /** @@ -1926,379 +3068,123 @@ mlx5_flow_create(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; - struct rte_flow *flow; - - priv_lock(priv); - flow = priv_flow_create(priv, &priv->flows, attr, items, actions, - error); - priv_unlock(priv); - return flow; + return mlx5_flow_list_create + (dev, &((struct priv *)dev->data->dev_private)->flows, + attr, items, actions, error); } /** - * Destroy a flow. + * Destroy a flow in a list. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * @param[in] flow * Flow to destroy. */ static void -priv_flow_destroy(struct priv *priv, - struct mlx5_flows *list, - struct rte_flow *flow) +mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, + struct rte_flow *flow) { - unsigned int i; - - if (flow->drop || !flow->mark) - goto free; - for (i = 0; i != flow->queues_n; ++i) { - struct rte_flow *tmp; - int mark = 0; - - /* - * To remove the mark from the queue, the queue must not be - * present in any other marked flow (RSS or not). - */ - TAILQ_FOREACH(tmp, list, next) { - unsigned int j; - uint16_t *tqs = NULL; - uint16_t tq_n = 0; - - if (!tmp->mark) - continue; - for (j = 0; j != hash_rxq_init_n; ++j) { - if (!tmp->frxq[j].hrxq) - continue; - tqs = tmp->frxq[j].hrxq->ind_table->queues; - tq_n = tmp->frxq[j].hrxq->ind_table->queues_n; - } - if (!tq_n) - continue; - for (j = 0; (j != tq_n) && !mark; j++) - if (tqs[j] == (*flow->queues)[i]) - mark = 1; - } - (*priv->rxqs)[(*flow->queues)[i]]->mark = mark; - } -free: - if (flow->drop) { - if (flow->frxq[HASH_RXQ_ETH].ibv_flow) - claim_zero(mlx5_glue->destroy_flow - (flow->frxq[HASH_RXQ_ETH].ibv_flow)); - rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr); - } else { - for (i = 0; i != hash_rxq_init_n; ++i) { - struct mlx5_flow *frxq = &flow->frxq[i]; - - if (frxq->ibv_flow) - claim_zero(mlx5_glue->destroy_flow - (frxq->ibv_flow)); - if (frxq->hrxq) - mlx5_priv_hrxq_release(priv, frxq->hrxq); - if (frxq->ibv_attr) - rte_free(frxq->ibv_attr); - } - } - if (flow->cs) { - claim_zero(mlx5_glue->destroy_counter_set(flow->cs)); - flow->cs = NULL; - } + mlx5_flow_remove(dev, flow); TAILQ_REMOVE(list, flow, next); - DEBUG("Flow destroyed %p", (void *)flow); + /* + * Update RX queue flags only if port is started, otherwise it is + * already clean. + */ + if (dev->data->dev_started) + mlx5_flow_rxq_flags_trim(dev, flow); rte_free(flow); } /** * Destroy all flows. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. */ void -priv_flow_flush(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) { while (!TAILQ_EMPTY(list)) { struct rte_flow *flow; flow = TAILQ_FIRST(list); - priv_flow_destroy(priv, list, flow); - } -} - -/** - * Create drop queue. 
- * - * @param priv - * Pointer to private structure. - * - * @return - * 0 on success. - */ -int -priv_flow_create_drop_queue(struct priv *priv) -{ - struct mlx5_hrxq_drop *fdq = NULL; - - assert(priv->pd); - assert(priv->ctx); - fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0); - if (!fdq) { - WARN("cannot allocate memory for drop queue"); - goto error; - } - fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0); - if (!fdq->cq) { - WARN("cannot allocate CQ for drop queue"); - goto error; - } - fdq->wq = mlx5_glue->create_wq - (priv->ctx, - &(struct ibv_wq_init_attr){ - .wq_type = IBV_WQT_RQ, - .max_wr = 1, - .max_sge = 1, - .pd = priv->pd, - .cq = fdq->cq, - }); - if (!fdq->wq) { - WARN("cannot allocate WQ for drop queue"); - goto error; - } - fdq->ind_table = mlx5_glue->create_rwq_ind_table - (priv->ctx, - &(struct ibv_rwq_ind_table_init_attr){ - .log_ind_tbl_size = 0, - .ind_tbl = &fdq->wq, - .comp_mask = 0, - }); - if (!fdq->ind_table) { - WARN("cannot allocate indirection table for drop queue"); - goto error; + mlx5_flow_list_destroy(dev, list, flow); } - fdq->qp = mlx5_glue->create_qp_ex - (priv->ctx, - &(struct ibv_qp_init_attr_ex){ - .qp_type = IBV_QPT_RAW_PACKET, - .comp_mask = - IBV_QP_INIT_ATTR_PD | - IBV_QP_INIT_ATTR_IND_TABLE | - IBV_QP_INIT_ATTR_RX_HASH, - .rx_hash_conf = (struct ibv_rx_hash_conf){ - .rx_hash_function = - IBV_RX_HASH_FUNC_TOEPLITZ, - .rx_hash_key_len = rss_hash_default_key_len, - .rx_hash_key = rss_hash_default_key, - .rx_hash_fields_mask = 0, - }, - .rwq_ind_tbl = fdq->ind_table, - .pd = priv->pd - }); - if (!fdq->qp) { - WARN("cannot allocate QP for drop queue"); - goto error; - } - priv->flow_drop_queue = fdq; - return 0; -error: - if (fdq->qp) - claim_zero(mlx5_glue->destroy_qp(fdq->qp)); - if (fdq->ind_table) - claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table)); - if (fdq->wq) - claim_zero(mlx5_glue->destroy_wq(fdq->wq)); - if (fdq->cq) - claim_zero(mlx5_glue->destroy_cq(fdq->cq)); - if (fdq) - rte_free(fdq); - priv->flow_drop_queue = NULL; - return -1; -} - -/** - * Delete drop queue. - * - * @param priv - * Pointer to private structure. - */ -void -priv_flow_delete_drop_queue(struct priv *priv) -{ - struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue; - - if (!fdq) - return; - if (fdq->qp) - claim_zero(mlx5_glue->destroy_qp(fdq->qp)); - if (fdq->ind_table) - claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table)); - if (fdq->wq) - claim_zero(mlx5_glue->destroy_wq(fdq->wq)); - if (fdq->cq) - claim_zero(mlx5_glue->destroy_cq(fdq->cq)); - rte_free(fdq); - priv->flow_drop_queue = NULL; } /** * Remove all flows. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. */ void -priv_flow_stop(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) { struct rte_flow *flow; - TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { - unsigned int i; - struct mlx5_ind_table_ibv *ind_tbl = NULL; - - if (flow->drop) { - if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) - continue; - claim_zero(mlx5_glue->destroy_flow - (flow->frxq[HASH_RXQ_ETH].ibv_flow)); - flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL; - DEBUG("Flow %p removed", (void *)flow); - /* Next flow. */ - continue; - } - /* Verify the flow has not already been cleaned. */ - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!flow->frxq[i].ibv_flow) - continue; - /* - * Indirection table may be necessary to remove the - * flags in the Rx queues. 
- * This helps to speed-up the process by avoiding - * another loop. - */ - ind_tbl = flow->frxq[i].hrxq->ind_table; - break; - } - if (i == hash_rxq_init_n) - return; - if (flow->mark) { - assert(ind_tbl); - for (i = 0; i != ind_tbl->queues_n; ++i) - (*priv->rxqs)[ind_tbl->queues[i]]->mark = 0; - } - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!flow->frxq[i].ibv_flow) - continue; - claim_zero(mlx5_glue->destroy_flow - (flow->frxq[i].ibv_flow)); - flow->frxq[i].ibv_flow = NULL; - mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq); - flow->frxq[i].hrxq = NULL; - } - DEBUG("Flow %p removed", (void *)flow); - } + TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) + mlx5_flow_remove(dev, flow); + mlx5_flow_rxq_flags_clear(dev); } /** * Add all flows. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_flow_start(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) { struct rte_flow *flow; + struct rte_flow_error error; + int ret = 0; - TAILQ_FOREACH(flow, list, next) { - unsigned int i; - - if (flow->drop) { - flow->frxq[HASH_RXQ_ETH].ibv_flow = - mlx5_glue->create_flow - (priv->flow_drop_queue->qp, - flow->frxq[HASH_RXQ_ETH].ibv_attr); - if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) { - DEBUG("Flow %p cannot be applied", - (void *)flow); - rte_errno = EINVAL; - return rte_errno; - } - DEBUG("Flow %p applied", (void *)flow); - /* Next flow. */ - continue; - } - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!flow->frxq[i].ibv_attr) - continue; - flow->frxq[i].hrxq = - mlx5_priv_hrxq_get(priv, flow->rss_conf.rss_key, - flow->rss_conf.rss_key_len, - hash_rxq_init[i].hash_fields, - (*flow->queues), - flow->queues_n); - if (flow->frxq[i].hrxq) - goto flow_create; - flow->frxq[i].hrxq = - mlx5_priv_hrxq_new(priv, flow->rss_conf.rss_key, - flow->rss_conf.rss_key_len, - hash_rxq_init[i].hash_fields, - (*flow->queues), - flow->queues_n); - if (!flow->frxq[i].hrxq) { - DEBUG("Flow %p cannot be applied", - (void *)flow); - rte_errno = EINVAL; - return rte_errno; - } -flow_create: - flow->frxq[i].ibv_flow = - mlx5_glue->create_flow(flow->frxq[i].hrxq->qp, - flow->frxq[i].ibv_attr); - if (!flow->frxq[i].ibv_flow) { - DEBUG("Flow %p cannot be applied", - (void *)flow); - rte_errno = EINVAL; - return rte_errno; - } - DEBUG("Flow %p applied", (void *)flow); - } - if (!flow->mark) - continue; - for (i = 0; i != flow->queues_n; ++i) - (*priv->rxqs)[(*flow->queues)[i]]->mark = 1; + TAILQ_FOREACH(flow, list, next) { + ret = mlx5_flow_apply(dev, flow, &error); + if (ret < 0) + goto error; + mlx5_flow_rxq_flags_set(dev, flow); } return 0; +error: + ret = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_flow_stop(dev, list); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** * Verify the flow list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return the number of flows not released. 
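[Editor's note: a sketch of the intended calling pattern for the start/stop pair above; the real callers live in the port start/stop path, and this wrapper is hypothetical.]

    /* Hypothetical helper: drop all rules from hardware and re-apply
     * them, e.g. around a queue reconfiguration. The flow list is kept. */
    static int
    mlx5_flow_reinstall(struct rte_eth_dev *dev)
    {
        struct priv *priv = dev->data->dev_private;

        mlx5_flow_stop(dev, &priv->flows);
        /* On failure mlx5_flow_start() already rolled back via
         * mlx5_flow_stop() and set rte_errno. */
        return mlx5_flow_start(dev, &priv->flows);
    }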
*/ int -priv_flow_verify(struct priv *priv) +mlx5_flow_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct rte_flow *flow; int ret = 0; TAILQ_FOREACH(flow, &priv->flows, next) { - DEBUG("%p: flow %p still referenced", (void *)priv, - (void *)flow); + DRV_LOG(DEBUG, "port %u flow %p still referenced", + dev->data->port_id, (void *)flow); ++ret; } return ret; @@ -2319,7 +3205,7 @@ priv_flow_verify(struct priv *priv) * A VLAN flow mask to apply. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, @@ -2331,7 +3217,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, struct priv *priv = dev->data->dev_private; const struct rte_flow_attr attr = { .ingress = 1, - .priority = MLX5_CTRL_FLOW_PRIORITY, + .priority = MLX5_FLOW_PRIO_RSVD, }; struct rte_flow_item items[] = { { @@ -2351,9 +3237,20 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, .type = RTE_FLOW_ITEM_TYPE_END, }, }; + uint16_t queue[priv->reta_idx_n]; + struct rte_flow_action_rss action_rss = { + .func = RTE_ETH_HASH_FUNCTION_DEFAULT, + .level = 0, + .types = priv->rss_conf.rss_hf, + .key_len = priv->rss_conf.rss_key_len, + .queue_num = priv->reta_idx_n, + .key = priv->rss_conf.rss_key, + .queue = queue, + }; struct rte_flow_action actions[] = { { .type = RTE_FLOW_ACTION_TYPE_RSS, + .conf = &action_rss, }, { .type = RTE_FLOW_ACTION_TYPE_END, @@ -2362,26 +3259,17 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, struct rte_flow *flow; struct rte_flow_error error; unsigned int i; - union { - struct rte_flow_action_rss rss; - struct { - const struct rte_eth_rss_conf *rss_conf; - uint16_t num; - uint16_t queue[RTE_MAX_QUEUES_PER_PORT]; - } local; - } action_rss; - - if (!priv->reta_idx_n) - return EINVAL; + + if (!priv->reta_idx_n) { + rte_errno = EINVAL; + return -rte_errno; + } for (i = 0; i != priv->reta_idx_n; ++i) - action_rss.local.queue[i] = (*priv->reta_idx)[i]; - action_rss.local.rss_conf = &priv->rss_conf; - action_rss.local.num = priv->reta_idx_n; - actions[0].conf = (const void *)&action_rss.rss; - flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions, - &error); + queue[i] = (*priv->reta_idx)[i]; + flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items, + actions, &error); if (!flow) - return rte_errno; + return -rte_errno; return 0; } @@ -2396,7 +3284,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, * An Ethernet flow mask to apply. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_ctrl_flow(struct rte_eth_dev *dev, @@ -2415,14 +3303,11 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev, int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, - struct rte_flow_error *error) + struct rte_flow_error *error __rte_unused) { struct priv *priv = dev->data->dev_private; - (void)error; - priv_lock(priv); - priv_flow_destroy(priv, &priv->flows, flow); - priv_unlock(priv); + mlx5_flow_list_destroy(dev, &priv->flows, flow); return 0; } @@ -2434,152 +3319,161 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, */ int mlx5_flow_flush(struct rte_eth_dev *dev, - struct rte_flow_error *error) + struct rte_flow_error *error __rte_unused) { struct priv *priv = dev->data->dev_private; - (void)error; - priv_lock(priv); - priv_flow_flush(priv, &priv->flows); - priv_unlock(priv); + mlx5_flow_list_flush(dev, &priv->flows); return 0; } -#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT /** - * Query flow counter. 
- * - * @param cs - * the counter set. - * @param counter_value - * returned data from the counter. + * Isolated mode. * - * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * @see rte_flow_isolate() + * @see rte_flow_ops */ -static int -priv_flow_query_count(struct ibv_counter_set *cs, - struct mlx5_flow_counter_stats *counter_stats, - struct rte_flow_query_count *query_count, - struct rte_flow_error *error) +int +mlx5_flow_isolate(struct rte_eth_dev *dev, + int enable, + struct rte_flow_error *error) { - uint64_t counters[2]; - struct ibv_query_counter_set_attr query_cs_attr = { - .cs = cs, - .query_flags = IBV_COUNTER_SET_FORCE_UPDATE, - }; - struct ibv_counter_set_data query_out = { - .out = counters, - .outlen = 2 * sizeof(uint64_t), - }; - int res = mlx5_glue->query_counter_set(&query_cs_attr, &query_out); + struct priv *priv = dev->data->dev_private; - if (res) { - rte_flow_error_set(error, -res, + if (dev->data->dev_started) { + rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot read counter"); - return -res; - } - query_count->hits_set = 1; - query_count->bytes_set = 1; - query_count->hits = counters[0] - counter_stats->hits; - query_count->bytes = counters[1] - counter_stats->bytes; - if (query_count->reset) { - counter_stats->hits = counters[0]; - counter_stats->bytes = counters[1]; + "port must be stopped first"); + return -rte_errno; } + priv->isolated = !!enable; + if (enable) + dev->dev_ops = &mlx5_dev_ops_isolate; + else + dev->dev_ops = &mlx5_dev_ops; return 0; } /** - * Query a flows. + * Query flow counter. * - * @see rte_flow_query() - * @see rte_flow_ops + * @param flow + * Pointer to the flow. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
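[Editor's note: the application-side counterpart of the isolate handler above, sketched with an assumed port id; as the handler enforces, the call must come while the port is stopped.]

    #include <stdio.h>
    #include <rte_flow.h>

    /* Sketch: enter isolated mode so only explicit flow rules receive
     * traffic. Must be called while port_id is stopped. */
    static int
    enable_isolation(uint16_t port_id)
    {
        struct rte_flow_error err;

        if (rte_flow_isolate(port_id, 1, &err)) {
            printf("isolate failed: %s\n",
                   err.message ? err.message : "unknown");
            return -1;
        }
        return 0;
    }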
 */ -int -mlx5_flow_query(struct rte_eth_dev *dev, - struct rte_flow *flow, - enum rte_flow_action_type action __rte_unused, - void *data, - struct rte_flow_error *error) +static int +mlx5_flow_query_count(struct rte_flow *flow __rte_unused, + void *data __rte_unused, + struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; - int res = EINVAL; +#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT + if (flow->modifier & MLX5_FLOW_MOD_COUNT) { + struct rte_flow_query_count *qc = data; + uint64_t counters[2] = {0, 0}; + struct ibv_query_counter_set_attr query_cs_attr = { + .cs = flow->counter->cs, + .query_flags = IBV_COUNTER_SET_FORCE_UPDATE, + }; + struct ibv_counter_set_data query_out = { + .out = counters, + .outlen = 2 * sizeof(uint64_t), + }; + int err = mlx5_glue->query_counter_set(&query_cs_attr, + &query_out); - priv_lock(priv); - if (flow->cs) { - res = priv_flow_query_count(flow->cs, - &flow->counter_stats, - (struct rte_flow_query_count *)data, - error); - } else { - rte_flow_error_set(error, res, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "no counter found for flow"); + if (err) + return rte_flow_error_set + (error, err, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot read counter"); + qc->hits_set = 1; + qc->bytes_set = 1; + qc->hits = counters[0] - flow->counter->hits; + qc->bytes = counters[1] - flow->counter->bytes; + if (qc->reset) { + flow->counter->hits = counters[0]; + flow->counter->bytes = counters[1]; + } + return 0; } - priv_unlock(priv); - return -res; -} + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "flow does not have counter"); #endif + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "counters are not available"); +} /** - * Isolated mode. + * Query a flow. * - * @see rte_flow_isolate() + * @see rte_flow_query() * @see rte_flow_ops */ int -mlx5_flow_isolate(struct rte_eth_dev *dev, - int enable, - struct rte_flow_error *error) +mlx5_flow_query(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; + int ret = 0; - priv_lock(priv); - if (dev->data->dev_started) { - rte_flow_error_set(error, EBUSY, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "port must be stopped first"); - priv_unlock(priv); - return -rte_errno; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = mlx5_flow_query_count(flow, data, error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + if (ret < 0) + return ret; } - priv->isolated = !!enable; - if (enable) - priv->dev->dev_ops = &mlx5_dev_ops_isolate; - else - priv->dev->dev_ops = &mlx5_dev_ops; - priv_unlock(priv); return 0; } /** * Convert a flow director filter to a generic flow. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param fdir_filter * Flow director filter to add. * @param attributes * Generic flow parameters structure. * * @return - * 0 on success, errno value on error. + * 0 on success, a negative errno value otherwise and rte_errno is set.
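[Editor's note: how an application would reach mlx5_flow_query_count() above; port and flow handles are assumed, and the action array is END-terminated as the loop above expects.]

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_flow.h>

    /* Sketch: read (and reset) the COUNT action of an existing rule. */
    static void
    dump_counter(uint16_t port_id, struct rte_flow *flow)
    {
        struct rte_flow_query_count qc = { .reset = 1 };
        const struct rte_flow_action count_query[] = {
            { .type = RTE_FLOW_ACTION_TYPE_COUNT },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        if (!rte_flow_query(port_id, flow, count_query, &qc, &err) &&
            qc.hits_set)
            printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
                   qc.hits, qc.bytes);
    }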
*/ static int -priv_fdir_filter_convert(struct priv *priv, +mlx5_fdir_filter_convert(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter, struct mlx5_fdir *attributes) { + struct priv *priv = dev->data->dev_private; const struct rte_eth_fdir_input *input = &fdir_filter->input; + const struct rte_eth_fdir_masks *mask = + &dev->data->dev_conf.fdir_conf.mask; /* Validate queue number. */ if (fdir_filter->action.rx_queue >= priv->rxqs_n) { - ERROR("invalid queue number %d", fdir_filter->action.rx_queue); - return EINVAL; + DRV_LOG(ERR, "port %u invalid queue number %d", + dev->data->port_id, fdir_filter->action.rx_queue); + rte_errno = EINVAL; + return -rte_errno; } attributes->attr.ingress = 1; attributes->items[0] = (struct rte_flow_item) { @@ -2600,144 +3494,140 @@ priv_fdir_filter_convert(struct priv *priv, }; break; default: - ERROR("invalid behavior %d", fdir_filter->action.behavior); - return ENOTSUP; + DRV_LOG(ERR, "port %u invalid behavior %d", + dev->data->port_id, + fdir_filter->action.behavior); + rte_errno = ENOTSUP; + return -rte_errno; } attributes->queue.index = fdir_filter->action.rx_queue; + /* Handle L3. */ switch (fdir_filter->input.flow_type) { case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: attributes->l3.ipv4.hdr = (struct ipv4_hdr){ - .src_addr = input->flow.udp4_flow.ip.src_ip, - .dst_addr = input->flow.udp4_flow.ip.dst_ip, - .time_to_live = input->flow.udp4_flow.ip.ttl, - .type_of_service = input->flow.udp4_flow.ip.tos, - .next_proto_id = input->flow.udp4_flow.ip.proto, + .src_addr = input->flow.ip4_flow.src_ip, + .dst_addr = input->flow.ip4_flow.dst_ip, + .time_to_live = input->flow.ip4_flow.ttl, + .type_of_service = input->flow.ip4_flow.tos, + .next_proto_id = input->flow.ip4_flow.proto, }; - attributes->l4.udp.hdr = (struct udp_hdr){ - .src_port = input->flow.udp4_flow.src_port, - .dst_port = input->flow.udp4_flow.dst_port, + attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){ + .src_addr = mask->ipv4_mask.src_ip, + .dst_addr = mask->ipv4_mask.dst_ip, + .time_to_live = mask->ipv4_mask.ttl, + .type_of_service = mask->ipv4_mask.tos, + .next_proto_id = mask->ipv4_mask.proto, }; attributes->items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &attributes->l3, - .mask = &attributes->l3, + .mask = &attributes->l3_mask, + }; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + attributes->l3.ipv6.hdr = (struct ipv6_hdr){ + .hop_limits = input->flow.ipv6_flow.hop_limits, + .proto = input->flow.ipv6_flow.proto, + }; + + memcpy(attributes->l3.ipv6.hdr.src_addr, + input->flow.ipv6_flow.src_ip, + RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); + memcpy(attributes->l3.ipv6.hdr.dst_addr, + input->flow.ipv6_flow.dst_ip, + RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); + memcpy(attributes->l3_mask.ipv6.hdr.src_addr, + mask->ipv6_mask.src_ip, + RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); + memcpy(attributes->l3_mask.ipv6.hdr.dst_addr, + mask->ipv6_mask.dst_ip, + RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); + attributes->items[1] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .spec = &attributes->l3, + .mask = &attributes->l3_mask, + }; + break; + default: + DRV_LOG(ERR, "port %u invalid flow type%d", + dev->data->port_id, fdir_filter->input.flow_type); + rte_errno = ENOTSUP; + return -rte_errno; + } + /* Handle L4. 
*/ + switch (fdir_filter->input.flow_type) { + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + attributes->l4.udp.hdr = (struct udp_hdr){ + .src_port = input->flow.udp4_flow.src_port, + .dst_port = input->flow.udp4_flow.dst_port, + }; + attributes->l4_mask.udp.hdr = (struct udp_hdr){ + .src_port = mask->src_port_mask, + .dst_port = mask->dst_port_mask, }; attributes->items[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &attributes->l4, - .mask = &attributes->l4, + .mask = &attributes->l4_mask, }; break; case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: - attributes->l3.ipv4.hdr = (struct ipv4_hdr){ - .src_addr = input->flow.tcp4_flow.ip.src_ip, - .dst_addr = input->flow.tcp4_flow.ip.dst_ip, - .time_to_live = input->flow.tcp4_flow.ip.ttl, - .type_of_service = input->flow.tcp4_flow.ip.tos, - .next_proto_id = input->flow.tcp4_flow.ip.proto, - }; attributes->l4.tcp.hdr = (struct tcp_hdr){ .src_port = input->flow.tcp4_flow.src_port, .dst_port = input->flow.tcp4_flow.dst_port, }; - attributes->items[1] = (struct rte_flow_item){ - .type = RTE_FLOW_ITEM_TYPE_IPV4, - .spec = &attributes->l3, - .mask = &attributes->l3, + attributes->l4_mask.tcp.hdr = (struct tcp_hdr){ + .src_port = mask->src_port_mask, + .dst_port = mask->dst_port_mask, }; attributes->items[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_TCP, .spec = &attributes->l4, - .mask = &attributes->l4, - }; - break; - case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: - attributes->l3.ipv4.hdr = (struct ipv4_hdr){ - .src_addr = input->flow.ip4_flow.src_ip, - .dst_addr = input->flow.ip4_flow.dst_ip, - .time_to_live = input->flow.ip4_flow.ttl, - .type_of_service = input->flow.ip4_flow.tos, - .next_proto_id = input->flow.ip4_flow.proto, - }; - attributes->items[1] = (struct rte_flow_item){ - .type = RTE_FLOW_ITEM_TYPE_IPV4, - .spec = &attributes->l3, - .mask = &attributes->l3, + .mask = &attributes->l4_mask, }; break; case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: - attributes->l3.ipv6.hdr = (struct ipv6_hdr){ - .hop_limits = input->flow.udp6_flow.ip.hop_limits, - .proto = input->flow.udp6_flow.ip.proto, - }; - memcpy(attributes->l3.ipv6.hdr.src_addr, - input->flow.udp6_flow.ip.src_ip, - RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); - memcpy(attributes->l3.ipv6.hdr.dst_addr, - input->flow.udp6_flow.ip.dst_ip, - RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); attributes->l4.udp.hdr = (struct udp_hdr){ .src_port = input->flow.udp6_flow.src_port, .dst_port = input->flow.udp6_flow.dst_port, }; - attributes->items[1] = (struct rte_flow_item){ - .type = RTE_FLOW_ITEM_TYPE_IPV6, - .spec = &attributes->l3, - .mask = &attributes->l3, + attributes->l4_mask.udp.hdr = (struct udp_hdr){ + .src_port = mask->src_port_mask, + .dst_port = mask->dst_port_mask, }; attributes->items[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &attributes->l4, - .mask = &attributes->l4, + .mask = &attributes->l4_mask, }; break; case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: - attributes->l3.ipv6.hdr = (struct ipv6_hdr){ - .hop_limits = input->flow.tcp6_flow.ip.hop_limits, - .proto = input->flow.tcp6_flow.ip.proto, - }; - memcpy(attributes->l3.ipv6.hdr.src_addr, - input->flow.tcp6_flow.ip.src_ip, - RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); - memcpy(attributes->l3.ipv6.hdr.dst_addr, - input->flow.tcp6_flow.ip.dst_ip, - RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); attributes->l4.tcp.hdr = (struct tcp_hdr){ .src_port = input->flow.tcp6_flow.src_port, .dst_port = input->flow.tcp6_flow.dst_port, }; - attributes->items[1] = (struct rte_flow_item){ - .type = RTE_FLOW_ITEM_TYPE_IPV6, - .spec = 
&attributes->l3, - .mask = &attributes->l3, + attributes->l4_mask.tcp.hdr = (struct tcp_hdr){ + .src_port = mask->src_port_mask, + .dst_port = mask->dst_port_mask, }; attributes->items[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_TCP, .spec = &attributes->l4, - .mask = &attributes->l4, + .mask = &attributes->l4_mask, }; break; + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: - attributes->l3.ipv6.hdr = (struct ipv6_hdr){ - .hop_limits = input->flow.ipv6_flow.hop_limits, - .proto = input->flow.ipv6_flow.proto, - }; - memcpy(attributes->l3.ipv6.hdr.src_addr, - input->flow.ipv6_flow.src_ip, - RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); - memcpy(attributes->l3.ipv6.hdr.dst_addr, - input->flow.ipv6_flow.dst_ip, - RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); - attributes->items[1] = (struct rte_flow_item){ - .type = RTE_FLOW_ITEM_TYPE_IPV6, - .spec = &attributes->l3, - .mask = &attributes->l3, - }; break; default: - ERROR("invalid flow type%d", - fdir_filter->input.flow_type); - return ENOTSUP; + DRV_LOG(ERR, "port %u invalid flow type %d", + dev->data->port_id, fdir_filter->input.flow_type); + rte_errno = ENOTSUP; + return -rte_errno; } return 0; } @@ -2745,18 +3635,19 @@ priv_fdir_filter_convert(struct priv *priv, /** * Add new flow director filter and store it in list. * - * @param priv - * Private structure. + * @param dev - * Pointer to Ethernet device. * @param fdir_filter * Flow director filter to add. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_fdir_filter_add(struct priv *priv, +mlx5_fdir_filter_add(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { + struct priv *priv = dev->data->dev_private; struct mlx5_fdir attributes = { .attr.group = 0, .l2_mask = { @@ -2765,181 +3656,96 @@ priv_fdir_filter_add(struct priv *priv, .type = 0, }, }; - struct mlx5_flow_parse parser = { - .layer = HASH_RXQ_ETH, - }; struct rte_flow_error error; struct rte_flow *flow; int ret; - ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes); + ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); if (ret) - return -ret; - ret = priv_flow_convert(priv, &attributes.attr, attributes.items, - attributes.actions, &error, &parser); - if (ret) - return -ret; - flow = priv_flow_create(priv, - &priv->flows, - &attributes.attr, - attributes.items, - attributes.actions, - &error); + return ret; + flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr, + attributes.items, attributes.actions, + &error); if (flow) { - DEBUG("FDIR created %p", (void *)flow); + DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id, + (void *)flow); return 0; } - return ENOTSUP; + return -rte_errno; } /** * Delete specific filter. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param fdir_filter * Filter to be deleted. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set.
*/ static int -priv_fdir_filter_delete(struct priv *priv, - const struct rte_eth_fdir_filter *fdir_filter) +mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused, + const struct rte_eth_fdir_filter *fdir_filter + __rte_unused) { - struct mlx5_fdir attributes = { - .attr.group = 0, - }; - struct mlx5_flow_parse parser = { - .create = 1, - .layer = HASH_RXQ_ETH, - }; - struct rte_flow_error error; - struct rte_flow *flow; - unsigned int i; - int ret; - - ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes); - if (ret) - return -ret; - ret = priv_flow_convert(priv, &attributes.attr, attributes.items, - attributes.actions, &error, &parser); - if (ret) - goto exit; - /* - * Special case for drop action which is only set in the - * specifications when the flow is created. In this situation the - * drop specification is missing. - */ - if (parser.drop) { - struct ibv_flow_spec_action_drop *drop; - - drop = (void *)((uintptr_t)parser.queue[HASH_RXQ_ETH].ibv_attr + - parser.queue[HASH_RXQ_ETH].offset); - *drop = (struct ibv_flow_spec_action_drop){ - .type = IBV_FLOW_SPEC_ACTION_DROP, - .size = sizeof(struct ibv_flow_spec_action_drop), - }; - parser.queue[HASH_RXQ_ETH].ibv_attr->num_of_specs++; - } - TAILQ_FOREACH(flow, &priv->flows, next) { - struct ibv_flow_attr *attr; - struct ibv_spec_header *attr_h; - void *spec; - struct ibv_flow_attr *flow_attr; - struct ibv_spec_header *flow_h; - void *flow_spec; - unsigned int specs_n; - - attr = parser.queue[HASH_RXQ_ETH].ibv_attr; - flow_attr = flow->frxq[HASH_RXQ_ETH].ibv_attr; - /* Compare first the attributes. */ - if (memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr))) - continue; - if (attr->num_of_specs == 0) - continue; - spec = (void *)((uintptr_t)attr + - sizeof(struct ibv_flow_attr)); - flow_spec = (void *)((uintptr_t)flow_attr + - sizeof(struct ibv_flow_attr)); - specs_n = RTE_MIN(attr->num_of_specs, flow_attr->num_of_specs); - for (i = 0; i != specs_n; ++i) { - attr_h = spec; - flow_h = flow_spec; - if (memcmp(spec, flow_spec, - RTE_MIN(attr_h->size, flow_h->size))) - goto wrong_flow; - spec = (void *)((uintptr_t)spec + attr_h->size); - flow_spec = (void *)((uintptr_t)flow_spec + - flow_h->size); - } - /* At this point, the flow match. */ - break; -wrong_flow: - /* The flow does not match. */ - continue; - } - if (flow) - priv_flow_destroy(priv, &priv->flows, flow); -exit: - for (i = 0; i != hash_rxq_init_n; ++i) { - if (parser.queue[i].ibv_attr) - rte_free(parser.queue[i].ibv_attr); - } - return -ret; + rte_errno = ENOTSUP; + return -rte_errno; } /** * Update queue for specific filter. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param fdir_filter * Filter to be updated. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_fdir_filter_update(struct priv *priv, +mlx5_fdir_filter_update(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { int ret; - ret = priv_fdir_filter_delete(priv, fdir_filter); + ret = mlx5_fdir_filter_delete(dev, fdir_filter); if (ret) return ret; - ret = priv_fdir_filter_add(priv, fdir_filter); - return ret; + return mlx5_fdir_filter_add(dev, fdir_filter); } /** * Flush all filters. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. 
*/ static void -priv_fdir_filter_flush(struct priv *priv) +mlx5_fdir_filter_flush(struct rte_eth_dev *dev) { - priv_flow_flush(priv, &priv->flows); + struct priv *priv = dev->data->dev_private; + + mlx5_flow_list_flush(dev, &priv->flows); } /** * Get flow director information. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] fdir_info * Resulting flow director information. */ static void -priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info) +mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) { struct rte_eth_fdir_masks *mask = - &priv->dev->data->dev_conf.fdir_conf.mask; + &dev->data->dev_conf.fdir_conf.mask; - fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode; + fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; fdir_info->guarant_spc = 0; rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); fdir_info->max_flexpayload = 0; @@ -2953,54 +3759,52 @@ priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info) /** * Deal with flow director operations. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param filter_op * Operation to perform. * @param arg * Pointer to operation-specific structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg) +mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, + void *arg) { enum rte_fdir_mode fdir_mode = - priv->dev->data->dev_conf.fdir_conf.mode; - int ret = 0; + dev->data->dev_conf.fdir_conf.mode; if (filter_op == RTE_ETH_FILTER_NOP) return 0; if (fdir_mode != RTE_FDIR_MODE_PERFECT && fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { - ERROR("%p: flow director mode %d not supported", - (void *)priv, fdir_mode); - return EINVAL; + DRV_LOG(ERR, "port %u flow director mode %d not supported", + dev->data->port_id, fdir_mode); + rte_errno = EINVAL; + return -rte_errno; } switch (filter_op) { case RTE_ETH_FILTER_ADD: - ret = priv_fdir_filter_add(priv, arg); - break; + return mlx5_fdir_filter_add(dev, arg); case RTE_ETH_FILTER_UPDATE: - ret = priv_fdir_filter_update(priv, arg); - break; + return mlx5_fdir_filter_update(dev, arg); case RTE_ETH_FILTER_DELETE: - ret = priv_fdir_filter_delete(priv, arg); - break; + return mlx5_fdir_filter_delete(dev, arg); case RTE_ETH_FILTER_FLUSH: - priv_fdir_filter_flush(priv); + mlx5_fdir_filter_flush(dev); break; case RTE_ETH_FILTER_INFO: - priv_fdir_info_get(priv, arg); + mlx5_fdir_info_get(dev, arg); break; default: - DEBUG("%p: unknown operation %u", (void *)priv, - filter_op); - ret = EINVAL; - break; + DRV_LOG(DEBUG, "port %u unknown operation %u", + dev->data->port_id, filter_op); + rte_errno = EINVAL; + return -rte_errno; } - return ret; + return 0; } /** @@ -3016,7 +3820,7 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg) * Pointer to operation-specific structure. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
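As a usage aside, a hedged sketch of the legacy filter-API call that lands in this dispatch path. The port id, addresses and queue are made up; per the mode check above, the port must be configured with fdir_conf.mode set to RTE_FDIR_MODE_PERFECT (or PERFECT_MAC_VLAN), and fdir fields are conventionally big-endian.

	#include <rte_ethdev.h>
	#include <rte_ip.h>
	#include <rte_byteorder.h>

	/* Hypothetical rule: IPv4/UDP 10.0.0.1 -> 10.0.0.2 steered to Rx queue 3. */
	static int
	add_fdir_rule(uint16_t port_id)
	{
		struct rte_eth_fdir_filter filter = {
			.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
			.input.flow.udp4_flow.ip.src_ip =
				rte_cpu_to_be_32(IPv4(10, 0, 0, 1)),
			.input.flow.udp4_flow.ip.dst_ip =
				rte_cpu_to_be_32(IPv4(10, 0, 0, 2)),
			.action.rx_queue = 3,
			.action.behavior = RTE_ETH_FDIR_ACCEPT,
		};

		return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
					       RTE_ETH_FILTER_ADD, &filter);
	}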
*/ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, @@ -3024,24 +3828,21 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg) { - int ret = EINVAL; - struct priv *priv = dev->data->dev_private; - switch (filter_type) { case RTE_ETH_FILTER_GENERIC: - if (filter_op != RTE_ETH_FILTER_GET) - return -EINVAL; + if (filter_op != RTE_ETH_FILTER_GET) { + rte_errno = EINVAL; + return -rte_errno; + } *(const void **)arg = &mlx5_flow_ops; return 0; case RTE_ETH_FILTER_FDIR: - priv_lock(priv); - ret = priv_fdir_ctrl_func(priv, filter_op, arg); - priv_unlock(priv); - break; + return mlx5_fdir_ctrl_func(dev, filter_op, arg); default: - ERROR("%p: filter type (%d) not supported", - (void *)dev, filter_type); - break; + DRV_LOG(ERR, "port %u filter type (%d) not supported", + dev->data->port_id, filter_type); + rte_errno = ENOTSUP; + return -rte_errno; } - return -ret; + return 0; } diff --git a/drivers/net/mlx5/mlx5_glue.c b/drivers/net/mlx5/mlx5_glue.c index 1c4396ad..84f9492a 100644 --- a/drivers/net/mlx5/mlx5_glue.c +++ b/drivers/net/mlx5/mlx5_glue.c @@ -1,12 +1,19 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2018 6WIND S.A. - * Copyright 2018 Mellanox Technologies, Ltd. + * Copyright 2018 Mellanox Technologies, Ltd */ #include +#include #include #include +/* + * Not needed by this file; included to work around the lack of off_t + * definition for mlx5dv.h with unpatched rdma-core versions. + */ +#include + /* Verbs headers do not support -pedantic. */ #ifdef PEDANTIC #pragma GCC diagnostic ignored "-Wpedantic" @@ -17,6 +24,8 @@ #pragma GCC diagnostic error "-Wpedantic" #endif +#include + #include "mlx5_autoconf.h" #include "mlx5_glue.h" @@ -287,6 +296,21 @@ mlx5_glue_dv_create_cq(struct ibv_context *context, return mlx5dv_create_cq(context, cq_attr, mlx5_cq_attr); } +static struct ibv_wq * +mlx5_glue_dv_create_wq(struct ibv_context *context, + struct ibv_wq_init_attr *wq_attr, + struct mlx5dv_wq_init_attr *mlx5_wq_attr) +{ +#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + (void)context; + (void)wq_attr; + (void)mlx5_wq_attr; + return NULL; +#else + return mlx5dv_create_wq(context, wq_attr, mlx5_wq_attr); +#endif +} + static int mlx5_glue_dv_query_device(struct ibv_context *ctx, struct mlx5dv_context *attrs_out) @@ -307,6 +331,22 @@ mlx5_glue_dv_init_obj(struct mlx5dv_obj *obj, uint64_t obj_type) return mlx5dv_init_obj(obj, obj_type); } +static struct ibv_qp * +mlx5_glue_dv_create_qp(struct ibv_context *context, + struct ibv_qp_init_attr_ex *qp_init_attr_ex, + struct mlx5dv_qp_init_attr *dv_qp_init_attr) +{ +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + return mlx5dv_create_qp(context, qp_init_attr_ex, dv_qp_init_attr); +#else + (void)context; + (void)qp_init_attr_ex; + (void)dv_qp_init_attr; + return NULL; +#endif +} + +alignas(RTE_CACHE_LINE_SIZE) const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){ .version = MLX5_GLUE_VERSION, .fork_init = mlx5_glue_fork_init, @@ -347,7 +387,9 @@ const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){ .port_state_str = mlx5_glue_port_state_str, .cq_ex_to_cq = mlx5_glue_cq_ex_to_cq, .dv_create_cq = mlx5_glue_dv_create_cq, + .dv_create_wq = mlx5_glue_dv_create_wq, .dv_query_device = mlx5_glue_dv_query_device, .dv_set_context_attr = mlx5_glue_dv_set_context_attr, .dv_init_obj = mlx5_glue_dv_init_obj, + .dv_create_qp = mlx5_glue_dv_create_qp, }; diff --git a/drivers/net/mlx5/mlx5_glue.h b/drivers/net/mlx5/mlx5_glue.h index b5efee3b..e584d367 100644 --- a/drivers/net/mlx5/mlx5_glue.h +++ 
b/drivers/net/mlx5/mlx5_glue.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2018 6WIND S.A. - * Copyright 2018 Mellanox Technologies, Ltd. + * Copyright 2018 Mellanox Technologies, Ltd */ #ifndef MLX5_GLUE_H_ @@ -31,6 +31,14 @@ struct ibv_counter_set_init_attr; struct ibv_query_counter_set_attr; #endif +#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT +struct mlx5dv_qp_init_attr; +#endif + +#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT +struct mlx5dv_wq_init_attr; +#endif + /* LIB_GLUE_VERSION must be updated every time this structure is modified. */ struct mlx5_glue { const char *version; @@ -100,12 +108,20 @@ struct mlx5_glue { (struct ibv_context *context, struct ibv_cq_init_attr_ex *cq_attr, struct mlx5dv_cq_init_attr *mlx5_cq_attr); + struct ibv_wq *(*dv_create_wq) + (struct ibv_context *context, + struct ibv_wq_init_attr *wq_attr, + struct mlx5dv_wq_init_attr *mlx5_wq_attr); int (*dv_query_device)(struct ibv_context *ctx_in, struct mlx5dv_context *attrs_out); int (*dv_set_context_attr)(struct ibv_context *ibv_ctx, enum mlx5dv_set_ctx_attr_type type, void *attr); int (*dv_init_obj)(struct mlx5dv_obj *obj, uint64_t obj_type); + struct ibv_qp *(*dv_create_qp) + (struct ibv_context *context, + struct ibv_qp_init_attr_ex *qp_init_attr_ex, + struct mlx5dv_qp_init_attr *dv_qp_init_attr); }; const struct mlx5_glue *mlx5_glue; diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index e8a8d459..12ee37f5 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #include @@ -35,44 +35,52 @@ /** * Get MAC address by querying netdevice. * - * @param[in] priv - * struct priv for the requested device. + * @param[in] dev + * Pointer to Ethernet device. * @param[out] mac * MAC address output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]) +mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]) { struct ifreq request; + int ret; - if (priv_ifreq(priv, SIOCGIFHWADDR, &request)) - return -1; + ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request, 0); + if (ret) + return ret; memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN); return 0; } /** - * DPDK callback to remove a MAC address. + * Remove a MAC address from the internal array. * * @param dev * Pointer to Ethernet device structure. * @param index * MAC address index. */ -void -mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +static void +mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) { + struct priv *priv = dev->data->dev_private; + const int vf = priv->config.vf; + assert(index < MLX5_MAX_MAC_ADDRESSES); + if (is_zero_ether_addr(&dev->data->mac_addrs[index])) + return; + if (vf) + mlx5_nl_mac_addr_remove(dev, &dev->data->mac_addrs[index], + index); memset(&dev->data->mac_addrs[index], 0, sizeof(struct ether_addr)); - if (!dev->data->promiscuous) - mlx5_traffic_restart(dev); } /** - * DPDK callback to add a MAC address. + * Adds a MAC address to the internal array. * * @param dev * Pointer to Ethernet device structure. @@ -80,21 +88,23 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) * MAC address to register. * @param index * MAC address index. 
- * @param vmdq - * VMDq pool index to associate address with (ignored). * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -int -mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, - uint32_t index, uint32_t vmdq) +static int +mlx5_internal_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index) { + struct priv *priv = dev->data->dev_private; + const int vf = priv->config.vf; unsigned int i; - int ret = 0; - (void)vmdq; assert(index < MLX5_MAX_MAC_ADDRESSES); + if (is_zero_ether_addr(mac)) { + rte_errno = EINVAL; + return -rte_errno; + } /* First, make sure this address isn't already configured. */ for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) { /* Skip this index, it's going to be reconfigured. */ @@ -103,12 +113,74 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, if (memcmp(&dev->data->mac_addrs[i], mac, sizeof(*mac))) continue; /* Address already configured elsewhere, return with error. */ - return EADDRINUSE; + rte_errno = EADDRINUSE; + return -rte_errno; + } + if (vf) { + int ret = mlx5_nl_mac_addr_add(dev, mac, index); + + if (ret) + return ret; } dev->data->mac_addrs[index] = *mac; + return 0; +} + +/** + * DPDK callback to remove a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param index + * MAC address index. + */ +void +mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + int ret; + + if (index >= MLX5_MAX_UC_MAC_ADDRESSES) + return; + mlx5_internal_mac_addr_remove(dev, index); + if (!dev->data->promiscuous) { + ret = mlx5_traffic_restart(dev); + if (ret) + DRV_LOG(ERR, "port %u cannot restart traffic: %s", + dev->data->port_id, strerror(rte_errno)); + } +} + +/** + * DPDK callback to add a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + * @param index + * MAC address index. + * @param vmdq + * VMDq pool index to associate address with (ignored). + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index, uint32_t vmdq __rte_unused) +{ + int ret; + + if (index >= MLX5_MAX_UC_MAC_ADDRESSES) { + rte_errno = EINVAL; + return -rte_errno; + } + ret = mlx5_internal_mac_addr_add(dev, mac, index); + if (ret < 0) + return ret; if (!dev->data->promiscuous) - mlx5_traffic_restart(dev); - return ret; + return mlx5_traffic_restart(dev); + return 0; } /** @@ -118,10 +190,43 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, * Pointer to Ethernet device structure. * @param mac_addr * MAC address to register. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -void +int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { - DEBUG("%p: setting primary MAC address", (void *)dev); - mlx5_mac_addr_add(dev, mac_addr, 0, 0); + DRV_LOG(DEBUG, "port %u setting primary MAC address", + dev->data->port_id); + return mlx5_mac_addr_add(dev, mac_addr, 0, 0); +} + +/** + * DPDK callback to set multicast addresses list. 
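A brief, hypothetical caller for the handler that follows; rte_eth_dev_set_mc_addr_list() replaces the whole multicast list, which is why the implementation below first clears indices MLX5_MAX_UC_MAC_ADDRESSES and up before re-adding entries.

	#include <rte_ethdev.h>
	#include <rte_ether.h>

	/* Hypothetical: subscribe the port to two multicast groups. */
	static int
	join_groups(uint16_t port_id)
	{
		struct ether_addr mc[2] = {
			{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
			{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
		};

		/* Passing a zero-length list would unsubscribe everything. */
		return rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
	}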
+ * + * @see rte_eth_dev_set_mc_addr_list() + */ +int +mlx5_set_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addr_set, uint32_t nb_mc_addr) +{ + uint32_t i; + int ret; + + if (nb_mc_addr >= MLX5_MAX_MC_MAC_ADDRESSES) { + rte_errno = ENOSPC; + return -rte_errno; + } + for (i = MLX5_MAX_UC_MAC_ADDRESSES; i != MLX5_MAX_MAC_ADDRESSES; ++i) + mlx5_internal_mac_addr_remove(dev, i); + i = MLX5_MAX_UC_MAC_ADDRESSES; + while (nb_mc_addr--) { + ret = mlx5_internal_mac_addr_add(dev, mc_addr_set++, i++); + if (ret) + return ret; + } + if (!dev->data->promiscuous) + return mlx5_traffic_restart(dev); + return 0; } diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 857dfcd8..1d1bcb5f 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2016 6WIND S.A. - * Copyright 2016 Mellanox. + * Copyright 2016 Mellanox Technologies, Ltd */ #ifdef PEDANTIC @@ -13,362 +13,1174 @@ #include #include +#include #include "mlx5.h" +#include "mlx5_mr.h" #include "mlx5_rxtx.h" #include "mlx5_glue.h" -struct mlx5_check_mempool_data { +struct mr_find_contig_memsegs_data { + uintptr_t addr; + uintptr_t start; + uintptr_t end; + const struct rte_memseg_list *msl; +}; + +struct mr_update_mp_data { + struct rte_eth_dev *dev; + struct mlx5_mr_ctrl *mr_ctrl; int ret; - char *start; - char *end; }; -/* Called by mlx5_check_mempool() when iterating the memory chunks. */ -static void -mlx5_check_mempool_cb(struct rte_mempool *mp, - void *opaque, struct rte_mempool_memhdr *memhdr, - unsigned int mem_idx) +/** + * Expand B-tree table to a given size. Can't be called with holding + * memory_hotplug_lock or priv->mr.rwlock due to rte_realloc(). + * + * @param bt + * Pointer to B-tree structure. + * @param n + * Number of entries for expansion. + * + * @return + * 0 on success, -1 on failure. + */ +static int +mr_btree_expand(struct mlx5_mr_btree *bt, int n) +{ + void *mem; + int ret = 0; + + if (n <= bt->size) + return ret; + /* + * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is + * used inside if there's no room to expand. Because this is a quite + * rare case and a part of very slow path, it is very acceptable. + * Initially cache_bh[] will be given practically enough space and once + * it is expanded, expansion wouldn't be needed again ever. + */ + mem = rte_realloc(bt->table, n * sizeof(struct mlx5_mr_cache), 0); + if (mem == NULL) { + /* Not an error, B-tree search will be skipped. */ + DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table", + (void *)bt); + ret = -1; + } else { + DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n); + bt->table = mem; + bt->size = n; + } + return ret; +} + +/** + * Look up LKey from given B-tree lookup table, store the last index and return + * searched LKey. + * + * @param bt + * Pointer to B-tree structure. + * @param[out] idx + * Pointer to index. Even on search failure, returns index where it stops + * searching so that index can be used when inserting a new entry. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static uint32_t +mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr) { - struct mlx5_check_mempool_data *data = opaque; + struct mlx5_mr_cache *lkp_tbl; + uint16_t n; + uint16_t base = 0; - (void)mp; - (void)mem_idx; + assert(bt != NULL); + lkp_tbl = *bt->table; + n = bt->len; + /* First entry must be NULL for comparison. 
*/ + assert(bt->len > 0 || (lkp_tbl[0].start == 0 && + lkp_tbl[0].lkey == UINT32_MAX)); + /* Binary search. */ + do { + register uint16_t delta = n >> 1; - /* It already failed, skip the next chunks. */ - if (data->ret != 0) - return; - /* It is the first chunk. */ - if (data->start == NULL && data->end == NULL) { - data->start = memhdr->addr; - data->end = data->start + memhdr->len; - return; + if (addr < lkp_tbl[base + delta].start) { + n = delta; + } else { + base += delta; + n -= delta; + } + } while (n > 1); + assert(addr >= lkp_tbl[base].start); + *idx = base; + if (addr < lkp_tbl[base].end) + return lkp_tbl[base].lkey; + /* Not found. */ + return UINT32_MAX; +} + +/** + * Insert an entry to B-tree lookup table. + * + * @param bt + * Pointer to B-tree structure. + * @param entry + * Pointer to new entry to insert. + * + * @return + * 0 on success, -1 on failure. + */ +static int +mr_btree_insert(struct mlx5_mr_btree *bt, struct mlx5_mr_cache *entry) +{ + struct mlx5_mr_cache *lkp_tbl; + uint16_t idx = 0; + size_t shift; + + assert(bt != NULL); + assert(bt->len <= bt->size); + assert(bt->len > 0); + lkp_tbl = *bt->table; + /* Find out the slot for insertion. */ + if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) { + DRV_LOG(DEBUG, + "abort insertion to B-tree(%p): already exist at" + " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); + /* Already exist, return. */ + return 0; } - if (data->end == memhdr->addr) { - data->end += memhdr->len; - return; + /* If table is full, return error. */ + if (unlikely(bt->len == bt->size)) { + bt->overflow = 1; + return -1; + } + /* Insert entry. */ + ++idx; + shift = (bt->len - idx) * sizeof(struct mlx5_mr_cache); + if (shift) + memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift); + lkp_tbl[idx] = *entry; + bt->len++; + DRV_LOG(DEBUG, + "inserted B-tree(%p)[%u]," + " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); + return 0; +} + +/** + * Initialize B-tree and allocate memory for lookup table. + * + * @param bt + * Pointer to B-tree structure. + * @param n + * Number of entries to allocate. + * @param socket + * NUMA socket on which memory must be allocated. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket) +{ + if (bt == NULL) { + rte_errno = EINVAL; + return -rte_errno; } - if (data->start == (char *)memhdr->addr + memhdr->len) { - data->start -= memhdr->len; + assert(!bt->table && !bt->size); + memset(bt, 0, sizeof(*bt)); + bt->table = rte_calloc_socket("B-tree table", + n, sizeof(struct mlx5_mr_cache), + 0, socket); + if (bt->table == NULL) { + rte_errno = ENOMEM; + DEBUG("failed to allocate memory for btree cache on socket %d", + socket); + return -rte_errno; + } + bt->size = n; + /* First entry must be NULL for binary search. */ + (*bt->table)[bt->len++] = (struct mlx5_mr_cache) { + .lkey = UINT32_MAX, + }; + DEBUG("initialized B-tree %p with table %p", + (void *)bt, (void *)bt->table); + return 0; +} + +/** + * Free B-tree resources. + * + * @param bt + * Pointer to B-tree structure. + */ +void +mlx5_mr_btree_free(struct mlx5_mr_btree *bt) +{ + if (bt == NULL) return; + DEBUG("freeing B-tree %p with table %p", + (void *)bt, (void *)bt->table); + rte_free(bt->table); + memset(bt, 0, sizeof(*bt)); +} + +/** + * Dump all the entries in a B-tree + * + * @param bt + * Pointer to B-tree structure. 
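To make the lookup convention concrete, here is a self-contained sketch (not driver code) of the same binary search used by mr_btree_lookup(): entries are sorted by start, index 0 holds a sentinel with lkey=UINT32_MAX, and a hit requires the address to fall inside the half-open interval [start, end).

	#include <stdint.h>
	#include <stdio.h>

	struct cache { uintptr_t start, end; uint32_t lkey; };

	/* Mirror of mr_btree_lookup(): returns lkey, or UINT32_MAX on miss. */
	static uint32_t
	lookup(const struct cache *tbl, uint16_t len, uintptr_t addr)
	{
		uint16_t n = len, base = 0;

		do {
			uint16_t delta = n >> 1;

			if (addr < tbl[base + delta].start)
				n = delta;	/* Go left. */
			else {
				base += delta;	/* Go right. */
				n -= delta;
			}
		} while (n > 1);
		return addr < tbl[base].end ? tbl[base].lkey : UINT32_MAX;
	}

	int
	main(void)
	{
		/* Index 0 is the sentinel: start=0, lkey=UINT32_MAX. */
		const struct cache tbl[] = {
			{ 0, 0, UINT32_MAX },
			{ 0x1000, 0x3000, 0x11 },
			{ 0x8000, 0x9000, 0x22 },
		};

		printf("0x%x\n", lookup(tbl, 3, 0x2fff)); /* 0x11 */
		printf("0x%x\n", lookup(tbl, 3, 0x3000)); /* miss: end is exclusive */
		return 0;
	}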
+ */ +void +mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused) +{ +#ifndef NDEBUG + int idx; + struct mlx5_mr_cache *lkp_tbl; + + if (bt == NULL) + return; + lkp_tbl = *bt->table; + for (idx = 0; idx < bt->len; ++idx) { + struct mlx5_mr_cache *entry = &lkp_tbl[idx]; + + DEBUG("B-tree(%p)[%u]," + " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); } - /* Error, mempool is not virtually contiguous. */ - data->ret = -1; +#endif } /** - * Check if a mempool can be used: it must be virtually contiguous. + * Find virtually contiguous memory chunk in a given MR. * - * @param[in] mp - * Pointer to memory pool. - * @param[out] start - * Pointer to the start address of the mempool virtual memory area - * @param[out] end - * Pointer to the end address of the mempool virtual memory area + * @param mr + * Pointer to MR structure. + * @param[out] entry + * Pointer to returning MR cache entry. If not found, this will not be + * updated. + * @param base_idx + * Start index of the memseg bitmap. * * @return - * 0 on success (mempool is virtually contiguous), -1 on error. + * Next index to go on lookup. */ -static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, - uintptr_t *end) +static int +mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry, + int base_idx) { - struct mlx5_check_mempool_data data; + uintptr_t start = 0; + uintptr_t end = 0; + uint32_t idx = 0; - memset(&data, 0, sizeof(data)); - rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data); - *start = (uintptr_t)data.start; - *end = (uintptr_t)data.end; + for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) { + if (rte_bitmap_get(mr->ms_bmp, idx)) { + const struct rte_memseg_list *msl; + const struct rte_memseg *ms; - return data.ret; + msl = mr->msl; + ms = rte_fbarray_get(&msl->memseg_arr, + mr->ms_base_idx + idx); + assert(msl->page_sz == ms->hugepage_sz); + if (!start) + start = ms->addr_64; + end = ms->addr_64 + ms->hugepage_sz; + } else if (start) { + /* Passed the end of a fragment. */ + break; + } } + if (start) { + /* Found one chunk. */ + entry->start = start; + entry->end = end; + entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey); + } + return idx; } /** - * Register a Memory Region (MR) <-> Memory Pool (MP) association in - * txq->mp2mr[]. If mp2mr[] is full, remove an entry first. + * Insert an MR into the global B-tree cache. It may fail due to low-on-memory. + * Then, this entry will have to be searched by mr_lookup_dev_list() in + * mlx5_mr_create() on miss. * - * This function should only be called by txq_mp2mr(). + * @param dev + * Pointer to Ethernet device. + * @param mr + * Pointer to MR to insert. * - * @param priv - * Pointer to private structure. - * @param txq - * Pointer to TX queue structure. - * @param[in] mp - * Memory Pool for which a Memory Region lkey must be returned. - * @param idx - * Index of the next available entry. + * @return + * 0 on success, -1 on failure. + */ +static int +mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx5_mr *mr) +{ + struct priv *priv = dev->data->dev_private; + unsigned int n; + + DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache", + dev->data->port_id, (void *)mr); + for (n = 0; n < mr->ms_bmp_n; ) { + struct mlx5_mr_cache entry = { 0, }; + + /* Find a contiguous chunk and advance the index.
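A standalone illustration of the scan implemented by mr_find_next_chunk() above, with a plain byte array standing in for rte_bitmap: it reports the first contiguous run of set bits at or after base_idx and returns the index to resume from.

	#include <stdint.h>
	#include <stdio.h>

	static int
	next_chunk(const uint8_t *bmp, int nbits, int base_idx, int *start, int *end)
	{
		int idx, s = -1;

		for (idx = base_idx; idx < nbits; ++idx) {
			if (bmp[idx]) {		/* Page is registered. */
				if (s < 0)
					s = idx;
			} else if (s >= 0) {
				break;		/* Passed the end of a fragment. */
			}
		}
		if (s >= 0) {
			*start = s;
			*end = idx;
		}
		return idx;			/* Next index to resume from. */
	}

	int
	main(void)
	{
		const uint8_t bmp[] = { 1, 1, 0, 0, 1, 1, 1, 0 };
		int n = 0, s, e;

		while (n < 8) {
			s = e = -1;
			n = next_chunk(bmp, 8, n, &s, &e);
			if (s >= 0)
				printf("[%d, %d)\n", s, e); /* [0,2) then [4,7) */
		}
		return 0;
	}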
*/ + n = mr_find_next_chunk(mr, &entry, n); + if (!entry.end) + break; + if (mr_btree_insert(&priv->mr.cache, &entry) < 0) { + /* + * Overflowed, but the global table cannot be expanded + * because of deadlock. + */ + return -1; + } + } + return 0; +} + +/** + * Look up address in the original global MR list. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry. If no match, this will not be updated. + * @param addr + * Search key. * * @return - * mr on success, NULL on failure. + * Found MR on match, NULL otherwise. */ -struct mlx5_mr* -priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq, - struct rte_mempool *mp, unsigned int idx) +static struct mlx5_mr * +mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry, + uintptr_t addr) { - struct mlx5_txq_ctrl *txq_ctrl = - container_of(txq, struct mlx5_txq_ctrl, txq); + struct priv *priv = dev->data->dev_private; struct mlx5_mr *mr; - /* Add a new entry, register MR first. */ - DEBUG("%p: discovered new memory pool \"%s\" (%p)", - (void *)txq_ctrl, mp->name, (void *)mp); - mr = priv_mr_get(priv, mp); - if (mr == NULL) { - if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - DEBUG("Using unregistered mempool 0x%p(%s) in secondary process," - " please create mempool before rte_eth_dev_start()", - (void *)mp, mp->name); - return NULL; + /* Iterate all the existing MRs. */ + LIST_FOREACH(mr, &priv->mr.mr_list, mr) { + unsigned int n; + + if (mr->ms_n == 0) + continue; + for (n = 0; n < mr->ms_bmp_n; ) { + struct mlx5_mr_cache ret = { 0, }; + + n = mr_find_next_chunk(mr, &ret, n); + if (addr >= ret.start && addr < ret.end) { + /* Found. */ + *entry = ret; + return mr; + } } - mr = priv_mr_new(priv, mp); - } - if (unlikely(mr == NULL)) { - DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.", - (void *)txq_ctrl); - return NULL; - } - if (unlikely(idx == RTE_DIM(txq->mp2mr))) { - /* Table is full, remove oldest entry. */ - DEBUG("%p: MR <-> MP table full, dropping oldest entry.", - (void *)txq_ctrl); - --idx; - priv_mr_release(priv, txq->mp2mr[0]); - memmove(&txq->mp2mr[0], &txq->mp2mr[1], - (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0]))); } - /* Store the new entry. */ - txq_ctrl->txq.mp2mr[idx] = mr; - DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, - (void *)txq_ctrl, mp->name, (void *)mp, - txq_ctrl->txq.mp2mr[idx]->lkey); - return mr; + return NULL; } /** - * Register a Memory Region (MR) <-> Memory Pool (MP) association in - * txq->mp2mr[]. If mp2mr[] is full, remove an entry first. - * - * This function should only be called by txq_mp2mr(). + * Look up address on device. * - * @param txq - * Pointer to TX queue structure. - * @param[in] mp - * Memory Pool for which a Memory Region lkey must be returned. - * @param idx - * Index of the next available entry. + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry. If no match, this will not be updated. + * @param addr + * Search key. * * @return - * mr on success, NULL on failure. + * Searched LKey on success, UINT32_MAX on failure and rte_errno is set. 
*/ -struct mlx5_mr* -mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp, - unsigned int idx) +static uint32_t +mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry, + uintptr_t addr) { - struct mlx5_txq_ctrl *txq_ctrl = - container_of(txq, struct mlx5_txq_ctrl, txq); + struct priv *priv = dev->data->dev_private; + uint16_t idx; + uint32_t lkey = UINT32_MAX; struct mlx5_mr *mr; - priv_lock(txq_ctrl->priv); - mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx); - priv_unlock(txq_ctrl->priv); - return mr; + /* + * If the global cache has overflowed since it failed to expand the + * B-tree table, it can't have all the existing MRs. Then, the address + * has to be searched by traversing the original MR list instead, which + * is very slow path. Otherwise, the global cache is all inclusive. + */ + if (!unlikely(priv->mr.cache.overflow)) { + lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr); + if (lkey != UINT32_MAX) + *entry = (*priv->mr.cache.table)[idx]; + } else { + /* Falling back to the slowest path. */ + mr = mr_lookup_dev_list(dev, entry, addr); + if (mr != NULL) + lkey = entry->lkey; + } + assert(lkey == UINT32_MAX || (addr >= entry->start && + addr < entry->end)); + return lkey; } -struct mlx5_mp2mr_mbuf_check_data { - int ret; -}; +/** + * Free MR resources. MR lock must not be held to avoid a deadlock. rte_free() + * can raise memory free event and the callback function will spin on the lock. + * + * @param mr + * Pointer to MR to free. + */ +static void +mr_free(struct mlx5_mr *mr) +{ + if (mr == NULL) + return; + DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr); + if (mr->ibv_mr != NULL) + claim_zero(mlx5_glue->dereg_mr(mr->ibv_mr)); + if (mr->ms_bmp != NULL) + rte_bitmap_free(mr->ms_bmp); + rte_free(mr); +} /** - * Callback function for rte_mempool_obj_iter() to check whether a given - * mempool object looks like a mbuf. - * - * @param[in] mp - * The mempool pointer - * @param[in] arg - * Context data (struct txq_mp2mr_mbuf_check_data). Contains the - * return value. - * @param[in] obj - * Object address. - * @param index - * Object index, unused. + * Release resources of detached MRs having no online entry. + * + * @param dev + * Pointer to Ethernet device. */ static void -txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj, - uint32_t index __rte_unused) +mlx5_mr_garbage_collect(struct rte_eth_dev *dev) { - struct mlx5_mp2mr_mbuf_check_data *data = arg; - struct rte_mbuf *buf = obj; + struct priv *priv = dev->data->dev_private; + struct mlx5_mr *mr_next; + struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list); + /* Must be called from the primary process. */ + assert(rte_eal_process_type() == RTE_PROC_PRIMARY); /* - * Check whether mbuf structure fits element size and whether mempool - * pointer is valid. + * MR can't be freed while holding the lock because rte_free() could call + * memory free callback function. This will be a deadlock situation. */ - if (sizeof(*buf) > mp->elt_size || buf->pool != mp) - data->ret = -1; + rte_rwlock_write_lock(&priv->mr.rwlock); + /* Detach the whole free list and release it after unlocking. */ + free_list = priv->mr.mr_free_list; + LIST_INIT(&priv->mr.mr_free_list); + rte_rwlock_write_unlock(&priv->mr.rwlock); + /* Release resources. */ + mr_next = LIST_FIRST(&free_list); + while (mr_next != NULL) { + struct mlx5_mr *mr = mr_next; + + mr_next = LIST_NEXT(mr, mr); + mr_free(mr); + } +} + +/* Called during rte_memseg_contig_walk() by mlx5_mr_create().
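The detach-then-free pattern in mlx5_mr_garbage_collect() above generalizes well; a self-contained sketch using the same BSD list macros, with a pthread rwlock standing in for rte_rwlock (names are assumptions, not driver code):

	#include <sys/queue.h>
	#include <stdlib.h>
	#include <pthread.h>

	struct obj { LIST_ENTRY(obj) next; };
	LIST_HEAD(obj_list, obj);

	static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
	static struct obj_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Objects are queued onto free_list elsewhere, under the writer lock.
	 * Steal the whole list under the lock, then free outside it, so a
	 * free() callback that takes the same lock cannot deadlock. */
	static void
	garbage_collect(void)
	{
		struct obj_list snapshot;
		struct obj *o, *tmp;

		pthread_rwlock_wrlock(&lock);
		snapshot = free_list;		/* Detach the whole list... */
		LIST_INIT(&free_list);		/* ...and leave an empty one. */
		pthread_rwlock_unlock(&lock);
		for (o = LIST_FIRST(&snapshot); o != NULL; o = tmp) {
			tmp = LIST_NEXT(o, next);
			free(o);
		}
	}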
*/ +static int +mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl, + const struct rte_memseg *ms, size_t len, void *arg) +{ + struct mr_find_contig_memsegs_data *data = arg; + + if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len) + return 0; + /* Found, save it and stop walking. */ + data->start = ms->addr_64; + data->end = ms->addr_64 + len; + data->msl = msl; + return 1; } /** - * Iterator function for rte_mempool_walk() to register existing mempools and - * fill the MP to MR cache of a TX queue. + * Create a new global Memory Region (MR) for a missing virtual address. + * Register entire virtually contiguous memory chunk around the address. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry, found in the global cache or newly + * created. If failed to create one, this will not be updated. + * @param addr + * Target virtual address to register. * - * @param[in] mp - * Memory Pool to register. - * @param *arg - * Pointer to TX queue structure. + * @return + * Searched LKey on success, UINT32_MAX on failure and rte_errno is set. */ -void -mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg) +static uint32_t +mlx5_mr_create(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry, + uintptr_t addr) { - struct priv *priv = (struct priv *)arg; - struct mlx5_mp2mr_mbuf_check_data data = { - .ret = 0, + struct priv *priv = dev->data->dev_private; + struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; + const struct rte_memseg_list *msl; + const struct rte_memseg *ms; + struct mlx5_mr *mr = NULL; + size_t len; + uint32_t ms_n; + uint32_t bmp_size; + void *bmp_mem; + int ms_idx_shift = -1; + unsigned int n; + struct mr_find_contig_memsegs_data data = { + .addr = addr, }; - struct mlx5_mr *mr; + struct mr_find_contig_memsegs_data data_re; - /* Register mempool only if the first element looks like a mbuf. */ - if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 || - data.ret == -1) - return; - mr = priv_mr_get(priv, mp); - if (mr) { - priv_mr_release(priv, mr); - return; + DRV_LOG(DEBUG, "port %u creating a MR using address (%p)", + dev->data->port_id, (void *)addr); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + DRV_LOG(WARNING, + "port %u using address (%p) of unregistered mempool" + " in secondary process, please create mempool" + " before rte_eth_dev_start()", + dev->data->port_id, (void *)addr); + rte_errno = EPERM; + goto err_nolock; + } + /* + * Release detached MRs if any. This can't be called while holding either + * memory_hotplug_lock or priv->mr.rwlock. MRs on the free list have + * been detached by the memory free event but it couldn't be released + * inside the callback due to deadlock. As a result, releasing resources + * is quite opportunistic. + */ + mlx5_mr_garbage_collect(dev); + /* + * Find out a contiguous virtual address chunk in use, to which the + * given address belongs, in order to register maximum range. In the + * best case where mempools are not dynamically recreated and + * '--socket-mem' is specified as an EAL option, it is very likely to + * have only one MR(LKey) per socket and per hugepage-size even + * though the system memory is highly fragmented. + */ + if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) { + DRV_LOG(WARNING, + "port %u unable to find virtually contiguous" + " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.", + dev->data->port_id, (void *)addr); + rte_errno = ENXIO; + goto err_nolock; + } +alloc_resources: + /* Addresses must be page-aligned. */ + assert(rte_is_aligned((void *)data.start, data.msl->page_sz)); + assert(rte_is_aligned((void *)data.end, data.msl->page_sz)); + msl = data.msl; + ms = rte_mem_virt2memseg((void *)data.start, msl); + len = data.end - data.start; + assert(msl->page_sz == ms->hugepage_sz); + /* Number of memsegs in the range. */ + ms_n = len / msl->page_sz; + DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR ")," + " page_sz=0x%" PRIx64 ", ms_n=%u", + dev->data->port_id, (void *)addr, + data.start, data.end, msl->page_sz, ms_n); + /* Size of memory for bitmap. */ + bmp_size = rte_bitmap_get_memory_footprint(ms_n); + mr = rte_zmalloc_socket(NULL, + RTE_ALIGN_CEIL(sizeof(*mr), + RTE_CACHE_LINE_SIZE) + + bmp_size, + RTE_CACHE_LINE_SIZE, msl->socket_id); + if (mr == NULL) { + DEBUG("port %u unable to allocate memory for a new MR of" + " address (%p).", + dev->data->port_id, (void *)addr); + rte_errno = ENOMEM; + goto err_nolock; + } + mr->msl = msl; + /* + * Save the index of the first memseg and initialize memseg bitmap. To + * see if a memseg of ms_idx in the memseg-list is still valid, check: + * rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx) + */ + mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms); + bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE); + mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size); + if (mr->ms_bmp == NULL) { + DEBUG("port %u unable to initialize bitamp for a new MR of" + " address (%p).", + dev->data->port_id, (void *)addr); + rte_errno = EINVAL; + goto err_nolock; + } + /* + * Should recheck whether the extended contiguous chunk is still valid. + * Because memory_hotplug_lock can't be held if there's any memory + * related calls in a critical path, resource allocation above can't be + * locked. If the memory has been changed at this point, try again with + * just single page. If not, go on with the big chunk atomically from + * here. + */ + rte_rwlock_read_lock(&mcfg->memory_hotplug_lock); + data_re = data; + if (len > msl->page_sz && + !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) { + DEBUG("port %u unable to find virtually contiguous" + " chunk for address (%p)." + " rte_memseg_contig_walk() failed.", + dev->data->port_id, (void *)addr); + rte_errno = ENXIO; + goto err_memlock; + } + if (data.start != data_re.start || data.end != data_re.end) { + /* + * The extended contiguous chunk has been changed. Try again + * with single memseg instead. + */ + data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz); + data.end = data.start + msl->page_sz; + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); + mr_free(mr); + goto alloc_resources; + } + assert(data.msl == data_re.msl); + rte_rwlock_write_lock(&priv->mr.rwlock); + /* + * Check the address is really missing. If other thread already created + * one or it is not found due to overflow, abort and return. + */ + if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) { + /* + * Insert to the global cache table. It may fail due to + * low-on-memory. Then, this entry will have to be searched + * here again. 
+ */ + mr_btree_insert(&priv->mr.cache, entry); + DEBUG("port %u found MR for %p on final lookup, abort", + dev->data->port_id, (void *)addr); + rte_rwlock_write_unlock(&priv->mr.rwlock); + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); + /* + * Must be unlocked before calling rte_free() because + * mlx5_mr_mem_event_free_cb() can be called inside. + */ + mr_free(mr); + return entry->lkey; } - priv_mr_new(priv, mp); + /* + * Trim start and end addresses for verbs MR. Set bits for registering + * memsegs but exclude already registered ones. Bitmap can be + * fragmented. + */ + for (n = 0; n < ms_n; ++n) { + uintptr_t start; + struct mlx5_mr_cache ret = { 0, }; + + start = data_re.start + n * msl->page_sz; + /* Exclude memsegs already registered by other MRs. */ + if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) { + /* + * Start from the first unregistered memseg in the + * extended range. + */ + if (ms_idx_shift == -1) { + mr->ms_base_idx += n; + data.start = start; + ms_idx_shift = n; + } + data.end = start + msl->page_sz; + rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift); + ++mr->ms_n; + } + } + len = data.end - data.start; + mr->ms_bmp_n = len / msl->page_sz; + assert(ms_idx_shift + mr->ms_bmp_n <= ms_n); + /* + * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be + * called with holding the memory lock because it doesn't use + * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket() + * through mlx5_alloc_verbs_buf(). + */ + mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len, + IBV_ACCESS_LOCAL_WRITE); + if (mr->ibv_mr == NULL) { + DEBUG("port %u fail to create a verbs MR for address (%p)", + dev->data->port_id, (void *)addr); + rte_errno = EINVAL; + goto err_mrlock; + } + assert((uintptr_t)mr->ibv_mr->addr == data.start); + assert(mr->ibv_mr->length == len); + LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr); + DEBUG("port %u MR CREATED (%p) for %p:\n" + " [0x%" PRIxPTR ", 0x%" PRIxPTR ")," + " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u", + dev->data->port_id, (void *)mr, (void *)addr, + data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey), + mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n); + /* Insert to the global cache table. */ + mr_insert_dev_cache(dev, mr); + /* Fill in output data. */ + mr_lookup_dev(dev, entry, addr); + /* Lookup can't fail. */ + assert(entry->lkey != UINT32_MAX); + rte_rwlock_write_unlock(&priv->mr.rwlock); + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); + return entry->lkey; +err_mrlock: + rte_rwlock_write_unlock(&priv->mr.rwlock); +err_memlock: + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); +err_nolock: + /* + * In case of error, as this can be called in a datapath, a warning + * message per an error is preferable instead. Must be unlocked before + * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called + * inside. + */ + mr_free(mr); + return UINT32_MAX; } /** - * Register a new memory region from the mempool and store it in the memory - * region list. + * Rebuild the global B-tree cache of device from the original MR list. * - * @param priv - * Pointer to private structure. - * @param mp - * Pointer to the memory pool to register. - * @return - * The memory region on success. + * @param dev + * Pointer to Ethernet device. 
*/ -struct mlx5_mr* -priv_mr_new(struct priv *priv, struct rte_mempool *mp) +static void +mr_rebuild_dev_cache(struct rte_eth_dev *dev) { - const struct rte_memseg *ms = rte_eal_get_physmem_layout(); - uintptr_t start; - uintptr_t end; - unsigned int i; + struct priv *priv = dev->data->dev_private; struct mlx5_mr *mr; - mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id); - if (!mr) { - DEBUG("unable to configure MR, ibv_reg_mr() failed."); - return NULL; + DRV_LOG(DEBUG, "port %u rebuild dev cache[]", dev->data->port_id); + /* Flush cache to rebuild. */ + priv->mr.cache.len = 1; + priv->mr.cache.overflow = 0; + /* Iterate all the existing MRs. */ + LIST_FOREACH(mr, &priv->mr.mr_list, mr) + if (mr_insert_dev_cache(dev, mr) < 0) + return; +} + +/** + * Callback for memory free event. Iterate freed memsegs and check whether it + * belongs to an existing MR. If found, clear the bit from bitmap of MR. As a + * result, the MR would be fragmented. If it becomes empty, the MR will be freed + * later by mlx5_mr_garbage_collect(). Even if this callback is called from a + * secondary process, the garbage collector will be called in primary process + * as the secondary process can't call mlx5_mr_create(). + * + * The global cache must be rebuilt if there's any change and this event has to + * be propagated to dataplane threads to flush the local caches. + * + * @param dev + * Pointer to Ethernet device. + * @param addr + * Address of freed memory. + * @param len + * Size of freed memory. + */ +static void +mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len) +{ + struct priv *priv = dev->data->dev_private; + const struct rte_memseg_list *msl; + struct mlx5_mr *mr; + int ms_n; + int i; + int rebuild = 0; + + DEBUG("port %u free callback: addr=%p, len=%zu", + dev->data->port_id, addr, len); + msl = rte_mem_virt2memseg_list(addr); + /* addr and len must be page-aligned. */ + assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz)); + assert(len == RTE_ALIGN(len, msl->page_sz)); + ms_n = len / msl->page_sz; + rte_rwlock_write_lock(&priv->mr.rwlock); + /* Clear bits of freed memsegs from MR. */ + for (i = 0; i < ms_n; ++i) { + const struct rte_memseg *ms; + struct mlx5_mr_cache entry; + uintptr_t start; + int ms_idx; + uint32_t pos; + + /* Find MR having this memseg. */ + start = (uintptr_t)addr + i * msl->page_sz; + mr = mr_lookup_dev_list(dev, &entry, start); + if (mr == NULL) + continue; + ms = rte_mem_virt2memseg((void *)start, msl); + assert(ms != NULL); + assert(msl->page_sz == ms->hugepage_sz); + ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms); + pos = ms_idx - mr->ms_base_idx; + assert(rte_bitmap_get(mr->ms_bmp, pos)); + assert(pos < mr->ms_bmp_n); + DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p", + dev->data->port_id, (void *)mr, pos, (void *)start); + rte_bitmap_clear(mr->ms_bmp, pos); + if (--mr->ms_n == 0) { + LIST_REMOVE(mr, mr); + LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr); + DEBUG("port %u remove MR(%p) from list", + dev->data->port_id, (void *)mr); + } + /* + * MR is fragmented or will be freed. the global cache must be + * rebuilt. + */ + rebuild = 1; } - if (mlx5_check_mempool(mp, &start, &end) != 0) { - ERROR("mempool %p: not virtually contiguous", - (void *)mp); - return NULL; + if (rebuild) { + mr_rebuild_dev_cache(dev); + /* + * Flush local caches by propagating invalidation across cores. + * rte_smp_wmb() is enough to synchronize this event. 
If one of + * freed memsegs is seen by other core, that means the memseg + * has been allocated by allocator, which will come after this + * free call. Therefore, this store instruction (incrementing + * generation below) will be guaranteed to be seen by other core + * before the core sees the newly allocated memory. + */ + ++priv->mr.dev_gen; + DEBUG("broadcasting local cache flush, gen=%d", + priv->mr.dev_gen); + rte_smp_wmb(); } - DEBUG("mempool %p area start=%p end=%p size=%zu", - (void *)mp, (void *)start, (void *)end, - (size_t)(end - start)); - /* Save original addresses for exact MR lookup. */ - mr->start = start; - mr->end = end; - /* Round start and end to page boundary if found in memory segments. */ - for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) { - uintptr_t addr = (uintptr_t)ms[i].addr; - size_t len = ms[i].len; - unsigned int align = ms[i].hugepage_sz; - - if ((start > addr) && (start < addr + len)) - start = RTE_ALIGN_FLOOR(start, align); - if ((end > addr) && (end < addr + len)) - end = RTE_ALIGN_CEIL(end, align); + rte_rwlock_write_unlock(&priv->mr.rwlock); +} + +/** + * Callback for memory event. This can be called from both primary and secondary + * process. + * + * @param event_type + * Memory event type. + * @param addr + * Address of memory. + * @param len + * Size of memory. + */ +void +mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, + size_t len, void *arg __rte_unused) +{ + struct priv *priv; + struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list; + + switch (event_type) { + case RTE_MEM_EVENT_FREE: + rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); + /* Iterate all the existing mlx5 devices. */ + LIST_FOREACH(priv, dev_list, mem_event_cb) + mlx5_mr_mem_event_free_cb(ETH_DEV(priv), addr, len); + rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); + break; + case RTE_MEM_EVENT_ALLOC: + default: + break; } - DEBUG("mempool %p using start=%p end=%p size=%zu for MR", - (void *)mp, (void *)start, (void *)end, - (size_t)(end - start)); - mr->mr = mlx5_glue->reg_mr(priv->pd, (void *)start, end - start, - IBV_ACCESS_LOCAL_WRITE); - mr->mp = mp; - mr->lkey = rte_cpu_to_be_32(mr->mr->lkey); - rte_atomic32_inc(&mr->refcnt); - DEBUG("%p: new Memory Region %p refcnt: %d", (void *)priv, - (void *)mr, rte_atomic32_read(&mr->refcnt)); - LIST_INSERT_HEAD(&priv->mr, mr, next); - return mr; } /** - * Search the memory region object in the memory region list. + * Look up address in the global MR cache table. If not found, create a new MR. + * Insert the found/created entry to local bottom-half cache table. + * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param[out] entry + * Pointer to returning MR cache entry, found in the global cache or newly + * created. If failed to create one, this is not written. + * @param addr + * Search key. * - * @param priv - * Pointer to private structure. - * @param mp - * Pointer to the memory pool to register. * @return - * The memory region on success. + * Searched LKey on success, UINT32_MAX on no match. 
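The cache-invalidation scheme described above (bump priv->mr.dev_gen after a rebuild, rte_smp_wmb(), then queues resync via mr_ctrl->cur_gen) can be summarized with this compile-only sketch. The struct and function names here are assumptions; only the dev_gen/cur_gen pairing mirrors the driver, and the datapath stale-check site is not part of this hunk.

	#include <stdint.h>

	struct dev_mr_state { volatile int32_t dev_gen; };
	struct queue_mr_ctrl { int32_t cur_gen; };

	/* Control path: after rebuilding the global cache, publish a new
	 * generation; the driver pairs this store with rte_smp_wmb() so the
	 * rebuild is visible before the new generation number. */
	static void
	invalidate_all(struct dev_mr_state *dev)
	{
		++dev->dev_gen;
	}

	/* Datapath: flush the per-queue cache only when its snapshot is stale;
	 * mlx5_mr_flush_local_cache() then re-reads the generation. */
	static int
	need_flush(const struct dev_mr_state *dev, const struct queue_mr_ctrl *q)
	{
		return q->cur_gen != dev->dev_gen;
	}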
*/ -struct mlx5_mr* -priv_mr_get(struct priv *priv, struct rte_mempool *mp) +static uint32_t +mlx5_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl, + struct mlx5_mr_cache *entry, uintptr_t addr) { - struct mlx5_mr *mr; + struct priv *priv = dev->data->dev_private; + struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh; + uint16_t idx; + uint32_t lkey; - assert(mp); - if (LIST_EMPTY(&priv->mr)) - return NULL; - LIST_FOREACH(mr, &priv->mr, next) { - if (mr->mp == mp) { - rte_atomic32_inc(&mr->refcnt); - DEBUG("Memory Region %p refcnt: %d", - (void *)mr, rte_atomic32_read(&mr->refcnt)); - return mr; - } + /* If local cache table is full, try to double it. */ + if (unlikely(bt->len == bt->size)) + mr_btree_expand(bt, bt->size << 1); + /* Look up in the global cache. */ + rte_rwlock_read_lock(&priv->mr.rwlock); + lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr); + if (lkey != UINT32_MAX) { + /* Found. */ + *entry = (*priv->mr.cache.table)[idx]; + rte_rwlock_read_unlock(&priv->mr.rwlock); + /* + * Update local cache. Even if it fails, return the found entry + * to update top-half cache. Next time, this entry will be found + * in the global cache. + */ + mr_btree_insert(bt, entry); + return lkey; } - return NULL; + rte_rwlock_read_unlock(&priv->mr.rwlock); + /* First time to see the address? Create a new MR. */ + lkey = mlx5_mr_create(dev, entry, addr); + /* + * Update the local cache if successfully created a new global MR. Even + * if failed to create one, there's no action to take in this datapath + * code. As returning LKey is invalid, this will eventually make HW + * fail. + */ + if (lkey != UINT32_MAX) + mr_btree_insert(bt, entry); + return lkey; } /** - * Release the memory region object. + * Bottom-half of LKey search on datapath. Firstly search in cache_bh[] and if + * misses, search in the global MR cache table and update the new entry to + * per-queue local caches. * - * @param mr - * Pointer to memory region to release. + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param addr + * Search key. * * @return - * 0 on success, errno on failure. + * Searched LKey on success, UINT32_MAX on no match. */ -int -priv_mr_release(struct priv *priv, struct mlx5_mr *mr) +static uint32_t +mlx5_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl, + uintptr_t addr) { - (void)priv; - assert(mr); - DEBUG("Memory Region %p refcnt: %d", - (void *)mr, rte_atomic32_read(&mr->refcnt)); - if (rte_atomic32_dec_and_test(&mr->refcnt)) { - claim_zero(mlx5_glue->dereg_mr(mr->mr)); - LIST_REMOVE(mr, next); - rte_free(mr); - return 0; + uint32_t lkey; + uint16_t bh_idx = 0; + /* Victim in top-half cache to replace with new entry. */ + struct mlx5_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head]; + + /* Binary-search MR translation table. */ + lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr); + /* Update top-half cache. */ + if (likely(lkey != UINT32_MAX)) { + *repl = (*mr_ctrl->cache_bh.table)[bh_idx]; + } else { + /* + * If missed in local lookup table, search in the global cache + * and local cache_bh[] will be updated inside if possible. + * Top-half cache entry will also be updated. + */ + lkey = mlx5_mr_lookup_dev(dev, mr_ctrl, repl, addr); + if (unlikely(lkey == UINT32_MAX)) + return UINT32_MAX; } - return EBUSY; + /* Update the most recently used entry. */ + mr_ctrl->mru = mr_ctrl->head; + /* Point to the next victim, the oldest. 
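+	 * Replacement is FIFO: head walks the array modulo MLX5_MR_CACHE_N
+	 * while mru pins the most recently hit entry.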
*/ + mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N; + return lkey; } /** - * Verify the flow list is empty + * Bottom-half of LKey search on Rx. * - * @param priv - * Pointer to private structure. + * @param rxq + * Pointer to Rx queue structure. + * @param addr + * Search key. * - * @return the number of object not released. + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +uint32_t +mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr) +{ + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl; + struct priv *priv = rxq_ctrl->priv; + + DRV_LOG(DEBUG, + "Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p", + rxq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr); + return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr); +} + +/** + * Bottom-half of LKey search on Tx. + * + * @param txq + * Pointer to Tx queue structure. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +uint32_t +mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr) +{ + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq, struct mlx5_txq_ctrl, txq); + struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl; + struct priv *priv = txq_ctrl->priv; + + DRV_LOG(DEBUG, + "Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p", + txq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr); + return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr); +} + +/** + * Flush all of the local cache entries. + * + * @param mr_ctrl + * Pointer to per-queue MR control structure. + */ +void +mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl) +{ + /* Reset the most-recently-used index. */ + mr_ctrl->mru = 0; + /* Reset the linear search array. */ + mr_ctrl->head = 0; + memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache)); + /* Reset the B-tree table. */ + mr_ctrl->cache_bh.len = 1; + mr_ctrl->cache_bh.overflow = 0; + /* Update the generation number. */ + mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr; + DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d", + (void *)mr_ctrl, mr_ctrl->cur_gen); +} + +/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */ +static void +mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque, + struct rte_mempool_memhdr *memhdr, + unsigned mem_idx __rte_unused) +{ + struct mr_update_mp_data *data = opaque; + uint32_t lkey; + + /* Stop iteration if failed in the previous walk. */ + if (data->ret < 0) + return; + /* Register address of the chunk and update local caches. */ + lkey = mlx5_mr_addr2mr_bh(data->dev, data->mr_ctrl, + (uintptr_t)memhdr->addr); + if (lkey == UINT32_MAX) + data->ret = -1; +} + +/** + * Register entire memory chunks in a Mempool. + * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param mp + * Pointer to registering Mempool. + * + * @return + * 0 on success, -1 on failure. */ int -priv_mr_verify(struct priv *priv) +mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl, + struct rte_mempool *mp) { - int ret = 0; + struct mr_update_mp_data data = { + .dev = dev, + .mr_ctrl = mr_ctrl, + .ret = 0, + }; + + rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data); + return data.ret; +} + +/** + * Dump all the created MRs and the global cache entries. + * + * @param dev + * Pointer to Ethernet device. 
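+ *
+ * @note
+ *   Compiled only when NDEBUG is undefined (debug build); otherwise this
+ *   function is an empty no-op.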
+ */
+void
+mlx5_mr_dump_dev(struct rte_eth_dev *dev __rte_unused)
+{
+#ifndef NDEBUG
+	struct priv *priv = dev->data->dev_private;
 	struct mlx5_mr *mr;
+	int mr_n = 0;
+	int chunk_n = 0;
 
-	LIST_FOREACH(mr, &priv->mr, next) {
-		DEBUG("%p: mr %p still referenced", (void *)priv,
-		      (void *)mr);
-		++ret;
+	rte_rwlock_read_lock(&priv->mr.rwlock);
+	/* Iterate all the existing MRs. */
+	LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
+		unsigned int n;
+
+		DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
+		      dev->data->port_id, mr_n++,
+		      rte_cpu_to_be_32(mr->ibv_mr->lkey),
+		      mr->ms_n, mr->ms_bmp_n);
+		if (mr->ms_n == 0)
+			continue;
+		for (n = 0; n < mr->ms_bmp_n; ) {
+			struct mlx5_mr_cache ret = { 0, };
+
+			n = mr_find_next_chunk(mr, &ret, n);
+			if (!ret.end)
+				break;
+			DEBUG("  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
+			      chunk_n++, ret.start, ret.end);
+		}
 	}
-	return ret;
+	DEBUG("port %u dumping global cache", dev->data->port_id);
+	mlx5_mr_btree_dump(&priv->mr.cache);
+	rte_rwlock_read_unlock(&priv->mr.rwlock);
+#endif
+}
+
+/**
+ * Release all the created MRs and resources. Remove the device from the
+ * memory callback list.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_mr_release(struct rte_eth_dev *dev)
+{
+	struct priv *priv = dev->data->dev_private;
+	struct mlx5_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);
+
+	/* Remove from memory callback device list. */
+	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+	LIST_REMOVE(priv, mem_event_cb);
+	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+	if (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
+		mlx5_mr_dump_dev(dev);
+	rte_rwlock_write_lock(&priv->mr.rwlock);
+	/* Detach from MR list and move to free list. */
+	while (mr_next != NULL) {
+		struct mlx5_mr *mr = mr_next;
+
+		mr_next = LIST_NEXT(mr, mr);
+		LIST_REMOVE(mr, mr);
+		LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
+	}
+	LIST_INIT(&priv->mr.mr_list);
+	/* Free global cache. */
+	mlx5_mr_btree_free(&priv->mr.cache);
+	rte_rwlock_write_unlock(&priv->mr.rwlock);
+	/* Free all remaining MRs. */
+	mlx5_mr_garbage_collect(dev);
 }
diff --git a/drivers/net/mlx5/mlx5_mr.h b/drivers/net/mlx5/mlx5_mr.h
new file mode 100644
index 00000000..a57003fe
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_mr.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_MR_H_
+#define RTE_PMD_MLX5_MR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#include <infiniband/mlx5dv.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_eal_memconfig.h>
+#include <rte_ethdev.h>
+#include <rte_rwlock.h>
+#include <rte_bitmap.h>
+
+/* Memory Region object. */
+struct mlx5_mr {
+	LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
+	struct ibv_mr *ibv_mr; /* Verbs Memory Region. */
+	const struct rte_memseg_list *msl;
+	int ms_base_idx; /* Start index of msl->memseg_arr[]. */
+	int ms_n; /* Number of memsegs in use. */
+	uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
+	struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonging to MR. */
+};
+
+/* Cache entry for Memory Region. */
+struct mlx5_mr_cache {
+	uintptr_t start; /* Start address of MR. */
+	uintptr_t end; /* End address of MR. */
+	uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */
+} __rte_packed;
+
+/* MR Cache table for Binary search.
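+ * Entries are kept sorted by start address; index 0 is reserved as a
+ * sentinel entry, which is why mlx5_mr_btree_len() subtracts one.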
+ */
+struct mlx5_mr_btree {
+	uint16_t len; /* Number of entries. */
+	uint16_t size; /* Total number of entries. */
+	int overflow; /* Mark failure of table expansion. */
+	struct mlx5_mr_cache (*table)[];
+} __rte_packed;
+
+/* Per-queue MR control descriptor. */
+struct mlx5_mr_ctrl {
+	uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
+	uint32_t cur_gen; /* Generation number saved to flush caches. */
+	uint16_t mru; /* Index of last hit entry in top-half cache. */
+	uint16_t head; /* Index of the oldest entry in top-half cache. */
+	struct mlx5_mr_cache cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
+	struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
+} __rte_packed;
+
+extern struct mlx5_dev_list mlx5_mem_event_cb_list;
+extern rte_rwlock_t mlx5_mem_event_rwlock;
+
+/* First entry must be NULL for comparison. */
+#define mlx5_mr_btree_len(bt) ((bt)->len - 1)
+
+int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket);
+void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
+void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+			  size_t len, void *arg);
+int mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+		      struct rte_mempool *mp);
+void mlx5_mr_release(struct rte_eth_dev *dev);
+
+/* Debug purpose functions. */
+void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt);
+void mlx5_mr_dump_dev(struct rte_eth_dev *dev);
+
+/**
+ * Look up LKey in the given lookup table by linear search. First check the
+ * last-hit entry; on a miss, search the entire array. If found, update the
+ * last-hit index and return the LKey.
+ *
+ * @param lkp_tbl
+ *   Pointer to lookup table.
+ * @param[in,out] cached_idx
+ *   Pointer to last-hit index.
+ * @param n
+ *   Size of lookup table.
+ * @param addr
+ *   Search key.
+ *
+ * @return
+ *   Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx5_mr_lookup_cache(struct mlx5_mr_cache *lkp_tbl, uint16_t *cached_idx,
+		     uint16_t n, uintptr_t addr)
+{
+	uint16_t idx;
+
+	if (likely(addr >= lkp_tbl[*cached_idx].start &&
+		   addr < lkp_tbl[*cached_idx].end))
+		return lkp_tbl[*cached_idx].lkey;
+	for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
+		if (addr >= lkp_tbl[idx].start &&
+		    addr < lkp_tbl[idx].end) {
+			/* Found. */
+			*cached_idx = idx;
+			return lkp_tbl[idx].lkey;
+		}
+	}
+	return UINT32_MAX;
+}
+
+#endif /* RTE_PMD_MLX5_MR_H_ */
diff --git a/drivers/net/mlx5/mlx5_nl.c b/drivers/net/mlx5/mlx5_nl.c
new file mode 100644
index 00000000..d61826ae
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_nl.c
@@ -0,0 +1,916 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <linux/if_link.h>
+#include <linux/rtnetlink.h>
+#include <net/if.h>
+#include <rdma/rdma_netlink.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <rte_errno.h>
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+
+/* Size of the buffer to receive kernel messages */
+#define MLX5_NL_BUF_SIZE (32 * 1024)
+/* Send buffer size for the Netlink socket */
+#define MLX5_SEND_BUF_SIZE 32768
+/* Receive buffer size for the Netlink socket */
+#define MLX5_RECV_BUF_SIZE 32768
+
+/*
+ * Define NDA_RTA as defined in iproute2 sources.
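+ * It returns a pointer to the first routing attribute (rtattr) following
+ * the fixed-size struct ndmsg header of a neighbour message.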
+ * + * see in iproute2 sources file include/libnetlink.h + */ +#ifndef MLX5_NDA_RTA +#define MLX5_NDA_RTA(r) \ + ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct ndmsg)))) +#endif + +/* + * The following definitions are normally found in rdma/rdma_netlink.h, + * however they are so recent that most systems do not expose them yet. + */ +#ifndef HAVE_RDMA_NL_NLDEV +#define RDMA_NL_NLDEV 5 +#endif +#ifndef HAVE_RDMA_NLDEV_CMD_GET +#define RDMA_NLDEV_CMD_GET 1 +#endif +#ifndef HAVE_RDMA_NLDEV_CMD_PORT_GET +#define RDMA_NLDEV_CMD_PORT_GET 5 +#endif +#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_INDEX +#define RDMA_NLDEV_ATTR_DEV_INDEX 1 +#endif +#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_NAME +#define RDMA_NLDEV_ATTR_DEV_NAME 2 +#endif +#ifndef HAVE_RDMA_NLDEV_ATTR_PORT_INDEX +#define RDMA_NLDEV_ATTR_PORT_INDEX 3 +#endif +#ifndef HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX +#define RDMA_NLDEV_ATTR_NDEV_INDEX 50 +#endif + +/* These are normally found in linux/if_link.h. */ +#ifndef HAVE_IFLA_PHYS_SWITCH_ID +#define IFLA_PHYS_SWITCH_ID 36 +#endif +#ifndef HAVE_IFLA_PHYS_PORT_NAME +#define IFLA_PHYS_PORT_NAME 38 +#endif + +/* Add/remove MAC address through Netlink */ +struct mlx5_nl_mac_addr { + struct ether_addr (*mac)[]; + /**< MAC address handled by the device. */ + int mac_n; /**< Number of addresses in the array. */ +}; + +/** Data structure used by mlx5_nl_ifindex_cb(). */ +struct mlx5_nl_ifindex_data { + const char *name; /**< IB device name (in). */ + uint32_t ibindex; /**< IB device index (out). */ + uint32_t ifindex; /**< Network interface index (out). */ +}; + +/** + * Opens a Netlink socket. + * + * @param protocol + * Netlink protocol (e.g. NETLINK_ROUTE, NETLINK_RDMA). + * + * @return + * A file descriptor on success, a negative errno value otherwise and + * rte_errno is set. + */ +int +mlx5_nl_init(int protocol) +{ + int fd; + int sndbuf_size = MLX5_SEND_BUF_SIZE; + int rcvbuf_size = MLX5_RECV_BUF_SIZE; + struct sockaddr_nl local = { + .nl_family = AF_NETLINK, + }; + int ret; + + fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, protocol); + if (fd == -1) { + rte_errno = errno; + return -rte_errno; + } + ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int)); + if (ret == -1) { + rte_errno = errno; + goto error; + } + ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int)); + if (ret == -1) { + rte_errno = errno; + goto error; + } + ret = bind(fd, (struct sockaddr *)&local, sizeof(local)); + if (ret == -1) { + rte_errno = errno; + goto error; + } + return fd; +error: + close(fd); + return -rte_errno; +} + +/** + * Send a request message to the kernel on the Netlink socket. + * + * @param[in] nlsk_fd + * Netlink socket file descriptor. + * @param[in] nh + * The Netlink message send to the kernel. + * @param[in] ssn + * Sequence number. + * @param[in] req + * Pointer to the request structure. + * @param[in] len + * Length of the request in bytes. + * + * @return + * The number of sent bytes on success, a negative errno value otherwise and + * rte_errno is set. 
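+ *
+ * @note
+ *   The same sequence number must later be passed to mlx5_nl_recv() so
+ *   kernel replies can be matched with this request. An illustrative
+ *   pairing (sketch only, mirroring mlx5_nl_mac_addr_list() below):
+ *
+ * @code
+ *   uint32_t sn = priv->nl_sn++;
+ *
+ *   if (mlx5_nl_request(fd, &req.hdr, sn, &req.ifm,
+ *                       sizeof(struct ifinfomsg)) >= 0)
+ *           mlx5_nl_recv(fd, sn, cb, &data);
+ * @endcode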
+ */ +static int +mlx5_nl_request(int nlsk_fd, struct nlmsghdr *nh, uint32_t sn, void *req, + int len) +{ + struct sockaddr_nl sa = { + .nl_family = AF_NETLINK, + }; + struct iovec iov[2] = { + { .iov_base = nh, .iov_len = sizeof(*nh), }, + { .iov_base = req, .iov_len = len, }, + }; + struct msghdr msg = { + .msg_name = &sa, + .msg_namelen = sizeof(sa), + .msg_iov = iov, + .msg_iovlen = 2, + }; + int send_bytes; + + nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */ + nh->nlmsg_seq = sn; + send_bytes = sendmsg(nlsk_fd, &msg, 0); + if (send_bytes < 0) { + rte_errno = errno; + return -rte_errno; + } + return send_bytes; +} + +/** + * Send a message to the kernel on the Netlink socket. + * + * @param[in] nlsk_fd + * The Netlink socket file descriptor used for communication. + * @param[in] nh + * The Netlink message send to the kernel. + * @param[in] sn + * Sequence number. + * + * @return + * The number of sent bytes on success, a negative errno value otherwise and + * rte_errno is set. + */ +static int +mlx5_nl_send(int nlsk_fd, struct nlmsghdr *nh, uint32_t sn) +{ + struct sockaddr_nl sa = { + .nl_family = AF_NETLINK, + }; + struct iovec iov = { + .iov_base = nh, + .iov_len = nh->nlmsg_len, + }; + struct msghdr msg = { + .msg_name = &sa, + .msg_namelen = sizeof(sa), + .msg_iov = &iov, + .msg_iovlen = 1, + }; + int send_bytes; + + nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */ + nh->nlmsg_seq = sn; + send_bytes = sendmsg(nlsk_fd, &msg, 0); + if (send_bytes < 0) { + rte_errno = errno; + return -rte_errno; + } + return send_bytes; +} + +/** + * Receive a message from the kernel on the Netlink socket, following + * mlx5_nl_send(). + * + * @param[in] nlsk_fd + * The Netlink socket file descriptor used for communication. + * @param[in] sn + * Sequence number. + * @param[in] cb + * The callback function to call for each Netlink message received. + * @param[in, out] arg + * Custom arguments for the callback. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_nl_recv(int nlsk_fd, uint32_t sn, int (*cb)(struct nlmsghdr *, void *arg), + void *arg) +{ + struct sockaddr_nl sa; + char buf[MLX5_RECV_BUF_SIZE]; + struct iovec iov = { + .iov_base = buf, + .iov_len = sizeof(buf), + }; + struct msghdr msg = { + .msg_name = &sa, + .msg_namelen = sizeof(sa), + .msg_iov = &iov, + /* One message at a time */ + .msg_iovlen = 1, + }; + int multipart = 0; + int ret = 0; + + do { + struct nlmsghdr *nh; + int recv_bytes = 0; + + do { + recv_bytes = recvmsg(nlsk_fd, &msg, 0); + if (recv_bytes == -1) { + rte_errno = errno; + return -rte_errno; + } + nh = (struct nlmsghdr *)buf; + } while (nh->nlmsg_seq != sn); + for (; + NLMSG_OK(nh, (unsigned int)recv_bytes); + nh = NLMSG_NEXT(nh, recv_bytes)) { + if (nh->nlmsg_type == NLMSG_ERROR) { + struct nlmsgerr *err_data = NLMSG_DATA(nh); + + if (err_data->error < 0) { + rte_errno = -err_data->error; + return -rte_errno; + } + /* Ack message. */ + return 0; + } + /* Multi-part msgs and their trailing DONE message. */ + if (nh->nlmsg_flags & NLM_F_MULTI) { + if (nh->nlmsg_type == NLMSG_DONE) + return 0; + multipart = 1; + } + if (cb) { + ret = cb(nh, arg); + if (ret < 0) + return ret; + } + } + } while (multipart); + return ret; +} + +/** + * Parse Netlink message to retrieve the bridge MAC address. + * + * @param nh + * Pointer to Netlink Message Header. + * @param arg + * PMD data register with this callback. 
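+ *   It is cast to struct mlx5_nl_mac_addr; the MAC addresses found are
+ *   appended to its array.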
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_nl_mac_addr_cb(struct nlmsghdr *nh, void *arg) +{ + struct mlx5_nl_mac_addr *data = arg; + struct ndmsg *r = NLMSG_DATA(nh); + struct rtattr *attribute; + int len; + + len = nh->nlmsg_len - NLMSG_LENGTH(sizeof(*r)); + for (attribute = MLX5_NDA_RTA(r); + RTA_OK(attribute, len); + attribute = RTA_NEXT(attribute, len)) { + if (attribute->rta_type == NDA_LLADDR) { + if (data->mac_n == MLX5_MAX_MAC_ADDRESSES) { + DRV_LOG(WARNING, + "not enough room to finalize the" + " request"); + rte_errno = ENOMEM; + return -rte_errno; + } +#ifndef NDEBUG + char m[18]; + + ether_format_addr(m, 18, RTA_DATA(attribute)); + DRV_LOG(DEBUG, "bridge MAC address %s", m); +#endif + memcpy(&(*data->mac)[data->mac_n++], + RTA_DATA(attribute), ETHER_ADDR_LEN); + } + } + return 0; +} + +/** + * Get bridge MAC addresses. + * + * @param dev + * Pointer to Ethernet device. + * @param mac[out] + * Pointer to the array table of MAC addresses to fill. + * Its size should be of MLX5_MAX_MAC_ADDRESSES. + * @param mac_n[out] + * Number of entries filled in MAC array. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_nl_mac_addr_list(struct rte_eth_dev *dev, struct ether_addr (*mac)[], + int *mac_n) +{ + struct priv *priv = dev->data->dev_private; + unsigned int iface_idx = mlx5_ifindex(dev); + struct { + struct nlmsghdr hdr; + struct ifinfomsg ifm; + } req = { + .hdr = { + .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)), + .nlmsg_type = RTM_GETNEIGH, + .nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, + }, + .ifm = { + .ifi_family = PF_BRIDGE, + .ifi_index = iface_idx, + }, + }; + struct mlx5_nl_mac_addr data = { + .mac = mac, + .mac_n = 0, + }; + int fd; + int ret; + uint32_t sn = priv->nl_sn++; + + if (priv->nl_socket_route == -1) + return 0; + fd = priv->nl_socket_route; + ret = mlx5_nl_request(fd, &req.hdr, sn, &req.ifm, + sizeof(struct ifinfomsg)); + if (ret < 0) + goto error; + ret = mlx5_nl_recv(fd, sn, mlx5_nl_mac_addr_cb, &data); + if (ret < 0) + goto error; + *mac_n = data.mac_n; + return 0; +error: + DRV_LOG(DEBUG, "port %u cannot retrieve MAC address list %s", + dev->data->port_id, strerror(rte_errno)); + return -rte_errno; +} + +/** + * Modify the MAC address neighbour table with Netlink. + * + * @param dev + * Pointer to Ethernet device. + * @param mac + * MAC address to consider. + * @param add + * 1 to add the MAC address, 0 to remove the MAC address. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_nl_mac_addr_modify(struct rte_eth_dev *dev, struct ether_addr *mac, + int add) +{ + struct priv *priv = dev->data->dev_private; + unsigned int iface_idx = mlx5_ifindex(dev); + struct { + struct nlmsghdr hdr; + struct ndmsg ndm; + struct rtattr rta; + uint8_t buffer[ETHER_ADDR_LEN]; + } req = { + .hdr = { + .nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)), + .nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | + NLM_F_EXCL | NLM_F_ACK, + .nlmsg_type = add ? 
RTM_NEWNEIGH : RTM_DELNEIGH, + }, + .ndm = { + .ndm_family = PF_BRIDGE, + .ndm_state = NUD_NOARP | NUD_PERMANENT, + .ndm_ifindex = iface_idx, + .ndm_flags = NTF_SELF, + }, + .rta = { + .rta_type = NDA_LLADDR, + .rta_len = RTA_LENGTH(ETHER_ADDR_LEN), + }, + }; + int fd; + int ret; + uint32_t sn = priv->nl_sn++; + + if (priv->nl_socket_route == -1) + return 0; + fd = priv->nl_socket_route; + memcpy(RTA_DATA(&req.rta), mac, ETHER_ADDR_LEN); + req.hdr.nlmsg_len = NLMSG_ALIGN(req.hdr.nlmsg_len) + + RTA_ALIGN(req.rta.rta_len); + ret = mlx5_nl_send(fd, &req.hdr, sn); + if (ret < 0) + goto error; + ret = mlx5_nl_recv(fd, sn, NULL, NULL); + if (ret < 0) + goto error; + return 0; +error: + DRV_LOG(DEBUG, + "port %u cannot %s MAC address %02X:%02X:%02X:%02X:%02X:%02X" + " %s", + dev->data->port_id, + add ? "add" : "remove", + mac->addr_bytes[0], mac->addr_bytes[1], + mac->addr_bytes[2], mac->addr_bytes[3], + mac->addr_bytes[4], mac->addr_bytes[5], + strerror(rte_errno)); + return -rte_errno; +} + +/** + * Add a MAC address. + * + * @param dev + * Pointer to Ethernet device. + * @param mac + * MAC address to register. + * @param index + * MAC address index. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index) +{ + struct priv *priv = dev->data->dev_private; + int ret; + + ret = mlx5_nl_mac_addr_modify(dev, mac, 1); + if (!ret) + BITFIELD_SET(priv->mac_own, index); + if (ret == -EEXIST) + return 0; + return ret; +} + +/** + * Remove a MAC address. + * + * @param dev + * Pointer to Ethernet device. + * @param mac + * MAC address to remove. + * @param index + * MAC address index. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index) +{ + struct priv *priv = dev->data->dev_private; + + BITFIELD_RESET(priv->mac_own, index); + return mlx5_nl_mac_addr_modify(dev, mac, 0); +} + +/** + * Synchronize Netlink bridge table to the internal table. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev) +{ + struct ether_addr macs[MLX5_MAX_MAC_ADDRESSES]; + int macs_n = 0; + int i; + int ret; + + ret = mlx5_nl_mac_addr_list(dev, &macs, &macs_n); + if (ret) + return; + for (i = 0; i != macs_n; ++i) { + int j; + + /* Verify the address is not in the array yet. */ + for (j = 0; j != MLX5_MAX_MAC_ADDRESSES; ++j) + if (is_same_ether_addr(&macs[i], + &dev->data->mac_addrs[j])) + break; + if (j != MLX5_MAX_MAC_ADDRESSES) + continue; + /* Find the first entry available. */ + for (j = 0; j != MLX5_MAX_MAC_ADDRESSES; ++j) { + if (is_zero_ether_addr(&dev->data->mac_addrs[j])) { + dev->data->mac_addrs[j] = macs[i]; + break; + } + } + } +} + +/** + * Flush all added MAC addresses. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + int i; + + for (i = MLX5_MAX_MAC_ADDRESSES - 1; i >= 0; --i) { + struct ether_addr *m = &dev->data->mac_addrs[i]; + + if (BITFIELD_ISSET(priv->mac_own, i)) + mlx5_nl_mac_addr_remove(dev, m, i); + } +} + +/** + * Enable promiscuous / all multicast mode through Netlink. + * + * @param dev + * Pointer to Ethernet device structure. + * @param flags + * IFF_PROMISC for promiscuous, IFF_ALLMULTI for allmulti. + * @param enable + * Nonzero to enable, disable otherwise. 
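+ *
+ * @note
+ *   Only the bits selected by @p flags are altered: they are echoed into
+ *   ifi_change so the kernel leaves every other interface flag untouched.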
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_nl_device_flags(struct rte_eth_dev *dev, uint32_t flags, int enable) +{ + struct priv *priv = dev->data->dev_private; + unsigned int iface_idx = mlx5_ifindex(dev); + struct { + struct nlmsghdr hdr; + struct ifinfomsg ifi; + } req = { + .hdr = { + .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)), + .nlmsg_type = RTM_NEWLINK, + .nlmsg_flags = NLM_F_REQUEST, + }, + .ifi = { + .ifi_flags = enable ? flags : 0, + .ifi_change = flags, + .ifi_index = iface_idx, + }, + }; + int fd; + int ret; + + assert(!(flags & ~(IFF_PROMISC | IFF_ALLMULTI))); + if (priv->nl_socket_route < 0) + return 0; + fd = priv->nl_socket_route; + ret = mlx5_nl_send(fd, &req.hdr, priv->nl_sn++); + if (ret < 0) + return ret; + return 0; +} + +/** + * Enable promiscuous mode through Netlink. + * + * @param dev + * Pointer to Ethernet device structure. + * @param enable + * Nonzero to enable, disable otherwise. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_nl_promisc(struct rte_eth_dev *dev, int enable) +{ + int ret = mlx5_nl_device_flags(dev, IFF_PROMISC, enable); + + if (ret) + DRV_LOG(DEBUG, + "port %u cannot %s promisc mode: Netlink error %s", + dev->data->port_id, enable ? "enable" : "disable", + strerror(rte_errno)); + return ret; +} + +/** + * Enable all multicast mode through Netlink. + * + * @param dev + * Pointer to Ethernet device structure. + * @param enable + * Nonzero to enable, disable otherwise. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable) +{ + int ret = mlx5_nl_device_flags(dev, IFF_ALLMULTI, enable); + + if (ret) + DRV_LOG(DEBUG, + "port %u cannot %s allmulti mode: Netlink error %s", + dev->data->port_id, enable ? "enable" : "disable", + strerror(rte_errno)); + return ret; +} + +/** + * Process network interface information from Netlink message. + * + * @param nh + * Pointer to Netlink message header. + * @param arg + * Opaque data pointer for this callback. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_nl_ifindex_cb(struct nlmsghdr *nh, void *arg) +{ + struct mlx5_nl_ifindex_data *data = arg; + size_t off = NLMSG_HDRLEN; + uint32_t ibindex = 0; + uint32_t ifindex = 0; + int found = 0; + + if (nh->nlmsg_type != + RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET) && + nh->nlmsg_type != + RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_PORT_GET)) + goto error; + while (off < nh->nlmsg_len) { + struct nlattr *na = (void *)((uintptr_t)nh + off); + void *payload = (void *)((uintptr_t)na + NLA_HDRLEN); + + if (na->nla_len > nh->nlmsg_len - off) + goto error; + switch (na->nla_type) { + case RDMA_NLDEV_ATTR_DEV_INDEX: + ibindex = *(uint32_t *)payload; + break; + case RDMA_NLDEV_ATTR_DEV_NAME: + if (!strcmp(payload, data->name)) + found = 1; + break; + case RDMA_NLDEV_ATTR_NDEV_INDEX: + ifindex = *(uint32_t *)payload; + break; + default: + break; + } + off += NLA_ALIGN(na->nla_len); + } + if (found) { + data->ibindex = ibindex; + data->ifindex = ifindex; + } + return 0; +error: + rte_errno = EINVAL; + return -rte_errno; +} + +/** + * Get index of network interface associated with some IB device. + * + * This is the only somewhat safe method to avoid resorting to heuristics + * when faced with port representors. Unfortunately it requires at least + * Linux 4.17. 
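+ *
+ * The lookup takes two passes: an RDMA_NLDEV_CMD_GET dump first resolves
+ * the IB device index from @p name, then an RDMA_NLDEV_CMD_PORT_GET
+ * request retrieves the ifindex bound to its first port.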
+ * + * @param nl + * Netlink socket of the RDMA kind (NETLINK_RDMA). + * @param[in] name + * IB device name. + * + * @return + * A valid (nonzero) interface index on success, 0 otherwise and rte_errno + * is set. + */ +unsigned int +mlx5_nl_ifindex(int nl, const char *name) +{ + static const uint32_t pindex = 1; + uint32_t seq = random(); + struct mlx5_nl_ifindex_data data = { + .name = name, + .ibindex = 0, /* Determined during first pass. */ + .ifindex = 0, /* Determined during second pass. */ + }; + union { + struct nlmsghdr nh; + uint8_t buf[NLMSG_HDRLEN + + NLA_HDRLEN + NLA_ALIGN(sizeof(data.ibindex)) + + NLA_HDRLEN + NLA_ALIGN(sizeof(pindex))]; + } req = { + .nh = { + .nlmsg_len = NLMSG_LENGTH(0), + .nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, + RDMA_NLDEV_CMD_GET), + .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP, + }, + }; + struct nlattr *na; + int ret; + + ret = mlx5_nl_send(nl, &req.nh, seq); + if (ret < 0) + return 0; + ret = mlx5_nl_recv(nl, seq, mlx5_nl_ifindex_cb, &data); + if (ret < 0) + return 0; + if (!data.ibindex) + goto error; + ++seq; + req.nh.nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, + RDMA_NLDEV_CMD_PORT_GET); + req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; + req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.buf) - NLMSG_HDRLEN); + na = (void *)((uintptr_t)req.buf + NLMSG_HDRLEN); + na->nla_len = NLA_HDRLEN + sizeof(data.ibindex); + na->nla_type = RDMA_NLDEV_ATTR_DEV_INDEX; + memcpy((void *)((uintptr_t)na + NLA_HDRLEN), + &data.ibindex, sizeof(data.ibindex)); + na = (void *)((uintptr_t)na + NLA_ALIGN(na->nla_len)); + na->nla_len = NLA_HDRLEN + sizeof(pindex); + na->nla_type = RDMA_NLDEV_ATTR_PORT_INDEX; + memcpy((void *)((uintptr_t)na + NLA_HDRLEN), + &pindex, sizeof(pindex)); + ret = mlx5_nl_send(nl, &req.nh, seq); + if (ret < 0) + return 0; + ret = mlx5_nl_recv(nl, seq, mlx5_nl_ifindex_cb, &data); + if (ret < 0) + return 0; + if (!data.ifindex) + goto error; + return data.ifindex; +error: + rte_errno = ENODEV; + return 0; +} + +/** + * Process switch information from Netlink message. + * + * @param nh + * Pointer to Netlink message header. + * @param arg + * Opaque data pointer for this callback. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
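+ *
+ * @note
+ *   An interface reporting IFLA_PHYS_SWITCH_ID but no IFLA_PHYS_PORT_NAME
+ *   is classified as a switch master; one reporting both is classified as
+ *   a port representor.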
+ */
+static int
+mlx5_nl_switch_info_cb(struct nlmsghdr *nh, void *arg)
+{
+	struct mlx5_switch_info info = {
+		.master = 0,
+		.representor = 0,
+		.port_name = 0,
+		.switch_id = 0,
+	};
+	size_t off = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+	bool port_name_set = false;
+	bool switch_id_set = false;
+
+	if (nh->nlmsg_type != RTM_NEWLINK)
+		goto error;
+	while (off < nh->nlmsg_len) {
+		struct rtattr *ra = (void *)((uintptr_t)nh + off);
+		void *payload = RTA_DATA(ra);
+		char *end;
+		unsigned int i;
+
+		if (ra->rta_len > nh->nlmsg_len - off)
+			goto error;
+		switch (ra->rta_type) {
+		case IFLA_PHYS_PORT_NAME:
+			errno = 0;
+			info.port_name = strtol(payload, &end, 0);
+			if (errno ||
+			    (size_t)(end - (char *)payload) != strlen(payload))
+				goto error;
+			port_name_set = true;
+			break;
+		case IFLA_PHYS_SWITCH_ID:
+			info.switch_id = 0;
+			for (i = 0; i < RTA_PAYLOAD(ra); ++i) {
+				info.switch_id <<= 8;
+				info.switch_id |= ((uint8_t *)payload)[i];
+			}
+			switch_id_set = true;
+			break;
+		}
+		off += RTA_ALIGN(ra->rta_len);
+	}
+	info.master = switch_id_set && !port_name_set;
+	info.representor = switch_id_set && port_name_set;
+	memcpy(arg, &info, sizeof(info));
+	return 0;
+error:
+	rte_errno = EINVAL;
+	return -rte_errno;
+}
+
+/**
+ * Get switch information associated with network interface.
+ *
+ * @param nl
+ *   Netlink socket of the ROUTE kind (NETLINK_ROUTE).
+ * @param ifindex
+ *   Network interface index.
+ * @param[out] info
+ *   Switch information object, populated in case of success.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_switch_info(int nl, unsigned int ifindex, struct mlx5_switch_info *info)
+{
+	uint32_t seq = random();
+	struct {
+		struct nlmsghdr nh;
+		struct ifinfomsg info;
+	} req = {
+		.nh = {
+			.nlmsg_len = NLMSG_LENGTH(sizeof(req.info)),
+			.nlmsg_type = RTM_GETLINK,
+			.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
+		},
+		.info = {
+			.ifi_family = AF_UNSPEC,
+			.ifi_index = ifindex,
+		},
+	};
+	int ret;
+
+	ret = mlx5_nl_send(nl, &req.nh, seq);
+	if (ret >= 0)
+		ret = mlx5_nl_recv(nl, seq, mlx5_nl_switch_info_cb, info);
+	return ret;
+}
diff --git a/drivers/net/mlx5/mlx5_nl_flow.c b/drivers/net/mlx5/mlx5_nl_flow.c
new file mode 100644
index 00000000..a1c8c340
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_nl_flow.c
@@ -0,0 +1,1248 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <libmnl/libmnl.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
+#include <linux/pkt_cls.h>
+#include <linux/pkt_sched.h>
+#include <linux/rtnetlink.h>
+#include <linux/tc_act/tc_gact.h>
+#include <linux/tc_act/tc_mirred.h>
+#include <netinet/in.h>
+#include <stdalign.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/socket.h>
+
+#include <rte_byteorder.h>
+#include <rte_errno.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+
+#include "mlx5.h"
+#include "mlx5_autoconf.h"
+
+#ifdef HAVE_TC_ACT_VLAN
+
+#include <linux/tc_act/tc_vlan.h>
+
+#else /* HAVE_TC_ACT_VLAN */
+
+#define TCA_VLAN_ACT_POP 1
+#define TCA_VLAN_ACT_PUSH 2
+#define TCA_VLAN_ACT_MODIFY 3
+#define TCA_VLAN_PARMS 2
+#define TCA_VLAN_PUSH_VLAN_ID 3
+#define TCA_VLAN_PUSH_VLAN_PROTOCOL 4
+#define TCA_VLAN_PAD 5
+#define TCA_VLAN_PUSH_VLAN_PRIORITY 6
+
+struct tc_vlan {
+	tc_gen;
+	int v_action;
+};
+
+#endif /* HAVE_TC_ACT_VLAN */
+
+/* Normally found in linux/netlink.h. */
+#ifndef NETLINK_CAP_ACK
+#define NETLINK_CAP_ACK 10
+#endif
+
+/* Normally found in linux/pkt_sched.h. */
+#ifndef TC_H_MIN_INGRESS
+#define TC_H_MIN_INGRESS 0xfff2u
+#endif
+
+/* Normally found in linux/pkt_cls.h.
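+ * The values below are hard-coded so this file still builds against
+ * kernel headers that predate the flower classifier attributes.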
*/ +#ifndef TCA_CLS_FLAGS_SKIP_SW +#define TCA_CLS_FLAGS_SKIP_SW (1 << 1) +#endif +#ifndef HAVE_TCA_FLOWER_ACT +#define TCA_FLOWER_ACT 3 +#endif +#ifndef HAVE_TCA_FLOWER_FLAGS +#define TCA_FLOWER_FLAGS 22 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_ETH_TYPE +#define TCA_FLOWER_KEY_ETH_TYPE 8 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST +#define TCA_FLOWER_KEY_ETH_DST 4 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST_MASK +#define TCA_FLOWER_KEY_ETH_DST_MASK 5 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC +#define TCA_FLOWER_KEY_ETH_SRC 6 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK +#define TCA_FLOWER_KEY_ETH_SRC_MASK 7 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IP_PROTO +#define TCA_FLOWER_KEY_IP_PROTO 9 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC +#define TCA_FLOWER_KEY_IPV4_SRC 10 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK +#define TCA_FLOWER_KEY_IPV4_SRC_MASK 11 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST +#define TCA_FLOWER_KEY_IPV4_DST 12 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK +#define TCA_FLOWER_KEY_IPV4_DST_MASK 13 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC +#define TCA_FLOWER_KEY_IPV6_SRC 14 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK +#define TCA_FLOWER_KEY_IPV6_SRC_MASK 15 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST +#define TCA_FLOWER_KEY_IPV6_DST 16 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK +#define TCA_FLOWER_KEY_IPV6_DST_MASK 17 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC +#define TCA_FLOWER_KEY_TCP_SRC 18 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK +#define TCA_FLOWER_KEY_TCP_SRC_MASK 35 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST +#define TCA_FLOWER_KEY_TCP_DST 19 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST_MASK +#define TCA_FLOWER_KEY_TCP_DST_MASK 36 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC +#define TCA_FLOWER_KEY_UDP_SRC 20 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK +#define TCA_FLOWER_KEY_UDP_SRC_MASK 37 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST +#define TCA_FLOWER_KEY_UDP_DST 21 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST_MASK +#define TCA_FLOWER_KEY_UDP_DST_MASK 38 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ID +#define TCA_FLOWER_KEY_VLAN_ID 23 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_VLAN_PRIO +#define TCA_FLOWER_KEY_VLAN_PRIO 24 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE +#define TCA_FLOWER_KEY_VLAN_ETH_TYPE 25 +#endif + +/** Parser state definitions for mlx5_nl_flow_trans[]. */ +enum mlx5_nl_flow_trans { + INVALID, + BACK, + ATTR, + PATTERN, + ITEM_VOID, + ITEM_PORT_ID, + ITEM_ETH, + ITEM_VLAN, + ITEM_IPV4, + ITEM_IPV6, + ITEM_TCP, + ITEM_UDP, + ACTIONS, + ACTION_VOID, + ACTION_PORT_ID, + ACTION_DROP, + ACTION_OF_POP_VLAN, + ACTION_OF_PUSH_VLAN, + ACTION_OF_SET_VLAN_VID, + ACTION_OF_SET_VLAN_PCP, + END, +}; + +#define TRANS(...) (const enum mlx5_nl_flow_trans []){ __VA_ARGS__, INVALID, } + +#define PATTERN_COMMON \ + ITEM_VOID, ITEM_PORT_ID, ACTIONS +#define ACTIONS_COMMON \ + ACTION_VOID, ACTION_OF_POP_VLAN, ACTION_OF_PUSH_VLAN, \ + ACTION_OF_SET_VLAN_VID, ACTION_OF_SET_VLAN_PCP +#define ACTIONS_FATE \ + ACTION_PORT_ID, ACTION_DROP + +/** Parser state transitions used by mlx5_nl_flow_transpose(). 
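+ *
+ * Each state lists the states allowed to follow it, BACK returning to the
+ * previously saved set. For instance, a plain drop rule walks ATTR,
+ * PATTERN, ITEM_ETH, ACTIONS, ACTION_DROP and END.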
*/ +static const enum mlx5_nl_flow_trans *const mlx5_nl_flow_trans[] = { + [INVALID] = NULL, + [BACK] = NULL, + [ATTR] = TRANS(PATTERN), + [PATTERN] = TRANS(ITEM_ETH, PATTERN_COMMON), + [ITEM_VOID] = TRANS(BACK), + [ITEM_PORT_ID] = TRANS(BACK), + [ITEM_ETH] = TRANS(ITEM_IPV4, ITEM_IPV6, ITEM_VLAN, PATTERN_COMMON), + [ITEM_VLAN] = TRANS(ITEM_IPV4, ITEM_IPV6, PATTERN_COMMON), + [ITEM_IPV4] = TRANS(ITEM_TCP, ITEM_UDP, PATTERN_COMMON), + [ITEM_IPV6] = TRANS(ITEM_TCP, ITEM_UDP, PATTERN_COMMON), + [ITEM_TCP] = TRANS(PATTERN_COMMON), + [ITEM_UDP] = TRANS(PATTERN_COMMON), + [ACTIONS] = TRANS(ACTIONS_FATE, ACTIONS_COMMON), + [ACTION_VOID] = TRANS(BACK), + [ACTION_PORT_ID] = TRANS(ACTION_VOID, END), + [ACTION_DROP] = TRANS(ACTION_VOID, END), + [ACTION_OF_POP_VLAN] = TRANS(ACTIONS_FATE, ACTIONS_COMMON), + [ACTION_OF_PUSH_VLAN] = TRANS(ACTIONS_FATE, ACTIONS_COMMON), + [ACTION_OF_SET_VLAN_VID] = TRANS(ACTIONS_FATE, ACTIONS_COMMON), + [ACTION_OF_SET_VLAN_PCP] = TRANS(ACTIONS_FATE, ACTIONS_COMMON), + [END] = NULL, +}; + +/** Empty masks for known item types. */ +static const union { + struct rte_flow_item_port_id port_id; + struct rte_flow_item_eth eth; + struct rte_flow_item_vlan vlan; + struct rte_flow_item_ipv4 ipv4; + struct rte_flow_item_ipv6 ipv6; + struct rte_flow_item_tcp tcp; + struct rte_flow_item_udp udp; +} mlx5_nl_flow_mask_empty; + +/** Supported masks for known item types. */ +static const struct { + struct rte_flow_item_port_id port_id; + struct rte_flow_item_eth eth; + struct rte_flow_item_vlan vlan; + struct rte_flow_item_ipv4 ipv4; + struct rte_flow_item_ipv6 ipv6; + struct rte_flow_item_tcp tcp; + struct rte_flow_item_udp udp; +} mlx5_nl_flow_mask_supported = { + .port_id = { + .id = 0xffffffff, + }, + .eth = { + .type = RTE_BE16(0xffff), + .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", + }, + .vlan = { + /* PCP and VID only, no DEI. */ + .tci = RTE_BE16(0xefff), + .inner_type = RTE_BE16(0xffff), + }, + .ipv4.hdr = { + .next_proto_id = 0xff, + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), + }, + .ipv6.hdr = { + .proto = 0xff, + .src_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .dst_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + }, + .tcp.hdr = { + .src_port = RTE_BE16(0xffff), + .dst_port = RTE_BE16(0xffff), + }, + .udp.hdr = { + .src_port = RTE_BE16(0xffff), + .dst_port = RTE_BE16(0xffff), + }, +}; + +/** + * Retrieve mask for pattern item. + * + * This function does basic sanity checks on a pattern item in order to + * return the most appropriate mask for it. + * + * @param[in] item + * Item specification. + * @param[in] mask_default + * Default mask for pattern item as specified by the flow API. + * @param[in] mask_supported + * Mask fields supported by the implementation. + * @param[in] mask_empty + * Empty mask to return when there is no specification. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * Either @p item->mask or one of the mask parameters on success, NULL + * otherwise and rte_errno is set. + */ +static const void * +mlx5_nl_flow_item_mask(const struct rte_flow_item *item, + const void *mask_default, + const void *mask_supported, + const void *mask_empty, + size_t mask_size, + struct rte_flow_error *error) +{ + const uint8_t *mask; + size_t i; + + /* item->last and item->mask cannot exist without item->spec. 
*/ + if (!item->spec && (item->mask || item->last)) { + rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, + "\"mask\" or \"last\" field provided without a" + " corresponding \"spec\""); + return NULL; + } + /* No spec, no mask, no problem. */ + if (!item->spec) + return mask_empty; + mask = item->mask ? item->mask : mask_default; + assert(mask); + /* + * Single-pass check to make sure that: + * - Mask is supported, no bits are set outside mask_supported. + * - Both item->spec and item->last are included in mask. + */ + for (i = 0; i != mask_size; ++i) { + if (!mask[i]) + continue; + if ((mask[i] | ((const uint8_t *)mask_supported)[i]) != + ((const uint8_t *)mask_supported)[i]) { + rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask, "unsupported field found in \"mask\""); + return NULL; + } + if (item->last && + (((const uint8_t *)item->spec)[i] & mask[i]) != + (((const uint8_t *)item->last)[i] & mask[i])) { + rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_LAST, + item->last, + "range between \"spec\" and \"last\" not" + " comprised in \"mask\""); + return NULL; + } + } + return mask; +} + +/** + * Transpose flow rule description to rtnetlink message. + * + * This function transposes a flow rule description to a traffic control + * (TC) filter creation message ready to be sent over Netlink. + * + * Target interface is specified as the first entry of the @p ptoi table. + * Subsequent entries enable this function to resolve other DPDK port IDs + * found in the flow rule. + * + * @param[out] buf + * Output message buffer. May be NULL when @p size is 0. + * @param size + * Size of @p buf. Message may be truncated if not large enough. + * @param[in] ptoi + * DPDK port ID to network interface index translation table. This table + * is terminated by an entry with a zero ifindex value. + * @param[in] attr + * Flow rule attributes. + * @param[in] pattern + * Pattern specification. + * @param[in] actions + * Associated actions. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * A positive value representing the exact size of the message in bytes + * regardless of the @p size parameter on success, a negative errno value + * otherwise and rte_errno is set. 
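+ *
+ * @note
+ *   Since the exact size is returned regardless of @p size, a caller may
+ *   measure first and fill in a second pass (illustrative sketch only):
+ *
+ * @code
+ *   int len = mlx5_nl_flow_transpose(NULL, 0, ptoi, attr, pattern,
+ *                                    actions, &error);
+ *
+ *   if (len > 0) {
+ *           void *buf = malloc(len);
+ *
+ *           if (buf)
+ *                   mlx5_nl_flow_transpose(buf, len, ptoi, attr,
+ *                                          pattern, actions, &error);
+ *   }
+ * @endcode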
+ */ +int +mlx5_nl_flow_transpose(void *buf, + size_t size, + const struct mlx5_nl_flow_ptoi *ptoi, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + struct rte_flow_error *error) +{ + alignas(struct nlmsghdr) + uint8_t buf_tmp[mnl_nlmsg_size(sizeof(struct tcmsg) + 1024)]; + const struct rte_flow_item *item; + const struct rte_flow_action *action; + unsigned int n; + uint32_t act_index_cur; + bool in_port_id_set; + bool eth_type_set; + bool vlan_present; + bool vlan_eth_type_set; + bool ip_proto_set; + struct nlattr *na_flower; + struct nlattr *na_flower_act; + struct nlattr *na_vlan_id; + struct nlattr *na_vlan_priority; + const enum mlx5_nl_flow_trans *trans; + const enum mlx5_nl_flow_trans *back; + + if (!size) + goto error_nobufs; +init: + item = pattern; + action = actions; + n = 0; + act_index_cur = 0; + in_port_id_set = false; + eth_type_set = false; + vlan_present = false; + vlan_eth_type_set = false; + ip_proto_set = false; + na_flower = NULL; + na_flower_act = NULL; + na_vlan_id = NULL; + na_vlan_priority = NULL; + trans = TRANS(ATTR); + back = trans; +trans: + switch (trans[n++]) { + union { + const struct rte_flow_item_port_id *port_id; + const struct rte_flow_item_eth *eth; + const struct rte_flow_item_vlan *vlan; + const struct rte_flow_item_ipv4 *ipv4; + const struct rte_flow_item_ipv6 *ipv6; + const struct rte_flow_item_tcp *tcp; + const struct rte_flow_item_udp *udp; + } spec, mask; + union { + const struct rte_flow_action_port_id *port_id; + const struct rte_flow_action_of_push_vlan *of_push_vlan; + const struct rte_flow_action_of_set_vlan_vid * + of_set_vlan_vid; + const struct rte_flow_action_of_set_vlan_pcp * + of_set_vlan_pcp; + } conf; + struct nlmsghdr *nlh; + struct tcmsg *tcm; + struct nlattr *act_index; + struct nlattr *act; + unsigned int i; + + case INVALID: + if (item->type) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, "unsupported pattern item combination"); + else if (action->type) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + action, "unsupported action combination"); + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "flow rule lacks some kind of fate action"); + case BACK: + trans = back; + n = 0; + goto trans; + case ATTR: + /* + * Supported attributes: no groups, some priorities and + * ingress only. Don't care about transfer as it is the + * caller's problem. + */ + if (attr->group) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "groups are not supported"); + if (attr->priority > 0xfffe) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "lowest priority level is 0xfffe"); + if (!attr->ingress) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "only ingress is supported"); + if (attr->egress) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "egress is not supported"); + if (size < mnl_nlmsg_size(sizeof(*tcm))) + goto error_nobufs; + nlh = mnl_nlmsg_put_header(buf); + nlh->nlmsg_type = 0; + nlh->nlmsg_flags = 0; + nlh->nlmsg_seq = 0; + tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm)); + tcm->tcm_family = AF_UNSPEC; + tcm->tcm_ifindex = ptoi[0].ifindex; + /* + * Let kernel pick a handle by default. A predictable handle + * can be set by the caller on the resulting buffer through + * mlx5_nl_flow_brand(). 
+ */ + tcm->tcm_handle = 0; + tcm->tcm_parent = TC_H_MAKE(TC_H_INGRESS, TC_H_MIN_INGRESS); + /* + * Priority cannot be zero to prevent the kernel from + * picking one automatically. + */ + tcm->tcm_info = TC_H_MAKE((attr->priority + 1) << 16, + RTE_BE16(ETH_P_ALL)); + break; + case PATTERN: + if (!mnl_attr_put_strz_check(buf, size, TCA_KIND, "flower")) + goto error_nobufs; + na_flower = mnl_attr_nest_start_check(buf, size, TCA_OPTIONS); + if (!na_flower) + goto error_nobufs; + if (!mnl_attr_put_u32_check(buf, size, TCA_FLOWER_FLAGS, + TCA_CLS_FLAGS_SKIP_SW)) + goto error_nobufs; + break; + case ITEM_VOID: + if (item->type != RTE_FLOW_ITEM_TYPE_VOID) + goto trans; + ++item; + break; + case ITEM_PORT_ID: + if (item->type != RTE_FLOW_ITEM_TYPE_PORT_ID) + goto trans; + mask.port_id = mlx5_nl_flow_item_mask + (item, &rte_flow_item_port_id_mask, + &mlx5_nl_flow_mask_supported.port_id, + &mlx5_nl_flow_mask_empty.port_id, + sizeof(mlx5_nl_flow_mask_supported.port_id), error); + if (!mask.port_id) + return -rte_errno; + if (mask.port_id == &mlx5_nl_flow_mask_empty.port_id) { + in_port_id_set = 1; + ++item; + break; + } + spec.port_id = item->spec; + if (mask.port_id->id && mask.port_id->id != 0xffffffff) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask.port_id, + "no support for partial mask on" + " \"id\" field"); + if (!mask.port_id->id) + i = 0; + else + for (i = 0; ptoi[i].ifindex; ++i) + if (ptoi[i].port_id == spec.port_id->id) + break; + if (!ptoi[i].ifindex) + return rte_flow_error_set + (error, ENODEV, RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + spec.port_id, + "missing data to convert port ID to ifindex"); + tcm = mnl_nlmsg_get_payload(buf); + if (in_port_id_set && + ptoi[i].ifindex != (unsigned int)tcm->tcm_ifindex) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + spec.port_id, + "cannot match traffic for several port IDs" + " through a single flow rule"); + tcm->tcm_ifindex = ptoi[i].ifindex; + in_port_id_set = 1; + ++item; + break; + case ITEM_ETH: + if (item->type != RTE_FLOW_ITEM_TYPE_ETH) + goto trans; + mask.eth = mlx5_nl_flow_item_mask + (item, &rte_flow_item_eth_mask, + &mlx5_nl_flow_mask_supported.eth, + &mlx5_nl_flow_mask_empty.eth, + sizeof(mlx5_nl_flow_mask_supported.eth), error); + if (!mask.eth) + return -rte_errno; + if (mask.eth == &mlx5_nl_flow_mask_empty.eth) { + ++item; + break; + } + spec.eth = item->spec; + if (mask.eth->type && mask.eth->type != RTE_BE16(0xffff)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask.eth, + "no support for partial mask on" + " \"type\" field"); + if (mask.eth->type) { + if (!mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_ETH_TYPE, + spec.eth->type)) + goto error_nobufs; + eth_type_set = 1; + } + if ((!is_zero_ether_addr(&mask.eth->dst) && + (!mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_ETH_DST, + ETHER_ADDR_LEN, + spec.eth->dst.addr_bytes) || + !mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_ETH_DST_MASK, + ETHER_ADDR_LEN, + mask.eth->dst.addr_bytes))) || + (!is_zero_ether_addr(&mask.eth->src) && + (!mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_ETH_SRC, + ETHER_ADDR_LEN, + spec.eth->src.addr_bytes) || + !mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_ETH_SRC_MASK, + ETHER_ADDR_LEN, + mask.eth->src.addr_bytes)))) + goto error_nobufs; + ++item; + break; + case ITEM_VLAN: + if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) + goto trans; + mask.vlan = mlx5_nl_flow_item_mask + (item, &rte_flow_item_vlan_mask, + &mlx5_nl_flow_mask_supported.vlan, + 
&mlx5_nl_flow_mask_empty.vlan, + sizeof(mlx5_nl_flow_mask_supported.vlan), error); + if (!mask.vlan) + return -rte_errno; + if (!eth_type_set && + !mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_ETH_TYPE, + RTE_BE16(ETH_P_8021Q))) + goto error_nobufs; + eth_type_set = 1; + vlan_present = 1; + if (mask.vlan == &mlx5_nl_flow_mask_empty.vlan) { + ++item; + break; + } + spec.vlan = item->spec; + if ((mask.vlan->tci & RTE_BE16(0xe000) && + (mask.vlan->tci & RTE_BE16(0xe000)) != RTE_BE16(0xe000)) || + (mask.vlan->tci & RTE_BE16(0x0fff) && + (mask.vlan->tci & RTE_BE16(0x0fff)) != RTE_BE16(0x0fff)) || + (mask.vlan->inner_type && + mask.vlan->inner_type != RTE_BE16(0xffff))) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask.vlan, + "no support for partial masks on" + " \"tci\" (PCP and VID parts) and" + " \"inner_type\" fields"); + if (mask.vlan->inner_type) { + if (!mnl_attr_put_u16_check + (buf, size, TCA_FLOWER_KEY_VLAN_ETH_TYPE, + spec.vlan->inner_type)) + goto error_nobufs; + vlan_eth_type_set = 1; + } + if ((mask.vlan->tci & RTE_BE16(0xe000) && + !mnl_attr_put_u8_check + (buf, size, TCA_FLOWER_KEY_VLAN_PRIO, + (rte_be_to_cpu_16(spec.vlan->tci) >> 13) & 0x7)) || + (mask.vlan->tci & RTE_BE16(0x0fff) && + !mnl_attr_put_u16_check + (buf, size, TCA_FLOWER_KEY_VLAN_ID, + rte_be_to_cpu_16(spec.vlan->tci & RTE_BE16(0x0fff))))) + goto error_nobufs; + ++item; + break; + case ITEM_IPV4: + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) + goto trans; + mask.ipv4 = mlx5_nl_flow_item_mask + (item, &rte_flow_item_ipv4_mask, + &mlx5_nl_flow_mask_supported.ipv4, + &mlx5_nl_flow_mask_empty.ipv4, + sizeof(mlx5_nl_flow_mask_supported.ipv4), error); + if (!mask.ipv4) + return -rte_errno; + if ((!eth_type_set || !vlan_eth_type_set) && + !mnl_attr_put_u16_check(buf, size, + vlan_present ? + TCA_FLOWER_KEY_VLAN_ETH_TYPE : + TCA_FLOWER_KEY_ETH_TYPE, + RTE_BE16(ETH_P_IP))) + goto error_nobufs; + eth_type_set = 1; + vlan_eth_type_set = 1; + if (mask.ipv4 == &mlx5_nl_flow_mask_empty.ipv4) { + ++item; + break; + } + spec.ipv4 = item->spec; + if (mask.ipv4->hdr.next_proto_id && + mask.ipv4->hdr.next_proto_id != 0xff) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask.ipv4, + "no support for partial mask on" + " \"hdr.next_proto_id\" field"); + if (mask.ipv4->hdr.next_proto_id) { + if (!mnl_attr_put_u8_check + (buf, size, TCA_FLOWER_KEY_IP_PROTO, + spec.ipv4->hdr.next_proto_id)) + goto error_nobufs; + ip_proto_set = 1; + } + if ((mask.ipv4->hdr.src_addr && + (!mnl_attr_put_u32_check(buf, size, + TCA_FLOWER_KEY_IPV4_SRC, + spec.ipv4->hdr.src_addr) || + !mnl_attr_put_u32_check(buf, size, + TCA_FLOWER_KEY_IPV4_SRC_MASK, + mask.ipv4->hdr.src_addr))) || + (mask.ipv4->hdr.dst_addr && + (!mnl_attr_put_u32_check(buf, size, + TCA_FLOWER_KEY_IPV4_DST, + spec.ipv4->hdr.dst_addr) || + !mnl_attr_put_u32_check(buf, size, + TCA_FLOWER_KEY_IPV4_DST_MASK, + mask.ipv4->hdr.dst_addr)))) + goto error_nobufs; + ++item; + break; + case ITEM_IPV6: + if (item->type != RTE_FLOW_ITEM_TYPE_IPV6) + goto trans; + mask.ipv6 = mlx5_nl_flow_item_mask + (item, &rte_flow_item_ipv6_mask, + &mlx5_nl_flow_mask_supported.ipv6, + &mlx5_nl_flow_mask_empty.ipv6, + sizeof(mlx5_nl_flow_mask_supported.ipv6), error); + if (!mask.ipv6) + return -rte_errno; + if ((!eth_type_set || !vlan_eth_type_set) && + !mnl_attr_put_u16_check(buf, size, + vlan_present ? 
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE : + TCA_FLOWER_KEY_ETH_TYPE, + RTE_BE16(ETH_P_IPV6))) + goto error_nobufs; + eth_type_set = 1; + vlan_eth_type_set = 1; + if (mask.ipv6 == &mlx5_nl_flow_mask_empty.ipv6) { + ++item; + break; + } + spec.ipv6 = item->spec; + if (mask.ipv6->hdr.proto && mask.ipv6->hdr.proto != 0xff) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask.ipv6, + "no support for partial mask on" + " \"hdr.proto\" field"); + if (mask.ipv6->hdr.proto) { + if (!mnl_attr_put_u8_check + (buf, size, TCA_FLOWER_KEY_IP_PROTO, + spec.ipv6->hdr.proto)) + goto error_nobufs; + ip_proto_set = 1; + } + if ((!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.src_addr) && + (!mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_IPV6_SRC, + sizeof(spec.ipv6->hdr.src_addr), + spec.ipv6->hdr.src_addr) || + !mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_IPV6_SRC_MASK, + sizeof(mask.ipv6->hdr.src_addr), + mask.ipv6->hdr.src_addr))) || + (!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.dst_addr) && + (!mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_IPV6_DST, + sizeof(spec.ipv6->hdr.dst_addr), + spec.ipv6->hdr.dst_addr) || + !mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_IPV6_DST_MASK, + sizeof(mask.ipv6->hdr.dst_addr), + mask.ipv6->hdr.dst_addr)))) + goto error_nobufs; + ++item; + break; + case ITEM_TCP: + if (item->type != RTE_FLOW_ITEM_TYPE_TCP) + goto trans; + mask.tcp = mlx5_nl_flow_item_mask + (item, &rte_flow_item_tcp_mask, + &mlx5_nl_flow_mask_supported.tcp, + &mlx5_nl_flow_mask_empty.tcp, + sizeof(mlx5_nl_flow_mask_supported.tcp), error); + if (!mask.tcp) + return -rte_errno; + if (!ip_proto_set && + !mnl_attr_put_u8_check(buf, size, + TCA_FLOWER_KEY_IP_PROTO, + IPPROTO_TCP)) + goto error_nobufs; + if (mask.tcp == &mlx5_nl_flow_mask_empty.tcp) { + ++item; + break; + } + spec.tcp = item->spec; + if ((mask.tcp->hdr.src_port && + mask.tcp->hdr.src_port != RTE_BE16(0xffff)) || + (mask.tcp->hdr.dst_port && + mask.tcp->hdr.dst_port != RTE_BE16(0xffff))) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask.tcp, + "no support for partial masks on" + " \"hdr.src_port\" and \"hdr.dst_port\"" + " fields"); + if ((mask.tcp->hdr.src_port && + (!mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_TCP_SRC, + spec.tcp->hdr.src_port) || + !mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_TCP_SRC_MASK, + mask.tcp->hdr.src_port))) || + (mask.tcp->hdr.dst_port && + (!mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_TCP_DST, + spec.tcp->hdr.dst_port) || + !mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_TCP_DST_MASK, + mask.tcp->hdr.dst_port)))) + goto error_nobufs; + ++item; + break; + case ITEM_UDP: + if (item->type != RTE_FLOW_ITEM_TYPE_UDP) + goto trans; + mask.udp = mlx5_nl_flow_item_mask + (item, &rte_flow_item_udp_mask, + &mlx5_nl_flow_mask_supported.udp, + &mlx5_nl_flow_mask_empty.udp, + sizeof(mlx5_nl_flow_mask_supported.udp), error); + if (!mask.udp) + return -rte_errno; + if (!ip_proto_set && + !mnl_attr_put_u8_check(buf, size, + TCA_FLOWER_KEY_IP_PROTO, + IPPROTO_UDP)) + goto error_nobufs; + if (mask.udp == &mlx5_nl_flow_mask_empty.udp) { + ++item; + break; + } + spec.udp = item->spec; + if ((mask.udp->hdr.src_port && + mask.udp->hdr.src_port != RTE_BE16(0xffff)) || + (mask.udp->hdr.dst_port && + mask.udp->hdr.dst_port != RTE_BE16(0xffff))) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask.udp, + "no support for partial masks on" + " \"hdr.src_port\" and \"hdr.dst_port\"" + " fields"); + if ((mask.udp->hdr.src_port && + 
(!mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_UDP_SRC, + spec.udp->hdr.src_port) || + !mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_UDP_SRC_MASK, + mask.udp->hdr.src_port))) || + (mask.udp->hdr.dst_port && + (!mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_UDP_DST, + spec.udp->hdr.dst_port) || + !mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_UDP_DST_MASK, + mask.udp->hdr.dst_port)))) + goto error_nobufs; + ++item; + break; + case ACTIONS: + if (item->type != RTE_FLOW_ITEM_TYPE_END) + goto trans; + assert(na_flower); + assert(!na_flower_act); + na_flower_act = + mnl_attr_nest_start_check(buf, size, TCA_FLOWER_ACT); + if (!na_flower_act) + goto error_nobufs; + act_index_cur = 1; + break; + case ACTION_VOID: + if (action->type != RTE_FLOW_ACTION_TYPE_VOID) + goto trans; + ++action; + break; + case ACTION_PORT_ID: + if (action->type != RTE_FLOW_ACTION_TYPE_PORT_ID) + goto trans; + conf.port_id = action->conf; + if (conf.port_id->original) + i = 0; + else + for (i = 0; ptoi[i].ifindex; ++i) + if (ptoi[i].port_id == conf.port_id->id) + break; + if (!ptoi[i].ifindex) + return rte_flow_error_set + (error, ENODEV, RTE_FLOW_ERROR_TYPE_ACTION_CONF, + conf.port_id, + "missing data to convert port ID to ifindex"); + act_index = + mnl_attr_nest_start_check(buf, size, act_index_cur++); + if (!act_index || + !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "mirred")) + goto error_nobufs; + act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS); + if (!act) + goto error_nobufs; + if (!mnl_attr_put_check(buf, size, TCA_MIRRED_PARMS, + sizeof(struct tc_mirred), + &(struct tc_mirred){ + .action = TC_ACT_STOLEN, + .eaction = TCA_EGRESS_REDIR, + .ifindex = ptoi[i].ifindex, + })) + goto error_nobufs; + mnl_attr_nest_end(buf, act); + mnl_attr_nest_end(buf, act_index); + ++action; + break; + case ACTION_DROP: + if (action->type != RTE_FLOW_ACTION_TYPE_DROP) + goto trans; + act_index = + mnl_attr_nest_start_check(buf, size, act_index_cur++); + if (!act_index || + !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "gact")) + goto error_nobufs; + act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS); + if (!act) + goto error_nobufs; + if (!mnl_attr_put_check(buf, size, TCA_GACT_PARMS, + sizeof(struct tc_gact), + &(struct tc_gact){ + .action = TC_ACT_SHOT, + })) + goto error_nobufs; + mnl_attr_nest_end(buf, act); + mnl_attr_nest_end(buf, act_index); + ++action; + break; + case ACTION_OF_POP_VLAN: + if (action->type != RTE_FLOW_ACTION_TYPE_OF_POP_VLAN) + goto trans; + conf.of_push_vlan = NULL; + i = TCA_VLAN_ACT_POP; + goto action_of_vlan; + case ACTION_OF_PUSH_VLAN: + if (action->type != RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) + goto trans; + conf.of_push_vlan = action->conf; + i = TCA_VLAN_ACT_PUSH; + goto action_of_vlan; + case ACTION_OF_SET_VLAN_VID: + if (action->type != RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) + goto trans; + conf.of_set_vlan_vid = action->conf; + if (na_vlan_id) + goto override_na_vlan_id; + i = TCA_VLAN_ACT_MODIFY; + goto action_of_vlan; + case ACTION_OF_SET_VLAN_PCP: + if (action->type != RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) + goto trans; + conf.of_set_vlan_pcp = action->conf; + if (na_vlan_priority) + goto override_na_vlan_priority; + i = TCA_VLAN_ACT_MODIFY; + goto action_of_vlan; +action_of_vlan: + act_index = + mnl_attr_nest_start_check(buf, size, act_index_cur++); + if (!act_index || + !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "vlan")) + goto error_nobufs; + act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS); + if (!act) + goto error_nobufs; + if 
(!mnl_attr_put_check(buf, size, TCA_VLAN_PARMS, + sizeof(struct tc_vlan), + &(struct tc_vlan){ + .action = TC_ACT_PIPE, + .v_action = i, + })) + goto error_nobufs; + if (i == TCA_VLAN_ACT_POP) { + mnl_attr_nest_end(buf, act); + mnl_attr_nest_end(buf, act_index); + ++action; + break; + } + if (i == TCA_VLAN_ACT_PUSH && + !mnl_attr_put_u16_check(buf, size, + TCA_VLAN_PUSH_VLAN_PROTOCOL, + conf.of_push_vlan->ethertype)) + goto error_nobufs; + na_vlan_id = mnl_nlmsg_get_payload_tail(buf); + if (!mnl_attr_put_u16_check(buf, size, TCA_VLAN_PAD, 0)) + goto error_nobufs; + na_vlan_priority = mnl_nlmsg_get_payload_tail(buf); + if (!mnl_attr_put_u8_check(buf, size, TCA_VLAN_PAD, 0)) + goto error_nobufs; + mnl_attr_nest_end(buf, act); + mnl_attr_nest_end(buf, act_index); + if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) { +override_na_vlan_id: + na_vlan_id->nla_type = TCA_VLAN_PUSH_VLAN_ID; + *(uint16_t *)mnl_attr_get_payload(na_vlan_id) = + rte_be_to_cpu_16 + (conf.of_set_vlan_vid->vlan_vid); + } else if (action->type == + RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) { +override_na_vlan_priority: + na_vlan_priority->nla_type = + TCA_VLAN_PUSH_VLAN_PRIORITY; + *(uint8_t *)mnl_attr_get_payload(na_vlan_priority) = + conf.of_set_vlan_pcp->vlan_pcp; + } + ++action; + break; + case END: + if (item->type != RTE_FLOW_ITEM_TYPE_END || + action->type != RTE_FLOW_ACTION_TYPE_END) + goto trans; + if (na_flower_act) + mnl_attr_nest_end(buf, na_flower_act); + if (na_flower) + mnl_attr_nest_end(buf, na_flower); + nlh = buf; + return nlh->nlmsg_len; + } + back = trans; + trans = mlx5_nl_flow_trans[trans[n - 1]]; + n = 0; + goto trans; +error_nobufs: + if (buf != buf_tmp) { + buf = buf_tmp; + size = sizeof(buf_tmp); + goto init; + } + return rte_flow_error_set + (error, ENOBUFS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "generated TC message is too large"); +} + +/** + * Brand rtnetlink buffer with unique handle. + * + * This handle should be unique for a given network interface to avoid + * collisions. + * + * @param buf + * Flow rule buffer previously initialized by mlx5_nl_flow_transpose(). + * @param handle + * Unique 32-bit handle to use. + */ +void +mlx5_nl_flow_brand(void *buf, uint32_t handle) +{ + struct tcmsg *tcm = mnl_nlmsg_get_payload(buf); + + tcm->tcm_handle = handle; +} + +/** + * Send Netlink message with acknowledgment. + * + * @param nl + * Libmnl socket to use. + * @param nlh + * Message to send. This function always raises the NLM_F_ACK flag before + * sending. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_nl_flow_nl_ack(struct mnl_socket *nl, struct nlmsghdr *nlh) +{ + alignas(struct nlmsghdr) + uint8_t ans[mnl_nlmsg_size(sizeof(struct nlmsgerr)) + + nlh->nlmsg_len - sizeof(*nlh)]; + uint32_t seq = random(); + int ret; + + nlh->nlmsg_flags |= NLM_F_ACK; + nlh->nlmsg_seq = seq; + ret = mnl_socket_sendto(nl, nlh, nlh->nlmsg_len); + if (ret != -1) + ret = mnl_socket_recvfrom(nl, ans, sizeof(ans)); + if (ret != -1) + ret = mnl_cb_run + (ans, ret, seq, mnl_socket_get_portid(nl), NULL, NULL); + if (!ret) + return 0; + rte_errno = errno; + return -rte_errno; +} + +/** + * Create a Netlink flow rule. + * + * @param nl + * Libmnl socket to use. + * @param buf + * Flow rule buffer previously initialized by mlx5_nl_flow_transpose(). + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
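
A minimal usage sketch of the helpers defined here, hedged and not part of the patch itself: a caller is expected to transpose the rte_flow rule into "buf" via mlx5_nl_flow_transpose(), brand it, then create the TC rule through mlx5_nl_flow_create() (defined just below). The function name example_install_rule is illustrative only:

    #include <stdint.h>
    #include <libmnl/libmnl.h>
    #include <rte_errno.h>
    #include <rte_flow.h>

    /* Hypothetical caller showing the intended call order. */
    static int
    example_install_rule(struct mnl_socket *nl, void *buf, uint32_t handle)
    {
            struct rte_flow_error error;

            /* Stamp the rule with a handle unique to the interface. */
            mlx5_nl_flow_brand(buf, handle);
            /* Send RTM_NEWTFILTER and wait for the kernel acknowledgment. */
            if (mlx5_nl_flow_create(nl, buf, &error))
                    return -rte_errno; /* Set by the callee on failure. */
            return 0;
    }
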
+ */ +int +mlx5_nl_flow_create(struct mnl_socket *nl, void *buf, + struct rte_flow_error *error) +{ + struct nlmsghdr *nlh = buf; + + nlh->nlmsg_type = RTM_NEWTFILTER; + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL; + if (!mlx5_nl_flow_nl_ack(nl, nlh)) + return 0; + return rte_flow_error_set + (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "netlink: failed to create TC flow rule"); +} + +/** + * Destroy a Netlink flow rule. + * + * @param nl + * Libmnl socket to use. + * @param buf + * Flow rule buffer previously initialized by mlx5_nl_flow_transpose(). + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf, + struct rte_flow_error *error) +{ + struct nlmsghdr *nlh = buf; + + nlh->nlmsg_type = RTM_DELTFILTER; + nlh->nlmsg_flags = NLM_F_REQUEST; + if (!mlx5_nl_flow_nl_ack(nl, nlh)) + return 0; + return rte_flow_error_set + (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "netlink: failed to destroy TC flow rule"); +} + +/** + * Initialize ingress qdisc of a given network interface. + * + * @param nl + * Libmnl socket of the @p NETLINK_ROUTE kind. + * @param ifindex + * Index of network interface to initialize. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex, + struct rte_flow_error *error) +{ + struct nlmsghdr *nlh; + struct tcmsg *tcm; + alignas(struct nlmsghdr) + uint8_t buf[mnl_nlmsg_size(sizeof(*tcm) + 128)]; + + /* Destroy existing ingress qdisc and everything attached to it. */ + nlh = mnl_nlmsg_put_header(buf); + nlh->nlmsg_type = RTM_DELQDISC; + nlh->nlmsg_flags = NLM_F_REQUEST; + tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm)); + tcm->tcm_family = AF_UNSPEC; + tcm->tcm_ifindex = ifindex; + tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0); + tcm->tcm_parent = TC_H_INGRESS; + /* Ignore errors when qdisc is already absent. */ + if (mlx5_nl_flow_nl_ack(nl, nlh) && + rte_errno != EINVAL && rte_errno != ENOENT) + return rte_flow_error_set + (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "netlink: failed to remove ingress qdisc"); + /* Create fresh ingress qdisc. */ + nlh = mnl_nlmsg_put_header(buf); + nlh->nlmsg_type = RTM_NEWQDISC; + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL; + tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm)); + tcm->tcm_family = AF_UNSPEC; + tcm->tcm_ifindex = ifindex; + tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0); + tcm->tcm_parent = TC_H_INGRESS; + mnl_attr_put_strz_check(nlh, sizeof(buf), TCA_KIND, "ingress"); + if (mlx5_nl_flow_nl_ack(nl, nlh)) + return rte_flow_error_set + (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "netlink: failed to create ingress qdisc"); + return 0; +} + +/** + * Create and configure a libmnl socket for Netlink flow rules. + * + * @return + * A valid libmnl socket object pointer on success, NULL otherwise and + * rte_errno is set. 
+ */ +struct mnl_socket * +mlx5_nl_flow_socket_create(void) +{ + struct mnl_socket *nl = mnl_socket_open(NETLINK_ROUTE); + + if (nl) { + mnl_socket_setsockopt(nl, NETLINK_CAP_ACK, &(int){ 1 }, + sizeof(int)); + if (!mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID)) + return nl; + } + rte_errno = errno; + if (nl) + mnl_socket_close(nl); + return NULL; +} + +/** + * Destroy a libmnl socket. + */ +void +mlx5_nl_flow_socket_destroy(struct mnl_socket *nl) +{ + mnl_socket_close(nl); +} diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h index 9eb9c15e..0870d32f 100644 --- a/drivers/net/mlx5/mlx5_prm.h +++ b/drivers/net/mlx5/mlx5_prm.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2016 6WIND S.A. - * Copyright 2016 Mellanox. + * Copyright 2016 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_PRM_H_ @@ -21,6 +21,9 @@ #include #include "mlx5_autoconf.h" +/* RSS hash key size. */ +#define MLX5_RSS_HASH_KEY_LEN 40 + /* Get CQE owner bit. */ #define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK) @@ -107,6 +110,30 @@ /* Inner L4 checksum offload (Tunneled packets only). */ #define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5) +/* Outer L4 type is TCP. */ +#define MLX5_ETH_WQE_L4_OUTER_TCP (0u << 5) + +/* Outer L4 type is UDP. */ +#define MLX5_ETH_WQE_L4_OUTER_UDP (1u << 5) + +/* Outer L3 type is IPV4. */ +#define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4) + +/* Outer L3 type is IPV6. */ +#define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4) + +/* Inner L4 type is TCP. */ +#define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1) + +/* Inner L4 type is UDP. */ +#define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1) + +/* Inner L3 type is IPV4. */ +#define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0) + +/* Inner L3 type is IPV6. */ +#define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0) + /* Is flow mark valid. */ #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN #define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00) @@ -195,13 +222,30 @@ struct mlx5_mpw { } data; }; +/* WQE for Multi-Packet RQ. */ +struct mlx5_wqe_mprq { + struct mlx5_wqe_srq_next_seg next_seg; + struct mlx5_wqe_data_seg dseg; +}; + +#define MLX5_MPRQ_LEN_MASK 0x000ffff +#define MLX5_MPRQ_LEN_SHIFT 0 +#define MLX5_MPRQ_STRIDE_NUM_MASK 0x3fff0000 +#define MLX5_MPRQ_STRIDE_NUM_SHIFT 16 +#define MLX5_MPRQ_FILLER_MASK 0x80000000 +#define MLX5_MPRQ_FILLER_SHIFT 31 + +#define MLX5_MPRQ_STRIDE_SHIFT_BYTE 2 + /* CQ element structure - should be equal to the cache line size */ struct mlx5_cqe { #if (RTE_CACHE_LINE_SIZE == 128) uint8_t padding[64]; #endif uint8_t pkt_info; - uint8_t rsvd0[11]; + uint8_t rsvd0; + uint16_t wqe_id; + uint8_t rsvd3[8]; uint32_t rx_hash_res; uint8_t rx_hash_type; uint8_t rsvd1[11]; @@ -246,7 +290,10 @@ struct mlx5_cqe { struct mlx5_mini_cqe8 { union { uint32_t rx_hash_result; - uint32_t checksum; + struct { + uint16_t checksum; + uint16_t stride_idx; + }; struct { uint16_t wqe_counter; uint8_t s_wqe_opcode; diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c index d06b0bee..b95778a8 100644 --- a/drivers/net/mlx5/mlx5_rss.c +++ b/drivers/net/mlx5/mlx5_rss.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #include @@ -35,35 +35,49 @@ * RSS configuration data. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
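
The return convention referenced above, "0 on success, a negative errno value otherwise and rte_errno is set", replaces the old mixed positive/negative errno returns throughout these files. A minimal sketch of the pattern, with an illustrative function name:

    #include <errno.h>
    #include <rte_errno.h>

    /* Illustrative callee following the new convention. */
    static int
    example_op(const void *arg)
    {
            if (arg == NULL) {
                    rte_errno = EINVAL; /* Record the cause... */
                    return -rte_errno;  /* ...and return its negative. */
            }
            return 0;
    }

Callers can then propagate the failure directly, since rte_errno survives across returns.
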
*/ int mlx5_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { struct priv *priv = dev->data->dev_private; - int ret = 0; + unsigned int i; + unsigned int idx; - priv_lock(priv); if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) { - ret = -EINVAL; - goto out; + rte_errno = EINVAL; + return -rte_errno; } if (rss_conf->rss_key && rss_conf->rss_key_len) { + if (rss_conf->rss_key_len != MLX5_RSS_HASH_KEY_LEN) { + DRV_LOG(ERR, + "port %u RSS key len must be %s Bytes long", + dev->data->port_id, + RTE_STR(MLX5_RSS_HASH_KEY_LEN)); + rte_errno = EINVAL; + return -rte_errno; + } priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key, rss_conf->rss_key_len, 0); if (!priv->rss_conf.rss_key) { - ret = -ENOMEM; - goto out; + rte_errno = ENOMEM; + return -rte_errno; } memcpy(priv->rss_conf.rss_key, rss_conf->rss_key, rss_conf->rss_key_len); priv->rss_conf.rss_key_len = rss_conf->rss_key_len; } priv->rss_conf.rss_hf = rss_conf->rss_hf; -out: - priv_unlock(priv); - return ret; + /* Enable the RSS hash in all Rx queues. */ + for (i = 0, idx = 0; idx != priv->rxqs_n; ++i) { + if (!(*priv->rxqs)[i]) + continue; + (*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf && + !!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS); + ++idx; + } + return 0; } /** @@ -75,7 +89,7 @@ out: * RSS configuration data. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, @@ -83,9 +97,10 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - if (!rss_conf) - return -EINVAL; - priv_lock(priv); + if (!rss_conf) { + rte_errno = EINVAL; + return -rte_errno; + } if (rss_conf->rss_key && (rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) { memcpy(rss_conf->rss_key, priv->rss_conf.rss_key, @@ -93,24 +108,24 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, { } rss_conf->rss_key_len = priv->rss_conf.rss_key_len; rss_conf->rss_hf = priv->rss_conf.rss_hf; - priv_unlock(priv); return 0; } /** * Allocate/reallocate RETA index table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param reta_size * The size of the array to allocate. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) +mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size) { + struct priv *priv = dev->data->dev_private; void *mem; unsigned int old_size = priv->reta_idx_n; @@ -119,11 +134,12 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) mem = rte_realloc(priv->reta_idx, reta_size * sizeof((*priv->reta_idx)[0]), 0); - if (!mem) - return ENOMEM; + if (!mem) { + rte_errno = ENOMEM; + return -rte_errno; + } priv->reta_idx = mem; priv->reta_idx_n = reta_size; - if (old_size < reta_size) memset(&(*priv->reta_idx)[old_size], 0, (reta_size - old_size) * @@ -132,28 +148,31 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) } /** - * Query RETA table. + * DPDK callback to get the RETA indirection table. * - * @param priv - * Pointer to private structure. - * @param[in, out] reta_conf - * Pointer to the first RETA configuration structure. + * @param dev + * Pointer to Ethernet device structure. + * @param reta_conf + * Pointer to RETA configuration structure array. * @param reta_size - * Number of entries.
+ * Size of the RETA table. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int -priv_dev_rss_reta_query(struct priv *priv, +int +mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, - unsigned int reta_size) + uint16_t reta_size) { + struct priv *priv = dev->data->dev_private; unsigned int idx; unsigned int i; - if (!reta_size || reta_size > priv->reta_idx_n) - return EINVAL; + if (!reta_size || reta_size > priv->reta_idx_n) { + rte_errno = EINVAL; + return -rte_errno; + } /* Fill each entry of the table even if its bit is not set. */ for (idx = 0, i = 0; (i != reta_size); ++i) { idx = i / RTE_RETA_GROUP_SIZE; @@ -164,34 +183,36 @@ priv_dev_rss_reta_query(struct priv *priv, } /** - * Update RETA table. + * DPDK callback to update the RETA indirection table. * - * @param priv - * Pointer to private structure. - * @param[in] reta_conf - * Pointer to the first RETA configuration structure. + * @param dev + * Pointer to Ethernet device structure. + * @param reta_conf + * Pointer to RETA configuration structure array. * @param reta_size - * Number of entries. + * Size of the RETA table. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int -priv_dev_rss_reta_update(struct priv *priv, +int +mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, - unsigned int reta_size) + uint16_t reta_size) { + int ret; + struct priv *priv = dev->data->dev_private; unsigned int idx; unsigned int i; unsigned int pos; - int ret; - if (!reta_size) - return EINVAL; - ret = priv_rss_reta_index_resize(priv, reta_size); + if (!reta_size) { + rte_errno = EINVAL; + return -rte_errno; + } + ret = mlx5_rss_reta_index_resize(dev, reta_size); if (ret) return ret; - for (idx = 0, i = 0; (i != reta_size); ++i) { idx = i / RTE_RETA_GROUP_SIZE; pos = i % RTE_RETA_GROUP_SIZE; @@ -200,63 +221,9 @@ priv_dev_rss_reta_update(struct priv *priv, assert(reta_conf[idx].reta[pos] < priv->rxqs_n); (*priv->reta_idx)[i] = reta_conf[idx].reta[pos]; } - return 0; -} - -/** - * DPDK callback to get the RETA indirection table. - * - * @param dev - * Pointer to Ethernet device structure. - * @param reta_conf - * Pointer to RETA configuration structure array. - * @param reta_size - * Size of the RETA table. - * - * @return - * 0 on success, negative errno value on failure. - */ -int -mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, - struct rte_eth_rss_reta_entry64 *reta_conf, - uint16_t reta_size) -{ - int ret; - struct priv *priv = dev->data->dev_private; - - priv_lock(priv); - ret = priv_dev_rss_reta_query(priv, reta_conf, reta_size); - priv_unlock(priv); - return -ret; -} - -/** - * DPDK callback to update the RETA indirection table. - * - * @param dev - * Pointer to Ethernet device structure. - * @param reta_conf - * Pointer to RETA configuration structure array. - * @param reta_size - * Size of the RETA table. - * - * @return - * 0 on success, negative errno value on failure. 
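
Since the RETA handlers are now wired directly as ethdev callbacks, applications reach them through the public API. A hedged sketch, assuming reta_size is a multiple of RTE_RETA_GROUP_SIZE; the helper name is illustrative:

    #include <stdint.h>
    #include <rte_ethdev.h>

    /* Illustrative RETA dump through the public ethdev API. */
    static int
    example_query_reta(uint16_t port_id, uint16_t reta_size)
    {
            struct rte_eth_rss_reta_entry64 conf[reta_size / RTE_RETA_GROUP_SIZE];
            unsigned int i;

            for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; ++i)
                    conf[i].mask = UINT64_MAX; /* Select all 64 entries per group. */
            return rte_eth_dev_rss_reta_query(port_id, conf, reta_size);
    }
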
- */ -int -mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, - struct rte_eth_rss_reta_entry64 *reta_conf, - uint16_t reta_size) -{ - int ret; - struct priv *priv = dev->data->dev_private; - - priv_lock(priv); - ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size); - priv_unlock(priv); if (dev->data->dev_started) { mlx5_dev_stop(dev); - mlx5_dev_start(dev); + return mlx5_dev_start(dev); } - return -ret; + return 0; } diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c index 4ffc869a..e74fdef8 100644 --- a/drivers/net/mlx5/mlx5_rxmode.c +++ b/drivers/net/mlx5/mlx5_rxmode.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #include @@ -32,8 +32,23 @@ void mlx5_promiscuous_enable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; + int ret; + dev->data->promiscuous = 1; - mlx5_traffic_restart(dev); + if (priv->isolated) { + DRV_LOG(WARNING, + "port %u cannot enable promiscuous mode" + " in flow isolation mode", + dev->data->port_id); + return; + } + if (priv->config.vf) + mlx5_nl_promisc(dev, 1); + ret = mlx5_traffic_restart(dev); + if (ret) + DRV_LOG(ERR, "port %u cannot enable promiscuous mode: %s", + dev->data->port_id, strerror(rte_errno)); } /** @@ -45,8 +60,16 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev) void mlx5_promiscuous_disable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; + int ret; + dev->data->promiscuous = 0; - mlx5_traffic_restart(dev); + if (priv->config.vf) + mlx5_nl_promisc(dev, 0); + ret = mlx5_traffic_restart(dev); + if (ret) + DRV_LOG(ERR, "port %u cannot disable promiscuous mode: %s", + dev->data->port_id, strerror(rte_errno)); } /** @@ -58,8 +81,23 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev) void mlx5_allmulticast_enable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; + int ret; + dev->data->all_multicast = 1; - mlx5_traffic_restart(dev); + if (priv->isolated) { + DRV_LOG(WARNING, + "port %u cannot enable allmulticast mode" + " in flow isolation mode", + dev->data->port_id); + return; + } + if (priv->config.vf) + mlx5_nl_allmulti(dev, 1); + ret = mlx5_traffic_restart(dev); + if (ret) + DRV_LOG(ERR, "port %u cannot enable allmulticast mode: %s", + dev->data->port_id, strerror(rte_errno)); } /** @@ -71,6 +109,14 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev) void mlx5_allmulticast_disable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; + int ret; + dev->data->all_multicast = 0; - mlx5_traffic_restart(dev); + if (priv->config.vf) + mlx5_nl_allmulti(dev, 0); + ret = mlx5_traffic_restart(dev); + if (ret) + DRV_LOG(ERR, "port %u cannot disable allmulticast mode: %s", + dev->data->port_id, strerror(rte_errno)); } diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index ff58c492..1f7bfd44 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #include @@ -52,10 +52,129 @@ uint8_t rss_hash_default_key[] = { }; /* Length of the default RSS hash key. */ -const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key); +static_assert(MLX5_RSS_HASH_KEY_LEN == + (unsigned int)sizeof(rss_hash_default_key), + "wrong RSS default key size."); /** - * Allocate RX queue elements.
+ * Check whether Multi-Packet RQ can be enabled for the device. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 1 if supported, negative errno value if not. + */ +inline int +mlx5_check_mprq_support(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + + if (priv->config.mprq.enabled && + priv->rxqs_n >= priv->config.mprq.min_rxqs_num) + return 1; + return -ENOTSUP; +} + +/** + * Check whether Multi-Packet RQ is enabled for the Rx queue. + * + * @param rxq + * Pointer to receive queue structure. + * + * @return + * 0 if disabled, otherwise enabled. + */ +inline int +mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq) +{ + return rxq->strd_num_n > 0; +} + +/** + * Check whether Multi-Packet RQ is enabled for the device. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 if disabled, otherwise enabled. + */ +inline int +mlx5_mprq_enabled(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + uint16_t i; + uint16_t n = 0; + + if (mlx5_check_mprq_support(dev) < 0) + return 0; + /* All the configured queues should be enabled. */ + for (i = 0; i < priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + + if (!rxq) + continue; + if (mlx5_rxq_mprq_enabled(rxq)) + ++n; + } + /* Multi-Packet RQ can't be partially configured. */ + assert(n == 0 || n == priv->rxqs_n); + return n == priv->rxqs_n; +} + +/** + * Allocate RX queue elements for Multi-Packet RQ. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; + unsigned int wqe_n = 1 << rxq->elts_n; + unsigned int i; + int err; + + /* Iterate on segments. */ + for (i = 0; i <= wqe_n; ++i) { + struct mlx5_mprq_buf *buf; + + if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) { + DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id); + rte_errno = ENOMEM; + goto error; + } + if (i < wqe_n) + (*rxq->mprq_bufs)[i] = buf; + else + rxq->mprq_repl = buf; + } + DRV_LOG(DEBUG, + "port %u Rx queue %u allocated and configured %u segments", + rxq->port_id, rxq_ctrl->idx, wqe_n); + return 0; +error: + err = rte_errno; /* Save rte_errno before cleanup. */ + wqe_n = i; + for (i = 0; (i != wqe_n); ++i) { + if ((*rxq->mprq_bufs)[i] != NULL) + rte_mempool_put(rxq->mprq_mp, + (*rxq->mprq_bufs)[i]); + (*rxq->mprq_bufs)[i] = NULL; + } + DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything", + rxq->port_id, rxq_ctrl->idx); + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; +} + +/** + * Allocate RX queue elements for Single-Packet RQ. * * @param rxq_ctrl * Pointer to RX queue structure. @@ -63,13 +182,13 @@ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key); * @return * 0 on success, errno value on failure. */ -int -rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +static int +rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) { const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n; unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n; unsigned int i; - int ret = 0; + int err; /* Iterate on segments. 
*/ for (i = 0; (i != elts_n); ++i) { @@ -77,8 +196,9 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp); if (buf == NULL) { - ERROR("%p: empty mbuf pool", (void *)rxq_ctrl); - ret = ENOMEM; + DRV_LOG(ERR, "port %u empty mbuf pool", + PORT_ID(rxq_ctrl->priv)); + rte_errno = ENOMEM; goto error; } /* Headroom is reserved by rte_pktmbuf_alloc(). */ @@ -97,7 +217,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) (*rxq_ctrl->rxq.elts)[i] = buf; } /* If Rx vector is activated. */ - if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { + if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; struct rte_mbuf *mbuf_init = &rxq->fake_mbuf; int j; @@ -118,30 +238,78 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j) (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf; } - DEBUG("%p: allocated and configured %u segments (max %u packets)", - (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n)); - assert(ret == 0); + DRV_LOG(DEBUG, + "port %u Rx queue %u allocated and configured %u segments" + " (max %u packets)", + PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n, + elts_n / (1 << rxq_ctrl->rxq.sges_n)); return 0; error: + err = rte_errno; /* Save rte_errno before cleanup. */ elts_n = i; for (i = 0; (i != elts_n); ++i) { if ((*rxq_ctrl->rxq.elts)[i] != NULL) rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]); (*rxq_ctrl->rxq.elts)[i] = NULL; } - DEBUG("%p: failed, freed everything", (void *)rxq_ctrl); - assert(ret > 0); - return ret; + DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything", + PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx); + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; } /** - * Free RX queue elements. + * Allocate RX queue elements. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + * + * @return + * 0 on success, errno value on failure. + */ +int +rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? + rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl); +} + +/** + * Free RX queue elements for Multi-Packet RQ. * * @param rxq_ctrl + * Pointer to RX queue structure. + */ +static void +rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; + uint16_t i; + + DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs", + rxq->port_id, rxq_ctrl->idx); + if (rxq->mprq_bufs == NULL) + return; + assert(mlx5_rxq_check_vec_support(rxq) < 0); + for (i = 0; (i != (1u << rxq->elts_n)); ++i) { + if ((*rxq->mprq_bufs)[i] != NULL) + mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]); + (*rxq->mprq_bufs)[i] = NULL; + } + if (rxq->mprq_repl != NULL) { + mlx5_mprq_buf_free(rxq->mprq_repl); + rxq->mprq_repl = NULL; + } +} + +/** + * Free RX queue elements for Single-Packet RQ. + * + * @param rxq_ctrl * Pointer to RX queue structure. */ static void -rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) { struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; const uint16_t q_n = (1 << rxq->elts_n); @@ -149,14 +317,15 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi); uint16_t i; - DEBUG("%p: freeing WRs", (void *)rxq_ctrl); + DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs", + PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx); if (rxq->elts == NULL) return; /** * Some mbufs in the ring belong to the application. They cannot be * freed.
*/ - if (rxq_check_vec_support(rxq) > 0) { + if (mlx5_rxq_check_vec_support(rxq) > 0) { for (i = 0; i < used; ++i) (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL; rxq->rq_pi = rxq->rq_ci; @@ -168,6 +337,21 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) } } +/** + * Free RX queue elements. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + */ +static void +rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) + rxq_free_elts_mprq(rxq_ctrl); + else + rxq_free_elts_sprq(rxq_ctrl); +} + /** * Clean up a RX queue. * @@ -179,31 +363,35 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl) { - DEBUG("cleaning up %p", (void *)rxq_ctrl); + DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u", + PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx); if (rxq_ctrl->ibv) - mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv); + mlx5_rxq_ibv_release(rxq_ctrl->ibv); memset(rxq_ctrl, 0, sizeof(*rxq_ctrl)); } /** * Returns the per-queue supported offloads. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * Supported Rx offloads. */ uint64_t -mlx5_priv_get_rx_queue_offloads(struct priv *priv) +mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_dev_config *config = &priv->config; uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER | DEV_RX_OFFLOAD_TIMESTAMP | DEV_RX_OFFLOAD_JUMBO_FRAME); + offloads |= DEV_RX_OFFLOAD_CRC_STRIP; if (config->hw_fcs_strip) - offloads |= DEV_RX_OFFLOAD_CRC_STRIP; + offloads |= DEV_RX_OFFLOAD_KEEP_CRC; + if (config->hw_csum) offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | @@ -217,46 +405,17 @@ mlx5_priv_get_rx_queue_offloads(struct priv *priv) /** * Returns the per-port supported offloads. * - * @param priv - * Pointer to private structure. * @return * Supported Rx offloads. */ uint64_t -mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused) +mlx5_get_rx_port_offloads(void) { uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER; return offloads; } -/** - * Checks if the per-queue offload configuration is valid. - * - * @param priv - * Pointer to private structure. - * @param offloads - * Per-queue offloads configuration. - * - * @return - * 1 if the configuration is valid, 0 otherwise. - */ -static int -priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) -{ - uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads; - uint64_t queue_supp_offloads = - mlx5_priv_get_rx_queue_offloads(priv); - uint64_t port_supp_offloads = mlx5_priv_get_rx_port_offloads(priv); - - if ((offloads & (queue_supp_offloads | port_supp_offloads)) != - offloads) - return 0; - if (((port_offloads ^ offloads) & port_supp_offloads)) - return 0; - return 1; -} - /** * * @param dev @@ -273,7 +432,7 @@ priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) * Memory pool for buffer allocations. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
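
mlx5_rx_queue_setup() just below rounds a non power-of-two descriptor count up before configuring the queue. A hedged numeric sketch of that rule; log2above() is the driver-internal helper from mlx5_utils.h:

    #include <stdint.h>
    #include <rte_common.h>

    /* Illustrative rounding: a request for 1000 descriptors becomes 1024. */
    static uint16_t
    example_round_desc(uint16_t desc)
    {
            if (!rte_is_power_of_2(desc))
                    desc = 1 << log2above(desc);
            return desc;
    }
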
*/ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, @@ -284,53 +443,40 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); - int ret = 0; - priv_lock(priv); if (!rte_is_power_of_2(desc)) { desc = 1 << log2above(desc); - WARN("%p: increased number of descriptors in RX queue %u" - " to the next power of two (%d)", - (void *)dev, idx, desc); + DRV_LOG(WARNING, + "port %u increased number of descriptors in Rx queue %u" + " to the next power of two (%d)", + dev->data->port_id, idx, desc); } - DEBUG("%p: configuring queue %u for %u descriptors", - (void *)dev, idx, desc); + DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors", + dev->data->port_id, idx, desc); if (idx >= priv->rxqs_n) { - ERROR("%p: queue index out of range (%u >= %u)", - (void *)dev, idx, priv->rxqs_n); - priv_unlock(priv); - return -EOVERFLOW; - } - if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) { - ret = ENOTSUP; - ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port " - "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, - (void *)dev, conf->offloads, - dev->data->dev_conf.rxmode.offloads, - (mlx5_priv_get_rx_port_offloads(priv) | - mlx5_priv_get_rx_queue_offloads(priv))); - goto out; - } - if (!mlx5_priv_rxq_releasable(priv, idx)) { - ret = EBUSY; - ERROR("%p: unable to release queue index %u", - (void *)dev, idx); - goto out; - } - mlx5_priv_rxq_release(priv, idx); - rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, conf, mp); + DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)", + dev->data->port_id, idx, priv->rxqs_n); + rte_errno = EOVERFLOW; + return -rte_errno; + } + if (!mlx5_rxq_releasable(dev, idx)) { + DRV_LOG(ERR, "port %u unable to release queue index %u", + dev->data->port_id, idx); + rte_errno = EBUSY; + return -rte_errno; + } + mlx5_rxq_release(dev, idx); + rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp); if (!rxq_ctrl) { - ERROR("%p: unable to allocate queue index %u", - (void *)dev, idx); - ret = ENOMEM; - goto out; + DRV_LOG(ERR, "port %u unable to allocate queue index %u", + dev->data->port_id, idx); + rte_errno = ENOMEM; + return -rte_errno; } - DEBUG("%p: adding RX queue %p to list", - (void *)dev, (void *)rxq_ctrl); + DRV_LOG(DEBUG, "port %u adding Rx queue %u to list", + dev->data->port_id, idx); (*priv->rxqs)[idx] = &rxq_ctrl->rxq; -out: - priv_unlock(priv); - return -ret; + return 0; } /** @@ -350,45 +496,48 @@ mlx5_rx_queue_release(void *dpdk_rxq) return; rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); priv = rxq_ctrl->priv; - priv_lock(priv); - if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx)) - rte_panic("Rx queue %p is still used by a flow and cannot be" - " removed\n", (void *)rxq_ctrl); - mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx); - priv_unlock(priv); + if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx)) + rte_panic("port %u Rx queue %u is still used by a flow and" + " cannot be removed\n", + PORT_ID(priv), rxq_ctrl->idx); + mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx); } /** * Allocate queue vector and fill epoll fd list for Rx interrupts. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int -priv_rx_intr_vec_enable(struct priv *priv) +mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; unsigned int rxqs_n = priv->rxqs_n; unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); unsigned int count = 0; - struct rte_intr_handle *intr_handle = priv->dev->intr_handle; + struct rte_intr_handle *intr_handle = dev->intr_handle; - if (!priv->dev->data->dev_conf.intr_conf.rxq) + if (!dev->data->dev_conf.intr_conf.rxq) return 0; - priv_rx_intr_vec_disable(priv); + mlx5_rx_intr_vec_disable(dev); intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0])); if (intr_handle->intr_vec == NULL) { - ERROR("failed to allocate memory for interrupt vector," - " Rx interrupts will not be supported"); - return -ENOMEM; + DRV_LOG(ERR, + "port %u failed to allocate memory for interrupt" + " vector, Rx interrupts will not be supported", + dev->data->port_id); + rte_errno = ENOMEM; + return -rte_errno; } intr_handle->type = RTE_INTR_HANDLE_EXT; for (i = 0; i != n; ++i) { /* This rxq ibv must not be released in this function. */ - struct mlx5_rxq_ibv *rxq_ibv = mlx5_priv_rxq_ibv_get(priv, i); + struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i); int fd; int flags; int rc; @@ -402,27 +551,34 @@ priv_rx_intr_vec_enable(struct priv *priv) continue; } if (count >= RTE_MAX_RXTX_INTR_VEC_ID) { - ERROR("too many Rx queues for interrupt vector size" - " (%d), Rx interrupts cannot be enabled", - RTE_MAX_RXTX_INTR_VEC_ID); - priv_rx_intr_vec_disable(priv); - return -1; + DRV_LOG(ERR, + "port %u too many Rx queues for interrupt" + " vector size (%d), Rx interrupts cannot be" + " enabled", + dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID); + mlx5_rx_intr_vec_disable(dev); + rte_errno = ENOMEM; + return -rte_errno; } fd = rxq_ibv->channel->fd; flags = fcntl(fd, F_GETFL); rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK); if (rc < 0) { - ERROR("failed to make Rx interrupt file descriptor" - " %d non-blocking for queue index %d", fd, i); - priv_rx_intr_vec_disable(priv); - return -1; + rte_errno = errno; + DRV_LOG(ERR, + "port %u failed to make Rx interrupt file" + " descriptor %d non-blocking for queue index" + " %d", + dev->data->port_id, fd, i); + mlx5_rx_intr_vec_disable(dev); + return -rte_errno; } intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count; intr_handle->efds[count] = fd; count++; } if (!count) - priv_rx_intr_vec_disable(priv); + mlx5_rx_intr_vec_disable(dev); else intr_handle->nb_efd = count; return 0; @@ -431,18 +587,19 @@ priv_rx_intr_vec_enable(struct priv *priv) /** * Clean up Rx interrupts handler. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
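
mlx5_rx_intr_vec_enable() above switches every completion channel file descriptor to non-blocking mode so reads on it never stall. A minimal standalone sketch of that fcntl() sequence:

    #include <errno.h>
    #include <fcntl.h>

    /* Set O_NONBLOCK on a descriptor; returns 0 or a negative errno. */
    static int
    example_set_nonblock(int fd)
    {
            int flags = fcntl(fd, F_GETFL);

            if (flags < 0 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0)
                    return -errno;
            return 0;
    }
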
*/ void -priv_rx_intr_vec_disable(struct priv *priv) +mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev) { - struct rte_intr_handle *intr_handle = priv->dev->intr_handle; + struct priv *priv = dev->data->dev_private; + struct rte_intr_handle *intr_handle = dev->intr_handle; unsigned int i; unsigned int rxqs_n = priv->rxqs_n; unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); - if (!priv->dev->data->dev_conf.intr_conf.rxq) + if (!dev->data->dev_conf.intr_conf.rxq) return; if (!intr_handle->intr_vec) goto free; @@ -459,7 +616,7 @@ priv_rx_intr_vec_disable(struct priv *priv) */ rxq_data = (*priv->rxqs)[i]; rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv); + mlx5_rxq_ibv_release(rxq_ctrl->ibv); } free: rte_intr_free_epoll_fd(intr_handle); @@ -490,7 +647,8 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) doorbell = (uint64_t)doorbell_hi << 32; doorbell |= rxq->cqn; rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi); - rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg); + mlx5_uar_write64(rte_cpu_to_be_64(doorbell), + cq_db_reg, rxq->uar_lock_cq); } /** @@ -502,7 +660,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) * Rx queue number. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) @@ -510,31 +668,25 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data; struct mlx5_rxq_ctrl *rxq_ctrl; - int ret = 0; - priv_lock(priv); rxq_data = (*priv->rxqs)[rx_queue_id]; if (!rxq_data) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (rxq_ctrl->irq) { struct mlx5_rxq_ibv *rxq_ibv; - rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id); + rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id); if (!rxq_ibv) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn); - mlx5_priv_rxq_ibv_release(priv, rxq_ibv); + mlx5_rxq_ibv_release(rxq_ibv); } -exit: - priv_unlock(priv); - if (ret) - WARN("unable to arm interrupt on rx queue %d", rx_queue_id); - return -ret; + return 0; } /** @@ -546,7 +698,7 @@ exit: * Rx queue number. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) @@ -557,53 +709,54 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct mlx5_rxq_ibv *rxq_ibv = NULL; struct ibv_cq *ev_cq; void *ev_ctx; - int ret = 0; + int ret; - priv_lock(priv); rxq_data = (*priv->rxqs)[rx_queue_id]; if (!rxq_data) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (!rxq_ctrl->irq) - goto exit; - rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id); + return 0; + rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id); if (!rxq_ibv) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx); if (ret || ev_cq != rxq_ibv->cq) { - ret = EINVAL; + rte_errno = EINVAL; goto exit; } rxq_data->cq_arm_sn++; mlx5_glue->ack_cq_events(rxq_ibv->cq, 1); + return 0; exit: + ret = rte_errno; /* Save rte_errno before cleanup. 
*/ if (rxq_ibv) - mlx5_priv_rxq_ibv_release(priv, rxq_ibv); - priv_unlock(priv); - if (ret) - WARN("unable to disable interrupt on rx queue %d", - rx_queue_id); - return -ret; + mlx5_rxq_ibv_release(rxq_ibv); + DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d", + dev->data->port_id, rx_queue_id); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** * Create the Rx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * Queue index in DPDK Rx queue array * * @return - * The Verbs object initialised if it can be created. + * The Verbs object initialised, NULL otherwise and rte_errno is set. */ -struct mlx5_rxq_ibv* -mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) +struct mlx5_rxq_ibv * +mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); @@ -613,10 +766,16 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) struct ibv_cq_init_attr_ex ibv; struct mlx5dv_cq_init_attr mlx5; } cq; - struct ibv_wq_init_attr wq; + struct { + struct ibv_wq_init_attr ibv; +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + struct mlx5dv_wq_init_attr mlx5; +#endif + } wq; struct ibv_cq_ex cq_attr; } attr; - unsigned int cqe_n = (1 << rxq_data->elts_n) - 1; + unsigned int cqe_n; + unsigned int wqe_n = 1 << rxq_data->elts_n; struct mlx5_rxq_ibv *tmpl; struct mlx5dv_cq cq_info; struct mlx5dv_rwq rwq; @@ -624,6 +783,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) int ret = 0; struct mlx5dv_obj obj; struct mlx5_dev_config *config = &priv->config; + const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data); assert(rxq_data); assert(!rxq_ctrl->ibv); @@ -632,28 +792,26 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0, rxq_ctrl->socket); if (!tmpl) { - ERROR("%p: cannot allocate verbs resources", - (void *)rxq_ctrl); + DRV_LOG(ERR, + "port %u Rx queue %u cannot allocate verbs resources", + dev->data->port_id, rxq_ctrl->idx); + rte_errno = ENOMEM; goto error; } tmpl->rxq_ctrl = rxq_ctrl; - /* Use the entire RX mempool as the memory region. */ - tmpl->mr = priv_mr_get(priv, rxq_data->mp); - if (!tmpl->mr) { - tmpl->mr = priv_mr_new(priv, rxq_data->mp); - if (!tmpl->mr) { - ERROR("%p: MR creation failure", (void *)rxq_ctrl); - goto error; - } - } if (rxq_ctrl->irq) { tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx); if (!tmpl->channel) { - ERROR("%p: Comp Channel creation failure", - (void *)rxq_ctrl); + DRV_LOG(ERR, "port %u: comp channel creation failure", + dev->data->port_id); + rte_errno = ENOMEM; goto error; } } + if (mprq_en) + cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1; + else + cqe_n = wqe_n - 1; attr.cq.ibv = (struct ibv_cq_init_attr_ex){ .cqe = cqe_n, .channel = tmpl->channel, @@ -665,32 +823,43 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) if (config->cqe_comp && !rxq_data->hw_timestamp) { attr.cq.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE; +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + attr.cq.mlx5.cqe_comp_res_format = + mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX : + MLX5DV_CQE_RES_FORMAT_HASH; +#else attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH; +#endif /* * For vectorized Rx, it must not be doubled in order to * make cq_ci and rq_ci aligned. 
*/ - if (rxq_check_vec_support(rxq_data) < 0) + if (mlx5_rxq_check_vec_support(rxq_data) < 0) attr.cq.ibv.cqe *= 2; } else if (config->cqe_comp && rxq_data->hw_timestamp) { - DEBUG("Rx CQE compression is disabled for HW timestamp"); + DRV_LOG(DEBUG, + "port %u Rx CQE compression is disabled for HW" + " timestamp", + dev->data->port_id); } tmpl->cq = mlx5_glue->cq_ex_to_cq (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv, &attr.cq.mlx5)); if (tmpl->cq == NULL) { - ERROR("%p: CQ creation failure", (void *)rxq_ctrl); + DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure", + dev->data->port_id, idx); + rte_errno = ENOMEM; goto error; } - DEBUG("priv->device_attr.max_qp_wr is %d", - priv->device_attr.orig_attr.max_qp_wr); - DEBUG("priv->device_attr.max_sge is %d", - priv->device_attr.orig_attr.max_sge); - attr.wq = (struct ibv_wq_init_attr){ + DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d", + dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr); + DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d", + dev->data->port_id, priv->device_attr.orig_attr.max_sge); + attr.wq.ibv = (struct ibv_wq_init_attr){ .wq_context = NULL, /* Could be useful in the future. */ .wq_type = IBV_WQT_RQ, /* Max number of outstanding WRs. */ - .max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n, + .max_wr = wqe_n >> rxq_data->sges_n, /* Max number of scatter/gather elements in a WR. */ .max_sge = 1 << rxq_data->sges_n, .pd = priv->pd, @@ -704,32 +873,54 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) }; /* By default, FCS (CRC) is stripped by hardware. */ if (rxq_data->crc_present) { - attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS; - attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; + attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS; + attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; } #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING if (config->hw_padding) { - attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING; - attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; + attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING; + attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; + } +#endif +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){ + .comp_mask = 0, + }; + if (mprq_en) { + struct mlx5dv_striding_rq_init_attr *mprq_attr = + &attr.wq.mlx5.striding_rq_attrs; + + attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ; + *mprq_attr = (struct mlx5dv_striding_rq_init_attr){ + .single_stride_log_num_of_bytes = rxq_data->strd_sz_n, + .single_wqe_log_num_of_strides = rxq_data->strd_num_n, + .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT, + }; } + tmpl->wq = mlx5_glue->dv_create_wq(priv->ctx, &attr.wq.ibv, + &attr.wq.mlx5); +#else + tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq.ibv); #endif - tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq); if (tmpl->wq == NULL) { - ERROR("%p: WQ creation failure", (void *)rxq_ctrl); + DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure", + dev->data->port_id, idx); + rte_errno = ENOMEM; goto error; } /* * Make sure number of WRs*SGEs match expectations since a queue * cannot allocate more than "desc" buffers. 
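
The sanity check that follows verifies the WR/SGE arithmetic: raising sges_n trades work requests for scatter/gather entries, but their product must still equal the number of allocated buffers. A hedged numeric sketch:

    #include <assert.h>

    /* With wqe_n = 1024 and sges_n = 2: 256 WRs of 4 SGEs each. */
    static void
    example_wr_sge_math(void)
    {
            unsigned int wqe_n = 1 << 10;
            unsigned int sges_n = 2;

            assert((wqe_n >> sges_n) * (1u << sges_n) == wqe_n);
    }
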
*/ - if (((int)attr.wq.max_wr != - ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) || - ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) { - ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs", - (void *)rxq_ctrl, - ((1 << rxq_data->elts_n) >> rxq_data->sges_n), - (1 << rxq_data->sges_n), - attr.wq.max_wr, attr.wq.max_sge); + if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) || + attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) { + DRV_LOG(ERR, + "port %u Rx queue %u requested %u*%u but got %u*%u" + " WRs*SGEs", + dev->data->port_id, idx, + wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n), + attr.wq.ibv.max_wr, attr.wq.ibv.max_sge); + rte_errno = EINVAL; goto error; } /* Change queue state to ready. */ @@ -739,8 +930,10 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) }; ret = mlx5_glue->modify_wq(tmpl->wq, &mod); if (ret) { - ERROR("%p: WQ state to IBV_WQS_RDY failed", - (void *)rxq_ctrl); + DRV_LOG(ERR, + "port %u Rx queue %u WQ state to IBV_WQS_RDY failed", + dev->data->port_id, idx); + rte_errno = ret; goto error; } obj.cq.in = tmpl->cq; @@ -748,33 +941,53 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) obj.rwq.in = tmpl->wq; obj.rwq.out = &rwq; ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ); - if (ret != 0) + if (ret) { + rte_errno = ret; goto error; + } if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { - ERROR("Wrong MLX5_CQE_SIZE environment variable value: " - "it should be set to %u", RTE_CACHE_LINE_SIZE); + DRV_LOG(ERR, + "port %u wrong MLX5_CQE_SIZE environment variable" + " value: it should be set to %u", + dev->data->port_id, RTE_CACHE_LINE_SIZE); + rte_errno = EINVAL; goto error; } /* Fill the rings. */ - rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[]) - (uintptr_t)rwq.buf; - for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) { - struct rte_mbuf *buf = (*rxq_data->elts)[i]; - volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i]; - + rxq_data->wqes = rwq.buf; + for (i = 0; (i != wqe_n); ++i) { + volatile struct mlx5_wqe_data_seg *scat; + uintptr_t addr; + uint32_t byte_count; + + if (mprq_en) { + struct mlx5_mprq_buf *buf = (*rxq_data->mprq_bufs)[i]; + + scat = &((volatile struct mlx5_wqe_mprq *) + rxq_data->wqes)[i].dseg; + addr = (uintptr_t)mlx5_mprq_buf_addr(buf); + byte_count = (1 << rxq_data->strd_sz_n) * + (1 << rxq_data->strd_num_n); + } else { + struct rte_mbuf *buf = (*rxq_data->elts)[i]; + + scat = &((volatile struct mlx5_wqe_data_seg *) + rxq_data->wqes)[i]; + addr = rte_pktmbuf_mtod(buf, uintptr_t); + byte_count = DATA_LEN(buf); + } /* scat->addr must be able to store a pointer. */ assert(sizeof(scat->addr) >= sizeof(uintptr_t)); *scat = (struct mlx5_wqe_data_seg){ - .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, - uintptr_t)), - .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)), - .lkey = tmpl->mr->lkey, + .addr = rte_cpu_to_be_64(addr), + .byte_count = rte_cpu_to_be_32(byte_count), + .lkey = mlx5_rx_addr2mr(rxq_data, addr), }; } rxq_data->rq_db = rwq.dbrec; rxq_data->cqe_n = log2above(cq_info.cqe_cnt); rxq_data->cq_ci = 0; - rxq_data->rq_ci = 0; + rxq_data->consumed_strd = 0; rxq_data->rq_pi = 0; rxq_data->zip = (struct rxq_zip){ .ai = 0, @@ -785,43 +998,43 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) rxq_data->cqn = cq_info.cqn; rxq_data->cq_arm_sn = 0; /* Update doorbell counter. 
*/ - rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n; + rxq_data->rq_ci = wqe_n >> rxq_data->sges_n; rte_wmb(); *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci); - DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl); + DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id, + idx, (void *)&tmpl); rte_atomic32_inc(&tmpl->refcnt); - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, - (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; return tmpl; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ if (tmpl->wq) claim_zero(mlx5_glue->destroy_wq(tmpl->wq)); if (tmpl->cq) claim_zero(mlx5_glue->destroy_cq(tmpl->cq)); if (tmpl->channel) claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel)); - if (tmpl->mr) - priv_mr_release(priv, tmpl->mr); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + rte_errno = ret; /* Restore rte_errno. */ return NULL; } /** * Get an Rx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * Queue index in DPDK Rx queue array * * @return * The Verbs object if it exists. */ -struct mlx5_rxq_ibv* -mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) +struct mlx5_rxq_ibv * +mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl; @@ -831,11 +1044,7 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) return NULL; rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (rxq_ctrl->ibv) { - priv_mr_get(priv, rxq_data->mp); rte_atomic32_inc(&rxq_ctrl->ibv->refcnt); - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, - (void *)rxq_ctrl->ibv, - rte_atomic32_read(&rxq_ctrl->ibv->refcnt)); } return rxq_ctrl->ibv; } @@ -843,28 +1052,18 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) /** * Release an Rx verbs queue object. * - * @param priv - * Pointer to private structure. * @param rxq_ibv * Verbs Rx queue object. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. */ int -mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) +mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv) { - int ret; - assert(rxq_ibv); assert(rxq_ibv->wq); assert(rxq_ibv->cq); - assert(rxq_ibv->mr); - ret = priv_mr_release(priv, rxq_ibv->mr); - if (!ret) - rxq_ibv->mr = NULL; - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, - (void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt)); if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) { rxq_free_elts(rxq_ibv->rxq_ctrl); claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq)); @@ -876,26 +1075,28 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) rte_free(rxq_ibv); return 0; } - return EBUSY; + return 1; } /** * Verify the Verbs Rx queue list is empty. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * - * @return the number of object not released. + * @return + * The number of objects not released.
*/ int -mlx5_priv_rxq_ibv_verify(struct priv *priv) +mlx5_rxq_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret = 0; struct mlx5_rxq_ibv *rxq_ibv; LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) { - DEBUG("%p: Verbs Rx queue %p still referenced", (void *)priv, - (void *)rxq_ibv); + DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced", + dev->data->port_id, rxq_ibv->rxq_ctrl->idx); ++ret; } return ret; @@ -904,65 +1105,279 @@ mlx5_priv_rxq_ibv_verify(struct priv *priv) /** * Return true if a single reference exists on the object. * - * @param priv - * Pointer to private structure. * @param rxq_ibv * Verbs Rx queue object. */ int -mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) +mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv) { - (void)priv; assert(rxq_ibv); return (rte_atomic32_read(&rxq_ibv->refcnt) == 1); } +/** + * Callback function to initialize mbufs for Multi-Packet RQ. + */ +static inline void +mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused, + void *_m, unsigned int i __rte_unused) +{ + struct mlx5_mprq_buf *buf = _m; + + memset(_m, 0, sizeof(*buf)); + buf->mp = mp; + rte_atomic16_set(&buf->refcnt, 1); +} + +/** + * Free mempool of Multi-Packet RQ. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_mprq_free_mp(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct rte_mempool *mp = priv->mprq_mp; + unsigned int i; + + if (mp == NULL) + return 0; + DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ", + dev->data->port_id, mp->name); + /* + * If a buffer in the pool has been externally attached to a mbuf and it + * is still in use by the application, destroying the Rx queue can spoil + * the packet. This is unlikely, but it can happen if the application + * dynamically creates and destroys queues while holding Rx packets. + * + * TODO: It is unavoidable for now because the mempool for Multi-Packet + * RQ isn't provided by the application but managed by the PMD. + */ + if (!rte_mempool_full(mp)) { + DRV_LOG(ERR, + "port %u mempool for Multi-Packet RQ is still in use", + dev->data->port_id); + rte_errno = EBUSY; + return -rte_errno; + } + rte_mempool_free(mp); + /* Unset mempool for each Rx queue. */ + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + + if (rxq == NULL) + continue; + rxq->mprq_mp = NULL; + } + return 0; +} + +/** + * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the + * mempool. If already allocated, reuse it if there are enough elements. + * Otherwise, resize it. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct rte_mempool *mp = priv->mprq_mp; + char name[RTE_MEMPOOL_NAMESIZE]; + unsigned int desc = 0; + unsigned int buf_len; + unsigned int obj_num; + unsigned int obj_size; + unsigned int strd_num_n = 0; + unsigned int strd_sz_n = 0; + unsigned int i; + + if (!mlx5_mprq_enabled(dev)) + return 0; + /* Count the total number of descriptors configured. */ + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + + if (rxq == NULL) + continue; + desc += 1 << rxq->elts_n; + /* Get the max number of strides.
*/ + if (strd_num_n < rxq->strd_num_n) + strd_num_n = rxq->strd_num_n; + /* Get the max size of a stride. */ + if (strd_sz_n < rxq->strd_sz_n) + strd_sz_n = rxq->strd_sz_n; + } + assert(strd_num_n && strd_sz_n); + buf_len = (1 << strd_num_n) * (1 << strd_sz_n); + obj_size = buf_len + sizeof(struct mlx5_mprq_buf); + /* + * Received packets can be either memcpy'd or externally referenced. If + * the packet is attached to an mbuf as an external buffer, it isn't + * possible to predict how the buffers will be queued by the + * application, so the needed buffers cannot be pre-allocated exactly; + * enough buffers have to be prepared speculatively. + * + * In the data path, if this mempool is depleted, the PMD will memcpy + * received packets to buffers provided by the application (rxq->mp) + * until this mempool becomes available again. + */ + desc *= 4; + obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n; + /* + * rte_mempool_create_empty() has a sanity check that refuses a large + * cache size compared to the number of elements. + * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a + * constant number 2 instead. + */ + obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2); + /* Check whether a mempool is already allocated and if it can be reused. */ + if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) { + DRV_LOG(DEBUG, "port %u mempool %s is being reused", + dev->data->port_id, mp->name); + /* Reuse. */ + goto exit; + } else if (mp != NULL) { + DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it", + dev->data->port_id, mp->name); + /* + * If freeing fails, the mempool may still be in use; there is + * no choice but to keep using the existing one. On buffer + * underrun, packets will be memcpy'd instead of external + * buffer attachment. + */ + if (mlx5_mprq_free_mp(dev)) { + if (mp->elt_size >= obj_size) + goto exit; + else + return -rte_errno; + } + } + snprintf(name, sizeof(name), "%s-mprq", dev->device->name); + mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ, + 0, NULL, NULL, mlx5_mprq_buf_init, NULL, + dev->device->numa_node, 0); + if (mp == NULL) { + DRV_LOG(ERR, + "port %u failed to allocate a mempool for" + " Multi-Packet RQ, count=%u, size=%u", + dev->data->port_id, obj_num, obj_size); + rte_errno = ENOMEM; + return -rte_errno; + } + priv->mprq_mp = mp; +exit: + /* Set mempool for each Rx queue. */ + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + + if (rxq == NULL) + continue; + rxq->mprq_mp = mp; + } + DRV_LOG(INFO, "port %u Multi-Packet RQ is configured", + dev->data->port_id); + return 0; +} + /** * Create a DPDK Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx - * TX queue index. + * RX queue index. * @param desc * Number of descriptors to configure in queue. * @param socket * NUMA socket on which memory must be allocated. * * @return - * A DPDK queue object on success. + * A DPDK queue object on success, NULL otherwise and rte_errno is set.
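To make the mempool sizing logic above concrete, here is a standalone arithmetic sketch. All values are assumed for illustration (the stride counts, header size and cache size stand in for the real per-device configuration and MLX5_MPRQ_MP_CACHE_SZ):

    #include <stdio.h>

    int
    main(void)
    {
        const unsigned int strd_num_n = 6;  /* 64 strides per buffer. */
        const unsigned int strd_sz_n = 11;  /* 2 KB per stride. */
        const unsigned int hdr_size = 16;   /* Assumed buffer header size. */
        const unsigned int rxqs_n = 4;      /* Four configured Rx queues. */
        const unsigned int cache_sz = 32;   /* Assumed mempool cache size. */
        unsigned int desc = 4 * 1024;       /* 1024 descriptors per queue. */
        unsigned int buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
        unsigned int obj_size = buf_len + hdr_size;
        unsigned int obj_num;

        desc *= 4; /* Over-provision: buffers may be held by the app. */
        obj_num = desc + cache_sz * rxqs_n;
        if (obj_num < cache_sz * 2) /* Mempool cache sanity check. */
            obj_num = cache_sz * 2;
        printf("obj_num=%u, obj_size=%u bytes\n", obj_num, obj_size);
        return 0;
    }

With these numbers each buffer carries 128 KB of stride space plus its header, and the pool is provisioned for four times the total descriptor count plus one cache's worth of objects per queue.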
*/ -struct mlx5_rxq_ctrl* -mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, - unsigned int socket, const struct rte_eth_rxconf *conf, - struct rte_mempool *mp) +struct mlx5_rxq_ctrl * +mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) { - struct rte_eth_dev *dev = priv->dev; + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *tmpl; unsigned int mb_len = rte_pktmbuf_data_room_size(mp); + unsigned int mprq_stride_size; struct mlx5_dev_config *config = &priv->config; /* * Always allocate extra slots, even if eventually * the vector Rx will not be used. */ - const uint16_t desc_n = + uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP; + uint64_t offloads = conf->offloads | + dev->data->dev_conf.rxmode.offloads; + const int mprq_en = mlx5_check_mprq_support(dev) > 0; tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *), 0, socket); - if (!tmpl) + if (!tmpl) { + rte_errno = ENOMEM; return NULL; + } + if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh, + MLX5_MR_BTREE_CACHE_N, socket)) { + /* rte_errno is already set. */ + goto error; + } tmpl->socket = socket; - if (priv->dev->data->dev_conf.intr_conf.rxq) + if (dev->data->dev_conf.intr_conf.rxq) tmpl->irq = 1; - /* Enable scattered packets support for this queue if necessary. */ + /* + * This Rx queue can be configured as a Multi-Packet RQ if all of the + * following conditions are met: + * - MPRQ is enabled. + * - The number of descs is more than the number of strides. + * - max_rx_pkt_len plus overhead is less than the max size of a + * stride. + * Otherwise, enable Rx scatter if necessary. + */ assert(mb_len >= RTE_PKTMBUF_HEADROOM); - if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= - (mb_len - RTE_PKTMBUF_HEADROOM)) { + mprq_stride_size = + dev->data->dev_conf.rxmode.max_rx_pkt_len + + sizeof(struct rte_mbuf_ext_shared_info) + + RTE_PKTMBUF_HEADROOM; + if (mprq_en && + desc > (1U << config->mprq.stride_num_n) && + mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) { + /* TODO: Rx scatter isn't supported yet. */ + tmpl->rxq.sges_n = 0; + /* Trim the number of descs needed. 
*/ + desc >>= config->mprq.stride_num_n; + tmpl->rxq.strd_num_n = config->mprq.stride_num_n; + tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size), + config->mprq.min_stride_size_n); + tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT; + tmpl->rxq.mprq_max_memcpy_len = + RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM, + config->mprq.max_memcpy_len); + DRV_LOG(DEBUG, + "port %u Rx queue %u: Multi-Packet RQ is enabled" + " strd_num_n = %u, strd_sz_n = %u", + dev->data->port_id, idx, + tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n); + } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= + (mb_len - RTE_PKTMBUF_HEADROOM)) { tmpl->rxq.sges_n = 0; - } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) { + } else if (offloads & DEV_RX_OFFLOAD_SCATTER) { unsigned int size = RTE_PKTMBUF_HEADROOM + dev->data->dev_conf.rxmode.max_rx_pkt_len; @@ -978,57 +1393,71 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, size = mb_len * (1 << tmpl->rxq.sges_n); size -= RTE_PKTMBUF_HEADROOM; if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) { - ERROR("%p: too many SGEs (%u) needed to handle" - " requested maximum packet size %u", - (void *)dev, - 1 << sges_n, - dev->data->dev_conf.rxmode.max_rx_pkt_len); + DRV_LOG(ERR, + "port %u too many SGEs (%u) needed to handle" + " requested maximum packet size %u", + dev->data->port_id, + 1 << sges_n, + dev->data->dev_conf.rxmode.max_rx_pkt_len); + rte_errno = EOVERFLOW; goto error; } } else { - WARN("%p: the requested maximum Rx packet size (%u) is" - " larger than a single mbuf (%u) and scattered" - " mode has not been requested", - (void *)dev, - dev->data->dev_conf.rxmode.max_rx_pkt_len, - mb_len - RTE_PKTMBUF_HEADROOM); - } - DEBUG("%p: maximum number of segments per packet: %u", - (void *)dev, 1 << tmpl->rxq.sges_n); + DRV_LOG(WARNING, + "port %u the requested maximum Rx packet size (%u) is" + " larger than a single mbuf (%u) and scattered mode has" + " not been requested", + dev->data->port_id, + dev->data->dev_conf.rxmode.max_rx_pkt_len, + mb_len - RTE_PKTMBUF_HEADROOM); + } + if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq)) + DRV_LOG(WARNING, + "port %u MPRQ is requested but cannot be enabled" + " (requested: desc = %u, stride_sz = %u," + " supported: min_stride_num = %u, max_stride_sz = %u).", + dev->data->port_id, desc, mprq_stride_size, + (1 << config->mprq.stride_num_n), + (1 << config->mprq.max_stride_size_n)); + DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u", + dev->data->port_id, 1 << tmpl->rxq.sges_n); if (desc % (1 << tmpl->rxq.sges_n)) { - ERROR("%p: number of RX queue descriptors (%u) is not a" - " multiple of SGEs per packet (%u)", - (void *)dev, - desc, - 1 << tmpl->rxq.sges_n); + DRV_LOG(ERR, + "port %u number of Rx queue descriptors (%u) is not a" + " multiple of SGEs per packet (%u)", + dev->data->port_id, + desc, + 1 << tmpl->rxq.sges_n); + rte_errno = EINVAL; goto error; } /* Toggle RX checksum offload if hardware supports it. */ - tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM); - tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) && - priv->config.hw_csum_l2tun); - tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP); + tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM); + tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP); /* Configure VLAN stripping. 
*/ - tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); + tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP); /* By default, FCS (CRC) is stripped by hardware. */ - if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) { - tmpl->rxq.crc_present = 0; - } else if (config->hw_fcs_strip) { - tmpl->rxq.crc_present = 1; - } else { - WARN("%p: CRC stripping has been disabled but will still" - " be performed by hardware, make sure MLNX_OFED and" - " firmware are up to date", - (void *)dev); - tmpl->rxq.crc_present = 0; - } - DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from" - " incoming frames to hide it", - (void *)dev, - tmpl->rxq.crc_present ? "disabled" : "enabled", - tmpl->rxq.crc_present << 2); + tmpl->rxq.crc_present = 0; + if (rte_eth_dev_must_keep_crc(offloads)) { + if (config->hw_fcs_strip) { + tmpl->rxq.crc_present = 1; + } else { + DRV_LOG(WARNING, + "port %u CRC stripping has been disabled but will" + " still be performed by hardware, make sure MLNX_OFED" + " and firmware are up to date", + dev->data->port_id); + } + } + DRV_LOG(DEBUG, + "port %u CRC stripping is %s, %u bytes will be subtracted from" + " incoming frames to hide it", + dev->data->port_id, + tmpl->rxq.crc_present ? "disabled" : "enabled", + tmpl->rxq.crc_present << 2); /* Save port ID. */ - tmpl->rxq.rss_hash = priv->rxqs_n > 1; + tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf && + (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS)); tmpl->rxq.port_id = dev->data->port_id; tmpl->priv = priv; tmpl->rxq.mp = mp; @@ -1036,9 +1465,11 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, tmpl->rxq.elts_n = log2above(desc); tmpl->rxq.elts = (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1); +#ifndef RTE_ARCH_64 + tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq; +#endif + tmpl->idx = idx; rte_atomic32_inc(&tmpl->refcnt); - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, - (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); return tmpl; error: @@ -1049,28 +1480,26 @@ error: /** * Get a Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * * @return - * A pointer to the queue if it exists. + * A pointer to the queue if it exists, NULL otherwise. */ -struct mlx5_rxq_ctrl* -mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) +struct mlx5_rxq_ctrl * +mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl = NULL; if ((*priv->rxqs)[idx]) { rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); - - mlx5_priv_rxq_ibv_get(priv, idx); + mlx5_rxq_ibv_get(dev, idx); rte_atomic32_inc(&rxq_ctrl->refcnt); - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, - (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt)); } return rxq_ctrl; } @@ -1078,59 +1507,58 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) /** * Release a Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. 
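On the CRC handling above: when the application asks to keep the FCS and the hardware can leave it in place, crc_present is set to 1, and the Rx path later subtracts ETHER_CRC_LEN from the CQE byte count; the "crc_present << 2" in the debug message is that same 4-byte adjustment. A trivial standalone illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define ETHER_CRC_LEN 4 /* Same value as DPDK's definition. */

    int
    main(void)
    {
        const unsigned int crc_present = 1; /* CRC left in place by HW. */
        uint32_t byte_cnt = 64;             /* Byte count from the CQE. */
        uint32_t len = byte_cnt;

        if (crc_present)
            len -= ETHER_CRC_LEN; /* Hide the FCS from the application. */
        printf("subtracted %u bytes, PKT_LEN=%u\n", crc_present << 2, len);
        return 0;
    }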
*/ int -mlx5_priv_rxq_release(struct priv *priv, uint16_t idx) +mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; if (!(*priv->rxqs)[idx]) return 0; rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); assert(rxq_ctrl->priv); - if (rxq_ctrl->ibv) { - int ret; - - ret = mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv); - if (!ret) - rxq_ctrl->ibv = NULL; - } - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, - (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt)); + if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv)) + rxq_ctrl->ibv = NULL; if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) { + mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh); LIST_REMOVE(rxq_ctrl, next); rte_free(rxq_ctrl); (*priv->rxqs)[idx] = NULL; return 0; } - return EBUSY; + return 1; } /** * Verify if the queue can be released. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * * @return - * 1 if the queue can be released. + * 1 if the queue can be released, negative errno otherwise and rte_errno is + * set. */ int -mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx) +mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; - if (!(*priv->rxqs)[idx]) - return -1; + if (!(*priv->rxqs)[idx]) { + rte_errno = EINVAL; + return -rte_errno; + } rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1); } @@ -1138,20 +1566,22 @@ mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx) /** * Verify the Rx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * - * @return the number of object not released. + * @return + * The number of objects not released. */ int -mlx5_priv_rxq_verify(struct priv *priv) +mlx5_rxq_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; int ret = 0; LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) { - DEBUG("%p: Rx Queue %p still referenced", (void *)priv, - (void *)rxq_ctrl); + DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced", + dev->data->port_id, rxq_ctrl->idx); ++ret; } return ret; @@ -1160,20 +1590,21 @@ mlx5_priv_rxq_verify(struct priv *priv) /** * Create an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param queues * Queues entering in the indirection table. * @param queues_n * Number of queues in the array. * * @return - * A new indirection table. + * The Verbs object initialised, NULL otherwise and rte_errno is set. */ -struct mlx5_ind_table_ibv* -mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], - uint16_t queues_n) +struct mlx5_ind_table_ibv * +mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues, + uint32_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) : @@ -1184,11 +1615,12 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) + queues_n * sizeof(uint16_t), 0); - if (!ind_tbl) + if (!ind_tbl) { + rte_errno = ENOMEM; return NULL; + } for (i = 0; i != queues_n; ++i) { - struct mlx5_rxq_ctrl *rxq = - mlx5_priv_rxq_get(priv, queues[i]); + struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]); if (!rxq) goto error; @@ -1206,24 +1638,24 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], .ind_tbl = wq, .comp_mask = 0, }); - if (!ind_tbl->ind_table) + if (!ind_tbl->ind_table) { + rte_errno = errno; goto error; + } rte_atomic32_inc(&ind_tbl->refcnt); LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next); - DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv, - (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); return ind_tbl; error: rte_free(ind_tbl); - DEBUG("%p cannot create indirection table", (void *)priv); + DEBUG("port %u cannot create indirection table", dev->data->port_id); return NULL; } /** * Get an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param queues * Queues entering in the indirection table. * @param queues_n @@ -1232,10 +1664,11 @@ error: * @return * An indirection table if found. */ -struct mlx5_ind_table_ibv* -mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], - uint16_t queues_n) +struct mlx5_ind_table_ibv * +mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues, + uint32_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) { @@ -1249,10 +1682,8 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], unsigned int i; rte_atomic32_inc(&ind_tbl->refcnt); - DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv, - (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); for (i = 0; i != ind_tbl->queues_n; ++i) - mlx5_priv_rxq_get(priv, ind_tbl->queues[i]); + mlx5_rxq_get(dev, ind_tbl->queues[i]); } return ind_tbl; } @@ -1260,52 +1691,53 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], /** * Release an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param ind_table * Indirection table to release. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. */ int -mlx5_priv_ind_table_ibv_release(struct priv *priv, - struct mlx5_ind_table_ibv *ind_tbl) +mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, + struct mlx5_ind_table_ibv *ind_tbl) { unsigned int i; - DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv, - (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) claim_zero(mlx5_glue->destroy_rwq_ind_table (ind_tbl->ind_table)); for (i = 0; i != ind_tbl->queues_n; ++i) - claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i])); + claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i])); if (!rte_atomic32_read(&ind_tbl->refcnt)) { LIST_REMOVE(ind_tbl, next); rte_free(ind_tbl); return 0; } - return EBUSY; + return 1; } /** * Verify the Rx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * - * @return the number of object not released. + * @return + * The number of objects not released.
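The table sizing above rounds the queue count up to a power of two, since the Verbs indirection table size is expressed as a log2. A standalone sketch of the computation, with a local reimplementation of log2above() (mirroring the driver's helper) and assuming, as is conventional for RSS indirection tables, that entries beyond the queue count wrap around the queue list:

    #include <stdio.h>

    /* Ceiling log2, as in the mlx5 helper of the same name. */
    static unsigned int
    log2above(unsigned int v)
    {
        unsigned int l;
        unsigned int r;

        for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
            r |= (v & 1);
        return l + r;
    }

    int
    main(void)
    {
        const unsigned int queues_n = 6; /* Example: six Rx queues. */
        const unsigned int wq_n = 1 << log2above(queues_n); /* Eight WQs. */
        unsigned int i;

        for (i = 0; i != wq_n; ++i)
            printf("ind_tbl[%u] -> rxq %u\n", i, i % queues_n);
        return 0;
    }

Note the exact target size for non-power-of-two queue counts is elided above by the hunk boundary; the wrap-around fill is the assumed construction.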
*/ int -mlx5_priv_ind_table_ibv_verify(struct priv *priv) +mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; int ret = 0; LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) { - DEBUG("%p: Verbs indirection table %p still referenced", - (void *)priv, (void *)ind_tbl); + DRV_LOG(DEBUG, + "port %u Verbs indirection table %p still referenced", + dev->data->port_id, (void *)ind_tbl); ++ret; } return ret; @@ -1314,8 +1746,8 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv) /** * Create an Rx Hash queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param rss_key * RSS key for the Rx hash queue. * @param rss_key_len @@ -1329,22 +1761,60 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv) * Number of queues. * * @return - * An hash Rx queue on success. + * The Verbs object initialised, NULL otherwise and rte_errno is set. */ -struct mlx5_hrxq* -mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, - uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) +struct mlx5_hrxq * +mlx5_hrxq_new(struct rte_eth_dev *dev, + const uint8_t *rss_key, uint32_t rss_key_len, + uint64_t hash_fields, + const uint16_t *queues, uint32_t queues_n, + int tunnel __rte_unused) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; struct mlx5_ind_table_ibv *ind_tbl; struct ibv_qp *qp; + int err; queues_n = hash_fields ? queues_n : 1; - ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n); - if (!ind_tbl) - ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n); + ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n); if (!ind_tbl) + ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n); + if (!ind_tbl) { + rte_errno = ENOMEM; return NULL; + } + if (!rss_key_len) { + rss_key_len = MLX5_RSS_HASH_KEY_LEN; + rss_key = rss_hash_default_key; + } +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + qp = mlx5_glue->dv_create_qp + (priv->ctx, + &(struct ibv_qp_init_attr_ex){ + .qp_type = IBV_QPT_RAW_PACKET, + .comp_mask = + IBV_QP_INIT_ATTR_PD | + IBV_QP_INIT_ATTR_IND_TABLE | + IBV_QP_INIT_ATTR_RX_HASH, + .rx_hash_conf = (struct ibv_rx_hash_conf){ + .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = rss_key_len ? rss_key_len : + MLX5_RSS_HASH_KEY_LEN, + .rx_hash_key = rss_key ? + (void *)(uintptr_t)rss_key : + rss_hash_default_key, + .rx_hash_fields_mask = hash_fields, + }, + .rwq_ind_tbl = ind_tbl->ind_table, + .pd = priv->pd, + }, + &(struct mlx5dv_qp_init_attr){ + .comp_mask = tunnel ? + MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS : 0, + .create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS, + }); +#else qp = mlx5_glue->create_qp_ex (priv->ctx, &(struct ibv_qp_init_attr_ex){ @@ -1355,15 +1825,21 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, IBV_QP_INIT_ATTR_RX_HASH, .rx_hash_conf = (struct ibv_rx_hash_conf){ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ, - .rx_hash_key_len = rss_key_len, - .rx_hash_key = rss_key, + .rx_hash_key_len = rss_key_len ? rss_key_len : + MLX5_RSS_HASH_KEY_LEN, + .rx_hash_key = rss_key ? 
+ (void *)(uintptr_t)rss_key : + rss_hash_default_key, .rx_hash_fields_mask = hash_fields, }, .rwq_ind_tbl = ind_tbl->ind_table, .pd = priv->pd, }); - if (!qp) +#endif + if (!qp) { + rte_errno = errno; goto error; + } hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0); if (!hrxq) goto error; @@ -1374,21 +1850,21 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, memcpy(hrxq->rss_key, rss_key, rss_key_len); rte_atomic32_inc(&hrxq->refcnt); LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next); - DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv, - (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); return hrxq; error: - mlx5_priv_ind_table_ibv_release(priv, ind_tbl); + err = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_ind_table_ibv_release(dev, ind_tbl); if (qp) claim_zero(mlx5_glue->destroy_qp(qp)); + rte_errno = err; /* Restore rte_errno. */ return NULL; } /** * Get an Rx Hash queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param rss_conf * RSS configuration for the Rx hash queue. * @param queues @@ -1400,10 +1876,13 @@ error: * @return * An hash Rx queue on success. */ -struct mlx5_hrxq* -mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, - uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) +struct mlx5_hrxq * +mlx5_hrxq_get(struct rte_eth_dev *dev, + const uint8_t *rss_key, uint32_t rss_key_len, + uint64_t hash_fields, + const uint16_t *queues, uint32_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; queues_n = hash_fields ? queues_n : 1; @@ -1416,16 +1895,14 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, continue; if (hrxq->hash_fields != hash_fields) continue; - ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n); + ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n); if (!ind_tbl) continue; if (ind_tbl != hrxq->ind_table) { - mlx5_priv_ind_table_ibv_release(priv, ind_tbl); + mlx5_ind_table_ibv_release(dev, ind_tbl); continue; } rte_atomic32_inc(&hrxq->refcnt); - DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv, - (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); return hrxq; } return NULL; @@ -1434,48 +1911,281 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, /** * Release the hash Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param hrxq * Pointer to Hash Rx queue to release. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. */ int -mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq) +mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) { - DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv, - (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); if (rte_atomic32_dec_and_test(&hrxq->refcnt)) { claim_zero(mlx5_glue->destroy_qp(hrxq->qp)); - mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table); + mlx5_ind_table_ibv_release(dev, hrxq->ind_table); LIST_REMOVE(hrxq, next); rte_free(hrxq); return 0; } - claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table)); - return EBUSY; + claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table)); + return 1; } /** * Verify the Rx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * - * @return the number of object not released. 
+ * @return + * The number of objects not released. */ int -mlx5_priv_hrxq_ibv_verify(struct priv *priv) +mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; int ret = 0; LIST_FOREACH(hrxq, &priv->hrxqs, next) { - DEBUG("%p: Verbs Hash Rx queue %p still referenced", - (void *)priv, (void *)hrxq); + DRV_LOG(DEBUG, + "port %u Verbs hash Rx queue %p still referenced", + dev->data->port_id, (void *)hrxq); ++ret; } return ret; } + +/** + * Create a drop Rx queue Verbs object. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs object initialised, NULL otherwise and rte_errno is set. + */ +struct mlx5_rxq_ibv * +mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct ibv_cq *cq; + struct ibv_wq *wq = NULL; + struct mlx5_rxq_ibv *rxq; + + if (priv->drop_queue.rxq) + return priv->drop_queue.rxq; + cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0); + if (!cq) { + DEBUG("port %u cannot allocate CQ for drop queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + wq = mlx5_glue->create_wq(priv->ctx, + &(struct ibv_wq_init_attr){ + .wq_type = IBV_WQT_RQ, + .max_wr = 1, + .max_sge = 1, + .pd = priv->pd, + .cq = cq, + }); + if (!wq) { + DEBUG("port %u cannot allocate WQ for drop queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0); + if (!rxq) { + DEBUG("port %u cannot allocate drop Rx queue memory", + dev->data->port_id); + rte_errno = ENOMEM; + goto error; + } + rxq->cq = cq; + rxq->wq = wq; + priv->drop_queue.rxq = rxq; + return rxq; +error: + if (wq) + claim_zero(mlx5_glue->destroy_wq(wq)); + if (cq) + claim_zero(mlx5_glue->destroy_cq(cq)); + return NULL; +} + +/** + * Release a drop Rx queue Verbs object. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq; + + if (rxq->wq) + claim_zero(mlx5_glue->destroy_wq(rxq->wq)); + if (rxq->cq) + claim_zero(mlx5_glue->destroy_cq(rxq->cq)); + rte_free(rxq); + priv->drop_queue.rxq = NULL; +} + +/** + * Create a drop indirection table. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs object initialised, NULL otherwise and rte_errno is set. + */ +struct mlx5_ind_table_ibv * +mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_ind_table_ibv *ind_tbl; + struct mlx5_rxq_ibv *rxq; + struct mlx5_ind_table_ibv tmpl; + + rxq = mlx5_rxq_ibv_drop_new(dev); + if (!rxq) + return NULL; + tmpl.ind_table = mlx5_glue->create_rwq_ind_table + (priv->ctx, + &(struct ibv_rwq_ind_table_init_attr){ + .log_ind_tbl_size = 0, + .ind_tbl = &rxq->wq, + .comp_mask = 0, + }); + if (!tmpl.ind_table) { + DEBUG("port %u cannot allocate indirection table for drop" + " queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0); + if (!ind_tbl) { + rte_errno = ENOMEM; + goto error; + } + ind_tbl->ind_table = tmpl.ind_table; + return ind_tbl; +error: + mlx5_rxq_ibv_drop_release(dev); + return NULL; +} + +/** + * Release a drop indirection table. + * + * @param dev + * Pointer to Ethernet device.
+ */ +void +mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table; + + claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table)); + mlx5_rxq_ibv_drop_release(dev); + rte_free(ind_tbl); + priv->drop_queue.hrxq->ind_table = NULL; +} + +/** + * Create a drop Rx Hash queue. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs object initialised, NULL otherwise and rte_errno is set. + */ +struct mlx5_hrxq * +mlx5_hrxq_drop_new(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_ind_table_ibv *ind_tbl; + struct ibv_qp *qp; + struct mlx5_hrxq *hrxq; + + if (priv->drop_queue.hrxq) { + rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt); + return priv->drop_queue.hrxq; + } + ind_tbl = mlx5_ind_table_ibv_drop_new(dev); + if (!ind_tbl) + return NULL; + qp = mlx5_glue->create_qp_ex(priv->ctx, + &(struct ibv_qp_init_attr_ex){ + .qp_type = IBV_QPT_RAW_PACKET, + .comp_mask = + IBV_QP_INIT_ATTR_PD | + IBV_QP_INIT_ATTR_IND_TABLE | + IBV_QP_INIT_ATTR_RX_HASH, + .rx_hash_conf = (struct ibv_rx_hash_conf){ + .rx_hash_function = + IBV_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN, + .rx_hash_key = rss_hash_default_key, + .rx_hash_fields_mask = 0, + }, + .rwq_ind_tbl = ind_tbl->ind_table, + .pd = priv->pd + }); + if (!qp) { + DEBUG("port %u cannot allocate QP for drop queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0); + if (!hrxq) { + DRV_LOG(WARNING, + "port %u cannot allocate memory for drop queue", + dev->data->port_id); + rte_errno = ENOMEM; + goto error; + } + hrxq->ind_table = ind_tbl; + hrxq->qp = qp; + priv->drop_queue.hrxq = hrxq; + rte_atomic32_set(&hrxq->refcnt, 1); + return hrxq; +error: + if (ind_tbl) + mlx5_ind_table_ibv_drop_release(dev); + return NULL; +} + +/** + * Release a drop hash Rx queue. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_hrxq_drop_release(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq; + + if (rte_atomic32_dec_and_test(&hrxq->refcnt)) { + claim_zero(mlx5_glue->destroy_qp(hrxq->qp)); + mlx5_ind_table_ibv_drop_release(dev); + rte_free(hrxq); + priv->drop_queue.hrxq = NULL; + } +} diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index dc4ead93..2d14f8a6 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. 
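The drop-queue helpers above follow a lazy-singleton pattern: the drop hash Rx queue is created on first request, reference-counted on reuse, and torn down only when the last user releases it. A minimal standalone model of that lifecycle (the names and the plain int counter are illustrative; the driver uses rte_atomic32_t):

    #include <stdio.h>
    #include <stdlib.h>

    struct drop_q {
        int refcnt;
    };

    static struct drop_q *singleton;

    static struct drop_q *
    drop_q_get(void)
    {
        if (singleton) {
            ++singleton->refcnt; /* Reuse the existing object. */
            return singleton;
        }
        singleton = calloc(1, sizeof(*singleton));
        if (singleton)
            singleton->refcnt = 1; /* First user. */
        return singleton;
    }

    static void
    drop_q_release(void)
    {
        if (--singleton->refcnt == 0) {
            free(singleton); /* Last user: destroy the backing object. */
            singleton = NULL;
        }
    }

    int
    main(void)
    {
        drop_q_get();
        drop_q_get();     /* A second flow reuses the same drop queue. */
        drop_q_release(); /* refcnt 2 -> 1, object kept. */
        drop_q_release(); /* refcnt 1 -> 0, object destroyed. */
        printf("singleton=%p\n", (void *)singleton);
        return 0;
    }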
+ * Copyright 2015 Mellanox Technologies, Ltd */ #include @@ -34,19 +34,29 @@ #include "mlx5_prm.h" static __rte_always_inline uint32_t -rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe); +rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe); static __rte_always_inline int mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, - uint16_t cqe_cnt, uint32_t *rss_hash); + uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe); static __rte_always_inline uint32_t -rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe); +rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe); + +static __rte_always_inline void +rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, + volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res); + +static __rte_always_inline void +mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx); uint32_t mlx5_ptype_table[] __rte_cache_aligned = { [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */ }; +uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned; +uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned; + /** * Build a table to translate Rx completion flags to packet type. * @@ -86,6 +96,14 @@ mlx5_set_ptype_table(void) RTE_PTYPE_L4_TCP; (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP; + (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; /* UDP */ (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP; @@ -104,17 +122,27 @@ mlx5_set_ptype_table(void) RTE_PTYPE_L4_TCP; (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP; + (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP; (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP; /* Tunneled - L3 */ + (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_NONFRAG; (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_NONFRAG; + (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_NONFRAG; @@ -141,12 +169,36 @@ mlx5_set_ptype_table(void) (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP; + (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP; (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP; + (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; /* Tunneled - UDP */ (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | @@ -162,6 +214,74 @@ mlx5_set_ptype_table(void) RTE_PTYPE_INNER_L4_UDP; } +/** + * Build a table to translate packet to checksum type of Verbs. + */ +void +mlx5_set_cksum_table(void) +{ + unsigned int i; + uint8_t v; + + /* + * The index should have: + * bit[0] = PKT_TX_TCP_SEG + * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM + * bit[4] = PKT_TX_IP_CKSUM + * bit[8] = PKT_TX_OUTER_IP_CKSUM + * bit[9] = tunnel + */ + for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) { + v = 0; + if (i & (1 << 9)) { + /* Tunneled packet. */ + if (i & (1 << 8)) /* Outer IP. */ + v |= MLX5_ETH_WQE_L3_CSUM; + if (i & (1 << 4)) /* Inner IP. */ + v |= MLX5_ETH_WQE_L3_INNER_CSUM; + if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */ + v |= MLX5_ETH_WQE_L4_INNER_CSUM; + } else { + /* No tunnel. */ + if (i & (1 << 4)) /* IP. */ + v |= MLX5_ETH_WQE_L3_CSUM; + if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */ + v |= MLX5_ETH_WQE_L4_CSUM; + } + mlx5_cksum_table[i] = v; + } +} + +/** + * Build a table to translate packet type of mbuf to SWP type of Verbs. + */ +void +mlx5_set_swp_types_table(void) +{ + unsigned int i; + uint8_t v; + + /* + * The index should have: + * bit[0:1] = PKT_TX_L4_MASK + * bit[4] = PKT_TX_IPV6 + * bit[8] = PKT_TX_OUTER_IPV6 + * bit[9] = PKT_TX_OUTER_UDP + */ + for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) { + v = 0; + if (i & (1 << 8)) + v |= MLX5_ETH_WQE_L3_OUTER_IPV6; + if (i & (1 << 9)) + v |= MLX5_ETH_WQE_L4_OUTER_UDP; + if (i & (1 << 4)) + v |= MLX5_ETH_WQE_L3_INNER_IPV6; + if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52)) + v |= MLX5_ETH_WQE_L4_INNER_UDP; + mlx5_swp_types_table[i] = v; + } +} + /** * Return the size of tailroom of WQ. * @@ -218,6 +338,60 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n, return ret; } +/** + * Inline TSO headers into WQE. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int +inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf, + uint32_t *length, + uintptr_t *addr, + uint16_t *pkt_inline_sz, + uint8_t **raw, + uint16_t *max_wqe, + uint16_t *tso_segsz, + uint16_t *tso_header_sz) +{ + uintptr_t end = (uintptr_t)(((uintptr_t)txq->wqes) + + (1 << txq->wqe_n) * MLX5_WQE_SIZE); + unsigned int copy_b; + uint8_t vlan_sz = (buf->ol_flags & PKT_TX_VLAN_PKT) ? 
4 : 0; + const uint8_t tunneled = txq->tunnel_en && (buf->ol_flags & + PKT_TX_TUNNEL_MASK); + uint16_t n_wqe; + + *tso_segsz = buf->tso_segsz; + *tso_header_sz = buf->l2_len + vlan_sz + buf->l3_len + buf->l4_len; + if (unlikely(*tso_segsz == 0 || *tso_header_sz == 0)) { + txq->stats.oerrors++; + return -EINVAL; + } + if (tunneled) + *tso_header_sz += buf->outer_l2_len + buf->outer_l3_len; + /* First seg must contain all TSO headers. */ + if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER) || + *tso_header_sz > DATA_LEN(buf)) { + txq->stats.oerrors++; + return -EINVAL; + } + copy_b = *tso_header_sz - *pkt_inline_sz; + if (!copy_b || ((end - (uintptr_t)*raw) < copy_b)) + return -EAGAIN; + n_wqe = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4; + if (unlikely(*max_wqe < n_wqe)) + return -EINVAL; + *max_wqe -= n_wqe; + rte_memcpy((void *)*raw, (void *)*addr, copy_b); + *length -= copy_b; + *addr += copy_b; + copy_b = MLX5_WQE_DS(copy_b) * MLX5_WQE_DWORD_SIZE; + *pkt_inline_sz += copy_b; + *raw += copy_b; + return 0; +} + /** * DPDK callback to check the status of a tx descriptor. * @@ -321,6 +495,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) volatile struct mlx5_wqe_ctrl *last_wqe = NULL; unsigned int segs_n = 0; const unsigned int max_inline = txq->max_inline; + uint64_t addr_64; if (unlikely(!pkts_n)) return 0; @@ -329,13 +504,11 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) /* Start processing. */ mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); if (unlikely(!max_wqe)) return 0; do { - struct rte_mbuf *buf = NULL; + struct rte_mbuf *buf = *pkts; /* First_seg. */ uint8_t *raw; volatile struct mlx5_wqe_v *wqe = NULL; volatile rte_v128u32_t *dseg = NULL; @@ -347,14 +520,15 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) uint16_t tso_header_sz = 0; uint16_t ehdr; uint8_t cs_flags; - uint64_t tso = 0; + uint8_t tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG); + uint32_t swp_offsets = 0; + uint8_t swp_types = 0; uint16_t tso_segsz = 0; #ifdef MLX5_PMD_SOFT_COUNTERS uint32_t total_length = 0; #endif + int ret; - /* first_seg */ - buf = *pkts; segs_n = buf->nb_segs; /* * Make sure there is enough room to store this packet and @@ -389,7 +563,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (pkts_n - i > 1) rte_prefetch0( rte_pktmbuf_mtod(*(pkts + 1), volatile void *)); - cs_flags = txq_ol_cksum_to_cs(txq, buf); + cs_flags = txq_ol_cksum_to_cs(buf); + txq_mbuf_to_swp(txq, buf, (uint8_t *)&swp_offsets, &swp_types); raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE; /* Replace the Ethernet type by the VLAN if necessary. */ if (buf->ol_flags & PKT_TX_VLAN_PKT) { @@ -415,54 +590,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) addr += pkt_inline_sz; } raw += MLX5_WQE_DWORD_SIZE; - tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG); if (tso) { - uintptr_t end = - (uintptr_t)(((uintptr_t)txq->wqes) + - (1 << txq->wqe_n) * MLX5_WQE_SIZE); - unsigned int copy_b; - uint8_t vlan_sz = - (buf->ol_flags & PKT_TX_VLAN_PKT) ? 
4 : 0; - const uint64_t is_tunneled = - buf->ol_flags & (PKT_TX_TUNNEL_GRE | - PKT_TX_TUNNEL_VXLAN); - - tso_header_sz = buf->l2_len + vlan_sz + - buf->l3_len + buf->l4_len; - tso_segsz = buf->tso_segsz; - if (unlikely(tso_segsz == 0)) { - txq->stats.oerrors++; - break; - } - if (is_tunneled && txq->tunnel_en) { - tso_header_sz += buf->outer_l2_len + - buf->outer_l3_len; - cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM; - } else { - cs_flags |= MLX5_ETH_WQE_L4_CSUM; - } - if (unlikely(tso_header_sz > MLX5_MAX_TSO_HEADER)) { - txq->stats.oerrors++; + ret = inline_tso(txq, buf, &length, + &addr, &pkt_inline_sz, + &raw, &max_wqe, + &tso_segsz, &tso_header_sz); + if (ret == -EINVAL) { break; - } - copy_b = tso_header_sz - pkt_inline_sz; - /* First seg must contain all headers. */ - assert(copy_b <= length); - if (copy_b && ((end - (uintptr_t)raw) > copy_b)) { - uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4; - - if (unlikely(max_wqe < n)) - break; - max_wqe -= n; - rte_memcpy((void *)raw, (void *)addr, copy_b); - addr += copy_b; - length -= copy_b; - /* Include padding for TSO header. */ - copy_b = MLX5_WQE_DS(copy_b) * - MLX5_WQE_DWORD_SIZE; - pkt_inline_sz += copy_b; - raw += copy_b; - } else { + } else if (ret == -EAGAIN) { /* NOP WQE. */ wqe->ctrl = (rte_v128u32_t){ rte_cpu_to_be_32(txq->wqe_ci << 8), @@ -507,7 +642,8 @@ pkt_inline: if (unlikely(max_wqe < n)) break; max_wqe -= n; - if (tso && !inl) { + if (tso) { + assert(inl == 0); inl = rte_cpu_to_be_32(copy_b | MLX5_INLINE_SEG); rte_memcpy((void *)raw, @@ -542,8 +678,17 @@ pkt_inline: } else if (!segs_n) { goto next_pkt; } else { - raw += copy_b; - inline_room -= copy_b; + /* + * Further inline the next segment only for + * non-TSO packets. + */ + if (!tso) { + raw += copy_b; + inline_room -= copy_b; + } else { + inline_room = 0; + } + /* Move to the next segment. */ --segs_n; buf = buf->next; assert(buf); @@ -565,12 +710,12 @@ pkt_inline: ds = 3; use_dseg: /* Add the remaining packet as a simple ds. */ - addr = rte_cpu_to_be_64(addr); + addr_64 = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t){ rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), - addr, - addr >> 32, + addr_64, + addr_64 >> 32, }; ++ds; if (!segs_n) @@ -604,12 +749,12 @@ next_seg: total_length += length; #endif /* Store segment information. */ - addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)); + addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)); *dseg = (rte_v128u32_t){ rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), - addr, - addr >> 32, + addr_64, + addr_64 >> 32, }; (*txq->elts)[++elts_head & elts_m] = buf; if (--segs_n) @@ -633,8 +778,9 @@ next_pkt: 0, }; wqe->eseg = (rte_v128u32_t){ - 0, - cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16), + swp_offsets, + cs_flags | (swp_types << 8) | + (rte_cpu_to_be_16(tso_segsz) << 16), 0, (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz), }; @@ -647,8 +793,8 @@ next_pkt: 0, }; wqe->eseg = (rte_v128u32_t){ - 0, - cs_flags, + swp_offsets, + cs_flags | (swp_types << 8), 0, (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz), }; @@ -669,14 +815,13 @@ next_wqe: /* Check whether completion threshold has been reached. */ comp = txq->elts_comp + i + j + k; if (comp >= MLX5_TX_COMP_THRESH) { + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request completion on last WQE. */ last_wqe->ctrl2 = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ last_wqe->ctrl3 = txq->elts_head; txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } else { txq->elts_comp = comp; } @@ -795,8 +940,6 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) /* Start processing. */ mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); if (unlikely(!max_wqe)) return 0; @@ -820,7 +963,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) } max_elts -= segs_n; --pkts_n; - cs_flags = txq_ol_cksum_to_cs(txq, buf); + cs_flags = txq_ol_cksum_to_cs(buf); /* Retrieve packet information. */ length = PKT_LEN(buf); assert(length); @@ -885,14 +1028,13 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (comp >= MLX5_TX_COMP_THRESH) { volatile struct mlx5_wqe *wqe = mpw.wqe; + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request completion on last WQE. */ wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. */ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } else { txq->elts_comp = comp; } @@ -1024,8 +1166,6 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, /* Start processing. */ mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); do { struct rte_mbuf *buf = *(pkts++); uintptr_t addr; @@ -1052,7 +1192,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, * iteration. */ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); - cs_flags = txq_ol_cksum_to_cs(txq, buf); + cs_flags = txq_ol_cksum_to_cs(buf); /* Retrieve packet information. */ length = PKT_LEN(buf); /* Start new session if packet differs. */ @@ -1182,14 +1322,13 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, if (comp >= MLX5_TX_COMP_THRESH) { volatile struct mlx5_wqe *wqe = mpw.wqe; + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request completion on last WQE. */ wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. */ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } else { txq->elts_comp = comp; } @@ -1303,6 +1442,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, unsigned int mpw_room = 0; unsigned int inl_pad = 0; uint32_t inl_hdr; + uint64_t addr_64; struct mlx5_mpw mpw = { .state = MLX5_MPW_STATE_CLOSED, }; @@ -1312,15 +1452,12 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, /* Start processing. */ mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); if (unlikely(!max_wqe)) return 0; do { struct rte_mbuf *buf = *(pkts++); uintptr_t addr; - unsigned int n; unsigned int do_inline = 0; /* Whether inline is possible. */ uint32_t length; uint8_t cs_flags; @@ -1330,7 +1467,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, /* Make sure there is enough room to store this packet. 
*/ if (max_elts - j == 0) break; - cs_flags = txq_ol_cksum_to_cs(txq, buf); + cs_flags = txq_ol_cksum_to_cs(buf); /* Retrieve packet information. */ length = PKT_LEN(buf); /* Start new session if: @@ -1382,7 +1519,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, (!txq->mpw_hdr_dseg || mpw.total_len >= MLX5_WQE_SIZE); } - if (do_inline) { + if (max_inline && do_inline) { /* Inline packet into WQE. */ unsigned int max; @@ -1440,16 +1577,13 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, ((uintptr_t)mpw.data.raw + inl_pad); (*txq->elts)[elts_head++ & elts_m] = buf; - addr = rte_pktmbuf_mtod(buf, uintptr_t); - for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++) - rte_prefetch2((void *)(addr + - n * RTE_CACHE_LINE_SIZE)); - addr = rte_cpu_to_be_64(addr); + addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, + uintptr_t)); *dseg = (rte_v128u32_t) { rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), - addr, - addr >> 32, + addr_64, + addr_64 >> 32, }; mpw.data.raw = (volatile void *)(dseg + 1); mpw.total_len += (inl_pad + sizeof(*dseg)); @@ -1473,15 +1607,14 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) { volatile struct mlx5_wqe *wqe = mpw.wqe; + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request completion on last WQE. */ wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. */ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; txq->mpw_comp = txq->wqe_ci; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } else { txq->elts_comp += j; } @@ -1541,6 +1674,8 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) /** * Translate RX completion flags to packet type. * + * @param[in] rxq + * Pointer to RX queue structure. * @param[in] cqe * Pointer to CQE. * @@ -1550,7 +1685,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) * Packet type for struct rte_mbuf. */ static inline uint32_t -rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe) +rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe) { uint8_t idx; uint8_t pinfo = cqe->pkt_info; @@ -1565,7 +1700,7 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe) * bit[7] = outer_l3_type */ idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10); - return mlx5_ptype_table[idx]; + return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6)); } /** @@ -1577,8 +1712,9 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe) * Pointer to RX queue. * @param cqe * CQE to process. - * @param[out] rss_hash - * Packet RSS Hash result. + * @param[out] mcqe + * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not + * written. 
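rxq_cq_to_pkt_type() above packs two tunnel bits from the CQE's pkt_info and six bits from hdr_type_etc into an eight-bit table index, with bit 6 flagging a tunneled packet. A standalone illustration with made-up CQE values:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint8_t pinfo = 0x1;     /* Example CQE pkt_info (tunneled). */
        uint16_t ptype = 0x1400; /* Example CQE hdr_type_etc. */
        uint8_t idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);

        /* Bit 6 of the index marks tunneled packets, which the Rx path
         * above uses to OR in the per-queue tunnel ptype. */
        printf("idx=0x%02x tunneled=%d\n", idx, !!(idx & (1 << 6)));
        return 0;
    }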
* * @return * Packet size in bytes (0 if there is none), -1 in case of completion @@ -1586,7 +1722,7 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe) */ static inline int mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, - uint16_t cqe_cnt, uint32_t *rss_hash) + uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe) { struct rxq_zip *zip = &rxq->zip; uint16_t cqe_n = cqe_cnt + 1; @@ -1600,7 +1736,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info); len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt); - *rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result); + *mcqe = &(*mc)[zip->ai & 7]; if ((++zip->ai & 7) == 0) { /* Invalidate consumed CQEs */ idx = zip->ca; @@ -1665,7 +1801,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, zip->cq_ci = rxq->cq_ci + zip->cqe_cnt; /* Get packet size to return. */ len = rte_be_to_cpu_32((*mc)[0].byte_cnt); - *rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result); + *mcqe = &(*mc)[0]; zip->ai = 1; /* Prefetch all the entries to be invalidated */ idx = zip->ca; @@ -1676,7 +1812,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, } } else { len = rte_be_to_cpu_32(cqe->byte_cnt); - *rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res); } /* Error while receiving packet. */ if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR)) @@ -1688,8 +1823,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, /** * Translate RX completion flags to offload flags. * - * @param[in] rxq - * Pointer to RX queue structure. * @param[in] cqe * Pointer to CQE. * @@ -1697,7 +1830,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, * Offload flags (ol_flags) for struct rte_mbuf. */ static inline uint32_t -rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe) +rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe) { uint32_t ol_flags = 0; uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc); @@ -1709,17 +1842,55 @@ rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe) TRANSPOSE(flags, MLX5_CQE_RX_L4_HDR_VALID, PKT_RX_L4_CKSUM_GOOD); - if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun)) - ol_flags |= - TRANSPOSE(flags, - MLX5_CQE_RX_L3_HDR_VALID, - PKT_RX_IP_CKSUM_GOOD) | - TRANSPOSE(flags, - MLX5_CQE_RX_L4_HDR_VALID, - PKT_RX_L4_CKSUM_GOOD); return ol_flags; } +/** + * Fill in mbuf fields from RX completion flags. + * Note that pkt->ol_flags should be initialized outside of this function. + * + * @param rxq + * Pointer to RX queue. + * @param pkt + * mbuf to fill. + * @param cqe + * CQE to process. + * @param rss_hash_res + * Packet RSS Hash result. + */ +static inline void +rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, + volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res) +{ + /* Update packet information. 
*/ + pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe); + if (rss_hash_res && rxq->rss_hash) { + pkt->hash.rss = rss_hash_res; + pkt->ol_flags |= PKT_RX_RSS_HASH; + } + if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) { + pkt->ol_flags |= PKT_RX_FDIR; + if (cqe->sop_drop_qpn != + rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) { + uint32_t mark = cqe->sop_drop_qpn; + + pkt->ol_flags |= PKT_RX_FDIR_ID; + pkt->hash.fdir.hi = mlx5_flow_mark_get(mark); + } + } + if (rxq->csum) + pkt->ol_flags |= rxq_cq_to_ol_flags(cqe); + if (rxq->vlan_strip && + (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) { + pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info); + } + if (rxq->hw_timestamp) { + pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp); + pkt->ol_flags |= PKT_RX_TIMESTAMP; + } +} + /** * DPDK callback for RX. * @@ -1750,9 +1921,11 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) while (pkts_n) { unsigned int idx = rq_ci & wqe_cnt; - volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx]; + volatile struct mlx5_wqe_data_seg *wqe = + &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; struct rte_mbuf *rep = (*rxq->elts)[idx]; - uint32_t rss_hash_res = 0; + volatile struct mlx5_mini_cqe8 *mcqe = NULL; + uint32_t rss_hash_res; if (pkt) NEXT(seg) = rep; @@ -1782,8 +1955,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) } if (!pkt) { cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; - len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, - &rss_hash_res); + len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe); if (!len) { rte_mbuf_raw_free(rep); break; @@ -1796,40 +1968,12 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) } pkt = seg; assert(len >= (rxq->crc_present << 2)); - /* Update packet information. */ - pkt->packet_type = rxq_cq_to_pkt_type(cqe); pkt->ol_flags = 0; - if (rss_hash_res && rxq->rss_hash) { - pkt->hash.rss = rss_hash_res; - pkt->ol_flags = PKT_RX_RSS_HASH; - } - if (rxq->mark && - MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) { - pkt->ol_flags |= PKT_RX_FDIR; - if (cqe->sop_drop_qpn != - rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) { - uint32_t mark = cqe->sop_drop_qpn; - - pkt->ol_flags |= PKT_RX_FDIR_ID; - pkt->hash.fdir.hi = - mlx5_flow_mark_get(mark); - } - } - if (rxq->csum | rxq->csum_l2tun) - pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe); - if (rxq->vlan_strip && - (cqe->hdr_type_etc & - rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) { - pkt->ol_flags |= PKT_RX_VLAN | - PKT_RX_VLAN_STRIPPED; - pkt->vlan_tci = - rte_be_to_cpu_16(cqe->vlan_info); - } - if (rxq->hw_timestamp) { - pkt->timestamp = - rte_be_to_cpu_64(cqe->timestamp); - pkt->ol_flags |= PKT_RX_TIMESTAMP; - } + /* If compressed, take hash result from mini-CQE. */ + rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ? + cqe->rx_hash_res : + mcqe->rx_hash_result); + rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res); if (rxq->crc_present) len -= ETHER_CRC_LEN; PKT_LEN(pkt) = len; @@ -1845,6 +1989,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) * changes. */ wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t)); + /* If there's only one MR, no need to replace LKey in WQE. 
*/ + if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) + wqe->lkey = mlx5_rx_mb2mr(rxq, rep); if (len > DATA_LEN(seg)) { len -= DATA_LEN(seg); ++NB_SEGS(pkt); @@ -1882,6 +2029,246 @@ skip: return i; } +void +mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque) +{ + struct mlx5_mprq_buf *buf = opaque; + + if (rte_atomic16_read(&buf->refcnt) == 1) { + rte_mempool_put(buf->mp, buf); + } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) { + rte_atomic16_set(&buf->refcnt, 1); + rte_mempool_put(buf->mp, buf); + } +} + +void +mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf) +{ + mlx5_mprq_buf_free_cb(NULL, buf); +} + +static inline void +mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx) +{ + struct mlx5_mprq_buf *rep = rxq->mprq_repl; + volatile struct mlx5_wqe_data_seg *wqe = + &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg; + void *addr; + + assert(rep != NULL); + /* Replace MPRQ buf. */ + (*rxq->mprq_bufs)[rq_idx] = rep; + /* Replace WQE. */ + addr = mlx5_mprq_buf_addr(rep); + wqe->addr = rte_cpu_to_be_64((uintptr_t)addr); + /* If there's only one MR, no need to replace LKey in WQE. */ + if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) + wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr); + /* Stash a mbuf for next replacement. */ + if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep))) + rxq->mprq_repl = rep; + else + rxq->mprq_repl = NULL; +} + +/** + * DPDK callback for RX with Multi-Packet RQ support. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). + */ +uint16_t +mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + struct mlx5_rxq_data *rxq = dpdk_rxq; + const unsigned int strd_n = 1 << rxq->strd_num_n; + const unsigned int strd_sz = 1 << rxq->strd_sz_n; + const unsigned int strd_shift = + MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en; + const unsigned int cq_mask = (1 << rxq->cqe_n) - 1; + const unsigned int wq_mask = (1 << rxq->elts_n) - 1; + volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; + unsigned int i = 0; + uint16_t rq_ci = rxq->rq_ci; + uint16_t consumed_strd = rxq->consumed_strd; + struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; + + while (i < pkts_n) { + struct rte_mbuf *pkt; + void *addr; + int ret; + unsigned int len; + uint16_t strd_cnt; + uint16_t strd_idx; + uint32_t offset; + uint32_t byte_cnt; + volatile struct mlx5_mini_cqe8 *mcqe = NULL; + uint32_t rss_hash_res = 0; + + if (consumed_strd == strd_n) { + /* Replace WQE only if the buffer is still in use. */ + if (rte_atomic16_read(&buf->refcnt) > 1) { + mprq_buf_replace(rxq, rq_ci & wq_mask); + /* Release the old buffer. */ + mlx5_mprq_buf_free(buf); + } else if (unlikely(rxq->mprq_repl == NULL)) { + struct mlx5_mprq_buf *rep; + + /* + * Currently, the MPRQ mempool is out of buffer + * and doing memcpy regardless of the size of Rx + * packet. Retry allocation to get back to + * normal. + */ + if (!rte_mempool_get(rxq->mprq_mp, + (void **)&rep)) + rxq->mprq_repl = rep; + } + /* Advance to the next WQE. */ + consumed_strd = 0; + ++rq_ci; + buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; + } + cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; + ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe); + if (!ret) + break; + if (unlikely(ret == -1)) { + /* RX error, packet is likely too large. 
*/ + ++rxq->stats.idropped; + continue; + } + byte_cnt = ret; + strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >> + MLX5_MPRQ_STRIDE_NUM_SHIFT; + assert(strd_cnt); + consumed_strd += strd_cnt; + if (byte_cnt & MLX5_MPRQ_FILLER_MASK) + continue; + if (mcqe == NULL) { + rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res); + strd_idx = rte_be_to_cpu_16(cqe->wqe_counter); + } else { + /* mini-CQE for MPRQ doesn't have hash result. */ + strd_idx = rte_be_to_cpu_16(mcqe->stride_idx); + } + assert(strd_idx < strd_n); + assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask)); + /* + * Currently configured to receive a packet per a stride. But if + * MTU is adjusted through kernel interface, device could + * consume multiple strides without raising an error. In this + * case, the packet should be dropped because it is bigger than + * the max_rx_pkt_len. + */ + if (unlikely(strd_cnt > 1)) { + ++rxq->stats.idropped; + continue; + } + pkt = rte_pktmbuf_alloc(rxq->mp); + if (unlikely(pkt == NULL)) { + ++rxq->stats.rx_nombuf; + break; + } + len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT; + assert((int)len >= (rxq->crc_present << 2)); + if (rxq->crc_present) + len -= ETHER_CRC_LEN; + offset = strd_idx * strd_sz + strd_shift; + addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset); + /* Initialize the offload flag. */ + pkt->ol_flags = 0; + /* + * Memcpy packets to the target mbuf if: + * - The size of packet is smaller than mprq_max_memcpy_len. + * - Out of buffer in the Mempool for Multi-Packet RQ. + */ + if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) { + /* + * When memcpy'ing packet due to out-of-buffer, the + * packet must be smaller than the target mbuf. + */ + if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) { + rte_pktmbuf_free_seg(pkt); + ++rxq->stats.idropped; + continue; + } + rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len); + } else { + rte_iova_t buf_iova; + struct rte_mbuf_ext_shared_info *shinfo; + uint16_t buf_len = strd_cnt * strd_sz; + + /* Increment the refcnt of the whole chunk. */ + rte_atomic16_add_return(&buf->refcnt, 1); + assert((uint16_t)rte_atomic16_read(&buf->refcnt) <= + strd_n + 1); + addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM); + /* + * MLX5 device doesn't use iova but it is necessary in a + * case where the Rx packet is transmitted via a + * different PMD. + */ + buf_iova = rte_mempool_virt2iova(buf) + + RTE_PTR_DIFF(addr, buf); + shinfo = rte_pktmbuf_ext_shinfo_init_helper(addr, + &buf_len, mlx5_mprq_buf_free_cb, buf); + /* + * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when + * attaching the stride to mbuf and more offload flags + * will be added below by calling rxq_cq_to_mbuf(). + * Other fields will be overwritten. + */ + rte_pktmbuf_attach_extbuf(pkt, addr, buf_iova, buf_len, + shinfo); + rte_pktmbuf_reset_headroom(pkt); + assert(pkt->ol_flags == EXT_ATTACHED_MBUF); + /* + * Prevent potential overflow due to MTU change through + * kernel interface. + */ + if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) { + rte_pktmbuf_free_seg(pkt); + ++rxq->stats.idropped; + continue; + } + } + rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res); + PKT_LEN(pkt) = len; + DATA_LEN(pkt) = len; + PORT(pkt) = rxq->port_id; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment bytes counter. */ + rxq->stats.ibytes += PKT_LEN(pkt); +#endif + /* Return packet. */ + *(pkts++) = pkt; + ++i; + } + /* Update the consumer indexes. 
*/
+	rxq->consumed_strd = consumed_strd;
+	rte_cio_wmb();
+	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+	if (rq_ci != rxq->rq_ci) {
+		rxq->rq_ci = rq_ci;
+		rte_cio_wmb();
+		*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+	}
+#ifdef MLX5_PMD_SOFT_COUNTERS
+	/* Increment packets counter. */
+	rxq->stats.ipackets += i;
+#endif
+	return i;
+}
+
 /**
  * Dummy DPDK callback for TX.
  *
@@ -1899,11 +2286,10 @@ skip:
  *   Number of packets successfully transmitted (<= pkts_n).
  */
 uint16_t
-removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+removed_tx_burst(void *dpdk_txq __rte_unused,
+		 struct rte_mbuf **pkts __rte_unused,
+		 uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_txq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
@@ -1924,11 +2310,10 @@ removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Number of packets successfully received (<= pkts_n).
  */
 uint16_t
-removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+removed_rx_burst(void *dpdk_rxq __rte_unused,
+		 struct rte_mbuf **pkts __rte_unused,
+		 uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_rxq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
@@ -1940,58 +2325,49 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
  */
 uint16_t __attribute__((weak))
-mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
+		      struct rte_mbuf **pkts __rte_unused,
+		      uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_txq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
 uint16_t __attribute__((weak))
-mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
+		  struct rte_mbuf **pkts __rte_unused,
+		  uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_txq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
 uint16_t __attribute__((weak))
-mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
+		  struct rte_mbuf **pkts __rte_unused,
+		  uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_rxq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
 int __attribute__((weak))
-priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
 {
-	(void)priv;
-	(void)dev;
 	return -ENOTSUP;
 }
 
 int __attribute__((weak))
-priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
 {
-	(void)priv;
-	(void)dev;
 	return -ENOTSUP;
 }
 
 int __attribute__((weak))
-rxq_check_vec_support(struct mlx5_rxq_data *rxq)
+mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
 {
-	(void)rxq;
 	return -ENOTSUP;
 }
 
 int __attribute__((weak))
-priv_check_vec_rx_support(struct priv *priv)
+mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
 {
-	(void)priv;
 	return -ENOTSUP;
 }
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index d7e89055..48ed2b20 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
  */
 
 #ifndef RTE_PMD_MLX5_RXTX_H_
@@ -26,13 +26,19 @@
 #include
 #include
 #include
+#include
+#include
 
 #include "mlx5_utils.h"
 #include "mlx5.h"
+#include "mlx5_mr.h"
 #include "mlx5_autoconf.h"
 #include "mlx5_defs.h"
 #include "mlx5_prm.h"
 
+/* Support tunnel matching.
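 *
 * (MLX5_FLOW_TUNNEL below sizes the per-queue tunnel counters added to
 * struct mlx5_rxq_ctrl; a hypothetical update would look like:
 * assert(type < MLX5_FLOW_TUNNEL); ++rxq_ctrl->flow_tunnels_n[type];)
 *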
*/ +#define MLX5_FLOW_TUNNEL 5 + struct mlx5_rxq_stats { unsigned int idx; /**< Mapping index. */ #ifdef MLX5_PMD_SOFT_COUNTERS @@ -54,17 +60,6 @@ struct mlx5_txq_stats { struct priv; -/* Memory region queue object. */ -struct mlx5_mr { - LIST_ENTRY(mlx5_mr) next; /**< Pointer to the next element. */ - rte_atomic32_t refcnt; /*<< Reference counter. */ - uint32_t lkey; /*<< rte_cpu_to_be_32(mr->lkey) */ - uintptr_t start; /* Start address of MR */ - uintptr_t end; /* End address of MR */ - struct ibv_mr *mr; /*<< Memory Region. */ - struct rte_mempool *mp; /*<< Memory Pool. */ -}; - /* Compressed CQE context. */ struct rxq_zip { uint16_t ai; /* Array index. */ @@ -74,10 +69,19 @@ struct rxq_zip { uint32_t cqe_cnt; /* Number of CQEs. */ }; +/* Multi-Packet RQ buffer header. */ +struct mlx5_mprq_buf { + struct rte_mempool *mp; + rte_atomic16_t refcnt; /* Atomically accessed refcnt. */ + uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */ +} __rte_cache_aligned; + +/* Get pointer to the first stride. */ +#define mlx5_mprq_buf_addr(ptr) ((ptr) + 1) + /* RX queue descriptor. */ struct mlx5_rxq_data { unsigned int csum:1; /* Enable checksum offloading. */ - unsigned int csum_l2tun:1; /* Same for L2 tunnels. */ unsigned int hw_timestamp:1; /* Enable HW timestamp. */ unsigned int vlan_strip:1; /* Enable VLAN stripping. */ unsigned int crc_present:1; /* CRC must be subtracted. */ @@ -86,24 +90,41 @@ struct mlx5_rxq_data { unsigned int elts_n:4; /* Log 2 of Mbufs. */ unsigned int rss_hash:1; /* RSS hash result is enabled. */ unsigned int mark:1; /* Marked flow available on the queue. */ - unsigned int :15; /* Remaining bits. */ + unsigned int strd_num_n:5; /* Log 2 of the number of stride. */ + unsigned int strd_sz_n:4; /* Log 2 of stride size. */ + unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */ + unsigned int :6; /* Remaining bits. */ volatile uint32_t *rq_db; volatile uint32_t *cq_db; uint16_t port_id; uint16_t rq_ci; + uint16_t consumed_strd; /* Number of consumed strides in WQE. */ uint16_t rq_pi; uint16_t cq_ci; - volatile struct mlx5_wqe_data_seg(*wqes)[]; + struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */ + uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */ + volatile void *wqes; volatile struct mlx5_cqe(*cqes)[]; struct rxq_zip zip; /* Compressed context. */ - struct rte_mbuf *(*elts)[]; + RTE_STD_C11 + union { + struct rte_mbuf *(*elts)[]; + struct mlx5_mprq_buf *(*mprq_bufs)[]; + }; struct rte_mempool *mp; + struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */ + struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */ struct mlx5_rxq_stats stats; uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */ struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */ void *cq_uar; /* CQ user access region. */ uint32_t cqn; /* CQ number. */ uint8_t cq_arm_sn; /* CQ arm seq number. */ +#ifndef RTE_ARCH_64 + rte_spinlock_t *uar_lock_cq; + /* CQ (UAR) access lock required for 32bit implementations */ +#endif + uint32_t tunnel; /* Tunnel information. */ } __rte_cache_aligned; /* Verbs Rx queue elements. */ @@ -114,18 +135,20 @@ struct mlx5_rxq_ibv { struct ibv_cq *cq; /* Completion Queue. */ struct ibv_wq *wq; /* Work Queue. */ struct ibv_comp_channel *channel; - struct mlx5_mr *mr; /* Memory Region (for mp). */ }; /* RX queue control descriptor. */ struct mlx5_rxq_ctrl { LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */ rte_atomic32_t refcnt; /* Reference counter. 
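 *
 * (Sketch of the get/release pattern behind these counters, using the
 * mlx5_rxq_* API declared below; release tears the queue down only
 * when the counter drops to zero:)
 *
 *   struct mlx5_rxq_ctrl *ctrl = mlx5_rxq_get(dev, idx);
 *
 *   if (ctrl != NULL) {
 *           /* ... use ctrl->rxq ... */
 *           mlx5_rxq_release(dev, idx);
 *   }
 *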
*/
-	struct priv *priv; /* Back pointer to private data. */
 	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
+	struct priv *priv; /* Back pointer to private data. */
 	struct mlx5_rxq_data rxq; /* Data path structure. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	unsigned int irq:1; /* Whether IRQ is enabled. */
+	uint16_t idx; /* Queue index. */
+	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
+	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
 };
 
 /* Indirection table. */
@@ -133,7 +156,7 @@ struct mlx5_ind_table_ibv {
 	LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
-	uint16_t queues_n; /**< Number of queues in the list. */
+	uint32_t queues_n; /**< Number of queues in the list. */
 	uint16_t queues[]; /**< Queue list. */
 };
 
@@ -144,7 +167,7 @@ struct mlx5_hrxq {
 	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
 	struct ibv_qp *qp; /* Verbs queue pair. */
 	uint64_t hash_fields; /* Verbs Hash fields. */
-	uint8_t rss_key_len; /* Hash key length in bytes. */
+	uint32_t rss_key_len; /* Hash key length in bytes. */
 	uint8_t rss_key[]; /* Hash key. */
 };
 
@@ -167,26 +190,31 @@ struct mlx5_txq_data {
 	uint16_t tso_en:1; /* When set hardware TSO is enabled. */
 	uint16_t tunnel_en:1;
 	/* When set TX offload for tunneled packets are supported. */
+	uint16_t swp_en:1; /* Whether SW parser is enabled. */
 	uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
 	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
 	uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
-	uint16_t mr_cache_idx; /* Index of last hit entry. */
 	uint32_t qp_num_8s; /* QP number shifted by 8. */
 	uint64_t offloads; /* Offloads for Tx Queue. */
+	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
 	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
 	volatile void *wqes; /* Work queue (use volatile to write into). */
 	volatile uint32_t *qp_db; /* Work queue doorbell. */
 	volatile uint32_t *cq_db; /* Completion queue doorbell. */
 	volatile void *bf_reg; /* Blueflame register remapped. */
-	struct mlx5_mr *mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MR translation table. */
 	struct rte_mbuf *(*elts)[]; /* TX elements. */
 	struct mlx5_txq_stats stats; /* TX queue counters. */
+#ifndef RTE_ARCH_64
+	rte_spinlock_t *uar_lock;
+	/* UAR access lock required for 32bit implementations */
+#endif
 } __rte_cache_aligned;
 
 /* Verbs Tx queue elements. */
 struct mlx5_txq_ibv {
 	LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
+	struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
 	struct ibv_cq *cq; /* Completion Queue. */
 	struct ibv_qp *qp; /* Queue Pair. */
 };
 
@@ -195,112 +223,203 @@ struct mlx5_txq_ibv {
 struct mlx5_txq_ctrl {
 	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
-	struct priv *priv; /* Back pointer to private data. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	unsigned int max_inline_data; /* Max inline data. */
 	unsigned int max_tso_header; /* Max TSO header size. */
 	struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
+	struct priv *priv; /* Back pointer to private data. */
 	struct mlx5_txq_data txq; /* Data path structure. */
 	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process.
*/ volatile void *bf_reg_orig; /* Blueflame register from verbs. */ + uint16_t idx; /* Queue index. */ }; /* mlx5_rxq.c */ extern uint8_t rss_hash_default_key[]; -extern const size_t rss_hash_default_key_len; - -void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *); -int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, - const struct rte_eth_rxconf *, struct rte_mempool *); -void mlx5_rx_queue_release(void *); -int priv_rx_intr_vec_enable(struct priv *priv); -void priv_rx_intr_vec_disable(struct priv *priv); + +int mlx5_check_mprq_support(struct rte_eth_dev *dev); +int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq); +int mlx5_mprq_enabled(struct rte_eth_dev *dev); +int mlx5_mprq_free_mp(struct rte_eth_dev *dev); +int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev); +void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl); +int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); +void mlx5_rx_queue_release(void *dpdk_rxq); +int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev); +void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev); int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id); int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id); -struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_new(struct priv *, uint16_t); -struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *, uint16_t); -int mlx5_priv_rxq_ibv_release(struct priv *, struct mlx5_rxq_ibv *); -int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *); -int mlx5_priv_rxq_ibv_verify(struct priv *); -struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t, - uint16_t, unsigned int, - const struct rte_eth_rxconf *, - struct rte_mempool *); -struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t); -int mlx5_priv_rxq_release(struct priv *, uint16_t); -int mlx5_priv_rxq_releasable(struct priv *, uint16_t); -int mlx5_priv_rxq_verify(struct priv *); -int rxq_alloc_elts(struct mlx5_rxq_ctrl *); -struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *, - uint16_t [], - uint16_t); -struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *, - uint16_t [], - uint16_t); -int mlx5_priv_ind_table_ibv_release(struct priv *, struct mlx5_ind_table_ibv *); -int mlx5_priv_ind_table_ibv_verify(struct priv *); -struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *, uint8_t *, uint8_t, - uint64_t, uint16_t [], uint16_t); -struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t, - uint64_t, uint16_t [], uint16_t); -int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *); -int mlx5_priv_hrxq_ibv_verify(struct priv *); -uint64_t mlx5_priv_get_rx_port_offloads(struct priv *); -uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *); +struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx); +struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv); +int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv); +struct mlx5_rxq_ibv *mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev); +void mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev); +int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev); +struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, unsigned int socket, + const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); +struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx); +int 
mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_verify(struct rte_eth_dev *dev); +int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl); +int rxq_alloc_mprq_buf(struct mlx5_rxq_ctrl *rxq_ctrl); +struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, + const uint16_t *queues, + uint32_t queues_n); +struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, + const uint16_t *queues, + uint32_t queues_n); +int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, + struct mlx5_ind_table_ibv *ind_tbl); +int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev); +struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev); +void mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev); +struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev, + const uint8_t *rss_key, uint32_t rss_key_len, + uint64_t hash_fields, + const uint16_t *queues, uint32_t queues_n, + int tunnel __rte_unused); +struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev, + const uint8_t *rss_key, uint32_t rss_key_len, + uint64_t hash_fields, + const uint16_t *queues, uint32_t queues_n); +int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq); +int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev); +struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev); +void mlx5_hrxq_drop_release(struct rte_eth_dev *dev); +uint64_t mlx5_get_rx_port_offloads(void); +uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev); /* mlx5_txq.c */ -int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, - const struct rte_eth_txconf *); -void mlx5_tx_queue_release(void *); -int priv_tx_uar_remap(struct priv *priv, int fd); -struct mlx5_txq_ibv *mlx5_priv_txq_ibv_new(struct priv *, uint16_t); -struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *, uint16_t); -int mlx5_priv_txq_ibv_release(struct priv *, struct mlx5_txq_ibv *); -int mlx5_priv_txq_ibv_releasable(struct priv *, struct mlx5_txq_ibv *); -int mlx5_priv_txq_ibv_verify(struct priv *); -struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *, uint16_t, - uint16_t, unsigned int, - const struct rte_eth_txconf *); -struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *, uint16_t); -int mlx5_priv_txq_release(struct priv *, uint16_t); -int mlx5_priv_txq_releasable(struct priv *, uint16_t); -int mlx5_priv_txq_verify(struct priv *); -void txq_alloc_elts(struct mlx5_txq_ctrl *); -uint64_t mlx5_priv_get_tx_port_offloads(struct priv *); +int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf); +void mlx5_tx_queue_release(void *dpdk_txq); +int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd); +struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx); +struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv); +int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv); +int mlx5_txq_ibv_verify(struct rte_eth_dev *dev); +struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, unsigned int socket, + const struct rte_eth_txconf *conf); +struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_verify(struct rte_eth_dev *dev); +void txq_alloc_elts(struct 
mlx5_txq_ctrl *txq_ctrl);
+uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
 
 /* mlx5_rxtx.c */
 
 extern uint32_t mlx5_ptype_table[];
+extern uint8_t mlx5_cksum_table[];
+extern uint8_t mlx5_swp_types_table[];
 
 void mlx5_set_ptype_table(void);
-uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t);
-uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
-uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
-int mlx5_rx_descriptor_status(void *, uint16_t);
-int mlx5_tx_descriptor_status(void *, uint16_t);
+void mlx5_set_cksum_table(void);
+void mlx5_set_swp_types_table(void);
+uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
+		       uint16_t pkts_n);
+uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts,
+			   uint16_t pkts_n);
+uint16_t mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
+				  uint16_t pkts_n);
+uint16_t mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts,
+			    uint16_t pkts_n);
+uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
+void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
+void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
+uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
+			    uint16_t pkts_n);
+uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
+			  uint16_t pkts_n);
+uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
+			  uint16_t pkts_n);
+int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
 /* Vectorized version of mlx5_rxtx.c */
-int priv_check_raw_vec_tx_support(struct priv *, struct rte_eth_dev *);
-int priv_check_vec_tx_support(struct priv *, struct rte_eth_dev *);
-int rxq_check_vec_support(struct mlx5_rxq_data *);
-int priv_check_vec_rx_support(struct priv *);
-uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t);
+int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
+int mlx5_check_vec_tx_support(struct rte_eth_dev *dev);
+int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
+int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
+uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+			       uint16_t pkts_n);
+uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+			   uint16_t pkts_n);
+uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
+			   uint16_t pkts_n);
 
 /* mlx5_mr.c */
 
-void mlx5_mp2mr_iter(struct rte_mempool *, void *);
-struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *,
-				   struct rte_mempool *, unsigned int);
-struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *, struct rte_mempool *,
-				   unsigned int);
+void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
+uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
+uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
+
+/**
+ * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
+ * 64bit architectures.
+ *
+ * @param val
+ *   Value to write in CPU endian format.
+ * @param addr
+ *   Address to write to.
+ * @param lock + * Address of the lock to use for that UAR access. + */ +static __rte_always_inline void +__mlx5_uar_write64_relaxed(uint64_t val, volatile void *addr, + rte_spinlock_t *lock __rte_unused) +{ +#ifdef RTE_ARCH_64 + rte_write64_relaxed(val, addr); +#else /* !RTE_ARCH_64 */ + rte_spinlock_lock(lock); + rte_write32_relaxed(val, addr); + rte_io_wmb(); + rte_write32_relaxed(val >> 32, + (volatile void *)((volatile char *)addr + 4)); + rte_spinlock_unlock(lock); +#endif +} + +/** + * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and + * 64bit architectures while guaranteeing the order of execution with the + * code being executed. + * + * @param val + * value to write in CPU endian format. + * @param addr + * Address to write to. + * @param lock + * Address of the lock to use for that UAR access. + */ +static __rte_always_inline void +__mlx5_uar_write64(uint64_t val, volatile void *addr, rte_spinlock_t *lock) +{ + rte_io_wmb(); + __mlx5_uar_write64_relaxed(val, addr, lock); +} + +/* Assist macros, used instead of directly calling the functions they wrap. */ +#ifdef RTE_ARCH_64 +#define mlx5_uar_write64_relaxed(val, dst, lock) \ + __mlx5_uar_write64_relaxed(val, dst, NULL) +#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL) +#else +#define mlx5_uar_write64_relaxed(val, dst, lock) \ + __mlx5_uar_write64_relaxed(val, dst, lock) +#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock) +#endif #ifndef NDEBUG /** @@ -316,7 +435,7 @@ static inline int check_cqe_seen(volatile struct mlx5_cqe *cqe) { static const uint8_t magic[] = "seen"; - volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0; + volatile uint8_t (*buf)[sizeof(cqe->rsvd1)] = &cqe->rsvd1; int ret = 1; unsigned int i; @@ -363,9 +482,10 @@ check_cqe(volatile struct mlx5_cqe *cqe, (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR)) return 0; if (!check_cqe_seen(cqe)) { - ERROR("unexpected CQE error %u (0x%02x)" - " syndrome 0x%02x", - op_code, op_code, syndrome); + DRV_LOG(ERR, + "unexpected CQE error %u (0x%02x) syndrome" + " 0x%02x", + op_code, op_code, syndrome); rte_hexdump(stderr, "MLX5 Error CQE:", (const void *)((uintptr_t)err_cqe), sizeof(*err_cqe)); @@ -374,8 +494,8 @@ check_cqe(volatile struct mlx5_cqe *cqe, } else if ((op_code != MLX5_CQE_RESP_SEND) && (op_code != MLX5_CQE_REQ)) { if (!check_cqe_seen(cqe)) { - ERROR("unexpected CQE opcode %u (0x%02x)", - op_code, op_code); + DRV_LOG(ERR, "unexpected CQE opcode %u (0x%02x)", + op_code, op_code); rte_hexdump(stderr, "MLX5 CQE:", (const void *)((uintptr_t)cqe), sizeof(*cqe)); @@ -435,7 +555,7 @@ mlx5_tx_complete(struct mlx5_txq_data *txq) if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) || (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) { if (!check_cqe_seen(cqe)) { - ERROR("unexpected error CQE, TX stopped"); + DRV_LOG(ERR, "unexpected error CQE, Tx stopped"); rte_hexdump(stderr, "MLX5 TXQ:", (const void *)((uintptr_t)txq->wqes), ((1 << txq->wqe_n) * @@ -487,77 +607,65 @@ mlx5_tx_complete(struct mlx5_txq_data *txq) } /** - * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which - * the cloned mbuf is allocated is returned instead. + * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx + * as mempool is pre-configured and static. * - * @param buf - * Pointer to mbuf. + * @param rxq + * Pointer to Rx queue structure. + * @param addr + * Address to search. * * @return - * Memory pool where data is located for given mbuf. 
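 *
 * (Illustrative use of the UAR helpers defined above, as in
 * mlx5_tx_dbrec_cond_wmb() further below; on 64-bit builds the assist
 * macros discard the lock argument, so it compiles away:)
 *
 *   mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
 *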
+ * Searched LKey on success, UINT32_MAX on no match. */ -static struct rte_mempool * -mlx5_tx_mb2mp(struct rte_mbuf *buf) +static __rte_always_inline uint32_t +mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr) { - if (unlikely(RTE_MBUF_INDIRECT(buf))) - return rte_mbuf_from_indirect(buf)->pool; - return buf->pool; + struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl; + uint32_t lkey; + + /* Linear search on MR cache array. */ + lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru, + MLX5_MR_CACHE_N, addr); + if (likely(lkey != UINT32_MAX)) + return lkey; + /* Take slower bottom-half (Binary Search) on miss. */ + return mlx5_rx_addr2mr_bh(rxq, addr); } +#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr)) + /** - * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[]. - * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full, - * remove an entry first. + * Query LKey from a packet buffer for Tx. If not found, add the mempool. * * @param txq - * Pointer to TX queue structure. - * @param[in] mp - * Memory Pool for which a Memory Region lkey must be returned. + * Pointer to Tx queue structure. + * @param addr + * Address to search. * * @return - * mr->lkey on success, (uint32_t)-1 on failure. + * Searched LKey on success, UINT32_MAX on no match. */ static __rte_always_inline uint32_t -mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb) +mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr) { - uint16_t i = txq->mr_cache_idx; - uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t); - struct mlx5_mr *mr; - - assert(i < RTE_DIM(txq->mp2mr)); - if (likely(txq->mp2mr[i]->start <= addr && txq->mp2mr[i]->end > addr)) - return txq->mp2mr[i]->lkey; - for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) { - if (unlikely(txq->mp2mr[i] == NULL || - txq->mp2mr[i]->mr == NULL)) { - /* Unknown MP, add a new MR for it. */ - break; - } - if (txq->mp2mr[i]->start <= addr && - txq->mp2mr[i]->end > addr) { - assert(txq->mp2mr[i]->lkey != (uint32_t)-1); - txq->mr_cache_idx = i; - return txq->mp2mr[i]->lkey; - } - } - mr = mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i); - /* - * Request the reference to use in this queue, the original one is - * kept by the control plane. - */ - if (mr) { - rte_atomic32_inc(&mr->refcnt); - txq->mr_cache_idx = i >= RTE_DIM(txq->mp2mr) ? i - 1 : i; - return mr->lkey; - } else { - struct rte_mempool *mp = mlx5_tx_mb2mp(mb); - - WARN("Failed to register mempool 0x%p(%s)", - (void *)mp, mp->name); - } - return (uint32_t)-1; + struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl; + uint32_t lkey; + + /* Check generation bit to see if there's any change on existing MRs. */ + if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen)) + mlx5_mr_flush_local_cache(mr_ctrl); + /* Linear search on MR cache array. */ + lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru, + MLX5_MR_CACHE_N, addr); + if (likely(lkey != UINT32_MAX)) + return lkey; + /* Take slower bottom-half (binary search) on miss. */ + return mlx5_tx_addr2mr_bh(txq, addr); } +#define mlx5_tx_mb2mr(rxq, mb) mlx5_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr)) + /** * Ring TX queue doorbell and flush the update if requested. * @@ -579,7 +687,7 @@ mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe, *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci); /* Ensure ordering between DB record and BF copy. 
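 *
 * (From mlx5_tx_addr2mr() just above: the Tx path re-validates its
 * per-queue cache against the device generation before searching:)
 *
 *   if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
 *           mlx5_mr_flush_local_cache(mr_ctrl); /* MR set changed */
 *   lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
 *                               MLX5_MR_CACHE_N, addr);
 *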
*/
 	rte_wmb();
-	*dst = *src;
+	mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
 	if (cond)
 		rte_wmb();
 }
 
@@ -599,38 +707,100 @@ mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
 }
 
 /**
- * Convert the Checksum offloads to Verbs.
+ * Convert mbuf to Verbs SWP.
  *
  * @param txq_data
  *   Pointer to the Tx queue.
  * @param buf
  *   Pointer to the mbuf.
+ * @param offsets
+ *   Pointer to the SWP header offsets.
+ * @param swp_types
+ *   Pointer to the SWP header types.
+ */
+static __rte_always_inline void
+txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+		uint8_t *offsets, uint8_t *swp_types)
+{
+	const uint64_t vlan = buf->ol_flags & PKT_TX_VLAN_PKT;
+	const uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
+	const uint64_t tso = buf->ol_flags & PKT_TX_TCP_SEG;
+	const uint64_t csum_flags = buf->ol_flags & PKT_TX_L4_MASK;
+	const uint64_t inner_ip =
+		buf->ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6);
+	const uint64_t ol_flags_mask = PKT_TX_L4_MASK | PKT_TX_IPV6 |
+				       PKT_TX_OUTER_IPV6;
+	uint16_t idx;
+	uint16_t off;
+
+	if (likely(!txq->swp_en || (tunnel != PKT_TX_TUNNEL_UDP &&
+				    tunnel != PKT_TX_TUNNEL_IP)))
+		return;
+	/*
+	 * The index should have:
+	 * bit[0:1] = PKT_TX_L4_MASK
+	 * bit[4] = PKT_TX_IPV6
+	 * bit[8] = PKT_TX_OUTER_IPV6
+	 * bit[9] = PKT_TX_OUTER_UDP
+	 */
+	idx = (buf->ol_flags & ol_flags_mask) >> 52;
+	if (tunnel == PKT_TX_TUNNEL_UDP)
+		idx |= 1 << 9;
+	*swp_types = mlx5_swp_types_table[idx];
+	/*
+	 * Set offsets for SW parser. Since ConnectX-5, SW parser just
+	 * complements HW parser. SW parser starts to engage only if HW parser
+	 * can't reach a header. For the older devices, HW parser will not kick
+	 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
+	 * should be set regardless of HW offload.
+	 */
+	off = buf->outer_l2_len + (vlan ? sizeof(struct vlan_hdr) : 0);
+	offsets[1] = off >> 1; /* Outer L3 offset. */
+	off += buf->outer_l3_len;
+	if (tunnel == PKT_TX_TUNNEL_UDP)
+		offsets[0] = off >> 1; /* Outer L4 offset. */
+	if (inner_ip) {
+		off += buf->l2_len;
+		offsets[3] = off >> 1; /* Inner L3 offset. */
+		if (csum_flags == PKT_TX_TCP_CKSUM || tso ||
+		    csum_flags == PKT_TX_UDP_CKSUM) {
+			off += buf->l3_len;
+			offsets[2] = off >> 1; /* Inner L4 offset. */
+		}
+	}
+}
+
+/**
+ * Convert the Checksum offloads to Verbs.
+ *
+ * @param buf
+ *   Pointer to the mbuf.
  *
  * @return
- *   the converted cs_flags.
+ *   Converted checksum flags.
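 *
 * (Worked example of the index layout listed in the body below: a TSO
 * packet over a tunnel with inner and outer IP checksum requests gives
 * idx = 1 | (1 << 4) | (1 << 8) | (1 << 9), so one table load replaces
 * the old chain of conditionals:)
 *
 *   idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
 *   cs_flags = mlx5_cksum_table[idx];
 *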
*/ static __rte_always_inline uint8_t -txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf) +txq_ol_cksum_to_cs(struct rte_mbuf *buf) { - uint8_t cs_flags = 0; - - /* Should we enable HW CKSUM offload */ - if (buf->ol_flags & - (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | - PKT_TX_OUTER_IP_CKSUM)) { - if (txq_data->tunnel_en && - (buf->ol_flags & - (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) { - cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM | - MLX5_ETH_WQE_L4_INNER_CSUM; - if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM) - cs_flags |= MLX5_ETH_WQE_L3_CSUM; - } else { - cs_flags = MLX5_ETH_WQE_L3_CSUM | - MLX5_ETH_WQE_L4_CSUM; - } - } - return cs_flags; + uint32_t idx; + uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK); + const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK | + PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM; + + /* + * The index should have: + * bit[0] = PKT_TX_TCP_SEG + * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM + * bit[4] = PKT_TX_IP_CKSUM + * bit[8] = PKT_TX_OUTER_IP_CKSUM + * bit[9] = tunnel + */ + idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9); + return mlx5_cksum_table[idx]; } /** diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c index b66c2916..0a4aed8f 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec.c +++ b/drivers/net/mlx5/mlx5_rxtx_vec.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -42,8 +42,6 @@ /** * Count the number of packets having same ol_flags and calculate cs_flags. * - * @param txq - * Pointer to TX queue structure. * @param pkts * Pointer to array of packets. * @param pkts_n @@ -55,8 +53,7 @@ * Number of packets having same ol_flags. */ static inline unsigned int -txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, - uint16_t pkts_n, uint8_t *cs_flags) +txq_calc_offload(struct rte_mbuf **pkts, uint16_t pkts_n, uint8_t *cs_flags) { unsigned int pos; const uint64_t ol_mask = @@ -70,7 +67,7 @@ txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, for (pos = 1; pos < pkts_n; ++pos) if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask) break; - *cs_flags = txq_ol_cksum_to_cs(txq, pkts[0]); + *cs_flags = txq_ol_cksum_to_cs(pkts[0]); return pos; } @@ -141,7 +138,7 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) n = txq_count_contig_single_seg(&pkts[nb_tx], n); if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP) - n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags); + n = txq_calc_offload(&pkts[nb_tx], n, &cs_flags); ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags); nb_tx += ret; if (!ret) @@ -223,17 +220,14 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) /** * Check Tx queue flags are set for raw vectorized Tx. * - * @param priv - * Pointer to private structure. * @param dev - * Pointer to rte_eth_dev structure. + * Pointer to Ethernet device. * * @return * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -priv_check_raw_vec_tx_support(__rte_unused struct priv *priv, - struct rte_eth_dev *dev) +mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev) { uint64_t offloads = dev->data->dev_conf.txmode.offloads; @@ -246,17 +240,16 @@ priv_check_raw_vec_tx_support(__rte_unused struct priv *priv, /** * Check a device can support vectorized TX. * - * @param priv - * Pointer to private structure. 
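 *
 * (Typical use of txq_calc_offload() above, from mlx5_tx_burst_vec():
 * a burst is cut into runs with identical ol_flags so cs_flags is
 * computed once per run:)
 *
 *   n = txq_calc_offload(&pkts[nb_tx], n, &cs_flags);
 *   ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
 *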
* @param dev - * Pointer to rte_eth_dev structure. + * Pointer to Ethernet device. * * @return * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev) +mlx5_check_vec_tx_support(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; uint64_t offloads = dev->data->dev_conf.txmode.offloads; if (!priv->config.tx_vec_en || @@ -277,11 +270,13 @@ priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev) * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -rxq_check_vec_support(struct mlx5_rxq_data *rxq) +mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq) { struct mlx5_rxq_ctrl *ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); + if (mlx5_mprq_enabled(ETH_DEV(ctrl->priv))) + return -ENOTSUP; if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0) return -ENOTSUP; return 1; @@ -290,26 +285,29 @@ rxq_check_vec_support(struct mlx5_rxq_data *rxq) /** * Check a device can support vectorized RX. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -priv_check_vec_rx_support(struct priv *priv) +mlx5_check_vec_rx_support(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; uint16_t i; if (!priv->config.rx_vec_en) return -ENOTSUP; + if (mlx5_mprq_enabled(dev)) + return -ENOTSUP; /* All the configured queues should support. */ for (i = 0; i < priv->rxqs_n; ++i) { struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; if (!rxq) continue; - if (rxq_check_vec_support(rxq) < 0) + if (mlx5_rxq_check_vec_support(rxq) < 0) break; } if (i != priv->rxqs_n) diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h index 44856bbf..fb884f92 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec.h +++ b/drivers/net/mlx5/mlx5_rxtx_vec.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_RXTX_VEC_H_ @@ -87,21 +87,26 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n) const uint16_t q_mask = q_n - 1; uint16_t elts_idx = rxq->rq_ci & q_mask; struct rte_mbuf **elts = &(*rxq->elts)[elts_idx]; - volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx]; + volatile struct mlx5_wqe_data_seg *wq = + &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx]; unsigned int i; - assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH); + assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n)); assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi))); - assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP); + assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP); /* Not to cross queue end. */ n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx); if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) { rxq->stats.rx_nombuf += n; return; } - for (i = 0; i < n; ++i) + for (i = 0; i < n; ++i) { wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr + RTE_PKTMBUF_HEADROOM); + /* If there's only one MR, no need to replace LKey in WQE. */ + if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) + wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]); + } rxq->rq_ci += n; /* Prevent overflowing into consumed mbufs. 
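 *
 * (Caller-side trigger for this replenish, as used by rxq_burst_v() in
 * the SSE/NEON paths; the threshold now scales with the ring size:)
 *
 *   repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
 *   if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
 *           mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
 *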
*/ elts_idx = rxq->rq_ci & q_mask; diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h index bbe1818e..b37b7381 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h +++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_ @@ -107,8 +107,6 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, assert(elts_n > pkts_n); mlx5_tx_complete(txq); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); if (unlikely(!pkts_n)) return 0; for (n = 0; n < pkts_n; ++n) { @@ -142,7 +140,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, break; wqe = &((volatile struct mlx5_wqe64 *) txq->wqes)[wqe_ci & wq_mask].hdr; - cs_flags = txq_ol_cksum_to_cs(txq, buf); + cs_flags = txq_ol_cksum_to_cs(buf); /* Title WQEBB pointer. */ t_wqe = (uint8x16_t *)wqe; dseg = (uint8_t *)(wqe + 1); @@ -167,8 +165,8 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, vst1q_u8((void *)t_wqe, ctrl); /* Fill ESEG in the header. */ vst1q_u16((void *)(t_wqe + 1), - (uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len), - 0, 0, 0, 0 }); + ((uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len), + 0, 0, 0, 0 })); txq->wqe_ci = wqe_ci; } if (!n) @@ -176,12 +174,11 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, txq->elts_comp += (uint16_t)(elts_head - txq->elts_head); txq->elts_head = elts_head; if (txq->elts_comp >= MLX5_TX_COMP_THRESH) { + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); wqe->ctrl[2] = rte_cpu_to_be_32(8); wqe->ctrl[3] = txq->elts_head; txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } #ifdef MLX5_PMD_SOFT_COUNTERS txq->stats.opackets += n; @@ -245,8 +242,6 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n, assert(elts_n > pkts_n); mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts); if (unlikely(!pkts_n)) @@ -282,11 +277,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n, if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) { txq->elts_comp += pkts_n; } else { + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request a completion. */ txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif comp_req = 8; } /* Fill CTRL in the header. */ @@ -300,10 +294,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n, vst1q_u8((void *)t_wqe, ctrl); /* Fill ESEG in the header. 
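 *
 * (From the Rx side of this file: the rebuilt packet_type assignment
 * ORs in rxq->tunnel only when bit 6 of the table index is set, kept
 * branch-free for the 4-packet SIMD loop:)
 *
 *   pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
 *                          !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
 *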
*/ vst1q_u8((void *)(t_wqe + 1), - (uint8x16_t) { 0, 0, 0, 0, - cs_flags, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0 }); + ((uint8x16_t) { 0, 0, 0, 0, + cs_flags, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0 })); #ifdef MLX5_PMD_SOFT_COUNTERS txq->stats.opackets += pkts_n; #endif @@ -551,6 +545,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer); const uint64x1_t r32_mask = vcreate_u64(0xffffffff); uint64x2_t rearm0, rearm1, rearm2, rearm3; + uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3; if (rxq->mark) { const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT); @@ -583,14 +578,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, ptype = vshrn_n_u32(ptype_info, 10); /* Errored packets will have RTE_PTYPE_ALL_MASK. */ ptype = vorr_u16(ptype, op_err); - pkts[0]->packet_type = - mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 6)]; - pkts[1]->packet_type = - mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 4)]; - pkts[2]->packet_type = - mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 2)]; - pkts[3]->packet_type = - mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 0)]; + pt_idx0 = vget_lane_u8(vreinterpret_u8_u16(ptype), 6); + pt_idx1 = vget_lane_u8(vreinterpret_u8_u16(ptype), 4); + pt_idx2 = vget_lane_u8(vreinterpret_u8_u16(ptype), 2); + pt_idx3 = vget_lane_u8(vreinterpret_u8_u16(ptype), 0); + pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] | + !!(pt_idx0 & (1 << 6)) * rxq->tunnel; + pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] | + !!(pt_idx1 & (1 << 6)) * rxq->tunnel; + pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] | + !!(pt_idx2 & (1 << 6)) * rxq->tunnel; + pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] | + !!(pt_idx3 & (1 << 6)) * rxq->tunnel; /* Fill flags for checksum and VLAN. */ pinfo = vandq_u32(ptype_info, ptype_ol_mask); pinfo = vreinterpretq_u32_u8( @@ -734,7 +733,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished). */ repl_n = q_n - (rxq->rq_ci - rxq->rq_pi); - if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH) + if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n)) mlx5_rx_replenish_bulk_mbuf(rxq, repl_n); /* See if there're unreturned mbufs from compressed CQE. */ rcvd_pkt = rxq->cq_ci - rxq->rq_pi; diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h index c088bcb5..54b3783c 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h +++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_ @@ -107,8 +107,6 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, assert(elts_n > pkts_n); mlx5_tx_complete(txq); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); if (unlikely(!pkts_n)) return 0; for (n = 0; n < pkts_n; ++n) { @@ -144,7 +142,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, } wqe = &((volatile struct mlx5_wqe64 *) txq->wqes)[wqe_ci & wq_mask].hdr; - cs_flags = txq_ol_cksum_to_cs(txq, buf); + cs_flags = txq_ol_cksum_to_cs(buf); /* Title WQEBB pointer. 
*/ t_wqe = (__m128i *)wqe; dseg = (__m128i *)(wqe + 1); @@ -177,12 +175,11 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, txq->elts_comp += (uint16_t)(elts_head - txq->elts_head); txq->elts_head = elts_head; if (txq->elts_comp >= MLX5_TX_COMP_THRESH) { + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); wqe->ctrl[2] = rte_cpu_to_be_32(8); wqe->ctrl[3] = txq->elts_head; txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } #ifdef MLX5_PMD_SOFT_COUNTERS txq->stats.opackets += n; @@ -244,8 +241,6 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n, assert(elts_n > pkts_n); mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts); assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr); @@ -283,11 +278,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n, if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) { txq->elts_comp += pkts_n; } else { + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request a completion. */ txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif comp_req = 8; } /* Fill CTRL in the header. */ @@ -542,6 +536,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4], const __m128i mbuf_init = _mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer); __m128i rearm0, rearm1, rearm2, rearm3; + uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3; /* Extract pkt_info field. */ pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]); @@ -595,10 +590,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4], /* Errored packets will have RTE_PTYPE_ALL_MASK. */ op_err = _mm_srli_epi16(op_err, 8); ptype = _mm_or_si128(ptype, op_err); - pkts[0]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 0)]; - pkts[1]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 2)]; - pkts[2]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 4)]; - pkts[3]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 6)]; + pt_idx0 = _mm_extract_epi8(ptype, 0); + pt_idx1 = _mm_extract_epi8(ptype, 2); + pt_idx2 = _mm_extract_epi8(ptype, 4); + pt_idx3 = _mm_extract_epi8(ptype, 6); + pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] | + !!(pt_idx0 & (1 << 6)) * rxq->tunnel; + pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] | + !!(pt_idx1 & (1 << 6)) * rxq->tunnel; + pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] | + !!(pt_idx2 & (1 << 6)) * rxq->tunnel; + pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] | + !!(pt_idx3 & (1 << 6)) * rxq->tunnel; /* Fill flags for checksum and VLAN. */ pinfo = _mm_and_si128(pinfo, ptype_ol_mask); pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo); @@ -715,7 +718,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished). */ repl_n = q_n - (rxq->rq_ci - rxq->rq_pi); - if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH) + if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n)) mlx5_rx_replenish_bulk_mbuf(rxq, repl_n); /* See if there're unreturned mbufs from compressed CQE. 
*/ rcvd_pkt = rxq->cq_ci - rxq->rq_pi; diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c index 61c1a4a5..a3a52291 100644 --- a/drivers/net/mlx5/mlx5_socket.c +++ b/drivers/net/mlx5/mlx5_socket.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2016 6WIND S.A. + * Copyright 2016 Mellanox Technologies, Ltd */ #define _GNU_SOURCE @@ -18,92 +19,105 @@ /** * Initialise the socket to communicate with the secondary process * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_socket_init(struct priv *priv) +mlx5_socket_init(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct sockaddr_un sun = { .sun_family = AF_UNIX, }; int ret; int flags; - struct stat file_stat; + /* + * Close the last socket that was used to communicate + * with the secondary process + */ + if (priv->primary_socket) + mlx5_socket_uninit(dev); /* * Initialise the socket to communicate with the secondary * process. */ ret = socket(AF_UNIX, SOCK_STREAM, 0); if (ret < 0) { - WARN("secondary process not supported: %s", strerror(errno)); - return ret; + rte_errno = errno; + DRV_LOG(WARNING, "port %u secondary process not supported: %s", + dev->data->port_id, strerror(errno)); + goto error; } priv->primary_socket = ret; flags = fcntl(priv->primary_socket, F_GETFL, 0); - if (flags == -1) - goto out; + if (flags == -1) { + rte_errno = errno; + goto error; + } ret = fcntl(priv->primary_socket, F_SETFL, flags | O_NONBLOCK); - if (ret < 0) - goto out; + if (ret < 0) { + rte_errno = errno; + goto error; + } snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket); - ret = stat(sun.sun_path, &file_stat); - if (!ret) - claim_zero(remove(sun.sun_path)); + remove(sun.sun_path); ret = bind(priv->primary_socket, (const struct sockaddr *)&sun, sizeof(sun)); if (ret < 0) { - WARN("cannot bind socket, secondary process not supported: %s", - strerror(errno)); + rte_errno = errno; + DRV_LOG(WARNING, + "port %u cannot bind socket, secondary process not" + " supported: %s", + dev->data->port_id, strerror(errno)); goto close; } ret = listen(priv->primary_socket, 0); if (ret < 0) { - WARN("Secondary process not supported: %s", strerror(errno)); + rte_errno = errno; + DRV_LOG(WARNING, "port %u secondary process not supported: %s", + dev->data->port_id, strerror(errno)); goto close; } - return ret; + return 0; close: remove(sun.sun_path); -out: +error: claim_zero(close(priv->primary_socket)); priv->primary_socket = 0; - return -(ret); + return -rte_errno; } /** * Un-Initialise the socket to communicate with the secondary process * - * @param[in] priv - * Pointer to private structure. - * - * @return - * 0 on success, errno value on failure. + * @param[in] dev */ -int -priv_socket_uninit(struct priv *priv) +void +mlx5_socket_uninit(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; + MKSTR(path, "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket); claim_zero(close(priv->primary_socket)); priv->primary_socket = 0; claim_zero(remove(path)); - return 0; } /** * Handle socket interrupts. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
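 *
 * (Error convention adopted across the reworked mlx5 entry points in
 * this patch: negative return, details in rte_errno; an illustrative
 * caller, assuming nothing beyond what the patch itself uses:)
 *
 *   if (mlx5_socket_init(dev) < 0)
 *           DRV_LOG(WARNING, "port %u cannot init socket: %s",
 *                   dev->data->port_id, strerror(rte_errno));
 *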
*/ void -priv_socket_handle(struct priv *priv) +mlx5_socket_handle(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int conn_sock; int ret = 0; struct cmsghdr *cmsg = NULL; @@ -125,25 +139,30 @@ priv_socket_handle(struct priv *priv) /* Accept the connection from the client. */ conn_sock = accept(priv->primary_socket, NULL, NULL); if (conn_sock < 0) { - WARN("connection failed: %s", strerror(errno)); + DRV_LOG(WARNING, "port %u connection failed: %s", + dev->data->port_id, strerror(errno)); return; } ret = setsockopt(conn_sock, SOL_SOCKET, SO_PASSCRED, &(int){1}, sizeof(int)); if (ret < 0) { - WARN("cannot change socket options"); - goto out; + ret = errno; + DRV_LOG(WARNING, "port %u cannot change socket options: %s", + dev->data->port_id, strerror(rte_errno)); + goto error; } ret = recvmsg(conn_sock, &msg, MSG_WAITALL); if (ret < 0) { - WARN("received an empty message: %s", strerror(errno)); - goto out; + ret = errno; + DRV_LOG(WARNING, "port %u received an empty message: %s", + dev->data->port_id, strerror(rte_errno)); + goto error; } /* Expect to receive credentials only. */ cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { - WARN("no message"); - goto out; + DRV_LOG(WARNING, "port %u no message", dev->data->port_id); + goto error; } if ((cmsg->cmsg_type == SCM_CREDENTIALS) && (cmsg->cmsg_len >= sizeof(*cred))) { @@ -152,14 +171,16 @@ priv_socket_handle(struct priv *priv) } cmsg = CMSG_NXTHDR(&msg, cmsg); if (cmsg != NULL) { - WARN("Message wrongly formatted"); - goto out; + DRV_LOG(WARNING, "port %u message wrongly formatted", + dev->data->port_id); + goto error; } /* Make sure all the ancillary data was received and valid. */ if ((cred == NULL) || (cred->uid != getuid()) || (cred->gid != getgid())) { - WARN("wrong credentials"); - goto out; + DRV_LOG(WARNING, "port %u wrong credentials", + dev->data->port_id); + goto error; } /* Set-up the ancillary data. */ cmsg = CMSG_FIRSTHDR(&msg); @@ -171,27 +192,29 @@ priv_socket_handle(struct priv *priv) *fd = priv->ctx->cmd_fd; ret = sendmsg(conn_sock, &msg, 0); if (ret < 0) - WARN("cannot send response"); -out: + DRV_LOG(WARNING, "port %u cannot send response", + dev->data->port_id); +error: close(conn_sock); } /** * Connect to the primary process. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet structure. * * @return - * fd on success, negative errno value on failure. + * fd on success, negative errno value otherwise and rte_errno is set. 
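The handler's credential check relies on Linux SCM_CREDENTIALS ancillary data. A hedged sketch of the receive side, assuming SO_PASSCRED was already enabled on the connection exactly as the code above does:

    #define _GNU_SOURCE /* struct ucred */
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    int recv_peer_creds(int conn_sock, struct ucred *out)
    {
        char buf[1]; /* at least one byte of real payload is required */
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        char cbuf[CMSG_SPACE(sizeof(struct ucred))];
        struct msghdr msg = {
            .msg_iov = &iov,
            .msg_iovlen = 1,
            .msg_control = cbuf,
            .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cmsg;

        if (recvmsg(conn_sock, &msg, MSG_WAITALL) < 0)
            return -1;
        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
             cmsg = CMSG_NXTHDR(&msg, cmsg)) {
            if (cmsg->cmsg_level == SOL_SOCKET &&
                cmsg->cmsg_type == SCM_CREDENTIALS &&
                cmsg->cmsg_len >= CMSG_LEN(sizeof(*out))) {
                memcpy(out, CMSG_DATA(cmsg), sizeof(*out));
                return 0; /* caller compares uid/gid */
            }
        }
        return -1;
    }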
*/ int -priv_socket_connect(struct priv *priv) +mlx5_socket_connect(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct sockaddr_un sun = { .sun_family = AF_UNIX, }; - int socket_fd; + int socket_fd = -1; int *fd = NULL; int ret; struct ucred *cred; @@ -211,57 +234,75 @@ ret = socket(AF_UNIX, SOCK_STREAM, 0); if (ret < 0) { - WARN("cannot connect to primary"); - return ret; + rte_errno = errno; + DRV_LOG(WARNING, "port %u cannot connect to primary", + dev->data->port_id); + goto error; } socket_fd = ret; snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket); ret = connect(socket_fd, (const struct sockaddr *)&sun, sizeof(sun)); if (ret < 0) { - WARN("cannot connect to primary"); - goto out; + rte_errno = errno; + DRV_LOG(WARNING, "port %u cannot connect to primary", + dev->data->port_id); + goto error; } cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { - DEBUG("cannot get first message"); - goto out; + rte_errno = EINVAL; + DRV_LOG(DEBUG, "port %u cannot get first message", + dev->data->port_id); + goto error; } cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_CREDENTIALS; cmsg->cmsg_len = CMSG_LEN(sizeof(*cred)); cred = (struct ucred *)CMSG_DATA(cmsg); if (cred == NULL) { - DEBUG("no credentials received"); - goto out; + rte_errno = EINVAL; + DRV_LOG(DEBUG, "port %u no credentials received", + dev->data->port_id); + goto error; } cred->pid = getpid(); cred->uid = getuid(); cred->gid = getgid(); ret = sendmsg(socket_fd, &msg, MSG_DONTWAIT); if (ret < 0) { - WARN("cannot send credentials to primary: %s", - strerror(errno)); - goto out; + rte_errno = errno; + DRV_LOG(WARNING, + "port %u cannot send credentials to primary: %s", + dev->data->port_id, strerror(errno)); + goto error; } ret = recvmsg(socket_fd, &msg, MSG_WAITALL); if (ret <= 0) { - WARN("no message from primary: %s", strerror(errno)); - goto out; + rte_errno = errno; + DRV_LOG(WARNING, "port %u no message from primary: %s", + dev->data->port_id, strerror(errno)); + goto error; } cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { - WARN("No file descriptor received"); - goto out; + rte_errno = EINVAL; + DRV_LOG(WARNING, "port %u no file descriptor received", + dev->data->port_id); + goto error; } fd = (int *)CMSG_DATA(cmsg); - if (*fd <= 0) { - WARN("no file descriptor received: %s", strerror(errno)); - ret = *fd; - goto out; + if (*fd < 0) { + DRV_LOG(WARNING, "port %u no file descriptor received: %s", + dev->data->port_id, strerror(errno)); + rte_errno = *fd; + goto error; } ret = *fd; -out: close(socket_fd); return ret; +error: + if (socket_fd != -1) + close(socket_fd); + return -rte_errno; } diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c index 378472a7..91f3d474 100644 --- a/drivers/net/mlx5/mlx5_stats.c +++ b/drivers/net/mlx5/mlx5_stats.c @@ -1,10 +1,13 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ +#include <inttypes.h> #include <linux/sockios.h> #include <linux/ethtool.h> +#include <stdint.h> +#include <stdio.h> #include <rte_ethdev_driver.h> #include <rte_common.h> @@ -19,6 +22,7 @@ struct mlx5_counter_ctrl { char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE]; /* Name of the counter on the device table. */ char ctr_name[RTE_ETH_XSTATS_NAME_SIZE]; + uint32_t ib:1; /**< Nonzero for IB counters.
*/ }; static const struct mlx5_counter_ctrl mlx5_counters_init[] = { @@ -93,6 +97,7 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = { { .dpdk_name = "rx_out_of_buffer", .ctr_name = "out_of_buffer", + .ib = 1, }, { .dpdk_name = "tx_packets_phy", @@ -117,39 +122,56 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); /** * Read device counters table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] stats * Counters table output buffer. * * @return - * 0 on success and stats is filled, negative on error. + * 0 on success and stats is filled, negative errno value otherwise and + * rte_errno is set. */ static int -priv_read_dev_counters(struct priv *priv, uint64_t *stats) +mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) { + struct priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; unsigned int i; struct ifreq ifr; unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t); unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz]; struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf; + int ret; et_stats->cmd = ETHTOOL_GSTATS; et_stats->n_stats = xstats_ctrl->stats_n; ifr.ifr_data = (caddr_t)et_stats; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { - WARN("unable to read statistic values from device"); - return -1; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); + if (ret) { + DRV_LOG(WARNING, + "port %u unable to read statistic values from device", + dev->data->port_id); + return ret; } for (i = 0; i != xstats_n; ++i) { - if (priv_is_ib_cntr(mlx5_counters_init[i].ctr_name)) - priv_get_cntr_sysfs(priv, - mlx5_counters_init[i].ctr_name, - &stats[i]); - else + if (mlx5_counters_init[i].ib) { + FILE *file; + MKSTR(path, "%s/ports/1/hw_counters/%s", + priv->ibdev_path, + mlx5_counters_init[i].ctr_name); + + file = fopen(path, "rb"); + if (file) { + int n = fscanf(file, "%" SCNu64, &stats[i]); + + fclose(file); + if (n != 1) + stats[i] = 0; + } + } else { stats[i] = (uint64_t) et_stats->data[xstats_ctrl->dev_table_idx[i]]; + } } return 0; } @@ -157,22 +179,26 @@ priv_read_dev_counters(struct priv *priv, uint64_t *stats) /** * Query the number of statistics provided by ETHTOOL. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return - * Number of statistics on success, -1 on error. + * Number of statistics on success, negative errno value otherwise and + * rte_errno is set. */ static int -priv_ethtool_get_stats_n(struct priv *priv) { +mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) { struct ethtool_drvinfo drvinfo; struct ifreq ifr; + int ret; drvinfo.cmd = ETHTOOL_GDRVINFO; ifr.ifr_data = (caddr_t)&drvinfo; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { - WARN("unable to query number of statistics"); - return -1; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); + if (ret) { + DRV_LOG(WARNING, "port %u unable to query number of statistics", + dev->data->port_id); + return ret; } return drvinfo.n_stats; } @@ -180,12 +206,13 @@ priv_ethtool_get_stats_n(struct priv *priv) { /** * Init the structures to read device counters. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
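Counters flagged with the new .ib bit bypass ethtool and are read from sysfs. A minimal sketch of that read path; the hw_counters directory layout matches the code above, while ibdev_path stands in for priv->ibdev_path:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    uint64_t read_ib_counter(const char *ibdev_path, const char *name)
    {
        uint64_t value = 0;
        char path[256];
        FILE *file;

        snprintf(path, sizeof(path), "%s/ports/1/hw_counters/%s",
                 ibdev_path, name);
        file = fopen(path, "rb");
        if (file == NULL)
            return 0; /* the driver also falls back to 0 */
        if (fscanf(file, "%" SCNu64, &value) != 1)
            value = 0;
        fclose(file);
        return value;
    }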
*/ void -priv_xstats_init(struct priv *priv) +mlx5_xstats_init(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; unsigned int i; unsigned int j; @@ -193,12 +220,15 @@ priv_xstats_init(struct priv *priv) struct ethtool_gstrings *strings = NULL; unsigned int dev_stats_n; unsigned int str_sz; + int ret; - dev_stats_n = priv_ethtool_get_stats_n(priv); - if (dev_stats_n < 1) { - WARN("no extended statistics available"); + ret = mlx5_ethtool_get_stats_n(dev); + if (ret < 0) { + DRV_LOG(WARNING, "port %u no extended statistics available", + dev->data->port_id); return; } + dev_stats_n = ret; xstats_ctrl->stats_n = dev_stats_n; /* Allocate memory to grab stat names and values. */ str_sz = dev_stats_n * ETH_GSTRING_LEN; @@ -206,15 +236,18 @@ priv_xstats_init(struct priv *priv) rte_malloc("xstats_strings", str_sz + sizeof(struct ethtool_gstrings), 0); if (!strings) { - WARN("unable to allocate memory for xstats"); + DRV_LOG(WARNING, "port %u unable to allocate memory for xstats", + dev->data->port_id); return; } strings->cmd = ETHTOOL_GSTRINGS; strings->string_set = ETH_SS_STATS; strings->len = dev_stats_n; ifr.ifr_data = (caddr_t)strings; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { - WARN("unable to get statistic names"); + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); + if (ret) { + DRV_LOG(WARNING, "port %u unable to get statistic names", + dev->data->port_id); goto free; } for (j = 0; j != xstats_n; ++j) @@ -232,68 +265,67 @@ priv_xstats_init(struct priv *priv) } } for (j = 0; j != xstats_n; ++j) { - if (priv_is_ib_cntr(mlx5_counters_init[j].ctr_name)) + if (mlx5_counters_init[j].ib) continue; if (xstats_ctrl->dev_table_idx[j] >= dev_stats_n) { - WARN("counter \"%s\" is not recognized", - mlx5_counters_init[j].dpdk_name); + DRV_LOG(WARNING, + "port %u counter \"%s\" is not recognized", + dev->data->port_id, + mlx5_counters_init[j].dpdk_name); goto free; } } /* Copy to base at first time. */ assert(xstats_n <= MLX5_MAX_XSTATS); - priv_read_dev_counters(priv, xstats_ctrl->base); + ret = mlx5_read_dev_counters(dev, xstats_ctrl->base); + if (ret) + DRV_LOG(ERR, "port %u cannot read device counters: %s", + dev->data->port_id, strerror(rte_errno)); free: rte_free(strings); } /** - * Get device extended statistics. + * DPDK callback to get extended device statistics. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] stats * Pointer to rte extended stats table. + * @param n + * The size of the stats table. * * @return * Number of extended stats on success and stats is filled, - * negative on error. + * negative on error and rte_errno is set. */ -static int -priv_xstats_get(struct priv *priv, struct rte_eth_xstat *stats) +int +mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, + unsigned int n) { - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + struct priv *priv = dev->data->dev_private; unsigned int i; - unsigned int n = xstats_n; uint64_t counters[n]; - if (priv_read_dev_counters(priv, counters) < 0) - return -1; - for (i = 0; i != xstats_n; ++i) { - stats[i].id = i; - stats[i].value = (counters[i] - xstats_ctrl->base[i]); - } - return n; -} - -/** - * Reset device extended statistics. - * - * @param priv - * Pointer to private structure. 
- */ -static void -priv_xstats_reset(struct priv *priv) -{ - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; - unsigned int i; - unsigned int n = xstats_n; - uint64_t counters[n]; + if (n >= xstats_n && stats) { + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + int stats_n; + int ret; - if (priv_read_dev_counters(priv, counters) < 0) - return; - for (i = 0; i != n; ++i) - xstats_ctrl->base[i] = counters[i]; + stats_n = mlx5_ethtool_get_stats_n(dev); + if (stats_n < 0) + return stats_n; + if (xstats_ctrl->stats_n != stats_n) + mlx5_xstats_init(dev); + ret = mlx5_read_dev_counters(dev, counters); + if (ret) + return ret; + for (i = 0; i != xstats_n; ++i) { + stats[i].id = i; + stats[i].value = (counters[i] - xstats_ctrl->base[i]); + } + } + return xstats_n; } /** @@ -303,6 +335,10 @@ priv_xstats_reset(struct priv *priv) * Pointer to Ethernet device structure. * @param[out] stats * Stats structure output buffer. + * + * @return + * 0 on success and stats is filled, negative errno value otherwise and + * rte_errno is set. */ int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) @@ -312,7 +348,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) unsigned int i; unsigned int idx; - priv_lock(priv); /* Add software counters. */ for (i = 0; (i != priv->rxqs_n); ++i) { struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; @@ -358,7 +393,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* FIXME: retrieve and add hardware counters. */ #endif *stats = tmp; - priv_unlock(priv); return 0; } @@ -375,7 +409,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev) unsigned int i; unsigned int idx; - priv_lock(priv); for (i = 0; (i != priv->rxqs_n); ++i) { if ((*priv->rxqs)[i] == NULL) continue; @@ -393,45 +426,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev) #ifndef MLX5_PMD_SOFT_COUNTERS /* FIXME: reset hardware counters. */ #endif - priv_unlock(priv); -} - -/** - * DPDK callback to get extended device statistics. - * - * @param dev - * Pointer to Ethernet device structure. - * @param[out] stats - * Stats table output buffer. - * @param n - * The size of the stats table. - * - * @return - * Number of xstats on success, negative on failure. 
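The reworked mlx5_xstats_get() follows the standard ethdev contract: with a NULL or undersized table it only reports the required count, otherwise it fills the table and returns the number of entries. A sketch of how an application consumes that contract, assuming a configured port:

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <rte_ethdev.h>

    void dump_xstats(uint16_t port_id)
    {
        /* First call with a NULL table: only the count comes back. */
        int n = rte_eth_xstats_get(port_id, NULL, 0);
        struct rte_eth_xstat *xstats;
        int i;
        int ret;

        if (n <= 0)
            return;
        xstats = calloc(n, sizeof(*xstats));
        if (xstats == NULL)
            return;
        /* Second call fills the table. */
        ret = rte_eth_xstats_get(port_id, xstats, n);
        for (i = 0; i < ret; i++)
            printf("id %" PRIu64 " = %" PRIu64 "\n",
                   xstats[i].id, xstats[i].value);
        free(xstats);
    }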
- */ -int -mlx5_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstat *stats, unsigned int n) -{ - struct priv *priv = dev->data->dev_private; - int ret = xstats_n; - - if (n >= xstats_n && stats) { - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; - int stats_n; - - priv_lock(priv); - stats_n = priv_ethtool_get_stats_n(priv); - if (stats_n < 0) { - priv_unlock(priv); - return -1; - } - if (xstats_ctrl->stats_n != stats_n) - priv_xstats_init(priv); - ret = priv_xstats_get(priv, stats); - priv_unlock(priv); - } - return ret; } /** @@ -446,16 +440,27 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) struct priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; int stats_n; + unsigned int i; + unsigned int n = xstats_n; + uint64_t counters[n]; + int ret; - priv_lock(priv); - stats_n = priv_ethtool_get_stats_n(priv); - if (stats_n < 0) - goto unlock; + stats_n = mlx5_ethtool_get_stats_n(dev); + if (stats_n < 0) { + DRV_LOG(ERR, "port %u cannot get stats: %s", dev->data->port_id, + strerror(-stats_n)); + return; + } if (xstats_ctrl->stats_n != stats_n) - priv_xstats_init(priv); - priv_xstats_reset(priv); -unlock: - priv_unlock(priv); + mlx5_xstats_init(dev); + ret = mlx5_read_dev_counters(dev, counters); + if (ret) { + DRV_LOG(ERR, "port %u cannot read device counters: %s", + dev->data->port_id, strerror(rte_errno)); + return; + } + for (i = 0; i != n; ++i) + xstats_ctrl->base[i] = counters[i]; } /** @@ -472,21 +477,18 @@ unlock: * Number of xstats names. */ int -mlx5_xstats_get_names(struct rte_eth_dev *dev, - struct rte_eth_xstat_name *xstats_names, unsigned int n) +mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_xstat_name *xstats_names, unsigned int n) { - struct priv *priv = dev->data->dev_private; unsigned int i; if (n >= xstats_n && xstats_names) { - priv_lock(priv); for (i = 0; i != xstats_n; ++i) { strncpy(xstats_names[i].name, mlx5_counters_init[i].dpdk_name, RTE_ETH_XSTATS_NAME_SIZE); xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0; } - priv_unlock(priv); } return xstats_n; } diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index f5711a99..e2a9bb70 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #include <unistd.h> @@ -14,83 +14,133 @@ #include "mlx5_rxtx.h" #include "mlx5_utils.h" +/** + * Stop traffic on Tx queues. + * + * @param dev + * Pointer to Ethernet device structure. + */ static void -priv_txq_stop(struct priv *priv) +mlx5_txq_stop(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; for (i = 0; i != priv->txqs_n; ++i) - mlx5_priv_txq_release(priv, i); + mlx5_txq_release(dev, i); } +/** + * Start traffic on Tx queues. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ static int -priv_txq_start(struct priv *priv) +mlx5_txq_start(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; - int ret = 0; + int ret; - /* Add memory regions to Tx queues.
*/ for (i = 0; i != priv->txqs_n; ++i) { - unsigned int idx = 0; - struct mlx5_mr *mr; - struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i); + struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i); if (!txq_ctrl) continue; - LIST_FOREACH(mr, &priv->mr, next) { - priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++); - if (idx == MLX5_PMD_TX_MP_CACHE) - break; - } txq_alloc_elts(txq_ctrl); - txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i); + txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i); if (!txq_ctrl->ibv) { - ret = ENOMEM; + rte_errno = ENOMEM; goto error; } } - ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd); - if (ret) + ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd); + if (ret) { + /* Adjust index for rollback. */ + i = priv->txqs_n - 1; goto error; - return ret; + } + return 0; error: - priv_txq_stop(priv); - return ret; + ret = rte_errno; /* Save rte_errno before cleanup. */ + do { + mlx5_txq_release(dev, i); + } while (i-- != 0); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } +/** + * Stop traffic on Rx queues. + * + * @param dev + * Pointer to Ethernet device structure. + */ static void -priv_rxq_stop(struct priv *priv) +mlx5_rxq_stop(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; for (i = 0; i != priv->rxqs_n; ++i) - mlx5_priv_rxq_release(priv, i); + mlx5_rxq_release(dev, i); } +/** + * Start traffic on Rx queues. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ static int -priv_rxq_start(struct priv *priv) +mlx5_rxq_start(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; int ret = 0; + /* Allocate/reuse/resize mempool for Multi-Packet RQ. */ + if (mlx5_mprq_alloc_mp(dev)) { + /* Should not release Rx queues but return immediately. */ + return -rte_errno; + } for (i = 0; i != priv->rxqs_n; ++i) { - struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i); + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i); + struct rte_mempool *mp; if (!rxq_ctrl) continue; + /* Pre-register Rx mempool. */ + mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? + rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp; + DRV_LOG(DEBUG, + "port %u Rx queue %u registering" + " mp %s having %u chunks", + dev->data->port_id, rxq_ctrl->idx, + mp->name, mp->nb_mem_chunks); + mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp); ret = rxq_alloc_elts(rxq_ctrl); if (ret) goto error; - rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i); - if (!rxq_ctrl->ibv) { - ret = ENOMEM; + rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i); + if (!rxq_ctrl->ibv) goto error; - } } - return -ret; + return 0; error: - priv_rxq_stop(priv); - return -ret; + ret = rte_errno; /* Save rte_errno before cleanup. */ + do { + mlx5_rxq_release(dev, i); + } while (i-- != 0); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -102,68 +152,62 @@ error: * Pointer to Ethernet device structure. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
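Both mlx5_txq_start() and mlx5_rxq_start() share the rollback shape used above: save rte_errno, release every queue from the failing index down to zero (the post-decrement loop deliberately includes index i itself, whose setup may have half-completed), then restore rte_errno. The idiom in isolation, with hypothetical setup/release helpers:

    #include <stdio.h>

    #define NB_QUEUES 4u

    /* Hypothetical helpers; queue 2 fails to exercise the rollback. */
    static int queue_setup(unsigned int i)
    {
        return (i == 2) ? -1 : 0;
    }

    static void queue_release(unsigned int i)
    {
        printf("release %u\n", i);
    }

    int queues_start(void)
    {
        unsigned int i;

        for (i = 0; i != NB_QUEUES; ++i)
            if (queue_setup(i))
                goto error;
        return 0;
    error:
        /* Releases i, i-1, ..., 0; i itself may be half set up. */
        do {
            queue_release(i);
        } while (i-- != 0);
        return -1;
    }

    int main(void)
    {
        return queues_start() ? 1 : 0;
    }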
*/ int mlx5_dev_start(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; - struct mlx5_mr *mr = NULL; - int err; + int ret; - dev->data->dev_started = 1; - priv_lock(priv); - err = priv_flow_create_drop_queue(priv); - if (err) { - ERROR("%p: Drop queue allocation failed: %s", - (void *)dev, strerror(err)); - goto error; + DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id); + ret = mlx5_txq_start(dev); + if (ret) { + DRV_LOG(ERR, "port %u Tx queue allocation failed: %s", + dev->data->port_id, strerror(rte_errno)); + return -rte_errno; } - DEBUG("%p: allocating and configuring hash RX queues", (void *)dev); - rte_mempool_walk(mlx5_mp2mr_iter, priv); - err = priv_txq_start(priv); - if (err) { - ERROR("%p: TXQ allocation failed: %s", - (void *)dev, strerror(err)); - goto error; + ret = mlx5_rxq_start(dev); + if (ret) { + DRV_LOG(ERR, "port %u Rx queue allocation failed: %s", + dev->data->port_id, strerror(rte_errno)); + mlx5_txq_stop(dev); + return -rte_errno; } - err = priv_rxq_start(priv); - if (err) { - ERROR("%p: RXQ allocation failed: %s", - (void *)dev, strerror(err)); + dev->data->dev_started = 1; + ret = mlx5_rx_intr_vec_enable(dev); + if (ret) { + DRV_LOG(ERR, "port %u Rx interrupt vector creation failed", + dev->data->port_id); goto error; } - err = priv_rx_intr_vec_enable(priv); - if (err) { - ERROR("%p: RX interrupt vector creation failed", - (void *)priv); + mlx5_xstats_init(dev); + ret = mlx5_traffic_enable(dev); + if (ret) { + DRV_LOG(DEBUG, "port %u failed to set defaults flows", + dev->data->port_id); goto error; } - priv_xstats_init(priv); - /* Update link status and Tx/Rx callbacks for the first time. */ - memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link)); - INFO("Forcing port %u link to be up", dev->data->port_id); - err = priv_force_link_status_change(priv, ETH_LINK_UP); - if (err) { - DEBUG("Failed to set port %u link to be up", - dev->data->port_id); + ret = mlx5_flow_start(dev, &priv->flows); + if (ret) { + DRV_LOG(DEBUG, "port %u failed to set flows", + dev->data->port_id); goto error; } - priv_dev_interrupt_handler_install(priv, dev); - priv_unlock(priv); + dev->tx_pkt_burst = mlx5_select_tx_function(dev); + dev->rx_pkt_burst = mlx5_select_rx_function(dev); + mlx5_dev_interrupt_handler_install(dev); return 0; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ /* Rollback. */ dev->data->dev_started = 0; - for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr)) - priv_mr_release(priv, mr); - priv_flow_stop(priv, &priv->flows); - priv_dev_traffic_disable(priv, dev); - priv_txq_stop(priv); - priv_rxq_stop(priv); - priv_flow_delete_drop_queue(priv); - priv_unlock(priv); - return err; + mlx5_flow_stop(dev, &priv->flows); + mlx5_traffic_disable(dev); + mlx5_txq_stop(dev); + mlx5_rxq_stop(dev); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -178,42 +222,37 @@ void mlx5_dev_stop(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; - struct mlx5_mr *mr; - priv_lock(priv); dev->data->dev_started = 0; /* Prevent crashes when queues are still in use. 
*/ dev->rx_pkt_burst = removed_rx_burst; dev->tx_pkt_burst = removed_tx_burst; rte_wmb(); usleep(1000 * priv->rxqs_n); - DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev); - priv_flow_stop(priv, &priv->flows); - priv_dev_traffic_disable(priv, dev); - priv_rx_intr_vec_disable(priv); - priv_dev_interrupt_handler_uninstall(priv, dev); - priv_txq_stop(priv); - priv_rxq_stop(priv); - for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr)) - priv_mr_release(priv, mr); - priv_flow_delete_drop_queue(priv); - priv_unlock(priv); + DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id); + mlx5_flow_stop(dev, &priv->flows); + mlx5_traffic_disable(dev); + mlx5_rx_intr_vec_disable(dev); + mlx5_dev_interrupt_handler_uninstall(dev); + mlx5_txq_stop(dev); + mlx5_rxq_stop(dev); } /** * Enable traffic flows configured by control plane * - * @param priv + * @param dev * Pointer to Ethernet device private data. * @param dev * Pointer to Ethernet device structure. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) +mlx5_traffic_enable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct rte_flow_item_eth bcast = { .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", }; @@ -246,8 +285,9 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) .type = 0, }; - claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc)); - return 0; + ret = mlx5_ctrl_flow(dev, &promisc, &promisc); + if (ret) + goto error; } if (dev->data->all_multicast) { struct rte_flow_item_eth multicast = { @@ -256,7 +296,9 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) .type = 0, }; - claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast)); + ret = mlx5_ctrl_flow(dev, &multicast, &multicast); + if (ret) + goto error; } else { /* Add broadcast/multicast flows. */ for (i = 0; i != vlan_filter_n; ++i) { @@ -265,9 +307,8 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) struct rte_flow_item_vlan vlan_spec = { .tci = rte_cpu_to_be_16(vlan), }; - struct rte_flow_item_vlan vlan_mask = { - .tci = 0xffff, - }; + struct rte_flow_item_vlan vlan_mask = + rte_flow_item_vlan_mask; ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast, &vlan_spec, &vlan_mask); @@ -304,9 +345,8 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) struct rte_flow_item_vlan vlan_spec = { .tci = rte_cpu_to_be_16(vlan), }; - struct rte_flow_item_vlan vlan_mask = { - .tci = 0xffff, - }; + struct rte_flow_item_vlan vlan_mask = + rte_flow_item_vlan_mask; ret = mlx5_ctrl_flow_vlan(dev, &unicast, &unicast_mask, @@ -316,74 +356,49 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) goto error; } if (!vlan_filter_n) { - ret = mlx5_ctrl_flow(dev, &unicast, - &unicast_mask); + ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask); if (ret) goto error; } } return 0; error: - return rte_errno; + ret = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_flow_list_flush(dev, &priv->ctrl_flows); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** * Disable traffic flows configured by control plane * - * @param priv - * Pointer to Ethernet device private data. * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success. 
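The control flows above also switch from a hand-rolled { .tci = 0xffff } mask to the library default rte_flow_item_vlan_mask. A sketch of a broadcast-plus-VLAN pattern assembled the same way; the queue action and the validate-only call are choices of this example, not of the driver:

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    int check_vlan_ctrl_flow(uint16_t port_id, uint16_t vlan_id)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth bcast = {
            .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
        struct rte_flow_item_vlan vlan_spec = {
            .tci = rte_cpu_to_be_16(vlan_id),
        };
        /* Library-provided default mask instead of a hand-rolled one. */
        struct rte_flow_item_vlan vlan_mask = rte_flow_item_vlan_mask;
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH,
              .spec = &bcast, .mask = &bcast },
            { .type = RTE_FLOW_ITEM_TYPE_VLAN,
              .spec = &vlan_spec, .mask = &vlan_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error;

        return rte_flow_validate(port_id, &attr, pattern, actions, &error);
    }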
- */ -int -priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev) -{ - (void)dev; - priv_flow_flush(priv, &priv->ctrl_flows); - return 0; -} - -/** - * Restart traffic flows configured by control plane - * - * @param priv * Pointer to Ethernet device private data. - * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success. */ -int -priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev) +void +mlx5_traffic_disable(struct rte_eth_dev *dev) { - if (dev->data->dev_started) { - priv_dev_traffic_disable(priv, dev); - priv_dev_traffic_enable(priv, dev); - } - return 0; + struct priv *priv = dev->data->dev_private; + + mlx5_flow_list_flush(dev, &priv->ctrl_flows); } /** * Restart traffic flows configured by control plane * * @param dev - * Pointer to Ethernet device structure. + * Pointer to Ethernet device private data. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_traffic_restart(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; - - priv_lock(priv); - priv_dev_traffic_restart(priv, dev); - priv_unlock(priv); + if (dev->data->dev_started) { + mlx5_traffic_disable(dev); + return mlx5_traffic_enable(dev); + } return 0; } diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index ed1c713e..f9bc4739 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #include <stddef.h> @@ -47,7 +47,8 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl) for (i = 0; (i != elts_n); ++i) (*txq_ctrl->txq.elts)[i] = NULL; - DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n); + DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs", + PORT_ID(txq_ctrl->priv), txq_ctrl->idx, elts_n); txq_ctrl->txq.elts_head = 0; txq_ctrl->txq.elts_tail = 0; txq_ctrl->txq.elts_comp = 0; @@ -68,7 +69,8 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl) uint16_t elts_tail = txq_ctrl->txq.elts_tail; struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts; - DEBUG("%p: freeing WRs", (void *)txq_ctrl); + DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs", + PORT_ID(txq_ctrl->priv), txq_ctrl->idx); txq_ctrl->txq.elts_head = 0; txq_ctrl->txq.elts_tail = 0; txq_ctrl->txq.elts_comp = 0; @@ -91,15 +93,16 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl) /** * Returns the per-port supported offloads. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * Supported Tx offloads.
*/ uint64_t -mlx5_priv_get_tx_port_offloads(struct priv *priv) +mlx5_get_tx_port_offloads(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS | DEV_TX_OFFLOAD_VLAN_INSERT); struct mlx5_dev_config *config = &priv->config; @@ -110,6 +113,14 @@ mlx5_priv_get_tx_port_offloads(struct priv *priv) DEV_TX_OFFLOAD_TCP_CKSUM); if (config->tso) offloads |= DEV_TX_OFFLOAD_TCP_TSO; + if (config->swp) { + if (config->hw_csum) + offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + if (config->tso) + offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO); + } + if (config->tunnel_en) { if (config->hw_csum) offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; @@ -120,31 +131,6 @@ mlx5_priv_get_tx_port_offloads(struct priv *priv) return offloads; } -/** - * Checks if the per-queue offload configuration is valid. - * - * @param priv - * Pointer to private structure. - * @param offloads - * Per-queue offloads configuration. - * - * @return - * 1 if the configuration is valid, 0 otherwise. - */ -static int -priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) -{ - uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads; - uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv); - - /* There are no Tx offloads which are per queue. */ - if ((offloads & port_supp_offloads) != offloads) - return 0; - if ((port_offloads ^ offloads) & port_supp_offloads) - return 0; - return 1; -} - /** * DPDK callback to configure a TX queue. * @@ -160,7 +146,7 @@ priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) * Thresholds parameters. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, @@ -170,64 +156,47 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct mlx5_txq_data *txq = (*priv->txqs)[idx]; struct mlx5_txq_ctrl *txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); - int ret = 0; - priv_lock(priv); - /* - * Don't verify port offloads for application which - * use the old API. 
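mlx5_get_tx_port_offloads() composes the advertised Tx offload set from independent capability bits, with the new software parser (swp) block gating tunnel checksum and TSO. The same accumulation pattern reduced to a standalone sketch, using the 18.08 ethdev flag names and an assumed config structure:

    #include <stdint.h>
    #include <rte_ethdev.h>

    /* Assumed capability bits, mirroring fields of struct mlx5_dev_config. */
    struct demo_config {
        unsigned int hw_csum:1;
        unsigned int tso:1;
        unsigned int swp:1; /* software parser available */
    };

    uint64_t demo_tx_port_offloads(const struct demo_config *config)
    {
        uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS |
                            DEV_TX_OFFLOAD_VLAN_INSERT;

        if (config->hw_csum)
            offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM |
                        DEV_TX_OFFLOAD_UDP_CKSUM |
                        DEV_TX_OFFLOAD_TCP_CKSUM;
        if (config->tso)
            offloads |= DEV_TX_OFFLOAD_TCP_TSO;
        if (config->swp) { /* tunnel offloads depend on the other bits */
            if (config->hw_csum)
                offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
            if (config->tso)
                offloads |= DEV_TX_OFFLOAD_IP_TNL_TSO |
                            DEV_TX_OFFLOAD_UDP_TNL_TSO;
        }
        return offloads;
    }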
- */ - if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) && - !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) { - ret = ENOTSUP; - ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port " - "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, - (void *)dev, conf->offloads, - dev->data->dev_conf.txmode.offloads, - mlx5_priv_get_tx_port_offloads(priv)); - goto out; - } if (desc <= MLX5_TX_COMP_THRESH) { - WARN("%p: number of descriptors requested for TX queue %u" - " must be higher than MLX5_TX_COMP_THRESH, using" - " %u instead of %u", - (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc); + DRV_LOG(WARNING, + "port %u number of descriptors requested for Tx queue" + " %u must be higher than MLX5_TX_COMP_THRESH, using %u" + " instead of %u", + dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc); desc = MLX5_TX_COMP_THRESH + 1; } if (!rte_is_power_of_2(desc)) { desc = 1 << log2above(desc); - WARN("%p: increased number of descriptors in TX queue %u" - " to the next power of two (%d)", - (void *)dev, idx, desc); + DRV_LOG(WARNING, + "port %u increased number of descriptors in Tx queue" + " %u to the next power of two (%d)", + dev->data->port_id, idx, desc); } - DEBUG("%p: configuring queue %u for %u descriptors", - (void *)dev, idx, desc); + DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors", + dev->data->port_id, idx, desc); if (idx >= priv->txqs_n) { - ERROR("%p: queue index out of range (%u >= %u)", - (void *)dev, idx, priv->txqs_n); - priv_unlock(priv); - return -EOVERFLOW; + DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)", + dev->data->port_id, idx, priv->txqs_n); + rte_errno = EOVERFLOW; + return -rte_errno; } - if (!mlx5_priv_txq_releasable(priv, idx)) { - ret = EBUSY; - ERROR("%p: unable to release queue index %u", - (void *)dev, idx); - goto out; + if (!mlx5_txq_releasable(dev, idx)) { + rte_errno = EBUSY; + DRV_LOG(ERR, "port %u unable to release queue index %u", + dev->data->port_id, idx); + return -rte_errno; } - mlx5_priv_txq_release(priv, idx); - txq_ctrl = mlx5_priv_txq_new(priv, idx, desc, socket, conf); + mlx5_txq_release(dev, idx); + txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf); if (!txq_ctrl) { - ERROR("%p: unable to allocate queue index %u", - (void *)dev, idx); - ret = ENOMEM; - goto out; + DRV_LOG(ERR, "port %u unable to allocate queue index %u", + dev->data->port_id, idx); + return -rte_errno; } - DEBUG("%p: adding TX queue %p to list", - (void *)dev, (void *)txq_ctrl); + DRV_LOG(DEBUG, "port %u adding Tx queue %u to list", + dev->data->port_id, idx); (*priv->txqs)[idx] = &txq_ctrl->txq; -out: - priv_unlock(priv); - return -ret; + return 0; } /** @@ -248,15 +217,13 @@ mlx5_tx_queue_release(void *dpdk_txq) return; txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); priv = txq_ctrl->priv; - priv_lock(priv); for (i = 0; (i != priv->txqs_n); ++i) if ((*priv->txqs)[i] == txq) { - DEBUG("%p: removing TX queue %p from list", - (void *)priv->dev, (void *)txq_ctrl); - mlx5_priv_txq_release(priv, i); + mlx5_txq_release(ETH_DEV(priv), i); + DRV_LOG(DEBUG, "port %u removing Tx queue %u from list", + PORT_ID(priv), txq_ctrl->idx); break; } - priv_unlock(priv); } @@ -265,17 +232,18 @@ mlx5_tx_queue_release(void *dpdk_txq) * Both primary and secondary process do mmap to make UAR address * aligned. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * @param fd * Verbs file descriptor to map UAR pages. * * @return - * 0 on success, errno value on failure. 
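Descriptor counts are normalized above with desc = 1 << log2above(desc). The helper is reimplemented below to keep the sketch self-contained (the driver carries its own copy in mlx5_utils.h); it returns the exponent of the smallest power of two not below its argument:

    #include <stdio.h>

    /* Smallest l with (1 << l) >= v. */
    static unsigned int log2above(unsigned int v)
    {
        unsigned int l;
        unsigned int r;

        for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
            r |= (v & 1);
        return l + r;
    }

    int main(void)
    {
        unsigned int desc = 500;

        if (desc & (desc - 1)) /* not a power of two already */
            desc = 1 << log2above(desc);
        printf("%u\n", desc); /* prints 512 */
        return 0;
    }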
+ * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_tx_uar_remap(struct priv *priv, int fd) +mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd) { + struct priv *priv = dev->data->dev_private; unsigned int i, j; uintptr_t pages[priv->txqs_n]; unsigned int pages_n = 0; @@ -287,7 +255,9 @@ priv_tx_uar_remap(struct priv *priv, int fd) struct mlx5_txq_ctrl *txq_ctrl; int already_mapped; size_t page_size = sysconf(_SC_PAGESIZE); - int r; +#ifndef RTE_ARCH_64 + unsigned int lock_idx; +#endif memset(pages, 0, priv->txqs_n * sizeof(uintptr_t)); /* @@ -300,6 +270,7 @@ priv_tx_uar_remap(struct priv *priv, int fd) continue; txq = (*priv->txqs)[i]; txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); + assert(txq_ctrl->idx == (uint16_t)i); /* UAR addr form verbs used to find dup and offset in page. */ uar_va = (uintptr_t)txq_ctrl->bf_reg_orig; off = uar_va & (page_size - 1); /* offset in page. */ @@ -313,7 +284,7 @@ priv_tx_uar_remap(struct priv *priv, int fd) } /* new address in reserved UAR address space. */ addr = RTE_PTR_ADD(priv->uar_base, - uar_va & (MLX5_UAR_SIZE - 1)); + uar_va & (uintptr_t)(MLX5_UAR_SIZE - 1)); if (!already_mapped) { pages[pages_n++] = uar_va; /* fixed mmap to specified address in reserved @@ -324,10 +295,12 @@ priv_tx_uar_remap(struct priv *priv, int fd) txq_ctrl->uar_mmap_offset); if (ret != addr) { /* fixed mmap have to return same address */ - ERROR("call to mmap failed on UAR for txq %d\n", - i); - r = ENXIO; - return r; + DRV_LOG(ERR, + "port %u call to mmap failed on UAR" + " for txq %u", + dev->data->port_id, txq_ctrl->idx); + rte_errno = ENXIO; + return -rte_errno; } } if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */ @@ -335,6 +308,12 @@ priv_tx_uar_remap(struct priv *priv, int fd) else assert(txq_ctrl->txq.bf_reg == RTE_PTR_ADD((void *)addr, off)); +#ifndef RTE_ARCH_64 + /* Assign a UAR lock according to UAR page number */ + lock_idx = (txq_ctrl->uar_mmap_offset / page_size) & + MLX5_UAR_PAGE_NUM_MASK; + txq->uar_lock = &priv->uar_lock[lock_idx]; +#endif } return 0; } @@ -361,17 +340,18 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst) /** * Create the Tx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * Queue index in DPDK Rx queue array * * @return - * The Verbs object initialised if it can be created. + * The Verbs object initialised, NULL otherwise and rte_errno is set. 
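The UAR remap above splits each doorbell address into an offset within its page and a page-aligned target inside the reserved region, and, on 32-bit builds, derives a lock index from the queue's mmap offset. The arithmetic in isolation, with made-up constants and addresses:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define DEMO_UAR_SIZE          (1u << 20) /* stand-in for MLX5_UAR_SIZE */
    #define DEMO_UAR_PAGE_NUM_MASK 0x3u       /* stand-in, 4 locks */

    int main(void)
    {
        uintptr_t page_size = (uintptr_t)sysconf(_SC_PAGESIZE);
        uintptr_t uar_va = 0x7f345678;   /* example doorbell address */
        uintptr_t uar_base = 0x40000000; /* reserved UAR region base */
        uintptr_t off = uar_va & (page_size - 1); /* offset in page */
        uintptr_t addr = uar_base + (uar_va & (uintptr_t)(DEMO_UAR_SIZE - 1));
        uintptr_t uar_mmap_offset = 3 * page_size; /* example offset */
        unsigned int lock_idx =
            (uar_mmap_offset / page_size) & DEMO_UAR_PAGE_NUM_MASK;

        printf("off=%#lx addr=%#lx lock=%u\n", (unsigned long)off,
               (unsigned long)(addr + off), lock_idx);
        return 0;
    }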
*/ -struct mlx5_txq_ibv* -mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) +struct mlx5_txq_ibv * +mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; struct mlx5_txq_ctrl *txq_ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq); @@ -388,18 +368,20 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) struct mlx5dv_cq cq_info; struct mlx5dv_obj obj; const int desc = 1 << txq_data->elts_n; - eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev); + eth_tx_burst_t tx_pkt_burst = mlx5_select_tx_function(dev); int ret = 0; assert(txq_data); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE; priv->verbs_alloc_ctx.obj = txq_ctrl; if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) { - ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set"); - goto error; + DRV_LOG(ERR, + "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set", + dev->data->port_id); + rte_errno = EINVAL; + return NULL; } memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv)); - /* MRs will be registered in mp2mr[] later. */ attr.cq = (struct ibv_cq_init_attr_ex){ .comp_mask = 0, }; @@ -409,7 +391,9 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV; tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0); if (tmpl.cq == NULL) { - ERROR("%p: CQ creation failure", (void *)txq_ctrl); + DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure", + dev->data->port_id, idx); + rte_errno = errno; goto error; } attr.init = (struct ibv_qp_init_attr_ex){ @@ -450,19 +434,24 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) } tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init); if (tmpl.qp == NULL) { - ERROR("%p: QP creation failure", (void *)txq_ctrl); + DRV_LOG(ERR, "port %u Tx queue %u QP creation failure", + dev->data->port_id, idx); + rte_errno = errno; goto error; } attr.mod = (struct ibv_qp_attr){ /* Move the QP to this state. */ .qp_state = IBV_QPS_INIT, /* Primary port number. 
*/ - .port_num = priv->port + .port_num = 1, }; ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, (IBV_QP_STATE | IBV_QP_PORT)); if (ret) { - ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl); + DRV_LOG(ERR, + "port %u Tx queue %u QP state to IBV_QPS_INIT failed", + dev->data->port_id, idx); + rte_errno = errno; goto error; } attr.mod = (struct ibv_qp_attr){ @@ -470,19 +459,27 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) }; ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); if (ret) { - ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl); + DRV_LOG(ERR, + "port %u Tx queue %u QP state to IBV_QPS_RTR failed", + dev->data->port_id, idx); + rte_errno = errno; goto error; } attr.mod.qp_state = IBV_QPS_RTS; ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); if (ret) { - ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl); + DRV_LOG(ERR, + "port %u Tx queue %u QP state to IBV_QPS_RTS failed", + dev->data->port_id, idx); + rte_errno = errno; goto error; } txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0, txq_ctrl->socket); if (!txq_ibv) { - ERROR("%p: cannot allocate memory", (void *)txq_ctrl); + DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory", + dev->data->port_id, idx); + rte_errno = ENOMEM; goto error; } obj.cq.in = tmpl.cq; @@ -490,11 +487,16 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) obj.qp.in = tmpl.qp; obj.qp.out = &qp; ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP); - if (ret != 0) + if (ret != 0) { + rte_errno = errno; goto error; + } if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { - ERROR("Wrong MLX5_CQE_SIZE environment variable value: " - "it should be set to %u", RTE_CACHE_LINE_SIZE); + DRV_LOG(ERR, + "port %u wrong MLX5_CQE_SIZE environment variable" + " value: it should be set to %u", + dev->data->port_id, RTE_CACHE_LINE_SIZE); + rte_errno = EINVAL; goto error; } txq_data->cqe_n = log2above(cq_info.cqe_cnt); @@ -518,38 +520,46 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) rte_atomic32_inc(&txq_ibv->refcnt); if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) { txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset; + DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%lx", + dev->data->port_id, txq_ctrl->uar_mmap_offset); } else { - ERROR("Failed to retrieve UAR info, invalid libmlx5.so version"); + DRV_LOG(ERR, + "port %u failed to retrieve UAR info, invalid" + " libmlx5.so", + dev->data->port_id); + rte_errno = EINVAL; goto error; } - DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, - (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt)); LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next); + txq_ibv->txq_ctrl = txq_ctrl; priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; return txq_ibv; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ if (tmpl.cq) claim_zero(mlx5_glue->destroy_cq(tmpl.cq)); if (tmpl.qp) claim_zero(mlx5_glue->destroy_qp(tmpl.qp)); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + rte_errno = ret; /* Restore rte_errno. */ return NULL; } /** * Get an Tx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * Queue index in DPDK Rx queue array * * @return * The Verbs object if it exists. 
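mlx5_txq_ibv_new() walks the Verbs QP state machine INIT -> RTR -> RTS, capturing errno into rte_errno on each failure. Stripped of logging, the sequence looks roughly like this; the minimal attribute masks shown suffice for raw packet QPs like these, while connected QP types would need additional attributes:

    #include <infiniband/verbs.h>

    /* Returns 0 on success, a negative errno value otherwise. */
    int qp_to_rts(struct ibv_qp *qp)
    {
        struct ibv_qp_attr mod = {
            .qp_state = IBV_QPS_INIT,
            .port_num = 1, /* primary port, as in the patch */
        };
        int ret;

        ret = ibv_modify_qp(qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
        if (ret)
            return -ret;
        mod = (struct ibv_qp_attr){ .qp_state = IBV_QPS_RTR };
        ret = ibv_modify_qp(qp, &mod, IBV_QP_STATE);
        if (ret)
            return -ret;
        mod.qp_state = IBV_QPS_RTS;
        ret = ibv_modify_qp(qp, &mod, IBV_QP_STATE);
        return ret ? -ret : 0;
    }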
*/ -struct mlx5_txq_ibv* -mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) +struct mlx5_txq_ibv * +mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *txq_ctrl; if (idx >= priv->txqs_n) @@ -557,33 +567,24 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) if (!(*priv->txqs)[idx]) return NULL; txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); - if (txq_ctrl->ibv) { + if (txq_ctrl->ibv) rte_atomic32_inc(&txq_ctrl->ibv->refcnt); - DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, - (void *)txq_ctrl->ibv, - rte_atomic32_read(&txq_ctrl->ibv->refcnt)); - } return txq_ctrl->ibv; } /** * Release an Tx verbs queue object. * - * @param priv - * Pointer to private structure. * @param txq_ibv * Verbs Tx queue object. * * @return - * 0 on success, errno on failure. + * 1 while a reference on it exists, 0 when freed. */ int -mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) +mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv) { - (void)priv; assert(txq_ibv); - DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, - (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt)); if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) { claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp)); claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq)); @@ -591,21 +592,18 @@ mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) rte_free(txq_ibv); return 0; } - return EBUSY; + return 1; } /** * Return true if a single reference exists on the object. * - * @param priv - * Pointer to private structure. * @param txq_ibv * Verbs Tx queue object. */ int -mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) +mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv) { - (void)priv; assert(txq_ibv); return (rte_atomic32_read(&txq_ibv->refcnt) == 1); } @@ -613,20 +611,22 @@ mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) /** * Verify the Verbs Tx queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * - * @return the number of object not released. + * @return + * The number of object not released. */ int -mlx5_priv_txq_ibv_verify(struct priv *priv) +mlx5_txq_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret = 0; struct mlx5_txq_ibv *txq_ibv; LIST_FOREACH(txq_ibv, &priv->txqsibv, next) { - DEBUG("%p: Verbs Tx queue %p still referenced", (void *)priv, - (void *)txq_ibv); + DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced", + dev->data->port_id, txq_ibv->txq_ctrl->idx); ++ret; } return ret; @@ -649,9 +649,14 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) unsigned int txq_inline; unsigned int txqs_inline; unsigned int inline_max_packet_sz; - eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev); + eth_tx_burst_t tx_pkt_burst = + mlx5_select_tx_function(ETH_DEV(priv)); int is_empw_func = is_empw_burst_func(tx_pkt_burst); - int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO); + int tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO)); txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ? 
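The release functions also change contract: instead of 0 or EBUSY they now return 1 while references remain and 0 once the object is freed, which lets callers write if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv)) txq->ibv = NULL;. The pattern in miniature:

    #include <rte_atomic.h>
    #include <rte_malloc.h>

    struct demo_obj {
        rte_atomic32_t refcnt;
    };

    void demo_obj_get(struct demo_obj *obj)
    {
        rte_atomic32_inc(&obj->refcnt);
    }

    /* 1 while a reference on the object exists, 0 when freed. */
    int demo_obj_release(struct demo_obj *obj)
    {
        if (rte_atomic32_dec_and_test(&obj->refcnt)) {
            rte_free(obj);
            return 0;
        }
        return 1;
    }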
0 : config->txq_inline; @@ -685,18 +690,6 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) inline_max_packet_sz) + (RTE_CACHE_LINE_SIZE - 1)) / RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE; - } else if (tso) { - int inline_diff = txq_ctrl->txq.max_inline - - max_tso_inline; - - /* - * Adjust inline value as Verbs aggregates - * tso_inline and txq_inline fields. - */ - txq_ctrl->max_inline_data = inline_diff > 0 ? - inline_diff * - RTE_CACHE_LINE_SIZE : - 0; } else { txq_ctrl->max_inline_data = txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE; @@ -716,9 +709,10 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) max_inline = max_inline - (max_inline % RTE_CACHE_LINE_SIZE); - WARN("txq inline is too large (%d) setting it to " - "the maximum possible: %d\n", - txq_inline, max_inline); + DRV_LOG(WARNING, + "port %u txq inline is too large (%d) setting" + " it to the maximum possible: %d\n", + PORT_ID(priv), txq_inline, max_inline); txq_ctrl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE; } @@ -729,14 +723,18 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) max_tso_inline); txq_ctrl->txq.tso_en = 1; } - txq_ctrl->txq.tunnel_en = config->tunnel_en; + txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp; + txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) & + txq_ctrl->txq.offloads) && config->swp; } /** * Create a DPDK Tx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @param desc @@ -747,76 +745,75 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) * Thresholds parameters. * * @return - * A DPDK queue object on success. + * A DPDK queue object on success, NULL otherwise and rte_errno is set. */ -struct mlx5_txq_ctrl* -mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, - unsigned int socket, - const struct rte_eth_txconf *conf) +struct mlx5_txq_ctrl * +mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *tmpl; tmpl = rte_calloc_socket("TXQ", 1, sizeof(*tmpl) + desc * sizeof(struct rte_mbuf *), 0, socket); - if (!tmpl) + if (!tmpl) { + rte_errno = ENOMEM; return NULL; + } + if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh, + MLX5_MR_BTREE_CACHE_N, socket)) { + /* rte_errno is already set. */ + goto error; + } + /* Save pointer of global generation number to check memory event. */ + tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen; assert(desc > MLX5_TX_COMP_THRESH); - tmpl->txq.offloads = conf->offloads; + tmpl->txq.offloads = conf->offloads | + dev->data->dev_conf.txmode.offloads; tmpl->priv = priv; tmpl->socket = socket; tmpl->txq.elts_n = log2above(desc); + tmpl->idx = idx; txq_set_params(tmpl); - /* MRs will be registered in mp2mr[] later. 
*/ - DEBUG("priv->device_attr.max_qp_wr is %d", - priv->device_attr.orig_attr.max_qp_wr); - DEBUG("priv->device_attr.max_sge is %d", - priv->device_attr.orig_attr.max_sge); + DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d", + dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr); + DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d", + dev->data->port_id, priv->device_attr.orig_attr.max_sge); tmpl->txq.elts = (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1); tmpl->txq.stats.idx = idx; rte_atomic32_inc(&tmpl->refcnt); - DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, - (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next); return tmpl; +error: + rte_free(tmpl); + return NULL; } /** * Get a Tx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * * @return * A pointer to the queue if it exists. */ -struct mlx5_txq_ctrl* -mlx5_priv_txq_get(struct priv *priv, uint16_t idx) +struct mlx5_txq_ctrl * +mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *ctrl = NULL; if ((*priv->txqs)[idx]) { ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); - unsigned int i; - - mlx5_priv_txq_ibv_get(priv, idx); - for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) { - struct mlx5_mr *mr = NULL; - - (void)mr; - if (ctrl->txq.mp2mr[i]) { - mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp); - assert(mr); - } - } + mlx5_txq_ibv_get(dev, idx); rte_atomic32_inc(&ctrl->refcnt); - DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, - (void *)ctrl, rte_atomic32_read(&ctrl->refcnt)); } return ctrl; } @@ -824,57 +821,45 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx) /** * Release a Tx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * * @return - * 0 on success, errno on failure. + * 1 while a reference on it exists, 0 when freed. */ int -mlx5_priv_txq_release(struct priv *priv, uint16_t idx) +mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx) { - unsigned int i; + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *txq; size_t page_size = sysconf(_SC_PAGESIZE); if (!(*priv->txqs)[idx]) return 0; txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); - DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, - (void *)txq, rte_atomic32_read(&txq->refcnt)); - if (txq->ibv) { - int ret; - - ret = mlx5_priv_txq_ibv_release(priv, txq->ibv); - if (!ret) - txq->ibv = NULL; - } - for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) { - if (txq->txq.mp2mr[i]) { - priv_mr_release(priv, txq->txq.mp2mr[i]); - txq->txq.mp2mr[i] = NULL; - } - } + if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv)) + txq->ibv = NULL; if (priv->uar_base) munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg, page_size), page_size); if (rte_atomic32_dec_and_test(&txq->refcnt)) { txq_free_elts(txq); + mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh); LIST_REMOVE(txq, next); rte_free(txq); (*priv->txqs)[idx] = NULL; return 0; } - return EBUSY; + return 1; } /** * Verify if the queue can be released. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @@ -882,8 +867,9 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx) * 1 if the queue can be released. 
*/ int -mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx) +mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *txq; if (!(*priv->txqs)[idx]) @@ -895,20 +881,22 @@ mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx) /** * Verify the Tx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * - * @return the number of object not released. + * @return + * The number of object not released. */ int -mlx5_priv_txq_verify(struct priv *priv) +mlx5_txq_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *txq; int ret = 0; LIST_FOREACH(txq, &priv->txqsctrl, next) { - DEBUG("%p: Tx Queue %p still referenced", (void *)priv, - (void *)txq); + DRV_LOG(DEBUG, "port %u Tx queue %u still referenced", + dev->data->port_id, txq->idx); ++ret; } return ret; diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h index e1bfb9cd..886f60e6 100644 --- a/drivers/net/mlx5/mlx5_utils.h +++ b/drivers/net/mlx5/mlx5_utils.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_UTILS_H_ @@ -61,14 +61,21 @@ pmd_drv_log_basename(const char *s) return s; } +extern int mlx5_logtype; + +#define PMD_DRV_LOG___(level, ...) \ + rte_log(RTE_LOG_ ## level, \ + mlx5_logtype, \ + RTE_FMT(MLX5_DRIVER_NAME ": " \ + RTE_FMT_HEAD(__VA_ARGS__,), \ + RTE_FMT_TAIL(__VA_ARGS__,))) + /* * When debugging is enabled (NDEBUG not defined), file, line and function * information replace the driver name (MLX5_DRIVER_NAME) in log messages. */ #ifndef NDEBUG -#define PMD_DRV_LOG___(level, ...) \ - ERRNO_SAFE(RTE_LOG(level, PMD, __VA_ARGS__)) #define PMD_DRV_LOG__(level, ...) \ PMD_DRV_LOG___(level, "%s:%u: %s(): " __VA_ARGS__) #define PMD_DRV_LOG_(level, s, ...) \ @@ -80,9 +87,6 @@ pmd_drv_log_basename(const char *s) __VA_ARGS__) #else /* NDEBUG */ - -#define PMD_DRV_LOG___(level, ...) \ - ERRNO_SAFE(RTE_LOG(level, PMD, MLX5_DRIVER_NAME ": " __VA_ARGS__)) #define PMD_DRV_LOG__(level, ...) \ PMD_DRV_LOG___(level, __VA_ARGS__) #define PMD_DRV_LOG_(level, s, ...) \ @@ -91,18 +95,15 @@ pmd_drv_log_basename(const char *s) #endif /* NDEBUG */ /* Generic printf()-like logging macro with automatic line feed. */ -#define PMD_DRV_LOG(level, ...) \ +#define DRV_LOG(level, ...) \ PMD_DRV_LOG_(level, \ __VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \ PMD_DRV_LOG_CPAREN) -/* - * Like assert(), DEBUG() becomes a no-op and claim_zero() does not perform - * any check when debugging is disabled. - */ +/* claim_zero() does not perform any check when debugging is disabled. */ #ifndef NDEBUG -#define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__) +#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__) #define claim_zero(...) assert((__VA_ARGS__) == 0) #define claim_nonzero(...) assert((__VA_ARGS__) != 0) @@ -114,9 +115,9 @@ pmd_drv_log_basename(const char *s) #endif /* NDEBUG */ -#define INFO(...) PMD_DRV_LOG(INFO, __VA_ARGS__) -#define WARN(...) PMD_DRV_LOG(WARNING, __VA_ARGS__) -#define ERROR(...) PMD_DRV_LOG(ERR, __VA_ARGS__) +#define INFO(...) DRV_LOG(INFO, __VA_ARGS__) +#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__) +#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__) /* Convenience macros for accessing mbuf fields. 
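The logging rework funnels every driver message through a single rte_log() call with the registered mlx5_logtype and an automatic MLX5_DRIVER_NAME prefix. A simplified model of that macro shape, writing to stderr instead of rte_log() and using the GNU ##__VA_ARGS__ extension that DPDK also relies on:

    #include <stdarg.h>
    #include <stdio.h>

    #define DEMO_DRIVER_NAME "demo_pmd"

    static void demo_log(const char *level, const char *fmt, ...)
    {
        va_list ap;

        fprintf(stderr, "[%s] ", level);
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }

    /* One sink, automatic prefix and newline, level kept as a token. */
    #define DEMO_LOG(level, fmt, ...) \
        demo_log(#level, DEMO_DRIVER_NAME ": " fmt "\n", ##__VA_ARGS__)

    int main(void)
    {
        DEMO_LOG(WARNING, "port %u cannot bind socket", 0u);
        return 0;
    }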
*/ #define NEXT(m) ((m)->next) diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index 75c34562..c91d08be 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #include <stddef.h> @@ -8,6 +8,12 @@ #include <assert.h> #include <stdint.h> +/* + * Not needed by this file; included to work around the lack of off_t + * definition for mlx5dv.h with unpatched rdma-core versions. + */ +#include <sys/types.h> + /* Verbs headers do not support -pedantic. */ #ifdef PEDANTIC #pragma GCC diagnostic ignored "-Wpedantic" @@ -37,26 +43,24 @@ * Toggle filter. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { struct priv *priv = dev->data->dev_private; unsigned int i; - int ret = 0; - priv_lock(priv); - DEBUG("%p: %s VLAN filter ID %" PRIu16, - (void *)dev, (on ? "enable" : "disable"), vlan_id); + DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16, + dev->data->port_id, (on ? "enable" : "disable"), vlan_id); assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter)); for (i = 0; (i != priv->vlan_filter_n); ++i) if (priv->vlan_filter[i] == vlan_id) break; /* Check if there's room for another VLAN filter. */ if (i == RTE_DIM(priv->vlan_filter)) { - ret = -ENOMEM; - goto out; + rte_errno = ENOMEM; + return -rte_errno; } if (i < priv->vlan_filter_n) { assert(priv->vlan_filter_n != 0); @@ -79,37 +83,49 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) priv->vlan_filter[priv->vlan_filter_n] = vlan_id; ++priv->vlan_filter_n; } - if (dev->data->dev_started) - priv_dev_traffic_restart(priv, dev); out: - priv_unlock(priv); - return ret; + if (dev->data->dev_started) + return mlx5_traffic_restart(dev); + return 0; } /** - * Set/reset VLAN stripping for a specific queue. + * Callback to set/reset VLAN stripping for a specific queue. * - * @param priv - * Pointer to private structure. - * @param idx + * @param dev + * Pointer to Ethernet device structure. + * @param queue * RX queue index. * @param on * Enable/disable VLAN stripping. */ -static void -priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) +void +mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) { - struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; + struct priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); struct ibv_wq_attr mod; uint16_t vlan_offloads = (on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) | 0; - int err; + int ret; - DEBUG("set VLAN offloads 0x%x for port %d queue %d", - vlan_offloads, rxq->port_id, idx); + /* Validate hw support */ + if (!priv->config.hw_vlan_strip) { + DRV_LOG(ERR, "port %u VLAN stripping is not supported", + dev->data->port_id); + return; + } + /* Validate queue number */ + if (queue >= priv->rxqs_n) { + DRV_LOG(ERR, "port %u VLAN stripping, invalid queue number %d", + dev->data->port_id, queue); + return; + } + DRV_LOG(DEBUG, "port %u set VLAN offloads 0x%x for port %u queue %d", + dev->data->port_id, vlan_offloads, rxq->port_id, queue); if (!rxq_ctrl->ibv) { /* Update related bits in RX queue.
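 * No Verbs WQ exists yet (the port is not started), so only the
 * software flag can be toggled at this point; hardware stripping
 * presumably takes effect once the WQ is created with this setting.
 * Otherwise the live WQ is modified below via mlx5_glue->modify_wq().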
*/ rxq->vlan_strip = !!on; @@ -120,50 +136,16 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING, .flags = vlan_offloads, }; - - err = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod); - if (err) { - ERROR("%p: failed to modified stripping mode: %s", - (void *)priv, strerror(err)); + ret = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod); + if (ret) { + DRV_LOG(ERR, "port %u failed to modify stripping mode: %s", + dev->data->port_id, strerror(rte_errno)); return; } - /* Update related bits in RX queue. */ rxq->vlan_strip = !!on; } -/** - * Callback to set/reset VLAN stripping for a specific queue. - * - * @param dev - * Pointer to Ethernet device structure. - * @param queue - * RX queue index. - * @param on - * Enable/disable VLAN stripping. - */ -void -mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) -{ - struct priv *priv = dev->data->dev_private; - - /* Validate hw support */ - if (!priv->config.hw_vlan_strip) { - ERROR("VLAN stripping is not supported"); - return; - } - - /* Validate queue number */ - if (queue >= priv->rxqs_n) { - ERROR("VLAN stripping, invalid queue number %d", queue); - return; - } - - priv_lock(priv); - priv_vlan_strip_queue_set(priv, queue, on); - priv_unlock(priv); -} - /** * Callback to set/reset VLAN offloads for a port. * @@ -171,6 +153,9 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) * Pointer to Ethernet device structure. * @param mask * VLAN offload bit mask. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask) @@ -183,16 +168,13 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask) DEV_RX_OFFLOAD_VLAN_STRIP); if (!priv->config.hw_vlan_strip) { - ERROR("VLAN stripping is not supported"); + DRV_LOG(ERR, "port %u VLAN stripping is not supported", + dev->data->port_id); return 0; } - /* Run on every RX queue and set/reset VLAN stripping. */ - priv_lock(priv); for (i = 0; (i != priv->rxqs_n); i++) - priv_vlan_strip_queue_set(priv, i, hw_vlan_strip); - priv_unlock(priv); + mlx5_vlan_strip_queue_set(dev, i, hw_vlan_strip); } - return 0; } diff --git a/drivers/net/mrvl/Makefile b/drivers/net/mrvl/Makefile deleted file mode 100644 index f75e53c6..00000000 --- a/drivers/net/mrvl/Makefile +++ /dev/null @@ -1,68 +0,0 @@ -# BSD LICENSE -# -# Copyright(c) 2017 Marvell International Ltd. -# Copyright(c) 2017 Semihalf. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -include $(RTE_SDK)/mk/rte.vars.mk - -ifneq ($(MAKECMDGOALS),clean) -ifneq ($(MAKECMDGOALS),config) -ifeq ($(LIBMUSDK_PATH),) -$(error "Please define LIBMUSDK_PATH environment variable") -endif -endif -endif - -# library name -LIB = librte_pmd_mrvl.a - -# library version -LIBABIVER := 1 - -# versioning export map -EXPORT_MAP := rte_pmd_mrvl_version.map - -# external library dependencies -CFLAGS += -I$(LIBMUSDK_PATH)/include -CFLAGS += -DMVCONF_TYPES_PUBLIC -CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC -CFLAGS += $(WERROR_FLAGS) -CFLAGS += -O3 -LDLIBS += -L$(LIBMUSDK_PATH)/lib -LDLIBS += -lmusdk -LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile -LDLIBS += -lrte_bus_vdev - -# library source files -SRCS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl_ethdev.c -SRCS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl_qos.c - -include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/mrvl/mrvl_ethdev.c b/drivers/net/mrvl/mrvl_ethdev.c deleted file mode 100644 index 705c4bd8..00000000 --- a/drivers/net/mrvl/mrvl_ethdev.c +++ /dev/null @@ -1,2511 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Semihalf nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include - -/* Unluckily, container_of is defined by both DPDK and MUSDK, - * we'll declare only one version. - * - * Note that it is not used in this PMD anyway. 
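- * (Being unused, the #undef below merely avoids a macro redefinition
- * warning between the DPDK and MUSDK headers; it cannot change
- * behaviour.)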
- */ -#ifdef container_of -#undef container_of -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "mrvl_ethdev.h" -#include "mrvl_qos.h" - -/* bitmask with reserved hifs */ -#define MRVL_MUSDK_HIFS_RESERVED 0x0F -/* bitmask with reserved bpools */ -#define MRVL_MUSDK_BPOOLS_RESERVED 0x07 -/* bitmask with reserved kernel RSS tables */ -#define MRVL_MUSDK_RSS_RESERVED 0x01 -/* maximum number of available hifs */ -#define MRVL_MUSDK_HIFS_MAX 9 - -/* prefetch shift */ -#define MRVL_MUSDK_PREFETCH_SHIFT 2 - -/* TCAM has 25 entries reserved for uc/mc filter entries */ -#define MRVL_MAC_ADDRS_MAX 25 -#define MRVL_MATCH_LEN 16 -#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE) -/* Maximum allowable packet size */ -#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE) - -#define MRVL_IFACE_NAME_ARG "iface" -#define MRVL_CFG_ARG "cfg" - -#define MRVL_BURST_SIZE 64 - -#define MRVL_ARP_LENGTH 28 - -#define MRVL_COOKIE_ADDR_INVALID ~0ULL - -#define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8) -#define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT) - -/* Memory size (in bytes) for MUSDK dma buffers */ -#define MRVL_MUSDK_DMA_MEMSIZE 41943040 - -/** Port Rx offload capabilities */ -#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \ - DEV_RX_OFFLOAD_JUMBO_FRAME | \ - DEV_RX_OFFLOAD_CRC_STRIP | \ - DEV_RX_OFFLOAD_CHECKSUM) - -/** Port Tx offloads capabilities */ -#define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \ - DEV_TX_OFFLOAD_UDP_CKSUM | \ - DEV_TX_OFFLOAD_TCP_CKSUM) - -static const char * const valid_args[] = { - MRVL_IFACE_NAME_ARG, - MRVL_CFG_ARG, - NULL -}; - -static int used_hifs = MRVL_MUSDK_HIFS_RESERVED; -static struct pp2_hif *hifs[RTE_MAX_LCORE]; -static int used_bpools[PP2_NUM_PKT_PROC] = { - MRVL_MUSDK_BPOOLS_RESERVED, - MRVL_MUSDK_BPOOLS_RESERVED -}; - -struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS]; -int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE]; -uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID; - -struct mrvl_ifnames { - const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC]; - int idx; -}; - -/* - * To use buffer harvesting based on loopback port shadow queue structure - * was introduced for buffers information bookkeeping. - * - * Before sending the packet, related buffer information (pp2_buff_inf) is - * stored in shadow queue. After packet is transmitted no longer used - * packet buffer is released back to it's original hardware pool, - * on condition it originated from interface. - * In case it was generated by application itself i.e: mbuf->port field is - * 0xff then its released to software mempool. 
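- *
- * Per-entry release decision, sketched (the real loop is in
- * mrvl_free_sent_buffers() below):
- *   if (entry->bpool != NULL)
- *           return the buffer to its hardware bpool via
- *           pp2_bpool_put_buffs();
- *   else
- *           rte_pktmbuf_free() the mbuf rebuilt from
- *           cookie_addr_high | entry->buff.cookie.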
- */ -struct mrvl_shadow_txq { - int head; /* write index - used when sending buffers */ - int tail; /* read index - used when releasing buffers */ - u16 size; /* queue occupied size */ - u16 num_to_release; /* number of buffers sent, that can be released */ - struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */ -}; - -struct mrvl_rxq { - struct mrvl_priv *priv; - struct rte_mempool *mp; - int queue_id; - int port_id; - int cksum_enabled; - uint64_t bytes_recv; - uint64_t drop_mac; -}; - -struct mrvl_txq { - struct mrvl_priv *priv; - int queue_id; - int port_id; - uint64_t bytes_sent; - struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE]; -}; - -static int mrvl_lcore_first; -static int mrvl_lcore_last; -static int mrvl_dev_num; - -static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num); -static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio, - struct pp2_hif *hif, unsigned int core_id, - struct mrvl_shadow_txq *sq, int qid, int force); - -static inline int -mrvl_get_bpool_size(int pp2_id, int pool_id) -{ - int i; - int size = 0; - - for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) - size += mrvl_port_bpool_size[pp2_id][pool_id][i]; - - return size; -} - -static inline int -mrvl_reserve_bit(int *bitmap, int max) -{ - int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap); - - if (n >= max) - return -1; - - *bitmap |= 1 << n; - - return n; -} - -static int -mrvl_init_hif(int core_id) -{ - struct pp2_hif_params params; - char match[MRVL_MATCH_LEN]; - int ret; - - ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX); - if (ret < 0) { - RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id); - return ret; - } - - snprintf(match, sizeof(match), "hif-%d", ret); - memset(¶ms, 0, sizeof(params)); - params.match = match; - params.out_size = MRVL_PP2_AGGR_TXQD_MAX; - ret = pp2_hif_init(¶ms, &hifs[core_id]); - if (ret) { - RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", core_id); - return ret; - } - - return 0; -} - -static inline struct pp2_hif* -mrvl_get_hif(struct mrvl_priv *priv, int core_id) -{ - int ret; - - if (likely(hifs[core_id] != NULL)) - return hifs[core_id]; - - rte_spinlock_lock(&priv->lock); - - ret = mrvl_init_hif(core_id); - if (ret < 0) { - RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id); - goto out; - } - - if (core_id < mrvl_lcore_first) - mrvl_lcore_first = core_id; - - if (core_id > mrvl_lcore_last) - mrvl_lcore_last = core_id; -out: - rte_spinlock_unlock(&priv->lock); - - return hifs[core_id]; -} - -/** - * Configure rss based on dpdk rss configuration. - * - * @param priv - * Pointer to private structure. - * @param rss_conf - * Pointer to RSS configuration. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf) -{ - if (rss_conf->rss_key) - RTE_LOG(WARNING, PMD, "Changing hash key is not supported\n"); - - if (rss_conf->rss_hf == 0) { - priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; - } else if (rss_conf->rss_hf & ETH_RSS_IPV4) { - priv->ppio_params.inqs_params.hash_type = - PP2_PPIO_HASH_T_2_TUPLE; - } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { - priv->ppio_params.inqs_params.hash_type = - PP2_PPIO_HASH_T_5_TUPLE; - priv->rss_hf_tcp = 1; - } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { - priv->ppio_params.inqs_params.hash_type = - PP2_PPIO_HASH_T_5_TUPLE; - priv->rss_hf_tcp = 0; - } else { - return -EINVAL; - } - - return 0; -} - -/** - * Ethernet device configuration. 
- * - * Prepare the driver for a given number of TX and RX queues and - * configure RSS. - * - * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_dev_configure(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int ret; - - if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE && - dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) { - RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n", - dev->data->dev_conf.rxmode.mq_mode); - return -EINVAL; - } - - if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) { - RTE_LOG(INFO, PMD, - "L2 CRC stripping is always enabled in hw\n"); - dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP; - } - - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { - RTE_LOG(INFO, PMD, "VLAN stripping not supported\n"); - return -EINVAL; - } - - if (dev->data->dev_conf.rxmode.split_hdr_size) { - RTE_LOG(INFO, PMD, "Split headers not supported\n"); - return -EINVAL; - } - - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { - RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n"); - return -EINVAL; - } - - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) { - RTE_LOG(INFO, PMD, "LRO not supported\n"); - return -EINVAL; - } - - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) - dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len - - ETHER_HDR_LEN - ETHER_CRC_LEN; - - ret = mrvl_configure_rxqs(priv, dev->data->port_id, - dev->data->nb_rx_queues); - if (ret < 0) - return ret; - - priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues; - priv->ppio_params.maintain_stats = 1; - priv->nb_rx_queues = dev->data->nb_rx_queues; - - if (dev->data->nb_rx_queues == 1 && - dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { - RTE_LOG(WARNING, PMD, "Disabling hash for 1 rx queue\n"); - priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; - - return 0; - } - - return mrvl_configure_rss(priv, - &dev->data->dev_conf.rx_adv_conf.rss_conf); -} - -/** - * DPDK callback to change the MTU. - * - * Setting the MTU affects hardware MRU (packets larger than the MRU - * will be dropped). - * - * @param dev - * Pointer to Ethernet device structure. - * @param mtu - * New MTU. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) -{ - struct mrvl_priv *priv = dev->data->dev_private; - /* extra MV_MH_SIZE bytes are required for Marvell tag */ - uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN; - int ret; - - if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) - return -EINVAL; - - if (!priv->ppio) - return 0; - - ret = pp2_ppio_set_mru(priv->ppio, mru); - if (ret) - return ret; - - return pp2_ppio_set_mtu(priv->ppio, mtu); -} - -/** - * DPDK callback to bring the link up. - * - * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_dev_set_link_up(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int ret; - - if (!priv->ppio) - return -EPERM; - - ret = pp2_ppio_enable(priv->ppio); - if (ret) - return ret; - - /* - * mtu/mru can be updated if pp2_ppio_enable() was called at least once - * as pp2_ppio_enable() changes port->t_mode from default 0 to - * PP2_TRAFFIC_INGRESS_EGRESS. 
- * - * Set mtu to default DPDK value here. - */ - ret = mrvl_mtu_set(dev, dev->data->mtu); - if (ret) - pp2_ppio_disable(priv->ppio); - - return ret; -} - -/** - * DPDK callback to bring the link down. - * - * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_dev_set_link_down(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - - if (!priv->ppio) - return -EPERM; - - return pp2_ppio_disable(priv->ppio); -} - -/** - * DPDK callback to start the device. - * - * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success, negative errno value on failure. - */ -static int -mrvl_dev_start(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - char match[MRVL_MATCH_LEN]; - int ret = 0, def_init_size; - - snprintf(match, sizeof(match), "ppio-%d:%d", - priv->pp_id, priv->ppio_id); - priv->ppio_params.match = match; - - /* - * Calculate the minimum bpool size for refill feature as follows: - * 2 default burst sizes multiply by number of rx queues. - * If the bpool size will be below this value, new buffers will - * be added to the pool. - */ - priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2; - - /* In case initial bpool size configured in queues setup is - * smaller than minimum size add more buffers - */ - def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2; - if (priv->bpool_init_size < def_init_size) { - int buffs_to_add = def_init_size - priv->bpool_init_size; - - priv->bpool_init_size += buffs_to_add; - ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add); - if (ret) - RTE_LOG(ERR, PMD, "Failed to add buffers to bpool\n"); - } - - /* - * Calculate the maximum bpool size for refill feature as follows: - * maximum number of descriptors in rx queue multiply by number - * of rx queues plus minimum bpool size. - * In case the bpool size will exceed this value, superfluous buffers - * will be removed - */ - priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) + - priv->bpool_min_size; - - ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio); - if (ret) { - RTE_LOG(ERR, PMD, "Failed to init ppio\n"); - return ret; - } - - /* - * In case there are some some stale uc/mc mac addresses flush them - * here. It cannot be done during mrvl_dev_close() as port information - * is already gone at that point (due to pp2_ppio_deinit() in - * mrvl_dev_stop()). - */ - if (!priv->uc_mc_flushed) { - ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1); - if (ret) { - RTE_LOG(ERR, PMD, - "Failed to flush uc/mc filter list\n"); - goto out; - } - priv->uc_mc_flushed = 1; - } - - if (!priv->vlan_flushed) { - ret = pp2_ppio_flush_vlan(priv->ppio); - if (ret) { - RTE_LOG(ERR, PMD, "Failed to flush vlan list\n"); - /* - * TODO - * once pp2_ppio_flush_vlan() is supported jump to out - * goto out; - */ - } - priv->vlan_flushed = 1; - } - - /* For default QoS config, don't start classifier. */ - if (mrvl_qos_cfg) { - ret = mrvl_start_qos_mapping(priv); - if (ret) { - RTE_LOG(ERR, PMD, "Failed to setup QoS mapping\n"); - goto out; - } - } - - ret = mrvl_dev_set_link_up(dev); - if (ret) { - RTE_LOG(ERR, PMD, "Failed to set link up\n"); - goto out; - } - - return 0; -out: - RTE_LOG(ERR, PMD, "Failed to start device\n"); - pp2_ppio_deinit(priv->ppio); - return ret; -} - -/** - * Flush receive queues. - * - * @param dev - * Pointer to Ethernet device structure. 
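- *
- * Calls pp2_ppio_recv() on every queue's TC/in-queue pair until it
- * reports zero packets, so that in-flight descriptors are drained
- * before the port is stopped.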
- */ -static void -mrvl_flush_rx_queues(struct rte_eth_dev *dev) -{ - int i; - - RTE_LOG(INFO, PMD, "Flushing rx queues\n"); - for (i = 0; i < dev->data->nb_rx_queues; i++) { - int ret, num; - - do { - struct mrvl_rxq *q = dev->data->rx_queues[i]; - struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX]; - - num = MRVL_PP2_RXD_MAX; - ret = pp2_ppio_recv(q->priv->ppio, - q->priv->rxq_map[q->queue_id].tc, - q->priv->rxq_map[q->queue_id].inq, - descs, (uint16_t *)&num); - } while (ret == 0 && num); - } -} - -/** - * Flush transmit shadow queues. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev) -{ - int i, j; - struct mrvl_txq *txq; - - RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n"); - for (i = 0; i < dev->data->nb_tx_queues; i++) { - txq = (struct mrvl_txq *)dev->data->tx_queues[i]; - - for (j = 0; j < RTE_MAX_LCORE; j++) { - struct mrvl_shadow_txq *sq; - - if (!hifs[j]) - continue; - - sq = &txq->shadow_txqs[j]; - mrvl_free_sent_buffers(txq->priv->ppio, - hifs[j], j, sq, txq->queue_id, 1); - while (sq->tail != sq->head) { - uint64_t addr = cookie_addr_high | - sq->ent[sq->tail].buff.cookie; - rte_pktmbuf_free( - (struct rte_mbuf *)addr); - sq->tail = (sq->tail + 1) & - MRVL_PP2_TX_SHADOWQ_MASK; - } - memset(sq, 0, sizeof(*sq)); - } - } -} - -/** - * Flush hardware bpool (buffer-pool). - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_flush_bpool(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - struct pp2_hif *hif; - uint32_t num; - int ret; - unsigned int core_id = rte_lcore_id(); - - if (core_id == LCORE_ID_ANY) - core_id = 0; - - hif = mrvl_get_hif(priv, core_id); - - ret = pp2_bpool_get_num_buffs(priv->bpool, &num); - if (ret) { - RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n"); - return; - } - - while (num--) { - struct pp2_buff_inf inf; - uint64_t addr; - - ret = pp2_bpool_get_buff(hif, priv->bpool, &inf); - if (ret) - break; - - addr = cookie_addr_high | inf.cookie; - rte_pktmbuf_free((struct rte_mbuf *)addr); - } -} - -/** - * DPDK callback to stop the device. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_dev_stop(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - - mrvl_dev_set_link_down(dev); - mrvl_flush_rx_queues(dev); - mrvl_flush_tx_shadow_queues(dev); - if (priv->qos_tbl) { - pp2_cls_qos_tbl_deinit(priv->qos_tbl); - priv->qos_tbl = NULL; - } - pp2_ppio_deinit(priv->ppio); - priv->ppio = NULL; -} - -/** - * DPDK callback to close the device. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_dev_close(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - size_t i; - - for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) { - struct pp2_ppio_tc_params *tc_params = - &priv->ppio_params.inqs_params.tcs_params[i]; - - if (tc_params->inqs_params) { - rte_free(tc_params->inqs_params); - tc_params->inqs_params = NULL; - } - } - - mrvl_flush_bpool(dev); -} - -/** - * DPDK callback to retrieve physical link information. - * - * @param dev - * Pointer to Ethernet device structure. - * @param wait_to_complete - * Wait for request completion (ignored). - * - * @return - * 0 on success, negative error value otherwise. 
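- *
- * As the TODO below notes, MUSDK offers no link-speed API at this
- * point, so speed/duplex/autoneg are read via the kernel ETHTOOL_GSET
- * ioctl on the matching netdev; only the up/down state comes from
- * pp2_ppio_get_link_state().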
- */ -static int -mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) -{ - /* - * TODO - * once MUSDK provides necessary API use it here - */ - struct mrvl_priv *priv = dev->data->dev_private; - struct ethtool_cmd edata; - struct ifreq req; - int ret, fd, link_up; - - if (!priv->ppio) - return -EPERM; - - edata.cmd = ETHTOOL_GSET; - - strcpy(req.ifr_name, dev->data->name); - req.ifr_data = (void *)&edata; - - fd = socket(AF_INET, SOCK_DGRAM, 0); - if (fd == -1) - return -EFAULT; - - ret = ioctl(fd, SIOCETHTOOL, &req); - if (ret == -1) { - close(fd); - return -EFAULT; - } - - close(fd); - - switch (ethtool_cmd_speed(&edata)) { - case SPEED_10: - dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M; - break; - case SPEED_100: - dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M; - break; - case SPEED_1000: - dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G; - break; - case SPEED_10000: - dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G; - break; - default: - dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE; - } - - dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX : - ETH_LINK_HALF_DUPLEX; - dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG : - ETH_LINK_FIXED; - pp2_ppio_get_link_state(priv->ppio, &link_up); - dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN; - - return 0; -} - -/** - * DPDK callback to enable promiscuous mode. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_promiscuous_enable(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int ret; - - if (!priv->ppio) - return; - - ret = pp2_ppio_set_promisc(priv->ppio, 1); - if (ret) - RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n"); -} - -/** - * DPDK callback to enable allmulti mode. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_allmulticast_enable(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int ret; - - if (!priv->ppio) - return; - - ret = pp2_ppio_set_mc_promisc(priv->ppio, 1); - if (ret) - RTE_LOG(ERR, PMD, "Failed enable all-multicast mode\n"); -} - -/** - * DPDK callback to disable promiscuous mode. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_promiscuous_disable(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int ret; - - if (!priv->ppio) - return; - - ret = pp2_ppio_set_promisc(priv->ppio, 0); - if (ret) - RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n"); -} - -/** - * DPDK callback to disable allmulticast mode. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_allmulticast_disable(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int ret; - - if (!priv->ppio) - return; - - ret = pp2_ppio_set_mc_promisc(priv->ppio, 0); - if (ret) - RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n"); -} - -/** - * DPDK callback to remove a MAC address. - * - * @param dev - * Pointer to Ethernet device structure. - * @param index - * MAC address index. 
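- *
- * Index 0 holds the primary MAC address and is managed through
- * mrvl_mac_addr_set() rather than the add/remove callbacks (see
- * mrvl_mac_addr_add() below).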
- */ -static void -mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) -{ - struct mrvl_priv *priv = dev->data->dev_private; - char buf[ETHER_ADDR_FMT_SIZE]; - int ret; - - if (!priv->ppio) - return; - - ret = pp2_ppio_remove_mac_addr(priv->ppio, - dev->data->mac_addrs[index].addr_bytes); - if (ret) { - ether_format_addr(buf, sizeof(buf), - &dev->data->mac_addrs[index]); - RTE_LOG(ERR, PMD, "Failed to remove mac %s\n", buf); - } -} - -/** - * DPDK callback to add a MAC address. - * - * @param dev - * Pointer to Ethernet device structure. - * @param mac_addr - * MAC address to register. - * @param index - * MAC address index. - * @param vmdq - * VMDq pool index to associate address with (unused). - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, - uint32_t index, uint32_t vmdq __rte_unused) -{ - struct mrvl_priv *priv = dev->data->dev_private; - char buf[ETHER_ADDR_FMT_SIZE]; - int ret; - - if (index == 0) - /* For setting index 0, mrvl_mac_addr_set() should be used.*/ - return -1; - - if (!priv->ppio) - return 0; - - /* - * Maximum number of uc addresses can be tuned via kernel module mvpp2x - * parameter uc_filter_max. Maximum number of mc addresses is then - * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and - * 21 respectively. - * - * If more than uc_filter_max uc addresses were added to filter list - * then NIC will switch to promiscuous mode automatically. - * - * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max number mc addresses - * were added to filter list then NIC will switch to all-multicast mode - * automatically. - */ - ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes); - if (ret) { - ether_format_addr(buf, sizeof(buf), mac_addr); - RTE_LOG(ERR, PMD, "Failed to add mac %s\n", buf); - return -1; - } - - return 0; -} - -/** - * DPDK callback to set the primary MAC address. - * - * @param dev - * Pointer to Ethernet device structure. - * @param mac_addr - * MAC address to register. - */ -static void -mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int ret; - - if (!priv->ppio) - return; - - ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes); - if (ret) { - char buf[ETHER_ADDR_FMT_SIZE]; - ether_format_addr(buf, sizeof(buf), mac_addr); - RTE_LOG(ERR, PMD, "Failed to set mac to %s\n", buf); - } -} - -/** - * DPDK callback to get device statistics. - * - * @param dev - * Pointer to Ethernet device structure. - * @param stats - * Stats structure output buffer. - * - * @return - * 0 on success, negative error value otherwise. 
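- *
- * Aggregation sketch: per-queue counters come from the inq/outq
- * statistics and port totals from pp2_ppio_get_statistics(); frames
- * dropped on MAC errors (drop_mac) are subtracted from ipackets and
- * reported as ierrors instead.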
- */ -static int -mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) -{ - struct mrvl_priv *priv = dev->data->dev_private; - struct pp2_ppio_statistics ppio_stats; - uint64_t drop_mac = 0; - unsigned int i, idx, ret; - - if (!priv->ppio) - return -EPERM; - - for (i = 0; i < dev->data->nb_rx_queues; i++) { - struct mrvl_rxq *rxq = dev->data->rx_queues[i]; - struct pp2_ppio_inq_statistics rx_stats; - - if (!rxq) - continue; - - idx = rxq->queue_id; - if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) { - RTE_LOG(ERR, PMD, - "rx queue %d stats out of range (0 - %d)\n", - idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); - continue; - } - - ret = pp2_ppio_inq_get_statistics(priv->ppio, - priv->rxq_map[idx].tc, - priv->rxq_map[idx].inq, - &rx_stats, 0); - if (unlikely(ret)) { - RTE_LOG(ERR, PMD, - "Failed to update rx queue %d stats\n", idx); - break; - } - - stats->q_ibytes[idx] = rxq->bytes_recv; - stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac; - stats->q_errors[idx] = rx_stats.drop_early + - rx_stats.drop_fullq + - rx_stats.drop_bm + - rxq->drop_mac; - stats->ibytes += rxq->bytes_recv; - drop_mac += rxq->drop_mac; - } - - for (i = 0; i < dev->data->nb_tx_queues; i++) { - struct mrvl_txq *txq = dev->data->tx_queues[i]; - struct pp2_ppio_outq_statistics tx_stats; - - if (!txq) - continue; - - idx = txq->queue_id; - if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) { - RTE_LOG(ERR, PMD, - "tx queue %d stats out of range (0 - %d)\n", - idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); - } - - ret = pp2_ppio_outq_get_statistics(priv->ppio, idx, - &tx_stats, 0); - if (unlikely(ret)) { - RTE_LOG(ERR, PMD, - "Failed to update tx queue %d stats\n", idx); - break; - } - - stats->q_opackets[idx] = tx_stats.deq_desc; - stats->q_obytes[idx] = txq->bytes_sent; - stats->obytes += txq->bytes_sent; - } - - ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0); - if (unlikely(ret)) { - RTE_LOG(ERR, PMD, "Failed to update port statistics\n"); - return ret; - } - - stats->ipackets += ppio_stats.rx_packets - drop_mac; - stats->opackets += ppio_stats.tx_packets; - stats->imissed += ppio_stats.rx_fullq_dropped + - ppio_stats.rx_bm_dropped + - ppio_stats.rx_early_dropped + - ppio_stats.rx_fifo_dropped + - ppio_stats.rx_cls_dropped; - stats->ierrors = drop_mac; - - return 0; -} - -/** - * DPDK callback to clear device statistics. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_stats_reset(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int i; - - if (!priv->ppio) - return; - - for (i = 0; i < dev->data->nb_rx_queues; i++) { - struct mrvl_rxq *rxq = dev->data->rx_queues[i]; - - pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc, - priv->rxq_map[i].inq, NULL, 1); - rxq->bytes_recv = 0; - rxq->drop_mac = 0; - } - - for (i = 0; i < dev->data->nb_tx_queues; i++) { - struct mrvl_txq *txq = dev->data->tx_queues[i]; - - pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1); - txq->bytes_sent = 0; - } - - pp2_ppio_get_statistics(priv->ppio, NULL, 1); -} - -/** - * DPDK callback to get information about the device. - * - * @param dev - * Pointer to Ethernet device structure (unused). - * @param info - * Info structure output buffer. 
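- *
- * Reports fixed capability limits (the MRVL_PP2_* descriptor limits
- * and the MRVL_RX_OFFLOADS/MRVL_TX_OFFLOADS masks), which is why the
- * dev argument is unused.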
- */ -static void -mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused, - struct rte_eth_dev_info *info) -{ - info->speed_capa = ETH_LINK_SPEED_10M | - ETH_LINK_SPEED_100M | - ETH_LINK_SPEED_1G | - ETH_LINK_SPEED_10G; - - info->max_rx_queues = MRVL_PP2_RXQ_MAX; - info->max_tx_queues = MRVL_PP2_TXQ_MAX; - info->max_mac_addrs = MRVL_MAC_ADDRS_MAX; - - info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX; - info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN; - info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN; - - info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX; - info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN; - info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN; - - info->rx_offload_capa = MRVL_RX_OFFLOADS; - info->rx_queue_offload_capa = MRVL_RX_OFFLOADS; - - info->tx_offload_capa = MRVL_TX_OFFLOADS; - info->tx_queue_offload_capa = MRVL_TX_OFFLOADS; - - info->flow_type_rss_offloads = ETH_RSS_IPV4 | - ETH_RSS_NONFRAG_IPV4_TCP | - ETH_RSS_NONFRAG_IPV4_UDP; - - /* By default packets are dropped if no descriptors are available */ - info->default_rxconf.rx_drop_en = 1; - info->default_rxconf.offloads = DEV_RX_OFFLOAD_CRC_STRIP; - - info->max_rx_pktlen = MRVL_PKT_SIZE_MAX; -} - -/** - * Return supported packet types. - * - * @param dev - * Pointer to Ethernet device structure (unused). - * - * @return - * Const pointer to the table with supported packet types. - */ -static const uint32_t * -mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) -{ - static const uint32_t ptypes[] = { - RTE_PTYPE_L2_ETHER, - RTE_PTYPE_L3_IPV4, - RTE_PTYPE_L3_IPV4_EXT, - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, - RTE_PTYPE_L3_IPV6, - RTE_PTYPE_L3_IPV6_EXT, - RTE_PTYPE_L2_ETHER_ARP, - RTE_PTYPE_L4_TCP, - RTE_PTYPE_L4_UDP - }; - - return ptypes; -} - -/** - * DPDK callback to get information about specific receive queue. - * - * @param dev - * Pointer to Ethernet device structure. - * @param rx_queue_id - * Receive queue index. - * @param qinfo - * Receive queue information structure. - */ -static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, - struct rte_eth_rxq_info *qinfo) -{ - struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id]; - struct mrvl_priv *priv = dev->data->dev_private; - int inq = priv->rxq_map[rx_queue_id].inq; - int tc = priv->rxq_map[rx_queue_id].tc; - struct pp2_ppio_tc_params *tc_params = - &priv->ppio_params.inqs_params.tcs_params[tc]; - - qinfo->mp = q->mp; - qinfo->nb_desc = tc_params->inqs_params[inq].size; -} - -/** - * DPDK callback to get information about specific transmit queue. - * - * @param dev - * Pointer to Ethernet device structure. - * @param tx_queue_id - * Transmit queue index. - * @param qinfo - * Transmit queue information structure. - */ -static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, - struct rte_eth_txq_info *qinfo) -{ - struct mrvl_priv *priv = dev->data->dev_private; - - qinfo->nb_desc = - priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size; -} - -/** - * DPDK callback to Configure a VLAN filter. - * - * @param dev - * Pointer to Ethernet device structure. - * @param vlan_id - * VLAN ID to filter. - * @param on - * Toggle filter. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) -{ - struct mrvl_priv *priv = dev->data->dev_private; - - if (!priv->ppio) - return -EPERM; - - return on ? 
pp2_ppio_add_vlan(priv->ppio, vlan_id) : - pp2_ppio_remove_vlan(priv->ppio, vlan_id); -} - -/** - * Release buffers to hardware bpool (buffer-pool) - * - * @param rxq - * Receive queue pointer. - * @param num - * Number of buffers to release to bpool. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_fill_bpool(struct mrvl_rxq *rxq, int num) -{ - struct buff_release_entry entries[MRVL_PP2_TXD_MAX]; - struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX]; - int i, ret; - unsigned int core_id; - struct pp2_hif *hif; - struct pp2_bpool *bpool; - - core_id = rte_lcore_id(); - if (core_id == LCORE_ID_ANY) - core_id = 0; - - hif = mrvl_get_hif(rxq->priv, core_id); - if (!hif) - return -1; - - bpool = rxq->priv->bpool; - - ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num); - if (ret) - return ret; - - if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID) - cookie_addr_high = - (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK; - - for (i = 0; i < num; i++) { - if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK) - != cookie_addr_high) { - RTE_LOG(ERR, PMD, - "mbuf virtual addr high 0x%lx out of range\n", - (uint64_t)mbufs[i] >> 32); - goto out; - } - - entries[i].buff.addr = - rte_mbuf_data_iova_default(mbufs[i]); - entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i]; - entries[i].bpool = bpool; - } - - pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i); - mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i; - - if (i != num) - goto out; - - return 0; -out: - for (; i < num; i++) - rte_pktmbuf_free(mbufs[i]); - - return -1; -} - -/** - * Check whether requested rx queue offloads match port offloads. - * - * @param - * dev Pointer to the device. - * @param - * requested Bitmap of the requested offloads. - * - * @return - * 1 if requested offloads are okay, 0 otherwise. - */ -static int -mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested) -{ - uint64_t mandatory = dev->data->dev_conf.rxmode.offloads; - uint64_t supported = MRVL_RX_OFFLOADS; - uint64_t unsupported = requested & ~supported; - uint64_t missing = mandatory & ~requested; - - if (unsupported) { - RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. " - "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n", - requested, supported); - return 0; - } - - if (missing) { - RTE_LOG(ERR, PMD, "Some Rx offloads are missing. " - "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n", - requested, missing); - return 0; - } - - return 1; -} - -/** - * DPDK callback to configure the receive queue. - * - * @param dev - * Pointer to Ethernet device structure. - * @param idx - * RX queue index. - * @param desc - * Number of descriptors to configure in queue. - * @param socket - * NUMA socket on which memory must be allocated. - * @param conf - * Thresholds parameters. - * @param mp - * Memory pool for buffer allocations. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, - unsigned int socket, - const struct rte_eth_rxconf *conf, - struct rte_mempool *mp) -{ - struct mrvl_priv *priv = dev->data->dev_private; - struct mrvl_rxq *rxq; - uint32_t min_size, - max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; - int ret, tc, inq; - - if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads)) - return -ENOTSUP; - - if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) { - /* - * Unknown TC mapping, mapping will not have a correct queue. 
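- * The rxq-to-TC/in-queue mapping presumably comes from the QoS
- * configuration applied by mrvl_configure_rxqs(); without it the
- * queue cannot be bound to hardware, hence the -EFAULT below.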
- */ - RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n", - idx, priv->ppio_id); - return -EFAULT; - } - - min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM - - MRVL_PKT_EFFEC_OFFS; - if (min_size < max_rx_pkt_len) { - RTE_LOG(ERR, PMD, - "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n", - max_rx_pkt_len + RTE_PKTMBUF_HEADROOM + - MRVL_PKT_EFFEC_OFFS, - max_rx_pkt_len); - return -EINVAL; - } - - if (dev->data->rx_queues[idx]) { - rte_free(dev->data->rx_queues[idx]); - dev->data->rx_queues[idx] = NULL; - } - - rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket); - if (!rxq) - return -ENOMEM; - - rxq->priv = priv; - rxq->mp = mp; - rxq->cksum_enabled = - dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM; - rxq->queue_id = idx; - rxq->port_id = dev->data->port_id; - mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool; - - tc = priv->rxq_map[rxq->queue_id].tc, - inq = priv->rxq_map[rxq->queue_id].inq; - priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size = - desc; - - ret = mrvl_fill_bpool(rxq, desc); - if (ret) { - rte_free(rxq); - return ret; - } - - priv->bpool_init_size += desc; - - dev->data->rx_queues[idx] = rxq; - - return 0; -} - -/** - * DPDK callback to release the receive queue. - * - * @param rxq - * Generic receive queue pointer. - */ -static void -mrvl_rx_queue_release(void *rxq) -{ - struct mrvl_rxq *q = rxq; - struct pp2_ppio_tc_params *tc_params; - int i, num, tc, inq; - struct pp2_hif *hif; - unsigned int core_id = rte_lcore_id(); - - if (core_id == LCORE_ID_ANY) - core_id = 0; - - hif = mrvl_get_hif(q->priv, core_id); - - if (!q || !hif) - return; - - tc = q->priv->rxq_map[q->queue_id].tc; - inq = q->priv->rxq_map[q->queue_id].inq; - tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc]; - num = tc_params->inqs_params[inq].size; - for (i = 0; i < num; i++) { - struct pp2_buff_inf inf; - uint64_t addr; - - pp2_bpool_get_buff(hif, q->priv->bpool, &inf); - addr = cookie_addr_high | inf.cookie; - rte_pktmbuf_free((struct rte_mbuf *)addr); - } - - rte_free(q); -} - -/** - * Check whether requested tx queue offloads match port offloads. - * - * @param - * dev Pointer to the device. - * @param - * requested Bitmap of the requested offloads. - * - * @return - * 1 if requested offloads are okay, 0 otherwise. - */ -static int -mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested) -{ - uint64_t mandatory = dev->data->dev_conf.txmode.offloads; - uint64_t supported = MRVL_TX_OFFLOADS; - uint64_t unsupported = requested & ~supported; - uint64_t missing = mandatory & ~requested; - - if (unsupported) { - RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. " - "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n", - requested, supported); - return 0; - } - - if (missing) { - RTE_LOG(ERR, PMD, "Some Rx offloads are missing. " - "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n", - requested, missing); - return 0; - } - - return 1; -} - -/** - * DPDK callback to configure the transmit queue. - * - * @param dev - * Pointer to Ethernet device structure. - * @param idx - * Transmit queue index. - * @param desc - * Number of descriptors to configure in the queue. - * @param socket - * NUMA socket on which memory must be allocated. - * @param conf - * Thresholds parameters. - * - * @return - * 0 on success, negative error value otherwise. 
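- *
- * Only bookkeeping happens at this stage: the requested descriptor
- * count and a default weight are recorded in ppio_params and take
- * effect when pp2_ppio_init() runs during mrvl_dev_start().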
- */ -static int -mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, - unsigned int socket, - const struct rte_eth_txconf *conf) -{ - struct mrvl_priv *priv = dev->data->dev_private; - struct mrvl_txq *txq; - - if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads)) - return -ENOTSUP; - - if (dev->data->tx_queues[idx]) { - rte_free(dev->data->tx_queues[idx]); - dev->data->tx_queues[idx] = NULL; - } - - txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket); - if (!txq) - return -ENOMEM; - - txq->priv = priv; - txq->queue_id = idx; - txq->port_id = dev->data->port_id; - dev->data->tx_queues[idx] = txq; - - priv->ppio_params.outqs_params.outqs_params[idx].size = desc; - priv->ppio_params.outqs_params.outqs_params[idx].weight = 1; - - return 0; -} - -/** - * DPDK callback to release the transmit queue. - * - * @param txq - * Generic transmit queue pointer. - */ -static void -mrvl_tx_queue_release(void *txq) -{ - struct mrvl_txq *q = txq; - - if (!q) - return; - - rte_free(q); -} - -/** - * Update RSS hash configuration - * - * @param dev - * Pointer to Ethernet device structure. - * @param rss_conf - * Pointer to RSS configuration. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_rss_hash_update(struct rte_eth_dev *dev, - struct rte_eth_rss_conf *rss_conf) -{ - struct mrvl_priv *priv = dev->data->dev_private; - - return mrvl_configure_rss(priv, rss_conf); -} - -/** - * DPDK callback to get RSS hash configuration. - * - * @param dev - * Pointer to Ethernet device structure. - * @rss_conf - * Pointer to RSS configuration. - * - * @return - * Always 0. - */ -static int -mrvl_rss_hash_conf_get(struct rte_eth_dev *dev, - struct rte_eth_rss_conf *rss_conf) -{ - struct mrvl_priv *priv = dev->data->dev_private; - enum pp2_ppio_hash_type hash_type = - priv->ppio_params.inqs_params.hash_type; - - rss_conf->rss_key = NULL; - - if (hash_type == PP2_PPIO_HASH_T_NONE) - rss_conf->rss_hf = 0; - else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE) - rss_conf->rss_hf = ETH_RSS_IPV4; - else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp) - rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP; - else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp) - rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP; - - return 0; -} - -static const struct eth_dev_ops mrvl_ops = { - .dev_configure = mrvl_dev_configure, - .dev_start = mrvl_dev_start, - .dev_stop = mrvl_dev_stop, - .dev_set_link_up = mrvl_dev_set_link_up, - .dev_set_link_down = mrvl_dev_set_link_down, - .dev_close = mrvl_dev_close, - .link_update = mrvl_link_update, - .promiscuous_enable = mrvl_promiscuous_enable, - .allmulticast_enable = mrvl_allmulticast_enable, - .promiscuous_disable = mrvl_promiscuous_disable, - .allmulticast_disable = mrvl_allmulticast_disable, - .mac_addr_remove = mrvl_mac_addr_remove, - .mac_addr_add = mrvl_mac_addr_add, - .mac_addr_set = mrvl_mac_addr_set, - .mtu_set = mrvl_mtu_set, - .stats_get = mrvl_stats_get, - .stats_reset = mrvl_stats_reset, - .dev_infos_get = mrvl_dev_infos_get, - .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get, - .rxq_info_get = mrvl_rxq_info_get, - .txq_info_get = mrvl_txq_info_get, - .vlan_filter_set = mrvl_vlan_filter_set, - .rx_queue_setup = mrvl_rx_queue_setup, - .rx_queue_release = mrvl_rx_queue_release, - .tx_queue_setup = mrvl_tx_queue_setup, - .tx_queue_release = mrvl_tx_queue_release, - .rss_hash_update = mrvl_rss_hash_update, - .rss_hash_conf_get = mrvl_rss_hash_conf_get, -}; - -/** - * Return packet type information and 
l3/l4 offsets. - * - * @param desc - * Pointer to the received packet descriptor. - * @param l3_offset - * l3 packet offset. - * @param l4_offset - * l4 packet offset. - * - * @return - * Packet type information. - */ -static inline uint64_t -mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc, - uint8_t *l3_offset, uint8_t *l4_offset) -{ - enum pp2_inq_l3_type l3_type; - enum pp2_inq_l4_type l4_type; - uint64_t packet_type; - - pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset); - pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset); - - packet_type = RTE_PTYPE_L2_ETHER; - - switch (l3_type) { - case PP2_INQ_L3_TYPE_IPV4_NO_OPTS: - packet_type |= RTE_PTYPE_L3_IPV4; - break; - case PP2_INQ_L3_TYPE_IPV4_OK: - packet_type |= RTE_PTYPE_L3_IPV4_EXT; - break; - case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO: - packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; - break; - case PP2_INQ_L3_TYPE_IPV6_NO_EXT: - packet_type |= RTE_PTYPE_L3_IPV6; - break; - case PP2_INQ_L3_TYPE_IPV6_EXT: - packet_type |= RTE_PTYPE_L3_IPV6_EXT; - break; - case PP2_INQ_L3_TYPE_ARP: - packet_type |= RTE_PTYPE_L2_ETHER_ARP; - /* - * In case of ARP l4_offset is set to wrong value. - * Set it to proper one so that later on mbuf->l3_len can be - * calculated subtracting l4_offset and l3_offset. - */ - *l4_offset = *l3_offset + MRVL_ARP_LENGTH; - break; - default: - RTE_LOG(DEBUG, PMD, "Failed to recognise l3 packet type\n"); - break; - } - - switch (l4_type) { - case PP2_INQ_L4_TYPE_TCP: - packet_type |= RTE_PTYPE_L4_TCP; - break; - case PP2_INQ_L4_TYPE_UDP: - packet_type |= RTE_PTYPE_L4_UDP; - break; - default: - RTE_LOG(DEBUG, PMD, "Failed to recognise l4 packet type\n"); - break; - } - - return packet_type; -} - -/** - * Get offload information from the received packet descriptor. - * - * @param desc - * Pointer to the received packet descriptor. - * - * @return - * Mbuf offload flags. - */ -static inline uint64_t -mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc) -{ - uint64_t flags; - enum pp2_inq_desc_status status; - - status = pp2_ppio_inq_desc_get_l3_pkt_error(desc); - if (unlikely(status != PP2_DESC_ERR_OK)) - flags = PKT_RX_IP_CKSUM_BAD; - else - flags = PKT_RX_IP_CKSUM_GOOD; - - status = pp2_ppio_inq_desc_get_l4_pkt_error(desc); - if (unlikely(status != PP2_DESC_ERR_OK)) - flags |= PKT_RX_L4_CKSUM_BAD; - else - flags |= PKT_RX_L4_CKSUM_GOOD; - - return flags; -} - -/** - * DPDK callback for receive. - * - * @param rxq - * Generic pointer to the receive queue. - * @param rx_pkts - * Array to store received packets. - * @param nb_pkts - * Maximum number of packets in array. - * - * @return - * Number of packets successfully received. 
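- *
- * Cookie handling sketch: each descriptor carries only the low bits
- * of the mbuf virtual address; ORing them with the constant
- * cookie_addr_high captured in mrvl_fill_bpool() rebuilds the full
- * mbuf pointer, which relies on all mbufs sharing the same high
- * address bits (enforced when the bpool is filled).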
- */ -static uint16_t -mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) -{ - struct mrvl_rxq *q = rxq; - struct pp2_ppio_desc descs[nb_pkts]; - struct pp2_bpool *bpool; - int i, ret, rx_done = 0; - int num; - struct pp2_hif *hif; - unsigned int core_id = rte_lcore_id(); - - hif = mrvl_get_hif(q->priv, core_id); - - if (unlikely(!q->priv->ppio || !hif)) - return 0; - - bpool = q->priv->bpool; - - ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc, - q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts); - if (unlikely(ret < 0)) { - RTE_LOG(ERR, PMD, "Failed to receive packets\n"); - return 0; - } - mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts; - - for (i = 0; i < nb_pkts; i++) { - struct rte_mbuf *mbuf; - uint8_t l3_offset, l4_offset; - enum pp2_inq_desc_status status; - uint64_t addr; - - if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { - struct pp2_ppio_desc *pref_desc; - u64 pref_addr; - - pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT]; - pref_addr = cookie_addr_high | - pp2_ppio_inq_desc_get_cookie(pref_desc); - rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr)); - rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr)); - } - - addr = cookie_addr_high | - pp2_ppio_inq_desc_get_cookie(&descs[i]); - mbuf = (struct rte_mbuf *)addr; - rte_pktmbuf_reset(mbuf); - - /* drop packet in case of mac, overrun or resource error */ - status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]); - if (unlikely(status != PP2_DESC_ERR_OK)) { - struct pp2_buff_inf binf = { - .addr = rte_mbuf_data_iova_default(mbuf), - .cookie = (pp2_cookie_t)(uint64_t)mbuf, - }; - - pp2_bpool_put_buff(hif, bpool, &binf); - mrvl_port_bpool_size - [bpool->pp2_id][bpool->id][core_id]++; - q->drop_mac++; - continue; - } - - mbuf->data_off += MRVL_PKT_EFFEC_OFFS; - mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]); - mbuf->data_len = mbuf->pkt_len; - mbuf->port = q->port_id; - mbuf->packet_type = - mrvl_desc_to_packet_type_and_offset(&descs[i], - &l3_offset, - &l4_offset); - mbuf->l2_len = l3_offset; - mbuf->l3_len = l4_offset - l3_offset; - - if (likely(q->cksum_enabled)) - mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]); - - rx_pkts[rx_done++] = mbuf; - q->bytes_recv += mbuf->pkt_len; - } - - if (rte_spinlock_trylock(&q->priv->lock) == 1) { - num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id); - - if (unlikely(num <= q->priv->bpool_min_size || - (!rx_done && num < q->priv->bpool_init_size))) { - ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE); - if (ret) - RTE_LOG(ERR, PMD, "Failed to fill bpool\n"); - } else if (unlikely(num > q->priv->bpool_max_size)) { - int i; - int pkt_to_remove = num - q->priv->bpool_init_size; - struct rte_mbuf *mbuf; - struct pp2_buff_inf buff; - - RTE_LOG(DEBUG, PMD, - "\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n", - bpool->pp2_id, q->priv->ppio->port_id, - bpool->id, pkt_to_remove, num, - q->priv->bpool_init_size); - - for (i = 0; i < pkt_to_remove; i++) { - ret = pp2_bpool_get_buff(hif, bpool, &buff); - if (ret) - break; - mbuf = (struct rte_mbuf *) - (cookie_addr_high | buff.cookie); - rte_pktmbuf_free(mbuf); - } - mrvl_port_bpool_size - [bpool->pp2_id][bpool->id][core_id] -= i; - } - rte_spinlock_unlock(&q->priv->lock); - } - - return rx_done; -} - -/** - * Prepare offload information. - * - * @param ol_flags - * Offload flags. - * @param packet_type - * Packet type bitfield. - * @param l3_type - * Pointer to the pp2_ouq_l3_type structure. 
- * @param l4_type - * Pointer to the pp2_outq_l4_type structure. - * @param gen_l3_cksum - * Will be set to 1 in case l3 checksum is computed. - * @param l4_cksum - * Will be set to 1 in case l4 checksum is computed. - * - * @return - * 0 on success, negative error value otherwise. - */ -static inline int -mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type, - enum pp2_outq_l3_type *l3_type, - enum pp2_outq_l4_type *l4_type, - int *gen_l3_cksum, - int *gen_l4_cksum) -{ - /* - * Based on ol_flags prepare information - * for pp2_ppio_outq_desc_set_proto_info() which setups descriptor - * for offloading. - */ - if (ol_flags & PKT_TX_IPV4) { - *l3_type = PP2_OUTQ_L3_TYPE_IPV4; - *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0; - } else if (ol_flags & PKT_TX_IPV6) { - *l3_type = PP2_OUTQ_L3_TYPE_IPV6; - /* no checksum for ipv6 header */ - *gen_l3_cksum = 0; - } else { - /* if something different then stop processing */ - return -1; - } - - ol_flags &= PKT_TX_L4_MASK; - if ((packet_type & RTE_PTYPE_L4_TCP) && - ol_flags == PKT_TX_TCP_CKSUM) { - *l4_type = PP2_OUTQ_L4_TYPE_TCP; - *gen_l4_cksum = 1; - } else if ((packet_type & RTE_PTYPE_L4_UDP) && - ol_flags == PKT_TX_UDP_CKSUM) { - *l4_type = PP2_OUTQ_L4_TYPE_UDP; - *gen_l4_cksum = 1; - } else { - *l4_type = PP2_OUTQ_L4_TYPE_OTHER; - /* no checksum for other type */ - *gen_l4_cksum = 0; - } - - return 0; -} - -/** - * Release already sent buffers to bpool (buffer-pool). - * - * @param ppio - * Pointer to the port structure. - * @param hif - * Pointer to the MUSDK hardware interface. - * @param sq - * Pointer to the shadow queue. - * @param qid - * Queue id number. - * @param force - * Force releasing packets. - */ -static inline void -mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif, - unsigned int core_id, struct mrvl_shadow_txq *sq, - int qid, int force) -{ - struct buff_release_entry *entry; - uint16_t nb_done = 0, num = 0, skip_bufs = 0; - int i; - - pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done); - - sq->num_to_release += nb_done; - - if (likely(!force && - sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE)) - return; - - nb_done = sq->num_to_release; - sq->num_to_release = 0; - - for (i = 0; i < nb_done; i++) { - entry = &sq->ent[sq->tail + num]; - if (unlikely(!entry->buff.addr)) { - RTE_LOG(ERR, PMD, - "Shadow memory @%d: cookie(%lx), pa(%lx)!\n", - sq->tail, (u64)entry->buff.cookie, - (u64)entry->buff.addr); - skip_bufs = 1; - goto skip; - } - - if (unlikely(!entry->bpool)) { - struct rte_mbuf *mbuf; - - mbuf = (struct rte_mbuf *) - (cookie_addr_high | entry->buff.cookie); - rte_pktmbuf_free(mbuf); - skip_bufs = 1; - goto skip; - } - - mrvl_port_bpool_size - [entry->bpool->pp2_id][entry->bpool->id][core_id]++; - num++; - if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE)) - goto skip; - continue; -skip: - if (likely(num)) - pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num); - num += skip_bufs; - sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; - sq->size -= num; - num = 0; - skip_bufs = 0; - } - - if (likely(num)) { - pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num); - sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; - sq->size -= num; - } -} - -/** - * DPDK callback for transmit. - * - * @param txq - * Generic pointer transmit queue. - * @param tx_pkts - * Packets to transmit. - * @param nb_pkts - * Number of packets in array. - * - * @return - * Number of packets successfully transmitted. 
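- *
- * If pp2_ppio_send() accepts fewer descriptors than were enqueued,
- * the shadow queue head is walked back over the unsent tail so those
- * mbufs are neither released to the bpool later nor counted in
- * bytes_sent.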
- */ -static uint16_t -mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) -{ - struct mrvl_txq *q = txq; - struct mrvl_shadow_txq *sq; - struct pp2_hif *hif; - struct pp2_ppio_desc descs[nb_pkts]; - unsigned int core_id = rte_lcore_id(); - int i, ret, bytes_sent = 0; - uint16_t num, sq_free_size; - uint64_t addr; - - hif = mrvl_get_hif(q->priv, core_id); - sq = &q->shadow_txqs[core_id]; - - if (unlikely(!q->priv->ppio || !hif)) - return 0; - - if (sq->size) - mrvl_free_sent_buffers(q->priv->ppio, hif, core_id, - sq, q->queue_id, 0); - - sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1; - if (unlikely(nb_pkts > sq_free_size)) { - RTE_LOG(DEBUG, PMD, - "No room in shadow queue for %d packets! %d packets will be sent.\n", - nb_pkts, sq_free_size); - nb_pkts = sq_free_size; - } - - for (i = 0; i < nb_pkts; i++) { - struct rte_mbuf *mbuf = tx_pkts[i]; - int gen_l3_cksum, gen_l4_cksum; - enum pp2_outq_l3_type l3_type; - enum pp2_outq_l4_type l4_type; - - if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { - struct rte_mbuf *pref_pkt_hdr; - - pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT]; - rte_mbuf_prefetch_part1(pref_pkt_hdr); - rte_mbuf_prefetch_part2(pref_pkt_hdr); - } - - sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf; - sq->ent[sq->head].buff.addr = - rte_mbuf_data_iova_default(mbuf); - sq->ent[sq->head].bpool = - (unlikely(mbuf->port >= RTE_MAX_ETHPORTS || - mbuf->refcnt > 1)) ? NULL : - mrvl_port_to_bpool_lookup[mbuf->port]; - sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK; - sq->size++; - - pp2_ppio_outq_desc_reset(&descs[i]); - pp2_ppio_outq_desc_set_phys_addr(&descs[i], - rte_pktmbuf_iova(mbuf)); - pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0); - pp2_ppio_outq_desc_set_pkt_len(&descs[i], - rte_pktmbuf_pkt_len(mbuf)); - - bytes_sent += rte_pktmbuf_pkt_len(mbuf); - /* - * in case unsupported ol_flags were passed - * do not update descriptor offload information - */ - ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type, - &l3_type, &l4_type, &gen_l3_cksum, - &gen_l4_cksum); - if (unlikely(ret)) - continue; - - pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type, - mbuf->l2_len, - mbuf->l2_len + mbuf->l3_len, - gen_l3_cksum, gen_l4_cksum); - } - - num = nb_pkts; - pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts); - /* number of packets that were not sent */ - if (unlikely(num > nb_pkts)) { - for (i = nb_pkts; i < num; i++) { - sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) & - MRVL_PP2_TX_SHADOWQ_MASK; - addr = cookie_addr_high | sq->ent[sq->head].buff.cookie; - bytes_sent -= - rte_pktmbuf_pkt_len((struct rte_mbuf *)addr); - } - sq->size -= num - nb_pkts; - } - - q->bytes_sent += bytes_sent; - - return nb_pkts; -} - -/** - * Initialize packet processor. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_init_pp2(void) -{ - struct pp2_init_params init_params; - - memset(&init_params, 0, sizeof(init_params)); - init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED; - init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED; - init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED; - - return pp2_init(&init_params); -} - -/** - * Deinitialize packet processor. - * - * @return - * 0 on success, negative error value otherwise. - */ -static void -mrvl_deinit_pp2(void) -{ - pp2_deinit(); -} - -/** - * Create private device structure. - * - * @param dev_name - * Pointer to the port name passed in the initialization parameters. 
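Both burst functions rely on power-of-two ring arithmetic for the shadow queue: adding the ring size before subtracting keeps the index non-negative, and the mask replaces a modulo. A minimal sketch with an illustrative size:

    #include <assert.h>

    enum { RING_SIZE = 2048, RING_MASK = RING_SIZE - 1 }; /* power of two */

    static int ring_prev(int idx) { return (RING_SIZE + idx - 1) & RING_MASK; }
    static int ring_next(int idx) { return (idx + 1) & RING_MASK; }

    int main(void)
    {
        assert(ring_prev(0) == RING_SIZE - 1); /* wraps backwards */
        assert(ring_next(RING_SIZE - 1) == 0); /* wraps forwards  */
        return 0;
    }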
- * - * @return - * Pointer to the newly allocated private device structure. - */ -static struct mrvl_priv * -mrvl_priv_create(const char *dev_name) -{ - struct pp2_bpool_params bpool_params; - char match[MRVL_MATCH_LEN]; - struct mrvl_priv *priv; - int ret, bpool_bit; - - priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id()); - if (!priv) - return NULL; - - ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name, - &priv->pp_id, &priv->ppio_id); - if (ret) - goto out_free_priv; - - bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id], - PP2_BPOOL_NUM_POOLS); - if (bpool_bit < 0) - goto out_free_priv; - priv->bpool_bit = bpool_bit; - - snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id, - priv->bpool_bit); - memset(&bpool_params, 0, sizeof(bpool_params)); - bpool_params.match = match; - bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS; - ret = pp2_bpool_init(&bpool_params, &priv->bpool); - if (ret) - goto out_clear_bpool_bit; - - priv->ppio_params.type = PP2_PPIO_T_NIC; - rte_spinlock_init(&priv->lock); - - return priv; -out_clear_bpool_bit: - used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit); -out_free_priv: - rte_free(priv); - return NULL; -} - -/** - * Create device representing Ethernet port. - * - * @param name - * Pointer to the port's name. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name) -{ - int ret, fd = socket(AF_INET, SOCK_DGRAM, 0); - struct rte_eth_dev *eth_dev; - struct mrvl_priv *priv; - struct ifreq req; - - eth_dev = rte_eth_dev_allocate(name); - if (!eth_dev) - return -ENOMEM; - - priv = mrvl_priv_create(name); - if (!priv) { - ret = -ENOMEM; - goto out_free_dev; - } - - eth_dev->data->mac_addrs = - rte_zmalloc("mac_addrs", - ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0); - if (!eth_dev->data->mac_addrs) { - RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n"); - ret = -ENOMEM; - goto out_free_priv; - } - - memset(&req, 0, sizeof(req)); - strcpy(req.ifr_name, name); - ret = ioctl(fd, SIOCGIFHWADDR, &req); - if (ret) - goto out_free_mac; - - memcpy(eth_dev->data->mac_addrs[0].addr_bytes, - req.ifr_addr.sa_data, ETHER_ADDR_LEN); - - eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst; - eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst; - eth_dev->data->kdrv = RTE_KDRV_NONE; - eth_dev->data->dev_private = priv; - eth_dev->device = &vdev->device; - eth_dev->dev_ops = &mrvl_ops; - - return 0; -out_free_mac: - rte_free(eth_dev->data->mac_addrs); -out_free_dev: - rte_eth_dev_release_port(eth_dev); -out_free_priv: - rte_free(priv); - - return ret; -} - -/** - * Cleanup previously created device representing Ethernet port. - * - * @param name - * Pointer to the port name. - */ -static void -mrvl_eth_dev_destroy(const char *name) -{ - struct rte_eth_dev *eth_dev; - struct mrvl_priv *priv; - - eth_dev = rte_eth_dev_allocated(name); - if (!eth_dev) - return; - - priv = eth_dev->data->dev_private; - pp2_bpool_deinit(priv->bpool); - used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit); - rte_free(priv); - rte_free(eth_dev->data->mac_addrs); - rte_eth_dev_release_port(eth_dev); -} - -/** - * Callback used by rte_kvargs_process() during argument parsing. - * - * @param key - * Pointer to the parsed key (unused). - * @param value - * Pointer to the parsed value. - * @param extra_args - * Pointer to the extra arguments which contains address of the - * table of pointers to parsed interface names. - * - * @return - * Always 0. 
- */ -static int -mrvl_get_ifnames(const char *key __rte_unused, const char *value, - void *extra_args) -{ - struct mrvl_ifnames *ifnames = extra_args; - - ifnames->names[ifnames->idx++] = value; - - return 0; -} - -/** - * Deinitialize per-lcore MUSDK hardware interfaces (hifs). - */ -static void -mrvl_deinit_hifs(void) -{ - int i; - - for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) { - if (hifs[i]) - pp2_hif_deinit(hifs[i]); - } - used_hifs = MRVL_MUSDK_HIFS_RESERVED; - memset(hifs, 0, sizeof(hifs)); -} - -/** - * DPDK callback to register the virtual device. - * - * @param vdev - * Pointer to the virtual device. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -rte_pmd_mrvl_probe(struct rte_vdev_device *vdev) -{ - struct rte_kvargs *kvlist; - struct mrvl_ifnames ifnames; - int ret = -EINVAL; - uint32_t i, ifnum, cfgnum; - const char *params; - - params = rte_vdev_device_args(vdev); - if (!params) - return -EINVAL; - - kvlist = rte_kvargs_parse(params, valid_args); - if (!kvlist) - return -EINVAL; - - ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG); - if (ifnum > RTE_DIM(ifnames.names)) - goto out_free_kvlist; - - ifnames.idx = 0; - rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG, - mrvl_get_ifnames, &ifnames); - - - /* - * The below system initialization should be done only once, - * on the first provided configuration file - */ - if (!mrvl_qos_cfg) { - cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG); - RTE_LOG(INFO, PMD, "Parsing config file!\n"); - if (cfgnum > 1) { - RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n"); - goto out_free_kvlist; - } else if (cfgnum == 1) { - rte_kvargs_process(kvlist, MRVL_CFG_ARG, - mrvl_get_qoscfg, &mrvl_qos_cfg); - } - } - - if (mrvl_dev_num) - goto init_devices; - - RTE_LOG(INFO, PMD, "Perform MUSDK initializations\n"); - /* - * ret == -EEXIST is correct, it means DMA - * has been already initialized (by another PMD). - */ - ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE); - if (ret < 0) { - if (ret != -EEXIST) - goto out_free_kvlist; - else - RTE_LOG(INFO, PMD, - "DMA memory has been already initialized by a different driver.\n"); - } - - ret = mrvl_init_pp2(); - if (ret) { - RTE_LOG(ERR, PMD, "Failed to init PP!\n"); - goto out_deinit_dma; - } - - memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size)); - memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup)); - - mrvl_lcore_first = RTE_MAX_LCORE; - mrvl_lcore_last = 0; - -init_devices: - for (i = 0; i < ifnum; i++) { - RTE_LOG(INFO, PMD, "Creating %s\n", ifnames.names[i]); - ret = mrvl_eth_dev_create(vdev, ifnames.names[i]); - if (ret) - goto out_cleanup; - } - mrvl_dev_num += ifnum; - - rte_kvargs_free(kvlist); - - return 0; -out_cleanup: - for (; i > 0; i--) - mrvl_eth_dev_destroy(ifnames.names[i]); - - if (mrvl_dev_num == 0) - mrvl_deinit_pp2(); -out_deinit_dma: - if (mrvl_dev_num == 0) - mv_sys_dma_mem_destroy(); -out_free_kvlist: - rte_kvargs_free(kvlist); - - return ret; -} - -/** - * DPDK callback to remove virtual device. - * - * @param vdev - * Pointer to the removed virtual device. - * - * @return - * 0 on success, negative error value otherwise. 
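The probe path above uses the stock rte_kvargs helpers. A trimmed sketch of the same parse flow (error handling shortened; parse_iface_count is a hypothetical name); an EAL invocation exercising it could look like --vdev=net_mrvl,iface=eth0,iface=eth2,cfg=/path/to/qos.conf:

    #include <errno.h>
    #include <rte_kvargs.h>

    static int parse_iface_count(const char *params)
    {
        struct rte_kvargs *kvlist = rte_kvargs_parse(params, NULL);
        unsigned int n;

        if (kvlist == NULL)
            return -EINVAL;
        n = rte_kvargs_count(kvlist, "iface"); /* iface=... occurrences */
        rte_kvargs_free(kvlist);
        return n;
    }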
- */ -static int -rte_pmd_mrvl_remove(struct rte_vdev_device *vdev) -{ - int i; - const char *name; - - name = rte_vdev_device_name(vdev); - if (!name) - return -EINVAL; - - RTE_LOG(INFO, PMD, "Removing %s\n", name); - - for (i = 0; i < rte_eth_dev_count(); i++) { - char ifname[RTE_ETH_NAME_MAX_LEN]; - - rte_eth_dev_get_name_by_port(i, ifname); - mrvl_eth_dev_destroy(ifname); - mrvl_dev_num--; - } - - if (mrvl_dev_num == 0) { - RTE_LOG(INFO, PMD, "Perform MUSDK deinit\n"); - mrvl_deinit_hifs(); - mrvl_deinit_pp2(); - mv_sys_dma_mem_destroy(); - } - - return 0; -} - -static struct rte_vdev_driver pmd_mrvl_drv = { - .probe = rte_pmd_mrvl_probe, - .remove = rte_pmd_mrvl_remove, -}; - -RTE_PMD_REGISTER_VDEV(net_mrvl, pmd_mrvl_drv); -RTE_PMD_REGISTER_ALIAS(net_mrvl, eth_mrvl); diff --git a/drivers/net/mrvl/mrvl_ethdev.h b/drivers/net/mrvl/mrvl_ethdev.h deleted file mode 100644 index f7afae5c..00000000 --- a/drivers/net/mrvl/mrvl_ethdev.h +++ /dev/null @@ -1,118 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _MRVL_ETHDEV_H_ -#define _MRVL_ETHDEV_H_ - -#include - -#include -#include -#include -#include -#include -#include - -/** Maximum number of rx queues per port */ -#define MRVL_PP2_RXQ_MAX 32 - -/** Maximum number of tx queues per port */ -#define MRVL_PP2_TXQ_MAX 8 - -/** Minimum number of descriptors in tx queue */ -#define MRVL_PP2_TXD_MIN 16 - -/** Maximum number of descriptors in tx queue */ -#define MRVL_PP2_TXD_MAX 2048 - -/** Tx queue descriptors alignment */ -#define MRVL_PP2_TXD_ALIGN 16 - -/** Minimum number of descriptors in rx queue */ -#define MRVL_PP2_RXD_MIN 16 - -/** Maximum number of descriptors in rx queue */ -#define MRVL_PP2_RXD_MAX 2048 - -/** Rx queue descriptors alignment */ -#define MRVL_PP2_RXD_ALIGN 16 - -/** Maximum number of descriptors in tx aggregated queue */ -#define MRVL_PP2_AGGR_TXQD_MAX 2048 - -/** Maximum number of Traffic Classes. 
*/ -#define MRVL_PP2_TC_MAX 8 - -/** Packet offset inside RX buffer. */ -#define MRVL_PKT_OFFS 64 - -/** Maximum number of descriptors in shadow queue. Must be power of 2 */ -#define MRVL_PP2_TX_SHADOWQ_SIZE MRVL_PP2_TXD_MAX - -/** Shadow queue size mask (since shadow queue size is power of 2) */ -#define MRVL_PP2_TX_SHADOWQ_MASK (MRVL_PP2_TX_SHADOWQ_SIZE - 1) - -/** Minimum number of sent buffers to release from shadow queue to BM */ -#define MRVL_PP2_BUF_RELEASE_BURST_SIZE 64 - -struct mrvl_priv { - /* Hot fields, used in fast path. */ - struct pp2_bpool *bpool; /**< BPool pointer */ - struct pp2_ppio *ppio; /**< Port handler pointer */ - rte_spinlock_t lock; /**< Spinlock for checking bpool status */ - uint16_t bpool_max_size; /**< BPool maximum size */ - uint16_t bpool_min_size; /**< BPool minimum size */ - uint16_t bpool_init_size; /**< Configured BPool size */ - - /** Mapping for DPDK rx queue->(TC, MRVL relative inq) */ - struct { - uint8_t tc; /**< Traffic Class */ - uint8_t inq; /**< Relative in-queue number */ - } rxq_map[MRVL_PP2_RXQ_MAX] __rte_cache_aligned; - - /* Configuration data, used sporadically. */ - uint8_t pp_id; - uint8_t ppio_id; - uint8_t bpool_bit; - uint8_t rss_hf_tcp; - uint8_t uc_mc_flushed; - uint8_t vlan_flushed; - - struct pp2_ppio_params ppio_params; - struct pp2_cls_qos_tbl_params qos_tbl_params; - struct pp2_cls_tbl *qos_tbl; - uint16_t nb_rx_queues; -}; - -#endif /* _MRVL_ETHDEV_H_ */ diff --git a/drivers/net/mrvl/mrvl_qos.c b/drivers/net/mrvl/mrvl_qos.c deleted file mode 100644 index fbb36813..00000000 --- a/drivers/net/mrvl/mrvl_qos.c +++ /dev/null @@ -1,636 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -/* Unluckily, container_of is defined by both DPDK and MUSDK, - * we'll declare only one version. - * - * Note that it is not used in this PMD anyway. 
- */ -#ifdef container_of -#undef container_of -#endif - -#include "mrvl_qos.h" - -/* Parsing tokens. Defined conveniently, so that any correction is easy. */ -#define MRVL_TOK_DEFAULT "default" -#define MRVL_TOK_DEFAULT_TC "default_tc" -#define MRVL_TOK_DSCP "dscp" -#define MRVL_TOK_MAPPING_PRIORITY "mapping_priority" -#define MRVL_TOK_IP "ip" -#define MRVL_TOK_IP_VLAN "ip/vlan" -#define MRVL_TOK_PCP "pcp" -#define MRVL_TOK_PORT "port" -#define MRVL_TOK_RXQ "rxq" -#define MRVL_TOK_SP "SP" -#define MRVL_TOK_TC "tc" -#define MRVL_TOK_TXQ "txq" -#define MRVL_TOK_VLAN "vlan" -#define MRVL_TOK_VLAN_IP "vlan/ip" -#define MRVL_TOK_WEIGHT "weight" - -/** Number of tokens in range a-b = 2. */ -#define MAX_RNG_TOKENS 2 - -/** Maximum possible value of PCP. */ -#define MAX_PCP 7 - -/** Maximum possible value of DSCP. */ -#define MAX_DSCP 63 - -/** Global QoS configuration. */ -struct mrvl_qos_cfg *mrvl_qos_cfg; - -/** - * Convert string to uint32_t with extra checks for result correctness. - * - * @param string String to convert. - * @param val Conversion result. - * @returns 0 in case of success, negative value otherwise. - */ -static int -get_val_securely(const char *string, uint32_t *val) -{ - char *endptr; - size_t len = strlen(string); - - if (len == 0) - return -1; - - errno = 0; - *val = strtoul(string, &endptr, 0); - if (errno != 0 || RTE_PTR_DIFF(endptr, string) != len) - return -2; - - return 0; -} - -/** - * Read out-queue configuration from file. - * - * @param file Path to the configuration file. - * @param port Port number. - * @param outq Out queue number. - * @param cfg Pointer to the Marvell QoS configuration structure. - * @returns 0 in case of success, negative value otherwise. - */ -static int -get_outq_cfg(struct rte_cfgfile *file, int port, int outq, - struct mrvl_qos_cfg *cfg) -{ - char sec_name[32]; - const char *entry; - uint32_t val; - - snprintf(sec_name, sizeof(sec_name), "%s %d %s %d", - MRVL_TOK_PORT, port, MRVL_TOK_TXQ, outq); - - /* Skip non-existing */ - if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0) - return 0; - - entry = rte_cfgfile_get_entry(file, sec_name, - MRVL_TOK_WEIGHT); - if (entry) { - if (get_val_securely(entry, &val) < 0) - return -1; - cfg->port[port].outq[outq].weight = (uint8_t)val; - } - - return 0; -} - -/** - * Gets multiple-entry values and places them in table. - * - * Entry can be anything, e.g. "1 2-3 5 6 7-9". This needs to be converted to - * table entries, respectively: {1, 2, 3, 5, 6, 7, 8, 9}. - * As all result table's elements are always 1-byte long, we - * won't overcomplicate the function, but we'll keep API generic, - * check if someone hasn't changed element size and make it simple - * to extend to other sizes. - * - * This function is purely utilitary, it does not print any error, only returns - * different error numbers. - * - * @param entry[in] Values string to parse. - * @param tab[out] Results table. - * @param elem_sz[in] Element size (in bytes). - * @param max_elems[in] Number of results table elements available. - * @param max val[in] Maximum value allowed. - * @returns Number of correctly parsed elements in case of success. - * @retval -1 Wrong element size. - * @retval -2 More tokens than result table allows. - * @retval -3 Wrong range syntax. - * @retval -4 Wrong range values. - * @retval -5 Maximum value exceeded. - */ -static int -get_entry_values(const char *entry, uint8_t *tab, - size_t elem_sz, uint8_t max_elems, uint8_t max_val) -{ - /* There should not be more tokens than max elements. 
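get_val_securely() above accepts only a complete, clean numeric parse (strtoul with base 0, so hex prefixes are auto-detected). Illustrative outcomes, assuming the function is in scope:

    uint32_t v;
    get_val_securely("64", &v);   /* returns 0, v == 64             */
    get_val_securely("0x40", &v); /* returns 0, base auto-detected  */
    get_val_securely("64k", &v);  /* returns -2, trailing junk      */
    get_val_securely("", &v);     /* returns -1, empty string       */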
- * Add 1 for error trap. - */ - char *tokens[max_elems + 1]; - - /* Begin, End + error trap = 3. */ - char *rng_tokens[MAX_RNG_TOKENS + 1]; - long beg, end; - uint32_t token_val; - int nb_tokens, nb_rng_tokens; - int i; - int values = 0; - char val; - char entry_cpy[CFG_VALUE_LEN]; - - if (elem_sz != 1) - return -1; - - /* Copy the entry to safely use rte_strsplit(). */ - snprintf(entry_cpy, RTE_DIM(entry_cpy), "%s", entry); - - /* - * If there are more tokens than array size, rte_strsplit will - * not return error, just array size. - */ - nb_tokens = rte_strsplit(entry_cpy, strlen(entry_cpy), - tokens, max_elems + 1, ' '); - - /* Quick check, will be refined later. */ - if (nb_tokens > max_elems) - return -2; - - for (i = 0; i < nb_tokens; ++i) { - if (strchr(tokens[i], '-') != NULL) { - /* - * Split to begin and end tokens. - * We want to catch error cases too, thus we leave - * option for number of tokens to be more than 2. - */ - nb_rng_tokens = rte_strsplit(tokens[i], - strlen(tokens[i]), rng_tokens, - RTE_DIM(rng_tokens), '-'); - if (nb_rng_tokens != 2) - return -3; - - /* Range and sanity checks. */ - if (get_val_securely(rng_tokens[0], &token_val) < 0) - return -4; - beg = (char)token_val; - if (get_val_securely(rng_tokens[1], &token_val) < 0) - return -4; - end = (char)token_val; - if (beg < 0 || beg > UCHAR_MAX || - end < 0 || end > UCHAR_MAX || end < beg) - return -4; - - for (val = beg; val <= end; ++val) { - if (val > max_val) - return -5; - - *tab = val; - tab = RTE_PTR_ADD(tab, elem_sz); - ++values; - if (values >= max_elems) - return -2; - } - } else { - /* Single values. */ - if (get_val_securely(tokens[i], &token_val) < 0) - return -5; - val = (char)token_val; - if (val > max_val) - return -5; - - *tab = val; - tab = RTE_PTR_ADD(tab, elem_sz); - ++values; - if (values >= max_elems) - return -2; - } - } - - return values; -} - -/** - * Parse Traffic Class'es mapping configuration. - * - * @param file Config file handle. - * @param port Which port to look for. - * @param tc Which Traffic Class to look for. - * @param cfg[out] Parsing results. - * @returns 0 in case of success, negative value otherwise. 
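A worked call matching the documented example above, assuming get_entry_values() is in scope:

    uint8_t tab[16];
    int n = get_entry_values("1 2-3 5 6 7-9", tab, sizeof(tab[0]),
                             RTE_DIM(tab), 63);
    /* n == 8, tab holds {1, 2, 3, 5, 6, 7, 8, 9} */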
- */ -static int -parse_tc_cfg(struct rte_cfgfile *file, int port, int tc, - struct mrvl_qos_cfg *cfg) -{ - char sec_name[32]; - const char *entry; - int n; - - snprintf(sec_name, sizeof(sec_name), "%s %d %s %d", - MRVL_TOK_PORT, port, MRVL_TOK_TC, tc); - - /* Skip non-existing */ - if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0) - return 0; - - entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RXQ); - if (entry) { - n = get_entry_values(entry, - cfg->port[port].tc[tc].inq, - sizeof(cfg->port[port].tc[tc].inq[0]), - RTE_DIM(cfg->port[port].tc[tc].inq), - MRVL_PP2_RXQ_MAX); - if (n < 0) { - RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n", - n, entry); - return n; - } - cfg->port[port].tc[tc].inqs = n; - } - - entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PCP); - if (entry) { - n = get_entry_values(entry, - cfg->port[port].tc[tc].pcp, - sizeof(cfg->port[port].tc[tc].pcp[0]), - RTE_DIM(cfg->port[port].tc[tc].pcp), - MAX_PCP); - if (n < 0) { - RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n", - n, entry); - return n; - } - cfg->port[port].tc[tc].pcps = n; - } - - entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_DSCP); - if (entry) { - n = get_entry_values(entry, - cfg->port[port].tc[tc].dscp, - sizeof(cfg->port[port].tc[tc].dscp[0]), - RTE_DIM(cfg->port[port].tc[tc].dscp), - MAX_DSCP); - if (n < 0) { - RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n", - n, entry); - return n; - } - cfg->port[port].tc[tc].dscps = n; - } - return 0; -} - -/** - * Parse QoS configuration - rte_kvargs_process handler. - * - * Opens configuration file and parses its content. - * - * @param key Unused. - * @param path Path to config file. - * @param extra_args Pointer to configuration structure. - * @returns 0 in case of success, exits otherwise. - */ -int -mrvl_get_qoscfg(const char *key __rte_unused, const char *path, - void *extra_args) -{ - struct mrvl_qos_cfg **cfg = extra_args; - struct rte_cfgfile *file = rte_cfgfile_load(path, 0); - uint32_t val; - int n, i, ret; - const char *entry; - char sec_name[32]; - - if (file == NULL) - rte_exit(EXIT_FAILURE, "Cannot load configuration %s\n", path); - - /* Create configuration. This is never accessed on the fast path, - * so we can ignore socket. - */ - *cfg = rte_zmalloc("mrvl_qos_cfg", sizeof(struct mrvl_qos_cfg), 0); - if (*cfg == NULL) - rte_exit(EXIT_FAILURE, "Cannot allocate configuration %s\n", - path); - - n = rte_cfgfile_num_sections(file, MRVL_TOK_PORT, - sizeof(MRVL_TOK_PORT) - 1); - - if (n == 0) { - /* This is weird, but not bad. */ - RTE_LOG(WARNING, PMD, "Empty configuration file?\n"); - return 0; - } - - /* Use the number of ports given as vdev parameters. */ - for (n = 0; n < (PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC); ++n) { - snprintf(sec_name, sizeof(sec_name), "%s %d %s", - MRVL_TOK_PORT, n, MRVL_TOK_DEFAULT); - - /* Skip ports non-existing in configuration. 
*/ - if (rte_cfgfile_num_sections(file, sec_name, - strlen(sec_name)) <= 0) { - (*cfg)->port[n].use_global_defaults = 1; - (*cfg)->port[n].mapping_priority = - PP2_CLS_QOS_TBL_VLAN_IP_PRI; - continue; - } - - entry = rte_cfgfile_get_entry(file, sec_name, - MRVL_TOK_DEFAULT_TC); - if (entry) { - if (get_val_securely(entry, &val) < 0 || - val > USHRT_MAX) - return -1; - (*cfg)->port[n].default_tc = (uint8_t)val; - } else { - RTE_LOG(ERR, PMD, - "Default Traffic Class required in custom configuration!\n"); - return -1; - } - - entry = rte_cfgfile_get_entry(file, sec_name, - MRVL_TOK_MAPPING_PRIORITY); - if (entry) { - if (!strncmp(entry, MRVL_TOK_VLAN_IP, - sizeof(MRVL_TOK_VLAN_IP))) - (*cfg)->port[n].mapping_priority = - PP2_CLS_QOS_TBL_VLAN_IP_PRI; - else if (!strncmp(entry, MRVL_TOK_IP_VLAN, - sizeof(MRVL_TOK_IP_VLAN))) - (*cfg)->port[n].mapping_priority = - PP2_CLS_QOS_TBL_IP_VLAN_PRI; - else if (!strncmp(entry, MRVL_TOK_IP, - sizeof(MRVL_TOK_IP))) - (*cfg)->port[n].mapping_priority = - PP2_CLS_QOS_TBL_IP_PRI; - else if (!strncmp(entry, MRVL_TOK_VLAN, - sizeof(MRVL_TOK_VLAN))) - (*cfg)->port[n].mapping_priority = - PP2_CLS_QOS_TBL_VLAN_PRI; - else - rte_exit(EXIT_FAILURE, - "Error in parsing %s value (%s)!\n", - MRVL_TOK_MAPPING_PRIORITY, entry); - } else { - (*cfg)->port[n].mapping_priority = - PP2_CLS_QOS_TBL_VLAN_IP_PRI; - } - - for (i = 0; i < MRVL_PP2_RXQ_MAX; ++i) { - ret = get_outq_cfg(file, n, i, *cfg); - if (ret < 0) - rte_exit(EXIT_FAILURE, - "Error %d parsing port %d outq %d!\n", - ret, n, i); - } - - for (i = 0; i < MRVL_PP2_TC_MAX; ++i) { - ret = parse_tc_cfg(file, n, i, *cfg); - if (ret < 0) - rte_exit(EXIT_FAILURE, - "Error %d parsing port %d tc %d!\n", - ret, n, i); - } - } - - return 0; -} - -/** - * Setup Traffic Class. - * - * Fill in TC parameters in single MUSDK TC config entry. - * @param param TC parameters entry. - * @param inqs Number of MUSDK in-queues in this TC. - * @param bpool Bpool for this TC. - * @returns 0 in case of success, exits otherwise. - */ -static int -setup_tc(struct pp2_ppio_tc_params *param, uint8_t inqs, - struct pp2_bpool *bpool) -{ - struct pp2_ppio_inq_params *inq_params; - - param->pkt_offset = MRVL_PKT_OFFS; - param->pools[0] = bpool; - - inq_params = rte_zmalloc_socket("inq_params", - inqs * sizeof(*inq_params), - 0, rte_socket_id()); - if (!inq_params) - return -ENOMEM; - - param->num_in_qs = inqs; - - /* Release old config if necessary. */ - if (param->inqs_params) - rte_free(param->inqs_params); - - param->inqs_params = inq_params; - - return 0; -} - -/** - * Configure RX Queues in a given port. - * - * Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping. - * - * @param priv Port's private data - * @param portid DPDK port ID - * @param max_queues Maximum number of queues to configure. - * @returns 0 in case of success, negative value otherwise. - */ -int -mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid, - uint16_t max_queues) -{ - size_t i, tc; - - if (mrvl_qos_cfg == NULL || - mrvl_qos_cfg->port[portid].use_global_defaults) { - /* No port configuration, use default: 1 TC, no QoS. */ - priv->ppio_params.inqs_params.num_tcs = 1; - setup_tc(&priv->ppio_params.inqs_params.tcs_params[0], - max_queues, priv->bpool); - - /* Direct mapping of queues i.e. 0->0, 1->1 etc. */ - for (i = 0; i < max_queues; ++i) { - priv->rxq_map[i].tc = 0; - priv->rxq_map[i].inq = i; - } - return 0; - } - - /* We need only a subset of configuration. 
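Putting the tokens together, an illustrative configuration fragment this parser would accept (all values made up for the example):

    [port 0 default]
    default_tc = 0
    mapping_priority = vlan/ip

    [port 0 tc 0]
    rxq = 0 1
    pcp = 0-3

    [port 0 tc 1]
    rxq = 2
    pcp = 4-7
    dscp = 46

    [port 0 txq 0]
    weight = 100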
*/ - struct port_cfg *port_cfg = &mrvl_qos_cfg->port[portid]; - - priv->qos_tbl_params.type = port_cfg->mapping_priority; - - /* - * We need to reverse mapping, from tc->pcp (better from usability - * point of view) to pcp->tc (configurable in MUSDK). - * First, set all map elements to "default". - */ - for (i = 0; i < RTE_DIM(priv->qos_tbl_params.pcp_cos_map); ++i) - priv->qos_tbl_params.pcp_cos_map[i].tc = port_cfg->default_tc; - - /* Then, fill in all known values. */ - for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) { - if (port_cfg->tc[tc].pcps > RTE_DIM(port_cfg->tc[0].pcp)) { - /* Better safe than sorry. */ - RTE_LOG(ERR, PMD, - "Too many PCPs configured in TC %zu!\n", tc); - return -1; - } - for (i = 0; i < port_cfg->tc[tc].pcps; ++i) { - priv->qos_tbl_params.pcp_cos_map[ - port_cfg->tc[tc].pcp[i]].tc = tc; - } - } - - /* - * The same logic goes with DSCP. - * First, set all map elements to "default". - */ - for (i = 0; i < RTE_DIM(priv->qos_tbl_params.dscp_cos_map); ++i) - priv->qos_tbl_params.dscp_cos_map[i].tc = - port_cfg->default_tc; - - /* Fill in all known values. */ - for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) { - if (port_cfg->tc[tc].dscps > RTE_DIM(port_cfg->tc[0].dscp)) { - /* Better safe than sorry. */ - RTE_LOG(ERR, PMD, - "Too many DSCPs configured in TC %zu!\n", tc); - return -1; - } - for (i = 0; i < port_cfg->tc[tc].dscps; ++i) { - priv->qos_tbl_params.dscp_cos_map[ - port_cfg->tc[tc].dscp[i]].tc = tc; - } - } - - /* - * Surprisingly, similar logic goes with queue mapping. - * We need only to store qid->tc mapping, - * to know TC when queue is read. - */ - for (i = 0; i < RTE_DIM(priv->rxq_map); ++i) - priv->rxq_map[i].tc = MRVL_UNKNOWN_TC; - - /* Set up DPDKq->(TC,inq) mapping. */ - for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) { - if (port_cfg->tc[tc].inqs > RTE_DIM(port_cfg->tc[0].inq)) { - /* Overflow. */ - RTE_LOG(ERR, PMD, - "Too many RX queues configured per TC %zu!\n", - tc); - return -1; - } - for (i = 0; i < port_cfg->tc[tc].inqs; ++i) { - uint8_t idx = port_cfg->tc[tc].inq[i]; - - if (idx > RTE_DIM(priv->rxq_map)) { - RTE_LOG(ERR, PMD, "Bad queue index %d!\n", idx); - return -1; - } - - priv->rxq_map[idx].tc = tc; - priv->rxq_map[idx].inq = i; - } - } - - /* - * Set up TC configuration. TCs need to be sequenced: 0, 1, 2 - * with no gaps. Empty TC means end of processing. - */ - for (i = 0; i < MRVL_PP2_TC_MAX; ++i) { - if (port_cfg->tc[i].inqs == 0) - break; - setup_tc(&priv->ppio_params.inqs_params.tcs_params[i], - port_cfg->tc[i].inqs, - priv->bpool); - } - - priv->ppio_params.inqs_params.num_tcs = i; - - return 0; -} - -/** - * Start QoS mapping. - * - * Finalize QoS table configuration and initialize it in SDK. It can be done - * only after port is started, so we have a valid ppio reference. - * - * @param priv Port's private (configuration) data. - * @returns 0 in case of success, exits otherwise. - */ -int -mrvl_start_qos_mapping(struct mrvl_priv *priv) -{ - size_t i; - - if (priv->ppio == NULL) { - RTE_LOG(ERR, PMD, "ppio must not be NULL here!\n"); - return -1; - } - - for (i = 0; i < RTE_DIM(priv->qos_tbl_params.pcp_cos_map); ++i) - priv->qos_tbl_params.pcp_cos_map[i].ppio = priv->ppio; - - for (i = 0; i < RTE_DIM(priv->qos_tbl_params.dscp_cos_map); ++i) - priv->qos_tbl_params.dscp_cos_map[i].ppio = priv->ppio; - - /* Initialize Classifier QoS table. 
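The "defaults first, then override" reverse mapping used above for both PCP and DSCP can be modelled stand-alone (sizes and names are local to the sketch):

    #include <stdint.h>
    #include <string.h>

    #define NPCP 8 /* PCP values 0..7 */

    /* tc_pcp[tc] lists the PCP values assigned to that TC, tc_npcp[tc] says
     * how many; every unassigned PCP falls back to def_tc. */
    static void build_pcp_to_tc(uint8_t map[NPCP], uint8_t def_tc,
                                const uint8_t tc_pcp[][NPCP],
                                const uint8_t *tc_npcp, int ntc)
    {
        int tc, i;

        memset(map, def_tc, NPCP);       /* defaults first */
        for (tc = 0; tc < ntc; tc++)     /* then the configured values */
            for (i = 0; i < tc_npcp[tc]; i++)
                map[tc_pcp[tc][i]] = (uint8_t)tc;
    }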
*/ - - return pp2_cls_qos_tbl_init(&priv->qos_tbl_params, &priv->qos_tbl); -} diff --git a/drivers/net/mrvl/mrvl_qos.h b/drivers/net/mrvl/mrvl_qos.h deleted file mode 100644 index ae7508c9..00000000 --- a/drivers/net/mrvl/mrvl_qos.h +++ /dev/null @@ -1,113 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _MRVL_QOS_H_ -#define _MRVL_QOS_H_ - -#include - -#include "mrvl_ethdev.h" - -/** Code Points per Traffic Class. Equals max(DSCP, PCP). */ -#define MRVL_CP_PER_TC (64) - -/** Value used as "unknown". */ -#define MRVL_UNKNOWN_TC (0xFF) - -/* QoS config. */ -struct mrvl_qos_cfg { - struct port_cfg { - struct { - uint8_t inq[MRVL_PP2_RXQ_MAX]; - uint8_t dscp[MRVL_CP_PER_TC]; - uint8_t pcp[MRVL_CP_PER_TC]; - uint8_t inqs; - uint8_t dscps; - uint8_t pcps; - } tc[MRVL_PP2_TC_MAX]; - struct { - uint8_t weight; - } outq[MRVL_PP2_RXQ_MAX]; - enum pp2_cls_qos_tbl_type mapping_priority; - uint16_t inqs; - uint16_t outqs; - uint8_t default_tc; - uint8_t use_global_defaults; - } port[RTE_MAX_ETHPORTS]; -}; - -/** Global QoS configuration. */ -extern struct mrvl_qos_cfg *mrvl_qos_cfg; - -/** - * Parse QoS configuration - rte_kvargs_process handler. - * - * Opens configuration file and parses its content. - * - * @param key Unused. - * @param path Path to config file. - * @param extra_args Pointer to configuration structure. - * @returns 0 in case of success, exits otherwise. - */ -int -mrvl_get_qoscfg(const char *key __rte_unused, const char *path, - void *extra_args); - -/** - * Configure RX Queues in a given port. - * - * Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping. - * - * @param priv Port's private data - * @param portid DPDK port ID - * @param max_queues Maximum number of queues to configure. - * @returns 0 in case of success, negative value otherwise. 
- */ -int -mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid, - uint16_t max_queues); - -/** - * Start QoS mapping. - * - * Finalize QoS table configuration and initialize it in SDK. It can be done - * only after port is started, so we have a valid ppio reference. - * - * @param priv Port's private (configuration) data. - * @returns 0 in case of success, exits otherwise. - */ -int -mrvl_start_qos_mapping(struct mrvl_priv *priv); - -#endif /* _MRVL_QOS_H_ */ diff --git a/drivers/net/mrvl/rte_pmd_mrvl_version.map b/drivers/net/mrvl/rte_pmd_mrvl_version.map deleted file mode 100644 index a7530317..00000000 --- a/drivers/net/mrvl/rte_pmd_mrvl_version.map +++ /dev/null @@ -1,3 +0,0 @@ -DPDK_17.11 { - local: *; -}; diff --git a/drivers/net/mvpp2/Makefile b/drivers/net/mvpp2/Makefile new file mode 100644 index 00000000..492aef97 --- /dev/null +++ b/drivers/net/mvpp2/Makefile @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Marvell International Ltd. +# Copyright(c) 2017 Semihalf. +# All rights reserved. + +include $(RTE_SDK)/mk/rte.vars.mk + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(MAKECMDGOALS),config) +ifeq ($(LIBMUSDK_PATH),) +$(error "Please define LIBMUSDK_PATH environment variable") +endif +endif +endif + +# library name +LIB = librte_pmd_mvpp2.a + +# library version +LIBABIVER := 1 + +# versioning export map +EXPORT_MAP := rte_pmd_mvpp2_version.map + +# external library dependencies +CFLAGS += -I$(LIBMUSDK_PATH)/include +CFLAGS += -DMVCONF_TYPES_PUBLIC +CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -O3 +LDLIBS += -L$(LIBMUSDK_PATH)/lib +LDLIBS += -lmusdk +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile +LDLIBS += -lrte_bus_vdev + +# library source files +SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_qos.c +SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_flow.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/mvpp2/meson.build b/drivers/net/mvpp2/meson.build new file mode 100644 index 00000000..e1398895 --- /dev/null +++ b/drivers/net/mvpp2/meson.build @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Marvell International Ltd. +# Copyright(c) 2018 Semihalf. +# All rights reserved. + +path = get_option('lib_musdk_dir') +lib_dir = path + '/lib' +inc_dir = path + '/include' + +lib = cc.find_library('libmusdk', dirs : [lib_dir], required: false) +if not lib.found() + build = false +else + ext_deps += lib + includes += include_directories(inc_dir) + cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC'] +endif + +sources = files( + 'mrvl_ethdev.c', + 'mrvl_flow.c', + 'mrvl_qos.c' +) + +deps += ['cfgfile'] diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c new file mode 100644 index 00000000..a2d0576e --- /dev/null +++ b/drivers/net/mvpp2/mrvl_ethdev.c @@ -0,0 +1,2761 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Marvell International Ltd. + * Copyright(c) 2017 Semihalf. + * All rights reserved. + */ + +#include +#include +#include +#include +#include + +/* Unluckily, container_of is defined by both DPDK and MUSDK, + * we'll declare only one version. + * + * Note that it is not used in this PMD anyway. 
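Both new build files need to be pointed at a MUSDK installation; the paths below are placeholders:

    # legacy make build
    export LIBMUSDK_PATH=/path/to/musdk
    # meson build
    meson build -Dlib_musdk_dir=/path/to/musdk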
 + */ +#ifdef container_of +#undef container_of +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mrvl_ethdev.h" +#include "mrvl_qos.h" + +/* bitmask with reserved hifs */ +#define MRVL_MUSDK_HIFS_RESERVED 0x0F +/* bitmask with reserved bpools */ +#define MRVL_MUSDK_BPOOLS_RESERVED 0x07 +/* bitmask with reserved kernel RSS tables */ +#define MRVL_MUSDK_RSS_RESERVED 0x01 +/* maximum number of available hifs */ +#define MRVL_MUSDK_HIFS_MAX 9 + +/* prefetch shift */ +#define MRVL_MUSDK_PREFETCH_SHIFT 2 + +/* TCAM has 25 entries reserved for uc/mc filter entries */ +#define MRVL_MAC_ADDRS_MAX 25 +#define MRVL_MATCH_LEN 16 +#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE) +/* Maximum allowable packet size */ +#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE) + +#define MRVL_IFACE_NAME_ARG "iface" +#define MRVL_CFG_ARG "cfg" + +#define MRVL_BURST_SIZE 64 + +#define MRVL_ARP_LENGTH 28 + +#define MRVL_COOKIE_ADDR_INVALID ~0ULL + +#define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8) +#define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT) + +/* Memory size (in bytes) for MUSDK dma buffers */ +#define MRVL_MUSDK_DMA_MEMSIZE 41943040 + +/** Port Rx offload capabilities */ +#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \ + DEV_RX_OFFLOAD_JUMBO_FRAME | \ + DEV_RX_OFFLOAD_CRC_STRIP | \ + DEV_RX_OFFLOAD_CHECKSUM) + +/** Port Tx offloads capabilities */ +#define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_CKSUM) + +static const char * const valid_args[] = { + MRVL_IFACE_NAME_ARG, + MRVL_CFG_ARG, + NULL +}; + +static int used_hifs = MRVL_MUSDK_HIFS_RESERVED; +static struct pp2_hif *hifs[RTE_MAX_LCORE]; +static int used_bpools[PP2_NUM_PKT_PROC] = { + MRVL_MUSDK_BPOOLS_RESERVED, + MRVL_MUSDK_BPOOLS_RESERVED +}; + +struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS]; +int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE]; +uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID; + +int mrvl_logtype; + +struct mrvl_ifnames { + const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC]; + int idx; +}; + +/* + * To enable buffer harvesting based on the loopback port, a shadow queue + * structure was introduced for buffer information bookkeeping. + * + * Before a packet is sent, the related buffer information (pp2_buff_inf) is + * stored in the shadow queue. After the packet is transmitted, the + * no-longer-used packet buffer is released back to its original hardware pool, + * provided it originated from an interface. + * In case it was generated by the application itself, i.e. the mbuf->port + * field is 0xff, it is released to the software mempool. 
+ */ +struct mrvl_shadow_txq { + int head; /* write index - used when sending buffers */ + int tail; /* read index - used when releasing buffers */ + u16 size; /* queue occupied size */ + u16 num_to_release; /* number of buffers sent, that can be released */ + struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */ +}; + +struct mrvl_rxq { + struct mrvl_priv *priv; + struct rte_mempool *mp; + int queue_id; + int port_id; + int cksum_enabled; + uint64_t bytes_recv; + uint64_t drop_mac; +}; + +struct mrvl_txq { + struct mrvl_priv *priv; + int queue_id; + int port_id; + uint64_t bytes_sent; + struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE]; + int tx_deferred_start; +}; + +static int mrvl_lcore_first; +static int mrvl_lcore_last; +static int mrvl_dev_num; + +static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num); +static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio, + struct pp2_hif *hif, unsigned int core_id, + struct mrvl_shadow_txq *sq, int qid, int force); + +#define MRVL_XSTATS_TBL_ENTRY(name) { \ + #name, offsetof(struct pp2_ppio_statistics, name), \ + sizeof(((struct pp2_ppio_statistics *)0)->name) \ +} + +/* Table with xstats data */ +static struct { + const char *name; + unsigned int offset; + unsigned int size; +} mrvl_xstats_tbl[] = { + MRVL_XSTATS_TBL_ENTRY(rx_bytes), + MRVL_XSTATS_TBL_ENTRY(rx_packets), + MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets), + MRVL_XSTATS_TBL_ENTRY(rx_errors), + MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped), + MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped), + MRVL_XSTATS_TBL_ENTRY(rx_early_dropped), + MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped), + MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped), + MRVL_XSTATS_TBL_ENTRY(tx_bytes), + MRVL_XSTATS_TBL_ENTRY(tx_packets), + MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets), + MRVL_XSTATS_TBL_ENTRY(tx_errors) +}; + +static inline int +mrvl_get_bpool_size(int pp2_id, int pool_id) +{ + int i; + int size = 0; + + for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) + size += mrvl_port_bpool_size[pp2_id][pool_id][i]; + + return size; +} + +static inline int +mrvl_reserve_bit(int *bitmap, int max) +{ + int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap); + + if (n >= max) + return -1; + + *bitmap |= 1 << n; + + return n; +} + +static int +mrvl_init_hif(int core_id) +{ + struct pp2_hif_params params; + char match[MRVL_MATCH_LEN]; + int ret; + + ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX); + if (ret < 0) { + MRVL_LOG(ERR, "Failed to allocate hif %d", core_id); + return ret; + } + + snprintf(match, sizeof(match), "hif-%d", ret); + memset(¶ms, 0, sizeof(params)); + params.match = match; + params.out_size = MRVL_PP2_AGGR_TXQD_MAX; + ret = pp2_hif_init(¶ms, &hifs[core_id]); + if (ret) { + MRVL_LOG(ERR, "Failed to initialize hif %d", core_id); + return ret; + } + + return 0; +} + +static inline struct pp2_hif* +mrvl_get_hif(struct mrvl_priv *priv, int core_id) +{ + int ret; + + if (likely(hifs[core_id] != NULL)) + return hifs[core_id]; + + rte_spinlock_lock(&priv->lock); + + ret = mrvl_init_hif(core_id); + if (ret < 0) { + MRVL_LOG(ERR, "Failed to allocate hif %d", core_id); + goto out; + } + + if (core_id < mrvl_lcore_first) + mrvl_lcore_first = core_id; + + if (core_id > mrvl_lcore_last) + mrvl_lcore_last = core_id; +out: + rte_spinlock_unlock(&priv->lock); + + return hifs[core_id]; +} + +/** + * Configure rss based on dpdk rss configuration. + * + * @param priv + * Pointer to private structure. + * @param rss_conf + * Pointer to RSS configuration. + * + * @return + * 0 on success, negative error value otherwise. 
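mrvl_reserve_bit() above always grants the bit just past the highest bit currently set, so grants are strictly increasing and released lower bits are not reused. A compilable illustration with the hif reservation values (__builtin_clz is the GCC-style builtin used by the original):

    #include <assert.h>

    static int reserve_bit(int *bitmap, int max) /* mirrors mrvl_reserve_bit */
    {
        int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);

        if (n >= max)
            return -1;
        *bitmap |= 1 << n;
        return n;
    }

    int main(void)
    {
        int used = 0x0F; /* MRVL_MUSDK_HIFS_RESERVED: bits 0-3 taken */
        assert(reserve_bit(&used, 9) == 4); /* 32 - clz(0x0F) == 4 */
        assert(reserve_bit(&used, 9) == 5);
        assert(used == 0x3F);
        return 0;
    }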
+ */ +static int +mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf) +{ + if (rss_conf->rss_key) + MRVL_LOG(WARNING, "Changing hash key is not supported"); + + if (rss_conf->rss_hf == 0) { + priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; + } else if (rss_conf->rss_hf & ETH_RSS_IPV4) { + priv->ppio_params.inqs_params.hash_type = + PP2_PPIO_HASH_T_2_TUPLE; + } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { + priv->ppio_params.inqs_params.hash_type = + PP2_PPIO_HASH_T_5_TUPLE; + priv->rss_hf_tcp = 1; + } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { + priv->ppio_params.inqs_params.hash_type = + PP2_PPIO_HASH_T_5_TUPLE; + priv->rss_hf_tcp = 0; + } else { + return -EINVAL; + } + + return 0; +} + +/** + * Ethernet device configuration. + * + * Prepare the driver for a given number of TX and RX queues and + * configure RSS. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_dev_configure(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE && + dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) { + MRVL_LOG(INFO, "Unsupported rx multi queue mode %d", + dev->data->dev_conf.rxmode.mq_mode); + return -EINVAL; + } + + /* KEEP_CRC offload flag is not supported by PMD + * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed + */ + if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) { + MRVL_LOG(INFO, "L2 CRC stripping is always enabled in hw"); + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP; + } + + if (dev->data->dev_conf.rxmode.split_hdr_size) { + MRVL_LOG(INFO, "Split headers not supported"); + return -EINVAL; + } + + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len - + ETHER_HDR_LEN - ETHER_CRC_LEN; + + ret = mrvl_configure_rxqs(priv, dev->data->port_id, + dev->data->nb_rx_queues); + if (ret < 0) + return ret; + + ret = mrvl_configure_txqs(priv, dev->data->port_id, + dev->data->nb_tx_queues); + if (ret < 0) + return ret; + + priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues; + priv->ppio_params.maintain_stats = 1; + priv->nb_rx_queues = dev->data->nb_rx_queues; + + if (dev->data->nb_rx_queues == 1 && + dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { + MRVL_LOG(WARNING, "Disabling hash for 1 rx queue"); + priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; + + return 0; + } + + return mrvl_configure_rss(priv, + &dev->data->dev_conf.rx_adv_conf.rss_conf); +} + +/** + * DPDK callback to change the MTU. + * + * Setting the MTU affects hardware MRU (packets larger than the MRU + * will be dropped). + * + * @param dev + * Pointer to Ethernet device structure. + * @param mtu + * New MTU. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct mrvl_priv *priv = dev->data->dev_private; + /* extra MV_MH_SIZE bytes are required for Marvell tag */ + uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN; + int ret; + + if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) + return -EINVAL; + + if (!priv->ppio) + return 0; + + ret = pp2_ppio_set_mru(priv->ppio, mru); + if (ret) + return ret; + + return pp2_ppio_set_mtu(priv->ppio, mtu); +} + +/** + * DPDK callback to bring the link up. 
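As a worked example of the MRU computation in mrvl_mtu_set(), for the standard 1500-byte MTU and assuming MV_MH_SIZE (the Marvell header) is 2 bytes, with ETHER_HDR_LEN = 14 and ETHER_CRC_LEN = 4:

    mru = 1500 + 2 + 14 + 4 = 1520 bytes

which stays well below MRVL_PKT_SIZE_MAX, so the call succeeds.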
+ * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (!priv->ppio) + return -EPERM; + + ret = pp2_ppio_enable(priv->ppio); + if (ret) + return ret; + + /* + * mtu/mru can be updated if pp2_ppio_enable() was called at least once + * as pp2_ppio_enable() changes port->t_mode from default 0 to + * PP2_TRAFFIC_INGRESS_EGRESS. + * + * Set mtu to default DPDK value here. + */ + ret = mrvl_mtu_set(dev, dev->data->mtu); + if (ret) + pp2_ppio_disable(priv->ppio); + + return ret; +} + +/** + * DPDK callback to bring the link down. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + if (!priv->ppio) + return -EPERM; + + return pp2_ppio_disable(priv->ppio); +} + +/** + * DPDK callback to start tx queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param queue_id + * Transmit queue index. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (!priv) + return -EPERM; + + /* passing 1 enables given tx queue */ + ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1); + if (ret) { + MRVL_LOG(ERR, "Failed to start txq %d", queue_id); + return ret; + } + + dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +/** + * DPDK callback to stop tx queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param queue_id + * Transmit queue index. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (!priv->ppio) + return -EPERM; + + /* passing 0 disables given tx queue */ + ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0); + if (ret) { + MRVL_LOG(ERR, "Failed to stop txq %d", queue_id); + return ret; + } + + dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +/** + * DPDK callback to start the device. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int +mrvl_dev_start(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + char match[MRVL_MATCH_LEN]; + int ret = 0, i, def_init_size; + + snprintf(match, sizeof(match), "ppio-%d:%d", + priv->pp_id, priv->ppio_id); + priv->ppio_params.match = match; + + /* + * Calculate the minimum bpool size for refill feature as follows: + * 2 default burst sizes multiply by number of rx queues. + * If the bpool size will be below this value, new buffers will + * be added to the pool. 
 + */ + priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2; + + /* In case the initial bpool size configured in queue setup is + * smaller than the minimum size, add more buffers + */ + def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2; + if (priv->bpool_init_size < def_init_size) { + int buffs_to_add = def_init_size - priv->bpool_init_size; + + priv->bpool_init_size += buffs_to_add; + ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add); + if (ret) + MRVL_LOG(ERR, "Failed to add buffers to bpool"); + } + + /* + * Calculate the maximum bpool size for refill feature as follows: + * maximum number of descriptors in an rx queue multiplied by the number + * of rx queues, plus the minimum bpool size. + * In case the bpool size exceeds this value, superfluous buffers + * will be removed + */ + priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) + + priv->bpool_min_size; + + ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio); + if (ret) { + MRVL_LOG(ERR, "Failed to init ppio"); + return ret; + } + + /* + * In case there are some stale uc/mc mac addresses, flush them + * here. It cannot be done during mrvl_dev_close() as port information + * is already gone at that point (due to pp2_ppio_deinit() in + * mrvl_dev_stop()). + */ + if (!priv->uc_mc_flushed) { + ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1); + if (ret) { + MRVL_LOG(ERR, + "Failed to flush uc/mc filter list"); + goto out; + } + priv->uc_mc_flushed = 1; + } + + if (!priv->vlan_flushed) { + ret = pp2_ppio_flush_vlan(priv->ppio); + if (ret) { + MRVL_LOG(ERR, "Failed to flush vlan list"); + /* + * TODO + * once pp2_ppio_flush_vlan() is supported jump to out + * goto out; + */ + } + priv->vlan_flushed = 1; + } + + /* For default QoS config, don't start classifier. */ + if (mrvl_qos_cfg) { + ret = mrvl_start_qos_mapping(priv); + if (ret) { + MRVL_LOG(ERR, "Failed to setup QoS mapping"); + goto out; + } + } + + ret = mrvl_dev_set_link_up(dev); + if (ret) { + MRVL_LOG(ERR, "Failed to set link up"); + goto out; + } + + /* start tx queues */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct mrvl_txq *txq = dev->data->tx_queues[i]; + + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + + if (!txq->tx_deferred_start) + continue; + + /* + * All txqs are started by default. Stop them + * so that tx_deferred_start works as expected. + */ + ret = mrvl_tx_queue_stop(dev, i); + if (ret) + goto out; + } + + return 0; +out: + MRVL_LOG(ERR, "Failed to start device"); + pp2_ppio_deinit(priv->ppio); + return ret; +} + +/** + * Flush receive queues. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mrvl_flush_rx_queues(struct rte_eth_dev *dev) +{ + int i; + + MRVL_LOG(INFO, "Flushing rx queues"); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + int ret, num; + + do { + struct mrvl_rxq *q = dev->data->rx_queues[i]; + struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX]; + + num = MRVL_PP2_RXD_MAX; + ret = pp2_ppio_recv(q->priv->ppio, + q->priv->rxq_map[q->queue_id].tc, + q->priv->rxq_map[q->queue_id].inq, + descs, (uint16_t *)&num); + } while (ret == 0 && num); + } +} + +/** + * Flush transmit shadow queues. + * + * @param dev + * Pointer to Ethernet device structure. 
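For a port with, say, four rx queues, the thresholds computed above work out as follows (MRVL_BURST_SIZE = 64, MRVL_PP2_RXD_MAX = 2048):

    bpool_min_size = 4 * 64 * 2     =  512 buffers
    def_init_size  = 512 + 64 * 2   =  640 buffers
    bpool_max_size = 4 * 2048 + 512 = 8704 buffers

The rx path refills the pool once it drops to bpool_min_size and trims it back towards the initial size once it grows past bpool_max_size.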
+ */ +static void +mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev) +{ + int i, j; + struct mrvl_txq *txq; + + MRVL_LOG(INFO, "Flushing tx shadow queues"); + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = (struct mrvl_txq *)dev->data->tx_queues[i]; + + for (j = 0; j < RTE_MAX_LCORE; j++) { + struct mrvl_shadow_txq *sq; + + if (!hifs[j]) + continue; + + sq = &txq->shadow_txqs[j]; + mrvl_free_sent_buffers(txq->priv->ppio, + hifs[j], j, sq, txq->queue_id, 1); + while (sq->tail != sq->head) { + uint64_t addr = cookie_addr_high | + sq->ent[sq->tail].buff.cookie; + rte_pktmbuf_free( + (struct rte_mbuf *)addr); + sq->tail = (sq->tail + 1) & + MRVL_PP2_TX_SHADOWQ_MASK; + } + memset(sq, 0, sizeof(*sq)); + } + } +} + +/** + * Flush hardware bpool (buffer-pool). + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mrvl_flush_bpool(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct pp2_hif *hif; + uint32_t num; + int ret; + unsigned int core_id = rte_lcore_id(); + + if (core_id == LCORE_ID_ANY) + core_id = 0; + + hif = mrvl_get_hif(priv, core_id); + + ret = pp2_bpool_get_num_buffs(priv->bpool, &num); + if (ret) { + MRVL_LOG(ERR, "Failed to get bpool buffers number"); + return; + } + + while (num--) { + struct pp2_buff_inf inf; + uint64_t addr; + + ret = pp2_bpool_get_buff(hif, priv->bpool, &inf); + if (ret) + break; + + addr = cookie_addr_high | inf.cookie; + rte_pktmbuf_free((struct rte_mbuf *)addr); + } +} + +/** + * DPDK callback to stop the device. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mrvl_dev_stop(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + mrvl_dev_set_link_down(dev); + mrvl_flush_rx_queues(dev); + mrvl_flush_tx_shadow_queues(dev); + if (priv->cls_tbl) { + pp2_cls_tbl_deinit(priv->cls_tbl); + priv->cls_tbl = NULL; + } + if (priv->qos_tbl) { + pp2_cls_qos_tbl_deinit(priv->qos_tbl); + priv->qos_tbl = NULL; + } + if (priv->ppio) + pp2_ppio_deinit(priv->ppio); + priv->ppio = NULL; + + /* policer must be released after ppio deinitialization */ + if (priv->policer) { + pp2_cls_plcr_deinit(priv->policer); + priv->policer = NULL; + } +} + +/** + * DPDK callback to close the device. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mrvl_dev_close(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + size_t i; + + for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) { + struct pp2_ppio_tc_params *tc_params = + &priv->ppio_params.inqs_params.tcs_params[i]; + + if (tc_params->inqs_params) { + rte_free(tc_params->inqs_params); + tc_params->inqs_params = NULL; + } + } + + mrvl_flush_bpool(dev); +} + +/** + * DPDK callback to retrieve physical link information. + * + * @param dev + * Pointer to Ethernet device structure. + * @param wait_to_complete + * Wait for request completion (ignored). + * + * @return + * 0 on success, negative error value otherwise. 
+ */
+static int
+mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
+{
+	/*
+	 * TODO
+	 * once MUSDK provides necessary API use it here
+	 */
+	struct mrvl_priv *priv = dev->data->dev_private;
+	struct ethtool_cmd edata;
+	struct ifreq req;
+	int ret, fd, link_up;
+
+	if (!priv->ppio)
+		return -EPERM;
+
+	edata.cmd = ETHTOOL_GSET;
+
+	strcpy(req.ifr_name, dev->data->name);
+	req.ifr_data = (void *)&edata;
+
+	fd = socket(AF_INET, SOCK_DGRAM, 0);
+	if (fd == -1)
+		return -EFAULT;
+
+	ret = ioctl(fd, SIOCETHTOOL, &req);
+	if (ret == -1) {
+		close(fd);
+		return -EFAULT;
+	}
+
+	close(fd);
+
+	switch (ethtool_cmd_speed(&edata)) {
+	case SPEED_10:
+		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+		break;
+	case SPEED_100:
+		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+		break;
+	case SPEED_1000:
+		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+		break;
+	case SPEED_10000:
+		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
+		break;
+	default:
+		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+	}
+
+	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
+							 ETH_LINK_HALF_DUPLEX;
+	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
+							   ETH_LINK_FIXED;
+	pp2_ppio_get_link_state(priv->ppio, &link_up);
+	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+	return 0;
+}
+
+/**
+ * DPDK callback to enable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret;
+
+	if (!priv->ppio)
+		return;
+
+	if (priv->isolated)
+		return;
+
+	ret = pp2_ppio_set_promisc(priv->ppio, 1);
+	if (ret)
+		MRVL_LOG(ERR, "Failed to enable promiscuous mode");
+}
+
+/**
+ * DPDK callback to enable all-multicast mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_allmulticast_enable(struct rte_eth_dev *dev)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret;
+
+	if (!priv->ppio)
+		return;
+
+	if (priv->isolated)
+		return;
+
+	ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
+	if (ret)
+		MRVL_LOG(ERR, "Failed to enable all-multicast mode");
+}
+
+/**
+ * DPDK callback to disable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_promiscuous_disable(struct rte_eth_dev *dev)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret;
+
+	if (!priv->ppio)
+		return;
+
+	ret = pp2_ppio_set_promisc(priv->ppio, 0);
+	if (ret)
+		MRVL_LOG(ERR, "Failed to disable promiscuous mode");
+}
+
+/**
+ * DPDK callback to disable all-multicast mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_allmulticast_disable(struct rte_eth_dev *dev)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret;
+
+	if (!priv->ppio)
+		return;
+
+	ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
+	if (ret)
+		MRVL_LOG(ERR, "Failed to disable all-multicast mode");
+}
+
+/**
+ * DPDK callback to remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param index
+ * MAC address index.
+ */
+static void
+mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	char buf[ETHER_ADDR_FMT_SIZE];
+	int ret;
+
+	if (!priv->ppio)
+		return;
+
+	if (priv->isolated)
+		return;
+
+	ret = pp2_ppio_remove_mac_addr(priv->ppio,
+				       dev->data->mac_addrs[index].addr_bytes);
+	if (ret) {
+		ether_format_addr(buf, sizeof(buf),
+				  &dev->data->mac_addrs[index]);
+		MRVL_LOG(ERR, "Failed to remove mac %s", buf);
+	}
+}
+
+/**
+ * DPDK callback to add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ * @param vmdq
+ * VMDq pool index to associate address with (unused).
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+		  uint32_t index, uint32_t vmdq __rte_unused)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	char buf[ETHER_ADDR_FMT_SIZE];
+	int ret;
+
+	if (priv->isolated)
+		return -ENOTSUP;
+
+	if (index == 0)
+		/* For setting index 0, mrvl_mac_addr_set() should be used. */
+		return -1;
+
+	if (!priv->ppio)
+		return 0;
+
+	/*
+	 * The maximum number of uc addresses can be tuned via the kernel
+	 * module mvpp2x parameter uc_filter_max. The maximum number of mc
+	 * addresses is then MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently
+	 * they default to 4 and 21 respectively.
+	 *
+	 * If more than uc_filter_max uc addresses are added to the filter
+	 * list, the NIC switches to promiscuous mode automatically.
+	 *
+	 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max mc addresses are
+	 * added to the filter list, the NIC switches to all-multicast mode
+	 * automatically.
+	 */
+	ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
+	if (ret) {
+		ether_format_addr(buf, sizeof(buf), mac_addr);
+		MRVL_LOG(ERR, "Failed to add mac %s", buf);
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * DPDK callback to set the primary MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret;
+
+	if (!priv->ppio)
+		return 0;
+
+	if (priv->isolated)
+		return -ENOTSUP;
+
+	ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
+	if (ret) {
+		char buf[ETHER_ADDR_FMT_SIZE];
+
+		ether_format_addr(buf, sizeof(buf), mac_addr);
+		MRVL_LOG(ERR, "Failed to set mac to %s", buf);
+	}
+
+	return ret;
+}
+
+/**
+ * DPDK callback to get device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param stats
+ * Stats structure output buffer.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
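+ *
+ * Illustrative application-side usage (assumes a valid "port_id" and
+ * <inttypes.h> for PRIu64):
+ *
+ * @code
+ * struct rte_eth_stats st;
+ *
+ * if (rte_eth_stats_get(port_id, &st) == 0)
+ *	printf("rx: %" PRIu64 " drops: %" PRIu64 "\n",
+ *	       st.ipackets, st.imissed);
+ * @endcode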
+ */
+static int
+mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	struct pp2_ppio_statistics ppio_stats;
+	uint64_t drop_mac = 0;
+	unsigned int i, idx, ret;
+
+	if (!priv->ppio)
+		return -EPERM;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct mrvl_rxq *rxq = dev->data->rx_queues[i];
+		struct pp2_ppio_inq_statistics rx_stats;
+
+		if (!rxq)
+			continue;
+
+		idx = rxq->queue_id;
+		if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
+			MRVL_LOG(ERR,
+				"rx queue %d stats out of range (0 - %d)",
+				idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+			continue;
+		}
+
+		ret = pp2_ppio_inq_get_statistics(priv->ppio,
+						  priv->rxq_map[idx].tc,
+						  priv->rxq_map[idx].inq,
+						  &rx_stats, 0);
+		if (unlikely(ret)) {
+			MRVL_LOG(ERR,
+				"Failed to update rx queue %d stats", idx);
+			break;
+		}
+
+		stats->q_ibytes[idx] = rxq->bytes_recv;
+		stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
+		stats->q_errors[idx] = rx_stats.drop_early +
+				       rx_stats.drop_fullq +
+				       rx_stats.drop_bm +
+				       rxq->drop_mac;
+		stats->ibytes += rxq->bytes_recv;
+		drop_mac += rxq->drop_mac;
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct mrvl_txq *txq = dev->data->tx_queues[i];
+		struct pp2_ppio_outq_statistics tx_stats;
+
+		if (!txq)
+			continue;
+
+		idx = txq->queue_id;
+		if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
+			MRVL_LOG(ERR,
+				"tx queue %d stats out of range (0 - %d)",
+				idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+			/* skip this queue to avoid writing past the
+			 * per-queue stats arrays
+			 */
+			continue;
+		}
+
+		ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
+						   &tx_stats, 0);
+		if (unlikely(ret)) {
+			MRVL_LOG(ERR,
+				"Failed to update tx queue %d stats", idx);
+			break;
+		}
+
+		stats->q_opackets[idx] = tx_stats.deq_desc;
+		stats->q_obytes[idx] = txq->bytes_sent;
+		stats->obytes += txq->bytes_sent;
+	}
+
+	ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
+	if (unlikely(ret)) {
+		MRVL_LOG(ERR, "Failed to update port statistics");
+		return ret;
+	}
+
+	stats->ipackets += ppio_stats.rx_packets - drop_mac;
+	stats->opackets += ppio_stats.tx_packets;
+	stats->imissed += ppio_stats.rx_fullq_dropped +
+			  ppio_stats.rx_bm_dropped +
+			  ppio_stats.rx_early_dropped +
+			  ppio_stats.rx_fifo_dropped +
+			  ppio_stats.rx_cls_dropped;
+	stats->ierrors = drop_mac;
+
+	return 0;
+}
+
+/**
+ * DPDK callback to clear device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_stats_reset(struct rte_eth_dev *dev)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int i;
+
+	if (!priv->ppio)
+		return;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct mrvl_rxq *rxq = dev->data->rx_queues[i];
+
+		pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
+					    priv->rxq_map[i].inq, NULL, 1);
+		rxq->bytes_recv = 0;
+		rxq->drop_mac = 0;
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct mrvl_txq *txq = dev->data->tx_queues[i];
+
+		pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
+		txq->bytes_sent = 0;
+	}
+
+	pp2_ppio_get_statistics(priv->ppio, NULL, 1);
+}
+
+/**
+ * DPDK callback to get extended statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param stats
+ * Pointer to xstats table.
+ * @param n
+ * Number of entries in xstats table.
+ * @return
+ * Negative value on error, number of read xstats otherwise.
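+ *
+ * Illustrative application-side usage (assumes a valid "port_id"; a first
+ * call with a NULL table can be used to discover the required size):
+ *
+ * @code
+ * struct rte_eth_xstat xstats[64];
+ *
+ * int n = rte_eth_xstats_get(port_id, xstats, RTE_DIM(xstats));
+ * @endcode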
+ */ +static int +mrvl_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *stats, unsigned int n) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct pp2_ppio_statistics ppio_stats; + unsigned int i; + + if (!stats) + return 0; + + pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0); + for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) { + uint64_t val; + + if (mrvl_xstats_tbl[i].size == sizeof(uint32_t)) + val = *(uint32_t *)((uint8_t *)&ppio_stats + + mrvl_xstats_tbl[i].offset); + else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t)) + val = *(uint64_t *)((uint8_t *)&ppio_stats + + mrvl_xstats_tbl[i].offset); + else + return -EINVAL; + + stats[i].id = i; + stats[i].value = val; + } + + return n; +} + +/** + * DPDK callback to reset extended statistics. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mrvl_xstats_reset(struct rte_eth_dev *dev) +{ + mrvl_stats_reset(dev); +} + +/** + * DPDK callback to get extended statistics names. + * + * @param dev (unused) + * Pointer to Ethernet device structure. + * @param xstats_names + * Pointer to xstats names table. + * @param size + * Size of the xstats names table. + * @return + * Number of read names. + */ +static int +mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_xstat_name *xstats_names, + unsigned int size) +{ + unsigned int i; + + if (!xstats_names) + return RTE_DIM(mrvl_xstats_tbl); + + for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++) + snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s", + mrvl_xstats_tbl[i].name); + + return size; +} + +/** + * DPDK callback to get information about the device. + * + * @param dev + * Pointer to Ethernet device structure (unused). + * @param info + * Info structure output buffer. + */ +static void +mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_dev_info *info) +{ + info->speed_capa = ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G; + + info->max_rx_queues = MRVL_PP2_RXQ_MAX; + info->max_tx_queues = MRVL_PP2_TXQ_MAX; + info->max_mac_addrs = MRVL_MAC_ADDRS_MAX; + + info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX; + info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN; + info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN; + + info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX; + info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN; + info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN; + + info->rx_offload_capa = MRVL_RX_OFFLOADS; + info->rx_queue_offload_capa = MRVL_RX_OFFLOADS; + + info->tx_offload_capa = MRVL_TX_OFFLOADS; + info->tx_queue_offload_capa = MRVL_TX_OFFLOADS; + + info->flow_type_rss_offloads = ETH_RSS_IPV4 | + ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_NONFRAG_IPV4_UDP; + + /* By default packets are dropped if no descriptors are available */ + info->default_rxconf.rx_drop_en = 1; + info->default_rxconf.offloads = DEV_RX_OFFLOAD_CRC_STRIP; + + info->max_rx_pktlen = MRVL_PKT_SIZE_MAX; +} + +/** + * Return supported packet types. + * + * @param dev + * Pointer to Ethernet device structure (unused). + * + * @return + * Const pointer to the table with supported packet types. 
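+ *
+ * Illustrative application-side usage (assumes a valid "port_id"):
+ *
+ * @code
+ * uint32_t ptypes[16];
+ *
+ * int n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
+ *					     ptypes, RTE_DIM(ptypes));
+ * @endcode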
+ */
+static const uint32_t *
+mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+	static const uint32_t ptypes[] = {
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L3_IPV4,
+		RTE_PTYPE_L3_IPV4_EXT,
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_L3_IPV6,
+		RTE_PTYPE_L3_IPV6_EXT,
+		RTE_PTYPE_L2_ETHER_ARP,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP
+	};
+
+	return ptypes;
+}
+
+/**
+ * DPDK callback to get information about specific receive queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * Receive queue index.
+ * @param qinfo
+ * Receive queue information structure.
+ */
+static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+			      struct rte_eth_rxq_info *qinfo)
+{
+	struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int inq = priv->rxq_map[rx_queue_id].inq;
+	int tc = priv->rxq_map[rx_queue_id].tc;
+	struct pp2_ppio_tc_params *tc_params =
+		&priv->ppio_params.inqs_params.tcs_params[tc];
+
+	qinfo->mp = q->mp;
+	qinfo->nb_desc = tc_params->inqs_params[inq].size;
+}
+
+/**
+ * DPDK callback to get information about specific transmit queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param tx_queue_id
+ * Transmit queue index.
+ * @param qinfo
+ * Transmit queue information structure.
+ */
+static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+			      struct rte_eth_txq_info *qinfo)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id];
+
+	qinfo->nb_desc =
+		priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
+	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+/**
+ * DPDK callback to configure a VLAN filter.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param vlan_id
+ * VLAN ID to filter.
+ * @param on
+ * Toggle filter.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+
+	if (!priv->ppio)
+		return -EPERM;
+
+	if (priv->isolated)
+		return -ENOTSUP;
+
+	return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
+		    pp2_ppio_remove_vlan(priv->ppio, vlan_id);
+}
+
+/**
+ * Release buffers to the hardware bpool (buffer-pool).
+ *
+ * @param rxq
+ * Receive queue pointer.
+ * @param num
+ * Number of buffers to release to the bpool.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
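+ *
+ * Note: only the low 32 bits of an mbuf's virtual address are stored in
+ * the hardware buffer cookie; the shared high 32 bits are kept in
+ * cookie_addr_high, so the original pointer is recovered as in this
+ * short sketch (mirroring the driver's own pattern):
+ *
+ * @code
+ * uint64_t addr = cookie_addr_high | inf.cookie;
+ * struct rte_mbuf *mbuf = (struct rte_mbuf *)addr;
+ * @endcode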
+ */
+static int
+mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
+{
+	struct buff_release_entry entries[MRVL_PP2_RXD_MAX];
+	struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX];
+	int i, ret;
+	unsigned int core_id;
+	struct pp2_hif *hif;
+	struct pp2_bpool *bpool;
+
+	core_id = rte_lcore_id();
+	if (core_id == LCORE_ID_ANY)
+		core_id = 0;
+
+	hif = mrvl_get_hif(rxq->priv, core_id);
+	if (!hif)
+		return -1;
+
+	bpool = rxq->priv->bpool;
+
+	ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
+	if (ret)
+		return ret;
+
+	if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
+		cookie_addr_high =
+			(uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;
+
+	for (i = 0; i < num; i++) {
+		if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
+			!= cookie_addr_high) {
+			MRVL_LOG(ERR,
+				"mbuf virtual addr high 0x%lx out of range",
+				(uint64_t)mbufs[i] >> 32);
+			goto out;
+		}
+
+		entries[i].buff.addr =
+			rte_mbuf_data_iova_default(mbufs[i]);
+		entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
+		entries[i].bpool = bpool;
+	}
+
+	pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
+	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;
+
+	if (i != num)
+		goto out;
+
+	return 0;
+out:
+	for (; i < num; i++)
+		rte_pktmbuf_free(mbufs[i]);
+
+	return -1;
+}
+
+/**
+ * DPDK callback to configure the receive queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param conf
+ * Thresholds parameters.
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+		    unsigned int socket,
+		    const struct rte_eth_rxconf *conf,
+		    struct rte_mempool *mp)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	struct mrvl_rxq *rxq;
+	uint32_t min_size,
+		 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	int ret, tc, inq;
+	uint64_t offloads;
+
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+	if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
+		/*
+		 * Unknown TC mapping: this queue was not mapped to any
+		 * traffic class, so a correct queue cannot be selected.
+		 */
+		MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
+			idx, priv->ppio_id);
+		return -EFAULT;
+	}
+
+	min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
+		   MRVL_PKT_EFFEC_OFFS;
+	if (min_size < max_rx_pkt_len) {
+		MRVL_LOG(ERR,
+			"Mbuf size must be increased to %u bytes to hold up to %u bytes of data.",
+			max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
+			MRVL_PKT_EFFEC_OFFS,
+			max_rx_pkt_len);
+		return -EINVAL;
+	}
+
+	if (dev->data->rx_queues[idx]) {
+		rte_free(dev->data->rx_queues[idx]);
+		dev->data->rx_queues[idx] = NULL;
+	}
+
+	rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
+	if (!rxq)
+		return -ENOMEM;
+
+	rxq->priv = priv;
+	rxq->mp = mp;
+	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+	rxq->queue_id = idx;
+	rxq->port_id = dev->data->port_id;
+	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
+
+	tc = priv->rxq_map[rxq->queue_id].tc;
+	inq = priv->rxq_map[rxq->queue_id].inq;
+	priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
+		desc;
+
+	ret = mrvl_fill_bpool(rxq, desc);
+	if (ret) {
+		rte_free(rxq);
+		return ret;
+	}
+
+	priv->bpool_init_size += desc;
+
+	dev->data->rx_queues[idx] = rxq;
+
+	return 0;
+}
+
+/**
+ * DPDK callback to release the receive queue.
+ * + * @param rxq + * Generic receive queue pointer. + */ +static void +mrvl_rx_queue_release(void *rxq) +{ + struct mrvl_rxq *q = rxq; + struct pp2_ppio_tc_params *tc_params; + int i, num, tc, inq; + struct pp2_hif *hif; + unsigned int core_id = rte_lcore_id(); + + if (core_id == LCORE_ID_ANY) + core_id = 0; + + if (!q) + return; + + hif = mrvl_get_hif(q->priv, core_id); + + if (!hif) + return; + + tc = q->priv->rxq_map[q->queue_id].tc; + inq = q->priv->rxq_map[q->queue_id].inq; + tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc]; + num = tc_params->inqs_params[inq].size; + for (i = 0; i < num; i++) { + struct pp2_buff_inf inf; + uint64_t addr; + + pp2_bpool_get_buff(hif, q->priv->bpool, &inf); + addr = cookie_addr_high | inf.cookie; + rte_pktmbuf_free((struct rte_mbuf *)addr); + } + + rte_free(q); +} + +/** + * DPDK callback to configure the transmit queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * Transmit queue index. + * @param desc + * Number of descriptors to configure in the queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param conf + * Tx queue configuration parameters. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, + const struct rte_eth_txconf *conf) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_txq *txq; + + if (dev->data->tx_queues[idx]) { + rte_free(dev->data->tx_queues[idx]); + dev->data->tx_queues[idx] = NULL; + } + + txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket); + if (!txq) + return -ENOMEM; + + txq->priv = priv; + txq->queue_id = idx; + txq->port_id = dev->data->port_id; + txq->tx_deferred_start = conf->tx_deferred_start; + dev->data->tx_queues[idx] = txq; + + priv->ppio_params.outqs_params.outqs_params[idx].size = desc; + + return 0; +} + +/** + * DPDK callback to release the transmit queue. + * + * @param txq + * Generic transmit queue pointer. + */ +static void +mrvl_tx_queue_release(void *txq) +{ + struct mrvl_txq *q = txq; + + if (!q) + return; + + rte_free(q); +} + +/** + * DPDK callback to get flow control configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * @param fc_conf + * Pointer to the flow control configuration. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret, en; + + if (!priv) + return -EPERM; + + ret = pp2_ppio_get_rx_pause(priv->ppio, &en); + if (ret) { + MRVL_LOG(ERR, "Failed to read rx pause state"); + return ret; + } + + fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE; + + return 0; +} + +/** + * DPDK callback to set flow control configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * @param fc_conf + * Pointer to the flow control configuration. + * + * @return + * 0 on success, negative error value otherwise. 
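+ *
+ * Only the "mode" field is honoured here (RTE_FC_NONE or
+ * RTE_FC_RX_PAUSE); an illustrative application-side call, assuming a
+ * valid "port_id":
+ *
+ * @code
+ * struct rte_eth_fc_conf fc = { .mode = RTE_FC_RX_PAUSE };
+ *
+ * rte_eth_dev_flow_ctrl_set(port_id, &fc);
+ * @endcode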
+ */
+static int
+mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+
+	if (!priv)
+		return -EPERM;
+
+	if (fc_conf->high_water ||
+	    fc_conf->low_water ||
+	    fc_conf->pause_time ||
+	    fc_conf->mac_ctrl_frame_fwd ||
+	    fc_conf->autoneg) {
+		MRVL_LOG(ERR, "Flowctrl parameter is not supported");
+
+		return -EINVAL;
+	}
+
+	if (fc_conf->mode == RTE_FC_NONE ||
+	    fc_conf->mode == RTE_FC_RX_PAUSE) {
+		int ret, en;
+
+		en = fc_conf->mode == RTE_FC_NONE ? 0 : 1;
+		ret = pp2_ppio_set_rx_pause(priv->ppio, en);
+		if (ret)
+			MRVL_LOG(ERR,
+				"Failed to change flowctrl on RX side");
+
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * Update RSS hash configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rss_conf
+ * Pointer to RSS configuration.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_rss_hash_update(struct rte_eth_dev *dev,
+		     struct rte_eth_rss_conf *rss_conf)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+
+	if (priv->isolated)
+		return -ENOTSUP;
+
+	return mrvl_configure_rss(priv, rss_conf);
+}
+
+/**
+ * DPDK callback to get RSS hash configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rss_conf
+ * Pointer to RSS configuration.
+ *
+ * @return
+ * Always 0.
+ */
+static int
+mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
+		       struct rte_eth_rss_conf *rss_conf)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	enum pp2_ppio_hash_type hash_type =
+		priv->ppio_params.inqs_params.hash_type;
+
+	rss_conf->rss_key = NULL;
+
+	if (hash_type == PP2_PPIO_HASH_T_NONE)
+		rss_conf->rss_hf = 0;
+	else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
+		rss_conf->rss_hf = ETH_RSS_IPV4;
+	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
+		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
+	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
+		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
+
+	return 0;
+}
+
+/**
+ * DPDK callback to get rte_flow callbacks.
+ *
+ * @param dev
+ * Pointer to the device structure.
+ * @param filter_type
+ * Flow filter type.
+ * @param filter_op
+ * Flow filter operation.
+ * @param arg
+ * Pointer to pass the flow ops.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
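+ *
+ * Applications normally reach this through the generic rte_flow API
+ * rather than calling it directly; an illustrative sketch (the attr,
+ * pattern and actions variables are hypothetical):
+ *
+ * @code
+ * struct rte_flow_error err;
+ * struct rte_flow *f;
+ *
+ * f = rte_flow_create(port_id, &attr, pattern, actions, &err);
+ * @endcode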
+ */
+static int
+mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op, void *arg)
+{
+	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &mrvl_flow_ops;
+		return 0;
+	default:
+		MRVL_LOG(WARNING, "Filter type (%d) not supported",
+			filter_type);
+		return -EINVAL;
+	}
+}
+
+static const struct eth_dev_ops mrvl_ops = {
+	.dev_configure = mrvl_dev_configure,
+	.dev_start = mrvl_dev_start,
+	.dev_stop = mrvl_dev_stop,
+	.dev_set_link_up = mrvl_dev_set_link_up,
+	.dev_set_link_down = mrvl_dev_set_link_down,
+	.dev_close = mrvl_dev_close,
+	.link_update = mrvl_link_update,
+	.promiscuous_enable = mrvl_promiscuous_enable,
+	.allmulticast_enable = mrvl_allmulticast_enable,
+	.promiscuous_disable = mrvl_promiscuous_disable,
+	.allmulticast_disable = mrvl_allmulticast_disable,
+	.mac_addr_remove = mrvl_mac_addr_remove,
+	.mac_addr_add = mrvl_mac_addr_add,
+	.mac_addr_set = mrvl_mac_addr_set,
+	.mtu_set = mrvl_mtu_set,
+	.stats_get = mrvl_stats_get,
+	.stats_reset = mrvl_stats_reset,
+	.xstats_get = mrvl_xstats_get,
+	.xstats_reset = mrvl_xstats_reset,
+	.xstats_get_names = mrvl_xstats_get_names,
+	.dev_infos_get = mrvl_dev_infos_get,
+	.dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
+	.rxq_info_get = mrvl_rxq_info_get,
+	.txq_info_get = mrvl_txq_info_get,
+	.vlan_filter_set = mrvl_vlan_filter_set,
+	.tx_queue_start = mrvl_tx_queue_start,
+	.tx_queue_stop = mrvl_tx_queue_stop,
+	.rx_queue_setup = mrvl_rx_queue_setup,
+	.rx_queue_release = mrvl_rx_queue_release,
+	.tx_queue_setup = mrvl_tx_queue_setup,
+	.tx_queue_release = mrvl_tx_queue_release,
+	.flow_ctrl_get = mrvl_flow_ctrl_get,
+	.flow_ctrl_set = mrvl_flow_ctrl_set,
+	.rss_hash_update = mrvl_rss_hash_update,
+	.rss_hash_conf_get = mrvl_rss_hash_conf_get,
+	.filter_ctrl = mrvl_eth_filter_ctrl,
+};
+
+/**
+ * Return packet type information and l3/l4 offsets.
+ *
+ * @param desc
+ * Pointer to the received packet descriptor.
+ * @param l3_offset
+ * l3 packet offset.
+ * @param l4_offset
+ * l4 packet offset.
+ *
+ * @return
+ * Packet type information.
+ */
+static inline uint64_t
+mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
+				    uint8_t *l3_offset, uint8_t *l4_offset)
+{
+	enum pp2_inq_l3_type l3_type;
+	enum pp2_inq_l4_type l4_type;
+	uint64_t packet_type;
+
+	pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
+	pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
+
+	packet_type = RTE_PTYPE_L2_ETHER;
+
+	switch (l3_type) {
+	case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
+		packet_type |= RTE_PTYPE_L3_IPV4;
+		break;
+	case PP2_INQ_L3_TYPE_IPV4_OK:
+		packet_type |= RTE_PTYPE_L3_IPV4_EXT;
+		break;
+	case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
+		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+		break;
+	case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
+		packet_type |= RTE_PTYPE_L3_IPV6;
+		break;
+	case PP2_INQ_L3_TYPE_IPV6_EXT:
+		packet_type |= RTE_PTYPE_L3_IPV6_EXT;
+		break;
+	case PP2_INQ_L3_TYPE_ARP:
+		packet_type |= RTE_PTYPE_L2_ETHER_ARP;
+		/*
+		 * In case of ARP, l4_offset is set to a wrong value. Set it
+		 * to a proper one so that mbuf->l3_len can later be computed
+		 * by subtracting l3_offset from l4_offset.
+ */ + *l4_offset = *l3_offset + MRVL_ARP_LENGTH; + break; + default: + MRVL_LOG(DEBUG, "Failed to recognise l3 packet type"); + break; + } + + switch (l4_type) { + case PP2_INQ_L4_TYPE_TCP: + packet_type |= RTE_PTYPE_L4_TCP; + break; + case PP2_INQ_L4_TYPE_UDP: + packet_type |= RTE_PTYPE_L4_UDP; + break; + default: + MRVL_LOG(DEBUG, "Failed to recognise l4 packet type"); + break; + } + + return packet_type; +} + +/** + * Get offload information from the received packet descriptor. + * + * @param desc + * Pointer to the received packet descriptor. + * + * @return + * Mbuf offload flags. + */ +static inline uint64_t +mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc) +{ + uint64_t flags; + enum pp2_inq_desc_status status; + + status = pp2_ppio_inq_desc_get_l3_pkt_error(desc); + if (unlikely(status != PP2_DESC_ERR_OK)) + flags = PKT_RX_IP_CKSUM_BAD; + else + flags = PKT_RX_IP_CKSUM_GOOD; + + status = pp2_ppio_inq_desc_get_l4_pkt_error(desc); + if (unlikely(status != PP2_DESC_ERR_OK)) + flags |= PKT_RX_L4_CKSUM_BAD; + else + flags |= PKT_RX_L4_CKSUM_GOOD; + + return flags; +} + +/** + * DPDK callback for receive. + * + * @param rxq + * Generic pointer to the receive queue. + * @param rx_pkts + * Array to store received packets. + * @param nb_pkts + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received. + */ +static uint16_t +mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct mrvl_rxq *q = rxq; + struct pp2_ppio_desc descs[nb_pkts]; + struct pp2_bpool *bpool; + int i, ret, rx_done = 0; + int num; + struct pp2_hif *hif; + unsigned int core_id = rte_lcore_id(); + + hif = mrvl_get_hif(q->priv, core_id); + + if (unlikely(!q->priv->ppio || !hif)) + return 0; + + bpool = q->priv->bpool; + + ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc, + q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts); + if (unlikely(ret < 0)) { + MRVL_LOG(ERR, "Failed to receive packets"); + return 0; + } + mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts; + + for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *mbuf; + uint8_t l3_offset, l4_offset; + enum pp2_inq_desc_status status; + uint64_t addr; + + if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { + struct pp2_ppio_desc *pref_desc; + u64 pref_addr; + + pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT]; + pref_addr = cookie_addr_high | + pp2_ppio_inq_desc_get_cookie(pref_desc); + rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr)); + rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr)); + } + + addr = cookie_addr_high | + pp2_ppio_inq_desc_get_cookie(&descs[i]); + mbuf = (struct rte_mbuf *)addr; + rte_pktmbuf_reset(mbuf); + + /* drop packet in case of mac, overrun or resource error */ + status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]); + if (unlikely(status != PP2_DESC_ERR_OK)) { + struct pp2_buff_inf binf = { + .addr = rte_mbuf_data_iova_default(mbuf), + .cookie = (pp2_cookie_t)(uint64_t)mbuf, + }; + + pp2_bpool_put_buff(hif, bpool, &binf); + mrvl_port_bpool_size + [bpool->pp2_id][bpool->id][core_id]++; + q->drop_mac++; + continue; + } + + mbuf->data_off += MRVL_PKT_EFFEC_OFFS; + mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]); + mbuf->data_len = mbuf->pkt_len; + mbuf->port = q->port_id; + mbuf->packet_type = + mrvl_desc_to_packet_type_and_offset(&descs[i], + &l3_offset, + &l4_offset); + mbuf->l2_len = l3_offset; + mbuf->l3_len = l4_offset - l3_offset; + + if (likely(q->cksum_enabled)) + mbuf->ol_flags = 
				mrvl_desc_to_ol_flags(&descs[i]);
+
+		rx_pkts[rx_done++] = mbuf;
+		q->bytes_recv += mbuf->pkt_len;
+	}
+
+	if (rte_spinlock_trylock(&q->priv->lock) == 1) {
+		num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);
+
+		if (unlikely(num <= q->priv->bpool_min_size ||
+			     (!rx_done && num < q->priv->bpool_init_size))) {
+			ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
+			if (ret)
+				MRVL_LOG(ERR, "Failed to fill bpool");
+		} else if (unlikely(num > q->priv->bpool_max_size)) {
+			int i;
+			int pkt_to_remove = num - q->priv->bpool_init_size;
+			struct rte_mbuf *mbuf;
+			struct pp2_buff_inf buff;
+
+			MRVL_LOG(DEBUG,
+				"port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)",
+				bpool->pp2_id, q->priv->ppio->port_id,
+				bpool->id, pkt_to_remove, num,
+				q->priv->bpool_init_size);
+
+			for (i = 0; i < pkt_to_remove; i++) {
+				ret = pp2_bpool_get_buff(hif, bpool, &buff);
+				if (ret)
+					break;
+				mbuf = (struct rte_mbuf *)
+					(cookie_addr_high | buff.cookie);
+				rte_pktmbuf_free(mbuf);
+			}
+			mrvl_port_bpool_size
+				[bpool->pp2_id][bpool->id][core_id] -= i;
+		}
+		rte_spinlock_unlock(&q->priv->lock);
+	}
+
+	return rx_done;
+}
+
+/**
+ * Prepare offload information.
+ *
+ * @param ol_flags
+ * Offload flags.
+ * @param packet_type
+ * Packet type bitfield.
+ * @param l3_type
+ * Pointer to the pp2_outq_l3_type structure.
+ * @param l4_type
+ * Pointer to the pp2_outq_l4_type structure.
+ * @param gen_l3_cksum
+ * Will be set to 1 in case the l3 checksum is computed.
+ * @param gen_l4_cksum
+ * Will be set to 1 in case the l4 checksum is computed.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static inline int
+mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
+			enum pp2_outq_l3_type *l3_type,
+			enum pp2_outq_l4_type *l4_type,
+			int *gen_l3_cksum,
+			int *gen_l4_cksum)
+{
+	/*
+	 * Based on ol_flags, prepare information for
+	 * pp2_ppio_outq_desc_set_proto_info(), which sets up the descriptor
+	 * for offloading.
+	 */
+	if (ol_flags & PKT_TX_IPV4) {
+		*l3_type = PP2_OUTQ_L3_TYPE_IPV4;
+		*gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
+	} else if (ol_flags & PKT_TX_IPV6) {
+		*l3_type = PP2_OUTQ_L3_TYPE_IPV6;
+		/* no checksum for ipv6 header */
+		*gen_l3_cksum = 0;
+	} else {
+		/* anything else is unsupported - stop processing */
+		return -1;
+	}
+
+	ol_flags &= PKT_TX_L4_MASK;
+	if ((packet_type & RTE_PTYPE_L4_TCP) &&
+	    ol_flags == PKT_TX_TCP_CKSUM) {
+		*l4_type = PP2_OUTQ_L4_TYPE_TCP;
+		*gen_l4_cksum = 1;
+	} else if ((packet_type & RTE_PTYPE_L4_UDP) &&
+		   ol_flags == PKT_TX_UDP_CKSUM) {
+		*l4_type = PP2_OUTQ_L4_TYPE_UDP;
+		*gen_l4_cksum = 1;
+	} else {
+		*l4_type = PP2_OUTQ_L4_TYPE_OTHER;
+		/* no checksum for other type */
+		*gen_l4_cksum = 0;
+	}
+
+	return 0;
+}
+
+/**
+ * Release already sent buffers to the bpool (buffer-pool).
+ *
+ * @param ppio
+ * Pointer to the port structure.
+ * @param hif
+ * Pointer to the MUSDK hardware interface.
+ * @param core_id
+ * Current lcore id (indexes the per-lcore bpool bookkeeping).
+ * @param sq
+ * Pointer to the shadow queue.
+ * @param qid
+ * Queue id number.
+ * @param force
+ * Force releasing packets.
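+ *
+ * Completed transmit buffers are returned to the bpool in batches: the
+ * count of hardware-completed descriptors is accumulated in
+ * sq->num_to_release and nothing is freed until it reaches
+ * MRVL_PP2_BUF_RELEASE_BURST_SIZE (64), unless "force" is set.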
+ */
+static inline void
+mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
+		       unsigned int core_id, struct mrvl_shadow_txq *sq,
+		       int qid, int force)
+{
+	struct buff_release_entry *entry;
+	uint16_t nb_done = 0, num = 0, skip_bufs = 0;
+	int i;
+
+	pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
+
+	sq->num_to_release += nb_done;
+
+	if (likely(!force &&
+		   sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
+		return;
+
+	nb_done = sq->num_to_release;
+	sq->num_to_release = 0;
+
+	for (i = 0; i < nb_done; i++) {
+		entry = &sq->ent[sq->tail + num];
+		if (unlikely(!entry->buff.addr)) {
+			MRVL_LOG(ERR,
+				"Shadow memory @%d: cookie(%lx), pa(%lx)!",
+				sq->tail, (u64)entry->buff.cookie,
+				(u64)entry->buff.addr);
+			skip_bufs = 1;
+			goto skip;
+		}
+
+		if (unlikely(!entry->bpool)) {
+			struct rte_mbuf *mbuf;
+
+			mbuf = (struct rte_mbuf *)
+			       (cookie_addr_high | entry->buff.cookie);
+			rte_pktmbuf_free(mbuf);
+			skip_bufs = 1;
+			goto skip;
+		}
+
+		mrvl_port_bpool_size
+			[entry->bpool->pp2_id][entry->bpool->id][core_id]++;
+		num++;
+		if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
+			goto skip;
+		continue;
+skip:
+		if (likely(num))
+			pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
+		num += skip_bufs;
+		sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
+		sq->size -= num;
+		num = 0;
+		skip_bufs = 0;
+	}
+
+	if (likely(num)) {
+		pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
+		sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
+		sq->size -= num;
+	}
+}
+
+/**
+ * DPDK callback for transmit.
+ *
+ * @param txq
+ * Generic pointer to the transmit queue.
+ * @param tx_pkts
+ * Packets to transmit.
+ * @param nb_pkts
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted.
+ */
+static uint16_t
+mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct mrvl_txq *q = txq;
+	struct mrvl_shadow_txq *sq;
+	struct pp2_hif *hif;
+	struct pp2_ppio_desc descs[nb_pkts];
+	unsigned int core_id = rte_lcore_id();
+	int i, ret, bytes_sent = 0;
+	uint16_t num, sq_free_size;
+	uint64_t addr;
+
+	hif = mrvl_get_hif(q->priv, core_id);
+	sq = &q->shadow_txqs[core_id];
+
+	if (unlikely(!q->priv->ppio || !hif))
+		return 0;
+
+	if (sq->size)
+		mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
+				       sq, q->queue_id, 0);
+
+	sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
+	if (unlikely(nb_pkts > sq_free_size)) {
+		MRVL_LOG(DEBUG,
+			"No room in shadow queue for %d packets! %d packets will be sent.",
+			nb_pkts, sq_free_size);
+		nb_pkts = sq_free_size;
+	}
+
+	for (i = 0; i < nb_pkts; i++) {
+		struct rte_mbuf *mbuf = tx_pkts[i];
+		int gen_l3_cksum, gen_l4_cksum;
+		enum pp2_outq_l3_type l3_type;
+		enum pp2_outq_l4_type l4_type;
+
+		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
+			struct rte_mbuf *pref_pkt_hdr;
+
+			pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
+			rte_mbuf_prefetch_part1(pref_pkt_hdr);
+			rte_mbuf_prefetch_part2(pref_pkt_hdr);
+		}
+
+		sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
+		sq->ent[sq->head].buff.addr =
+			rte_mbuf_data_iova_default(mbuf);
+		sq->ent[sq->head].bpool =
+			(unlikely(mbuf->port >= RTE_MAX_ETHPORTS ||
+			 mbuf->refcnt > 1)) ?
			NULL : mrvl_port_to_bpool_lookup[mbuf->port];
+		sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
+		sq->size++;
+
+		pp2_ppio_outq_desc_reset(&descs[i]);
+		pp2_ppio_outq_desc_set_phys_addr(&descs[i],
+						 rte_pktmbuf_iova(mbuf));
+		pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
+		pp2_ppio_outq_desc_set_pkt_len(&descs[i],
+					       rte_pktmbuf_pkt_len(mbuf));
+
+		bytes_sent += rte_pktmbuf_pkt_len(mbuf);
+		/*
+		 * In case unsupported ol_flags were passed,
+		 * do not update the descriptor offload information.
+		 */
+		ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
+					      &l3_type, &l4_type, &gen_l3_cksum,
+					      &gen_l4_cksum);
+		if (unlikely(ret))
+			continue;
+
+		pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
+						  mbuf->l2_len,
+						  mbuf->l2_len + mbuf->l3_len,
+						  gen_l3_cksum, gen_l4_cksum);
+	}
+
+	num = nb_pkts;
+	pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
+	/* roll back shadow queue entries for packets that were not sent */
+	if (unlikely(num > nb_pkts)) {
+		for (i = nb_pkts; i < num; i++) {
+			sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
+				MRVL_PP2_TX_SHADOWQ_MASK;
+			addr = cookie_addr_high | sq->ent[sq->head].buff.cookie;
+			bytes_sent -=
+				rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
+		}
+		sq->size -= num - nb_pkts;
+	}
+
+	q->bytes_sent += bytes_sent;
+
+	return nb_pkts;
+}
+
+/**
+ * Initialize packet processor.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_init_pp2(void)
+{
+	struct pp2_init_params init_params;
+
+	memset(&init_params, 0, sizeof(init_params));
+	init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
+	init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
+	init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
+
+	return pp2_init(&init_params);
+}
+
+/**
+ * Deinitialize packet processor.
+ */
+static void
+mrvl_deinit_pp2(void)
+{
+	pp2_deinit();
+}
+
+/**
+ * Create private device structure.
+ *
+ * @param dev_name
+ * Pointer to the port name passed in the initialization parameters.
+ *
+ * @return
+ * Pointer to the newly allocated private device structure.
+ */
+static struct mrvl_priv *
+mrvl_priv_create(const char *dev_name)
+{
+	struct pp2_bpool_params bpool_params;
+	char match[MRVL_MATCH_LEN];
+	struct mrvl_priv *priv;
+	int ret, bpool_bit;
+
+	priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
+	if (!priv)
+		return NULL;
+
+	ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
+				       &priv->pp_id, &priv->ppio_id);
+	if (ret)
+		goto out_free_priv;
+
+	bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
+				     PP2_BPOOL_NUM_POOLS);
+	if (bpool_bit < 0)
+		goto out_free_priv;
+	priv->bpool_bit = bpool_bit;
+
+	snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
+		 priv->bpool_bit);
+	memset(&bpool_params, 0, sizeof(bpool_params));
+	bpool_params.match = match;
+	bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
+	ret = pp2_bpool_init(&bpool_params, &priv->bpool);
+	if (ret)
+		goto out_clear_bpool_bit;
+
+	priv->ppio_params.type = PP2_PPIO_T_NIC;
+	rte_spinlock_init(&priv->lock);
+
+	return priv;
+out_clear_bpool_bit:
+	used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
+out_free_priv:
+	rte_free(priv);
+	return NULL;
+}
+
+/**
+ * Create device representing Ethernet port.
+ *
+ * @param vdev
+ * Pointer to the backing virtual device.
+ * @param name
+ * Pointer to the port's name.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */ +static int +mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name) +{ + int ret, fd = socket(AF_INET, SOCK_DGRAM, 0); + struct rte_eth_dev *eth_dev; + struct mrvl_priv *priv; + struct ifreq req; + + eth_dev = rte_eth_dev_allocate(name); + if (!eth_dev) + return -ENOMEM; + + priv = mrvl_priv_create(name); + if (!priv) { + ret = -ENOMEM; + goto out_free_dev; + } + + eth_dev->data->mac_addrs = + rte_zmalloc("mac_addrs", + ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0); + if (!eth_dev->data->mac_addrs) { + MRVL_LOG(ERR, "Failed to allocate space for eth addrs"); + ret = -ENOMEM; + goto out_free_priv; + } + + memset(&req, 0, sizeof(req)); + strcpy(req.ifr_name, name); + ret = ioctl(fd, SIOCGIFHWADDR, &req); + if (ret) + goto out_free_mac; + + memcpy(eth_dev->data->mac_addrs[0].addr_bytes, + req.ifr_addr.sa_data, ETHER_ADDR_LEN); + + eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst; + eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst; + eth_dev->data->kdrv = RTE_KDRV_NONE; + eth_dev->data->dev_private = priv; + eth_dev->device = &vdev->device; + eth_dev->dev_ops = &mrvl_ops; + + rte_eth_dev_probing_finish(eth_dev); + return 0; +out_free_mac: + rte_free(eth_dev->data->mac_addrs); +out_free_dev: + rte_eth_dev_release_port(eth_dev); +out_free_priv: + rte_free(priv); + + return ret; +} + +/** + * Cleanup previously created device representing Ethernet port. + * + * @param name + * Pointer to the port name. + */ +static void +mrvl_eth_dev_destroy(const char *name) +{ + struct rte_eth_dev *eth_dev; + struct mrvl_priv *priv; + + eth_dev = rte_eth_dev_allocated(name); + if (!eth_dev) + return; + + priv = eth_dev->data->dev_private; + pp2_bpool_deinit(priv->bpool); + used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit); + rte_free(priv); + rte_free(eth_dev->data->mac_addrs); + rte_eth_dev_release_port(eth_dev); +} + +/** + * Callback used by rte_kvargs_process() during argument parsing. + * + * @param key + * Pointer to the parsed key (unused). + * @param value + * Pointer to the parsed value. + * @param extra_args + * Pointer to the extra arguments which contains address of the + * table of pointers to parsed interface names. + * + * @return + * Always 0. + */ +static int +mrvl_get_ifnames(const char *key __rte_unused, const char *value, + void *extra_args) +{ + struct mrvl_ifnames *ifnames = extra_args; + + ifnames->names[ifnames->idx++] = value; + + return 0; +} + +/** + * Deinitialize per-lcore MUSDK hardware interfaces (hifs). + */ +static void +mrvl_deinit_hifs(void) +{ + int i; + + for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) { + if (hifs[i]) + pp2_hif_deinit(hifs[i]); + } + used_hifs = MRVL_MUSDK_HIFS_RESERVED; + memset(hifs, 0, sizeof(hifs)); +} + +/** + * DPDK callback to register the virtual device. + * + * @param vdev + * Pointer to the virtual device. + * + * @return + * 0 on success, negative error value otherwise. 
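+ *
+ * The device is instantiated from the EAL command line, e.g. (assuming
+ * the kvargs keys are "iface" and "cfg", and interface names matching
+ * the target board):
+ *
+ *   --vdev=eth_mvpp2,iface=eth0,iface=eth2,cfg=/path/to/qos.conf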
+ */
+static int
+rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
+{
+	struct rte_kvargs *kvlist;
+	struct mrvl_ifnames ifnames;
+	int ret = -EINVAL;
+	uint32_t i, ifnum, cfgnum;
+	const char *params;
+
+	params = rte_vdev_device_args(vdev);
+	if (!params)
+		return -EINVAL;
+
+	kvlist = rte_kvargs_parse(params, valid_args);
+	if (!kvlist)
+		return -EINVAL;
+
+	ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
+	if (ifnum > RTE_DIM(ifnames.names))
+		goto out_free_kvlist;
+
+	ifnames.idx = 0;
+	rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
+			   mrvl_get_ifnames, &ifnames);
+
+	/*
+	 * The system initialization below should be done only once,
+	 * when the first configuration file is provided.
+	 */
+	if (!mrvl_qos_cfg) {
+		cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
+		MRVL_LOG(INFO, "Parsing config file!");
+		if (cfgnum > 1) {
+			MRVL_LOG(ERR, "Cannot handle more than one config file!");
+			goto out_free_kvlist;
+		} else if (cfgnum == 1) {
+			rte_kvargs_process(kvlist, MRVL_CFG_ARG,
+					   mrvl_get_qoscfg, &mrvl_qos_cfg);
+		}
+	}
+
+	if (mrvl_dev_num)
+		goto init_devices;
+
+	MRVL_LOG(INFO, "Perform MUSDK initializations");
+	/*
+	 * ret == -EEXIST is correct, it means DMA
+	 * has already been initialized (by another PMD).
+	 */
+	ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
+	if (ret < 0) {
+		if (ret != -EEXIST)
+			goto out_free_kvlist;
+		else
+			MRVL_LOG(INFO,
+				"DMA memory has already been initialized by a different driver.");
+	}
+
+	ret = mrvl_init_pp2();
+	if (ret) {
+		MRVL_LOG(ERR, "Failed to init PP!");
+		goto out_deinit_dma;
+	}
+
+	memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
+	memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));
+
+	mrvl_lcore_first = RTE_MAX_LCORE;
+	mrvl_lcore_last = 0;
+
+init_devices:
+	for (i = 0; i < ifnum; i++) {
+		MRVL_LOG(INFO, "Creating %s", ifnames.names[i]);
+		ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
+		if (ret)
+			goto out_cleanup;
+	}
+	mrvl_dev_num += ifnum;
+
+	rte_kvargs_free(kvlist);
+
+	return 0;
+out_cleanup:
+	for (; i > 0; i--)
+		mrvl_eth_dev_destroy(ifnames.names[i - 1]);
+
+	if (mrvl_dev_num == 0)
+		mrvl_deinit_pp2();
+out_deinit_dma:
+	if (mrvl_dev_num == 0)
+		mv_sys_dma_mem_destroy();
+out_free_kvlist:
+	rte_kvargs_free(kvlist);
+
+	return ret;
+}
+
+/**
+ * DPDK callback to remove a virtual device.
+ *
+ * @param vdev
+ * Pointer to the removed virtual device.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
+{
+	int i;
+	const char *name;
+
+	name = rte_vdev_device_name(vdev);
+	if (!name)
+		return -EINVAL;
+
+	MRVL_LOG(INFO, "Removing %s", name);
+
+	RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices!
*/ + char ifname[RTE_ETH_NAME_MAX_LEN]; + + rte_eth_dev_get_name_by_port(i, ifname); + mrvl_eth_dev_destroy(ifname); + mrvl_dev_num--; + } + + if (mrvl_dev_num == 0) { + MRVL_LOG(INFO, "Perform MUSDK deinit"); + mrvl_deinit_hifs(); + mrvl_deinit_pp2(); + mv_sys_dma_mem_destroy(); + } + + return 0; +} + +static struct rte_vdev_driver pmd_mrvl_drv = { + .probe = rte_pmd_mrvl_probe, + .remove = rte_pmd_mrvl_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv); +RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2); + +RTE_INIT(mrvl_init_log) +{ + mrvl_logtype = rte_log_register("pmd.net.mvpp2"); + if (mrvl_logtype >= 0) + rte_log_set_level(mrvl_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/mvpp2/mrvl_ethdev.h b/drivers/net/mvpp2/mrvl_ethdev.h new file mode 100644 index 00000000..3726f788 --- /dev/null +++ b/drivers/net/mvpp2/mrvl_ethdev.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Marvell International Ltd. + * Copyright(c) 2017 Semihalf. + * All rights reserved. + */ + +#ifndef _MRVL_ETHDEV_H_ +#define _MRVL_ETHDEV_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include + +/** Maximum number of rx queues per port */ +#define MRVL_PP2_RXQ_MAX 32 + +/** Maximum number of tx queues per port */ +#define MRVL_PP2_TXQ_MAX 8 + +/** Minimum number of descriptors in tx queue */ +#define MRVL_PP2_TXD_MIN 16 + +/** Maximum number of descriptors in tx queue */ +#define MRVL_PP2_TXD_MAX 2048 + +/** Tx queue descriptors alignment */ +#define MRVL_PP2_TXD_ALIGN 16 + +/** Minimum number of descriptors in rx queue */ +#define MRVL_PP2_RXD_MIN 16 + +/** Maximum number of descriptors in rx queue */ +#define MRVL_PP2_RXD_MAX 2048 + +/** Rx queue descriptors alignment */ +#define MRVL_PP2_RXD_ALIGN 16 + +/** Maximum number of descriptors in tx aggregated queue */ +#define MRVL_PP2_AGGR_TXQD_MAX 2048 + +/** Maximum number of Traffic Classes. */ +#define MRVL_PP2_TC_MAX 8 + +/** Packet offset inside RX buffer. */ +#define MRVL_PKT_OFFS 64 + +/** Maximum number of descriptors in shadow queue. Must be power of 2 */ +#define MRVL_PP2_TX_SHADOWQ_SIZE MRVL_PP2_TXD_MAX + +/** Shadow queue size mask (since shadow queue size is power of 2) */ +#define MRVL_PP2_TX_SHADOWQ_MASK (MRVL_PP2_TX_SHADOWQ_SIZE - 1) + +/** Minimum number of sent buffers to release from shadow queue to BM */ +#define MRVL_PP2_BUF_RELEASE_BURST_SIZE 64 + +struct mrvl_priv { + /* Hot fields, used in fast path. */ + struct pp2_bpool *bpool; /**< BPool pointer */ + struct pp2_ppio *ppio; /**< Port handler pointer */ + rte_spinlock_t lock; /**< Spinlock for checking bpool status */ + uint16_t bpool_max_size; /**< BPool maximum size */ + uint16_t bpool_min_size; /**< BPool minimum size */ + uint16_t bpool_init_size; /**< Configured BPool size */ + + /** Mapping for DPDK rx queue->(TC, MRVL relative inq) */ + struct { + uint8_t tc; /**< Traffic Class */ + uint8_t inq; /**< Relative in-queue number */ + } rxq_map[MRVL_PP2_RXQ_MAX] __rte_cache_aligned; + + /* Configuration data, used sporadically. 
*/ + uint8_t pp_id; + uint8_t ppio_id; + uint8_t bpool_bit; + uint8_t rss_hf_tcp; + uint8_t uc_mc_flushed; + uint8_t vlan_flushed; + uint8_t isolated; + + struct pp2_ppio_params ppio_params; + struct pp2_cls_qos_tbl_params qos_tbl_params; + struct pp2_cls_tbl *qos_tbl; + uint16_t nb_rx_queues; + + struct pp2_cls_tbl_params cls_tbl_params; + struct pp2_cls_tbl *cls_tbl; + uint32_t cls_tbl_pattern; + LIST_HEAD(mrvl_flows, rte_flow) flows; + + struct pp2_cls_plcr *policer; +}; + +/** Flow operations forward declaration. */ +extern const struct rte_flow_ops mrvl_flow_ops; + +/** Current log type. */ +extern int mrvl_logtype; + +#define MRVL_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, mrvl_logtype, "%s(): " fmt "\n", \ + __func__, ##args) + +#endif /* _MRVL_ETHDEV_H_ */ diff --git a/drivers/net/mvpp2/mrvl_flow.c b/drivers/net/mvpp2/mrvl_flow.c new file mode 100644 index 00000000..ecc34192 --- /dev/null +++ b/drivers/net/mvpp2/mrvl_flow.c @@ -0,0 +1,2779 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Marvell International Ltd. + * Copyright(c) 2018 Semihalf. + * All rights reserved. + */ + +#include +#include +#include +#include + +#include + +#ifdef container_of +#undef container_of +#endif + +#include "mrvl_ethdev.h" +#include "mrvl_qos.h" +#include "env/mv_common.h" /* for BIT() */ + +/** Number of rules in the classifier table. */ +#define MRVL_CLS_MAX_NUM_RULES 20 + +/** Size of the classifier key and mask strings. */ +#define MRVL_CLS_STR_SIZE_MAX 40 + +/** Parsed fields in processed rte_flow_item. */ +enum mrvl_parsed_fields { + /* eth flags */ + F_DMAC = BIT(0), + F_SMAC = BIT(1), + F_TYPE = BIT(2), + /* vlan flags */ + F_VLAN_ID = BIT(3), + F_VLAN_PRI = BIT(4), + F_VLAN_TCI = BIT(5), /* not supported by MUSDK yet */ + /* ip4 flags */ + F_IP4_TOS = BIT(6), + F_IP4_SIP = BIT(7), + F_IP4_DIP = BIT(8), + F_IP4_PROTO = BIT(9), + /* ip6 flags */ + F_IP6_TC = BIT(10), /* not supported by MUSDK yet */ + F_IP6_SIP = BIT(11), + F_IP6_DIP = BIT(12), + F_IP6_FLOW = BIT(13), + F_IP6_NEXT_HDR = BIT(14), + /* tcp flags */ + F_TCP_SPORT = BIT(15), + F_TCP_DPORT = BIT(16), + /* udp flags */ + F_UDP_SPORT = BIT(17), + F_UDP_DPORT = BIT(18), +}; + +/** PMD-specific definition of a flow rule handle. 
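+ *
+ * Each flow created through the rte_flow API is backed by one MUSDK
+ * classifier table rule together with its class-of-service descriptor
+ * and table action; the handle returned to the application is simply a
+ * pointer to this structure.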
*/ +struct rte_flow { + LIST_ENTRY(rte_flow) next; + + enum mrvl_parsed_fields pattern; + + struct pp2_cls_tbl_rule rule; + struct pp2_cls_cos_desc cos; + struct pp2_cls_tbl_action action; +}; + +static const enum rte_flow_item_type pattern_eth[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_vlan[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_vlan_ip[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_vlan_ip6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip_tcp[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip_udp[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip6[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip6_tcp[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip6_udp[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip[] = { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip6[] = { + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip_tcp[] = { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip6_tcp[] = { + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip_udp[] = { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + 
+static const enum rte_flow_item_type pattern_ip6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_udp[] = {
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END
+};
+
+#define MRVL_VLAN_ID_MASK 0x0fff
+#define MRVL_VLAN_PRI_MASK 0x7000
+#define MRVL_IPV4_DSCP_MASK 0xfc
+#define MRVL_IPV4_ADDR_MASK 0xffffffff
+#define MRVL_IPV6_FLOW_MASK 0x0fffff
+
+/**
+ * Given a flow item, return the next non-void one.
+ *
+ * @param items Pointer to the item in the table.
+ * @returns Next non-void item, NULL otherwise.
+ */
+static const struct rte_flow_item *
+mrvl_next_item(const struct rte_flow_item *items)
+{
+	const struct rte_flow_item *item = items;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
+			return item;
+	}
+
+	return NULL;
+}
+
+/**
+ * Allocate memory for classifier rule key and mask fields.
+ *
+ * @param field Pointer to the classifier rule.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
+{
+	unsigned int id = rte_socket_id();
+
+	field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
+	if (!field->key)
+		goto out;
+
+	field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
+	if (!field->mask)
+		goto out_mask;
+
+	return 0;
+out_mask:
+	rte_free(field->key);
+out:
+	field->key = NULL;
+	field->mask = NULL;
+	return -1;
+}
+
+/**
+ * Free memory allocated for classifier rule key and mask fields.
+ *
+ * @param field Pointer to the classifier rule.
+ */
+static void
+mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
+{
+	rte_free(field->key);
+	rte_free(field->mask);
+	field->key = NULL;
+	field->mask = NULL;
+}
+
+/**
+ * Free memory allocated for all classifier rule key and mask fields.
+ *
+ * @param rule Pointer to the classifier table rule.
+ */
+static void
+mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
+{
+	int i;
+
+	for (i = 0; i < rule->num_fields; i++)
+		mrvl_free_key_mask(&rule->fields[i]);
+	rule->num_fields = 0;
+}
+
+/**
+ * Initialize rte flow item parsing.
+ *
+ * @param item Pointer to the flow item.
+ * @param spec_ptr Pointer to the specific item pointer.
+ * @param mask_ptr Pointer to the specific item's mask pointer.
+ * @param def_mask Pointer to the default mask.
+ * @param size Size of the flow item.
+ * @param error Pointer to the rte flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_init(const struct rte_flow_item *item,
+		const void **spec_ptr,
+		const void **mask_ptr,
+		const void *def_mask,
+		unsigned int size,
+		struct rte_flow_error *error)
+{
+	const uint8_t *spec;
+	const uint8_t *mask;
+	const uint8_t *last;
+	uint8_t zeros[size];
+
+	memset(zeros, 0, size);
+
+	if (item == NULL) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				   "NULL item\n");
+		return -rte_errno;
+	}
+
+	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, item,
+				   "Mask or last is set without spec\n");
+		return -rte_errno;
+	}
+
+	/*
+	 * If "mask" is not set, the default mask is used,
+	 * but if the default mask is NULL, "mask" must be set.
+	 */
+	if (item->mask == NULL) {
+		if (def_mask == NULL) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					   "Mask should be specified\n");
+			return -rte_errno;
+		}
+
+		mask = (const uint8_t *)def_mask;
+	} else {
+		mask = (const uint8_t *)item->mask;
+	}
+
+	spec = (const uint8_t *)item->spec;
+	last = (const uint8_t *)item->last;
+
+	if (spec == NULL) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Spec should be specified\n");
+		return -rte_errno;
+	}
+
+	/*
+	 * If field values in "last" are either 0 or equal to the corresponding
+	 * values in "spec" then they are ignored; any other value requests a
+	 * range match, which the classifier does not support.
+	 */
+	if (last != NULL &&
+	    memcmp(last, zeros, size) != 0 &&
+	    memcmp(last, spec, size) != 0) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				   "Ranging is not supported\n");
+		return -rte_errno;
+	}
+
+	*spec_ptr = spec;
+	*mask_ptr = mask;
+
+	return 0;
+}
+
+/**
+ * Parse the eth flow item.
+ *
+ * This will create a classifier rule that matches either destination or
+ * source mac.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param parse_dst Parse destination (1) or source (0) mac address.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_mac(const struct rte_flow_item_eth *spec,
+	       const struct rte_flow_item_eth *mask,
+	       int parse_dst, struct rte_flow *flow)
+{
+	struct pp2_cls_rule_key_field *key_field;
+	const uint8_t *k, *m;
+
+	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+		return -ENOSPC;
+
+	if (parse_dst) {
+		k = spec->dst.addr_bytes;
+		m = mask->dst.addr_bytes;
+
+		flow->pattern |= F_DMAC;
+	} else {
+		k = spec->src.addr_bytes;
+		m = mask->src.addr_bytes;
+
+		flow->pattern |= F_SMAC;
+	}
+
+	key_field = &flow->rule.fields[flow->rule.num_fields];
+	mrvl_alloc_key_mask(key_field);
+	key_field->size = 6;
+
+	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
+		 "%02x:%02x:%02x:%02x:%02x:%02x",
+		 k[0], k[1], k[2], k[3], k[4], k[5]);
+
+	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
+		 "%02x:%02x:%02x:%02x:%02x:%02x",
+		 m[0], m[1], m[2], m[3], m[4], m[5]);
+
+	flow->rule.num_fields += 1;
+
+	return 0;
+}
+
+/**
+ * Helper for parsing the eth flow item destination mac address.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
+		const struct rte_flow_item_eth *mask,
+		struct rte_flow *flow)
+{
+	return mrvl_parse_mac(spec, mask, 1, flow);
+}
+
+/**
+ * Helper for parsing the eth flow item source mac address.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_smac(const struct rte_flow_item_eth *spec,
+		const struct rte_flow_item_eth *mask,
+		struct rte_flow *flow)
+{
+	return mrvl_parse_mac(spec, mask, 0, flow);
+}
+
+/**
+ * Parse the ether type field of the eth flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
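+ *
+ * Illustrative example (hypothetical value): a spec carrying
+ * type = RTE_BE16(0x0800), i.e. IPv4, is emitted as the decimal key
+ * string "2048" after the rte_be_to_cpu_16() conversion below.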
+ */ +static int +mrvl_parse_type(const struct rte_flow_item_eth *spec, + const struct rte_flow_item_eth *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint16_t k; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 2; + + k = rte_be_to_cpu_16(spec->type); + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_TYPE; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse the vid field of the vlan rte flow item. + * + * This will create classifier rule that matches vid. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec, + const struct rte_flow_item_vlan *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint16_t k; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 2; + + k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK; + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_VLAN_ID; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse the pri field of the vlan rte flow item. + * + * This will create classifier rule that matches pri. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec, + const struct rte_flow_item_vlan *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint16_t k; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 1; + + k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13; + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_VLAN_PRI; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse the dscp field of the ipv4 rte flow item. + * + * This will create classifier rule that matches dscp field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. 
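+ *
+ * Illustrative example (hypothetical value): type_of_service = 0xb8
+ * (DSCP EF) yields the key (0xb8 & MRVL_IPV4_DSCP_MASK) >> 2 = 46.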
+ */ +static int +mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint8_t k, m; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 1; + + k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2; + m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2; + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m); + + flow->pattern |= F_IP4_TOS; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse either source or destination ip addresses of the ipv4 flow item. + * + * This will create classifier rule that matches either destination + * or source ip field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask, + int parse_dst, struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + struct in_addr k; + uint32_t m; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + memset(&k, 0, sizeof(k)); + if (parse_dst) { + k.s_addr = spec->hdr.dst_addr; + m = rte_be_to_cpu_32(mask->hdr.dst_addr); + + flow->pattern |= F_IP4_DIP; + } else { + k.s_addr = spec->hdr.src_addr; + m = rte_be_to_cpu_32(mask->hdr.src_addr); + + flow->pattern |= F_IP4_SIP; + } + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 4; + + inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX); + snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m); + + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Helper for parsing destination ip of the ipv4 flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask, + struct rte_flow *flow) +{ + return mrvl_parse_ip4_addr(spec, mask, 1, flow); +} + +/** + * Helper for parsing source ip of the ipv4 flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask, + struct rte_flow *flow) +{ + return mrvl_parse_ip4_addr(spec, mask, 0, flow); +} + +/** + * Parse the proto field of the ipv4 rte flow item. + * + * This will create classifier rule that matches proto field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. 
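+ *
+ * Illustrative example (hypothetical value): next_proto_id = 6 (TCP)
+ * is emitted as the key string "6".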
+ */ +static int +mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint8_t k = spec->hdr.next_proto_id; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 1; + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_IP4_PROTO; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse either source or destination ip addresses of the ipv6 rte flow item. + * + * This will create classifier rule that matches either destination + * or source ip field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask, + int parse_dst, struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + int size = sizeof(spec->hdr.dst_addr); + struct in6_addr k, m; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + memset(&k, 0, sizeof(k)); + if (parse_dst) { + memcpy(k.s6_addr, spec->hdr.dst_addr, size); + memcpy(m.s6_addr, mask->hdr.dst_addr, size); + + flow->pattern |= F_IP6_DIP; + } else { + memcpy(k.s6_addr, spec->hdr.src_addr, size); + memcpy(m.s6_addr, mask->hdr.src_addr, size); + + flow->pattern |= F_IP6_SIP; + } + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 16; + + inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX); + inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX); + + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Helper for parsing destination ip of the ipv6 flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask, + struct rte_flow *flow) +{ + return mrvl_parse_ip6_addr(spec, mask, 1, flow); +} + +/** + * Helper for parsing source ip of the ipv6 flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask, + struct rte_flow *flow) +{ + return mrvl_parse_ip6_addr(spec, mask, 0, flow); +} + +/** + * Parse the flow label of the ipv6 flow item. + * + * This will create classifier rule that matches flow field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. 
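+ *
+ * Illustrative example (hypothetical value): a host-order vtc_flow of
+ * 0x60012345 masked with MRVL_IPV6_FLOW_MASK leaves the 20-bit flow
+ * label 0x12345.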
+ */ +static int +mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK, + m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 3; + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m); + + flow->pattern |= F_IP6_FLOW; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse the next header of the ipv6 flow item. + * + * This will create classifier rule that matches next header field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint8_t k = spec->hdr.proto; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 1; + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_IP6_NEXT_HDR; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse destination or source port of the tcp flow item. + * + * This will create classifier rule that matches either destination or + * source tcp port. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec, + const struct rte_flow_item_tcp *mask __rte_unused, + int parse_dst, struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint16_t k; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 2; + + if (parse_dst) { + k = rte_be_to_cpu_16(spec->hdr.dst_port); + + flow->pattern |= F_TCP_DPORT; + } else { + k = rte_be_to_cpu_16(spec->hdr.src_port); + + flow->pattern |= F_TCP_SPORT; + } + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Helper for parsing the tcp source port of the tcp flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec, + const struct rte_flow_item_tcp *mask, + struct rte_flow *flow) +{ + return mrvl_parse_tcp_port(spec, mask, 0, flow); +} + +/** + * Helper for parsing the tcp destination port of the tcp flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. 
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
+		     const struct rte_flow_item_tcp *mask,
+		     struct rte_flow *flow)
+{
+	return mrvl_parse_tcp_port(spec, mask, 1, flow);
+}
+
+/**
+ * Parse destination or source port of the udp flow item.
+ *
+ * This will create classifier rule that matches either destination or
+ * source udp port.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
+		    const struct rte_flow_item_udp *mask __rte_unused,
+		    int parse_dst, struct rte_flow *flow)
+{
+	struct pp2_cls_rule_key_field *key_field;
+	uint16_t k;
+
+	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+		return -ENOSPC;
+
+	key_field = &flow->rule.fields[flow->rule.num_fields];
+	mrvl_alloc_key_mask(key_field);
+	key_field->size = 2;
+
+	if (parse_dst) {
+		k = rte_be_to_cpu_16(spec->hdr.dst_port);
+
+		flow->pattern |= F_UDP_DPORT;
+	} else {
+		k = rte_be_to_cpu_16(spec->hdr.src_port);
+
+		flow->pattern |= F_UDP_SPORT;
+	}
+
+	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+	flow->rule.num_fields += 1;
+
+	return 0;
+}
+
+/**
+ * Helper for parsing the udp source port of the udp flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
+		     const struct rte_flow_item_udp *mask,
+		     struct rte_flow *flow)
+{
+	return mrvl_parse_udp_port(spec, mask, 0, flow);
+}
+
+/**
+ * Helper for parsing the udp destination port of the udp flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
+		     const struct rte_flow_item_udp *mask,
+		     struct rte_flow *flow)
+{
+	return mrvl_parse_udp_port(spec, mask, 1, flow);
+}
+
+/**
+ * Parse eth flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
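+ *
+ * A minimal usage sketch (hypothetical addresses): a user item matching a
+ * single destination mac, which ends up in mrvl_parse_dmac():
+ *
+ * @code
+ * struct rte_flow_item_eth spec = {
+ *	.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ * };
+ * struct rte_flow_item_eth mask = {
+ *	.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ * };
+ * struct rte_flow_item item = {
+ *	.type = RTE_FLOW_ITEM_TYPE_ETH,
+ *	.spec = &spec,
+ *	.mask = &mask,
+ * };
+ * @endcode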
+ */
+static int
+mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
+	       struct rte_flow_error *error)
+{
+	const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
+	struct ether_addr zero;
+	int ret;
+
+	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+			      &rte_flow_item_eth_mask,
+			      sizeof(struct rte_flow_item_eth), error);
+	if (ret)
+		return ret;
+
+	memset(&zero, 0, sizeof(zero));
+
+	if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) {
+		ret = mrvl_parse_dmac(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	if (memcmp(&mask->src, &zero, sizeof(mask->src))) {
+		ret = mrvl_parse_smac(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	if (mask->type) {
+		MRVL_LOG(WARNING, "eth type mask is ignored");
+		ret = mrvl_parse_type(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	return 0;
+out:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+			   "Reached maximum number of fields in cls tbl key\n");
+	return -rte_errno;
+}
+
+/**
+ * Parse vlan flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_vlan(const struct rte_flow_item *item,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
+	uint16_t m;
+	int ret;
+
+	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+			      &rte_flow_item_vlan_mask,
+			      sizeof(struct rte_flow_item_vlan), error);
+	if (ret)
+		return ret;
+
+	m = rte_be_to_cpu_16(mask->tci);
+	if (m & MRVL_VLAN_ID_MASK) {
+		MRVL_LOG(WARNING, "vlan id mask is ignored");
+		ret = mrvl_parse_vlan_id(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	if (m & MRVL_VLAN_PRI_MASK) {
+		MRVL_LOG(WARNING, "vlan pri mask is ignored");
+		ret = mrvl_parse_vlan_pri(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	if (flow->pattern & F_TYPE) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ITEM, item,
+				   "VLAN TPID matching is not supported");
+		return -rte_errno;
+	}
+	if (mask->inner_type) {
+		struct rte_flow_item_eth spec_eth = {
+			.type = spec->inner_type,
+		};
+		struct rte_flow_item_eth mask_eth = {
+			.type = mask->inner_type,
+		};
+
+		MRVL_LOG(WARNING, "inner eth type mask is ignored");
+		ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
+		if (ret)
+			goto out;
+	}
+
+	return 0;
+out:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+			   "Reached maximum number of fields in cls tbl key\n");
+	return -rte_errno;
+}
+
+/**
+ * Parse ipv4 flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
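+ *
+ * A minimal usage sketch (hypothetical address): matching source
+ * IP 10.0.0.1 exactly sets only hdr.src_addr in both spec and mask:
+ *
+ * @code
+ * struct rte_flow_item_ipv4 spec = {
+ *	.hdr.src_addr = RTE_BE32(0x0a000001),
+ * };
+ * struct rte_flow_item_ipv4 mask = {
+ *	.hdr.src_addr = RTE_BE32(0xffffffff),
+ * };
+ * @endcode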
+ */
+static int
+mrvl_parse_ip4(const struct rte_flow_item *item,
+	       struct rte_flow *flow,
+	       struct rte_flow_error *error)
+{
+	const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
+	int ret;
+
+	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+			      &rte_flow_item_ipv4_mask,
+			      sizeof(struct rte_flow_item_ipv4), error);
+	if (ret)
+		return ret;
+
+	if (mask->hdr.version_ihl ||
+	    mask->hdr.total_length ||
+	    mask->hdr.packet_id ||
+	    mask->hdr.fragment_offset ||
+	    mask->hdr.time_to_live ||
+	    mask->hdr.hdr_checksum) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Not supported by classifier\n");
+		return -rte_errno;
+	}
+
+	if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
+		ret = mrvl_parse_ip4_dscp(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	if (mask->hdr.src_addr) {
+		ret = mrvl_parse_ip4_sip(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	if (mask->hdr.dst_addr) {
+		ret = mrvl_parse_ip4_dip(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	if (mask->hdr.next_proto_id) {
+		MRVL_LOG(WARNING, "next proto id mask is ignored");
+		ret = mrvl_parse_ip4_proto(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	return 0;
+out:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+			   "Reached maximum number of fields in cls tbl key\n");
+	return -rte_errno;
+}
+
+/**
+ * Parse ipv6 flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_ip6(const struct rte_flow_item *item,
+	       struct rte_flow *flow,
+	       struct rte_flow_error *error)
+{
+	const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
+	struct ipv6_hdr zero;
+	uint32_t flow_mask;
+	int ret;
+
+	ret = mrvl_parse_init(item, (const void **)&spec,
+			      (const void **)&mask,
+			      &rte_flow_item_ipv6_mask,
+			      sizeof(struct rte_flow_item_ipv6),
+			      error);
+	if (ret)
+		return ret;
+
+	memset(&zero, 0, sizeof(zero));
+
+	if (mask->hdr.payload_len ||
+	    mask->hdr.hop_limits) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Not supported by classifier\n");
+		return -rte_errno;
+	}
+
+	if (memcmp(mask->hdr.src_addr,
+		   zero.src_addr, sizeof(mask->hdr.src_addr))) {
+		ret = mrvl_parse_ip6_sip(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	if (memcmp(mask->hdr.dst_addr,
+		   zero.dst_addr, sizeof(mask->hdr.dst_addr))) {
+		ret = mrvl_parse_ip6_dip(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
+	if (flow_mask) {
+		ret = mrvl_parse_ip6_flow(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	if (mask->hdr.proto) {
+		MRVL_LOG(WARNING, "next header mask is ignored");
+		ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	return 0;
+out:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+			   "Reached maximum number of fields in cls tbl key\n");
+	return -rte_errno;
+}
+
+/**
+ * Parse tcp flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
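+ *
+ * A minimal usage sketch (hypothetical port): matching tcp destination
+ * port 80 sets only hdr.dst_port in spec and mask:
+ *
+ * @code
+ * struct rte_flow_item_tcp spec = { .hdr.dst_port = RTE_BE16(80) };
+ * struct rte_flow_item_tcp mask = { .hdr.dst_port = RTE_BE16(0xffff) };
+ * @endcode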
+ */
+static int
+mrvl_parse_tcp(const struct rte_flow_item *item,
+	       struct rte_flow *flow,
+	       struct rte_flow_error *error)
+{
+	const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
+	int ret;
+
+	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+			      &rte_flow_item_tcp_mask,
+			      sizeof(struct rte_flow_item_tcp), error);
+	if (ret)
+		return ret;
+
+	if (mask->hdr.sent_seq ||
+	    mask->hdr.recv_ack ||
+	    mask->hdr.data_off ||
+	    mask->hdr.tcp_flags ||
+	    mask->hdr.rx_win ||
+	    mask->hdr.cksum ||
+	    mask->hdr.tcp_urp) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Not supported by classifier\n");
+		return -rte_errno;
+	}
+
+	if (mask->hdr.src_port) {
+		MRVL_LOG(WARNING, "tcp sport mask is ignored");
+		ret = mrvl_parse_tcp_sport(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	if (mask->hdr.dst_port) {
+		MRVL_LOG(WARNING, "tcp dport mask is ignored");
+		ret = mrvl_parse_tcp_dport(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	return 0;
+out:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+			   "Reached maximum number of fields in cls tbl key\n");
+	return -rte_errno;
+}
+
+/**
+ * Parse udp flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_udp(const struct rte_flow_item *item,
+	       struct rte_flow *flow,
+	       struct rte_flow_error *error)
+{
+	const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
+	int ret;
+
+	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+			      &rte_flow_item_udp_mask,
+			      sizeof(struct rte_flow_item_udp), error);
+	if (ret)
+		return ret;
+
+	if (mask->hdr.dgram_len ||
+	    mask->hdr.dgram_cksum) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Not supported by classifier\n");
+		return -rte_errno;
+	}
+
+	if (mask->hdr.src_port) {
+		MRVL_LOG(WARNING, "udp sport mask is ignored");
+		ret = mrvl_parse_udp_sport(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	if (mask->hdr.dst_port) {
+		MRVL_LOG(WARNING, "udp dport mask is ignored");
+		ret = mrvl_parse_udp_dport(spec, mask, flow);
+		if (ret)
+			goto out;
+	}
+
+	return 0;
+out:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+			   "Reached maximum number of fields in cls tbl key\n");
+	return -rte_errno;
+}
+
+/**
+ * Parse flow pattern composed of the eth item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth(const struct rte_flow_item pattern[],
+		       struct rte_flow *flow,
+		       struct rte_flow_error *error)
+{
+	return mrvl_parse_eth(pattern, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth and vlan items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */ +static int +mrvl_parse_pattern_eth_vlan(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_eth(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return mrvl_parse_vlan(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth, vlan and ip4/ip6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_vlan_ip4_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_eth(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + ret = mrvl_parse_vlan(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth, vlan and ipv4 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_vlan_ip4(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the eth, vlan and ipv6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_vlan_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the eth and ip4/ip6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_ip4_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_eth(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth and ipv4 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip4(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the eth and ipv6 items. 
+ * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the eth, ip4 and tcp/udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param tcp 1 to parse tcp item, 0 to parse udp item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_ip4_tcp_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int tcp) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + item = mrvl_next_item(item + 1); + + if (tcp) + return mrvl_parse_tcp(item, flow, error); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth, ipv4 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip4_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the eth, ipv4 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip4_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the eth, ipv6 and tcp/udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param tcp 1 to parse tcp item, 0 to parse udp item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_ip6_tcp_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int tcp) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + item = mrvl_next_item(item + 1); + + if (tcp) + return mrvl_parse_tcp(item, flow, error); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth, ipv6 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. 
+ */ +static inline int +mrvl_parse_pattern_eth_ip6_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the eth, ipv6 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip6_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the vlan item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_vlan(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + + return mrvl_parse_vlan(item, flow, error); +} + +/** + * Parse flow pattern composed of the vlan and ip4/ip6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_vlan_ip4_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_vlan(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); +} + +/** + * Parse flow pattern composed of the vlan and ipv4 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip4(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the vlan, ipv4 and tcp/udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_vlan_ip_tcp_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int tcp) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + item = mrvl_next_item(item + 1); + + if (tcp) + return mrvl_parse_tcp(item, flow, error); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the vlan, ipv4 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. 
+ */ +static inline int +mrvl_parse_pattern_vlan_ip_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the vlan, ipv4 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the vlan and ipv6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the vlan, ipv6 and tcp/udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_vlan_ip6_tcp_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int tcp) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + item = mrvl_next_item(item + 1); + + if (tcp) + return mrvl_parse_tcp(item, flow, error); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the vlan, ipv6 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip6_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the vlan, ipv6 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip6_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the ip4/ip6 item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_ip4_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + + return ip6 ? 
mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); +} + +/** + * Parse flow pattern composed of the ipv4 item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip4(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the ipv6 item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the ip4/ip6 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_ip4_ip6_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return mrvl_parse_tcp(item, flow, error); +} + +/** + * Parse flow pattern composed of the ipv4 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip4_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the ipv6 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip6_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the ipv4/ipv6 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_ip4_ip6_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the ipv4 and udp items. 
+ * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip4_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the ipv6 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip6_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the tcp item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + + return mrvl_parse_tcp(item, flow, error); +} + +/** + * Parse flow pattern composed of the udp item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Structure used to map specific flow pattern to the pattern parse callback + * which will iterate over each pattern item and extract relevant data. 
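+ *
+ * Lookup is linear: the first entry whose item type sequence matches the
+ * user-supplied pattern (void items aside) wins, so e.g. an
+ * eth/ipv4/udp pattern is handled by mrvl_parse_pattern_eth_ip4_udp().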
+ */
+static const struct {
+	const enum rte_flow_item_type *pattern;
+	int (*parse)(const struct rte_flow_item pattern[],
+		     struct rte_flow *flow,
+		     struct rte_flow_error *error);
+} mrvl_patterns[] = {
+	{ pattern_eth, mrvl_parse_pattern_eth },
+	{ pattern_eth_vlan, mrvl_parse_pattern_eth_vlan },
+	{ pattern_eth_vlan_ip, mrvl_parse_pattern_eth_vlan_ip4 },
+	{ pattern_eth_vlan_ip6, mrvl_parse_pattern_eth_vlan_ip6 },
+	{ pattern_eth_ip4, mrvl_parse_pattern_eth_ip4 },
+	{ pattern_eth_ip4_tcp, mrvl_parse_pattern_eth_ip4_tcp },
+	{ pattern_eth_ip4_udp, mrvl_parse_pattern_eth_ip4_udp },
+	{ pattern_eth_ip6, mrvl_parse_pattern_eth_ip6 },
+	{ pattern_eth_ip6_tcp, mrvl_parse_pattern_eth_ip6_tcp },
+	{ pattern_eth_ip6_udp, mrvl_parse_pattern_eth_ip6_udp },
+	{ pattern_vlan, mrvl_parse_pattern_vlan },
+	{ pattern_vlan_ip, mrvl_parse_pattern_vlan_ip4 },
+	{ pattern_vlan_ip_tcp, mrvl_parse_pattern_vlan_ip_tcp },
+	{ pattern_vlan_ip_udp, mrvl_parse_pattern_vlan_ip_udp },
+	{ pattern_vlan_ip6, mrvl_parse_pattern_vlan_ip6 },
+	{ pattern_vlan_ip6_tcp, mrvl_parse_pattern_vlan_ip6_tcp },
+	{ pattern_vlan_ip6_udp, mrvl_parse_pattern_vlan_ip6_udp },
+	{ pattern_ip, mrvl_parse_pattern_ip4 },
+	{ pattern_ip_tcp, mrvl_parse_pattern_ip4_tcp },
+	{ pattern_ip_udp, mrvl_parse_pattern_ip4_udp },
+	{ pattern_ip6, mrvl_parse_pattern_ip6 },
+	{ pattern_ip6_tcp, mrvl_parse_pattern_ip6_tcp },
+	{ pattern_ip6_udp, mrvl_parse_pattern_ip6_udp },
+	{ pattern_tcp, mrvl_parse_pattern_tcp },
+	{ pattern_udp, mrvl_parse_pattern_udp }
+};
+
+/**
+ * Check whether provided pattern matches any of the supported ones.
+ *
+ * @param type_pattern Pointer to the pattern type.
+ * @param item_pattern Pointer to the flow pattern.
+ * @returns 1 in case of success, 0 otherwise.
+ */
+static int
+mrvl_patterns_match(const enum rte_flow_item_type *type_pattern,
+		    const struct rte_flow_item *item_pattern)
+{
+	const enum rte_flow_item_type *type = type_pattern;
+	const struct rte_flow_item *item = item_pattern;
+
+	for (;;) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
+			item++;
+			continue;
+		}
+
+		if (*type == RTE_FLOW_ITEM_TYPE_END ||
+		    item->type == RTE_FLOW_ITEM_TYPE_END)
+			break;
+
+		if (*type != item->type)
+			break;
+
+		item++;
+		type++;
+	}
+
+	return *type == item->type;
+}
+
+/**
+ * Parse flow attribute.
+ *
+ * This will check whether the provided attribute's flags are supported.
+ *
+ * @param priv Unused
+ * @param attr Pointer to the flow attribute.
+ * @param flow Unused
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
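+ *
+ * The only attribute combination accepted here is plain ingress with all
+ * other flags left at zero, e.g.:
+ *
+ * @code
+ * struct rte_flow_attr attr = { .ingress = 1 };
+ * @endcode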
+ */
+static int
+mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
+		     const struct rte_flow_attr *attr,
+		     struct rte_flow *flow __rte_unused,
+		     struct rte_flow_error *error)
+{
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute");
+		return -rte_errno;
+	}
+
+	if (attr->group) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
+				   "Groups are not supported");
+		return -rte_errno;
+	}
+	if (attr->priority) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
+				   "Priorities are not supported");
+		return -rte_errno;
+	}
+	if (!attr->ingress) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
+				   "Only ingress is supported");
+		return -rte_errno;
+	}
+	if (attr->egress) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+				   "Egress is not supported");
+		return -rte_errno;
+	}
+	if (attr->transfer) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
+				   "Transfer is not supported");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/**
+ * Parse flow pattern.
+ *
+ * Specific classifier rule will be created as well.
+ *
+ * @param priv Unused
+ * @param pattern Pointer to the flow pattern.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
+			const struct rte_flow_item pattern[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < RTE_DIM(mrvl_patterns); i++) {
+		if (!mrvl_patterns_match(mrvl_patterns[i].pattern, pattern))
+			continue;
+
+		ret = mrvl_patterns[i].parse(pattern, flow, error);
+		if (ret)
+			mrvl_free_all_key_mask(&flow->rule);
+
+		return ret;
+	}
+
+	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			   "Unsupported pattern");
+
+	return -rte_errno;
+}
+
+/**
+ * Parse flow actions.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param actions Pointer to the action table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_parse_actions(struct mrvl_priv *priv,
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_action *action = actions;
+	int specified = 0;
+
+	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+			continue;
+
+		if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
+			flow->cos.ppio = priv->ppio;
+			flow->cos.tc = 0;
+			flow->action.type = PP2_CLS_TBL_ACT_DROP;
+			flow->action.cos = &flow->cos;
+			specified++;
+		} else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+			const struct rte_flow_action_queue *q =
+				(const struct rte_flow_action_queue *)
+				action->conf;
+
+			if (q->index >= priv->nb_rx_queues) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ACTION,
+						   NULL,
+						   "Queue index out of range");
+				return -rte_errno;
+			}
+
+			if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
+				/*
+				 * Unknown TC mapping, mapping will not have
+				 * a correct queue.
+ */ + MRVL_LOG(ERR, + "Unknown TC mapping for queue %hu eth%hhu", + q->index, priv->ppio_id); + + rte_flow_error_set(error, EFAULT, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, NULL); + return -rte_errno; + } + + MRVL_LOG(DEBUG, + "Action: Assign packets to queue %d, tc:%d, q:%d", + q->index, priv->rxq_map[q->index].tc, + priv->rxq_map[q->index].inq); + + flow->cos.ppio = priv->ppio; + flow->cos.tc = priv->rxq_map[q->index].tc; + flow->action.type = PP2_CLS_TBL_ACT_DONE; + flow->action.cos = &flow->cos; + specified++; + } else { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Action not supported"); + return -rte_errno; + } + + } + + if (!specified) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Action not specified"); + return -rte_errno; + } + + return 0; +} + +/** + * Parse flow attribute, pattern and actions. + * + * @param priv Pointer to the port's private data. + * @param attr Pointer to the flow attribute. + * @param pattern Pointer to the flow pattern. + * @param actions Pointer to the flow actions. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret; + + ret = mrvl_flow_parse_attr(priv, attr, flow, error); + if (ret) + return ret; + + ret = mrvl_flow_parse_pattern(priv, pattern, flow, error); + if (ret) + return ret; + + return mrvl_flow_parse_actions(priv, actions, flow, error); +} + +static inline enum pp2_cls_tbl_type +mrvl_engine_type(const struct rte_flow *flow) +{ + int i, size = 0; + + for (i = 0; i < flow->rule.num_fields; i++) + size += flow->rule.fields[i].size; + + /* + * For maskable engine type the key size must be up to 8 bytes. + * For keys with size bigger than 8 bytes, engine type must + * be set to exact match. + */ + if (size > 8) + return PP2_CLS_TBL_EXACT_MATCH; + + return PP2_CLS_TBL_MASKABLE; +} + +static int +mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key; + int ret; + + if (priv->cls_tbl) { + pp2_cls_tbl_deinit(priv->cls_tbl); + priv->cls_tbl = NULL; + } + + memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params)); + + priv->cls_tbl_params.type = mrvl_engine_type(first_flow); + MRVL_LOG(INFO, "Setting cls search engine type to %s", + priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ? 
+ "exact" : "maskable"); + priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES; + priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE; + priv->cls_tbl_params.default_act.cos = &first_flow->cos; + + if (first_flow->pattern & F_DMAC) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH; + key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_DA; + key->key_size += 6; + key->num_fields += 1; + } + + if (first_flow->pattern & F_SMAC) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH; + key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_SA; + key->key_size += 6; + key->num_fields += 1; + } + + if (first_flow->pattern & F_TYPE) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH; + key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_TYPE; + key->key_size += 2; + key->num_fields += 1; + } + + if (first_flow->pattern & F_VLAN_ID) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN; + key->proto_field[key->num_fields].field.vlan = MV_NET_VLAN_F_ID; + key->key_size += 2; + key->num_fields += 1; + } + + if (first_flow->pattern & F_VLAN_PRI) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN; + key->proto_field[key->num_fields].field.vlan = + MV_NET_VLAN_F_PRI; + key->key_size += 1; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP4_TOS) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4; + key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_TOS; + key->key_size += 1; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP4_SIP) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4; + key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_SA; + key->key_size += 4; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP4_DIP) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4; + key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_DA; + key->key_size += 4; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP4_PROTO) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4; + key->proto_field[key->num_fields].field.ipv4 = + MV_NET_IP4_F_PROTO; + key->key_size += 1; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP6_SIP) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6; + key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_SA; + key->key_size += 16; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP6_DIP) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6; + key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_DA; + key->key_size += 16; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP6_FLOW) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6; + key->proto_field[key->num_fields].field.ipv6 = + MV_NET_IP6_F_FLOW; + key->key_size += 3; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP6_NEXT_HDR) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6; + key->proto_field[key->num_fields].field.ipv6 = + MV_NET_IP6_F_NEXT_HDR; + key->key_size += 1; + key->num_fields += 1; + } + + if (first_flow->pattern & F_TCP_SPORT) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP; + key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP; + key->key_size += 2; + key->num_fields += 1; + } + + if (first_flow->pattern & F_TCP_DPORT) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP; + key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_DP; + key->key_size += 2; + key->num_fields += 1; + } 
+
+	if (first_flow->pattern & F_UDP_SPORT) {
+		key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
+		key->proto_field[key->num_fields].field.udp = MV_NET_UDP_F_SP;
+		key->key_size += 2;
+		key->num_fields += 1;
+	}
+
+	if (first_flow->pattern & F_UDP_DPORT) {
+		key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
+		key->proto_field[key->num_fields].field.udp = MV_NET_UDP_F_DP;
+		key->key_size += 2;
+		key->num_fields += 1;
+	}
+
+	ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
+	if (!ret)
+		priv->cls_tbl_pattern = first_flow->pattern;
+
+	return ret;
+}
+
+/**
+ * Check whether a new flow can be added to the table.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param flow Pointer to the new flow.
+ * @return 1 in case flow can be added, 0 otherwise.
+ */
+static inline int
+mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
+{
+	return flow->pattern == priv->cls_tbl_pattern &&
+	       mrvl_engine_type(flow) == priv->cls_tbl_params.type;
+}
+
+/**
+ * DPDK flow create callback called when a flow is to be created.
+ *
+ * @param dev Pointer to the device.
+ * @param attr Pointer to the flow attribute.
+ * @param pattern Pointer to the flow pattern.
+ * @param actions Pointer to the flow actions.
+ * @param error Pointer to the flow error.
+ * @returns Pointer to the created flow in case of success, NULL otherwise.
+ */
+static struct rte_flow *
+mrvl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	struct rte_flow *flow, *first;
+	int ret;
+
+	if (!dev->data->dev_started) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Port must be started first\n");
+		return NULL;
+	}
+
+	flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
+	if (!flow)
+		return NULL;
+
+	ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
+	if (ret)
+		goto out;
+
+	/*
+	 * Four cases here:
+	 *
+	 * 1. In case table does not exist - create one.
+	 * 2. In case table exists, is empty and the new flow cannot be
+	 *    added - recreate the table.
+	 * 3. In case table is not empty and the new flow matches the table
+	 *    format - add it.
+	 * 4. Otherwise the flow cannot be added.
+	 */
+	first = LIST_FIRST(&priv->flows);
+	if (!priv->cls_tbl) {
+		ret = mrvl_create_cls_table(dev, flow);
+	} else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
+		ret = mrvl_create_cls_table(dev, flow);
+	} else if (mrvl_flow_can_be_added(priv, flow)) {
+		ret = 0;
+	} else {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Pattern does not match cls table format\n");
+		goto out;
+	}
+
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Failed to create cls table\n");
+		goto out;
+	}
+
+	ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Failed to add rule\n");
+		goto out;
+	}
+
+	LIST_INSERT_HEAD(&priv->flows, flow, next);
+
+	return flow;
+out:
+	rte_free(flow);
+	return NULL;
+}
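+
+/*
+ * Illustrative sketch (added by the editor, not part of the original
+ * patch): how an application would reach mrvl_flow_create() through the
+ * generic rte_flow API. port_id and the queue index are assumed values.
+ */
+#if 0
+	struct rte_flow_attr attr = { .ingress = 1 };
+	struct rte_flow_item pattern[] = {
+		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+		{ .type = RTE_FLOW_ITEM_TYPE_END },
+	};
+	struct rte_flow_action_queue queue = { .index = 0 };
+	struct rte_flow_action actions[] = {
+		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+		{ .type = RTE_FLOW_ACTION_TYPE_END },
+	};
+	struct rte_flow_error err;
+	struct rte_flow *f;
+
+	f = rte_flow_create(port_id, &attr, pattern, actions, &err);
+#endif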
+
+/**
+ * Remove classifier rule associated with given flow.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	int ret;
+
+	if (!priv->cls_tbl) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Classifier table not initialized");
+		return -rte_errno;
+	}
+
+	ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Failed to remove rule");
+		return -rte_errno;
+	}
+
+	mrvl_free_all_key_mask(&flow->rule);
+
+	return 0;
+}
+
+/**
+ * DPDK flow destroy callback called when a flow is to be removed.
+ *
+ * @param dev Pointer to the device.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	struct rte_flow *f;
+	int ret;
+
+	LIST_FOREACH(f, &priv->flows, next) {
+		if (f == flow)
+			break;
+	}
+
+	if (!f) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Rule was not found");
+		return -rte_errno;
+	}
+
+	LIST_REMOVE(f, next);
+
+	ret = mrvl_flow_remove(priv, flow, error);
+	if (ret)
+		return ret;
+
+	rte_free(flow);
+
+	return 0;
+}
+
+/**
+ * DPDK flow callback called to verify given attribute, pattern and actions.
+ *
+ * @param dev Pointer to the device.
+ * @param attr Pointer to the flow attribute.
+ * @param pattern Pointer to the flow pattern.
+ * @param actions Pointer to the flow actions.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct rte_flow *flow;
+
+	flow = mrvl_flow_create(dev, attr, pattern, actions, error);
+	if (!flow)
+		return -rte_errno;
+
+	mrvl_flow_destroy(dev, flow, error);
+
+	return 0;
+}
+
+/**
+ * DPDK flow flush callback called when flows are to be flushed.
+ *
+ * @param dev Pointer to the device.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+
+	while (!LIST_EMPTY(&priv->flows)) {
+		struct rte_flow *flow = LIST_FIRST(&priv->flows);
+		int ret = mrvl_flow_remove(priv, flow, error);
+		if (ret)
+			return ret;
+
+		LIST_REMOVE(flow, next);
+		rte_free(flow);
+	}
+
+	return 0;
+}
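+
+/*
+ * Illustrative sketch (added by the editor, not part of the original
+ * patch): applications typically drop every classifier rule on teardown
+ * via the generic API, which dispatches to mrvl_flow_flush() above.
+ */
+#if 0
+	struct rte_flow_error err;
+
+	if (rte_flow_flush(port_id, &err))
+		printf("flush failed: %s\n", err.message ? err.message : "");
+#endif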
+
+/**
+ * DPDK flow isolate callback called to isolate the port.
+ *
+ * @param dev Pointer to the device.
+ * @param enable Pass 0/1 to disable/enable port isolation.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
+		  struct rte_flow_error *error)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+
+	if (dev->data->dev_started) {
+		rte_flow_error_set(error, EBUSY,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Port must be stopped first\n");
+		return -rte_errno;
+	}
+
+	priv->isolated = enable;
+
+	return 0;
+}
+
+const struct rte_flow_ops mrvl_flow_ops = {
+	.validate = mrvl_flow_validate,
+	.create = mrvl_flow_create,
+	.destroy = mrvl_flow_destroy,
+	.flush = mrvl_flow_flush,
+	.isolate = mrvl_flow_isolate
+};
diff --git a/drivers/net/mvpp2/mrvl_qos.c b/drivers/net/mvpp2/mrvl_qos.c
new file mode 100644
index 00000000..71856c1a
--- /dev/null
+++ b/drivers/net/mvpp2/mrvl_qos.c
@@ -0,0 +1,894 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cfgfile.h>
+#include <rte_log.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+/* Unfortunately, container_of is defined by both DPDK and MUSDK;
+ * undefine it here so that only one definition remains.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+
+#include "mrvl_qos.h"
+
+/* Parsing tokens. Defined in one place, so that any correction is easy. */
+#define MRVL_TOK_DEFAULT "default"
+#define MRVL_TOK_DEFAULT_TC "default_tc"
+#define MRVL_TOK_DSCP "dscp"
+#define MRVL_TOK_MAPPING_PRIORITY "mapping_priority"
+#define MRVL_TOK_IP "ip"
+#define MRVL_TOK_IP_VLAN "ip/vlan"
+#define MRVL_TOK_PCP "pcp"
+#define MRVL_TOK_PORT "port"
+#define MRVL_TOK_RXQ "rxq"
+#define MRVL_TOK_TC "tc"
+#define MRVL_TOK_TXQ "txq"
+#define MRVL_TOK_VLAN "vlan"
+#define MRVL_TOK_VLAN_IP "vlan/ip"
+
+/* egress specific configuration tokens */
+#define MRVL_TOK_BURST_SIZE "burst_size"
+#define MRVL_TOK_RATE_LIMIT "rate_limit"
+#define MRVL_TOK_RATE_LIMIT_ENABLE "rate_limit_enable"
+#define MRVL_TOK_SCHED_MODE "sched_mode"
+#define MRVL_TOK_SCHED_MODE_SP "sp"
+#define MRVL_TOK_SCHED_MODE_WRR "wrr"
+#define MRVL_TOK_WRR_WEIGHT "wrr_weight"
+
+/* policer specific configuration tokens */
+#define MRVL_TOK_PLCR_ENABLE "policer_enable"
+#define MRVL_TOK_PLCR_UNIT "token_unit"
+#define MRVL_TOK_PLCR_UNIT_BYTES "bytes"
+#define MRVL_TOK_PLCR_UNIT_PACKETS "packets"
+#define MRVL_TOK_PLCR_COLOR "color_mode"
+#define MRVL_TOK_PLCR_COLOR_BLIND "blind"
+#define MRVL_TOK_PLCR_COLOR_AWARE "aware"
+#define MRVL_TOK_PLCR_CIR "cir"
+#define MRVL_TOK_PLCR_CBS "cbs"
+#define MRVL_TOK_PLCR_EBS "ebs"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR "default_color"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN "green"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW "yellow"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_RED "red"
+
+/** Number of tokens in range a-b = 2. */
+#define MAX_RNG_TOKENS 2
+
+/** Maximum possible value of PCP. */
+#define MAX_PCP 7
+
+/** Maximum possible value of DSCP. */
+#define MAX_DSCP 63
+
+/** Global QoS configuration. */
+struct mrvl_qos_cfg *mrvl_qos_cfg;
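+
+/*
+ * Illustrative example (added by the editor, values assumed): a
+ * configuration file built from the tokens above could look like:
+ *
+ *   [port 0 default]
+ *   default_tc = 0
+ *   mapping_priority = vlan/ip
+ *
+ *   [port 0 tc 0]
+ *   rxq = 0 1
+ *   pcp = 0 1 2 3
+ *
+ *   [port 0 tc 1]
+ *   rxq = 2
+ *   dscp = 46
+ */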
+
+/**
+ * Convert string to uint32_t with extra checks for result correctness.
+ *
+ * @param string String to convert.
+ * @param val Conversion result.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+get_val_securely(const char *string, uint32_t *val)
+{
+	char *endptr;
+	size_t len = strlen(string);
+
+	if (len == 0)
+		return -1;
+
+	errno = 0;
+	*val = strtoul(string, &endptr, 0);
+	if (errno != 0 || RTE_PTR_DIFF(endptr, string) != len)
+		return -2;
+
+	return 0;
+}
+
+/**
+ * Read out-queue configuration from file.
+ *
+ * @param file Path to the configuration file.
+ * @param port Port number.
+ * @param outq Out queue number.
+ * @param cfg Pointer to the Marvell QoS configuration structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+get_outq_cfg(struct rte_cfgfile *file, int port, int outq,
+	     struct mrvl_qos_cfg *cfg)
+{
+	char sec_name[32];
+	const char *entry;
+	uint32_t val;
+
+	snprintf(sec_name, sizeof(sec_name), "%s %d %s %d",
+		 MRVL_TOK_PORT, port, MRVL_TOK_TXQ, outq);
+
+	/* Skip non-existing */
+	if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0)
+		return 0;
+
+	/* Read scheduling mode */
+	entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_SCHED_MODE);
+	if (entry) {
+		if (!strncmp(entry, MRVL_TOK_SCHED_MODE_SP,
+			     strlen(MRVL_TOK_SCHED_MODE_SP))) {
+			cfg->port[port].outq[outq].sched_mode =
+				PP2_PPIO_SCHED_M_SP;
+		} else if (!strncmp(entry, MRVL_TOK_SCHED_MODE_WRR,
+				    strlen(MRVL_TOK_SCHED_MODE_WRR))) {
+			cfg->port[port].outq[outq].sched_mode =
+				PP2_PPIO_SCHED_M_WRR;
+		} else {
+			MRVL_LOG(ERR, "Unknown token: %s", entry);
+			return -1;
+		}
+	}
+
+	/* Read wrr weight */
+	if (cfg->port[port].outq[outq].sched_mode == PP2_PPIO_SCHED_M_WRR) {
+		entry = rte_cfgfile_get_entry(file, sec_name,
+					      MRVL_TOK_WRR_WEIGHT);
+		if (entry) {
+			if (get_val_securely(entry, &val) < 0)
+				return -1;
+			cfg->port[port].outq[outq].weight = val;
+		}
+	}
+
+	/*
+	 * There's no point in setting rate limiting for a specific outq as
+	 * global port rate limiting has priority.
+	 */
+	if (cfg->port[port].rate_limit_enable) {
+		MRVL_LOG(WARNING, "Port %d rate limiting already enabled",
+			 port);
+		return 0;
+	}
+
+	entry = rte_cfgfile_get_entry(file, sec_name,
+				      MRVL_TOK_RATE_LIMIT_ENABLE);
+	if (entry) {
+		if (get_val_securely(entry, &val) < 0)
+			return -1;
+		cfg->port[port].outq[outq].rate_limit_enable = val;
+	}
+
+	if (!cfg->port[port].outq[outq].rate_limit_enable)
+		return 0;
+
+	/* Read CBS (in kB) */
+	entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_BURST_SIZE);
+	if (entry) {
+		if (get_val_securely(entry, &val) < 0)
+			return -1;
+		cfg->port[port].outq[outq].rate_limit_params.cbs = val;
+	}
+
+	/* Read CIR (in kbps) */
+	entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RATE_LIMIT);
+	if (entry) {
+		if (get_val_securely(entry, &val) < 0)
+			return -1;
+		cfg->port[port].outq[outq].rate_limit_params.cir = val;
+	}
+
+	return 0;
+}
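+
+/*
+ * Illustrative example (added by the editor, values assumed): an egress
+ * section consumed by get_outq_cfg() above, enabling WRR scheduling and a
+ * per-queue shaper.
+ *
+ *   [port 0 txq 0]
+ *   sched_mode = wrr
+ *   wrr_weight = 10
+ *   rate_limit_enable = 1
+ *   rate_limit = 10000   ; CIR, in kbps
+ *   burst_size = 64      ; CBS, in kB
+ */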
+
+/**
+ * Get multiple-entry values and place them in a table.
+ *
+ * An entry can be anything, e.g. "1 2-3 5 6 7-9", which is converted to the
+ * table entries {1, 2, 3, 5, 6, 7, 8, 9}.
+ * As all result table elements are always 1 byte long, we do not
+ * overcomplicate the function; we keep the API generic, verify that nobody
+ * has changed the element size, and make it simple to extend to other sizes.
+ *
+ * This function is purely a utility: it does not print any error, it only
+ * returns distinct error numbers.
+ *
+ * @param entry[in] Values string to parse.
+ * @param tab[out] Results table.
+ * @param elem_sz[in] Element size (in bytes).
+ * @param max_elems[in] Number of results table elements available.
+ * @param max_val[in] Maximum value allowed.
+ * @returns Number of correctly parsed elements in case of success.
+ * @retval -1 Wrong element size.
+ * @retval -2 More tokens than result table allows.
+ * @retval -3 Wrong range syntax.
+ * @retval -4 Wrong range values.
+ * @retval -5 Maximum value exceeded.
+ */
+static int
+get_entry_values(const char *entry, uint8_t *tab,
+		 size_t elem_sz, uint8_t max_elems, uint8_t max_val)
+{
+	/* There should not be more tokens than max elements.
+	 * Add 1 for error trap.
+	 */
+	char *tokens[max_elems + 1];
+
+	/* Begin, End + error trap = 3. */
+	char *rng_tokens[MAX_RNG_TOKENS + 1];
+	long beg, end;
+	uint32_t token_val;
+	int nb_tokens, nb_rng_tokens;
+	int i;
+	int values = 0;
+	char val;
+	char entry_cpy[CFG_VALUE_LEN];
+
+	if (elem_sz != 1)
+		return -1;
+
+	/* Copy the entry to safely use rte_strsplit(). */
+	strlcpy(entry_cpy, entry, RTE_DIM(entry_cpy));
+
+	/*
+	 * If there are more tokens than the array size, rte_strsplit will
+	 * not return an error, just the array size.
+	 */
+	nb_tokens = rte_strsplit(entry_cpy, strlen(entry_cpy),
+				 tokens, max_elems + 1, ' ');
+
+	/* Quick check, will be refined later. */
+	if (nb_tokens > max_elems)
+		return -2;
+
+	for (i = 0; i < nb_tokens; ++i) {
+		if (strchr(tokens[i], '-') != NULL) {
+			/*
+			 * Split to begin and end tokens.
+			 * We want to catch error cases too, thus we leave
+			 * option for number of tokens to be more than 2.
+			 */
+			nb_rng_tokens = rte_strsplit(tokens[i],
+					strlen(tokens[i]), rng_tokens,
+					RTE_DIM(rng_tokens), '-');
+			if (nb_rng_tokens != 2)
+				return -3;
+
+			/* Range and sanity checks. */
+			if (get_val_securely(rng_tokens[0], &token_val) < 0)
+				return -4;
+			beg = (char)token_val;
+			if (get_val_securely(rng_tokens[1], &token_val) < 0)
+				return -4;
+			end = (char)token_val;
+			if (beg < 0 || beg > UCHAR_MAX ||
+			    end < 0 || end > UCHAR_MAX || end < beg)
+				return -4;
+
+			for (val = beg; val <= end; ++val) {
+				if (val > max_val)
+					return -5;
+
+				*tab = val;
+				tab = RTE_PTR_ADD(tab, elem_sz);
+				++values;
+				if (values >= max_elems)
+					return -2;
+			}
+		} else {
+			/* Single values. */
+			if (get_val_securely(tokens[i], &token_val) < 0)
+				return -5;
+			val = (char)token_val;
+			if (val > max_val)
+				return -5;
+
+			*tab = val;
+			tab = RTE_PTR_ADD(tab, elem_sz);
+			++values;
+			if (values >= max_elems)
+				return -2;
+		}
+	}
+
+	return values;
+}
+
+/**
+ * Parse a Traffic Class's mapping configuration.
+ *
+ * @param file Config file handle.
+ * @param port Which port to look for.
+ * @param tc Which Traffic Class to look for.
+ * @param cfg[out] Parsing results.
+ * @returns 0 in case of success, negative value otherwise.
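+ *
+ * Illustrative note (added by the editor, values assumed): a section such as
+ *
+ *   [port 0 tc 1]
+ *   rxq = 2-3
+ *   pcp = 5 6
+ *
+ * yields inq = {2, 3}, inqs = 2, pcp = {5, 6}, pcps = 2.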
+ */ +static int +parse_tc_cfg(struct rte_cfgfile *file, int port, int tc, + struct mrvl_qos_cfg *cfg) +{ + char sec_name[32]; + const char *entry; + int n; + + snprintf(sec_name, sizeof(sec_name), "%s %d %s %d", + MRVL_TOK_PORT, port, MRVL_TOK_TC, tc); + + /* Skip non-existing */ + if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0) + return 0; + + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RXQ); + if (entry) { + n = get_entry_values(entry, + cfg->port[port].tc[tc].inq, + sizeof(cfg->port[port].tc[tc].inq[0]), + RTE_DIM(cfg->port[port].tc[tc].inq), + MRVL_PP2_RXQ_MAX); + if (n < 0) { + MRVL_LOG(ERR, "Error %d while parsing: %s", + n, entry); + return n; + } + cfg->port[port].tc[tc].inqs = n; + } + + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PCP); + if (entry) { + n = get_entry_values(entry, + cfg->port[port].tc[tc].pcp, + sizeof(cfg->port[port].tc[tc].pcp[0]), + RTE_DIM(cfg->port[port].tc[tc].pcp), + MAX_PCP); + if (n < 0) { + MRVL_LOG(ERR, "Error %d while parsing: %s", + n, entry); + return n; + } + cfg->port[port].tc[tc].pcps = n; + } + + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_DSCP); + if (entry) { + n = get_entry_values(entry, + cfg->port[port].tc[tc].dscp, + sizeof(cfg->port[port].tc[tc].dscp[0]), + RTE_DIM(cfg->port[port].tc[tc].dscp), + MAX_DSCP); + if (n < 0) { + MRVL_LOG(ERR, "Error %d while parsing: %s", + n, entry); + return n; + } + cfg->port[port].tc[tc].dscps = n; + } + + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_PLCR_DEFAULT_COLOR); + if (entry) { + if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN, + sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN))) { + cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_GREEN; + } else if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW, + sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW))) { + cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_YELLOW; + } else if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_RED, + sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_RED))) { + cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_RED; + } else { + MRVL_LOG(ERR, "Error while parsing: %s", entry); + return -1; + } + } + + return 0; +} + +/** + * Parse QoS configuration - rte_kvargs_process handler. + * + * Opens configuration file and parses its content. + * + * @param key Unused. + * @param path Path to config file. + * @param extra_args Pointer to configuration structure. + * @returns 0 in case of success, exits otherwise. + */ +int +mrvl_get_qoscfg(const char *key __rte_unused, const char *path, + void *extra_args) +{ + struct mrvl_qos_cfg **cfg = extra_args; + struct rte_cfgfile *file = rte_cfgfile_load(path, 0); + uint32_t val; + int n, i, ret; + const char *entry; + char sec_name[32]; + + if (file == NULL) + rte_exit(EXIT_FAILURE, "Cannot load configuration %s\n", path); + + /* Create configuration. This is never accessed on the fast path, + * so we can ignore socket. + */ + *cfg = rte_zmalloc("mrvl_qos_cfg", sizeof(struct mrvl_qos_cfg), 0); + if (*cfg == NULL) + rte_exit(EXIT_FAILURE, "Cannot allocate configuration %s\n", + path); + + n = rte_cfgfile_num_sections(file, MRVL_TOK_PORT, + sizeof(MRVL_TOK_PORT) - 1); + + if (n == 0) { + /* This is weird, but not bad. */ + MRVL_LOG(WARNING, "Empty configuration file?"); + return 0; + } + + /* Use the number of ports given as vdev parameters. */ + for (n = 0; n < (PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC); ++n) { + snprintf(sec_name, sizeof(sec_name), "%s %d %s", + MRVL_TOK_PORT, n, MRVL_TOK_DEFAULT); + + /* Skip ports non-existing in configuration. 
*/ + if (rte_cfgfile_num_sections(file, sec_name, + strlen(sec_name)) <= 0) { + (*cfg)->port[n].use_global_defaults = 1; + (*cfg)->port[n].mapping_priority = + PP2_CLS_QOS_TBL_VLAN_IP_PRI; + continue; + } + + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_DEFAULT_TC); + if (entry) { + if (get_val_securely(entry, &val) < 0 || + val > USHRT_MAX) + return -1; + (*cfg)->port[n].default_tc = (uint8_t)val; + } else { + MRVL_LOG(ERR, + "Default Traffic Class required in custom configuration!"); + return -1; + } + + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_PLCR_ENABLE); + if (entry) { + if (get_val_securely(entry, &val) < 0) + return -1; + (*cfg)->port[n].policer_enable = val; + } + + if ((*cfg)->port[n].policer_enable) { + enum pp2_cls_plcr_token_unit unit; + + /* Read policer token unit */ + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_PLCR_UNIT); + if (entry) { + if (!strncmp(entry, MRVL_TOK_PLCR_UNIT_BYTES, + sizeof(MRVL_TOK_PLCR_UNIT_BYTES))) { + unit = PP2_CLS_PLCR_BYTES_TOKEN_UNIT; + } else if (!strncmp(entry, + MRVL_TOK_PLCR_UNIT_PACKETS, + sizeof(MRVL_TOK_PLCR_UNIT_PACKETS))) { + unit = PP2_CLS_PLCR_PACKETS_TOKEN_UNIT; + } else { + MRVL_LOG(ERR, "Unknown token: %s", + entry); + return -1; + } + (*cfg)->port[n].policer_params.token_unit = + unit; + } + + /* Read policer color mode */ + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_PLCR_COLOR); + if (entry) { + enum pp2_cls_plcr_color_mode mode; + + if (!strncmp(entry, MRVL_TOK_PLCR_COLOR_BLIND, + sizeof(MRVL_TOK_PLCR_COLOR_BLIND))) { + mode = PP2_CLS_PLCR_COLOR_BLIND_MODE; + } else if (!strncmp(entry, + MRVL_TOK_PLCR_COLOR_AWARE, + sizeof(MRVL_TOK_PLCR_COLOR_AWARE))) { + mode = PP2_CLS_PLCR_COLOR_AWARE_MODE; + } else { + MRVL_LOG(ERR, + "Error in parsing: %s", + entry); + return -1; + } + (*cfg)->port[n].policer_params.color_mode = + mode; + } + + /* Read policer cir */ + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_PLCR_CIR); + if (entry) { + if (get_val_securely(entry, &val) < 0) + return -1; + (*cfg)->port[n].policer_params.cir = val; + } + + /* Read policer cbs */ + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_PLCR_CBS); + if (entry) { + if (get_val_securely(entry, &val) < 0) + return -1; + (*cfg)->port[n].policer_params.cbs = val; + } + + /* Read policer ebs */ + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_PLCR_EBS); + if (entry) { + if (get_val_securely(entry, &val) < 0) + return -1; + (*cfg)->port[n].policer_params.ebs = val; + } + } + + /* + * Read per-port rate limiting. Setting that will + * disable per-queue rate limiting. 
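+	 * Illustrative example (added by the editor, values assumed):
+	 * enabling it in the "[port N default]" section looks like:
+	 *   rate_limit_enable = 1
+	 *   rate_limit = 2000   ; kbps
+	 *   burst_size = 48     ; kB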
+	 */
+	entry = rte_cfgfile_get_entry(file, sec_name,
+				      MRVL_TOK_RATE_LIMIT_ENABLE);
+	if (entry) {
+		if (get_val_securely(entry, &val) < 0)
+			return -1;
+		(*cfg)->port[n].rate_limit_enable = val;
+	}
+
+	if ((*cfg)->port[n].rate_limit_enable) {
+		entry = rte_cfgfile_get_entry(file, sec_name,
+					      MRVL_TOK_BURST_SIZE);
+		if (entry) {
+			if (get_val_securely(entry, &val) < 0)
+				return -1;
+			(*cfg)->port[n].rate_limit_params.cbs = val;
+		}
+
+		entry = rte_cfgfile_get_entry(file, sec_name,
+					      MRVL_TOK_RATE_LIMIT);
+		if (entry) {
+			if (get_val_securely(entry, &val) < 0)
+				return -1;
+			(*cfg)->port[n].rate_limit_params.cir = val;
+		}
+	}
+
+	entry = rte_cfgfile_get_entry(file, sec_name,
+				      MRVL_TOK_MAPPING_PRIORITY);
+	if (entry) {
+		if (!strncmp(entry, MRVL_TOK_VLAN_IP,
+			     sizeof(MRVL_TOK_VLAN_IP)))
+			(*cfg)->port[n].mapping_priority =
+				PP2_CLS_QOS_TBL_VLAN_IP_PRI;
+		else if (!strncmp(entry, MRVL_TOK_IP_VLAN,
+				  sizeof(MRVL_TOK_IP_VLAN)))
+			(*cfg)->port[n].mapping_priority =
+				PP2_CLS_QOS_TBL_IP_VLAN_PRI;
+		else if (!strncmp(entry, MRVL_TOK_IP,
+				  sizeof(MRVL_TOK_IP)))
+			(*cfg)->port[n].mapping_priority =
+				PP2_CLS_QOS_TBL_IP_PRI;
+		else if (!strncmp(entry, MRVL_TOK_VLAN,
+				  sizeof(MRVL_TOK_VLAN)))
+			(*cfg)->port[n].mapping_priority =
+				PP2_CLS_QOS_TBL_VLAN_PRI;
+		else
+			rte_exit(EXIT_FAILURE,
+				 "Error in parsing %s value (%s)!\n",
+				 MRVL_TOK_MAPPING_PRIORITY, entry);
+	} else {
+		(*cfg)->port[n].mapping_priority =
+			PP2_CLS_QOS_TBL_VLAN_IP_PRI;
+	}
+
+	for (i = 0; i < MRVL_PP2_RXQ_MAX; ++i) {
+		ret = get_outq_cfg(file, n, i, *cfg);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE,
+				 "Error %d parsing port %d outq %d!\n",
+				 ret, n, i);
+	}
+
+	for (i = 0; i < MRVL_PP2_TC_MAX; ++i) {
+		ret = parse_tc_cfg(file, n, i, *cfg);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE,
+				 "Error %d parsing port %d tc %d!\n",
+				 ret, n, i);
+	}
+	}
+
+	return 0;
+}
+
+/**
+ * Setup Traffic Class.
+ *
+ * Fill in TC parameters in a single MUSDK TC config entry.
+ * @param param TC parameters entry.
+ * @param inqs Number of MUSDK in-queues in this TC.
+ * @param bpool Bpool for this TC.
+ * @param color Default color for this TC.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+setup_tc(struct pp2_ppio_tc_params *param, uint8_t inqs,
+	 struct pp2_bpool *bpool, enum pp2_ppio_color color)
+{
+	struct pp2_ppio_inq_params *inq_params;
+
+	param->pkt_offset = MRVL_PKT_OFFS;
+	param->pools[0] = bpool;
+	param->default_color = color;
+
+	inq_params = rte_zmalloc_socket("inq_params",
+					inqs * sizeof(*inq_params),
+					0, rte_socket_id());
+	if (!inq_params)
+		return -ENOMEM;
+
+	param->num_in_qs = inqs;
+
+	/* Release old config if necessary. */
+	if (param->inqs_params)
+		rte_free(param->inqs_params);
+
+	param->inqs_params = inq_params;
+
+	return 0;
+}
+
+/**
+ * Setup ingress policer.
+ *
+ * @param priv Port's private data.
+ * @param params Pointer to the policer's configuration.
+ * @returns 0 in case of success, negative values otherwise.
+ */
+static int
+setup_policer(struct mrvl_priv *priv, struct pp2_cls_plcr_params *params)
+{
+	char match[16];
+	int ret;
+
+	snprintf(match, sizeof(match), "policer-%d:%d",
+		 priv->pp_id, priv->ppio_id);
+	params->match = match;
+
+	ret = pp2_cls_plcr_init(params, &priv->policer);
+	if (ret) {
+		MRVL_LOG(ERR, "Failed to setup %s", match);
+		return -1;
+	}
+
+	priv->ppio_params.inqs_params.plcr = priv->policer;
+
+	return 0;
+}
+
+/**
+ * Configure RX Queues in a given port.
+ *
+ * Sets up RX queues, their Traffic Classes and the DPDK rxq->(TC,inq) mapping.
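+ *
+ * Illustrative note (added by the editor, values assumed): with
+ * tc0 = {rxq 0, 1} and tc1 = {rxq 2}, the resulting map is
+ * rxq_map[0] = (tc 0, inq 0), rxq_map[1] = (tc 0, inq 1) and
+ * rxq_map[2] = (tc 1, inq 0).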
+ * + * @param priv Port's private data + * @param portid DPDK port ID + * @param max_queues Maximum number of queues to configure. + * @returns 0 in case of success, negative value otherwise. + */ +int +mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid, + uint16_t max_queues) +{ + size_t i, tc; + + if (mrvl_qos_cfg == NULL || + mrvl_qos_cfg->port[portid].use_global_defaults) { + /* + * No port configuration, use default: 1 TC, no QoS, + * TC color set to green. + */ + priv->ppio_params.inqs_params.num_tcs = 1; + setup_tc(&priv->ppio_params.inqs_params.tcs_params[0], + max_queues, priv->bpool, PP2_PPIO_COLOR_GREEN); + + /* Direct mapping of queues i.e. 0->0, 1->1 etc. */ + for (i = 0; i < max_queues; ++i) { + priv->rxq_map[i].tc = 0; + priv->rxq_map[i].inq = i; + } + return 0; + } + + /* We need only a subset of configuration. */ + struct port_cfg *port_cfg = &mrvl_qos_cfg->port[portid]; + + priv->qos_tbl_params.type = port_cfg->mapping_priority; + + /* + * We need to reverse mapping, from tc->pcp (better from usability + * point of view) to pcp->tc (configurable in MUSDK). + * First, set all map elements to "default". + */ + for (i = 0; i < RTE_DIM(priv->qos_tbl_params.pcp_cos_map); ++i) + priv->qos_tbl_params.pcp_cos_map[i].tc = port_cfg->default_tc; + + /* Then, fill in all known values. */ + for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) { + if (port_cfg->tc[tc].pcps > RTE_DIM(port_cfg->tc[0].pcp)) { + /* Better safe than sorry. */ + MRVL_LOG(ERR, + "Too many PCPs configured in TC %zu!", tc); + return -1; + } + for (i = 0; i < port_cfg->tc[tc].pcps; ++i) { + priv->qos_tbl_params.pcp_cos_map[ + port_cfg->tc[tc].pcp[i]].tc = tc; + } + } + + /* + * The same logic goes with DSCP. + * First, set all map elements to "default". + */ + for (i = 0; i < RTE_DIM(priv->qos_tbl_params.dscp_cos_map); ++i) + priv->qos_tbl_params.dscp_cos_map[i].tc = + port_cfg->default_tc; + + /* Fill in all known values. */ + for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) { + if (port_cfg->tc[tc].dscps > RTE_DIM(port_cfg->tc[0].dscp)) { + /* Better safe than sorry. */ + MRVL_LOG(ERR, + "Too many DSCPs configured in TC %zu!", tc); + return -1; + } + for (i = 0; i < port_cfg->tc[tc].dscps; ++i) { + priv->qos_tbl_params.dscp_cos_map[ + port_cfg->tc[tc].dscp[i]].tc = tc; + } + } + + /* + * Surprisingly, similar logic goes with queue mapping. + * We need only to store qid->tc mapping, + * to know TC when queue is read. + */ + for (i = 0; i < RTE_DIM(priv->rxq_map); ++i) + priv->rxq_map[i].tc = MRVL_UNKNOWN_TC; + + /* Set up DPDKq->(TC,inq) mapping. */ + for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) { + if (port_cfg->tc[tc].inqs > RTE_DIM(port_cfg->tc[0].inq)) { + /* Overflow. */ + MRVL_LOG(ERR, + "Too many RX queues configured per TC %zu!", + tc); + return -1; + } + for (i = 0; i < port_cfg->tc[tc].inqs; ++i) { + uint8_t idx = port_cfg->tc[tc].inq[i]; + + if (idx > RTE_DIM(priv->rxq_map)) { + MRVL_LOG(ERR, "Bad queue index %d!", idx); + return -1; + } + + priv->rxq_map[idx].tc = tc; + priv->rxq_map[idx].inq = i; + } + } + + /* + * Set up TC configuration. TCs need to be sequenced: 0, 1, 2 + * with no gaps. Empty TC means end of processing. 
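+	 * Illustrative note (added by the editor): e.g. if only tc0 and tc1
+	 * have inqs configured, the loop below sets up two TCs and num_tcs
+	 * ends at 2.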
+	 */
+	for (i = 0; i < MRVL_PP2_TC_MAX; ++i) {
+		if (port_cfg->tc[i].inqs == 0)
+			break;
+		setup_tc(&priv->ppio_params.inqs_params.tcs_params[i],
+			 port_cfg->tc[i].inqs,
+			 priv->bpool, port_cfg->tc[i].color);
+	}
+
+	priv->ppio_params.inqs_params.num_tcs = i;
+
+	if (port_cfg->policer_enable)
+		return setup_policer(priv, &port_cfg->policer_params);
+
+	return 0;
+}
+
+/**
+ * Configure TX Queues in a given port.
+ *
+ * Sets up TX queues egress scheduler and limiter.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_txqs(struct mrvl_priv *priv, uint16_t portid,
+		    uint16_t max_queues)
+{
+	/* We need only a subset of configuration. */
+	struct port_cfg *port_cfg;
+	int i;
+
+	if (mrvl_qos_cfg == NULL)
+		return 0;
+
+	port_cfg = &mrvl_qos_cfg->port[portid];
+
+	priv->ppio_params.rate_limit_enable = port_cfg->rate_limit_enable;
+	if (port_cfg->rate_limit_enable)
+		priv->ppio_params.rate_limit_params =
+			port_cfg->rate_limit_params;
+
+	for (i = 0; i < max_queues; i++) {
+		struct pp2_ppio_outq_params *params =
+			&priv->ppio_params.outqs_params.outqs_params[i];
+
+		params->sched_mode = port_cfg->outq[i].sched_mode;
+		params->weight = port_cfg->outq[i].weight;
+		params->rate_limit_enable = port_cfg->outq[i].rate_limit_enable;
+		params->rate_limit_params = port_cfg->outq[i].rate_limit_params;
+	}
+
+	return 0;
+}
+
+/**
+ * Start QoS mapping.
+ *
+ * Finalize QoS table configuration and initialize it in SDK. It can be done
+ * only after port is started, so we have a valid ppio reference.
+ *
+ * @param priv Port's private (configuration) data.
+ * @returns 0 in case of success, exits otherwise.
+ */
+int
+mrvl_start_qos_mapping(struct mrvl_priv *priv)
+{
+	size_t i;
+
+	if (priv->ppio == NULL) {
+		MRVL_LOG(ERR, "ppio must not be NULL here!");
+		return -1;
+	}
+
+	for (i = 0; i < RTE_DIM(priv->qos_tbl_params.pcp_cos_map); ++i)
+		priv->qos_tbl_params.pcp_cos_map[i].ppio = priv->ppio;
+
+	for (i = 0; i < RTE_DIM(priv->qos_tbl_params.dscp_cos_map); ++i)
+		priv->qos_tbl_params.dscp_cos_map[i].ppio = priv->ppio;
+
+	/* Initialize Classifier QoS table. */
+
+	return pp2_cls_qos_tbl_init(&priv->qos_tbl_params, &priv->qos_tbl);
+}
diff --git a/drivers/net/mvpp2/mrvl_qos.h b/drivers/net/mvpp2/mrvl_qos.h
new file mode 100644
index 00000000..fa9ddecb
--- /dev/null
+++ b/drivers/net/mvpp2/mrvl_qos.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _MRVL_QOS_H_
+#define _MRVL_QOS_H_
+
+#include <rte_common.h>
+
+#include "mrvl_ethdev.h"
+
+/** Code Points per Traffic Class. Equals max(DSCP, PCP). */
+#define MRVL_CP_PER_TC (64)
+
+/** Value used as "unknown". */
+#define MRVL_UNKNOWN_TC (0xFF)
+
+/* QoS config.
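+ *
+ * Editor's note (illustrative): one port_cfg per DPDK port, filled in from
+ * the "[port N ...]" sections by mrvl_get_qoscfg(); e.g. tc[1].pcp lists
+ * the PCP code points steered to traffic class 1.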
*/ +struct mrvl_qos_cfg { + struct port_cfg { + int rate_limit_enable; + struct pp2_ppio_rate_limit_params rate_limit_params; + struct { + uint8_t inq[MRVL_PP2_RXQ_MAX]; + uint8_t dscp[MRVL_CP_PER_TC]; + uint8_t pcp[MRVL_CP_PER_TC]; + uint8_t inqs; + uint8_t dscps; + uint8_t pcps; + enum pp2_ppio_color color; + } tc[MRVL_PP2_TC_MAX]; + struct { + enum pp2_ppio_outq_sched_mode sched_mode; + uint8_t weight; + int rate_limit_enable; + struct pp2_ppio_rate_limit_params rate_limit_params; + } outq[MRVL_PP2_RXQ_MAX]; + enum pp2_cls_qos_tbl_type mapping_priority; + uint16_t inqs; + uint16_t outqs; + uint8_t default_tc; + uint8_t use_global_defaults; + struct pp2_cls_plcr_params policer_params; + uint8_t policer_enable; + } port[RTE_MAX_ETHPORTS]; +}; + +/** Global QoS configuration. */ +extern struct mrvl_qos_cfg *mrvl_qos_cfg; + +/** + * Parse QoS configuration - rte_kvargs_process handler. + * + * Opens configuration file and parses its content. + * + * @param key Unused. + * @param path Path to config file. + * @param extra_args Pointer to configuration structure. + * @returns 0 in case of success, exits otherwise. + */ +int +mrvl_get_qoscfg(const char *key __rte_unused, const char *path, + void *extra_args); + +/** + * Configure RX Queues in a given port. + * + * Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping. + * + * @param priv Port's private data + * @param portid DPDK port ID + * @param max_queues Maximum number of queues to configure. + * @returns 0 in case of success, negative value otherwise. + */ +int +mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid, + uint16_t max_queues); + +/** + * Configure TX Queues in a given port. + * + * Sets up TX queues egress scheduler and limiter. + * + * @param priv Port's private data + * @param portid DPDK port ID + * @param max_queues Maximum number of queues to configure. + * @returns 0 in case of success, negative value otherwise. + */ +int +mrvl_configure_txqs(struct mrvl_priv *priv, uint16_t portid, + uint16_t max_queues); + +/** + * Start QoS mapping. + * + * Finalize QoS table configuration and initialize it in SDK. It can be done + * only after port is started, so we have a valid ppio reference. + * + * @param priv Port's private (configuration) data. + * @returns 0 in case of success, exits otherwise. 
+ */
+int
+mrvl_start_qos_mapping(struct mrvl_priv *priv);
+
+#endif /* _MRVL_QOS_H_ */
diff --git a/drivers/net/mvpp2/rte_pmd_mvpp2_version.map b/drivers/net/mvpp2/rte_pmd_mvpp2_version.map
new file mode 100644
index 00000000..a7530317
--- /dev/null
+++ b/drivers/net/mvpp2/rte_pmd_mvpp2_version.map
@@ -0,0 +1,3 @@
+DPDK_17.11 {
+	local: *;
+};
diff --git a/drivers/net/netvsc/Makefile b/drivers/net/netvsc/Makefile
new file mode 100644
index 00000000..3c713af3
--- /dev/null
+++ b/drivers/net/netvsc/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_pmd_netvsc.a
+
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+EXPORT_MAP := rte_pmd_netvsc_version.map
+
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_rndis.c
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_nvs.c
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vmbus
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
new file mode 100644
index 00000000..78b842ba
--- /dev/null
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -0,0 +1,761 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Microsoft Corporation
+ * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_malloc.h>
+#include <rte_kvargs.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_cycles.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_bus_vmbus.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_rndis.h"
+#include "hn_nvs.h"
+#include "ndis.h"
+
+#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+			    DEV_TX_OFFLOAD_TCP_CKSUM | \
+			    DEV_TX_OFFLOAD_UDP_CKSUM | \
+			    DEV_TX_OFFLOAD_TCP_TSO | \
+			    DEV_TX_OFFLOAD_MULTI_SEGS | \
+			    DEV_TX_OFFLOAD_VLAN_INSERT)
+
+#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
+			    DEV_RX_OFFLOAD_VLAN_STRIP | \
+			    DEV_RX_OFFLOAD_CRC_STRIP)
+
+int hn_logtype_init;
+int hn_logtype_driver;
+
+struct hn_xstats_name_off {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	unsigned int offset;
+};
+
+static const struct hn_xstats_name_off hn_stat_strings[] = {
+	{ "good_packets", offsetof(struct hn_stats, packets) },
+	{ "good_bytes", offsetof(struct hn_stats, bytes) },
+	{ "errors", offsetof(struct hn_stats, errors) },
+	{ "allocation_failed", offsetof(struct hn_stats, nomemory) },
+	{ "multicast_packets", offsetof(struct hn_stats, multicast) },
+	{ "broadcast_packets", offsetof(struct hn_stats, broadcast) },
+	{ "undersize_packets", offsetof(struct hn_stats, size_bins[0]) },
+	{ "size_64_packets", offsetof(struct hn_stats, size_bins[1]) },
+	{ "size_65_127_packets", offsetof(struct hn_stats, size_bins[2]) },
+	{ "size_128_255_packets", offsetof(struct hn_stats, size_bins[3]) },
+	{ "size_256_511_packets", offsetof(struct hn_stats, size_bins[4]) },
+	{ "size_512_1023_packets", offsetof(struct hn_stats, size_bins[5]) },
+	{ "size_1024_1518_packets", offsetof(struct hn_stats, size_bins[6]) },
+	{ "size_1519_max_packets", offsetof(struct hn_stats, size_bins[7]) },
+};
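+
+/*
+ * Illustrative sketch (added by the editor, not part of the original
+ * patch): the name/offset table above lets the xstats callbacks below read
+ * any counter generically from a queue's hn_stats block, e.g.:
+ */
+#if 0
+	const char *stats = (const char *)&txq->stats;
+	uint64_t v = *(const uint64_t *)(stats + hn_stat_strings[t].offset);
+#endif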
+
+static struct rte_eth_dev *
+eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size)
+{
+	struct rte_eth_dev *eth_dev;
+	const char *name;
+
+	if (!dev)
+		return NULL;
+
+	name = dev->device.name;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			PMD_DRV_LOG(NOTICE, "can not allocate rte ethdev");
+			return NULL;
+		}
+
+		if (private_data_size) {
+			eth_dev->data->dev_private =
+				rte_zmalloc_socket(name, private_data_size,
+					RTE_CACHE_LINE_SIZE, dev->device.numa_node);
+			if (!eth_dev->data->dev_private) {
+				PMD_DRV_LOG(NOTICE, "can not allocate driver data");
+				rte_eth_dev_release_port(eth_dev);
+				return NULL;
+			}
+		}
+	} else {
+		eth_dev = rte_eth_dev_attach_secondary(name);
+		if (!eth_dev) {
+			PMD_DRV_LOG(NOTICE, "can not attach secondary");
+			return NULL;
+		}
+	}
+
+	eth_dev->device = &dev->device;
+	eth_dev->intr_handle = &dev->intr_handle;
+
+	return eth_dev;
+}
+
+static void
+eth_dev_vmbus_release(struct rte_eth_dev *eth_dev)
+{
+	/* free ether device */
+	rte_eth_dev_release_port(eth_dev);
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_free(eth_dev->data->dev_private);
+
+	eth_dev->data->dev_private = NULL;
+
+	/*
+	 * Secondary process will check the name to attach.
+	 * Clear this field to avoid attaching released ports.
+	 */
+	eth_dev->data->name[0] = '\0';
+
+	eth_dev->device = NULL;
+	eth_dev->intr_handle = NULL;
+}
+
+/* Update link status.
+ * Note: the DPDK definition of "wait_to_complete"
+ * means block this call until the link is up,
+ * which is not worth supporting here.
+ */
+static int
+hn_dev_link_update(struct rte_eth_dev *dev,
+		   __rte_unused int wait_to_complete)
+{
+	struct hn_data *hv = dev->data->dev_private;
+	struct rte_eth_link link, old;
+	int error;
+
+	old = dev->data->dev_link;
+
+	error = hn_rndis_get_linkstatus(hv);
+	if (error)
+		return error;
+
+	hn_rndis_get_linkspeed(hv);
+
+	link = (struct rte_eth_link) {
+		.link_duplex = ETH_LINK_FULL_DUPLEX,
+		.link_autoneg = ETH_LINK_FIXED,
+		.link_speed = hv->link_speed / 10000,
+	};
+
+	if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
+		link.link_status = ETH_LINK_UP;
+	else
+		link.link_status = ETH_LINK_DOWN;
+
+	if (old.link_status == link.link_status)
+		return 0;
+
+	PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
+		     (link.link_status == ETH_LINK_UP) ?
"up" : "down"); + + return rte_eth_linkstatus_set(dev, &link); +} + +static void hn_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct hn_data *hv = dev->data->dev_private; + + dev_info->speed_capa = ETH_LINK_SPEED_10G; + dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE; + dev_info->max_rx_pktlen = HN_MAX_XFER_LEN; + dev_info->max_mac_addrs = 1; + + dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ; + dev_info->flow_type_rss_offloads = + ETH_RSS_IPV4 | ETH_RSS_IPV6 | ETH_RSS_TCP | ETH_RSS_UDP; + + dev_info->max_rx_queues = hv->max_queues; + dev_info->max_tx_queues = hv->max_queues; + + hn_rndis_get_offload(hv, dev_info); +} + +static void +hn_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + + hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS); +} + +static void +hn_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + uint32_t filter; + + filter = NDIS_PACKET_TYPE_DIRECTED | NDIS_PACKET_TYPE_BROADCAST; + if (dev->data->all_multicast) + filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; + hn_rndis_set_rxfilter(hv, filter); +} + +static void +hn_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + + hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED | + NDIS_PACKET_TYPE_ALL_MULTICAST | + NDIS_PACKET_TYPE_BROADCAST); +} + +static void +hn_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + + hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED | + NDIS_PACKET_TYPE_BROADCAST); +} + +/* Setup shared rx/tx queue data */ +static int hn_subchan_configure(struct hn_data *hv, + uint32_t subchan) +{ + struct vmbus_channel *primary = hn_primary_chan(hv); + int err; + unsigned int retry = 0; + + PMD_DRV_LOG(DEBUG, + "open %u subchannels", subchan); + + /* Send create sub channels command */ + err = hn_nvs_alloc_subchans(hv, &subchan); + if (err) + return err; + + while (subchan > 0) { + struct vmbus_channel *new_sc; + uint16_t chn_index; + + err = rte_vmbus_subchan_open(primary, &new_sc); + if (err == -ENOENT && ++retry < 1000) { + /* This can happen if not ready yet */ + rte_delay_ms(10); + continue; + } + + if (err) { + PMD_DRV_LOG(ERR, + "open subchannel failed: %d", err); + return err; + } + + retry = 0; + chn_index = rte_vmbus_sub_channel_index(new_sc); + if (chn_index == 0 || chn_index > hv->max_queues) { + PMD_DRV_LOG(ERR, + "Invalid subchannel offermsg channel %u", + chn_index); + return -EIO; + } + + PMD_DRV_LOG(DEBUG, "new sub channel %u", chn_index); + hv->channels[chn_index] = new_sc; + --subchan; + } + + return err; +} + +static int hn_dev_configure(struct rte_eth_dev *dev) +{ + const struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode; + const struct rte_eth_txmode *txmode = &dev_conf->txmode; + + const struct rte_eth_rss_conf *rss_conf = + &dev_conf->rx_adv_conf.rss_conf; + struct hn_data *hv = dev->data->dev_private; + uint64_t unsupported; + int err, subchan; + + PMD_INIT_FUNC_TRACE(); + + unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS; + if (unsupported) { + PMD_DRV_LOG(NOTICE, + "unsupported TX offload: %#" PRIx64, + unsupported); + return -EINVAL; + } + + unsupported = rxmode->offloads & ~HN_RX_OFFLOAD_CAPS; + if (unsupported) { + PMD_DRV_LOG(NOTICE, + "unsupported RX offload: %#" PRIx64, + rxmode->offloads); + return -EINVAL; + } + + err = hn_rndis_conf_offload(hv, txmode->offloads, + 
rxmode->offloads); + if (err) { + PMD_DRV_LOG(NOTICE, + "offload configure failed"); + return err; + } + + hv->num_queues = RTE_MAX(dev->data->nb_rx_queues, + dev->data->nb_tx_queues); + subchan = hv->num_queues - 1; + if (subchan > 0) { + err = hn_subchan_configure(hv, subchan); + if (err) { + PMD_DRV_LOG(NOTICE, + "subchannel configuration failed"); + return err; + } + + err = hn_rndis_conf_rss(hv, rss_conf); + if (err) { + PMD_DRV_LOG(NOTICE, + "rss configuration failed"); + return err; + } + } + + return 0; +} + +static int hn_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + unsigned int i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + const struct hn_tx_queue *txq = dev->data->tx_queues[i]; + + if (!txq) + continue; + + stats->opackets += txq->stats.packets; + stats->obytes += txq->stats.bytes; + stats->oerrors += txq->stats.errors + txq->stats.nomemory; + + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_opackets[i] = txq->stats.packets; + stats->q_obytes[i] = txq->stats.bytes; + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + const struct hn_rx_queue *rxq = dev->data->rx_queues[i]; + + if (!rxq) + continue; + + stats->ipackets += rxq->stats.packets; + stats->ibytes += rxq->stats.bytes; + stats->ierrors += rxq->stats.errors; + stats->imissed += rxq->ring_full; + + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_ipackets[i] = rxq->stats.packets; + stats->q_ibytes[i] = rxq->stats.bytes; + } + } + + stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; + return 0; +} + +static void +hn_dev_stats_reset(struct rte_eth_dev *dev) +{ + unsigned int i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct hn_tx_queue *txq = dev->data->tx_queues[i]; + + if (!txq) + continue; + memset(&txq->stats, 0, sizeof(struct hn_stats)); + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct hn_rx_queue *rxq = dev->data->rx_queues[i]; + + if (!rxq) + continue; + + memset(&rxq->stats, 0, sizeof(struct hn_stats)); + rxq->ring_full = 0; + } +} + +static int +hn_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int limit) +{ + unsigned int i, t, count = 0; + + PMD_INIT_FUNC_TRACE(); + + if (!xstats_names) + return dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings) + + dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings); + + /* Note: limit checked in rte_eth_xstats_names() */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + const struct hn_tx_queue *txq = dev->data->tx_queues[i]; + + if (!txq) + continue; + + for (t = 0; t < RTE_DIM(hn_stat_strings); t++) + snprintf(xstats_names[count++].name, + RTE_ETH_XSTATS_NAME_SIZE, + "tx_q%u_%s", i, hn_stat_strings[t].name); + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + const struct hn_rx_queue *rxq = dev->data->rx_queues[i]; + + if (!rxq) + continue; + + for (t = 0; t < RTE_DIM(hn_stat_strings); t++) + snprintf(xstats_names[count++].name, + RTE_ETH_XSTATS_NAME_SIZE, + "rx_q%u_%s", i, + hn_stat_strings[t].name); + } + + return count; +} + +static int +hn_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, + unsigned int n) +{ + unsigned int i, t, count = 0; + + const unsigned int nstats = + dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings) + + dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings); + const char *stats; + + PMD_INIT_FUNC_TRACE(); + + if (n < nstats) + return nstats; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + const struct hn_tx_queue *txq = 
dev->data->tx_queues[i]; + + if (!txq) + continue; + + stats = (const char *)&txq->stats; + for (t = 0; t < RTE_DIM(hn_stat_strings); t++) + xstats[count++].value = *(const uint64_t *) + (stats + hn_stat_strings[t].offset); + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + const struct hn_rx_queue *rxq = dev->data->rx_queues[i]; + + if (!rxq) + continue; + + stats = (const char *)&rxq->stats; + for (t = 0; t < RTE_DIM(hn_stat_strings); t++) + xstats[count++].value = *(const uint64_t *) + (stats + hn_stat_strings[t].offset); + } + + return count; +} + +static int +hn_dev_start(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + /* check if lsc interrupt feature is enabled */ + if (dev->data->dev_conf.intr_conf.lsc) { + PMD_DRV_LOG(ERR, "link status not supported yet"); + return -ENOTSUP; + } + + return hn_rndis_set_rxfilter(hv, + NDIS_PACKET_TYPE_BROADCAST | + NDIS_PACKET_TYPE_ALL_MULTICAST | + NDIS_PACKET_TYPE_DIRECTED); +} + +static void +hn_dev_stop(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + hn_rndis_set_rxfilter(hv, 0); +} + +static void +hn_dev_close(struct rte_eth_dev *dev __rte_unused) +{ + PMD_INIT_LOG(DEBUG, "close"); +} + +static const struct eth_dev_ops hn_eth_dev_ops = { + .dev_configure = hn_dev_configure, + .dev_start = hn_dev_start, + .dev_stop = hn_dev_stop, + .dev_close = hn_dev_close, + .dev_infos_get = hn_dev_info_get, + .txq_info_get = hn_dev_tx_queue_info, + .rxq_info_get = hn_dev_rx_queue_info, + .promiscuous_enable = hn_dev_promiscuous_enable, + .promiscuous_disable = hn_dev_promiscuous_disable, + .allmulticast_enable = hn_dev_allmulticast_enable, + .allmulticast_disable = hn_dev_allmulticast_disable, + .tx_queue_setup = hn_dev_tx_queue_setup, + .tx_queue_release = hn_dev_tx_queue_release, + .rx_queue_setup = hn_dev_rx_queue_setup, + .rx_queue_release = hn_dev_rx_queue_release, + .link_update = hn_dev_link_update, + .stats_get = hn_dev_stats_get, + .xstats_get = hn_dev_xstats_get, + .xstats_get_names = hn_dev_xstats_get_names, + .stats_reset = hn_dev_stats_reset, + .xstats_reset = hn_dev_stats_reset, +}; + +/* + * Setup connection between PMD and kernel. + */ +static int +hn_attach(struct hn_data *hv, unsigned int mtu) +{ + int error; + + /* Attach NVS */ + error = hn_nvs_attach(hv, mtu); + if (error) + goto failed_nvs; + + /* Attach RNDIS */ + error = hn_rndis_attach(hv); + if (error) + goto failed_rndis; + + /* + * NOTE: + * Under certain conditions on certain versions of Hyper-V, + * the RNDIS rxfilter is _not_ zero on the hypervisor side + * after the successful RNDIS initialization. + */ + hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_NONE); + return 0; +failed_rndis: + hn_nvs_detach(hv); +failed_nvs: + return error; +} + +static void +hn_detach(struct hn_data *hv) +{ + hn_nvs_detach(hv); + hn_rndis_detach(hv); +} + +static int +eth_hn_dev_init(struct rte_eth_dev *eth_dev) +{ + struct hn_data *hv = eth_dev->data->dev_private; + struct rte_device *device = eth_dev->device; + struct rte_vmbus_device *vmbus; + unsigned int rxr_cnt; + int err, max_chan; + + PMD_INIT_FUNC_TRACE(); + + vmbus = container_of(device, struct rte_vmbus_device, device); + eth_dev->dev_ops = &hn_eth_dev_ops; + eth_dev->tx_pkt_burst = &hn_xmit_pkts; + eth_dev->rx_pkt_burst = &hn_recv_pkts; + + /* + * for secondary processes, we don't initialize any further as primary + * has already done this work. 
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	/* Since Hyper-V only supports one MAC address, just use local data */
+	eth_dev->data->mac_addrs = &hv->mac_addr;
+
+	hv->vmbus = vmbus;
+	hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP];
+	hv->chim_res = &vmbus->resource[HV_SEND_BUF_MAP];
+	hv->port_id = eth_dev->data->port_id;
+
+	/* Initialize primary channel input for control operations */
+	err = rte_vmbus_chan_open(vmbus, &hv->channels[0]);
+	if (err)
+		return err;
+
+	hv->primary = hn_rx_queue_alloc(hv, 0,
+					eth_dev->device->numa_node);
+
+	if (!hv->primary)
+		return -ENOMEM;
+
+	err = hn_attach(hv, ETHER_MTU);
+	if (err)
+		goto failed;
+
+	err = hn_tx_pool_init(eth_dev);
+	if (err)
+		goto failed;
+
+	err = hn_rndis_get_eaddr(hv, hv->mac_addr.addr_bytes);
+	if (err)
+		goto failed;
+
+	max_chan = rte_vmbus_max_channels(vmbus);
+	PMD_INIT_LOG(DEBUG, "VMBus max channels %d", max_chan);
+	if (max_chan <= 0)
+		goto failed;
+
+	if (hn_rndis_query_rsscaps(hv, &rxr_cnt) != 0)
+		rxr_cnt = 1;
+
+	hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan);
+
+	return 0;
+
+failed:
+	PMD_INIT_LOG(NOTICE, "device init failed");
+
+	hn_detach(hv);
+	return err;
+}
+
+static int
+eth_hn_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct hn_data *hv = eth_dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	hn_dev_stop(eth_dev);
+	hn_dev_close(eth_dev);
+
+	eth_dev->dev_ops = NULL;
+	eth_dev->tx_pkt_burst = NULL;
+	eth_dev->rx_pkt_burst = NULL;
+
+	hn_detach(hv);
+	rte_vmbus_chan_close(hv->primary->chan);
+	rte_free(hv->primary);
+
+	eth_dev->data->mac_addrs = NULL;
+
+	return 0;
+}
+
+static int eth_hn_probe(struct rte_vmbus_driver *drv __rte_unused,
+			struct rte_vmbus_device *dev)
+{
+	struct rte_eth_dev *eth_dev;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	eth_dev = eth_dev_vmbus_allocate(dev, sizeof(struct hn_data));
+	if (!eth_dev)
+		return -ENOMEM;
+
+	ret = eth_hn_dev_init(eth_dev);
+	if (ret)
+		eth_dev_vmbus_release(eth_dev);
+	else
+		rte_eth_dev_probing_finish(eth_dev);
+
+	return ret;
+}
+
+static int eth_hn_remove(struct rte_vmbus_device *dev)
+{
+	struct rte_eth_dev *eth_dev;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	eth_dev = rte_eth_dev_allocated(dev->device.name);
+	if (!eth_dev)
+		return -ENODEV;
+
+	ret = eth_hn_dev_uninit(eth_dev);
+	if (ret)
+		return ret;
+
+	eth_dev_vmbus_release(eth_dev);
+	return 0;
+}
+
+/* Network device GUID */
+static const rte_uuid_t hn_net_ids[] = {
+	/* f8615163-df3e-46c5-913f-f2d2f965ed0e */
+	RTE_UUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x913f, 0xf2d2f965ed0eULL),
+	{ 0 }
+};
+
+static struct rte_vmbus_driver rte_netvsc_pmd = {
+	.id_table = hn_net_ids,
+	.probe = eth_hn_probe,
+	.remove = eth_hn_remove,
+};
+
+RTE_PMD_REGISTER_VMBUS(net_netvsc, rte_netvsc_pmd);
+RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic");
+
+RTE_INIT(hn_init_log);
+static void
+hn_init_log(void)
+{
+	hn_logtype_init = rte_log_register("pmd.net.netvsc.init");
+	if (hn_logtype_init >= 0)
+		rte_log_set_level(hn_logtype_init, RTE_LOG_NOTICE);
+	hn_logtype_driver = rte_log_register("pmd.net.netvsc.driver");
+	if (hn_logtype_driver >= 0)
+		rte_log_set_level(hn_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/netvsc/hn_logs.h b/drivers/net/netvsc/hn_logs.h
new file mode 100644
index 00000000..cddadef0
--- /dev/null
+++ b/drivers/net/netvsc/hn_logs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#ifndef _HN_LOGS_H_
+#define _HN_LOGS_H_
+
+#include <rte_log.h>
+
+extern int hn_logtype_init;
+extern int hn_logtype_driver;
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, hn_logtype_init, "%s(): " fmt "\n",\
+		__func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_NETVSC_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, hn_logtype_driver, \
+		"%s() rx: " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_NETVSC_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, hn_logtype_driver, \
+		"%s() tx: " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, hn_logtype_driver, "%s(): " fmt "\n", \
+		__func__, ## args)
+
+#endif /* _HN_LOGS_H_ */
diff --git a/drivers/net/netvsc/hn_nvs.c b/drivers/net/netvsc/hn_nvs.c
new file mode 100644
index 00000000..77d3b839
--- /dev/null
+++ b/drivers/net/netvsc/hn_nvs.c
@@ -0,0 +1,546 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Microsoft Corp.
+ * Copyright (c) 2010-2012 Citrix Inc.
+ * Copyright (c) 2012 NetApp Inc.
+ * All rights reserved.
+ */
+
+/*
+ * Network Virtualization Service.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_bus_vmbus.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_nvs.h"
+
+static const uint32_t hn_nvs_version[] = {
+	NVS_VERSION_61,
+	NVS_VERSION_6,
+	NVS_VERSION_5,
+	NVS_VERSION_4,
+	NVS_VERSION_2,
+	NVS_VERSION_1
+};
+
+static int hn_nvs_req_send(struct hn_data *hv,
+			   void *req, uint32_t reqlen)
+{
+	return rte_vmbus_chan_send(hn_primary_chan(hv),
+				   VMBUS_CHANPKT_TYPE_INBAND,
+				   req, reqlen, 0,
+				   VMBUS_CHANPKT_FLAG_NONE, NULL);
+}
+
+static int
+hn_nvs_execute(struct hn_data *hv,
+	       void *req, uint32_t reqlen,
+	       void *resp, uint32_t resplen,
+	       uint32_t type)
+{
+	struct vmbus_channel *chan = hn_primary_chan(hv);
+	char buffer[NVS_RESPSIZE_MAX];
+	const struct hn_nvs_hdr *hdr;
+	uint32_t len;
+	int ret;
+
+	/* Send request to ring buffer */
+	ret = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
+				  req, reqlen, 0,
+				  VMBUS_CHANPKT_FLAG_RC, NULL);
+
+	if (ret) {
+		PMD_DRV_LOG(ERR, "send request failed: %d", ret);
+		return ret;
+	}
+
+ retry:
+	len = sizeof(buffer);
+	ret = rte_vmbus_chan_recv(chan, buffer, &len, NULL);
+	if (ret == -EAGAIN) {
+		rte_delay_us(HN_CHAN_INTERVAL_US);
+		goto retry;
+	}
+
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "recv response failed: %d", ret);
+		return ret;
+	}
+
+	hdr = (struct hn_nvs_hdr *)buffer;
+	if (hdr->type != type) {
+		PMD_DRV_LOG(ERR, "unexpected NVS resp %#x, expect %#x",
+			    hdr->type, type);
+		return -EINVAL;
+	}
+
+	if (len < resplen) {
+		PMD_DRV_LOG(ERR,
+			    "invalid NVS resp len %u (expect %u)",
+			    len, resplen);
+		return -EINVAL;
+	}
+
+	memcpy(resp, buffer, resplen);
+
+	/* All pass!
*/ + return 0; +} + +static int +hn_nvs_doinit(struct hn_data *hv, uint32_t nvs_ver) +{ + struct hn_nvs_init init; + struct hn_nvs_init_resp resp; + uint32_t status; + int error; + + memset(&init, 0, sizeof(init)); + init.type = NVS_TYPE_INIT; + init.ver_min = nvs_ver; + init.ver_max = nvs_ver; + + error = hn_nvs_execute(hv, &init, sizeof(init), + &resp, sizeof(resp), + NVS_TYPE_INIT_RESP); + if (error) + return error; + + status = resp.status; + if (status != NVS_STATUS_OK) { + /* Not fatal, try other versions */ + PMD_INIT_LOG(DEBUG, "nvs init failed for ver 0x%x", + nvs_ver); + return -EINVAL; + } + + return 0; +} + +static int +hn_nvs_conn_rxbuf(struct hn_data *hv) +{ + struct hn_nvs_rxbuf_conn conn; + struct hn_nvs_rxbuf_connresp resp; + uint32_t status; + int error; + + /* Kernel has already setup RXBUF on primary channel. */ + + /* + * Connect RXBUF to NVS. + */ + conn.type = NVS_TYPE_RXBUF_CONN; + conn.gpadl = hv->rxbuf_res->phys_addr; + conn.sig = NVS_RXBUF_SIG; + PMD_DRV_LOG(DEBUG, "connect rxbuff va=%p gpad=%#" PRIx64, + hv->rxbuf_res->addr, + hv->rxbuf_res->phys_addr); + + error = hn_nvs_execute(hv, &conn, sizeof(conn), + &resp, sizeof(resp), + NVS_TYPE_RXBUF_CONNRESP); + if (error) { + PMD_DRV_LOG(ERR, + "exec nvs rxbuf conn failed: %d", + error); + return error; + } + + status = resp.status; + if (status != NVS_STATUS_OK) { + PMD_DRV_LOG(ERR, + "nvs rxbuf conn failed: %x", status); + return -EIO; + } + if (resp.nsect != 1) { + PMD_DRV_LOG(ERR, + "nvs rxbuf response num sections %u != 1", + resp.nsect); + return -EIO; + } + + PMD_DRV_LOG(INFO, + "receive buffer size %u count %u", + resp.nvs_sect[0].slotsz, + resp.nvs_sect[0].slotcnt); + hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt; + + hv->rxbuf_info = rte_calloc("HN_RXBUF_INFO", hv->rxbuf_section_cnt, + sizeof(*hv->rxbuf_info), RTE_CACHE_LINE_SIZE); + if (!hv->rxbuf_info) { + PMD_DRV_LOG(ERR, + "could not allocate rxbuf info"); + return -ENOMEM; + } + + return 0; +} + +static void +hn_nvs_disconn_rxbuf(struct hn_data *hv) +{ + struct hn_nvs_rxbuf_disconn disconn; + int error; + + /* + * Disconnect RXBUF from NVS. + */ + memset(&disconn, 0, sizeof(disconn)); + disconn.type = NVS_TYPE_RXBUF_DISCONN; + disconn.sig = NVS_RXBUF_SIG; + + /* NOTE: No response. */ + error = hn_nvs_req_send(hv, &disconn, sizeof(disconn)); + if (error) { + PMD_DRV_LOG(ERR, + "send nvs rxbuf disconn failed: %d", + error); + } + + rte_free(hv->rxbuf_info); + /* + * Linger long enough for NVS to disconnect RXBUF. + */ + rte_delay_ms(200); +} + +static void +hn_nvs_disconn_chim(struct hn_data *hv) +{ + int error; + + if (hv->chim_cnt != 0) { + struct hn_nvs_chim_disconn disconn; + + /* Disconnect chimney sending buffer from NVS. */ + memset(&disconn, 0, sizeof(disconn)); + disconn.type = NVS_TYPE_CHIM_DISCONN; + disconn.sig = NVS_CHIM_SIG; + + /* NOTE: No response. */ + error = hn_nvs_req_send(hv, &disconn, sizeof(disconn)); + + if (error) { + PMD_DRV_LOG(ERR, + "send nvs chim disconn failed: %d", error); + } + + hv->chim_cnt = 0; + /* + * Linger long enough for NVS to disconnect chimney + * sending buffer. 
+ */ + rte_delay_ms(200); + } +} + +static int +hn_nvs_conn_chim(struct hn_data *hv) +{ + struct hn_nvs_chim_conn chim; + struct hn_nvs_chim_connresp resp; + uint32_t sectsz; + unsigned long len = hv->chim_res->len; + int error; + + /* Connect chimney sending buffer to NVS */ + memset(&chim, 0, sizeof(chim)); + chim.type = NVS_TYPE_CHIM_CONN; + chim.gpadl = hv->chim_res->phys_addr; + chim.sig = NVS_CHIM_SIG; + PMD_DRV_LOG(DEBUG, "connect send buf va=%p gpad=%#" PRIx64, + hv->chim_res->addr, + hv->chim_res->phys_addr); + + error = hn_nvs_execute(hv, &chim, sizeof(chim), + &resp, sizeof(resp), + NVS_TYPE_CHIM_CONNRESP); + if (error) { + PMD_DRV_LOG(ERR, "exec nvs chim conn failed"); + goto cleanup; + } + + if (resp.status != NVS_STATUS_OK) { + PMD_DRV_LOG(ERR, "nvs chim conn failed: %x", + resp.status); + error = -EIO; + goto cleanup; + } + + sectsz = resp.sectsz; + if (sectsz == 0 || sectsz & (sizeof(uint32_t) - 1)) { + /* Can't use chimney sending buffer; done! */ + PMD_DRV_LOG(NOTICE, + "invalid chimney sending buffer section size: %u", + sectsz); + return 0; + } + + hv->chim_szmax = sectsz; + hv->chim_cnt = len / sectsz; + + PMD_DRV_LOG(INFO, "send buffer %lu section size:%u, count:%u", + len, hv->chim_szmax, hv->chim_cnt); + + if (len % hv->chim_szmax != 0) { + PMD_DRV_LOG(NOTICE, + "chimney sending sections are not properly aligned"); + } + + /* Done! */ + return 0; + +cleanup: + hn_nvs_disconn_chim(hv); + return error; +} + +/* + * Configure MTU and enable VLAN. + */ +static int +hn_nvs_conf_ndis(struct hn_data *hv, unsigned int mtu) +{ + struct hn_nvs_ndis_conf conf; + int error; + + memset(&conf, 0, sizeof(conf)); + conf.type = NVS_TYPE_NDIS_CONF; + conf.mtu = mtu + ETHER_HDR_LEN; + conf.caps = NVS_NDIS_CONF_VLAN; + + /* TODO enable SRIOV */ + //if (hv->nvs_ver >= NVS_VERSION_5) + // conf.caps |= NVS_NDIS_CONF_SRIOV; + + /* NOTE: No response. */ + error = hn_nvs_req_send(hv, &conf, sizeof(conf)); + if (error) { + PMD_DRV_LOG(ERR, + "send nvs ndis conf failed: %d", error); + return error; + } + + return 0; +} + +static int +hn_nvs_init_ndis(struct hn_data *hv) +{ + struct hn_nvs_ndis_init ndis; + int error; + + memset(&ndis, 0, sizeof(ndis)); + ndis.type = NVS_TYPE_NDIS_INIT; + ndis.ndis_major = NDIS_VERSION_MAJOR(hv->ndis_ver); + ndis.ndis_minor = NDIS_VERSION_MINOR(hv->ndis_ver); + + /* NOTE: No response. */ + error = hn_nvs_req_send(hv, &ndis, sizeof(ndis)); + if (error) + PMD_DRV_LOG(ERR, + "send nvs ndis init failed: %d", error); + + return error; +} + +static int +hn_nvs_init(struct hn_data *hv) +{ + unsigned int i; + int error; + + /* + * Find the supported NVS version and set NDIS version accordingly. + */ + for (i = 0; i < RTE_DIM(hn_nvs_version); ++i) { + error = hn_nvs_doinit(hv, hn_nvs_version[i]); + if (error) { + PMD_INIT_LOG(DEBUG, "version %#x error %d", + hn_nvs_version[i], error); + continue; + } + + hv->nvs_ver = hn_nvs_version[i]; + + /* Set NDIS version according to NVS version. */ + hv->ndis_ver = NDIS_VERSION_6_30; + if (hv->nvs_ver <= NVS_VERSION_4) + hv->ndis_ver = NDIS_VERSION_6_1; + + PMD_INIT_LOG(DEBUG, + "NVS version %#x, NDIS version %u.%u", + hv->nvs_ver, NDIS_VERSION_MAJOR(hv->ndis_ver), + NDIS_VERSION_MINOR(hv->ndis_ver)); + return 0; + } + + PMD_DRV_LOG(ERR, + "no NVS compatible version available"); + return -ENXIO; +} + +int +hn_nvs_attach(struct hn_data *hv, unsigned int mtu) +{ + int error; + + /* + * Initialize NVS. + */ + error = hn_nvs_init(hv); + if (error) + return error; + + /** Configure NDIS before initializing it. 
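One unit convention worth noting at this call site: hn_nvs_conf_ndis() adds ETHER_HDR_LEN to the DPDK MTU before reporting it to the host, since the NVS side counts the Ethernet header while the DPDK MTU excludes it. A worked check (values illustrative):

    unsigned int mtu = 1500;                 /* standard DPDK MTU          */
    uint32_t nvs_mtu = mtu + ETHER_HDR_LEN;  /* 1500 + 14 = 1514 over NVS  */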
*/ + if (hv->nvs_ver >= NVS_VERSION_2) { + error = hn_nvs_conf_ndis(hv, mtu); + if (error) + return error; + } + + /* + * Initialize NDIS. + */ + error = hn_nvs_init_ndis(hv); + if (error) + return error; + + /* + * Connect RXBUF. + */ + error = hn_nvs_conn_rxbuf(hv); + if (error) + return error; + + /* + * Connect chimney sending buffer. + */ + error = hn_nvs_conn_chim(hv); + if (error) { + hn_nvs_disconn_rxbuf(hv); + return error; + } + + return 0; +} + +void +hn_nvs_detach(struct hn_data *hv __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); + + /* NOTE: there are no requests to stop the NVS. */ + hn_nvs_disconn_rxbuf(hv); + hn_nvs_disconn_chim(hv); +} + +/* + * Ack the consumed RXBUF associated w/ this channel packet, + * so that this RXBUF can be recycled by the hypervisor. + */ +void +hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid) +{ + unsigned int retries = 0; + struct hn_nvs_rndis_ack ack = { + .type = NVS_TYPE_RNDIS_ACK, + .status = NVS_STATUS_OK, + }; + int error; + + PMD_RX_LOG(DEBUG, "ack RX id %" PRIu64, tid); + + again: + error = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_COMP, + &ack, sizeof(ack), tid, + VMBUS_CHANPKT_FLAG_NONE, NULL); + + if (error == 0) + return; + + if (error == -EAGAIN) { + /* + * NOTE: + * This should _not_ happen in real world, since the + * consumption of the TX bufring from the TX path is + * controlled. + */ + PMD_RX_LOG(NOTICE, "RXBUF ack retry"); + if (++retries < 10) { + rte_delay_ms(1); + goto again; + } + } + /* RXBUF leaks! */ + PMD_DRV_LOG(ERR, "RXBUF ack failed"); +} + +int +hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch) +{ + struct hn_nvs_subch_req req; + struct hn_nvs_subch_resp resp; + int error; + + memset(&req, 0, sizeof(req)); + req.type = NVS_TYPE_SUBCH_REQ; + req.op = NVS_SUBCH_OP_ALLOC; + req.nsubch = *nsubch; + + error = hn_nvs_execute(hv, &req, sizeof(req), + &resp, sizeof(resp), + NVS_TYPE_SUBCH_RESP); + if (error) + return error; + + if (resp.status != NVS_STATUS_OK) { + PMD_INIT_LOG(ERR, + "nvs subch alloc failed: %#x", + resp.status); + return -EIO; + } + + if (resp.nsubch > *nsubch) { + PMD_INIT_LOG(NOTICE, + "%u subchans are allocated, requested %u", + resp.nsubch, *nsubch); + } + *nsubch = resp.nsubch; + + return 0; +} + +void +hn_nvs_set_datapath(struct hn_data *hv, uint32_t path) +{ + struct hn_nvs_datapath dp; + + memset(&dp, 0, sizeof(dp)); + dp.type = NVS_TYPE_SET_DATAPATH; + dp.active_path = path; + + hn_nvs_req_send(hv, &dp, sizeof(dp)); +} diff --git a/drivers/net/netvsc/hn_nvs.h b/drivers/net/netvsc/hn_nvs.h new file mode 100644 index 00000000..984a9c11 --- /dev/null +++ b/drivers/net/netvsc/hn_nvs.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018 Microsoft Corp. + * All rights reserved. + */ + +/* + * The indirection table message is the largest message + * received from host, and that is 112 bytes. + */ +#define NVS_RESPSIZE_MAX 256 + +/* + * NDIS protocol version numbers + */ +#define NDIS_VERSION_6_1 0x00060001 +#define NDIS_VERSION_6_20 0x00060014 +#define NDIS_VERSION_6_30 0x0006001e +#define NDIS_VERSION_MAJOR(ver) (((ver) & 0xffff0000) >> 16) +#define NDIS_VERSION_MINOR(ver) ((ver) & 0xffff) + +/* + * NVS versions. 
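As a quick worked example of the NDIS version macros above (illustrative only): NDIS_VERSION_6_30 is 0x0006001e, so the major/minor split comes out as 6 and 30, which is exactly what hn_nvs_init() prints as "NDIS version 6.30".

    uint32_t ver = NDIS_VERSION_6_30;          /* 0x0006001e                            */
    uint32_t major = NDIS_VERSION_MAJOR(ver);  /* (0x0006001e & 0xffff0000) >> 16 == 6  */
    uint32_t minor = NDIS_VERSION_MINOR(ver);  /*  0x0006001e & 0xffff == 0x1e == 30    */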
+ */
+#define NVS_VERSION_1		0x00002
+#define NVS_VERSION_2		0x30002
+#define NVS_VERSION_4		0x40000
+#define NVS_VERSION_5		0x50000
+#define NVS_VERSION_6		0x60000
+#define NVS_VERSION_61		0x60001
+
+#define NVS_RXBUF_SIG		0xcafe
+#define NVS_CHIM_SIG		0xface
+
+#define NVS_CHIM_IDX_INVALID	0xffffffff
+
+#define NVS_RNDIS_MTYPE_DATA	0
+#define NVS_RNDIS_MTYPE_CTRL	1
+
+/*
+ * NVS message transaction status codes.
+ */
+#define NVS_STATUS_OK		1
+#define NVS_STATUS_FAILED	2
+
+/*
+ * NVS request/response message types.
+ */
+#define NVS_TYPE_INIT		1
+#define NVS_TYPE_INIT_RESP	2
+
+#define NVS_TYPE_NDIS_INIT	100
+#define NVS_TYPE_RXBUF_CONN	101
+#define NVS_TYPE_RXBUF_CONNRESP	102
+#define NVS_TYPE_RXBUF_DISCONN	103
+#define NVS_TYPE_CHIM_CONN	104
+#define NVS_TYPE_CHIM_CONNRESP	105
+#define NVS_TYPE_CHIM_DISCONN	106
+#define NVS_TYPE_RNDIS		107
+#define NVS_TYPE_RNDIS_ACK	108
+
+#define NVS_TYPE_NDIS_CONF	125
+#define NVS_TYPE_VFASSOC_NOTE	128	/* notification */
+#define NVS_TYPE_SET_DATAPATH	129
+#define NVS_TYPE_SUBCH_REQ	133
+#define NVS_TYPE_SUBCH_RESP	133	/* same as SUBCH_REQ */
+#define NVS_TYPE_TXTBL_NOTE	134	/* notification */
+
+
+/* NVS message common header */
+struct hn_nvs_hdr {
+	uint32_t type;
+} __rte_packed;
+
+struct hn_nvs_init {
+	uint32_t type;	/* NVS_TYPE_INIT */
+	uint32_t ver_min;
+	uint32_t ver_max;
+	uint8_t rsvd[28];
+} __rte_packed;
+
+struct hn_nvs_init_resp {
+	uint32_t type;	/* NVS_TYPE_INIT_RESP */
+	uint32_t ver;	/* deprecated */
+	uint32_t rsvd;
+	uint32_t status;	/* NVS_STATUS_ */
+} __rte_packed;
+
+/* No response */
+struct hn_nvs_ndis_conf {
+	uint32_t type;	/* NVS_TYPE_NDIS_CONF */
+	uint32_t mtu;
+	uint32_t rsvd;
+	uint64_t caps;	/* NVS_NDIS_CONF_ */
+	uint8_t rsvd1[20];
+} __rte_packed;
+
+#define NVS_NDIS_CONF_SRIOV	0x0004
+#define NVS_NDIS_CONF_VLAN	0x0008
+
+/* No response */
+struct hn_nvs_ndis_init {
+	uint32_t type;	/* NVS_TYPE_NDIS_INIT */
+	uint32_t ndis_major;	/* NDIS_VERSION_MAJOR_ */
+	uint32_t ndis_minor;	/* NDIS_VERSION_MINOR_ */
+	uint8_t rsvd[28];
+} __rte_packed;
+
+#define NVS_DATAPATH_SYNTHETIC	0
+#define NVS_DATAPATH_VF		1
+
+/* No response */
+struct hn_nvs_datapath {
+	uint32_t type;	/* NVS_TYPE_SET_DATAPATH */
+	uint32_t active_path;	/* NVS_DATAPATH_* */
+	uint8_t rsvd[32];
+} __rte_packed;
+
+struct hn_nvs_rxbuf_conn {
+	uint32_t type;	/* NVS_TYPE_RXBUF_CONN */
+	uint32_t gpadl;	/* RXBUF vmbus GPADL */
+	uint16_t sig;	/* NVS_RXBUF_SIG */
+	uint8_t rsvd[30];
+} __rte_packed;
+
+struct hn_nvs_rxbuf_sect {
+	uint32_t start;
+	uint32_t slotsz;
+	uint32_t slotcnt;
+	uint32_t end;
+} __rte_packed;
+
+struct hn_nvs_rxbuf_connresp {
+	uint32_t type;	/* NVS_TYPE_RXBUF_CONNRESP */
+	uint32_t status;	/* NVS_STATUS_ */
+	uint32_t nsect;	/* # of elem in nvs_sect */
+	struct hn_nvs_rxbuf_sect nvs_sect[1];
+} __rte_packed;
+
+/* No response */
+struct hn_nvs_rxbuf_disconn {
+	uint32_t type;	/* NVS_TYPE_RXBUF_DISCONN */
+	uint16_t sig;	/* NVS_RXBUF_SIG */
+	uint8_t rsvd[34];
+} __rte_packed;
+
+struct hn_nvs_chim_conn {
+	uint32_t type;	/* NVS_TYPE_CHIM_CONN */
+	uint32_t gpadl;	/* chimney buf vmbus GPADL */
+	uint16_t sig;	/* NDIS_NVS_CHIM_SIG */
+	uint8_t rsvd[30];
+} __rte_packed;
+
+struct hn_nvs_chim_connresp {
+	uint32_t type;	/* NVS_TYPE_CHIM_CONNRESP */
+	uint32_t status;	/* NVS_STATUS_ */
+	uint32_t sectsz;	/* section size */
+} __rte_packed;
+
+/* No response */
+struct hn_nvs_chim_disconn {
+	uint32_t type;	/* NVS_TYPE_CHIM_DISCONN */
+	uint16_t sig;	/* NVS_CHIM_SIG */
+	uint8_t rsvd[34];
+} __rte_packed;
+
+#define NVS_SUBCH_OP_ALLOC	1
+
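Every fixed-size NVS request above pads out to the same 40-byte wire body. A build-time check along these lines (an illustrative sketch assuming a C11 toolchain, not part of the patch) would catch accidental layout drift in these __rte_packed structures:

    _Static_assert(sizeof(struct hn_nvs_init) == 40,
                   "NVS init request must be 40 bytes on the wire");
    _Static_assert(sizeof(struct hn_nvs_rxbuf_conn) == 40,
                   "NVS rxbuf connect request must be 40 bytes on the wire");
    _Static_assert(sizeof(struct hn_nvs_datapath) == 40,
                   "NVS set-datapath request must be 40 bytes on the wire");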
+struct hn_nvs_subch_req { + uint32_t type; /* NVS_TYPE_SUBCH_REQ */ + uint32_t op; /* NVS_SUBCH_OP_ */ + uint32_t nsubch; + uint8_t rsvd[28]; +} __rte_packed; + +struct hn_nvs_subch_resp { + uint32_t type; /* NVS_TYPE_SUBCH_RESP */ + uint32_t status; /* NVS_STATUS_ */ + uint32_t nsubch; + uint8_t rsvd[28]; +} __rte_packed; + +struct hn_nvs_rndis { + uint32_t type; /* NVS_TYPE_RNDIS */ + uint32_t rndis_mtype;/* NVS_RNDIS_MTYPE_ */ + /* + * Chimney sending buffer index and size. + * + * NOTE: + * If nvs_chim_idx is set to NVS_CHIM_IDX_INVALID + * and nvs_chim_sz is set to 0, then chimney sending + * buffer is _not_ used by this RNDIS message. + */ + uint32_t chim_idx; + uint32_t chim_sz; + uint8_t rsvd[24]; +} __rte_packed; + +struct hn_nvs_rndis_ack { + uint32_t type; /* NVS_TYPE_RNDIS_ACK */ + uint32_t status; /* NVS_STATUS_ */ + uint8_t rsvd[32]; +} __rte_packed; + + +int hn_nvs_attach(struct hn_data *hv, unsigned int mtu); +void hn_nvs_detach(struct hn_data *hv); +void hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid); +int hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch); +void hn_nvs_set_datapath(struct hn_data *hv, uint32_t path); + +static inline int +hn_nvs_send(struct vmbus_channel *chan, uint16_t flags, + void *nvs_msg, int nvs_msglen, uintptr_t sndc, + bool *need_sig) +{ + return rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND, + nvs_msg, nvs_msglen, (uint64_t)sndc, + flags, need_sig); +} + +static inline int +hn_nvs_send_sglist(struct vmbus_channel *chan, + struct vmbus_gpa sg[], unsigned int sglen, + void *nvs_msg, int nvs_msglen, + uintptr_t sndc, bool *need_sig) +{ + return rte_vmbus_chan_send_sglist(chan, sg, sglen, nvs_msg, nvs_msglen, + (uint64_t)sndc, need_sig); +} diff --git a/drivers/net/netvsc/hn_rndis.c b/drivers/net/netvsc/hn_rndis.c new file mode 100644 index 00000000..bde33969 --- /dev/null +++ b/drivers/net/netvsc/hn_rndis.c @@ -0,0 +1,1099 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2009-2018 Microsoft Corp. + * Copyright (c) 2010-2012 Citrix Inc. + * Copyright (c) 2012 NetApp Inc. + * All rights reserved. 
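The two inline senders just defined thread an opaque transaction id (sndc) down to the vmbus layer; the transmit path passes its descriptor pointer there and receives the same id back in the completion event (see hn_nvs_handle_comp() in hn_rxtx.c later in this patch). A condensed usage sketch, assuming a prepared vmbus_gpa array gpa/gpa_cnt and a tx descriptor txd (both names illustrative):

    struct hn_nvs_rndis rndis = {
            .type = NVS_TYPE_RNDIS,
            .rndis_mtype = NVS_RNDIS_MTYPE_DATA,
            .chim_idx = NVS_CHIM_IDX_INVALID,  /* direct send, no chimney */
            .chim_sz = 0,
    };
    bool need_sig = false;

    ret = hn_nvs_send_sglist(chan, gpa, gpa_cnt,
                             &rndis, sizeof(rndis),
                             (uintptr_t)txd, &need_sig);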
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hn_logs.h" +#include "hn_var.h" +#include "hn_nvs.h" +#include "hn_rndis.h" +#include "ndis.h" + +#define HN_RNDIS_XFER_SIZE 0x4000 + +#define HN_NDIS_TXCSUM_CAP_IP4 \ + (NDIS_TXCSUM_CAP_IP4 | NDIS_TXCSUM_CAP_IP4OPT) +#define HN_NDIS_TXCSUM_CAP_TCP4 \ + (NDIS_TXCSUM_CAP_TCP4 | NDIS_TXCSUM_CAP_TCP4OPT) +#define HN_NDIS_TXCSUM_CAP_TCP6 \ + (NDIS_TXCSUM_CAP_TCP6 | NDIS_TXCSUM_CAP_TCP6OPT | \ + NDIS_TXCSUM_CAP_IP6EXT) +#define HN_NDIS_TXCSUM_CAP_UDP6 \ + (NDIS_TXCSUM_CAP_UDP6 | NDIS_TXCSUM_CAP_IP6EXT) +#define HN_NDIS_LSOV2_CAP_IP6 \ + (NDIS_LSOV2_CAP_IP6EXT | NDIS_LSOV2_CAP_TCP6OPT) + +/* Get unique request id */ +static inline uint32_t +hn_rndis_rid(struct hn_data *hv) +{ + uint32_t rid; + + do { + rid = rte_atomic32_add_return(&hv->rndis_req_id, 1); + } while (rid == 0); + + return rid; +} + +static void *hn_rndis_alloc(struct hn_data *hv, size_t size) +{ + return rte_zmalloc_socket("RNDIS", size, PAGE_SIZE, + hv->vmbus->device.numa_node); +} + +#ifdef RTE_LIBRTE_NETVSC_DEBUG_DUMP +void hn_rndis_dump(const void *buf) +{ + const union { + struct rndis_msghdr hdr; + struct rndis_packet_msg pkt; + struct rndis_init_req init_request; + struct rndis_init_comp init_complete; + struct rndis_halt_req halt; + struct rndis_query_req query_request; + struct rndis_query_comp query_complete; + struct rndis_set_req set_request; + struct rndis_set_comp set_complete; + struct rndis_reset_req reset_request; + struct rndis_reset_comp reset_complete; + struct rndis_keepalive_req keepalive_request; + struct rndis_keepalive_comp keepalive_complete; + struct rndis_status_msg indicate_status; + } *rndis_msg = buf; + + switch (rndis_msg->hdr.type) { + case RNDIS_PACKET_MSG: { + const struct rndis_pktinfo *ppi; + unsigned int ppi_len; + + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_MSG_PACKET (len %u, data %u:%u, # oob %u %u:%u, pkt %u:%u)\n", + rndis_msg->pkt.len, + rndis_msg->pkt.dataoffset, + rndis_msg->pkt.datalen, + rndis_msg->pkt.oobdataelements, + rndis_msg->pkt.oobdataoffset, + rndis_msg->pkt.oobdatalen, + rndis_msg->pkt.pktinfooffset, + rndis_msg->pkt.pktinfolen); + + ppi = (const struct rndis_pktinfo *) + ((const char *)buf + + RNDIS_PACKET_MSG_OFFSET_ABS(rndis_msg->pkt.pktinfooffset)); + + ppi_len = rndis_msg->pkt.pktinfolen; + while (ppi_len > 0) { + const void *ppi_data; + + ppi_data = ppi->data; + + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + " PPI (size %u, type %u, offs %u data %#x)\n", + ppi->size, ppi->type, ppi->offset, + *(const uint32_t *)ppi_data); + if (ppi->size == 0) + break; + ppi_len -= ppi->size; + ppi = (const struct rndis_pktinfo *) + ((const char *)ppi + ppi->size); + } + break; + } + case RNDIS_INITIALIZE_MSG: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_MSG_INIT (len %u id %#x, ver %u.%u max xfer %u)\n", + rndis_msg->init_request.len, + rndis_msg->init_request.rid, + rndis_msg->init_request.ver_major, + rndis_msg->init_request.ver_minor, + rndis_msg->init_request.max_xfersz); + break; + + case RNDIS_INITIALIZE_CMPLT: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_MSG_INIT_C (len %u, id %#x, status 0x%x, vers %u.%u, " + "flags %d, max xfer %u, max pkts %u, aligned %u)\n", + rndis_msg->init_complete.len, + rndis_msg->init_complete.rid, + rndis_msg->init_complete.status, + rndis_msg->init_complete.ver_major, + rndis_msg->init_complete.ver_minor, + 
rndis_msg->init_complete.devflags, + rndis_msg->init_complete.pktmaxsz, + rndis_msg->init_complete.pktmaxcnt, + rndis_msg->init_complete.align); + break; + + case RNDIS_HALT_MSG: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_HALT (len %u id %#x)\n", + rndis_msg->halt.len, rndis_msg->halt.rid); + break; + + case RNDIS_QUERY_MSG: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_QUERY (len %u, id %#x, oid %#x, info %u:%u)\n", + rndis_msg->query_request.len, + rndis_msg->query_request.rid, + rndis_msg->query_request.oid, + rndis_msg->query_request.infobuflen, + rndis_msg->query_request.infobufoffset); + break; + + case RNDIS_QUERY_CMPLT: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_MSG_QUERY_C (len %u, id %#x, status 0x%x, buf %u:%u)\n", + rndis_msg->query_complete.len, + rndis_msg->query_complete.rid, + rndis_msg->query_complete.status, + rndis_msg->query_complete.infobuflen, + rndis_msg->query_complete.infobufoffset); + break; + + case RNDIS_SET_MSG: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_SET (len %u, id %#x, oid %#x, info %u:%u)\n", + rndis_msg->set_request.len, + rndis_msg->set_request.rid, + rndis_msg->set_request.oid, + rndis_msg->set_request.infobuflen, + rndis_msg->set_request.infobufoffset); + break; + + case RNDIS_SET_CMPLT: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n", + rndis_msg->set_complete.len, + rndis_msg->set_complete.rid, + rndis_msg->set_complete.status); + break; + + case RNDIS_INDICATE_STATUS_MSG: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_MSG_INDICATE (len %u, status %#x, buf len %u, buf offset %u)\n", + rndis_msg->indicate_status.len, + rndis_msg->indicate_status.status, + rndis_msg->indicate_status.stbuflen, + rndis_msg->indicate_status.stbufoffset); + break; + + case RNDIS_RESET_MSG: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_RESET (len %u, id %#x)\n", + rndis_msg->reset_request.len, + rndis_msg->reset_request.rid); + break; + + case RNDIS_RESET_CMPLT: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_RESET_C (len %u, status %#x address %#x)\n", + rndis_msg->reset_complete.len, + rndis_msg->reset_complete.status, + rndis_msg->reset_complete.adrreset); + break; + + case RNDIS_KEEPALIVE_MSG: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_KEEPALIVE (len %u, id %#x)\n", + rndis_msg->keepalive_request.len, + rndis_msg->keepalive_request.rid); + break; + + case RNDIS_KEEPALIVE_CMPLT: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_KEEPALIVE_C (len %u, id %#x address %#x)\n", + rndis_msg->keepalive_complete.len, + rndis_msg->keepalive_complete.rid, + rndis_msg->keepalive_complete.status); + break; + + default: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS type %#x len %u\n", + rndis_msg->hdr.type, + rndis_msg->hdr.len); + break; + } +} +#endif + +static int hn_nvs_send_rndis_ctrl(struct vmbus_channel *chan, + const void *req, uint32_t reqlen) + +{ + struct hn_nvs_rndis nvs_rndis = { + .type = NVS_TYPE_RNDIS, + .rndis_mtype = NVS_RNDIS_MTYPE_CTRL, + .chim_idx = NVS_CHIM_IDX_INVALID, + .chim_sz = 0 + }; + struct vmbus_gpa sg; + rte_iova_t addr; + + addr = rte_malloc_virt2iova(req); + if (unlikely(addr == RTE_BAD_IOVA)) { + PMD_DRV_LOG(ERR, "RNDIS send request can not get iova"); + return -EINVAL; + } + + if (unlikely(reqlen > PAGE_SIZE)) { + PMD_DRV_LOG(ERR, "RNDIS request %u greater than page size", + reqlen); + return -EINVAL; + } + + sg.page = addr / PAGE_SIZE; + sg.ofs = addr & PAGE_MASK; + sg.len = reqlen; + + if (sg.ofs + reqlen > PAGE_SIZE) { + 
PMD_DRV_LOG(ERR, "RNDIS request crosses page bounary"); + return -EINVAL; + } + + hn_rndis_dump(req); + + return hn_nvs_send_sglist(chan, &sg, 1, + &nvs_rndis, sizeof(nvs_rndis), 0U, NULL); +} + +void hn_rndis_link_status(struct hn_data *hv __rte_unused, const void *msg) +{ + const struct rndis_status_msg *indicate = msg; + + hn_rndis_dump(msg); + + PMD_DRV_LOG(DEBUG, "link status %#x", indicate->status); + + switch (indicate->status) { + case RNDIS_STATUS_LINK_SPEED_CHANGE: + case RNDIS_STATUS_NETWORK_CHANGE: + case RNDIS_STATUS_TASK_OFFLOAD_CURRENT_CONFIG: + /* ignore not in DPDK API */ + break; + + case RNDIS_STATUS_MEDIA_CONNECT: + case RNDIS_STATUS_MEDIA_DISCONNECT: + /* TODO handle as LSC interrupt */ + break; + default: + PMD_DRV_LOG(NOTICE, "unknown RNDIS indication: %#x", + indicate->status); + } +} + +/* Callback from hn_process_events when response is visible */ +void hn_rndis_receive_response(struct hn_data *hv, + const void *data, uint32_t len) +{ + const struct rndis_init_comp *hdr = data; + + hn_rndis_dump(data); + + if (len < sizeof(3 * sizeof(uint32_t))) { + PMD_DRV_LOG(ERR, + "missing RNDIS header %u", len); + return; + } + + if (len < hdr->len) { + PMD_DRV_LOG(ERR, + "truncated RNDIS response %u", len); + return; + } + + if (len > sizeof(hv->rndis_resp)) { + PMD_DRV_LOG(NOTICE, + "RNDIS response exceeds buffer"); + len = sizeof(hv->rndis_resp); + } + + if (hdr->rid == 0) { + PMD_DRV_LOG(NOTICE, + "RNDIS response id zero!"); + } + + memcpy(hv->rndis_resp, data, len); + + /* make sure response copied before update */ + rte_smp_wmb(); + + if (rte_atomic32_cmpset(&hv->rndis_pending, hdr->rid, 0) == 0) { + PMD_DRV_LOG(ERR, + "received id %#x pending id %#x", + hdr->rid, (uint32_t)hv->rndis_pending); + } +} + +/* Do request/response transaction */ +static int hn_rndis_exec1(struct hn_data *hv, + const void *req, uint32_t reqlen, + void *comp, uint32_t comp_len) +{ + const struct rndis_halt_req *hdr = req; + uint32_t rid = hdr->rid; + struct vmbus_channel *chan = hn_primary_chan(hv); + int error; + + if (comp_len > sizeof(hv->rndis_resp)) { + PMD_DRV_LOG(ERR, + "Expected completion size %u exceeds buffer %zu", + comp_len, sizeof(hv->rndis_resp)); + return -EIO; + } + + if (comp != NULL && + rte_atomic32_cmpset(&hv->rndis_pending, 0, rid) == 0) { + PMD_DRV_LOG(ERR, + "Request already pending"); + return -EBUSY; + } + + error = hn_nvs_send_rndis_ctrl(chan, req, reqlen); + if (error) { + PMD_DRV_LOG(ERR, "RNDIS ctrl send failed: %d", error); + return error; + } + + if (comp) { + /* Poll primary channel until response received */ + while (hv->rndis_pending == rid) + hn_process_events(hv, 0); + + memcpy(comp, hv->rndis_resp, comp_len); + } + + return 0; +} + +/* Do transaction and validate response */ +static int hn_rndis_execute(struct hn_data *hv, uint32_t rid, + const void *req, uint32_t reqlen, + void *comp, uint32_t comp_len, uint32_t comp_type) +{ + const struct rndis_comp_hdr *hdr = comp; + int ret; + + memset(comp, 0, comp_len); + + ret = hn_rndis_exec1(hv, req, reqlen, comp, comp_len); + if (ret < 0) + return ret; + /* + * Check this RNDIS complete message. + */ + if (unlikely(hdr->type != comp_type)) { + PMD_DRV_LOG(ERR, + "unexpected RNDIS response complete %#x expect %#x", + hdr->type, comp_type); + + return -ENXIO; + } + if (unlikely(hdr->rid != rid)) { + PMD_DRV_LOG(ERR, + "RNDIS comp rid mismatch %#x, expect %#x", + hdr->rid, rid); + return -EINVAL; + } + + /* All pass! 
*/ + return 0; +} + +static int +hn_rndis_query(struct hn_data *hv, uint32_t oid, + const void *idata, uint32_t idlen, + void *odata, uint32_t odlen) +{ + struct rndis_query_req *req; + struct rndis_query_comp *comp; + uint32_t reqlen, comp_len; + int error = -EIO; + unsigned int ofs; + uint32_t rid; + + reqlen = sizeof(*req) + idlen; + req = hn_rndis_alloc(hv, reqlen); + if (req == NULL) + return -ENOMEM; + + comp_len = sizeof(*comp) + odlen; + comp = rte_zmalloc("QUERY", comp_len, PAGE_SIZE); + if (!comp) { + error = -ENOMEM; + goto done; + } + comp->status = RNDIS_STATUS_PENDING; + + rid = hn_rndis_rid(hv); + + req->type = RNDIS_QUERY_MSG; + req->len = reqlen; + req->rid = rid; + req->oid = oid; + req->infobufoffset = RNDIS_QUERY_REQ_INFOBUFOFFSET; + req->infobuflen = idlen; + + /* Input data immediately follows RNDIS query. */ + memcpy(req + 1, idata, idlen); + + error = hn_rndis_execute(hv, rid, req, reqlen, + comp, comp_len, RNDIS_QUERY_CMPLT); + + if (error) + goto done; + + if (comp->status != RNDIS_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, "RNDIS query 0x%08x failed: status 0x%08x", + oid, comp->status); + error = -EINVAL; + goto done; + } + + if (comp->infobuflen == 0 || comp->infobufoffset == 0) { + /* No output data! */ + PMD_DRV_LOG(ERR, "RNDIS query 0x%08x, no data", oid); + error = 0; + goto done; + } + + /* + * Check output data length and offset. + */ + /* ofs is the offset from the beginning of comp. */ + ofs = RNDIS_QUERY_COMP_INFOBUFOFFSET_ABS(comp->infobufoffset); + if (ofs < sizeof(*comp) || ofs + comp->infobuflen > comp_len) { + PMD_DRV_LOG(ERR, "RNDIS query invalid comp ib off/len, %u/%u", + comp->infobufoffset, comp->infobuflen); + error = -EINVAL; + goto done; + } + + /* Save output data. */ + if (comp->infobuflen < odlen) + odlen = comp->infobuflen; + + /* ofs is the offset from the beginning of comp. */ + memcpy(odata, (const char *)comp + ofs, odlen); + + error = 0; +done: + rte_free(comp); + rte_free(req); + return error; +} + +static int +hn_rndis_halt(struct hn_data *hv) +{ + struct rndis_halt_req *halt; + + halt = hn_rndis_alloc(hv, sizeof(*halt)); + if (halt == NULL) + return -ENOMEM; + + halt->type = RNDIS_HALT_MSG; + halt->len = sizeof(*halt); + halt->rid = hn_rndis_rid(hv); + + /* No RNDIS completion; rely on NVS message send completion */ + hn_rndis_exec1(hv, halt, sizeof(*halt), NULL, 0); + + rte_free(halt); + + PMD_INIT_LOG(DEBUG, "RNDIS halt done"); + return 0; +} + +static int +hn_rndis_query_hwcaps(struct hn_data *hv, struct ndis_offload *caps) +{ + struct ndis_offload in; + uint32_t caps_len, size; + int error; + + memset(caps, 0, sizeof(*caps)); + memset(&in, 0, sizeof(in)); + in.ndis_hdr.ndis_type = NDIS_OBJTYPE_OFFLOAD; + + if (hv->ndis_ver >= NDIS_VERSION_6_30) { + in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_3; + size = NDIS_OFFLOAD_SIZE; + } else if (hv->ndis_ver >= NDIS_VERSION_6_1) { + in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_2; + size = NDIS_OFFLOAD_SIZE_6_1; + } else { + in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_1; + size = NDIS_OFFLOAD_SIZE_6_0; + } + in.ndis_hdr.ndis_size = size; + + caps_len = NDIS_OFFLOAD_SIZE; + error = hn_rndis_query(hv, OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES, + &in, size, caps, caps_len); + if (error) + return error; + + /* Preliminary verification. 
*/ + if (caps->ndis_hdr.ndis_type != NDIS_OBJTYPE_OFFLOAD) { + PMD_DRV_LOG(NOTICE, "invalid NDIS objtype 0x%02x", + caps->ndis_hdr.ndis_type); + return -EINVAL; + } + if (caps->ndis_hdr.ndis_rev < NDIS_OFFLOAD_REV_1) { + PMD_DRV_LOG(NOTICE, "invalid NDIS objrev 0x%02x", + caps->ndis_hdr.ndis_rev); + return -EINVAL; + } + if (caps->ndis_hdr.ndis_size > caps_len) { + PMD_DRV_LOG(NOTICE, "invalid NDIS objsize %u, data size %u", + caps->ndis_hdr.ndis_size, caps_len); + return -EINVAL; + } else if (caps->ndis_hdr.ndis_size < NDIS_OFFLOAD_SIZE_6_0) { + PMD_DRV_LOG(NOTICE, "invalid NDIS objsize %u", + caps->ndis_hdr.ndis_size); + return -EINVAL; + } + + return 0; +} + +int +hn_rndis_query_rsscaps(struct hn_data *hv, + unsigned int *rxr_cnt0) +{ + struct ndis_rss_caps in, caps; + unsigned int indsz, rxr_cnt; + uint32_t caps_len; + int error; + + *rxr_cnt0 = 0; + + if (hv->ndis_ver < NDIS_VERSION_6_20) { + PMD_DRV_LOG(DEBUG, "RSS not supported on this host"); + return -EOPNOTSUPP; + } + + memset(&in, 0, sizeof(in)); + in.ndis_hdr.ndis_type = NDIS_OBJTYPE_RSS_CAPS; + in.ndis_hdr.ndis_rev = NDIS_RSS_CAPS_REV_2; + in.ndis_hdr.ndis_size = NDIS_RSS_CAPS_SIZE; + + caps_len = NDIS_RSS_CAPS_SIZE; + error = hn_rndis_query(hv, OID_GEN_RECEIVE_SCALE_CAPABILITIES, + &in, NDIS_RSS_CAPS_SIZE, + &caps, caps_len); + if (error) + return error; + + PMD_INIT_LOG(DEBUG, "RX rings %u indirect %u caps %#x", + caps.ndis_nrxr, caps.ndis_nind, caps.ndis_caps); + /* + * Preliminary verification. + */ + if (caps.ndis_hdr.ndis_type != NDIS_OBJTYPE_RSS_CAPS) { + PMD_DRV_LOG(ERR, "invalid NDIS objtype 0x%02x", + caps.ndis_hdr.ndis_type); + return -EINVAL; + } + if (caps.ndis_hdr.ndis_rev < NDIS_RSS_CAPS_REV_1) { + PMD_DRV_LOG(ERR, "invalid NDIS objrev 0x%02x", + caps.ndis_hdr.ndis_rev); + return -EINVAL; + } + if (caps.ndis_hdr.ndis_size > caps_len) { + PMD_DRV_LOG(ERR, + "invalid NDIS objsize %u, data size %u", + caps.ndis_hdr.ndis_size, caps_len); + return -EINVAL; + } else if (caps.ndis_hdr.ndis_size < NDIS_RSS_CAPS_SIZE_6_0) { + PMD_DRV_LOG(ERR, "invalid NDIS objsize %u", + caps.ndis_hdr.ndis_size); + return -EINVAL; + } + + /* + * Save information for later RSS configuration. + */ + if (caps.ndis_nrxr == 0) { + PMD_DRV_LOG(ERR, "0 RX rings!?"); + return -EINVAL; + } + rxr_cnt = caps.ndis_nrxr; + + if (caps.ndis_hdr.ndis_size == NDIS_RSS_CAPS_SIZE && + caps.ndis_hdr.ndis_rev >= NDIS_RSS_CAPS_REV_2) { + if (caps.ndis_nind > NDIS_HASH_INDCNT) { + PMD_DRV_LOG(ERR, + "too many RSS indirect table entries %u", + caps.ndis_nind); + return -EOPNOTSUPP; + } + if (!rte_is_power_of_2(caps.ndis_nind)) { + PMD_DRV_LOG(ERR, + "RSS indirect table size is not power-of-2 %u", + caps.ndis_nind); + } + + indsz = caps.ndis_nind; + } else { + indsz = NDIS_HASH_INDCNT; + } + + if (indsz < rxr_cnt) { + PMD_DRV_LOG(NOTICE, + "# of RX rings (%d) > RSS indirect table size %d", + rxr_cnt, indsz); + rxr_cnt = indsz; + } + + hv->rss_offloads = 0; + if (caps.ndis_caps & NDIS_RSS_CAP_IPV4) + hv->rss_offloads |= ETH_RSS_IPV4 + | ETH_RSS_NONFRAG_IPV4_TCP + | ETH_RSS_NONFRAG_IPV4_UDP; + if (caps.ndis_caps & NDIS_RSS_CAP_IPV6) + hv->rss_offloads |= ETH_RSS_IPV6 + | ETH_RSS_NONFRAG_IPV6_TCP; + if (caps.ndis_caps & NDIS_RSS_CAP_IPV6_EX) + hv->rss_offloads |= ETH_RSS_IPV6_EX + | ETH_RSS_IPV6_TCP_EX; + + /* Commit! 
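hn_rndis_query_rsscaps() therefore reports the smaller of the host's RX ring count and its RSS indirection table size: a host advertising ndis_nrxr = 64 rings against a 128-entry table yields 64 usable queues, while 8 rings with the same table still yields 8. A caller-side sketch (max_queues is an illustrative variable; the real limit is applied elsewhere in the driver):

    unsigned int rxr_cnt;

    if (hn_rndis_query_rsscaps(hv, &rxr_cnt) == 0)
            max_queues = RTE_MIN(max_queues, rxr_cnt);
    /* else: RSS unsupported (e.g. NDIS < 6.20); stay at a single queue */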
 */
+	*rxr_cnt0 = rxr_cnt;
+
+	return 0;
+}
+
+static int
+hn_rndis_set(struct hn_data *hv, uint32_t oid, const void *data, uint32_t dlen)
+{
+	struct rndis_set_req *req;
+	struct rndis_set_comp comp;
+	uint32_t reqlen, comp_len;
+	uint32_t rid;
+	int error;
+
+	reqlen = sizeof(*req) + dlen;
+	req = rte_zmalloc("RNDIS_SET", reqlen, PAGE_SIZE);
+	if (!req)
+		return -ENOMEM;
+
+	rid = hn_rndis_rid(hv);
+	req->type = RNDIS_SET_MSG;
+	req->len = reqlen;
+	req->rid = rid;
+	req->oid = oid;
+	req->infobuflen = dlen;
+	req->infobufoffset = RNDIS_SET_REQ_INFOBUFOFFSET;
+
+	/* Data immediately follows RNDIS set. */
+	memcpy(req + 1, data, dlen);
+
+	comp_len = sizeof(comp);
+	error = hn_rndis_execute(hv, rid, req, reqlen,
+				 &comp, comp_len,
+				 RNDIS_SET_CMPLT);
+	if (error) {
+		PMD_DRV_LOG(ERR, "exec RNDIS set %#" PRIx32 " failed",
+			    oid);
+		error = -EIO;
+		goto done;
+	}
+
+	if (comp.status != RNDIS_STATUS_SUCCESS) {
+		PMD_DRV_LOG(ERR,
+			    "RNDIS set %#" PRIx32 " failed: status %#" PRIx32,
+			    oid, comp.status);
+		error = -EIO;
+		goto done;
+	}
+
+done:
+	rte_free(req);
+	return error;
+}
+
+int hn_rndis_conf_offload(struct hn_data *hv,
+			  uint64_t tx_offloads, uint64_t rx_offloads)
+{
+	struct ndis_offload_params params;
+	struct ndis_offload hwcaps;
+	int error;
+
+	error = hn_rndis_query_hwcaps(hv, &hwcaps);
+	if (error) {
+		PMD_DRV_LOG(ERR, "hwcaps query failed: %d", error);
+		return error;
+	}
+
+	/* NOTE: 0 means "no change" */
+	memset(&params, 0, sizeof(params));
+
+	params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
+	if (hv->ndis_ver < NDIS_VERSION_6_30) {
+		params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
+		params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
+	} else {
+		params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
+		params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
+	}
+
+	if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+		if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_TCP4)
+			params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TX;
+		else
+			goto unsupported;
+
+		if (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_TCP6)
+			params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TX;
+		else
+			goto unsupported;
+	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) {
+		if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4)
+		    == NDIS_RXCSUM_CAP_TCP4)
+			params.ndis_tcp4csum |= NDIS_OFFLOAD_PARAM_RX;
+		else
+			goto unsupported;
+
+		if ((hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6)
+		    == NDIS_RXCSUM_CAP_TCP6)
+			params.ndis_tcp6csum |= NDIS_OFFLOAD_PARAM_RX;
+		else
+			goto unsupported;
+	}
+
+	if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+		if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4)
+			params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TX;
+		else
+			goto unsupported;
+
+		if ((hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6)
+		    == NDIS_TXCSUM_CAP_UDP6)
+			params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TX;
+		else
+			goto unsupported;
+	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) {
+		if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4)
+			params.ndis_udp4csum |= NDIS_OFFLOAD_PARAM_RX;
+		else
+			goto unsupported;
+
+		if (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6)
+			params.ndis_udp6csum |= NDIS_OFFLOAD_PARAM_RX;
+		else
+			goto unsupported;
+	}
+
+	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+		if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_IP4)
+		    == NDIS_TXCSUM_CAP_IP4)
+			params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TX;
+		else
+			goto unsupported;
+	}
+	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+		if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
+			params.ndis_ip4csum |= NDIS_OFFLOAD_PARAM_RX;
+		else
+			goto unsupported;
+	}
+
+	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+		if (hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023)
+			params.ndis_lsov2_ip4 = NDIS_OFFLOAD_LSOV2_ON;
+		else
+			goto unsupported;
+
+		if ((hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
+		    == HN_NDIS_LSOV2_CAP_IP6)
+			params.ndis_lsov2_ip6 = NDIS_OFFLOAD_LSOV2_ON;
+		else
+			goto unsupported;
+	}
+
+	error = hn_rndis_set(hv, OID_TCP_OFFLOAD_PARAMETERS, &params,
+			     params.ndis_hdr.ndis_size);
+	if (error) {
+		PMD_DRV_LOG(ERR, "offload config failed");
+		return error;
+	}
+
+	return 0;
+ unsupported:
+	PMD_DRV_LOG(NOTICE,
+		    "offload tx:%" PRIx64 " rx:%" PRIx64 " not supported by this version",
+		    tx_offloads, rx_offloads);
+	return -EINVAL;
+}
+
+int hn_rndis_get_offload(struct hn_data *hv,
+			 struct rte_eth_dev_info *dev_info)
+{
+	struct ndis_offload hwcaps;
+	int error;
+
+	memset(&hwcaps, 0, sizeof(hwcaps));
+
+	error = hn_rndis_query_hwcaps(hv, &hwcaps);
+	if (error) {
+		PMD_DRV_LOG(ERR, "hwcaps query failed: %d", error);
+		return error;
+	}
+
+	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
+				    DEV_TX_OFFLOAD_VLAN_INSERT;
+
+	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_IP4)
+	    == HN_NDIS_TXCSUM_CAP_IP4)
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+
+	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_TCP4)
+	    == HN_NDIS_TXCSUM_CAP_TCP4 &&
+	    (hwcaps.ndis_csum.ndis_ip6_txcsum & HN_NDIS_TXCSUM_CAP_TCP6)
+	    == HN_NDIS_TXCSUM_CAP_TCP6)
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_CKSUM;
+
+	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) &&
+	    (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6))
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_UDP_CKSUM;
+
+	if ((hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023) &&
+	    (hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
+	    == HN_NDIS_LSOV2_CAP_IP6)
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+				    DEV_RX_OFFLOAD_CRC_STRIP;
+
+	if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+
+	if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) &&
+	    (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6))
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM;
+
+	if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) &&
+	    (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6))
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_UDP_CKSUM;
+
+	return 0;
+}
+
+int
+hn_rndis_set_rxfilter(struct hn_data *hv, uint32_t filter)
+{
+	int error;
+
+	error = hn_rndis_set(hv, OID_GEN_CURRENT_PACKET_FILTER,
+			     &filter, sizeof(filter));
+	if (error) {
+		PMD_DRV_LOG(ERR, "set RX filter %#" PRIx32 " failed: %d",
+			    filter, error);
+	} else {
+		PMD_DRV_LOG(DEBUG, "set RX filter %#" PRIx32 " done", filter);
+	}
+
+	return error;
+}
+
+/* The default RSS key.
+ * This value is the same as MLX5 so that flows will be
+ * received on same path for both VF and synthetic NIC.
+ */
+static const uint8_t rss_default_key[NDIS_HASH_KEYSIZE_TOEPLITZ] = {
+	0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
+	0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
+	0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
+	0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
+	0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a,
+};
+
+int hn_rndis_conf_rss(struct hn_data *hv,
+		      const struct rte_eth_rss_conf *rss_conf)
+{
+	struct ndis_rssprm_toeplitz rssp;
+	struct ndis_rss_params *prm = &rssp.rss_params;
+	const uint8_t *rss_key = rss_conf->rss_key ? : rss_default_key;
+	uint32_t rss_hash;
+	unsigned int i;
+	int error;
+
+	PMD_INIT_FUNC_TRACE();
+
+	memset(&rssp, 0, sizeof(rssp));
+
+	prm->ndis_hdr.ndis_type = NDIS_OBJTYPE_RSS_PARAMS;
+	prm->ndis_hdr.ndis_rev = NDIS_RSS_PARAMS_REV_2;
+	prm->ndis_hdr.ndis_size = sizeof(*prm);
+	prm->ndis_flags = 0;
+
+	rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ;
+	if (rss_conf->rss_hf & ETH_RSS_IPV4)
+		rss_hash |= NDIS_HASH_IPV4;
+	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		rss_hash |= NDIS_HASH_TCP_IPV4;
+	if (rss_conf->rss_hf & ETH_RSS_IPV6)
+		rss_hash |= NDIS_HASH_IPV6;
+	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+		rss_hash |= NDIS_HASH_TCP_IPV6;
+
+	prm->ndis_hash = rss_hash;
+	prm->ndis_indsize = sizeof(rssp.rss_ind[0]) * NDIS_HASH_INDCNT;
+	prm->ndis_indoffset = offsetof(struct ndis_rssprm_toeplitz, rss_ind[0]);
+	prm->ndis_keysize = NDIS_HASH_KEYSIZE_TOEPLITZ;
+	prm->ndis_keyoffset = offsetof(struct ndis_rssprm_toeplitz, rss_key[0]);
+
+	for (i = 0; i < NDIS_HASH_INDCNT; i++)
+		rssp.rss_ind[i] = i % hv->num_queues;
+
+	/* Set hash key values */
+	memcpy(&rssp.rss_key, rss_key, NDIS_HASH_KEYSIZE_TOEPLITZ);
+
+	error = hn_rndis_set(hv, OID_GEN_RECEIVE_SCALE_PARAMETERS,
+			     &rssp, sizeof(rssp));
+	if (error) {
+		PMD_DRV_LOG(ERR,
+			    "RSS config num queues=%u failed: %d",
+			    hv->num_queues, error);
+	}
+	return error;
+}
+
+static int hn_rndis_init(struct hn_data *hv)
+{
+	struct rndis_init_req *req;
+	struct rndis_init_comp comp;
+	uint32_t comp_len, rid;
+	int error;
+
+	req = hn_rndis_alloc(hv, sizeof(*req));
+	if (!req) {
+		PMD_DRV_LOG(ERR, "no memory for RNDIS init");
+		return -ENXIO;
+	}
+
+	rid = hn_rndis_rid(hv);
+	req->type = RNDIS_INITIALIZE_MSG;
+	req->len = sizeof(*req);
+	req->rid = rid;
+	req->ver_major = RNDIS_VERSION_MAJOR;
+	req->ver_minor = RNDIS_VERSION_MINOR;
+	req->max_xfersz = HN_RNDIS_XFER_SIZE;
+
+	comp_len = RNDIS_INIT_COMP_SIZE_MIN;
+	error = hn_rndis_execute(hv, rid, req, sizeof(*req),
+				 &comp, comp_len,
+				 RNDIS_INITIALIZE_CMPLT);
+	if (error)
+		goto done;
+
+	if (comp.status != RNDIS_STATUS_SUCCESS) {
+		PMD_DRV_LOG(ERR, "RNDIS init failed: status 0x%08x",
+			    comp.status);
+		error = -EIO;
+		goto done;
+	}
+
+	hv->rndis_agg_size = comp.pktmaxsz;
+	hv->rndis_agg_pkts = comp.pktmaxcnt;
+	hv->rndis_agg_align = 1U << comp.align;
+
+	if (hv->rndis_agg_align < sizeof(uint32_t)) {
+		/*
+		 * The RNDIS packet message encap assumes that the RNDIS
+		 * packet message is at least 4 bytes aligned. Fix up the
+		 * alignment here, if the remote side sets the alignment
+		 * too low.
+ */ + PMD_DRV_LOG(NOTICE, + "fixup RNDIS aggpkt align: %u -> %zu", + hv->rndis_agg_align, sizeof(uint32_t)); + hv->rndis_agg_align = sizeof(uint32_t); + } + + PMD_INIT_LOG(INFO, + "RNDIS ver %u.%u, aggpkt size %u, aggpkt cnt %u, aggpkt align %u", + comp.ver_major, comp.ver_minor, + hv->rndis_agg_size, hv->rndis_agg_pkts, + hv->rndis_agg_align); + error = 0; +done: + rte_free(req); + return error; +} + +int +hn_rndis_get_eaddr(struct hn_data *hv, uint8_t *eaddr) +{ + uint32_t eaddr_len; + int error; + + eaddr_len = ETHER_ADDR_LEN; + error = hn_rndis_query(hv, OID_802_3_PERMANENT_ADDRESS, NULL, 0, + eaddr, eaddr_len); + if (error) + return error; + + PMD_DRV_LOG(INFO, "MAC address %02x:%02x:%02x:%02x:%02x:%02x", + eaddr[0], eaddr[1], eaddr[2], + eaddr[3], eaddr[4], eaddr[5]); + return 0; +} + +int +hn_rndis_get_linkstatus(struct hn_data *hv) +{ + return hn_rndis_query(hv, OID_GEN_MEDIA_CONNECT_STATUS, NULL, 0, + &hv->link_status, sizeof(uint32_t)); +} + +int +hn_rndis_get_linkspeed(struct hn_data *hv) +{ + return hn_rndis_query(hv, OID_GEN_LINK_SPEED, NULL, 0, + &hv->link_speed, sizeof(uint32_t)); +} + +int +hn_rndis_attach(struct hn_data *hv) +{ + /* Initialize RNDIS. */ + return hn_rndis_init(hv); +} + +void +hn_rndis_detach(struct hn_data *hv) +{ + /* Halt the RNDIS. */ + hn_rndis_halt(hv); +} diff --git a/drivers/net/netvsc/hn_rndis.h b/drivers/net/netvsc/hn_rndis.h new file mode 100644 index 00000000..89e2e6ba --- /dev/null +++ b/drivers/net/netvsc/hn_rndis.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ + +#include "rndis.h" + +struct hn_data; + +void hn_rndis_receive_response(struct hn_data *hv, + const void *data, uint32_t len); +void hn_rndis_link_status(struct hn_data *hv, const void *data); +int hn_rndis_attach(struct hn_data *hv); +void hn_rndis_detach(struct hn_data *hv); +int hn_rndis_get_eaddr(struct hn_data *hv, uint8_t *eaddr); +int hn_rndis_get_linkstatus(struct hn_data *hv); +int hn_rndis_get_linkspeed(struct hn_data *hv); +int hn_rndis_set_rxfilter(struct hn_data *hv, uint32_t filter); +void hn_rndis_rx_ctrl(struct hn_data *hv, const void *data, + int dlen); +int hn_rndis_get_offload(struct hn_data *hv, + struct rte_eth_dev_info *dev_info); +int hn_rndis_conf_offload(struct hn_data *hv, + uint64_t tx_offloads, + uint64_t rx_offloads); +int hn_rndis_query_rsscaps(struct hn_data *hv, + unsigned int *rxr_cnt0); +int hn_rndis_conf_rss(struct hn_data *hv, + const struct rte_eth_rss_conf *rss_conf); + +#ifdef RTE_LIBRTE_NETVSC_DEBUG_DUMP +void hn_rndis_dump(const void *buf); +#else +#define hn_rndis_dump(buf) +#endif diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c new file mode 100644 index 00000000..02ef27e3 --- /dev/null +++ b/drivers/net/netvsc/hn_rxtx.c @@ -0,0 +1,1334 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2018 Microsoft Corporation + * Copyright(c) 2013-2016 Brocade Communications Systems, Inc. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hn_logs.h" +#include "hn_var.h" +#include "hn_rndis.h" +#include "hn_nvs.h" +#include "ndis.h" + +#define HN_NVS_SEND_MSG_SIZE \ + (sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis)) + +#define HN_TXD_CACHE_SIZE 32 /* per cpu tx_descriptor pool cache */ +#define HN_TXCOPY_THRESHOLD 512 + +#define HN_RXCOPY_THRESHOLD 256 +#define HN_RXQ_EVENT_DEFAULT 2048 + +struct hn_rxinfo { + uint32_t vlan_info; + uint32_t csum_info; + uint32_t hash_info; + uint32_t hash_value; +}; + +#define HN_RXINFO_VLAN 0x0001 +#define HN_RXINFO_CSUM 0x0002 +#define HN_RXINFO_HASHINF 0x0004 +#define HN_RXINFO_HASHVAL 0x0008 +#define HN_RXINFO_ALL \ + (HN_RXINFO_VLAN | \ + HN_RXINFO_CSUM | \ + HN_RXINFO_HASHINF | \ + HN_RXINFO_HASHVAL) + +#define HN_NDIS_VLAN_INFO_INVALID 0xffffffff +#define HN_NDIS_RXCSUM_INFO_INVALID 0 +#define HN_NDIS_HASH_INFO_INVALID 0 + +/* + * Per-transmit book keeping. + * A slot in transmit ring (chim_index) is reserved for each transmit. + * + * There are two types of transmit: + * - buffered transmit where chimney buffer is used and RNDIS header + * is in the buffer. mbuf == NULL for this case. + * + * - direct transmit where RNDIS header is in the in rndis_pkt + * mbuf is freed after transmit. + * + * Descriptors come from per-port pool which is used + * to limit number of outstanding requests per device. + */ +struct hn_txdesc { + struct rte_mbuf *m; + + uint16_t queue_id; + uint16_t chim_index; + uint32_t chim_size; + uint32_t data_size; + uint32_t packets; + + struct rndis_packet_msg *rndis_pkt; +}; + +#define HN_RNDIS_PKT_LEN \ + (sizeof(struct rndis_packet_msg) + \ + RNDIS_PKTINFO_SIZE(NDIS_HASH_VALUE_SIZE) + \ + RNDIS_PKTINFO_SIZE(NDIS_VLAN_INFO_SIZE) + \ + RNDIS_PKTINFO_SIZE(NDIS_LSO2_INFO_SIZE) + \ + RNDIS_PKTINFO_SIZE(NDIS_TXCSUM_INFO_SIZE)) + +/* Minimum space required for a packet */ +#define HN_PKTSIZE_MIN(align) \ + RTE_ALIGN(ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align) + +#define DEFAULT_TX_FREE_THRESH 32U + +static void +hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m) +{ + uint32_t s = m->pkt_len; + const struct ether_addr *ea; + + if (s == 64) { + stats->size_bins[1]++; + } else if (s > 64 && s < 1024) { + uint32_t bin; + + /* count zeros, and offset into correct bin */ + bin = (sizeof(s) * 8) - __builtin_clz(s) - 5; + stats->size_bins[bin]++; + } else { + if (s < 64) + stats->size_bins[0]++; + else if (s < 1519) + stats->size_bins[6]++; + else if (s >= 1519) + stats->size_bins[7]++; + } + + ea = rte_pktmbuf_mtod(m, const struct ether_addr *); + if (is_multicast_ether_addr(ea)) { + if (is_broadcast_ether_addr(ea)) + stats->broadcast++; + else + stats->multicast++; + } +} + +static inline unsigned int hn_rndis_pktlen(const struct rndis_packet_msg *pkt) +{ + return pkt->pktinfooffset + pkt->pktinfolen; +} + +static inline uint32_t +hn_rndis_pktmsg_offset(uint32_t ofs) +{ + return ofs - offsetof(struct rndis_packet_msg, dataoffset); +} + +static void hn_txd_init(struct rte_mempool *mp __rte_unused, + void *opaque, void *obj, unsigned int idx) +{ + struct hn_txdesc *txd = obj; + struct rte_eth_dev *dev = opaque; + struct rndis_packet_msg *pkt; + + memset(txd, 0, sizeof(*txd)); + txd->chim_index = idx; + + pkt = rte_malloc_socket("RNDIS_TX", HN_RNDIS_PKT_LEN, + rte_align32pow2(HN_RNDIS_PKT_LEN), + 
dev->device->numa_node); + if (!pkt) + rte_exit(EXIT_FAILURE, "can not allocate RNDIS header"); + + txd->rndis_pkt = pkt; +} + +/* + * Unlike Linux and FreeBSD, this driver uses a mempool + * to limit outstanding transmits and reserve buffers + */ +int +hn_tx_pool_init(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + char name[RTE_MEMPOOL_NAMESIZE]; + struct rte_mempool *mp; + + snprintf(name, sizeof(name), + "hn_txd_%u", dev->data->port_id); + + PMD_INIT_LOG(DEBUG, "create a TX send pool %s n=%u size=%zu socket=%d", + name, hv->chim_cnt, sizeof(struct hn_txdesc), + dev->device->numa_node); + + mp = rte_mempool_create(name, hv->chim_cnt, sizeof(struct hn_txdesc), + HN_TXD_CACHE_SIZE, 0, + NULL, NULL, + hn_txd_init, dev, + dev->device->numa_node, 0); + if (!mp) { + PMD_DRV_LOG(ERR, + "mempool %s create failed: %d", name, rte_errno); + return -rte_errno; + } + + hv->tx_pool = mp; + return 0; +} + +static void hn_reset_txagg(struct hn_tx_queue *txq) +{ + txq->agg_szleft = txq->agg_szmax; + txq->agg_pktleft = txq->agg_pktmax; + txq->agg_txd = NULL; + txq->agg_prevpkt = NULL; +} + +int +hn_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc __rte_unused, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) + +{ + struct hn_data *hv = dev->data->dev_private; + struct hn_tx_queue *txq; + uint32_t tx_free_thresh; + + PMD_INIT_FUNC_TRACE(); + + txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE, + socket_id); + if (!txq) + return -ENOMEM; + + txq->hv = hv; + txq->chan = hv->channels[queue_idx]; + txq->port_id = dev->data->port_id; + txq->queue_id = queue_idx; + + tx_free_thresh = tx_conf->tx_free_thresh; + if (tx_free_thresh == 0) + tx_free_thresh = RTE_MIN(hv->chim_cnt / 4, + DEFAULT_TX_FREE_THRESH); + + if (tx_free_thresh >= hv->chim_cnt - 3) + tx_free_thresh = hv->chim_cnt - 3; + + txq->free_thresh = tx_free_thresh; + + txq->agg_szmax = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size); + txq->agg_pktmax = hv->rndis_agg_pkts; + txq->agg_align = hv->rndis_agg_align; + + hn_reset_txagg(txq); + + dev->data->tx_queues[queue_idx] = txq; + + return 0; +} + +void +hn_dev_tx_queue_release(void *arg) +{ + struct hn_tx_queue *txq = arg; + struct hn_txdesc *txd; + + PMD_INIT_FUNC_TRACE(); + + if (!txq) + return; + + /* If any pending data is still present just drop it */ + txd = txq->agg_txd; + if (txd) + rte_mempool_put(txq->hv->tx_pool, txd); + + rte_free(txq); +} + +void +hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx, + struct rte_eth_txq_info *qinfo) +{ + struct hn_data *hv = dev->data->dev_private; + struct hn_tx_queue *txq = dev->data->rx_queues[queue_idx]; + + qinfo->conf.tx_free_thresh = txq->free_thresh; + qinfo->nb_desc = hv->tx_pool->size; +} + +static void +hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id, + unsigned long xactid, const struct hn_nvs_rndis_ack *ack) +{ + struct hn_txdesc *txd = (struct hn_txdesc *)xactid; + struct hn_tx_queue *txq; + + /* Control packets are sent with xacid == 0 */ + if (!txd) + return; + + txq = dev->data->tx_queues[queue_id]; + if (likely(ack->status == NVS_STATUS_OK)) { + PMD_TX_LOG(DEBUG, "port %u:%u complete tx %u packets %u bytes %u", + txq->port_id, txq->queue_id, txd->chim_index, + txd->packets, txd->data_size); + txq->stats.bytes += txd->data_size; + txq->stats.packets += txd->packets; + } else { + PMD_TX_LOG(NOTICE, "port %u:%u complete tx %u failed status %u", + txq->port_id, txq->queue_id, txd->chim_index, ack->status); + 
++txq->stats.errors; + } + + rte_pktmbuf_free(txd->m); + + rte_mempool_put(txq->hv->tx_pool, txd); +} + +/* Handle transmit completion events */ +static void +hn_nvs_handle_comp(struct rte_eth_dev *dev, uint16_t queue_id, + const struct vmbus_chanpkt_hdr *pkt, + const void *data) +{ + const struct hn_nvs_hdr *hdr = data; + + switch (hdr->type) { + case NVS_TYPE_RNDIS_ACK: + hn_nvs_send_completed(dev, queue_id, pkt->xactid, data); + break; + + default: + PMD_TX_LOG(NOTICE, + "unexpected send completion type %u", + hdr->type); + } +} + +/* Parse per-packet info (meta data) */ +static int +hn_rndis_rxinfo(const void *info_data, unsigned int info_dlen, + struct hn_rxinfo *info) +{ + const struct rndis_pktinfo *pi = info_data; + uint32_t mask = 0; + + while (info_dlen != 0) { + const void *data; + uint32_t dlen; + + if (unlikely(info_dlen < sizeof(*pi))) + return -EINVAL; + + if (unlikely(info_dlen < pi->size)) + return -EINVAL; + info_dlen -= pi->size; + + if (unlikely(pi->size & RNDIS_PKTINFO_SIZE_ALIGNMASK)) + return -EINVAL; + if (unlikely(pi->size < pi->offset)) + return -EINVAL; + + dlen = pi->size - pi->offset; + data = pi->data; + + switch (pi->type) { + case NDIS_PKTINFO_TYPE_VLAN: + if (unlikely(dlen < NDIS_VLAN_INFO_SIZE)) + return -EINVAL; + info->vlan_info = *((const uint32_t *)data); + mask |= HN_RXINFO_VLAN; + break; + + case NDIS_PKTINFO_TYPE_CSUM: + if (unlikely(dlen < NDIS_RXCSUM_INFO_SIZE)) + return -EINVAL; + info->csum_info = *((const uint32_t *)data); + mask |= HN_RXINFO_CSUM; + break; + + case NDIS_PKTINFO_TYPE_HASHVAL: + if (unlikely(dlen < NDIS_HASH_VALUE_SIZE)) + return -EINVAL; + info->hash_value = *((const uint32_t *)data); + mask |= HN_RXINFO_HASHVAL; + break; + + case NDIS_PKTINFO_TYPE_HASHINF: + if (unlikely(dlen < NDIS_HASH_INFO_SIZE)) + return -EINVAL; + info->hash_info = *((const uint32_t *)data); + mask |= HN_RXINFO_HASHINF; + break; + + default: + goto next; + } + + if (mask == HN_RXINFO_ALL) + break; /* All found; done */ +next: + pi = (const struct rndis_pktinfo *) + ((const uint8_t *)pi + pi->size); + } + + /* + * Final fixup. + * - If there is no hash value, invalidate the hash info. + */ + if (!(mask & HN_RXINFO_HASHVAL)) + info->hash_info = HN_NDIS_HASH_INFO_INVALID; + return 0; +} + +/* + * Ack the consumed RXBUF associated w/ this channel packet, + * so that this RXBUF can be recycled by the hypervisor. 
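The reference counting that drives this ack works as follows: each RXBUF section starts at refcount 1 while its event is being parsed, every zero-copy mbuf attached to it takes an extra reference, and the ack goes to the host only when the last holder drops its reference. Sketched against the functions below (descriptive only, not new code):

    /* hn_rx_buf_init()            refcnt = 1   (event being parsed)
     * hn_rxpkt(), extbuf path     refcnt++     per attached mbuf
     * rte_pktmbuf_free() -> cb    refcnt--     (hn_rx_buf_free_cb)
     * hn_nvs_handle_rxbuf() end   refcnt--     (drops the parse reference)
     * refcnt reaches 0            hn_nvs_ack_rxbuf() -> host recycles section
     */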
+ */ +static void hn_rx_buf_release(struct hn_rx_bufinfo *rxb) +{ + struct rte_mbuf_ext_shared_info *shinfo = &rxb->shinfo; + struct hn_data *hv = rxb->hv; + + if (rte_mbuf_ext_refcnt_update(shinfo, -1) == 0) { + hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid); + --hv->rxbuf_outstanding; + } +} + +static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque) +{ + hn_rx_buf_release(opaque); +} + +static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq, + const struct vmbus_chanpkt_rxbuf *pkt) +{ + struct hn_rx_bufinfo *rxb; + + rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid; + rxb->chan = rxq->chan; + rxb->xactid = pkt->hdr.xactid; + rxb->hv = rxq->hv; + + rxb->shinfo.free_cb = hn_rx_buf_free_cb; + rxb->shinfo.fcb_opaque = rxb; + rte_mbuf_ext_refcnt_set(&rxb->shinfo, 1); + return rxb; +} + +static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb, + uint8_t *data, unsigned int headroom, unsigned int dlen, + const struct hn_rxinfo *info) +{ + struct hn_data *hv = rxq->hv; + struct rte_mbuf *m; + + m = rte_pktmbuf_alloc(rxq->mb_pool); + if (unlikely(!m)) { + struct rte_eth_dev *dev = + &rte_eth_devices[rxq->port_id]; + + dev->data->rx_mbuf_alloc_failed++; + return; + } + + /* + * For large packets, avoid copy if possible but need to keep + * some space available in receive area for later packets. + */ + if (dlen >= HN_RXCOPY_THRESHOLD && + hv->rxbuf_outstanding < hv->rxbuf_section_cnt / 2) { + struct rte_mbuf_ext_shared_info *shinfo; + const void *rxbuf; + rte_iova_t iova; + + /* + * Build an external mbuf that points to recveive area. + * Use refcount to handle multiple packets in same + * receive buffer section. + */ + rxbuf = hv->rxbuf_res->addr; + iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf); + shinfo = &rxb->shinfo; + + if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 1) + ++hv->rxbuf_outstanding; + + rte_pktmbuf_attach_extbuf(m, data, iova, + dlen + headroom, shinfo); + m->data_off = headroom; + } else { + /* Mbuf's in pool must be large enough to hold small packets */ + if (unlikely(rte_pktmbuf_tailroom(m) < dlen)) { + rte_pktmbuf_free_seg(m); + ++rxq->stats.errors; + return; + } + rte_memcpy(rte_pktmbuf_mtod(m, void *), + data + headroom, dlen); + } + + m->port = rxq->port_id; + m->pkt_len = dlen; + m->data_len = dlen; + m->packet_type = rte_net_get_ptype(m, NULL, + RTE_PTYPE_L2_MASK | + RTE_PTYPE_L3_MASK | + RTE_PTYPE_L4_MASK); + + if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) { + m->vlan_tci = info->vlan_info; + m->ol_flags |= PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN; + } + + if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) { + if (info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK) + m->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + + if (info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK + | NDIS_RXCSUM_INFO_TCPCS_OK)) + m->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + else if (info->csum_info & (NDIS_RXCSUM_INFO_TCPCS_FAILED + | NDIS_RXCSUM_INFO_UDPCS_FAILED)) + m->ol_flags |= PKT_RX_L4_CKSUM_BAD; + } + + if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) { + m->ol_flags |= PKT_RX_RSS_HASH; + m->hash.rss = info->hash_value; + } + + PMD_RX_LOG(DEBUG, + "port %u:%u RX id %"PRIu64" size %u type %#x ol_flags %#"PRIx64, + rxq->port_id, rxq->queue_id, rxb->xactid, + m->pkt_len, m->packet_type, m->ol_flags); + + ++rxq->stats.packets; + rxq->stats.bytes += m->pkt_len; + hn_update_packet_stats(&rxq->stats, m); + + if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) { + ++rxq->ring_full; + rte_pktmbuf_free(m); + } +} + +static void hn_rndis_rx_data(struct hn_rx_queue *rxq, + 
struct hn_rx_bufinfo *rxb, + void *data, uint32_t dlen) +{ + unsigned int data_off, data_len, pktinfo_off, pktinfo_len; + const struct rndis_packet_msg *pkt = data; + struct hn_rxinfo info = { + .vlan_info = HN_NDIS_VLAN_INFO_INVALID, + .csum_info = HN_NDIS_RXCSUM_INFO_INVALID, + .hash_info = HN_NDIS_HASH_INFO_INVALID, + }; + int err; + + hn_rndis_dump(pkt); + + if (unlikely(dlen < sizeof(*pkt))) + goto error; + + if (unlikely(dlen < pkt->len)) + goto error; /* truncated RNDIS from host */ + + if (unlikely(pkt->len < pkt->datalen + + pkt->oobdatalen + pkt->pktinfolen)) + goto error; + + if (unlikely(pkt->datalen == 0)) + goto error; + + /* Check offsets. */ + if (unlikely(pkt->dataoffset < RNDIS_PACKET_MSG_OFFSET_MIN)) + goto error; + + if (likely(pkt->pktinfooffset > 0) && + unlikely(pkt->pktinfooffset < RNDIS_PACKET_MSG_OFFSET_MIN || + (pkt->pktinfooffset & RNDIS_PACKET_MSG_OFFSET_ALIGNMASK))) + goto error; + + data_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset); + data_len = pkt->datalen; + pktinfo_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->pktinfooffset); + pktinfo_len = pkt->pktinfolen; + + if (likely(pktinfo_len > 0)) { + err = hn_rndis_rxinfo((const uint8_t *)pkt + pktinfo_off, + pktinfo_len, &info); + if (err) + goto error; + } + + if (unlikely(data_off + data_len > pkt->len)) + goto error; + + if (unlikely(data_len < ETHER_HDR_LEN)) + goto error; + + hn_rxpkt(rxq, rxb, data, data_off, data_len, &info); + return; +error: + ++rxq->stats.errors; +} + +static void +hn_rndis_receive(const struct rte_eth_dev *dev, struct hn_rx_queue *rxq, + struct hn_rx_bufinfo *rxb, void *buf, uint32_t len) +{ + const struct rndis_msghdr *hdr = buf; + + switch (hdr->type) { + case RNDIS_PACKET_MSG: + if (dev->data->dev_started) + hn_rndis_rx_data(rxq, rxb, buf, len); + break; + + case RNDIS_INDICATE_STATUS_MSG: + hn_rndis_link_status(rxq->hv, buf); + break; + + case RNDIS_INITIALIZE_CMPLT: + case RNDIS_QUERY_CMPLT: + case RNDIS_SET_CMPLT: + hn_rndis_receive_response(rxq->hv, buf, len); + break; + + default: + PMD_DRV_LOG(NOTICE, + "unexpected RNDIS message (type %#x len %u)", + hdr->type, len); + break; + } +} + +static void +hn_nvs_handle_rxbuf(struct rte_eth_dev *dev, + struct hn_data *hv, + struct hn_rx_queue *rxq, + const struct vmbus_chanpkt_hdr *hdr, + const void *buf) +{ + const struct vmbus_chanpkt_rxbuf *pkt; + const struct hn_nvs_hdr *nvs_hdr = buf; + uint32_t rxbuf_sz = hv->rxbuf_res->len; + char *rxbuf = hv->rxbuf_res->addr; + unsigned int i, hlen, count; + struct hn_rx_bufinfo *rxb; + + /* At minimum we need type header */ + if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*nvs_hdr))) { + PMD_RX_LOG(ERR, "invalid receive nvs RNDIS"); + return; + } + + /* Make sure that this is a RNDIS message. 
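For reference, the sanity checks hn_rndis_rx_data() applies before accepting a frame boil down to a handful of invariants on the RNDIS packet message, summarized here as a sketch:

    /* dlen >= sizeof(struct rndis_packet_msg)         full header present
     * dlen >= pkt->len                                not truncated by host
     * pkt->len >= datalen + oobdatalen + pktinfolen   sections fit in message
     * dataoffset, pktinfooffset >= OFFSET_MIN         no overlap with header
     * data_off + data_len <= pkt->len                 frame inside message
     * data_len >= ETHER_HDR_LEN                       at least an L2 header
     */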
*/ + if (unlikely(nvs_hdr->type != NVS_TYPE_RNDIS)) { + PMD_RX_LOG(ERR, "nvs type %u, not RNDIS", + nvs_hdr->type); + return; + } + + hlen = vmbus_chanpkt_getlen(hdr->hlen); + if (unlikely(hlen < sizeof(*pkt))) { + PMD_RX_LOG(ERR, "invalid rxbuf chanpkt"); + return; + } + + pkt = container_of(hdr, const struct vmbus_chanpkt_rxbuf, hdr); + if (unlikely(pkt->rxbuf_id != NVS_RXBUF_SIG)) { + PMD_RX_LOG(ERR, "invalid rxbuf_id 0x%08x", + pkt->rxbuf_id); + return; + } + + count = pkt->rxbuf_cnt; + if (unlikely(hlen < offsetof(struct vmbus_chanpkt_rxbuf, + rxbuf[count]))) { + PMD_RX_LOG(ERR, "invalid rxbuf_cnt %u", count); + return; + } + + if (pkt->hdr.xactid > hv->rxbuf_section_cnt) { + PMD_RX_LOG(ERR, "invalid rxbuf section id %" PRIx64, + pkt->hdr.xactid); + return; + } + + /* Setup receive buffer info to allow for callback */ + rxb = hn_rx_buf_init(rxq, pkt); + + /* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */ + for (i = 0; i < count; ++i) { + unsigned int ofs, len; + + ofs = pkt->rxbuf[i].ofs; + len = pkt->rxbuf[i].len; + + if (unlikely(ofs + len > rxbuf_sz)) { + PMD_RX_LOG(ERR, + "%uth RNDIS msg overflow ofs %u, len %u", + i, ofs, len); + continue; + } + + if (unlikely(len == 0)) { + PMD_RX_LOG(ERR, "%uth RNDIS msg len %u", i, len); + continue; + } + + hn_rndis_receive(dev, rxq, rxb, + rxbuf + ofs, len); + } + + /* Send ACK now if external mbuf not used */ + hn_rx_buf_release(rxb); +} + +struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv, + uint16_t queue_id, + unsigned int socket_id) +{ + struct hn_rx_queue *rxq; + + rxq = rte_zmalloc_socket("HN_RXQ", + sizeof(*rxq) + HN_RXQ_EVENT_DEFAULT, + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq) { + rxq->hv = hv; + rxq->chan = hv->channels[queue_id]; + rte_spinlock_init(&rxq->ring_lock); + rxq->port_id = hv->port_id; + rxq->queue_id = queue_id; + } + return rxq; +} + +int +hn_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mp) +{ + struct hn_data *hv = dev->data->dev_private; + char ring_name[RTE_RING_NAMESIZE]; + struct hn_rx_queue *rxq; + unsigned int count; + + PMD_INIT_FUNC_TRACE(); + + if (queue_idx == 0) { + rxq = hv->primary; + } else { + rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id); + if (!rxq) + return -ENOMEM; + } + + rxq->mb_pool = mp; + count = rte_mempool_avail_count(mp) / dev->data->nb_rx_queues; + if (nb_desc == 0 || nb_desc > count) + nb_desc = count; + + /* + * Staging ring from receive event logic to rx_pkts. + * rx_pkts assumes caller is handling multi-thread issue. + * event logic has locking. 
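A minimal standalone sketch of the staging-ring pattern described in the comment above: the event path is the single producer and the rx burst path the single consumer. The demo_* names are hypothetical; note the driver itself creates the ring with default flags and relies on the explicit _sp_/_sc_ call variants rather than creation flags:

#include <errno.h>
#include <rte_ring.h>
#include <rte_mbuf.h>

#define DEMO_RING_SZ 1024	/* rte_ring sizes must be powers of two */

static struct rte_ring *demo_ring;

static int demo_stage_init(unsigned int socket_id)
{
	demo_ring = rte_ring_create("demo_stage", DEMO_RING_SZ, socket_id,
				    RING_F_SP_ENQ | RING_F_SC_DEQ);
	return demo_ring ? 0 : -ENOMEM;
}

/* Producer side (event handler, serialized by the ring_lock) */
static void demo_produce(struct rte_mbuf *m)
{
	/* Drop on overflow, as hn_rxpkt() does when rx_ring is full */
	if (rte_ring_sp_enqueue(demo_ring, m) != 0)
		rte_pktmbuf_free(m);
}

/* Consumer side (the rx burst function, single-threaded by contract) */
static uint16_t demo_consume(struct rte_mbuf **burst, uint16_t n)
{
	return rte_ring_sc_dequeue_burst(demo_ring, (void **)burst, n, NULL);
}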
+ */ + snprintf(ring_name, sizeof(ring_name), + "hn_rx_%u_%u", dev->data->port_id, queue_idx); + rxq->rx_ring = rte_ring_create(ring_name, + rte_align32pow2(nb_desc), + socket_id, 0); + if (!rxq->rx_ring) + goto fail; + + dev->data->rx_queues[queue_idx] = rxq; + return 0; + +fail: + rte_ring_free(rxq->rx_ring); + rte_free(rxq->event_buf); + rte_free(rxq); + return -ENOMEM; +} + +void +hn_dev_rx_queue_release(void *arg) +{ + struct hn_rx_queue *rxq = arg; + + PMD_INIT_FUNC_TRACE(); + + if (!rxq) + return; + + rte_ring_free(rxq->rx_ring); + rxq->rx_ring = NULL; + rxq->mb_pool = NULL; + + if (rxq != rxq->hv->primary) { + rte_free(rxq->event_buf); + rte_free(rxq); + } +} + +void +hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx, + struct rte_eth_rxq_info *qinfo) +{ + struct hn_rx_queue *rxq = dev->data->rx_queues[queue_idx]; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = 1; + qinfo->nb_desc = rte_ring_get_capacity(rxq->rx_ring); +} + +static void +hn_nvs_handle_notify(const struct vmbus_chanpkt_hdr *pkthdr, + const void *data) +{ + const struct hn_nvs_hdr *hdr = data; + + if (unlikely(vmbus_chanpkt_datalen(pkthdr) < sizeof(*hdr))) { + PMD_DRV_LOG(ERR, "invalid nvs notify"); + return; + } + + PMD_DRV_LOG(INFO, + "got notify, nvs type %u", hdr->type); +} + +/* + * Process pending events on the channel. + * Called from both Rx queue poll and Tx cleanup + */ +void hn_process_events(struct hn_data *hv, uint16_t queue_id) +{ + struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id]; + struct hn_rx_queue *rxq; + uint32_t bytes_read = 0; + int ret = 0; + + rxq = queue_id == 0 ? hv->primary : dev->data->rx_queues[queue_id]; + + /* If no pending data then nothing to do */ + if (rte_vmbus_chan_rx_empty(rxq->chan)) + return; + + /* + * Since channel is shared between Rx and TX queue need to have a lock + * since DPDK does not force same CPU to be used for Rx/Tx. 
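hn_process_events() realizes the comment above with a non-blocking trylock, so whichever of the Rx or Tx paths arrives first drains the shared channel and the other simply backs off. A minimal sketch of the pattern (demo_* names are hypothetical):

#include <rte_spinlock.h>

static rte_spinlock_t demo_chan_lock = RTE_SPINLOCK_INITIALIZER;

static void demo_poll_channel(void)
{
	/* Non-blocking: if another lcore already holds the lock it is
	 * draining the channel for us, so just return. */
	if (!rte_spinlock_trylock(&demo_chan_lock))
		return;

	/* ... receive and dispatch channel packets here ... */

	rte_spinlock_unlock(&demo_chan_lock);
}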
+ */ + if (unlikely(!rte_spinlock_trylock(&rxq->ring_lock))) + return; + + for (;;) { + const struct vmbus_chanpkt_hdr *pkt; + uint32_t len = HN_RXQ_EVENT_DEFAULT; + const void *data; + + ret = rte_vmbus_chan_recv_raw(rxq->chan, rxq->event_buf, &len); + if (ret == -EAGAIN) + break; /* ring is empty */ + + else if (ret == -ENOBUFS) + rte_exit(EXIT_FAILURE, "event buffer not big enough (%u < %u)", + HN_RXQ_EVENT_DEFAULT, len); + else if (ret <= 0) + rte_exit(EXIT_FAILURE, + "vmbus ring buffer error: %d", ret); + + bytes_read += ret; + pkt = (const struct vmbus_chanpkt_hdr *)rxq->event_buf; + data = (char *)rxq->event_buf + vmbus_chanpkt_getlen(pkt->hlen); + + switch (pkt->type) { + case VMBUS_CHANPKT_TYPE_COMP: + hn_nvs_handle_comp(dev, queue_id, pkt, data); + break; + + case VMBUS_CHANPKT_TYPE_RXBUF: + hn_nvs_handle_rxbuf(dev, hv, rxq, pkt, data); + break; + + case VMBUS_CHANPKT_TYPE_INBAND: + hn_nvs_handle_notify(pkt, data); + break; + + default: + PMD_DRV_LOG(ERR, "unknown chan pkt %u", pkt->type); + break; + } + + if (rxq->rx_ring && rte_ring_full(rxq->rx_ring)) + break; + } + + if (bytes_read > 0) + rte_vmbus_chan_signal_read(rxq->chan, bytes_read); + + rte_spinlock_unlock(&rxq->ring_lock); +} + +static void hn_append_to_chim(struct hn_tx_queue *txq, + struct rndis_packet_msg *pkt, + const struct rte_mbuf *m) +{ + struct hn_txdesc *txd = txq->agg_txd; + uint8_t *buf = (uint8_t *)pkt; + unsigned int data_offs; + + hn_rndis_dump(pkt); + + data_offs = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset); + txd->chim_size += pkt->len; + txd->data_size += m->pkt_len; + ++txd->packets; + hn_update_packet_stats(&txq->stats, m); + + for (; m; m = m->next) { + uint16_t len = rte_pktmbuf_data_len(m); + + rte_memcpy(buf + data_offs, + rte_pktmbuf_mtod(m, const char *), len); + data_offs += len; + } +} + +/* + * Send pending aggregated data in chimney buffer (if any). + * Returns error if send was unsuccessful because channel ring buffer + * was full. + */ +static int hn_flush_txagg(struct hn_tx_queue *txq, bool *need_sig) + +{ + struct hn_txdesc *txd = txq->agg_txd; + struct hn_nvs_rndis rndis; + int ret; + + if (!txd) + return 0; + + rndis = (struct hn_nvs_rndis) { + .type = NVS_TYPE_RNDIS, + .rndis_mtype = NVS_RNDIS_MTYPE_DATA, + .chim_idx = txd->chim_index, + .chim_sz = txd->chim_size, + }; + + PMD_TX_LOG(DEBUG, "port %u:%u tx %u size %u", + txq->port_id, txq->queue_id, txd->chim_index, txd->chim_size); + + ret = hn_nvs_send(txq->chan, VMBUS_CHANPKT_FLAG_RC, + &rndis, sizeof(rndis), (uintptr_t)txd, need_sig); + + if (likely(ret == 0)) + hn_reset_txagg(txq); + else + PMD_TX_LOG(NOTICE, "port %u:%u send failed: %d", + txq->port_id, txq->queue_id, ret); + + return ret; +} + +static struct hn_txdesc *hn_new_txd(struct hn_data *hv, + struct hn_tx_queue *txq) +{ + struct hn_txdesc *txd; + + if (rte_mempool_get(hv->tx_pool, (void **)&txd)) { + ++txq->stats.nomemory; + PMD_TX_LOG(DEBUG, "tx pool exhausted!"); + return NULL; + } + + txd->m = NULL; + txd->queue_id = txq->queue_id; + txd->packets = 0; + txd->data_size = 0; + txd->chim_size = 0; + + return txd; +} + +static void * +hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq, uint32_t pktsize) +{ + struct hn_txdesc *agg_txd = txq->agg_txd; + struct rndis_packet_msg *pkt; + void *chim; + + if (agg_txd) { + unsigned int padding, olen; + + /* + * Update the previous RNDIS packet's total length, + * it can be increased due to the mandatory alignment + * padding for this RNDIS packet. 
And update the + * aggregating txdesc's chimney sending buffer size + * accordingly. + * + * Zero-out the padding, as required by the RNDIS spec. + */ + pkt = txq->agg_prevpkt; + olen = pkt->len; + padding = RTE_ALIGN(olen, txq->agg_align) - olen; + if (padding > 0) { + agg_txd->chim_size += padding; + pkt->len += padding; + memset((uint8_t *)pkt + olen, 0, padding); + } + + chim = (uint8_t *)pkt + pkt->len; + + txq->agg_pktleft--; + txq->agg_szleft -= pktsize; + if (txq->agg_szleft < HN_PKTSIZE_MIN(txq->agg_align)) { + /* + * Probably can't aggregate more packets, + * flush this aggregating txdesc proactively. + */ + txq->agg_pktleft = 0; + } + } else { + agg_txd = hn_new_txd(hv, txq); + if (!agg_txd) + return NULL; + + chim = (uint8_t *)hv->chim_res->addr + + agg_txd->chim_index * hv->chim_szmax; + + txq->agg_txd = agg_txd; + txq->agg_pktleft = txq->agg_pktmax - 1; + txq->agg_szleft = txq->agg_szmax - pktsize; + } + txq->agg_prevpkt = chim; + + return chim; +} + +static inline void * +hn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, + uint32_t pi_dlen, uint32_t pi_type) +{ + const uint32_t pi_size = RNDIS_PKTINFO_SIZE(pi_dlen); + struct rndis_pktinfo *pi; + + /* + * Per-packet-info does not move; it only grows. + * + * NOTE: + * pktinfooffset in this phase counts from the beginning + * of rndis_packet_msg. + */ + pi = (struct rndis_pktinfo *)((uint8_t *)pkt + hn_rndis_pktlen(pkt)); + + pkt->pktinfolen += pi_size; + + pi->size = pi_size; + pi->type = pi_type; + pi->offset = RNDIS_PKTINFO_OFFSET; + + return pi->data; +} + +/* Put RNDIS header and packet info on packet */ +static void hn_encap(struct rndis_packet_msg *pkt, + uint16_t queue_id, + const struct rte_mbuf *m) +{ + unsigned int hlen = m->l2_len + m->l3_len; + uint32_t *pi_data; + uint32_t pkt_hlen; + + pkt->type = RNDIS_PACKET_MSG; + pkt->len = m->pkt_len; + pkt->dataoffset = 0; + pkt->datalen = m->pkt_len; + pkt->oobdataoffset = 0; + pkt->oobdatalen = 0; + pkt->oobdataelements = 0; + pkt->pktinfooffset = sizeof(*pkt); + pkt->pktinfolen = 0; + pkt->vchandle = 0; + pkt->reserved = 0; + + /* + * Set the hash value for this packet, to the queue_id to cause + * TX done event for this packet on the right channel. 
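hn_rndis_pktinfo_append() above relies on the per-packet-info layout defined later in rndis.h. A self-contained sketch of the same bookkeeping, with the struct reproduced from that header and demo_pktinfo_append() as a hypothetical stand-in for the driver's helper:

#include <stddef.h>
#include <stdint.h>

struct rndis_pktinfo {
	uint32_t size;		/* total element size, header + data */
	uint32_t type;		/* NDIS_PKTINFO_TYPE_ */
	uint32_t offset;	/* start of data within the element */
	uint8_t data[];
};

#define DEMO_PKTINFO_OFFSET offsetof(struct rndis_pktinfo, data[0])
#define DEMO_PKTINFO_SIZE(dlen) offsetof(struct rndis_pktinfo, data[dlen])

/* Append one per-packet-info element at the current end of an RNDIS
 * message of 'msglen' bytes; returns where the caller stores its data. */
static void *demo_pktinfo_append(uint8_t *msg, uint32_t msglen,
				 uint32_t *pktinfolen,
				 uint32_t pi_dlen, uint32_t pi_type)
{
	struct rndis_pktinfo *pi = (struct rndis_pktinfo *)(msg + msglen);

	pi->size = DEMO_PKTINFO_SIZE(pi_dlen);	/* 12 + pi_dlen bytes */
	pi->type = pi_type;
	pi->offset = DEMO_PKTINFO_OFFSET;	/* data follows the header */
	*pktinfolen += pi->size;
	return pi->data;
}

In the driver, msglen is hn_rndis_pktlen(pkt), i.e. the fixed header plus any per-packet-info already appended, which is why elements only ever grow at the tail.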
+ */ + pi_data = hn_rndis_pktinfo_append(pkt, NDIS_HASH_VALUE_SIZE, + NDIS_PKTINFO_TYPE_HASHVAL); + *pi_data = queue_id; + + if (m->ol_flags & PKT_TX_VLAN_PKT) { + pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE, + NDIS_PKTINFO_TYPE_VLAN); + *pi_data = m->vlan_tci; + } + + if (m->ol_flags & PKT_TX_TCP_SEG) { + pi_data = hn_rndis_pktinfo_append(pkt, NDIS_LSO2_INFO_SIZE, + NDIS_PKTINFO_TYPE_LSO); + + if (m->ol_flags & PKT_TX_IPV6) { + *pi_data = NDIS_LSO2_INFO_MAKEIPV6(hlen, + m->tso_segsz); + } else { + *pi_data = NDIS_LSO2_INFO_MAKEIPV4(hlen, + m->tso_segsz); + } + } else if (m->ol_flags & + (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)) { + pi_data = hn_rndis_pktinfo_append(pkt, NDIS_TXCSUM_INFO_SIZE, + NDIS_PKTINFO_TYPE_CSUM); + *pi_data = 0; + + if (m->ol_flags & PKT_TX_IPV6) + *pi_data |= NDIS_TXCSUM_INFO_IPV6; + if (m->ol_flags & PKT_TX_IPV4) { + *pi_data |= NDIS_TXCSUM_INFO_IPV4; + + if (m->ol_flags & PKT_TX_IP_CKSUM) + *pi_data |= NDIS_TXCSUM_INFO_IPCS; + } + + if (m->ol_flags & PKT_TX_TCP_CKSUM) + *pi_data |= NDIS_TXCSUM_INFO_MKTCPCS(hlen); + else if (m->ol_flags & PKT_TX_UDP_CKSUM) + *pi_data |= NDIS_TXCSUM_INFO_MKUDPCS(hlen); + } + + pkt_hlen = pkt->pktinfooffset + pkt->pktinfolen; + /* Fixup RNDIS packet message total length */ + pkt->len += pkt_hlen; + + /* Convert RNDIS packet message offsets */ + pkt->dataoffset = hn_rndis_pktmsg_offset(pkt_hlen); + pkt->pktinfooffset = hn_rndis_pktmsg_offset(pkt->pktinfooffset); +} + +/* How many scatter gather list elements are needed */ +static unsigned int hn_get_slots(const struct rte_mbuf *m) +{ + unsigned int slots = 1; /* for RNDIS header */ + + while (m) { + unsigned int size = rte_pktmbuf_data_len(m); + unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK; + + slots += (offs + size + PAGE_SIZE - 1) / PAGE_SIZE; + m = m->next; + } + + return slots; +} + +/* Build scatter gather list from chained mbuf */ +static unsigned int hn_fill_sg(struct vmbus_gpa *sg, + const struct rte_mbuf *m) +{ + unsigned int segs = 0; + + while (m) { + rte_iova_t addr = rte_mbuf_data_iova(m); + unsigned int page = addr / PAGE_SIZE; + unsigned int offset = addr & PAGE_MASK; + unsigned int len = rte_pktmbuf_data_len(m); + + while (len > 0) { + unsigned int bytes = RTE_MIN(len, PAGE_SIZE - offset); + + sg[segs].page = page; + sg[segs].ofs = offset; + sg[segs].len = bytes; + segs++; + + ++page; + offset = 0; + len -= bytes; + } + m = m->next; + } + + return segs; +} + +/* Transmit directly from mbuf */ +static int hn_xmit_sg(struct hn_tx_queue *txq, + const struct hn_txdesc *txd, const struct rte_mbuf *m, + bool *need_sig) +{ + struct vmbus_gpa sg[hn_get_slots(m)]; + struct hn_nvs_rndis nvs_rndis = { + .type = NVS_TYPE_RNDIS, + .rndis_mtype = NVS_RNDIS_MTYPE_DATA, + .chim_sz = txd->chim_size, + }; + rte_iova_t addr; + unsigned int segs; + + /* attach aggregation data if present */ + if (txd->chim_size > 0) + nvs_rndis.chim_idx = txd->chim_index; + else + nvs_rndis.chim_idx = NVS_CHIM_IDX_INVALID; + + hn_rndis_dump(txd->rndis_pkt); + + /* pass IOVA of rndis header in first segment */ + addr = rte_malloc_virt2iova(txd->rndis_pkt); + if (unlikely(addr == RTE_BAD_IOVA)) { + PMD_DRV_LOG(ERR, "RNDIS transmit cannot get iova"); + return -EINVAL; + } + + sg[0].page = addr / PAGE_SIZE; + sg[0].ofs = addr & PAGE_MASK; + sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt)); + segs = 1; + + hn_update_packet_stats(&txq->stats, m); + + segs += hn_fill_sg(sg + 1, m); + + PMD_TX_LOG(DEBUG, "port %u:%u tx %u segs %u size %u", +
txq->port_id, txq->queue_id, txd->chim_index, + segs, nvs_rndis.chim_sz); + + return hn_nvs_send_sglist(txq->chan, sg, segs, + &nvs_rndis, sizeof(nvs_rndis), + (uintptr_t)txd, need_sig); +} + +uint16_t +hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct hn_tx_queue *txq = ptxq; + struct hn_data *hv = txq->hv; + bool need_sig = false; + uint16_t nb_tx; + int ret; + + if (unlikely(hv->closed)) + return 0; + + if (rte_mempool_avail_count(hv->tx_pool) <= txq->free_thresh) + hn_process_events(hv, txq->queue_id); + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + struct rte_mbuf *m = tx_pkts[nb_tx]; + uint32_t pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN; + struct rndis_packet_msg *pkt; + + /* For small packets aggregate them in chimney buffer */ + if (m->pkt_len < HN_TXCOPY_THRESHOLD && pkt_size <= txq->agg_szmax) { + /* If this packet will not fit, then flush */ + if (txq->agg_pktleft == 0 || + RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) { + if (hn_flush_txagg(txq, &need_sig)) + goto fail; + } + + pkt = hn_try_txagg(hv, txq, pkt_size); + if (unlikely(!pkt)) + break; + + hn_encap(pkt, txq->queue_id, m); + hn_append_to_chim(txq, pkt, m); + + rte_pktmbuf_free(m); + + /* if buffer is full, flush */ + if (txq->agg_pktleft == 0 && + hn_flush_txagg(txq, &need_sig)) + goto fail; + } else { + struct hn_txdesc *txd; + + /* can send chimney data and large packet at once */ + txd = txq->agg_txd; + if (txd) { + hn_reset_txagg(txq); + } else { + txd = hn_new_txd(hv, txq); + if (unlikely(!txd)) + break; + } + + pkt = txd->rndis_pkt; + txd->m = m; + txd->data_size += m->pkt_len; + ++txd->packets; + + hn_encap(pkt, txq->queue_id, m); + + ret = hn_xmit_sg(txq, txd, m, &need_sig); + if (unlikely(ret != 0)) { + PMD_TX_LOG(NOTICE, "sg send failed: %d", ret); + ++txq->stats.errors; + rte_mempool_put(hv->tx_pool, txd); + goto fail; + } + } + } + + /* If partial buffer left, then try and send it. + * if that fails, then reuse it on next send. + */ + hn_flush_txagg(txq, &need_sig); + +fail: + if (need_sig) + rte_vmbus_chan_signal_tx(txq->chan); + + return nb_tx; +} + +uint16_t +hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct hn_rx_queue *rxq = prxq; + struct hn_data *hv = rxq->hv; + + if (unlikely(hv->closed)) + return 0; + + /* If ring is empty then process more */ + if (rte_ring_count(rxq->rx_ring) < nb_pkts) + hn_process_events(hv, rxq->queue_id); + + /* Get mbufs off staging ring */ + return rte_ring_sc_dequeue_burst(rxq->rx_ring, (void **)rx_pkts, + nb_pkts, NULL); +} diff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h new file mode 100644 index 00000000..f7ff8585 --- /dev/null +++ b/drivers/net/netvsc/hn_var.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2009-2018 Microsoft Corp. + * Copyright (c) 2016 Brocade Communications Systems, Inc. + * Copyright (c) 2012 NetApp Inc. + * Copyright (c) 2012 Citrix Inc. + * All rights reserved. 
+ */ + +/* + * Tunable ethdev params + */ +#define HN_MIN_RX_BUF_SIZE 1024 +#define HN_MAX_XFER_LEN 2048 +#define HN_MAX_MAC_ADDRS 1 +#define HN_MAX_CHANNELS 64 + +/* Claimed to be 12232B */ +#define HN_MTU_MAX (9 * 1024) + +/* Retry interval */ +#define HN_CHAN_INTERVAL_US 100 + +/* Buffers need to be aligned */ +#ifndef PAGE_SIZE +#define PAGE_SIZE 4096 +#endif + +#ifndef PAGE_MASK +#define PAGE_MASK (PAGE_SIZE - 1) +#endif + +struct hn_data; +struct hn_txdesc; + +struct hn_stats { + uint64_t packets; + uint64_t bytes; + uint64_t errors; + uint64_t nomemory; + uint64_t multicast; + uint64_t broadcast; + /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */ + uint64_t size_bins[8]; +}; + +struct hn_tx_queue { + struct hn_data *hv; + struct vmbus_channel *chan; + uint16_t port_id; + uint16_t queue_id; + uint32_t free_thresh; + + /* Applied packet transmission aggregation limits. */ + uint32_t agg_szmax; + uint32_t agg_pktmax; + uint32_t agg_align; + + /* Packet transmission aggregation states */ + struct hn_txdesc *agg_txd; + uint32_t agg_pktleft; + uint32_t agg_szleft; + struct rndis_packet_msg *agg_prevpkt; + + struct hn_stats stats; +}; + +struct hn_rx_queue { + struct hn_data *hv; + struct vmbus_channel *chan; + struct rte_mempool *mb_pool; + struct rte_ring *rx_ring; + + rte_spinlock_t ring_lock; + uint32_t event_sz; + uint16_t port_id; + uint16_t queue_id; + struct hn_stats stats; + uint64_t ring_full; + + uint8_t event_buf[]; +}; + + +/* multi-packet data from host */ +struct hn_rx_bufinfo { + struct vmbus_channel *chan; + struct hn_data *hv; + uint64_t xactid; + struct rte_mbuf_ext_shared_info shinfo; +} __rte_cache_aligned; + +struct hn_data { + struct rte_vmbus_device *vmbus; + struct hn_rx_queue *primary; + uint16_t port_id; + bool closed; + uint32_t link_status; + uint32_t link_speed; + + struct rte_mem_resource *rxbuf_res; /* UIO resource for Rx */ + struct hn_rx_bufinfo *rxbuf_info; + uint32_t rxbuf_section_cnt; /* # of Rx sections */ + volatile uint32_t rxbuf_outstanding; + uint16_t max_queues; /* Max available queues */ + uint16_t num_queues; + uint64_t rss_offloads; + + struct rte_mem_resource *chim_res; /* UIO resource for Tx */ + struct rte_mempool *tx_pool; /* Tx descriptors */ + uint32_t chim_szmax; /* Max size per buffer */ + uint32_t chim_cnt; /* Max packets per buffer */ + + uint32_t nvs_ver; + uint32_t ndis_ver; + uint32_t rndis_agg_size; + uint32_t rndis_agg_pkts; + uint32_t rndis_agg_align; + + volatile uint32_t rndis_pending; + rte_atomic32_t rndis_req_id; + uint8_t rndis_resp[256]; + + struct ether_addr mac_addr; + struct vmbus_channel *channels[HN_MAX_CHANNELS]; +}; + +static inline struct vmbus_channel * +hn_primary_chan(const struct hn_data *hv) +{ + return hv->channels[0]; +} + +void hn_process_events(struct hn_data *hv, uint16_t queue_id); + +uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +int hn_tx_pool_init(struct rte_eth_dev *dev); +int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +void hn_dev_tx_queue_release(void *arg); +void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx, + struct rte_eth_txq_info *qinfo); + +struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv, + uint16_t queue_id, + unsigned int socket_id); +int hn_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, 
uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +void hn_dev_rx_queue_release(void *arg); +void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx, + struct rte_eth_rxq_info *qinfo); diff --git a/drivers/net/netvsc/meson.build b/drivers/net/netvsc/meson.build new file mode 100644 index 00000000..a717cdd4 --- /dev/null +++ b/drivers/net/netvsc/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Microsoft Corporation + +build = dpdk_conf.has('RTE_LIBRTE_VMBUS_BUS') +version = 2 +sources = files('hn_ethdev.c', 'hn_rxtx.c', 'hn_rndis.c', 'hn_nvs.c') + +deps += ['bus_vmbus' ] + +allow_experimental_apis = true diff --git a/drivers/net/netvsc/ndis.h b/drivers/net/netvsc/ndis.h new file mode 100644 index 00000000..2e7ca99b --- /dev/null +++ b/drivers/net/netvsc/ndis.h @@ -0,0 +1,378 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018 Microsoft Corp. + * All rights reserved. + */ + +#ifndef _NET_NDIS_H_ +#define _NET_NDIS_H_ + +#define NDIS_MEDIA_STATE_CONNECTED 0 +#define NDIS_MEDIA_STATE_DISCONNECTED 1 + +#define NDIS_NETCHANGE_TYPE_POSSIBLE 1 +#define NDIS_NETCHANGE_TYPE_DEFINITE 2 +#define NDIS_NETCHANGE_TYPE_FROMMEDIA 3 + +#define NDIS_OFFLOAD_SET_NOCHG 0 +#define NDIS_OFFLOAD_SET_ON 1 +#define NDIS_OFFLOAD_SET_OFF 2 + +/* a.k.a GRE MAC */ +#define NDIS_ENCAP_TYPE_NVGRE 0x00000001 + +#define NDIS_HASH_FUNCTION_MASK 0x000000FF /* see hash function */ +#define NDIS_HASH_TYPE_MASK 0x00FFFF00 /* see hash type */ + +/* hash function */ +#define NDIS_HASH_FUNCTION_TOEPLITZ 0x00000001 + +/* hash type */ +#define NDIS_HASH_IPV4 0x00000100 +#define NDIS_HASH_TCP_IPV4 0x00000200 +#define NDIS_HASH_IPV6 0x00000400 +#define NDIS_HASH_IPV6_EX 0x00000800 +#define NDIS_HASH_TCP_IPV6 0x00001000 +#define NDIS_HASH_TCP_IPV6_EX 0x00002000 + +#define NDIS_HASH_KEYSIZE_TOEPLITZ 40 +#define NDIS_HASH_INDCNT 128 + +#define NDIS_OBJTYPE_DEFAULT 0x80 +#define NDIS_OBJTYPE_RSS_CAPS 0x88 +#define NDIS_OBJTYPE_RSS_PARAMS 0x89 +#define NDIS_OBJTYPE_OFFLOAD 0xa7 + +struct ndis_object_hdr { + uint8_t ndis_type; /* NDIS_OBJTYPE_ */ + uint8_t ndis_rev; /* type specific */ + uint16_t ndis_size; /* incl. 
this hdr */ +} __rte_packed; + +/* + * OID_TCP_OFFLOAD_PARAMETERS + * ndis_type: NDIS_OBJTYPE_DEFAULT + */ +struct ndis_offload_params { + struct ndis_object_hdr ndis_hdr; + uint8_t ndis_ip4csum; /* NDIS_OFFLOAD_PARAM_ */ + uint8_t ndis_tcp4csum; /* NDIS_OFFLOAD_PARAM_ */ + uint8_t ndis_udp4csum; /* NDIS_OFFLOAD_PARAM_ */ + uint8_t ndis_tcp6csum; /* NDIS_OFFLOAD_PARAM_ */ + uint8_t ndis_udp6csum; /* NDIS_OFFLOAD_PARAM_ */ + uint8_t ndis_lsov1; /* NDIS_OFFLOAD_PARAM_ */ + uint8_t ndis_ipsecv1; /* NDIS_OFFLOAD_IPSECV1_ */ + uint8_t ndis_lsov2_ip4; /* NDIS_OFFLOAD_LSOV2_ */ + uint8_t ndis_lsov2_ip6; /* NDIS_OFFLOAD_LSOV2_ */ + uint8_t ndis_tcp4conn; /* 0 */ + uint8_t ndis_tcp6conn; /* 0 */ + uint32_t ndis_flags; /* 0 */ + /* NDIS >= 6.1 */ + uint8_t ndis_ipsecv2; /* NDIS_OFFLOAD_IPSECV2_ */ + uint8_t ndis_ipsecv2_ip4;/* NDIS_OFFLOAD_IPSECV2_ */ + /* NDIS >= 6.30 */ + uint8_t ndis_rsc_ip4; /* NDIS_OFFLOAD_RSC_ */ + uint8_t ndis_rsc_ip6; /* NDIS_OFFLOAD_RSC_ */ + uint8_t ndis_encap; /* NDIS_OFFLOAD_SET_ */ + uint8_t ndis_encap_types;/* NDIS_ENCAP_TYPE_ */ +}; + +#define NDIS_OFFLOAD_PARAMS_SIZE sizeof(struct ndis_offload_params) +#define NDIS_OFFLOAD_PARAMS_SIZE_6_1 \ + offsetof(struct ndis_offload_params, ndis_rsc_ip4) + +#define NDIS_OFFLOAD_PARAMS_REV_2 2 /* NDIS 6.1 */ +#define NDIS_OFFLOAD_PARAMS_REV_3 3 /* NDIS 6.30 */ + +#define NDIS_OFFLOAD_PARAM_NOCHG 0 /* common */ +#define NDIS_OFFLOAD_PARAM_OFF 1 +#define NDIS_OFFLOAD_PARAM_TX 2 +#define NDIS_OFFLOAD_PARAM_RX 3 +#define NDIS_OFFLOAD_PARAM_TXRX 4 + +/* NDIS_OFFLOAD_PARAM_NOCHG */ +#define NDIS_OFFLOAD_LSOV1_OFF 1 +#define NDIS_OFFLOAD_LSOV1_ON 2 + +/* NDIS_OFFLOAD_PARAM_NOCHG */ +#define NDIS_OFFLOAD_IPSECV1_OFF 1 +#define NDIS_OFFLOAD_IPSECV1_AH 2 +#define NDIS_OFFLOAD_IPSECV1_ESP 3 +#define NDIS_OFFLOAD_IPSECV1_AH_ESP 4 + +/* NDIS_OFFLOAD_PARAM_NOCHG */ +#define NDIS_OFFLOAD_LSOV2_OFF 1 +#define NDIS_OFFLOAD_LSOV2_ON 2 + +/* NDIS_OFFLOAD_PARAM_NOCHG */ +#define NDIS_OFFLOAD_IPSECV2_OFF 1 +#define NDIS_OFFLOAD_IPSECV2_AH 2 +#define NDIS_OFFLOAD_IPSECV2_ESP 3 +#define NDIS_OFFLOAD_IPSECV2_AH_ESP 4 + +/* NDIS_OFFLOAD_PARAM_NOCHG */ +#define NDIS_OFFLOAD_RSC_OFF 1 +#define NDIS_OFFLOAD_RSC_ON 2 + +/* + * OID_GEN_RECEIVE_SCALE_CAPABILITIES + * ndis_type: NDIS_OBJTYPE_RSS_CAPS + */ +struct ndis_rss_caps { + struct ndis_object_hdr ndis_hdr; + uint32_t ndis_caps; /* NDIS_RSS_CAP_ */ + uint32_t ndis_nmsi; /* # of MSIs */ + uint32_t ndis_nrxr; /* # of RX rings */ + /* NDIS >= 6.30 */ + uint16_t ndis_nind; /* # of indtbl ent. 
*/ + uint16_t ndis_pad; +} __rte_packed; + +#define NDIS_RSS_CAPS_SIZE \ + offsetof(struct ndis_rss_caps, ndis_pad) +#define NDIS_RSS_CAPS_SIZE_6_0 \ + offsetof(struct ndis_rss_caps, ndis_nind) + +#define NDIS_RSS_CAPS_REV_1 1 /* NDIS 6.{0,1,20} */ +#define NDIS_RSS_CAPS_REV_2 2 /* NDIS 6.30 */ + +#define NDIS_RSS_CAP_MSI 0x01000000 +#define NDIS_RSS_CAP_CLASSIFY_ISR 0x02000000 +#define NDIS_RSS_CAP_CLASSIFY_DPC 0x04000000 +#define NDIS_RSS_CAP_MSIX 0x08000000 +#define NDIS_RSS_CAP_IPV4 0x00000100 +#define NDIS_RSS_CAP_IPV6 0x00000200 +#define NDIS_RSS_CAP_IPV6_EX 0x00000400 +#define NDIS_RSS_CAP_HASH_TOEPLITZ NDIS_HASH_FUNCTION_TOEPLITZ +#define NDIS_RSS_CAP_HASHFUNC_MASK NDIS_HASH_FUNCTION_MASK + +/* + * OID_GEN_RECEIVE_SCALE_PARAMETERS + * ndis_type: NDIS_OBJTYPE_RSS_PARAMS + */ +struct ndis_rss_params { + struct ndis_object_hdr ndis_hdr; + uint16_t ndis_flags; /* NDIS_RSS_FLAG_ */ + uint16_t ndis_bcpu; /* base cpu 0 */ + uint32_t ndis_hash; /* NDIS_HASH_ */ + uint16_t ndis_indsize; /* indirect table */ + uint32_t ndis_indoffset; + uint16_t ndis_keysize; /* hash key */ + uint32_t ndis_keyoffset; + /* NDIS >= 6.20 */ + uint32_t ndis_cpumaskoffset; + uint32_t ndis_cpumaskcnt; + uint32_t ndis_cpumaskentsz; +}; + +#define NDIS_RSS_PARAMS_SIZE sizeof(struct ndis_rss_params) +#define NDIS_RSS_PARAMS_SIZE_6_0 \ + offsetof(struct ndis_rss_params, ndis_cpumaskoffset) + +#define NDIS_RSS_PARAMS_REV_1 1 /* NDIS 6.0 */ +#define NDIS_RSS_PARAMS_REV_2 2 /* NDIS 6.20 */ + +#define NDIS_RSS_FLAG_NONE 0x0000 +#define NDIS_RSS_FLAG_BCPU_UNCHG 0x0001 +#define NDIS_RSS_FLAG_HASH_UNCHG 0x0002 +#define NDIS_RSS_FLAG_IND_UNCHG 0x0004 +#define NDIS_RSS_FLAG_KEY_UNCHG 0x0008 +#define NDIS_RSS_FLAG_DISABLE 0x0010 + +/* non-standard convenient struct */ +struct ndis_rssprm_toeplitz { + struct ndis_rss_params rss_params; + /* Indirect table */ + uint32_t rss_ind[NDIS_HASH_INDCNT]; + /* Toeplitz hash key */ + uint8_t rss_key[NDIS_HASH_KEYSIZE_TOEPLITZ]; +}; + +#define NDIS_RSSPRM_TOEPLITZ_SIZE(nind) \ + offsetof(struct ndis_rssprm_toeplitz, rss_ind[nind]) + +/* + * OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES + * ndis_type: NDIS_OBJTYPE_OFFLOAD + */ + +#define NDIS_OFFLOAD_ENCAP_NONE 0x0000 +#define NDIS_OFFLOAD_ENCAP_NULL 0x0001 +#define NDIS_OFFLOAD_ENCAP_8023 0x0002 +#define NDIS_OFFLOAD_ENCAP_8023PQ 0x0004 +#define NDIS_OFFLOAD_ENCAP_8023PQ_OOB 0x0008 +#define NDIS_OFFLOAD_ENCAP_RFC1483 0x0010 + +struct ndis_csum_offload { + uint32_t ndis_ip4_txenc; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_ip4_txcsum; +#define NDIS_TXCSUM_CAP_IP4OPT 0x001 +#define NDIS_TXCSUM_CAP_TCP4OPT 0x004 +#define NDIS_TXCSUM_CAP_TCP4 0x010 +#define NDIS_TXCSUM_CAP_UDP4 0x040 +#define NDIS_TXCSUM_CAP_IP4 0x100 + uint32_t ndis_ip4_rxenc; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_ip4_rxcsum; +#define NDIS_RXCSUM_CAP_IP4OPT 0x001 +#define NDIS_RXCSUM_CAP_TCP4OPT 0x004 +#define NDIS_RXCSUM_CAP_TCP4 0x010 +#define NDIS_RXCSUM_CAP_UDP4 0x040 +#define NDIS_RXCSUM_CAP_IP4 0x100 + uint32_t ndis_ip6_txenc; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_ip6_txcsum; +#define NDIS_TXCSUM_CAP_IP6EXT 0x001 +#define NDIS_TXCSUM_CAP_TCP6OPT 0x004 +#define NDIS_TXCSUM_CAP_TCP6 0x010 +#define NDIS_TXCSUM_CAP_UDP6 0x040 + uint32_t ndis_ip6_rxenc; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_ip6_rxcsum; +#define NDIS_RXCSUM_CAP_IP6EXT 0x001 +#define NDIS_RXCSUM_CAP_TCP6OPT 0x004 +#define NDIS_RXCSUM_CAP_TCP6 0x010 +#define NDIS_RXCSUM_CAP_UDP6 0x040 +}; + +struct ndis_lsov1_offload { + uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_maxsize; + uint32_t ndis_minsegs; + 
uint32_t ndis_opts; +}; + +struct ndis_ipsecv1_offload { + uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_ah_esp; + uint32_t ndis_xport_tun; + uint32_t ndis_ip4_opts; + uint32_t ndis_flags; + uint32_t ndis_ip4_ah; + uint32_t ndis_ip4_esp; +}; + +struct ndis_lsov2_offload { + uint32_t ndis_ip4_encap; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_ip4_maxsz; + uint32_t ndis_ip4_minsg; + uint32_t ndis_ip6_encap; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_ip6_maxsz; + uint32_t ndis_ip6_minsg; + uint32_t ndis_ip6_opts; +#define NDIS_LSOV2_CAP_IP6EXT 0x001 +#define NDIS_LSOV2_CAP_TCP6OPT 0x004 +}; + +struct ndis_ipsecv2_offload { + uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/ + uint16_t ndis_ip6; + uint16_t ndis_ip4opt; + uint16_t ndis_ip6ext; + uint16_t ndis_ah; + uint16_t ndis_esp; + uint16_t ndis_ah_esp; + uint16_t ndis_xport; + uint16_t ndis_tun; + uint16_t ndis_xport_tun; + uint16_t ndis_lso; + uint16_t ndis_extseq; + uint32_t ndis_udp_esp; + uint32_t ndis_auth; + uint32_t ndis_crypto; + uint32_t ndis_sa_caps; +}; + +struct ndis_rsc_offload { + uint16_t ndis_ip4; + uint16_t ndis_ip6; +}; + +struct ndis_encap_offload { + uint32_t ndis_flags; + uint32_t ndis_maxhdr; +}; + +struct ndis_offload { + struct ndis_object_hdr ndis_hdr; + struct ndis_csum_offload ndis_csum; + struct ndis_lsov1_offload ndis_lsov1; + struct ndis_ipsecv1_offload ndis_ipsecv1; + struct ndis_lsov2_offload ndis_lsov2; + uint32_t ndis_flags; + /* NDIS >= 6.1 */ + struct ndis_ipsecv2_offload ndis_ipsecv2; + /* NDIS >= 6.30 */ + struct ndis_rsc_offload ndis_rsc; + struct ndis_encap_offload ndis_encap_gre; +}; + +#define NDIS_OFFLOAD_SIZE sizeof(struct ndis_offload) +#define NDIS_OFFLOAD_SIZE_6_0 offsetof(struct ndis_offload, ndis_ipsecv2) +#define NDIS_OFFLOAD_SIZE_6_1 offsetof(struct ndis_offload, ndis_rsc) + +#define NDIS_OFFLOAD_REV_1 1 /* NDIS 6.0 */ +#define NDIS_OFFLOAD_REV_2 2 /* NDIS 6.1 */ +#define NDIS_OFFLOAD_REV_3 3 /* NDIS 6.30 */ + +/* + * Per-packet-info + */ + +/* VLAN */ +#define NDIS_VLAN_INFO_SIZE sizeof(uint32_t) +#define NDIS_VLAN_INFO_PRI_MASK 0x0007 +#define NDIS_VLAN_INFO_CFI_MASK 0x0008 +#define NDIS_VLAN_INFO_ID_MASK 0xfff0 +#define NDIS_VLAN_INFO_MAKE(id, pri, cfi) \ + (((pri) & NDIS_VLAN_INFO_PRI_MASK) | \ + (((cfi) & 0x1) << 3) | (((id) & 0xfff) << 4)) +#define NDIS_VLAN_INFO_ID(inf) (((inf) & NDIS_VLAN_INFO_ID_MASK) >> 4) +#define NDIS_VLAN_INFO_CFI(inf) (((inf) & NDIS_VLAN_INFO_CFI_MASK) >> 3) +#define NDIS_VLAN_INFO_PRI(inf) ((inf) & NDIS_VLAN_INFO_PRI_MASK) + +/* Reception checksum */ +#define NDIS_RXCSUM_INFO_SIZE sizeof(uint32_t) +#define NDIS_RXCSUM_INFO_TCPCS_FAILED 0x0001 +#define NDIS_RXCSUM_INFO_UDPCS_FAILED 0x0002 +#define NDIS_RXCSUM_INFO_IPCS_FAILED 0x0004 +#define NDIS_RXCSUM_INFO_TCPCS_OK 0x0008 +#define NDIS_RXCSUM_INFO_UDPCS_OK 0x0010 +#define NDIS_RXCSUM_INFO_IPCS_OK 0x0020 +#define NDIS_RXCSUM_INFO_LOOPBACK 0x0040 +#define NDIS_RXCSUM_INFO_TCPCS_INVAL 0x0080 +#define NDIS_RXCSUM_INFO_IPCS_INVAL 0x0100 + +/* LSOv2 */ +#define NDIS_LSO2_INFO_SIZE sizeof(uint32_t) +#define NDIS_LSO2_INFO_MSS_MASK 0x000fffff +#define NDIS_LSO2_INFO_THOFF_MASK 0x3ff00000 +#define NDIS_LSO2_INFO_ISLSO2 0x40000000 +#define NDIS_LSO2_INFO_ISIPV6 0x80000000 + +#define NDIS_LSO2_INFO_MAKE(thoff, mss) \ + ((((uint32_t)(mss)) & NDIS_LSO2_INFO_MSS_MASK) | \ + ((((uint32_t)(thoff)) & 0x3ff) << 20) | \ + NDIS_LSO2_INFO_ISLSO2) + +#define NDIS_LSO2_INFO_MAKEIPV4(thoff, mss) \ + NDIS_LSO2_INFO_MAKE((thoff), (mss)) + +#define NDIS_LSO2_INFO_MAKEIPV6(thoff, mss) \ + (NDIS_LSO2_INFO_MAKE((thoff), (mss)) | 
NDIS_LSO2_INFO_ISIPV6) + +/* Transmission checksum */ +#define NDIS_TXCSUM_INFO_SIZE sizeof(uint32_t) +#define NDIS_TXCSUM_INFO_IPV4 0x00000001 +#define NDIS_TXCSUM_INFO_IPV6 0x00000002 +#define NDIS_TXCSUM_INFO_TCPCS 0x00000004 +#define NDIS_TXCSUM_INFO_UDPCS 0x00000008 +#define NDIS_TXCSUM_INFO_IPCS 0x00000010 +#define NDIS_TXCSUM_INFO_THOFF 0x03ff0000 + +#define NDIS_TXCSUM_INFO_MKL4CS(thoff, flag) \ + ((((uint32_t)(thoff)) << 16) | (flag)) + +#define NDIS_TXCSUM_INFO_MKTCPCS(thoff) \ + NDIS_TXCSUM_INFO_MKL4CS((thoff), NDIS_TXCSUM_INFO_TCPCS) + +#define NDIS_TXCSUM_INFO_MKUDPCS(thoff) \ + NDIS_TXCSUM_INFO_MKL4CS((thoff), NDIS_TXCSUM_INFO_UDPCS) + +#endif /* !_NET_NDIS_H_ */ diff --git a/drivers/net/netvsc/rndis.h b/drivers/net/netvsc/rndis.h new file mode 100644 index 00000000..eac9a99f --- /dev/null +++ b/drivers/net/netvsc/rndis.h @@ -0,0 +1,414 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018 Microsoft Corp. + * Copyright (c) 2010 Jonathan Armani + * Copyright (c) 2010 Fabien Romano + * Copyright (c) 2010 Michael Knudsen + * All rights reserved. + */ + +#ifndef _NET_RNDIS_H_ +#define _NET_RNDIS_H_ + +/* Canonical major/minor version as of 22nd Aug. 2016. */ +#define RNDIS_VERSION_MAJOR 0x00000001 +#define RNDIS_VERSION_MINOR 0x00000000 + +#define RNDIS_STATUS_SUCCESS 0x00000000 +#define RNDIS_STATUS_PENDING 0x00000103 + +#define RNDIS_STATUS_ONLINE 0x40010003 +#define RNDIS_STATUS_RESET_START 0x40010004 +#define RNDIS_STATUS_RESET_END 0x40010005 +#define RNDIS_STATUS_RING_STATUS 0x40010006 +#define RNDIS_STATUS_CLOSED 0x40010007 +#define RNDIS_STATUS_WAN_LINE_UP 0x40010008 +#define RNDIS_STATUS_WAN_LINE_DOWN 0x40010009 +#define RNDIS_STATUS_WAN_FRAGMENT 0x4001000A +#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000B +#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000C +#define RNDIS_STATUS_HARDWARE_LINE_UP 0x4001000D +#define RNDIS_STATUS_HARDWARE_LINE_DOWN 0x4001000E +#define RNDIS_STATUS_INTERFACE_UP 0x4001000F +#define RNDIS_STATUS_INTERFACE_DOWN 0x40010010 +#define RNDIS_STATUS_MEDIA_BUSY 0x40010011 +#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION 0x40010012 +#define RNDIS_STATUS_WW_INDICATION RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION +#define RNDIS_STATUS_LINK_SPEED_CHANGE 0x40010013 +#define RNDIS_STATUS_NETWORK_CHANGE 0x40010018 +#define RNDIS_STATUS_TASK_OFFLOAD_CURRENT_CONFIG 0x40020006 + +#define RNDIS_STATUS_FAILURE 0xC0000001 +#define RNDIS_STATUS_RESOURCES 0xC000009A +#define RNDIS_STATUS_NOT_SUPPORTED 0xC00000BB +#define RNDIS_STATUS_CLOSING 0xC0010002 +#define RNDIS_STATUS_BAD_VERSION 0xC0010004 +#define RNDIS_STATUS_BAD_CHARACTERISTICS 0xC0010005 +#define RNDIS_STATUS_ADAPTER_NOT_FOUND 0xC0010006 +#define RNDIS_STATUS_OPEN_FAILED 0xC0010007 +#define RNDIS_STATUS_DEVICE_FAILED 0xC0010008 +#define RNDIS_STATUS_MULTICAST_FULL 0xC0010009 +#define RNDIS_STATUS_MULTICAST_EXISTS 0xC001000A +#define RNDIS_STATUS_MULTICAST_NOT_FOUND 0xC001000B +#define RNDIS_STATUS_REQUEST_ABORTED 0xC001000C +#define RNDIS_STATUS_RESET_IN_PROGRESS 0xC001000D +#define RNDIS_STATUS_CLOSING_INDICATING 0xC001000E +#define RNDIS_STATUS_INVALID_PACKET 0xC001000F +#define RNDIS_STATUS_OPEN_LIST_FULL 0xC0010010 +#define RNDIS_STATUS_ADAPTER_NOT_READY 0xC0010011 +#define RNDIS_STATUS_ADAPTER_NOT_OPEN 0xC0010012 +#define RNDIS_STATUS_NOT_INDICATING 0xC0010013 +#define RNDIS_STATUS_INVALID_LENGTH 0xC0010014 +#define RNDIS_STATUS_INVALID_DATA 0xC0010015 +#define RNDIS_STATUS_BUFFER_TOO_SHORT 0xC0010016 +#define RNDIS_STATUS_INVALID_OID 0xC0010017 +#define RNDIS_STATUS_ADAPTER_REMOVED 0xC0010018 +#define
RNDIS_STATUS_UNSUPPORTED_MEDIA 0xC0010019 +#define RNDIS_STATUS_GROUP_ADDRESS_IN_USE 0xC001001A +#define RNDIS_STATUS_FILE_NOT_FOUND 0xC001001B +#define RNDIS_STATUS_ERROR_READING_FILE 0xC001001C +#define RNDIS_STATUS_ALREADY_MAPPED 0xC001001D +#define RNDIS_STATUS_RESOURCE_CONFLICT 0xC001001E +#define RNDIS_STATUS_NO_CABLE 0xC001001F + +#define OID_GEN_SUPPORTED_LIST 0x00010101 +#define OID_GEN_HARDWARE_STATUS 0x00010102 +#define OID_GEN_MEDIA_SUPPORTED 0x00010103 +#define OID_GEN_MEDIA_IN_USE 0x00010104 +#define OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105 +#define OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106 +#define OID_GEN_LINK_SPEED 0x00010107 +#define OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108 +#define OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109 +#define OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A +#define OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B +#define OID_GEN_VENDOR_ID 0x0001010C +#define OID_GEN_VENDOR_DESCRIPTION 0x0001010D +#define OID_GEN_CURRENT_PACKET_FILTER 0x0001010E +#define OID_GEN_CURRENT_LOOKAHEAD 0x0001010F +#define OID_GEN_DRIVER_VERSION 0x00010110 +#define OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111 +#define OID_GEN_PROTOCOL_OPTIONS 0x00010112 +#define OID_GEN_MAC_OPTIONS 0x00010113 +#define OID_GEN_MEDIA_CONNECT_STATUS 0x00010114 +#define OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115 +#define OID_GEN_VENDOR_DRIVER_VERSION 0x00010116 +#define OID_GEN_SUPPORTED_GUIDS 0x00010117 +#define OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118 +#define OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119 +#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 +#define OID_GEN_RECEIVE_SCALE_PARAMETERS 0x00010204 +#define OID_GEN_MACHINE_NAME 0x0001021A +#define OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B +#define OID_GEN_VLAN_ID 0x0001021C + +#define OID_802_3_PERMANENT_ADDRESS 0x01010101 +#define OID_802_3_CURRENT_ADDRESS 0x01010102 +#define OID_802_3_MULTICAST_LIST 0x01010103 +#define OID_802_3_MAXIMUM_LIST_SIZE 0x01010104 +#define OID_802_3_MAC_OPTIONS 0x01010105 +#define OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101 +#define OID_802_3_XMIT_ONE_COLLISION 0x01020102 +#define OID_802_3_XMIT_MORE_COLLISIONS 0x01020103 +#define OID_802_3_XMIT_DEFERRED 0x01020201 +#define OID_802_3_XMIT_MAX_COLLISIONS 0x01020202 +#define OID_802_3_RCV_OVERRUN 0x01020203 +#define OID_802_3_XMIT_UNDERRUN 0x01020204 +#define OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205 +#define OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206 +#define OID_802_3_XMIT_LATE_COLLISIONS 0x01020207 + +#define OID_TCP_OFFLOAD_PARAMETERS 0xFC01020C +#define OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020D + +#define RNDIS_MEDIUM_802_3 0x00000000 + +/* Device flags */ +#define RNDIS_DF_CONNECTIONLESS 0x00000001 +#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002 + +/* + * Common RNDIS message header. + */ +struct rndis_msghdr { + uint32_t type; + uint32_t len; +}; + +/* + * RNDIS data message + */ +#define RNDIS_PACKET_MSG 0x00000001 + +struct rndis_packet_msg { + uint32_t type; + uint32_t len; + uint32_t dataoffset; + uint32_t datalen; + uint32_t oobdataoffset; + uint32_t oobdatalen; + uint32_t oobdataelements; + uint32_t pktinfooffset; + uint32_t pktinfolen; + uint32_t vchandle; + uint32_t reserved; +}; + +/* + * Minimum value for dataoffset, oobdataoffset, and + * pktinfooffset. + */ +#define RNDIS_PACKET_MSG_OFFSET_MIN \ + (sizeof(struct rndis_packet_msg) - \ + offsetof(struct rndis_packet_msg, dataoffset)) + +/* Offset from the beginning of rndis_packet_msg.
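To make the offset convention concrete: dataoffset and pktinfooffset are stored relative to the dataoffset field, not to the start of the message, which is what OFFSET_MIN above and OFFSET_ABS below encode. A small worked check (struct copied from above; main() is only a harness):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct rndis_packet_msg {
	uint32_t type, len, dataoffset, datalen, oobdataoffset, oobdatalen,
		 oobdataelements, pktinfooffset, pktinfolen, vchandle,
		 reserved;
};

int main(void)
{
	/* 11 32-bit fields -> 44 bytes total, dataoffset at byte 8 */
	size_t rel_base = offsetof(struct rndis_packet_msg, dataoffset);
	size_t offset_min = sizeof(struct rndis_packet_msg) - rel_base;

	assert(rel_base == 8);
	assert(offset_min == 36);	/* RNDIS_PACKET_MSG_OFFSET_MIN */

	/* OFFSET_ABS(36) == 44: payload may start right after the header */
	assert(offset_min + rel_base == sizeof(struct rndis_packet_msg));
	return 0;
}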
*/ +#define RNDIS_PACKET_MSG_OFFSET_ABS(ofs) \ + ((ofs) + offsetof(struct rndis_packet_msg, dataoffset)) + +#define RNDIS_PACKET_MSG_OFFSET_ALIGN 4 +#define RNDIS_PACKET_MSG_OFFSET_ALIGNMASK \ + (RNDIS_PACKET_MSG_OFFSET_ALIGN - 1) + +/* Per-packet-info for RNDIS data message */ +struct rndis_pktinfo { + uint32_t size; + uint32_t type; /* NDIS_PKTINFO_TYPE_ */ + uint32_t offset; + uint8_t data[]; +}; + +#define RNDIS_PKTINFO_OFFSET \ + offsetof(struct rndis_pktinfo, data[0]) +#define RNDIS_PKTINFO_SIZE_ALIGN 4 +#define RNDIS_PKTINFO_SIZE_ALIGNMASK (RNDIS_PKTINFO_SIZE_ALIGN - 1) + +#define NDIS_PKTINFO_TYPE_CSUM 0 +#define NDIS_PKTINFO_TYPE_IPSEC 1 +#define NDIS_PKTINFO_TYPE_LSO 2 +#define NDIS_PKTINFO_TYPE_CLASSIFY 3 +/* reserved 4 */ +#define NDIS_PKTINFO_TYPE_SGLIST 5 +#define NDIS_PKTINFO_TYPE_VLAN 6 +#define NDIS_PKTINFO_TYPE_ORIG 7 +#define NDIS_PKTINFO_TYPE_PKT_CANCELID 8 +#define NDIS_PKTINFO_TYPE_ORIG_NBLIST 9 +#define NDIS_PKTINFO_TYPE_CACHE_NBLIST 10 +#define NDIS_PKTINFO_TYPE_PKT_PAD 11 + +/* RNDIS extension */ + +/* Per-packet hash info */ +#define NDIS_HASH_INFO_SIZE sizeof(uint32_t) +#define NDIS_PKTINFO_TYPE_HASHINF NDIS_PKTINFO_TYPE_ORIG_NBLIST +/* NDIS_HASH_ */ + +/* Per-packet hash value */ +#define NDIS_HASH_VALUE_SIZE sizeof(uint32_t) +#define NDIS_PKTINFO_TYPE_HASHVAL NDIS_PKTINFO_TYPE_PKT_CANCELID + +/* Per-packet-info size */ +#define RNDIS_PKTINFO_SIZE(dlen) offsetof(struct rndis_pktinfo, data[dlen]) + +/* + * RNDIS control messages + */ + +/* + * Common header for RNDIS completion messages. + * + * NOTE: It does not apply to RNDIS_RESET_CMPLT. + */ +struct rndis_comp_hdr { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t status; +}; + +/* Initialize the device. */ +#define RNDIS_INITIALIZE_MSG 0x00000002 +#define RNDIS_INITIALIZE_CMPLT 0x80000002 + +struct rndis_init_req { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t ver_major; + uint32_t ver_minor; + uint32_t max_xfersz; +}; + +struct rndis_init_comp { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t status; + uint32_t ver_major; + uint32_t ver_minor; + uint32_t devflags; + uint32_t medium; + uint32_t pktmaxcnt; + uint32_t pktmaxsz; + uint32_t align; + uint32_t aflistoffset; + uint32_t aflistsz; +}; + +#define RNDIS_INIT_COMP_SIZE_MIN \ + offsetof(struct rndis_init_comp, aflistsz) + +/* Halt the device. No response sent. */ +#define RNDIS_HALT_MSG 0x00000003 + +struct rndis_halt_req { + uint32_t type; + uint32_t len; + uint32_t rid; +}; + +/* Send a query object. */ +#define RNDIS_QUERY_MSG 0x00000004 +#define RNDIS_QUERY_CMPLT 0x80000004 + +struct rndis_query_req { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t oid; + uint32_t infobuflen; + uint32_t infobufoffset; + uint32_t devicevchdl; +}; + +#define RNDIS_QUERY_REQ_INFOBUFOFFSET \ + (sizeof(struct rndis_query_req) - \ + offsetof(struct rndis_query_req, rid)) + +struct rndis_query_comp { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t status; + uint32_t infobuflen; + uint32_t infobufoffset; +}; + +/* infobuf offset from the beginning of rndis_query_comp. */ +#define RNDIS_QUERY_COMP_INFOBUFOFFSET_ABS(ofs) \ + ((ofs) + offsetof(struct rndis_query_comp, rid)) + +/* Send a set object request. 
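As an illustration of how these control messages are laid out, here is a hypothetical builder for a set request carrying a 32-bit packet filter; request-id allocation and the actual channel send are elided, and the struct and constants mirror the definitions in this header:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct rndis_set_req {
	uint32_t type, len, rid, oid, infobuflen, infobufoffset, devicevchdl;
};

#define DEMO_RNDIS_SET_MSG 0x00000005	/* RNDIS_SET_MSG */
#define DEMO_OID_PACKET_FILTER 0x0001010E /* OID_GEN_CURRENT_PACKET_FILTER */

static uint32_t demo_build_set_filter(uint8_t *buf, uint32_t rid,
				      uint32_t filter)
{
	struct rndis_set_req *req = (struct rndis_set_req *)buf;

	memset(req, 0, sizeof(*req));
	req->type = DEMO_RNDIS_SET_MSG;
	req->len = sizeof(*req) + sizeof(filter);
	req->rid = rid;
	req->oid = DEMO_OID_PACKET_FILTER;
	req->infobuflen = sizeof(filter);
	/* Offsets count from the 'rid' field, per RNDIS_SET_REQ_INFOBUFOFFSET:
	 * 28 - 8 = 20, so the info buffer lands right after the header. */
	req->infobufoffset = sizeof(*req) -
			     offsetof(struct rndis_set_req, rid);
	memcpy(buf + sizeof(*req), &filter, sizeof(filter));
	return req->len;
}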
*/ +#define RNDIS_SET_MSG 0x00000005 +#define RNDIS_SET_CMPLT 0x80000005 + +struct rndis_set_req { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t oid; + uint32_t infobuflen; + uint32_t infobufoffset; + uint32_t devicevchdl; +}; + +#define RNDIS_SET_REQ_INFOBUFOFFSET \ + (sizeof(struct rndis_set_req) - \ + offsetof(struct rndis_set_req, rid)) + +struct rndis_set_comp { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t status; +}; + +/* + * Parameter used by OID_GEN_RNDIS_CONFIG_PARAMETER. + */ +#define RNDIS_SET_PARAM_NUMERIC 0x00000000 +#define RNDIS_SET_PARAM_STRING 0x00000002 + +struct rndis_set_parameter { + uint32_t nameoffset; + uint32_t namelen; + uint32_t type; + uint32_t valueoffset; + uint32_t valuelen; +}; + +/* Perform a soft reset on the device. */ +#define RNDIS_RESET_MSG 0x00000006 +#define RNDIS_RESET_CMPLT 0x80000006 + +struct rndis_reset_req { + uint32_t type; + uint32_t len; + uint32_t rid; +}; + +struct rndis_reset_comp { + uint32_t type; + uint32_t len; + uint32_t status; + uint32_t adrreset; +}; + +/* 802.3 link-state or undefined message error. Sent by device. */ +#define RNDIS_INDICATE_STATUS_MSG 0x00000007 + +struct rndis_status_msg { + uint32_t type; + uint32_t len; + uint32_t status; + uint32_t stbuflen; + uint32_t stbufoffset; + /* rndis_diag_info */ +}; + +/* stbuf offset from the beginning of rndis_status_msg. */ +#define RNDIS_STBUFOFFSET_ABS(ofs) \ + ((ofs) + offsetof(struct rndis_status_msg, status)) + +/* + * Immediately after rndis_status_msg.stbufoffset, if a control + * message is malformatted, or a packet message contains inappropriate + * content. + */ +struct rndis_diag_info { + uint32_t diagstatus; + uint32_t erroffset; +}; + +/* Keepalive message. May be sent by device. */ +#define RNDIS_KEEPALIVE_MSG 0x00000008 +#define RNDIS_KEEPALIVE_CMPLT 0x80000008 + +struct rndis_keepalive_req { + uint32_t type; + uint32_t len; + uint32_t rid; +}; + +struct rndis_keepalive_comp { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t status; +}; + +/* Packet filter bits used by OID_GEN_CURRENT_PACKET_FILTER */ +#define NDIS_PACKET_TYPE_NONE 0x00000000 +#define NDIS_PACKET_TYPE_DIRECTED 0x00000001 +#define NDIS_PACKET_TYPE_MULTICAST 0x00000002 +#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004 +#define NDIS_PACKET_TYPE_BROADCAST 0x00000008 +#define NDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010 +#define NDIS_PACKET_TYPE_PROMISCUOUS 0x00000020 +#define NDIS_PACKET_TYPE_SMT 0x00000040 +#define NDIS_PACKET_TYPE_ALL_LOCAL 0x00000080 +#define NDIS_PACKET_TYPE_GROUP 0x00001000 +#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00002000 +#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00004000 +#define NDIS_PACKET_TYPE_MAC_FRAME 0x00008000 + +#endif /* !_NET_RNDIS_H_ */ diff --git a/drivers/net/netvsc/rte_pmd_netvsc_version.map b/drivers/net/netvsc/rte_pmd_netvsc_version.map new file mode 100644 index 00000000..d534019a --- /dev/null +++ b/drivers/net/netvsc/rte_pmd_netvsc_version.map @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ + +DPDK_18.08 { + local: *; +}; diff --git a/drivers/net/nfp/Makefile b/drivers/net/nfp/Makefile index aa3b68a4..ab4e0a7d 100644 --- a/drivers/net/nfp/Makefile +++ b/drivers/net/nfp/Makefile @@ -20,11 +20,24 @@ EXPORT_MAP := rte_pmd_nfp_version.map LIBABIVER := 1 +VPATH += $(SRCDIR)/nfpcore + +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_cppcore.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_cpp_pcie_ops.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_mutex.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_resource.c 
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_crc.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_mip.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nffw.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_hwinfo.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_rtsym.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp_cmds.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp_eth.c + # # all source are stored in SRCS-y # SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_net.c -SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nfpu.c -SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nspu.c include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/nfp/meson.build b/drivers/net/nfp/meson.build new file mode 100644 index 00000000..3ba37e27 --- /dev/null +++ b/drivers/net/nfp/meson.build @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +sources = files('nfpcore/nfp_cpp_pcie_ops.c', + 'nfpcore/nfp_nsp.c', + 'nfpcore/nfp_cppcore.c', + 'nfpcore/nfp_resource.c', + 'nfpcore/nfp_mip.c', + 'nfpcore/nfp_nffw.c', + 'nfpcore/nfp_rtsym.c', + 'nfpcore/nfp_nsp_cmds.c', + 'nfpcore/nfp_crc.c', + 'nfpcore/nfp_mutex.c', + 'nfpcore/nfp_nsp_eth.c', + 'nfpcore/nfp_hwinfo.c', + 'nfp_net.c') diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c index e5bfde62..6e5e305f 100644 --- a/drivers/net/nfp/nfp_net.c +++ b/drivers/net/nfp/nfp_net.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015 Netronome Systems, Inc. + * Copyright (c) 2014-2018 Netronome Systems, Inc. * All rights reserved. * * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation. @@ -55,7 +55,13 @@ #include #include -#include "nfp_nfpu.h" +#include "nfpcore/nfp_cpp.h" +#include "nfpcore/nfp_nffw.h" +#include "nfpcore/nfp_hwinfo.h" +#include "nfpcore/nfp_mip.h" +#include "nfpcore/nfp_rtsym.h" +#include "nfpcore/nfp_nsp.h" + #include "nfp_net_pmd.h" #include "nfp_net_logs.h" #include "nfp_net_ctrl.h" @@ -103,13 +109,11 @@ static int nfp_net_rss_reta_write(struct rte_eth_dev *dev, uint16_t reta_size); static int nfp_net_rss_hash_write(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); +static int nfp_set_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *mac_addr); -/* - * The offset of the queue controller queues in the PCIe Target. These - * happen to be at the same offset on the NFP6000 and the NFP3200 so - * we use a single macro here. - */ -#define NFP_PCIE_QUEUE(_q) (0x800 * ((_q) & 0xff)) +/* The offset of the queue controller queues in the PCIe Target */ +#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff))) /* Maximum value which can be added to a queue with one transaction */ #define NFP_QCP_MAX_ADD 0x7f @@ -213,57 +217,6 @@ nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val) nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off); } -/* - * Atomically reads link status information from global structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -nfp_net_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &dev->data->dev_link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/* - * Atomically writes the link status information into global - * structure rte_eth_dev. 
- * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -nfp_net_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &dev->data->dev_link; - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - static void nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq) { @@ -310,7 +263,7 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq) for (i = 0; i < txq->tx_count; i++) { if (txq->txbufs[i].mbuf) { - rte_pktmbuf_free(txq->txbufs[i].mbuf); + rte_pktmbuf_free_seg(txq->txbufs[i].mbuf); txq->txbufs[i].mbuf = NULL; } } @@ -343,7 +296,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) uint32_t new; struct timespec wait; - PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...\n", + PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...", hw->qcp_cfg); if (hw->qcp_cfg == NULL) @@ -354,7 +307,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) wait.tv_sec = 0; wait.tv_nsec = 1000000; - PMD_DRV_LOG(DEBUG, "Polling for update ack...\n"); + PMD_DRV_LOG(DEBUG, "Polling for update ack..."); /* Poll update field, waiting for NFP to ack the config */ for (cnt = 0; ; cnt++) { @@ -372,7 +325,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) } nanosleep(&wait, 0); /* waiting for a 1ms */ } - PMD_DRV_LOG(DEBUG, "Ack DONE\n"); + PMD_DRV_LOG(DEBUG, "Ack DONE"); return 0; } @@ -390,7 +343,7 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update) { uint32_t err; - PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n", + PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x", ctrl, update); rte_spinlock_lock(&hw->reconfig_lock); @@ -427,8 +380,6 @@ nfp_net_configure(struct rte_eth_dev *dev) struct rte_eth_conf *dev_conf; struct rte_eth_rxmode *rxmode; struct rte_eth_txmode *txmode; - uint32_t new_ctrl = 0; - uint32_t update = 0; struct nfp_net_hw *hw; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -454,96 +405,17 @@ nfp_net_configure(struct rte_eth_dev *dev) } /* Checking RX mode */ - if (rxmode->mq_mode & ETH_MQ_RX_RSS) { - if (hw->cap & NFP_NET_CFG_CTRL_RSS) { - update = NFP_NET_CFG_UPDATE_RSS; - new_ctrl = NFP_NET_CFG_CTRL_RSS; - } else { - PMD_INIT_LOG(INFO, "RSS not supported"); - return -EINVAL; - } - } - - if (rxmode->split_hdr_size) { - PMD_INIT_LOG(INFO, "rxmode does not support split header"); - return -EINVAL; - } - - if (rxmode->hw_ip_checksum) { - if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) { - new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM; - } else { - PMD_INIT_LOG(INFO, "RXCSUM not supported"); - return -EINVAL; - } - } - - if (rxmode->hw_vlan_filter) { - PMD_INIT_LOG(INFO, "VLAN filter not supported"); - return -EINVAL; - } - - if (rxmode->hw_vlan_strip) { - if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) { - new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN; - } else { - PMD_INIT_LOG(INFO, "hw vlan strip not supported"); - return -EINVAL; - } - } - - if (rxmode->hw_vlan_extend) { - PMD_INIT_LOG(INFO, "VLAN extended not supported"); - return -EINVAL; - } - - if (rxmode->jumbo_frame) - hw->mtu = rxmode->max_rx_pkt_len; - - if (!rxmode->hw_strip_crc) - PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable"); - - if (rxmode->enable_scatter) { - PMD_INIT_LOG(INFO, "Scatter not 
supported"); + if (rxmode->mq_mode & ETH_MQ_RX_RSS && + !(hw->cap & NFP_NET_CFG_CTRL_RSS)) { + PMD_INIT_LOG(INFO, "RSS not supported"); return -EINVAL; } - /* If next capabilities are supported, configure them by default */ - - /* VLAN insertion */ - if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN) - new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN; - - /* L2 broadcast */ - if (hw->cap & NFP_NET_CFG_CTRL_L2BC) - new_ctrl |= NFP_NET_CFG_CTRL_L2BC; - - /* L2 multicast */ - if (hw->cap & NFP_NET_CFG_CTRL_L2MC) - new_ctrl |= NFP_NET_CFG_CTRL_L2MC; - - /* TX checksum offload */ - if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM) - new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM; - - /* LSO offload */ - if (hw->cap & NFP_NET_CFG_CTRL_LSO) - new_ctrl |= NFP_NET_CFG_CTRL_LSO; - - /* RX gather */ - if (hw->cap & NFP_NET_CFG_CTRL_GATHER) - new_ctrl |= NFP_NET_CFG_CTRL_GATHER; - - if (!new_ctrl) - return 0; - - update |= NFP_NET_CFG_UPDATE_GEN; - - nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl); - if (nfp_net_reconfig(hw, new_ctrl, update) < 0) - return -EIO; - - hw->ctrl = new_ctrl; + /* KEEP_CRC offload flag is not supported by PMD + * this can be removed when DEV_RX_OFFLOAD_CRC_STRIP is removed + */ + if (rte_eth_dev_must_keep_crc(rxmode->offloads)) + PMD_INIT_LOG(INFO, "HW does strip CRC. Not configurable!"); return 0; } @@ -625,47 +497,29 @@ nfp_net_cfg_queue_setup(struct nfp_net_hw *hw) #define ETH_ADDR_LEN 6 static void -nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src) +nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src) { int i; for (i = 0; i < ETH_ADDR_LEN; i++) - dst[ETH_ADDR_LEN - i - 1] = src[i]; + dst[i] = src[i]; } static int nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port) { - union eth_table_entry *entry; - int idx, i; - - idx = port; - entry = hw->eth_table; - - /* Reading NFP ethernet table obtained before */ - for (i = 0; i < NSP_ETH_MAX_COUNT; i++) { - if (!(entry->port & NSP_ETH_PORT_LANES_MASK)) { - /* port not in use */ - entry++; - continue; - } - if (idx == 0) - break; - idx--; - entry++; - } - - if (i == NSP_ETH_MAX_COUNT) - return -EINVAL; + struct nfp_eth_table *nfp_eth_table; + nfp_eth_table = nfp_eth_read_ports(hw->cpp); /* * hw points to port0 private data. We need hw now pointing to * right port.
*/ hw += port; - nfp_eth_copy_mac_reverse((uint8_t *)&hw->mac_addr, - (uint8_t *)&entry->mac_addr); + nfp_eth_copy_mac((uint8_t *)&hw->mac_addr, + (uint8_t *)&nfp_eth_table->ports[port].mac_addr); + free(nfp_eth_table); return 0; } @@ -675,7 +529,7 @@ nfp_net_vf_read_mac(struct nfp_net_hw *hw) uint32_t tmp; tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR)); - memcpy(&hw->mac_addr[0], &tmp, sizeof(struct ether_addr)); + memcpy(&hw->mac_addr[0], &tmp, 4); tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4)); memcpy(&hw->mac_addr[4], &tmp, 2); @@ -695,6 +549,37 @@ nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac) hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6); } +int +nfp_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr) +{ + struct nfp_net_hw *hw; + uint32_t update, ctrl; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) && + !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) { + PMD_INIT_LOG(INFO, "MAC address unable to change when" + " port enabled"); + return -EBUSY; + } + + if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) && + !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) + return -EBUSY; + + /* Writing new MAC to the specific port BAR address */ + nfp_net_write_mac(hw, (uint8_t *)mac_addr); + + /* Signal the NIC about the change */ + update = NFP_NET_CFG_UPDATE_MACADDR; + ctrl = hw->ctrl | NFP_NET_CFG_CTRL_LIVE_ADDR; + if (nfp_net_reconfig(hw, ctrl, update) < 0) { + PMD_INIT_LOG(INFO, "MAC address update failed"); + return -EIO; + } + return 0; +} + static int nfp_configure_rx_interrupt(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle) @@ -729,7 +614,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev, */ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1); intr_handle->intr_vec[i] = i + 1; - PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d\n", i, + PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i, intr_handle->intr_vec[i]); } } @@ -739,15 +624,75 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev, return 0; } +static uint32_t +nfp_check_offloads(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + struct rte_eth_conf *dev_conf; + struct rte_eth_rxmode *rxmode; + struct rte_eth_txmode *txmode; + uint32_t ctrl = 0; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_conf = &dev->data->dev_conf; + rxmode = &dev_conf->rxmode; + txmode = &dev_conf->txmode; + + if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) { + if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) + ctrl |= NFP_NET_CFG_CTRL_RXCSUM; + } + + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) + ctrl |= NFP_NET_CFG_CTRL_RXVLAN; + } + + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + hw->mtu = rxmode->max_rx_pkt_len; + + if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) + ctrl |= NFP_NET_CFG_CTRL_TXVLAN; + + /* L2 broadcast */ + if (hw->cap & NFP_NET_CFG_CTRL_L2BC) + ctrl |= NFP_NET_CFG_CTRL_L2BC; + + /* L2 multicast */ + if (hw->cap & NFP_NET_CFG_CTRL_L2MC) + ctrl |= NFP_NET_CFG_CTRL_L2MC; + + /* TX checksum offload */ + if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM || + txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM || + txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) + ctrl |= NFP_NET_CFG_CTRL_TXCSUM; + + /* LSO offload */ + if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) { + if (hw->cap & NFP_NET_CFG_CTRL_LSO) + ctrl |= NFP_NET_CFG_CTRL_LSO; + else + ctrl |= NFP_NET_CFG_CTRL_LSO2; + } + + /* RX gather */ + if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) + ctrl |= NFP_NET_CFG_CTRL_GATHER; + + return ctrl; +} 
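nfp_check_offloads() only computes the control word; nfp_net_start() below then writes it and kicks the reconfig handshake. A condensed sketch of that apply step, using the helpers defined in this file (demo_apply_offloads itself is hypothetical and skips the RSS and queue setup the real start path performs):

static int demo_apply_offloads(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	uint32_t new_ctrl, update;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	new_ctrl = nfp_check_offloads(dev) | NFP_NET_CFG_CTRL_ENABLE;
	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	/* Write the desired control word, then poll for the firmware ack */
	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	hw->ctrl = new_ctrl;	/* cache what the device now runs with */
	return 0;
}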
+ static int nfp_net_start(struct rte_eth_dev *dev) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; - struct rte_eth_conf *dev_conf; - struct rte_eth_rxmode *rxmode; uint32_t new_ctrl, update = 0; struct nfp_net_hw *hw; + struct rte_eth_conf *dev_conf; + struct rte_eth_rxmode *rxmode; uint32_t intr_vector; int ret; @@ -758,9 +703,6 @@ nfp_net_start(struct rte_eth_dev *dev) /* Disabling queues just in case... */ nfp_net_disable_queues(dev); - /* Writing configuration parameters in the device */ - nfp_net_params_setup(hw); - /* Enabling the required queues in the device */ nfp_net_enable_queues(dev); @@ -795,21 +737,22 @@ nfp_net_start(struct rte_eth_dev *dev) rte_intr_enable(intr_handle); + new_ctrl = nfp_check_offloads(dev); + + /* Writing configuration parameters in the device */ + nfp_net_params_setup(hw); + dev_conf = &dev->data->dev_conf; rxmode = &dev_conf->rxmode; - /* Checking RX mode */ if (rxmode->mq_mode & ETH_MQ_RX_RSS) { - if (hw->cap & NFP_NET_CFG_CTRL_RSS) { - if (!nfp_net_rss_config_default(dev)) - update |= NFP_NET_CFG_UPDATE_RSS; - } else { - PMD_INIT_LOG(INFO, "RSS not supported"); - return -EINVAL; - } + nfp_net_rss_config_default(dev); + update |= NFP_NET_CFG_UPDATE_RSS; + new_ctrl |= NFP_NET_CFG_CTRL_RSS; } + /* Enable device */ - new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE; + new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; @@ -831,7 +774,7 @@ nfp_net_start(struct rte_eth_dev *dev) if (hw->is_pf) /* Configure the physical port up */ - nfp_nsp_eth_config(hw->nspu_desc, hw->pf_port_idx, 1); + nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1); hw->ctrl = new_ctrl; @@ -882,7 +825,7 @@ nfp_net_stop(struct rte_eth_dev *dev) if (hw->is_pf) /* Configure the physical port down */ - nfp_nsp_eth_config(hw->nspu_desc, hw->pf_port_idx, 0); + nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0); } /* Reset and stop device. The device can not be restarted. 
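
/*
 * [Editor's note] The rewritten start path above follows a single
 * "accumulate ctrl bits, then one reconfig" pattern: ctrl describes the
 * desired vNIC state, update tells the firmware which subsystems to
 * re-read. Condensed, using only symbols the patch itself defines:
 *
 *	new_ctrl  = nfp_check_offloads(dev);	(capability-gated bits)
 *	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
 *	update    = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 *	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
 *		fail;
 *	hw->ctrl = new_ctrl;			(cache accepted state)
 */
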
*/ @@ -936,7 +879,7 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev) uint32_t new_ctrl, update = 0; struct nfp_net_hw *hw; - PMD_DRV_LOG(DEBUG, "Promiscuous mode enable\n"); + PMD_DRV_LOG(DEBUG, "Promiscuous mode enable"); hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -946,7 +889,7 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev) } if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) { - PMD_DRV_LOG(INFO, "Promiscuous mode already enabled\n"); + PMD_DRV_LOG(INFO, "Promiscuous mode already enabled"); return; } @@ -972,7 +915,7 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev) hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) { - PMD_DRV_LOG(INFO, "Promiscuous mode already disabled\n"); + PMD_DRV_LOG(INFO, "Promiscuous mode already disabled"); return; } @@ -999,8 +942,9 @@ static int nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) { struct nfp_net_hw *hw; - struct rte_eth_link link, old; + struct rte_eth_link link; uint32_t nn_link_status; + int ret; static const uint32_t ls_to_ethtool[] = { [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE, @@ -1013,13 +957,10 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) [NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G, }; - PMD_DRV_LOG(DEBUG, "Link update\n"); + PMD_DRV_LOG(DEBUG, "Link update"); hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - memset(&old, 0, sizeof(old)); - nfp_net_dev_atomic_read_link_status(dev, &old); - nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS); memset(&link, 0, sizeof(struct rte_eth_link)); @@ -1037,16 +978,14 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) else link.link_speed = ls_to_ethtool[nn_link_status]; - if (old.link_status != link.link_status) { - nfp_net_dev_atomic_write_link_status(dev, &link); + ret = rte_eth_linkstatus_set(dev, &link); + if (ret == 0) { if (link.link_status) - PMD_DRV_LOG(INFO, "NIC Link is Up\n"); + PMD_DRV_LOG(INFO, "NIC Link is Up"); else - PMD_DRV_LOG(INFO, "NIC Link is Down\n"); - return 0; + PMD_DRV_LOG(INFO, "NIC Link is Down"); } - - return -1; + return ret; } static int @@ -1214,7 +1153,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues; dev_info->min_rx_bufsize = ETHER_MIN_MTU; @@ -1230,6 +1168,9 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_KEEP_CRC; + if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN) dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT; @@ -1238,6 +1179,12 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM; + if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO; + + if (hw->cap & NFP_NET_CFG_CTRL_GATHER) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS; + dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { .pthresh = DEFAULT_RX_PTHRESH, @@ -1256,8 +1203,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .tx_free_thresh = DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH, - .txq_flags = 
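
/*
 * [Editor's sketch] Generic shape of the link_update conversion above.
 * rte_eth_linkstatus_set() (ethdev driver API since DPDK 18.05) stores
 * the new state atomically and reports whether it changed, so PMDs no
 * longer need to keep their own old/new copies.
 */
#include <string.h>
#include <rte_ethdev_driver.h>

static int
example_link_update(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	/* in a real PMD these three fields come from device registers */
	link.link_status = ETH_LINK_UP;
	link.link_speed = ETH_SPEED_NUM_25G;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* returns 0 when the status changed, -1 when already current */
	return rte_eth_linkstatus_set(dev, &link);
}
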
ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, }; dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP | @@ -1268,12 +1213,9 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ; dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ; - dev_info->speed_capa = ETH_SPEED_NUM_1G | ETH_LINK_SPEED_10G | - ETH_SPEED_NUM_25G | ETH_SPEED_NUM_40G | - ETH_SPEED_NUM_50G | ETH_LINK_SPEED_100G; - - if (hw->cap & NFP_NET_CFG_CTRL_LSO) - dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO; + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G; } static const uint32_t * @@ -1376,18 +1318,17 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_eth_link link; - memset(&link, 0, sizeof(link)); - nfp_net_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); if (link.link_status) - RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n", - dev->data->port_id, link.link_speed, - link.link_duplex == ETH_LINK_FULL_DUPLEX - ? "full-duplex" : "half-duplex"); + PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", + dev->data->port_id, link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX + ? "full-duplex" : "half-duplex"); else - RTE_LOG(INFO, PMD, " Port %d: Link Down\n", - dev->data->port_id); + PMD_DRV_LOG(INFO, " Port %d: Link Down", + dev->data->port_id); - RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n", + PMD_DRV_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d", pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function); } @@ -1428,11 +1369,9 @@ nfp_net_dev_interrupt_handler(void *param) struct rte_eth_link link; struct rte_eth_dev *dev = (struct rte_eth_dev *)param; - PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!\n"); + PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!"); - /* get the link status */ - memset(&link, 0, sizeof(link)); - nfp_net_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); nfp_net_link_update(dev, 0); @@ -1449,7 +1388,7 @@ nfp_net_dev_interrupt_handler(void *param) if (rte_eal_alarm_set(timeout * 1000, nfp_net_dev_interrupt_delayed_handler, (void *)dev) < 0) { - RTE_LOG(ERR, PMD, "Error setting alarm"); + PMD_INIT_LOG(ERR, "Error setting alarm"); /* Unmasking */ nfp_net_irq_unmask(dev); } @@ -1500,9 +1439,9 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) /* switch to jumbo mode if needed */ if ((uint32_t)mtu > ETHER_MAX_LEN) - dev->data->dev_conf.rxmode.jumbo_frame = 1; + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; else - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; /* update max frame size */ dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu; @@ -1534,7 +1473,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 || (nb_desc > NFP_NET_MAX_RX_DESC) || (nb_desc < NFP_NET_MIN_RX_DESC)) { - RTE_LOG(ERR, PMD, "Wrong nb_desc value\n"); + PMD_DRV_LOG(ERR, "Wrong nb_desc value"); return -EINVAL; } @@ -1572,8 +1511,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, rxq->rx_count = nb_desc; rxq->port_id = dev->data->port_id; rxq->rx_free_thresh = rx_conf->rx_free_thresh; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 
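
/*
 * [Editor's sketch] Application side of the mtu_set hook above: with the
 * 18.08 offload API the driver itself flips DEV_RX_OFFLOAD_JUMBO_FRAME
 * and max_rx_pkt_len once the MTU crosses ETHER_MAX_LEN, so enabling
 * jumbo frames is a single call (9000 is an arbitrary jumbo value):
 */
#include <rte_ethdev.h>

static int
example_enable_jumbo(uint16_t port_id)
{
	/* fails with a negative errno if outside the device MTU range */
	return rte_eth_dev_set_mtu(port_id, 9000);
}
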
0 - : ETHER_CRC_LEN); rxq->drop_en = rx_conf->rx_drop_en; /* @@ -1587,7 +1524,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, socket_id); if (tz == NULL) { - RTE_LOG(ERR, PMD, "Error allocatig rx dma\n"); + PMD_DRV_LOG(ERR, "Error allocatig rx dma"); nfp_net_rx_queue_release(rxq); return -ENOMEM; } @@ -1605,7 +1542,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, return -ENOMEM; } - PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n", + PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64, rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma); nfp_net_reset_rx_queue(rxq); @@ -1630,7 +1567,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq) uint64_t dma_addr; unsigned i; - PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors\n", + PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors", rxq->rx_count); for (i = 0; i < rxq->rx_count; i++) { @@ -1638,7 +1575,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq) struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool); if (mbuf == NULL) { - RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n", + PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%u", (unsigned)rxq->qidx); return -ENOMEM; } @@ -1650,14 +1587,14 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq) rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff; rxd->fld.dma_addr_lo = dma_addr & 0xffffffff; rxe[i].mbuf = mbuf; - PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr); + PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr); } /* Make sure all writes are flushed before telling the hardware */ rte_wmb(); /* Not advertising the whole ring as the firmware gets confused if so */ - PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u\n", + PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u", rxq->rx_count - 1); nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1); @@ -1683,7 +1620,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 || (nb_desc > NFP_NET_MAX_TX_DESC) || (nb_desc < NFP_NET_MIN_TX_DESC)) { - RTE_LOG(ERR, PMD, "Wrong nb_desc value\n"); + PMD_DRV_LOG(ERR, "Wrong nb_desc value"); return -EINVAL; } @@ -1692,10 +1629,10 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, DEFAULT_TX_FREE_THRESH); if (tx_free_thresh > (nb_desc)) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the number of TX " "descriptors. 
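
/*
 * [Editor's sketch] The freelist fill above programs each descriptor
 * with the mbuf bus address split into 8 high and 32 low bits (a 40-bit
 * NFP DMA address). Self-contained form of that split:
 */
#include <stdint.h>

static inline void
example_split_dma_addr(uint64_t dma_addr, uint8_t *hi, uint32_t *lo)
{
	*hi = (dma_addr >> 32) & 0xff;	/* bits 39:32 */
	*lo = dma_addr & 0xffffffff;	/* bits 31:0 */
}
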
(tx_free_thresh=%u port=%d " - "queue=%d)\n", (unsigned int)tx_free_thresh, + "queue=%d)", (unsigned int)tx_free_thresh, dev->data->port_id, (int)queue_idx); return -(EINVAL); } @@ -1705,7 +1642,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, * calling nfp_net_stop */ if (dev->data->tx_queues[queue_idx]) { - PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d\n", + PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", queue_idx); nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]); dev->data->tx_queues[queue_idx] = NULL; @@ -1715,7 +1652,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq), RTE_CACHE_LINE_SIZE, socket_id); if (txq == NULL) { - RTE_LOG(ERR, PMD, "Error allocating tx dma\n"); + PMD_DRV_LOG(ERR, "Error allocating tx dma"); return -ENOMEM; } @@ -1729,7 +1666,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN, socket_id); if (tz == NULL) { - RTE_LOG(ERR, PMD, "Error allocating tx dma\n"); + PMD_DRV_LOG(ERR, "Error allocating tx dma"); nfp_net_tx_queue_release(txq); return -ENOMEM; } @@ -1746,7 +1683,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx); txq->port_id = dev->data->port_id; - txq->txq_flags = tx_conf->txq_flags; /* Saving physical and virtual addresses for the TX ring */ txq->dma = (uint64_t)tz->iova; @@ -1760,7 +1696,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, nfp_net_tx_queue_release(txq); return -ENOMEM; } - PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n", + PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64, txq->txbufs, txq->txds, (unsigned long int)txq->dma); nfp_net_reset_tx_queue(txq); @@ -1786,7 +1722,7 @@ nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd, uint64_t ol_flags; struct nfp_net_hw *hw = txq->hw; - if (!(hw->cap & NFP_NET_CFG_CTRL_LSO)) + if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)) goto clean_txd; ol_flags = mb->ol_flags; @@ -1794,15 +1730,19 @@ nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd, if (!(ol_flags & PKT_TX_TCP_SEG)) goto clean_txd; - txd->l4_offset = mb->l2_len + mb->l3_len + mb->l4_len; - txd->lso = rte_cpu_to_le_16(mb->tso_segsz); + txd->l3_offset = mb->l2_len; + txd->l4_offset = mb->l2_len + mb->l3_len; + txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len; + txd->mss = rte_cpu_to_le_16(mb->tso_segsz); txd->flags = PCIE_DESC_TX_LSO; return; clean_txd: txd->flags = 0; + txd->l3_offset = 0; txd->l4_offset = 0; - txd->lso = 0; + txd->lso_hdrlen = 0; + txd->mss = 0; } /* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */ @@ -1888,14 +1828,10 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd, if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) return; - if (NFD_CFG_MAJOR_VERSION_of(hw->ver) <= 3) { - if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS)) - return; - - hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET); - hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET); - - } else if (NFP_DESC_META_LEN(rxd)) { + /* this is true for new firmwares */ + if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) || + (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) && + NFP_DESC_META_LEN(rxd))) { /* * new metadata api: * <---- 32 bit -----> @@ -1928,7 +1864,11 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd, return; } } else { - return; + 
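
/*
 * [Editor's sketch] nfp_net_tx_tso() above fills the new TSOv2 fields
 * (l3_offset, l4_offset, lso_hdrlen, mss) straight from mbuf metadata,
 * so a transmitting application must populate it. Minimal TCP/IPv4
 * example using the 18.08 header structs:
 */
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

static void
example_request_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->l2_len = sizeof(struct ether_hdr);	/* 14 bytes, no VLAN */
	m->l3_len = sizeof(struct ipv4_hdr);	/* 20 bytes, no options */
	m->l4_len = sizeof(struct tcp_hdr);	/* 20 bytes, no options */
	m->tso_segsz = mss;			/* payload bytes per segment */
	m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
}
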
if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS)) + return; + + hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET); + hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET); } mbuf->hash.rss = hash; @@ -2019,16 +1959,16 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) break; } + rxds = &rxq->rxds[rxq->rd_p]; + if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0) + break; + /* * Memory barrier to ensure that we won't do other * reads before the DD bit. */ rte_rmb(); - rxds = &rxq->rxds[rxq->rd_p]; - if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0) - break; - /* * We got a packet. Let's alloc a new mbuff for refilling the * free descriptor ring as soon as possible @@ -2051,7 +1991,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) mb = rxb->mbuf; rxb->mbuf = new_mb; - PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u\n", + PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u", rxds->rxd.data_len, rxq->mbuf_size); /* Size of this segment */ @@ -2089,6 +2029,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) mb->nb_segs = 1; mb->next = NULL; + mb->port = rxq->port_id; + /* Checking the RSS flag */ nfp_net_set_hash(rxq, rxds, mb); @@ -2120,7 +2062,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) if (nb_hold == 0) return nb_hold; - PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n", + PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received", rxq->port_id, (unsigned int)rxq->qidx, nb_hold); nb_hold += rxq->nb_rx_hold; @@ -2131,7 +2073,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) */ rte_wmb(); if (nb_hold > rxq->rx_free_thresh) { - PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n", + PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u", rxq->port_id, (unsigned int)rxq->qidx, (unsigned)nb_hold, (unsigned)avail); nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold); @@ -2155,14 +2097,14 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq) int todo; PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete" - " status\n", txq->qidx); + " status", txq->qidx); /* Work out how many packets have been sent */ qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR); if (qcp_rd_p == txq->rd_p) { PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending " - "packets (%u, %u)\n", txq->qidx, + "packets (%u, %u)", txq->qidx, qcp_rd_p, txq->rd_p); return 0; } @@ -2172,7 +2114,7 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq) else todo = qcp_rd_p + txq->tx_count - txq->rd_p; - PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u\n", + PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u", qcp_rd_p, txq->rd_p, txq->rd_p); if (todo == 0) @@ -2226,7 +2168,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) hw = txq->hw; txds = &txq->txds[txq->wr_p]; - PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets\n", + PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets", txq->qidx, txq->wr_p, nb_pkts); if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq))) @@ -2240,7 +2182,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) i = 0; issued_descs = 0; - PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets\n", + PMD_TX_LOG(DEBUG, "queue: %u. 
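
/*
 * [Editor's note] The RX loop change above is the usual check-then-fence
 * idiom, generalized:
 *
 *	1) read only the DD (descriptor done) flag;
 *	2) if set, issue rte_rmb() so no other descriptor word is read
 *	   (out of order or speculatively) before DD was observed set;
 *	3) only then consume the rest of the descriptor.
 *
 * Fencing before the DD check, as the old code did, ordered the wrong
 * pair of reads and paid for a barrier even when no packet had arrived.
 */
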
Sending %u packets", txq->qidx, nb_pkts); /* Sending packets */ while ((i < nb_pkts) && free_descs) { @@ -2299,7 +2241,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) dma_size = pkt->data_len; dma_addr = rte_mbuf_data_iova(pkt); PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:" - "%" PRIx64 "\n", dma_addr); + "%" PRIx64 "", dma_addr); /* Filling descriptors fields */ txds->dma_len = dma_size; @@ -2314,11 +2256,15 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) txq->wr_p = 0; pkt_size -= dma_size; - if (!pkt_size) - /* End of packet */ - txds->offset_eop |= PCIE_DESC_TX_EOP; + + /* + * Making the EOP, packets with just one segment + * the priority + */ + if (likely(!pkt_size)) + txds->offset_eop = PCIE_DESC_TX_EOP; else - txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK; + txds->offset_eop = 0; pkt = pkt->next; /* Referencing next free TX descriptor */ @@ -2349,7 +2295,7 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask) if ((mask & ETH_VLAN_FILTER_OFFLOAD) || (mask & ETH_VLAN_EXTEND_OFFLOAD)) - RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or" + PMD_DRV_LOG(INFO, "No support for ETH_VLAN_FILTER_OFFLOAD or" " ETH_VLAN_EXTEND_OFFLOAD"); /* Enable vlan strip if it is not configured yet */ @@ -2386,9 +2332,9 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev, NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) { - RTE_LOG(ERR, PMD, "The size of hash lookup table configured " + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); + "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); return -EINVAL; } @@ -2467,9 +2413,9 @@ nfp_net_reta_query(struct rte_eth_dev *dev, return -EINVAL; if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) { - RTE_LOG(ERR, PMD, "The size of hash lookup table configured " + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); + "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); return -EINVAL; } @@ -2555,14 +2501,14 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev, /* Checking if RSS is enabled */ if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) { if (rss_hf != 0) { /* Enable RSS? 
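
/*
 * [Editor's sketch] Driving the reta_update hook above from an
 * application. The NFP exposes a 128-entry indirection table
 * (NFP_NET_CFG_RSS_ITBL_SZ, reported via dev_info.reta_size), i.e. two
 * rte_eth_rss_reta_entry64 groups; here it is spread over two queues.
 */
#include <string.h>
#include <rte_ethdev.h>

static int
example_reta_two_queues(uint16_t port_id)
{
	struct rte_eth_rss_reta_entry64 reta[2];
	int i;

	memset(reta, 0, sizeof(reta));
	for (i = 0; i < 128; i++) {
		reta[i / RTE_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_RETA_GROUP_SIZE);
		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
			i % 2;	/* alternate between queue 0 and queue 1 */
	}
	return rte_eth_dev_rss_reta_update(port_id, reta, 128);
}
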
*/ - RTE_LOG(ERR, PMD, "RSS unsupported\n"); + PMD_DRV_LOG(ERR, "RSS unsupported"); return -EINVAL; } return 0; /* Nothing to do */ } if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) { - RTE_LOG(ERR, PMD, "hash key too long\n"); + PMD_DRV_LOG(ERR, "hash key too long"); return -EINVAL; } @@ -2634,7 +2580,7 @@ nfp_net_rss_config_default(struct rte_eth_dev *dev) uint16_t queue; int i, j, ret; - RTE_LOG(INFO, PMD, "setting default RSS conf for %u queues\n", + PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues", rx_queues); nfp_reta_conf[0].mask = ~0x0; @@ -2654,7 +2600,7 @@ nfp_net_rss_config_default(struct rte_eth_dev *dev) dev_conf = &dev->data->dev_conf; if (!dev_conf) { - RTE_LOG(INFO, PMD, "wrong rss conf"); + PMD_DRV_LOG(INFO, "wrong rss conf"); return -EINVAL; } rss_conf = dev_conf->rx_adv_conf.rss_conf; @@ -2679,6 +2625,7 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = { .dev_infos_get = nfp_net_infos_get, .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, .mtu_set = nfp_net_dev_mtu_set, + .mac_addr_set = nfp_set_mac_addr, .vlan_offload_set = nfp_net_vlan_offload_set, .reta_update = nfp_net_reta_update, .reta_query = nfp_net_reta_query, @@ -2734,10 +2681,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) uint64_t tx_bar_off = 0, rx_bar_off = 0; uint32_t start_q; int stride = 4; - - nspu_desc_t *nspu_desc = NULL; - uint64_t bar_offset; int port = 0; + int err; PMD_INIT_FUNC_TRACE(); @@ -2747,18 +2692,17 @@ nfp_net_init(struct rte_eth_dev *eth_dev) (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) { port = get_pf_port_number(eth_dev->data->name); if (port < 0 || port > 7) { - RTE_LOG(ERR, PMD, "Port value is wrong\n"); + PMD_DRV_LOG(ERR, "Port value is wrong"); return -ENODEV; } - PMD_INIT_LOG(DEBUG, "Working with PF port value %d\n", port); + PMD_INIT_LOG(DEBUG, "Working with PF port value %d", port); /* This points to port 0 private data */ hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); /* This points to the specific port private data */ hw = &hwport0[port]; - hw->pf_port_idx = port; } else { hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); hwport0 = 0; @@ -2786,26 +2730,21 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr; if (hw->ctrl_bar == NULL) { - RTE_LOG(ERR, PMD, - "hw->ctrl_bar is NULL. BAR0 not configured\n"); + PMD_DRV_LOG(ERR, + "hw->ctrl_bar is NULL. BAR0 not configured"); return -ENODEV; } if (hw->is_pf && port == 0) { - nspu_desc = hw->nspu_desc; - - if (nfp_nsp_map_ctrl_bar(nspu_desc, &bar_offset) != 0) { - /* - * A firmware should be there after PF probe so this - * should not happen. 
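
/*
 * [Editor's sketch] With .mac_addr_set wired into the ops table above,
 * the standard ethdev call now reaches nfp_set_mac_addr(). Illustrative,
 * locally administered address:
 */
#include <rte_ethdev.h>
#include <rte_ether.h>

static int
example_set_mac(uint16_t port_id)
{
	struct ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	/* -EBUSY if the port is up and the firmware lacks LIVE_ADDR */
	return rte_eth_dev_default_mac_addr_set(port_id, &addr);
}
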
- */ - RTE_LOG(ERR, PMD, "PF BAR symbol resolution failed\n"); - return -ENODEV; + hw->ctrl_bar = nfp_rtsym_map(hw->sym_tbl, "_pf0_net_bar0", + hw->total_ports * 32768, + &hw->ctrl_area); + if (!hw->ctrl_bar) { + printf("nfp_rtsym_map fails for _pf0_net_ctrl_bar"); + return -EIO; } - /* vNIC PF control BAR is a subset of PF PCI device BAR */ - hw->ctrl_bar += bar_offset; - PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar); + PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); } if (port > 0) { @@ -2817,7 +2756,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) (port * NFP_PF_CSR_SLICE_SIZE); } - PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar); + PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS); hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS); @@ -2828,31 +2767,34 @@ nfp_net_init(struct rte_eth_dev *eth_dev) case PCI_DEVICE_ID_NFP6000_PF_NIC: case PCI_DEVICE_ID_NFP6000_VF_NIC: start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); - tx_bar_off = NFP_PCIE_QUEUE(start_q); + tx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ; start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); - rx_bar_off = NFP_PCIE_QUEUE(start_q); + rx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ; break; default: - RTE_LOG(ERR, PMD, "nfp_net: no device ID matching\n"); - return -ENODEV; + PMD_DRV_LOG(ERR, "nfp_net: no device ID matching"); + err = -ENODEV; + goto dev_err_ctrl_map; } - PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "\n", tx_bar_off); - PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "\n", rx_bar_off); + PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off); + PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off); if (hw->is_pf && port == 0) { /* configure access to tx/rx vNIC BARs */ - nfp_nsp_map_queues_bar(nspu_desc, &bar_offset); - PMD_INIT_LOG(DEBUG, "tx/rx bar_offset: %" PRIx64 "\n", - bar_offset); - hwport0->hw_queues = (uint8_t *)pci_dev->mem_resource[0].addr; - - /* vNIC PF tx/rx BARs are a subset of PF PCI device */ - hwport0->hw_queues += bar_offset; + hwport0->hw_queues = nfp_cpp_map_area(hw->cpp, 0, 0, + NFP_PCIE_QUEUE(0), + NFP_QCP_QUEUE_AREA_SZ, + &hw->hwqueues_area); + + if (!hwport0->hw_queues) { + printf("nfp_rtsym_map fails for net.qc"); + err = -EIO; + goto dev_err_ctrl_map; + } - /* Lets seize the chance to read eth table from hw */ - if (nfp_nsp_eth_read_table(nspu_desc, &hw->eth_table)) - return -ENODEV; + PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", + hwport0->hw_queues); } if (hw->is_pf) { @@ -2877,14 +2819,20 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); hw->mtu = ETHER_MTU; + /* VLAN insertion is incompatible with LSOv2 */ + if (hw->cap & NFP_NET_CFG_CTRL_LSO2) + hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN; + if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2) hw->rx_offset = NFP_NET_RX_OFFSET; else hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR); - PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d", - hw->ver, hw->max_mtu); - PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s", hw->cap, + PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d", + NFD_CFG_MAJOR_VERSION_of(hw->ver), + NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu); + + PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap, hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "", hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "", hw->cap & NFP_NET_CFG_CTRL_L2MC ? 
"L2MCFILT " : "", @@ -2894,8 +2842,11 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "", hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "", hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "", + hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "", hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "", - hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : ""); + hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "", + hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "", + hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : ""); hw->ctrl = 0; @@ -2912,7 +2863,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, "Failed to space for MAC address"); - return -ENOMEM; + err = -ENOMEM; + goto dev_err_queues_map; } if (hw->is_pf) { @@ -2923,6 +2875,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) } if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) { + PMD_INIT_LOG(INFO, "Using random mac address for port %d", + port); /* Using random mac addresses for VFs */ eth_random_addr(&hw->mac_addr[0]); nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr); @@ -2951,11 +2905,19 @@ nfp_net_init(struct rte_eth_dev *eth_dev) nfp_net_stats_reset(eth_dev); return 0; + +dev_err_queues_map: + nfp_cpp_area_free(hw->hwqueues_area); +dev_err_ctrl_map: + nfp_cpp_area_free(hw->ctrl_area); + + return err; } static int nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports, - nfpu_desc_t *nfpu_desc, void **priv) + struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo, + int phys_port, struct nfp_rtsym_table *sym_tbl, void **priv) { struct rte_eth_dev *eth_dev; struct nfp_net_hw *hw; @@ -2993,12 +2955,16 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports, * Then dev_private is adjusted per port. 
*/ hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port; - hw->nspu_desc = nfpu_desc->nspu; - hw->nfpu_desc = nfpu_desc; + hw->cpp = cpp; + hw->hwinfo = hwinfo; + hw->sym_tbl = sym_tbl; + hw->pf_port_idx = phys_port; hw->is_pf = 1; if (ports > 1) hw->pf_multiport_enabled = 1; + hw->total_ports = ports; + eth_dev->device = &dev->device; rte_eth_copy_pci_info(eth_dev, dev); @@ -3006,82 +2972,228 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports, if (ret) rte_eth_dev_release_port(eth_dev); + else + rte_eth_dev_probing_finish(eth_dev); rte_free(port_name); return ret; } +#define DEFAULT_FW_PATH "/lib/firmware/netronome" + +static int +nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) +{ + struct nfp_cpp *cpp = nsp->cpp; + int fw_f; + char *fw_buf; + char fw_name[125]; + char serial[40]; + struct stat file_stat; + off_t fsize, bytes; + + /* Looking for firmware file in order of priority */ + + /* First try to find a firmware image specific for this device */ + sprintf(serial, "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", + cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3], + cpp->serial[4], cpp->serial[5], cpp->interface >> 8, + cpp->interface & 0xff); + + sprintf(fw_name, "%s/%s.nffw", DEFAULT_FW_PATH, serial); + + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + fw_f = open(fw_name, O_RDONLY); + if (fw_f > 0) + goto read_fw; + + /* Then try the PCI name */ + sprintf(fw_name, "%s/pci-%s.nffw", DEFAULT_FW_PATH, dev->device.name); + + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + fw_f = open(fw_name, O_RDONLY); + if (fw_f > 0) + goto read_fw; + + /* Finally try the card type and media */ + sprintf(fw_name, "%s/%s", DEFAULT_FW_PATH, card); + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + fw_f = open(fw_name, O_RDONLY); + if (fw_f < 0) { + PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name); + return -ENOENT; + } + +read_fw: + if (fstat(fw_f, &file_stat) < 0) { + PMD_DRV_LOG(INFO, "Firmware file %s size is unknown", fw_name); + close(fw_f); + return -ENOENT; + } + + fsize = file_stat.st_size; + PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %" PRIu64 "", + fw_name, (uint64_t)fsize); + + fw_buf = malloc((size_t)fsize); + if (!fw_buf) { + PMD_DRV_LOG(INFO, "malloc failed for fw buffer"); + close(fw_f); + return -ENOMEM; + } + memset(fw_buf, 0, fsize); + + bytes = read(fw_f, fw_buf, fsize); + if (bytes != fsize) { + PMD_DRV_LOG(INFO, "Reading fw to buffer failed." 
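
/*
 * [Editor's note] The firmware lookup above tries three names under
 * /lib/firmware/netronome, most specific first. Resolved examples for a
 * hypothetical card (serial, PCI address and partno are invented):
 *
 *	serial-00-15-4d-12-34-56-10-ff.nffw	per-NIC serial + interface
 *	pci-0000:04:00.0.nffw			per PCI address
 *	nic_AMDA0099-0001_2x25.nffw		per card type and media
 */
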
+ "Just %" PRIu64 " of %" PRIu64 " bytes read", + (uint64_t)bytes, (uint64_t)fsize); + free(fw_buf); + close(fw_f); + return -EIO; + } + + PMD_DRV_LOG(INFO, "Uploading the firmware ..."); + nfp_nsp_load_fw(nsp, fw_buf, bytes); + PMD_DRV_LOG(INFO, "Done"); + + free(fw_buf); + close(fw_f); + + return 0; +} + +static int +nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp, + struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo) +{ + struct nfp_nsp *nsp; + const char *nfp_fw_model; + char card_desc[100]; + int err = 0; + + nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno"); + + if (nfp_fw_model) { + PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model); + } else { + PMD_DRV_LOG(ERR, "firmware model NOT found"); + return -EIO; + } + + if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) { + PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u", + nfp_eth_table->count); + return -EIO; + } + + PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports", + nfp_eth_table->count); + + PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed); + + sprintf(card_desc, "nic_%s_%dx%d.nffw", nfp_fw_model, + nfp_eth_table->count, nfp_eth_table->ports[0].speed / 1000); + + nsp = nfp_nsp_open(cpp); + if (!nsp) { + PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); + return -EIO; + } + + nfp_nsp_device_soft_reset(nsp); + err = nfp_fw_upload(dev, nsp, card_desc); + + nfp_nsp_close(nsp); + return err; +} + static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *dev) { - nfpu_desc_t *nfpu_desc; - nspu_desc_t *nspu_desc; - uint64_t offset_symbol; - uint8_t *bar_offset; - int major, minor; + struct nfp_cpp *cpp; + struct nfp_hwinfo *hwinfo; + struct nfp_rtsym_table *sym_tbl; + struct nfp_eth_table *nfp_eth_table = NULL; int total_ports; void *priv = 0; int ret = -ENODEV; + int err; int i; if (!dev) return ret; - nfpu_desc = rte_malloc("nfp nfpu", sizeof(nfpu_desc_t), 0); - if (!nfpu_desc) - return -ENOMEM; + /* + * When device bound to UIO, the device could be used, by mistake, + * by two DPDK apps, and the UIO driver does not avoid it. This + * could lead to a serious problem when configuring the NFP CPP + * interface. Here we avoid this telling to the CPP init code to + * use a lock file if UIO is being used. + */ + if (dev->kdrv == RTE_KDRV_VFIO) + cpp = nfp_cpp_from_device_name(dev, 0); + else + cpp = nfp_cpp_from_device_name(dev, 1); - if (nfpu_open(dev, nfpu_desc, 0) < 0) { - RTE_LOG(ERR, PMD, - "nfpu_open failed\n"); - goto nfpu_error; + if (!cpp) { + PMD_DRV_LOG(ERR, "A CPP handle can not be obtained"); + ret = -EIO; + goto error; } - nspu_desc = nfpu_desc->nspu; + hwinfo = nfp_hwinfo_read(cpp); + if (!hwinfo) { + PMD_DRV_LOG(ERR, "Error reading hwinfo table"); + return -EIO; + } + nfp_eth_table = nfp_eth_read_ports(cpp); + if (!nfp_eth_table) { + PMD_DRV_LOG(ERR, "Error reading NFP ethernet table"); + return -EIO; + } - /* Check NSP ABI version */ - if (nfp_nsp_get_abi_version(nspu_desc, &major, &minor) < 0) { - RTE_LOG(INFO, PMD, "NFP NSP not present\n"); + if (nfp_fw_setup(dev, cpp, nfp_eth_table, hwinfo)) { + PMD_DRV_LOG(INFO, "Error when uploading firmware"); + ret = -EIO; goto error; } - PMD_INIT_LOG(INFO, "nspu ABI version: %d.%d\n", major, minor); - if ((major == 0) && (minor < 20)) { - RTE_LOG(INFO, PMD, "NFP NSP ABI version too old. 
Required 0.20 or higher\n"); + /* Now the symbol table should be there */ + sym_tbl = nfp_rtsym_table_read(cpp); + if (!sym_tbl) { + PMD_DRV_LOG(ERR, "Something is wrong with the firmware" + " symbol table"); + ret = -EIO; goto error; } - ret = nfp_nsp_fw_setup(nspu_desc, "nfd_cfg_pf0_num_ports", - &offset_symbol); - if (ret) + total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err); + if (total_ports != (int)nfp_eth_table->count) { + PMD_DRV_LOG(ERR, "Inconsistent number of ports"); + ret = -EIO; goto error; - - bar_offset = (uint8_t *)dev->mem_resource[0].addr; - bar_offset += offset_symbol; - total_ports = (uint32_t)*bar_offset; - PMD_INIT_LOG(INFO, "Total pf ports: %d\n", total_ports); + } + PMD_INIT_LOG(INFO, "Total pf ports: %d", total_ports); if (total_ports <= 0 || total_ports > 8) { - RTE_LOG(ERR, PMD, "nfd_cfg_pf0_num_ports symbol with wrong value"); + PMD_DRV_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value"); ret = -ENODEV; goto error; } for (i = 0; i < total_ports; i++) { - ret = nfp_pf_create_dev(dev, i, total_ports, nfpu_desc, &priv); + ret = nfp_pf_create_dev(dev, i, total_ports, cpp, hwinfo, + nfp_eth_table->ports[i].index, + sym_tbl, &priv); if (ret) - goto error; + break; } - return 0; - error: - nfpu_close(nfpu_desc); -nfpu_error: - rte_free(nfpu_desc); - + free(nfp_eth_table); return ret; } @@ -3129,8 +3241,19 @@ static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev) if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) || (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) { port = get_pf_port_number(eth_dev->data->name); + /* + * hotplug is not possible with multiport PF although freeing + * data structures can be done for first port. + */ + if (port != 0) + return -ENOTSUP; hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); hw = &hwport0[port]; + nfp_cpp_area_free(hw->ctrl_area); + nfp_cpp_area_free(hw->hwqueues_area); + free(hw->hwinfo); + free(hw->sym_tbl); + nfp_cpp_free(hw->cpp); } else { hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); } @@ -3161,9 +3284,7 @@ RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map); RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio"); RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio"); -RTE_INIT(nfp_init_log); -static void -nfp_init_log(void) +RTE_INIT(nfp_init_log) { nfp_logtype_init = rte_log_register("pmd.net.nfp.init"); if (nfp_logtype_init >= 0) diff --git a/drivers/net/nfp/nfp_net_ctrl.h b/drivers/net/nfp/nfp_net_ctrl.h index 1ebd99ca..21e17da1 100644 --- a/drivers/net/nfp/nfp_net_ctrl.h +++ b/drivers/net/nfp/nfp_net_ctrl.h @@ -120,6 +120,9 @@ #define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* Enable VXLAN */ #define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* Enable NVGRE */ #define NFP_NET_CFG_CTRL_MSIX_TX_OFF (0x1 << 26) /* Disable MSIX for TX */ +#define NFP_NET_CFG_CTRL_LSO2 (0x1 << 28) /* LSO/TSO (version 2) */ +#define NFP_NET_CFG_CTRL_RSS2 (0x1 << 29) /* RSS (version 2) */ +#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1 << 31) /* live MAC addr change */ #define NFP_NET_CFG_UPDATE 0x0004 #define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */ #define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */ @@ -131,6 +134,7 @@ #define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */ #define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */ #define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */ +#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */ #define 
NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* A error occurred */ #define NFP_NET_CFG_TXRS_ENABLE 0x0008 #define NFP_NET_CFG_RXRS_ENABLE 0x0010 @@ -140,6 +144,8 @@ #define NFP_NET_CFG_LSC 0x0020 #define NFP_NET_CFG_MACADDR 0x0024 +#define NFP_NET_CFG_CTRL_LSO_ANY (NFP_NET_CFG_CTRL_LSO | NFP_NET_CFG_CTRL_LSO2) + /* * Read-only words (0x0030 - 0x0050): * @NFP_NET_CFG_VERSION: Firmware version number diff --git a/drivers/net/nfp/nfp_net_eth.h b/drivers/net/nfp/nfp_net_eth.h deleted file mode 100644 index af57f03c..00000000 --- a/drivers/net/nfp/nfp_net_eth.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2017 Netronome Systems, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution - * - * 3. Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -/* - * vim:shiftwidth=8:noexpandtab - * - * @file dpdk/pmd/nfp_net_eth.h - * - * Netronome NFP_NET PDM driver - */ - -union eth_table_entry { - struct { - uint64_t port; - uint64_t state; - uint8_t mac_addr[6]; - uint8_t resv[2]; - uint64_t control; - }; - uint64_t raw[4]; -}; - -#ifndef BIT_ULL -#define BIT_ULL(a) (1ULL << (a)) -#endif - -#define NSP_ETH_NBI_PORT_COUNT 24 -#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT) -#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * sizeof(union eth_table_entry)) - -#define NSP_ETH_PORT_LANES 0xf -#define NSP_ETH_PORT_INDEX 0xff00 -#define NSP_ETH_PORT_LABEL 0x3f000000000000 -#define NSP_ETH_PORT_PHYLABEL 0xfc0000000000000 - -#define NSP_ETH_PORT_LANES_MASK rte_cpu_to_le_64(NSP_ETH_PORT_LANES) - -#define NSP_ETH_STATE_CONFIGURED BIT_ULL(0) -#define NSP_ETH_STATE_ENABLED BIT_ULL(1) -#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2) -#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3) -#define NSP_ETH_STATE_RATE 0xf00 -#define NSP_ETH_STATE_INTERFACE 0xff000 -#define NSP_ETH_STATE_MEDIA 0x300000 -#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22) -#define NSP_ETH_STATE_ANEG 0x3800000 - -#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0) -#define NSP_ETH_CTRL_ENABLED BIT_ULL(1) -#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2) -#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3) -#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4) -#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5) -#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6) diff --git a/drivers/net/nfp/nfp_net_logs.h b/drivers/net/nfp/nfp_net_logs.h index 3fe24e96..9952881c 100644 --- a/drivers/net/nfp/nfp_net_logs.h +++ b/drivers/net/nfp/nfp_net_logs.h @@ -36,19 +36,20 @@ extern int nfp_logtype_init; #define PMD_INIT_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) + rte_log(RTE_LOG_ ## level, nfp_logtype_init, \ + "%s(): " fmt "\n", __func__, ## args) #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") #ifdef RTE_LIBRTE_NFP_NET_DEBUG_RX #define PMD_RX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s() rx: " fmt, __func__, ## args) + RTE_LOG(level, PMD, "%s() rx: " fmt "\n", __func__, ## args) #else #define PMD_RX_LOG(level, fmt, args...) do { } while (0) #endif #ifdef RTE_LIBRTE_NFP_NET_DEBUG_TX #define PMD_TX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s() tx: " fmt, __func__, ## args) + RTE_LOG(level, PMD, "%s() tx: " fmt "\n", __func__, ## args) #define ASSERT(x) if (!(x)) rte_panic("NFP_NET: x") #else #define PMD_TX_LOG(level, fmt, args...) do { } while (0) @@ -58,6 +59,6 @@ extern int nfp_logtype_init; extern int nfp_logtype_driver; #define PMD_DRV_LOG(level, fmt, args...) \ rte_log(RTE_LOG_ ## level, nfp_logtype_driver, \ - "%s(): " fmt, __func__, ## args) + "%s(): " fmt "\n", __func__, ## args) #endif /* _NFP_NET_LOGS_H_ */ diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h index 1ae0ea62..c1b044ee 100644 --- a/drivers/net/nfp/nfp_net_pmd.h +++ b/drivers/net/nfp/nfp_net_pmd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015 Netronome Systems, Inc. + * Copyright (c) 2014-2018 Netronome Systems, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -63,6 +63,7 @@ struct nfp_net_adapter; #define NFP_NET_CRTL_BAR 0 #define NFP_NET_TX_BAR 2 #define NFP_NET_RX_BAR 2 +#define NFP_QCP_QUEUE_AREA_SZ 0x80000 /* Macros for accessing the Queue Controller Peripheral 'CSRs' */ #define NFP_QCP_QUEUE_OFF(_x) ((_x) * 0x800) @@ -184,18 +185,28 @@ static inline void nn_writeq(uint64_t val, volatile void *addr) struct nfp_net_tx_desc { union { struct { - uint8_t dma_addr_hi; /* High bits of host buf address */ + uint8_t dma_addr_hi; /* High bits of host buf address */ __le16 dma_len; /* Length to DMA for this desc */ - uint8_t offset_eop; /* Offset in buf where pkt starts + + uint8_t offset_eop; /* Offset in buf where pkt starts + * highest bit is eop flag. */ __le32 dma_addr_lo; /* Low 32bit of host buf addr */ - __le16 lso; /* MSS to be used for LSO */ - uint8_t l4_offset; /* LSO, where the L4 data starts */ - uint8_t flags; /* TX Flags, see @PCIE_DESC_TX_* */ - - __le16 vlan; /* VLAN tag to add if indicated */ + __le16 mss; /* MSS to be used for LSO */ + uint8_t lso_hdrlen; /* LSO, where the data starts */ + uint8_t flags; /* TX Flags, see @PCIE_DESC_TX_* */ + + union { + struct { + /* + * L3 and L4 header offsets required + * for TSOv2 + */ + uint8_t l3_offset; + uint8_t l4_offset; + }; + __le16 vlan; /* VLAN tag to add if indicated */ + }; __le16 data_len; /* Length of frame + meta data */ } __attribute__((__packed__)); __le32 vals[4]; @@ -247,15 +258,13 @@ struct nfp_net_txq { /* * At this point 48 bytes have been used for all the fields in the * TX critical path. We have room for 8 bytes and still all placed - * in a cache line. We are not using the threshold values below nor - * the txq_flags but if we need to, we can add the most used in the - * remaining bytes. + * in a cache line. We are not using the threshold values below but + * if we need to, we can add the most used in the remaining bytes. */ uint32_t tx_rs_thresh; /* not used by now. Future? */ uint32_t tx_pthresh; /* not used by now. Future? */ uint32_t tx_hthresh; /* not used by now. Future? */ uint32_t tx_wthresh; /* not used by now. Future? */ - uint32_t txq_flags; /* not used by now. Future? */ uint16_t port_id; int qidx; int tx_qcidx; @@ -430,20 +439,21 @@ struct nfp_net_hw { /* Records starting point for counters */ struct rte_eth_stats eth_stats_base; -#ifdef NFP_NET_LIBNFP struct nfp_cpp *cpp; struct nfp_cpp_area *ctrl_area; - struct nfp_cpp_area *tx_area; - struct nfp_cpp_area *rx_area; + struct nfp_cpp_area *hwqueues_area; struct nfp_cpp_area *msix_area; -#endif + uint8_t *hw_queues; uint8_t is_pf; uint8_t pf_port_idx; uint8_t pf_multiport_enabled; + uint8_t total_ports; + union eth_table_entry *eth_table; - nspu_desc_t *nspu_desc; - nfpu_desc_t *nfpu_desc; + + struct nfp_hwinfo *hwinfo; + struct nfp_rtsym_table *sym_tbl; }; struct nfp_net_adapter { diff --git a/drivers/net/nfp/nfp_nfpu.c b/drivers/net/nfp/nfp_nfpu.c deleted file mode 100644 index f11afef3..00000000 --- a/drivers/net/nfp/nfp_nfpu.c +++ /dev/null @@ -1,108 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "nfp_nfpu.h" - -/* PF BAR and expansion BAR for the NSP interface */ -#define NFP_CFG_PCIE_BAR 0 -#define NFP_CFG_EXP_BAR 7 - -#define NFP_CFG_EXP_BAR_CFG_BASE 0x30000 - -/* There could be other NFP userspace tools using the NSP interface. - * Make sure there is no other process using it and locking the access for - * avoiding problems. 
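
/*
 * [Editor's sketch] In the reworked TX descriptor above, the TSOv2
 * l3_offset/l4_offset pair and the VLAN tag occupy the same two bytes,
 * which is why nfp_net_init() in this patch clears
 * NFP_NET_CFG_CTRL_TXVLAN whenever the firmware advertises LSO2: the two
 * features cannot ride on one descriptor. The overlap, stand-alone (C11
 * anonymous struct, as in the patch):
 */
#include <stdint.h>
#include <assert.h>

union example_tso2_or_vlan {
	struct {
		uint8_t l3_offset;	/* TSOv2: start of L3 header */
		uint8_t l4_offset;	/* TSOv2: start of L4 header */
	};
	uint16_t vlan;			/* VLAN tag to insert otherwise */
};

static_assert(sizeof(union example_tso2_or_vlan) == 2,
	      "TSOv2 offsets and VLAN tag share two bytes");
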
- */ -static int -nspv_aquire_process_lock(nfpu_desc_t *desc) -{ - int rc; - struct flock lock; - char lockname[30]; - - memset(&lock, 0, sizeof(lock)); - - snprintf(lockname, sizeof(lockname), "/var/lock/nfp%d", desc->nfp); - - /* Using S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH */ - desc->lock = open(lockname, O_RDWR | O_CREAT, 0666); - - if (desc->lock < 0) - return desc->lock; - - lock.l_type = F_WRLCK; - lock.l_whence = SEEK_SET; - rc = -1; - while (rc != 0) { - rc = fcntl(desc->lock, F_SETLK, &lock); - if (rc < 0) { - if ((errno != EAGAIN) && (errno != EACCES)) { - close(desc->lock); - return rc; - } - } - } - - return 0; -} - -int -nfpu_open(struct rte_pci_device *pci_dev, nfpu_desc_t *desc, int nfp) -{ - void *cfg_base, *mem_base; - size_t barsz; - int ret = 0; - int i = 0; - - desc->nfp = nfp; - - ret = nspv_aquire_process_lock(desc); - if (ret) - return -1; - - barsz = pci_dev->mem_resource[0].len; - - /* barsz in log2 */ - while (barsz >>= 1) - i++; - - barsz = i; - - /* Sanity check: we can assume any bar size less than 1MB an error */ - if (barsz < 20) - return -1; - - /* Getting address for NFP expansion BAR registers */ - cfg_base = pci_dev->mem_resource[0].addr; - cfg_base = (uint8_t *)cfg_base + NFP_CFG_EXP_BAR_CFG_BASE; - - /* Getting address for NFP NSP interface registers */ - mem_base = pci_dev->mem_resource[0].addr; - mem_base = (uint8_t *)mem_base + (NFP_CFG_EXP_BAR << (barsz - 3)); - - - desc->nspu = rte_malloc("nfp nspu", sizeof(nspu_desc_t), 0); - nfp_nspu_init(desc->nspu, desc->nfp, NFP_CFG_PCIE_BAR, barsz, - NFP_CFG_EXP_BAR, cfg_base, mem_base); - - return ret; -} - -int -nfpu_close(nfpu_desc_t *desc) -{ - rte_free(desc->nspu); - close(desc->lock); - unlink("/var/lock/nfp0"); - return 0; -} diff --git a/drivers/net/nfp/nfp_nfpu.h b/drivers/net/nfp/nfp_nfpu.h deleted file mode 100644 index e56fa099..00000000 --- a/drivers/net/nfp/nfp_nfpu.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2017 Netronome Systems, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution - * - * 3. Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -/* - * vim:shiftwidth=8:noexpandtab - * - * @file dpdk/pmd/nfp_nfpu.h - * - * Netronome NFP_NET PDM driver - */ - -/* - * NFP User interface creates a window for talking with NFP NSP processor - */ - - -#include -#include "nfp_nspu.h" - -typedef struct { - int nfp; - int lock; - nspu_desc_t *nspu; -} nfpu_desc_t; - -int nfpu_open(struct rte_pci_device *pci_dev, nfpu_desc_t *desc, int nfp); -int nfpu_close(nfpu_desc_t *desc); diff --git a/drivers/net/nfp/nfp_nspu.c b/drivers/net/nfp/nfp_nspu.c deleted file mode 100644 index f9089832..00000000 --- a/drivers/net/nfp/nfp_nspu.c +++ /dev/null @@ -1,642 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "nfp_nfpu.h" - -#define CFG_EXP_BAR_ADDR_SZ 1 -#define CFG_EXP_BAR_MAP_TYPE 1 - -#define EXP_BAR_TARGET_SHIFT 23 -#define EXP_BAR_LENGTH_SHIFT 27 /* 0=32, 1=64 bit increment */ -#define EXP_BAR_MAP_TYPE_SHIFT 29 /* Bulk BAR map */ - -/* NFP target for NSP access */ -#define NFP_NSP_TARGET 7 - -/* Expansion BARs for mapping PF vnic BARs */ -#define NFP_NET_PF_CFG_EXP_BAR 6 -#define NFP_NET_PF_HW_QUEUES_EXP_BAR 5 - -/* - * This is an NFP internal address used for configuring properly an NFP - * expansion BAR. - */ -#define MEM_CMD_BASE_ADDR 0x8100000000 - -/* NSP interface registers */ -#define NSP_BASE (MEM_CMD_BASE_ADDR + 0x22100) -#define NSP_STATUS 0x00 -#define NSP_COMMAND 0x08 -#define NSP_BUFFER 0x10 -#define NSP_DEFAULT_BUF 0x18 -#define NSP_DEFAULT_BUF_CFG 0x20 - -#define NSP_MAGIC 0xab10 -#define NSP_STATUS_MAGIC(x) (((x) >> 48) & 0xffff) -#define NSP_STATUS_MAJOR(x) (int)(((x) >> 44) & 0xf) -#define NSP_STATUS_MINOR(x) (int)(((x) >> 32) & 0xfff) - -/* NSP commands */ -#define NSP_CMD_RESET 1 -#define NSP_CMD_FW_LOAD 6 -#define NSP_CMD_READ_ETH_TABLE 7 -#define NSP_CMD_WRITE_ETH_TABLE 8 -#define NSP_CMD_GET_SYMBOL 14 - -#define NSP_BUFFER_CFG_SIZE_MASK (0xff) - -#define NSP_REG_ADDR(d, off, reg) ((uint8_t *)(d)->mem_base + (off) + (reg)) -#define NSP_REG_VAL(p) (*(uint64_t *)(p)) - -/* - * An NFP expansion BAR is configured for allowing access to a specific NFP - * target: - * - * IN: - * desc: struct with basic NSP addresses to work with - * expbar: NFP PF expansion BAR index to configure - * tgt: NFP target to configure access - * addr: NFP target address - * - * OUT: - * pcie_offset: NFP PCI BAR offset to work with - */ -static void -nfp_nspu_mem_bar_cfg(nspu_desc_t *desc, int expbar, int tgt, - uint64_t addr, uint64_t *pcie_offset) -{ - uint64_t x, y, barsz; - uint32_t *expbar_ptr; - - barsz = desc->barsz; - - /* - * NFP CPP address to configure. This comes from NFP 6000 - * datasheet document based on Bulk mapping. - */ - x = (addr >> (barsz - 3)) << (21 - (40 - (barsz - 3))); - x |= CFG_EXP_BAR_MAP_TYPE << EXP_BAR_MAP_TYPE_SHIFT; - x |= CFG_EXP_BAR_ADDR_SZ << EXP_BAR_LENGTH_SHIFT; - x |= tgt << EXP_BAR_TARGET_SHIFT; - - /* Getting expansion bar configuration register address */ - expbar_ptr = (uint32_t *)desc->cfg_base; - /* Each physical PCI BAR has 8 NFP expansion BARs */ - expbar_ptr += (desc->pcie_bar * 8) + expbar; - - /* Writing to the expansion BAR register */ - *expbar_ptr = (uint32_t)x; - - /* Getting the pcie offset to work with from userspace */ - y = addr & ((uint64_t)(1 << (barsz - 3)) - 1); - *pcie_offset = y; -} - -/* - * Configuring an expansion bar for accessing NSP userspace interface. This - * function configures always the same expansion bar, which implies access to - * previously configured NFP target is lost. 
- */ -static void -nspu_xlate(nspu_desc_t *desc, uint64_t addr, uint64_t *pcie_offset) -{ - nfp_nspu_mem_bar_cfg(desc, desc->exp_bar, NFP_NSP_TARGET, addr, - pcie_offset); -} - -int -nfp_nsp_get_abi_version(nspu_desc_t *desc, int *major, int *minor) -{ - uint64_t pcie_offset; - uint64_t nsp_reg; - - nspu_xlate(desc, NSP_BASE, &pcie_offset); - nsp_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, pcie_offset, NSP_STATUS)); - - if (NSP_STATUS_MAGIC(nsp_reg) != NSP_MAGIC) - return -1; - - *major = NSP_STATUS_MAJOR(nsp_reg); - *minor = NSP_STATUS_MINOR(nsp_reg); - - return 0; -} - -int -nfp_nspu_init(nspu_desc_t *desc, int nfp, int pcie_bar, size_t pcie_barsz, - int exp_bar, void *exp_bar_cfg_base, void *exp_bar_mmap) -{ - uint64_t offset, buffaddr; - uint64_t nsp_reg; - - desc->nfp = nfp; - desc->pcie_bar = pcie_bar; - desc->exp_bar = exp_bar; - desc->barsz = pcie_barsz; - desc->windowsz = 1 << (desc->barsz - 3); - desc->cfg_base = exp_bar_cfg_base; - desc->mem_base = exp_bar_mmap; - - nspu_xlate(desc, NSP_BASE, &offset); - - /* - * Other NSPU clients can use other buffers. Let's tell NSPU we use the - * default buffer. - */ - buffaddr = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_DEFAULT_BUF)); - NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_BUFFER)) = buffaddr; - - /* NFP internal addresses are 40 bits. Clean all other bits here */ - buffaddr = buffaddr & (((uint64_t)1 << 40) - 1); - desc->bufaddr = buffaddr; - - /* Lets get information about the buffer */ - nsp_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_DEFAULT_BUF_CFG)); - - /* Buffer size comes in MBs. Coversion to bytes */ - desc->buf_size = ((size_t)nsp_reg & NSP_BUFFER_CFG_SIZE_MASK) << 20; - - return 0; -} - -#define NSPU_NFP_BUF(addr, base, off) \ - (*(uint64_t *)((uint8_t *)(addr)->mem_base + ((base) | (off)))) - -#define NSPU_HOST_BUF(base, off) (*(uint64_t *)((uint8_t *)(base) + (off))) - -static int -nspu_buff_write(nspu_desc_t *desc, void *buffer, size_t size) -{ - uint64_t pcie_offset, pcie_window_base, pcie_window_offset; - uint64_t windowsz = desc->windowsz; - uint64_t buffaddr, j, i = 0; - int ret = 0; - - if (size > desc->buf_size) - return -1; - - buffaddr = desc->bufaddr; - windowsz = desc->windowsz; - - while (i < size) { - /* Expansion bar reconfiguration per window size */ - nspu_xlate(desc, buffaddr + i, &pcie_offset); - pcie_window_base = pcie_offset & (~(windowsz - 1)); - pcie_window_offset = pcie_offset & (windowsz - 1); - for (j = pcie_window_offset; ((j < windowsz) && (i < size)); - j += 8) { - NSPU_NFP_BUF(desc, pcie_window_base, j) = - NSPU_HOST_BUF(buffer, i); - i += 8; - } - } - - return ret; -} - -static int -nspu_buff_read(nspu_desc_t *desc, void *buffer, size_t size) -{ - uint64_t pcie_offset, pcie_window_base, pcie_window_offset; - uint64_t windowsz, i = 0, j; - uint64_t buffaddr; - int ret = 0; - - if (size > desc->buf_size) - return -1; - - buffaddr = desc->bufaddr; - windowsz = desc->windowsz; - - while (i < size) { - /* Expansion bar reconfiguration per window size */ - nspu_xlate(desc, buffaddr + i, &pcie_offset); - pcie_window_base = pcie_offset & (~(windowsz - 1)); - pcie_window_offset = pcie_offset & (windowsz - 1); - for (j = pcie_window_offset; ((j < windowsz) && (i < size)); - j += 8) { - NSPU_HOST_BUF(buffer, i) = - NSPU_NFP_BUF(desc, pcie_window_base, j); - i += 8; - } - } - - return ret; -} - -static int -nspu_command(nspu_desc_t *desc, uint16_t cmd, int read, int write, - void *buffer, size_t rsize, size_t wsize) -{ - uint64_t status, cmd_reg; - uint64_t offset; - int retry = 0; - int retries = 120; 
- int ret = 0; - - /* Same expansion BAR is used for different things */ - nspu_xlate(desc, NSP_BASE, &offset); - - status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS)); - - while ((status & 0x1) && (retry < retries)) { - status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS)); - retry++; - sleep(1); - } - - if (retry == retries) - return -1; - - if (write) { - ret = nspu_buff_write(desc, buffer, wsize); - if (ret) - return ret; - - /* Expansion BAR changes when writing the buffer */ - nspu_xlate(desc, NSP_BASE, &offset); - } - - NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND)) = - (uint64_t)wsize << 32 | (uint64_t)cmd << 16 | 1; - - retry = 0; - - cmd_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND)); - while ((cmd_reg & 0x1) && (retry < retries)) { - cmd_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND)); - retry++; - sleep(1); - } - if (retry == retries) - return -1; - - retry = 0; - status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS)); - while ((status & 0x1) && (retry < retries)) { - status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS)); - retry++; - sleep(1); - } - - if (retry == retries) - return -1; - - ret = status & (0xff << 8); - if (ret) - return ret; - - if (read) { - ret = nspu_buff_read(desc, buffer, rsize); - if (ret) - return ret; - } - - return ret; -} - -static int -nfp_fw_reset(nspu_desc_t *nspu_desc) -{ - int res; - - res = nspu_command(nspu_desc, NSP_CMD_RESET, 0, 0, 0, 0, 0); - - if (res < 0) - RTE_LOG(INFO, PMD, "fw reset failed: error %d", res); - - return res; -} - -#define DEFAULT_FW_PATH "/lib/firmware/netronome" -#define DEFAULT_FW_FILENAME "nic_dpdk_default.nffw" - -static int -nfp_fw_upload(nspu_desc_t *nspu_desc) -{ - int fw_f; - char *fw_buf; - char filename[100]; - struct stat file_stat; - off_t fsize, bytes; - ssize_t size; - int ret; - - size = nspu_desc->buf_size; - - sprintf(filename, "%s/%s", DEFAULT_FW_PATH, DEFAULT_FW_FILENAME); - fw_f = open(filename, O_RDONLY); - if (fw_f < 0) { - RTE_LOG(INFO, PMD, "Firmware file %s/%s not found.", - DEFAULT_FW_PATH, DEFAULT_FW_FILENAME); - return -ENOENT; - } - - if (fstat(fw_f, &file_stat) < 0) { - RTE_LOG(INFO, PMD, "Firmware file %s/%s size is unknown", - DEFAULT_FW_PATH, DEFAULT_FW_FILENAME); - close(fw_f); - return -ENOENT; - } - - fsize = file_stat.st_size; - RTE_LOG(DEBUG, PMD, "Firmware file with size: %" PRIu64 "\n", - (uint64_t)fsize); - - if (fsize > (off_t)size) { - RTE_LOG(INFO, PMD, "fw file too big: %" PRIu64 - " bytes (%" PRIu64 " max)", - (uint64_t)fsize, (uint64_t)size); - close(fw_f); - return -EINVAL; - } - - fw_buf = malloc((size_t)size); - if (!fw_buf) { - RTE_LOG(INFO, PMD, "malloc failed for fw buffer"); - close(fw_f); - return -ENOMEM; - } - memset(fw_buf, 0, size); - - bytes = read(fw_f, fw_buf, fsize); - if (bytes != fsize) { - RTE_LOG(INFO, PMD, "Reading fw to buffer failed.\n" - "Just %" PRIu64 " of %" PRIu64 " bytes read.", - (uint64_t)bytes, (uint64_t)fsize); - free(fw_buf); - close(fw_f); - return -EIO; - } - - ret = nspu_command(nspu_desc, NSP_CMD_FW_LOAD, 0, 1, fw_buf, 0, bytes); - - free(fw_buf); - close(fw_f); - - return ret; -} - -/* Firmware symbol descriptor size */ -#define NFP_SYM_DESC_LEN 40 - -#define SYMBOL_DATA(b, off) (*(int64_t *)((b) + (off))) -#define SYMBOL_UDATA(b, off) (*(uint64_t *)((b) + (off))) - -/* Firmware symbols contain information about how to access what they - * represent. 
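For reference, the 64-bit word that nspu_command() above writes to the NSP_COMMAND register packs the buffer write size, the command code, and a start bit that is polled until the NSP clears it; completion status is then taken from bits 15:8 of NSP_STATUS. A sketch restating that layout (function name hypothetical, field positions taken from the code above):

static uint64_t
example_nsp_command_word(uint16_t cmd, uint64_t wsize)
{
	return wsize << 32 |		/* bits 63:32: bytes staged in the NSP buffer */
	       (uint64_t)cmd << 16 |	/* bits 31:16: NSP command code */
	       1;			/* bit 0: start bit, polled until cleared */
}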
It can be as simple as an numeric variable declared at a - * specific NFP memory, but it can also be more complex structures and - * related to specific hardware functionalities or components. Target, - * domain and address allow to create the BAR window for accessing such - * hw object and size defines the length to map. - * - * A vNIC is a network interface implemented inside the NFP and using a - * subset of device PCI BARs. Specific firmware symbols allow to map those - * vNIC bars by host drivers like the NFP PMD. - * - * Accessing what the symbol represents implies to map the access through - * a PCI BAR window. NFP expansion BARs are used in this regard through - * the NSPU interface. - */ -static int -nfp_nspu_set_bar_from_symbl(nspu_desc_t *desc, const char *symbl, - uint32_t expbar, uint64_t *pcie_offset, - ssize_t *size) -{ - int64_t type; - int64_t target; - int64_t domain; - uint64_t addr; - char *sym_buf; - int ret = 0; - - sym_buf = malloc(desc->buf_size); - if (!sym_buf) - return -ENOMEM; - - strncpy(sym_buf, symbl, strlen(symbl)); - ret = nspu_command(desc, NSP_CMD_GET_SYMBOL, 1, 1, sym_buf, - NFP_SYM_DESC_LEN, strlen(symbl)); - if (ret) { - RTE_LOG(DEBUG, PMD, "symbol resolution (%s) failed\n", symbl); - goto clean; - } - - /* Reading symbol information */ - type = SYMBOL_DATA(sym_buf, 0); - target = SYMBOL_DATA(sym_buf, 8); - domain = SYMBOL_DATA(sym_buf, 16); - addr = SYMBOL_UDATA(sym_buf, 24); - *size = (ssize_t)SYMBOL_UDATA(sym_buf, 32); - - if (type != 1) { - RTE_LOG(INFO, PMD, "wrong symbol type\n"); - ret = -EINVAL; - goto clean; - } - if (!(target == 7 || target == -7)) { - RTE_LOG(INFO, PMD, "wrong symbol target\n"); - ret = -EINVAL; - goto clean; - } - if (domain == 8 || domain == 9) { - RTE_LOG(INFO, PMD, "wrong symbol domain\n"); - ret = -EINVAL; - goto clean; - } - - /* Adjusting address based on symbol location */ - if ((domain >= 24) && (domain < 28) && (target == 7)) { - addr = 1ULL << 37 | addr | ((uint64_t)domain & 0x3) << 35; - } else { - addr = 1ULL << 39 | addr | ((uint64_t)domain & 0x3f) << 32; - if (target == -7) - target = 7; - } - - /* Configuring NFP expansion bar for mapping specific PCI BAR window */ - nfp_nspu_mem_bar_cfg(desc, expbar, target, addr, pcie_offset); - - /* This is the PCI BAR offset to use by the host */ - *pcie_offset |= ((expbar & 0x7) << (desc->barsz - 3)); - -clean: - free(sym_buf); - return ret; -} - -int -nfp_nsp_fw_setup(nspu_desc_t *desc, const char *sym, uint64_t *pcie_offset) -{ - ssize_t bar0_sym_size; - - /* If the symbol resolution works, it implies a firmware app - * is already there. - */ - if (!nfp_nspu_set_bar_from_symbl(desc, sym, NFP_NET_PF_CFG_EXP_BAR, - pcie_offset, &bar0_sym_size)) - return 0; - - /* No firmware app detected or not the right one */ - RTE_LOG(INFO, PMD, "No firmware detected. 
Resetting NFP...\n"); - if (nfp_fw_reset(desc) < 0) { - RTE_LOG(ERR, PMD, "nfp fw reset failed\n"); - return -ENODEV; - } - - RTE_LOG(INFO, PMD, "Reset done.\n"); - RTE_LOG(INFO, PMD, "Uploading firmware...\n"); - - if (nfp_fw_upload(desc) < 0) { - RTE_LOG(ERR, PMD, "nfp fw upload failed\n"); - return -ENODEV; - } - - RTE_LOG(INFO, PMD, "Done.\n"); - - /* Now the symbol should be there */ - if (nfp_nspu_set_bar_from_symbl(desc, sym, NFP_NET_PF_CFG_EXP_BAR, - pcie_offset, &bar0_sym_size)) { - RTE_LOG(ERR, PMD, "nfp PF BAR symbol resolution failed\n"); - return -ENODEV; - } - - return 0; -} - -int -nfp_nsp_map_ctrl_bar(nspu_desc_t *desc, uint64_t *pcie_offset) -{ - ssize_t bar0_sym_size; - - if (nfp_nspu_set_bar_from_symbl(desc, "_pf0_net_bar0", - NFP_NET_PF_CFG_EXP_BAR, - pcie_offset, &bar0_sym_size)) - return -ENODEV; - - return 0; -} - -/* - * This is a hardcoded fixed NFP internal CPP bus address for the hw queues unit - * inside the PCIE island. - */ -#define NFP_CPP_PCIE_QUEUES ((uint64_t)(1ULL << 39) | 0x80000 | \ - ((uint64_t)0x4 & 0x3f) << 32) - -/* Configure a specific NFP expansion bar for accessing the vNIC rx/tx BARs */ -void -nfp_nsp_map_queues_bar(nspu_desc_t *desc, uint64_t *pcie_offset) -{ - nfp_nspu_mem_bar_cfg(desc, NFP_NET_PF_HW_QUEUES_EXP_BAR, 0, - NFP_CPP_PCIE_QUEUES, pcie_offset); - - /* This is the pcie offset to use by the host */ - *pcie_offset |= ((NFP_NET_PF_HW_QUEUES_EXP_BAR & 0x7) << (27 - 3)); -} - -int -nfp_nsp_eth_config(nspu_desc_t *desc, int port, int up) -{ - union eth_table_entry *entries, *entry; - int modified; - int ret, idx; - int i; - - idx = port; - - RTE_LOG(INFO, PMD, "Hw ethernet port %d configure...\n", port); - rte_spinlock_lock(&desc->nsp_lock); - entries = malloc(NSP_ETH_TABLE_SIZE); - if (!entries) { - rte_spinlock_unlock(&desc->nsp_lock); - return -ENOMEM; - } - - ret = nspu_command(desc, NSP_CMD_READ_ETH_TABLE, 1, 0, entries, - NSP_ETH_TABLE_SIZE, 0); - if (ret) { - rte_spinlock_unlock(&desc->nsp_lock); - free(entries); - return ret; - } - - entry = entries; - - for (i = 0; i < NSP_ETH_MAX_COUNT; i++) { - /* ports in use do not appear sequentially in the table */ - if (!(entry->port & NSP_ETH_PORT_LANES_MASK)) { - /* entry not in use */ - entry++; - continue; - } - if (idx == 0) - break; - idx--; - entry++; - } - - if (i == NSP_ETH_MAX_COUNT) { - rte_spinlock_unlock(&desc->nsp_lock); - free(entries); - return -EINVAL; - } - - if (up && !(entry->state & NSP_ETH_STATE_CONFIGURED)) { - entry->control |= NSP_ETH_STATE_CONFIGURED; - modified = 1; - } - - if (!up && (entry->state & NSP_ETH_STATE_CONFIGURED)) { - entry->control &= ~NSP_ETH_STATE_CONFIGURED; - modified = 1; - } - - if (modified) { - ret = nspu_command(desc, NSP_CMD_WRITE_ETH_TABLE, 0, 1, entries, - 0, NSP_ETH_TABLE_SIZE); - if (!ret) - RTE_LOG(INFO, PMD, - "Hw ethernet port %d configure done\n", port); - else - RTE_LOG(INFO, PMD, - "Hw ethernet port %d configure failed\n", port); - } - rte_spinlock_unlock(&desc->nsp_lock); - free(entries); - return ret; -} - -int -nfp_nsp_eth_read_table(nspu_desc_t *desc, union eth_table_entry **table) -{ - int ret; - - if (!table) - return -EINVAL; - - RTE_LOG(INFO, PMD, "Reading hw ethernet table...\n"); - - /* port 0 allocates the eth table and read it using NSPU */ - *table = malloc(NSP_ETH_TABLE_SIZE); - if (!*table) - return -ENOMEM; - - ret = nspu_command(desc, NSP_CMD_READ_ETH_TABLE, 1, 0, *table, - NSP_ETH_TABLE_SIZE, 0); - if (ret) - return ret; - - RTE_LOG(INFO, PMD, "Done\n"); - - return 0; -} diff --git a/drivers/net/nfp/nfp_nspu.h 
b/drivers/net/nfp/nfp_nspu.h deleted file mode 100644 index 8c33835e..00000000 --- a/drivers/net/nfp/nfp_nspu.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2017 Netronome Systems, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution - * - * 3. Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * vim:shiftwidth=8:noexpandtab - * - * @file dpdk/pmd/nfp_nspu.h - * - * Netronome NFP_NET PDM driver - */ - -/* - * NSP is the NFP Service Processor. NSPU is NSP Userspace interface. - * - * NFP NSP helps with firmware/hardware configuration. NSP is another component - * in NFP programmable processor and accessing it from host requires to firstly - * configure a specific NFP PCI expansion BAR. - * - * Once access is ready, configuration can be done reading and writing - * from/to a specific PF PCI BAR window. This same interface will allow to - * create other PCI BAR windows for accessing other NFP components. - * - * This file includes low-level functions, using the NSPU interface, and high - * level functions, invoked by the PMD for using NSP services. This allows - * firmware upload, vNIC PCI BARs mapping and other low-level configurations - * like link setup. - * - * NSP access is done during initialization and it is not involved at all with - * the fast path. 
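As an aside on the removed NSPU code above: before configuring an expansion BAR from a resolved firmware symbol, nfp_nspu_set_bar_from_symbl() composed a full 40-bit NFP address in the general (non-CTM) case. An editorial restatement of that composition, with a hypothetical helper name:

static uint64_t
example_nfp_addr40(uint64_t addr, uint64_t domain)
{
	/* bit 39 selects 40-bit addressing; the island id sits at bits 37:32 */
	return 1ULL << 39 | ((domain & 0x3f) << 32) | addr;
}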
- */ - -#include -#include "nfp_net_eth.h" - -typedef struct { - int nfp; /* NFP device */ - int pcie_bar; /* PF PCI BAR to work with */ - int exp_bar; /* Expansion BAR number used by NSPU */ - int barsz; /* PCIE BAR log2 size */ - uint64_t bufaddr; /* commands buffer address */ - size_t buf_size; /* commands buffer size */ - uint64_t windowsz; /* NSPU BAR window size */ - void *cfg_base; /* Expansion BARs address */ - void *mem_base; /* NSP interface */ - rte_spinlock_t nsp_lock; -} nspu_desc_t; - -int nfp_nspu_init(nspu_desc_t *desc, int nfp, int pcie_bar, size_t pcie_barsz, - int exp_bar, void *exp_bar_cfg_base, void *exp_bar_mmap); -int nfp_nsp_get_abi_version(nspu_desc_t *desc, int *major, int *minor); -int nfp_nsp_fw_setup(nspu_desc_t *desc, const char *sym, uint64_t *pcie_offset); -int nfp_nsp_map_ctrl_bar(nspu_desc_t *desc, uint64_t *pcie_offset); -void nfp_nsp_map_queues_bar(nspu_desc_t *desc, uint64_t *pcie_offset); -int nfp_nsp_eth_config(nspu_desc_t *desc, int port, int up); -int nfp_nsp_eth_read_table(nspu_desc_t *desc, union eth_table_entry **table); diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h new file mode 100644 index 00000000..6e380cca --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h @@ -0,0 +1,722 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_CPPAT_H__ +#define __NFP_CPPAT_H__ + +#include "nfp_platform.h" +#include "nfp_resid.h" + +/* This file contains helpers for creating CPP commands + * + * All magic NFP-6xxx IMB 'mode' numbers here are from: + * Databook (1 August 2013) + * - System Overview and Connectivity + * -- Internal Connectivity + * --- Distributed Switch Fabric - Command Push/Pull (DSF-CPP) Bus + * ---- CPP addressing + * ----- Table 3.6. CPP Address Translation Mode Commands + */ + +#define _NIC_NFP6000_MU_LOCALITY_DIRECT 2 + +static inline int +_nfp6000_decode_basic(uint64_t addr, int *dest_island, int cpp_tgt, int mode, + int addr40, int isld1, int isld0); + +static uint64_t +_nic_mask64(int msb, int lsb, int at0) +{ + uint64_t v; + int w = msb - lsb + 1; + + if (w == 64) + return ~(uint64_t)0; + + if ((lsb + w) > 64) + return 0; + + v = (UINT64_C(1) << w) - 1; + + if (at0) + return v; + + return v << lsb; +} + +/* For VQDR, we may not modify the Channel bits, which might overlap + * with the Index bit. When it does, we need to ensure that isld0 == isld1. + */ +static inline int +_nfp6000_encode_basic(uint64_t *addr, int dest_island, int cpp_tgt, int mode, + int addr40, int isld1, int isld0) +{ + uint64_t _u64; + int iid_lsb, idx_lsb; + int i, v = 0; + int isld[2]; + + isld[0] = isld0; + isld[1] = isld1; + + switch (cpp_tgt) { + case NFP6000_CPPTGT_MU: + /* This function doesn't handle MU */ + return NFP_ERRNO(EINVAL); + case NFP6000_CPPTGT_CTXPB: + /* This function doesn't handle CTXPB */ + return NFP_ERRNO(EINVAL); + default: + break; + } + + switch (mode) { + case 0: + if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) { + /* + * In this specific mode we'd rather not modify the + * address but we can verify if the existing contents + * will point to a valid island. + */ + i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode, + addr40, isld1, + isld0); + if (i != 0) + /* Full Island ID and channel bits overlap */ + return i; + + /* + * If dest_island is invalid, the current address won't + * go where expected. 
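Since _nic_mask64() above drives all of the field packing that follows, a worked example may help (editorial, values follow directly from the function):

	/* The six-bit island field <39:34> used below for 40-bit mode 0 */
	uint64_t iid_mask = _nic_mask64(39, 34, 0);	/* == 0xFC00000000 */
	/* With at0 set, the same width of mask is returned at bit 0 */
	uint64_t width_mask = _nic_mask64(39, 34, 1);	/* == 0x3F */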
+ */ + if (dest_island != -1 && dest_island != v) + return NFP_ERRNO(EINVAL); + + /* If dest_island was -1, we don't care */ + return 0; + } + + iid_lsb = (addr40) ? 34 : 26; + + /* <39:34> or <31:26> */ + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + case 1: + if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) { + i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode, + addr40, isld1, isld0); + if (i != 0) + /* Full Island ID and channel bits overlap */ + return i; + + /* + * If dest_island is invalid, the current address won't + * go where expected. + */ + if (dest_island != -1 && dest_island != v) + return NFP_ERRNO(EINVAL); + + /* If dest_island was -1, we don't care */ + return 0; + } + + idx_lsb = (addr40) ? 39 : 31; + if (dest_island == isld0) { + /* Only need to clear the Index bit */ + *addr &= ~_nic_mask64(idx_lsb, idx_lsb, 0); + return 0; + } + + if (dest_island == isld1) { + /* Only need to set the Index bit */ + *addr |= (UINT64_C(1) << idx_lsb); + return 0; + } + + return NFP_ERRNO(ENODEV); + case 2: + if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) { + /* iid<0> = addr<30> = channel<0> */ + /* channel<1> = addr<31> = Index */ + + /* + * Special case where we allow channel bits to be set + * before hand and with them select an island. + * So we need to confirm that it's at least plausible. + */ + i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode, + addr40, isld1, isld0); + if (i != 0) + /* Full Island ID and channel bits overlap */ + return i; + + /* + * If dest_island is invalid, the current address won't + * go where expected. + */ + if (dest_island != -1 && dest_island != v) + return NFP_ERRNO(EINVAL); + + /* If dest_island was -1, we don't care */ + return 0; + } + + /* + * Make sure we compare against isldN values by clearing the + * LSB. This is what the silicon does. + **/ + isld[0] &= ~1; + isld[1] &= ~1; + + idx_lsb = (addr40) ? 39 : 31; + iid_lsb = idx_lsb - 1; + + /* + * Try each option, take first one that fits. Not sure if we + * would want to do some smarter searching and prefer 0 or non-0 + * island IDs. + */ + + for (i = 0; i < 2; i++) { + for (v = 0; v < 2; v++) { + if (dest_island != (isld[i] | v)) + continue; + *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0); + *addr |= (((uint64_t)i) << idx_lsb); + *addr |= (((uint64_t)v) << iid_lsb); + return 0; + } + } + + return NFP_ERRNO(ENODEV); + case 3: + if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) { + /* + * iid<0> = addr<29> = data + * iid<1> = addr<30> = channel<0> + * channel<1> = addr<31> = Index + */ + i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode, + addr40, isld1, isld0); + if (i != 0) + /* Full Island ID and channel bits overlap */ + return i; + + if (dest_island != -1 && dest_island != v) + return NFP_ERRNO(EINVAL); + + /* If dest_island was -1, we don't care */ + return 0; + } + + isld[0] &= ~3; + isld[1] &= ~3; + + idx_lsb = (addr40) ? 
39 : 31; + iid_lsb = idx_lsb - 2; + + for (i = 0; i < 2; i++) { + for (v = 0; v < 4; v++) { + if (dest_island != (isld[i] | v)) + continue; + *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0); + *addr |= (((uint64_t)i) << idx_lsb); + *addr |= (((uint64_t)v) << iid_lsb); + return 0; + } + } + return NFP_ERRNO(ENODEV); + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_decode_basic(uint64_t addr, int *dest_island, int cpp_tgt, int mode, + int addr40, int isld1, int isld0) +{ + int iid_lsb, idx_lsb; + + switch (cpp_tgt) { + case NFP6000_CPPTGT_MU: + /* This function doesn't handle MU */ + return NFP_ERRNO(EINVAL); + case NFP6000_CPPTGT_CTXPB: + /* This function doesn't handle CTXPB */ + return NFP_ERRNO(EINVAL); + default: + break; + } + + switch (mode) { + case 0: + /* + * For VQDR, in this mode for 32-bit addressing it would be + * islands 0, 16, 32 and 48 depending on channel and upper + * address bits. Since those are not all valid islands, most + * decode cases would result in bad island IDs, but we do them + * anyway since this is decoding an address that is already + * assumed to be used as-is to get to sram. + */ + iid_lsb = (addr40) ? 34 : 26; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + case 1: + /* + * For VQDR 32-bit, this would decode as: + * Channel 0: island#0 + * Channel 1: island#0 + * Channel 2: island#1 + * Channel 3: island#1 + * + * That would be valid as long as both islands have VQDR. + * Let's allow this. + */ + + idx_lsb = (addr40) ? 39 : 31; + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1; + else + *dest_island = isld0; + + return 0; + case 2: + /* + * For VQDR 32-bit: + * Channel 0: (island#0 | 0) + * Channel 1: (island#0 | 1) + * Channel 2: (island#1 | 0) + * Channel 3: (island#1 | 1) + * + * Make sure we compare against isldN values by clearing the + * LSB. This is what the silicon does. + */ + isld0 &= ~1; + isld1 &= ~1; + + idx_lsb = (addr40) ? 39 : 31; + iid_lsb = idx_lsb - 1; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 1); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 1); + + return 0; + case 3: + /* + * In this mode the data address starts to affect the island ID + * so rather not allow it. In some really specific case one + * could use this to send the upper half of the VQDR channel to + * another MU, but this is getting very specific. However, as + * above for mode 0, this is the decoder and the caller should + * validate the resulting IID. This blindly does what the + * silicon would do. + */ + + isld0 &= ~3; + isld1 &= ~3; + + idx_lsb = (addr40) ? 39 : 31; + iid_lsb = idx_lsb - 2; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 3); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 3); + + return 0; + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_cppat_mu_locality_lsb(int mode, int addr40) +{ + switch (mode) { + case 0: + case 1: + case 2: + case 3: + return (addr40) ? 
38 : 30; + default: + break; + } + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_encode_mu(uint64_t *addr, int dest_island, int mode, int addr40, + int isld1, int isld0) +{ + uint64_t _u64; + int iid_lsb, idx_lsb, locality_lsb; + int i, v; + int isld[2]; + int da; + + isld[0] = isld0; + isld[1] = isld1; + locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40); + + if (((*addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT) + da = 1; + else + da = 0; + + switch (mode) { + case 0: + iid_lsb = (addr40) ? 32 : 24; + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + case 1: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + } + + idx_lsb = (addr40) ? 37 : 29; + if (dest_island == isld0) { + *addr &= ~_nic_mask64(idx_lsb, idx_lsb, 0); + return 0; + } + + if (dest_island == isld1) { + *addr |= (UINT64_C(1) << idx_lsb); + return 0; + } + + return NFP_ERRNO(ENODEV); + case 2: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + } + + /* + * Make sure we compare against isldN values by clearing the + * LSB. This is what the silicon does. + */ + isld[0] &= ~1; + isld[1] &= ~1; + + idx_lsb = (addr40) ? 37 : 29; + iid_lsb = idx_lsb - 1; + + /* + * Try each option, take first one that fits. Not sure if we + * would want to do some smarter searching and prefer 0 or + * non-0 island IDs. + */ + + for (i = 0; i < 2; i++) { + for (v = 0; v < 2; v++) { + if (dest_island != (isld[i] | v)) + continue; + *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0); + *addr |= (((uint64_t)i) << idx_lsb); + *addr |= (((uint64_t)v) << iid_lsb); + return 0; + } + } + return NFP_ERRNO(ENODEV); + case 3: + /* + * Only the EMU will use 40-bit addressing. Silently set the + * direct locality bit for everyone else. The SDK toolchain + * uses dest_island <= 0 to test for atypical address encodings + * to support access to local-island CTM with a 32-bit address + * (high-locality is effectively ignored and just used for + * routing to island #0). + */ + if (dest_island > 0 && + (dest_island < 24 || dest_island > 26)) { + *addr |= ((uint64_t)_NIC_NFP6000_MU_LOCALITY_DIRECT) + << locality_lsb; + da = 1; + } + + if (da) { + iid_lsb = (addr40) ? 32 : 24; + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + } + + isld[0] &= ~3; + isld[1] &= ~3; + + idx_lsb = (addr40) ? 37 : 29; + iid_lsb = idx_lsb - 2; + + for (i = 0; i < 2; i++) { + for (v = 0; v < 4; v++) { + if (dest_island != (isld[i] | v)) + continue; + *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0); + *addr |= (((uint64_t)i) << idx_lsb); + *addr |= (((uint64_t)v) << iid_lsb); + return 0; + } + } + + return NFP_ERRNO(ENODEV); + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_decode_mu(uint64_t addr, int *dest_island, int mode, int addr40, + int isld1, int isld0) +{ + int iid_lsb, idx_lsb, locality_lsb; + int da; + + locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40); + + if (((addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT) + da = 1; + else + da = 0; + + switch (mode) { + case 0: + iid_lsb = (addr40) ?
32 : 24; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + case 1: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + } + + idx_lsb = (addr40) ? 37 : 29; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1; + else + *dest_island = isld0; + + return 0; + case 2: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + } + /* + * Make sure we compare against isldN values by clearing the + * LSB. This is what the silicon does. + */ + isld0 &= ~1; + isld1 &= ~1; + + idx_lsb = (addr40) ? 37 : 29; + iid_lsb = idx_lsb - 1; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 1); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 1); + + return 0; + case 3: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + } + + isld0 &= ~3; + isld1 &= ~3; + + idx_lsb = (addr40) ? 37 : 29; + iid_lsb = idx_lsb - 2; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 3); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 3); + + return 0; + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_cppat_addr_encode(uint64_t *addr, int dest_island, int cpp_tgt, + int mode, int addr40, int isld1, int isld0) +{ + switch (cpp_tgt) { + case NFP6000_CPPTGT_NBI: + case NFP6000_CPPTGT_VQDR: + case NFP6000_CPPTGT_ILA: + case NFP6000_CPPTGT_PCIE: + case NFP6000_CPPTGT_ARM: + case NFP6000_CPPTGT_CRYPTO: + case NFP6000_CPPTGT_CLS: + return _nfp6000_encode_basic(addr, dest_island, cpp_tgt, mode, + addr40, isld1, isld0); + + case NFP6000_CPPTGT_MU: + return _nfp6000_encode_mu(addr, dest_island, mode, addr40, + isld1, isld0); + + case NFP6000_CPPTGT_CTXPB: + if (mode != 1 || addr40 != 0) + return NFP_ERRNO(EINVAL); + + *addr &= ~_nic_mask64(29, 24, 0); + *addr |= (((uint64_t)dest_island) << 24) & + _nic_mask64(29, 24, 0); + return 0; + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_cppat_addr_decode(uint64_t addr, int *dest_island, int cpp_tgt, + int mode, int addr40, int isld1, int isld0) +{ + switch (cpp_tgt) { + case NFP6000_CPPTGT_NBI: + case NFP6000_CPPTGT_VQDR: + case NFP6000_CPPTGT_ILA: + case NFP6000_CPPTGT_PCIE: + case NFP6000_CPPTGT_ARM: + case NFP6000_CPPTGT_CRYPTO: + case NFP6000_CPPTGT_CLS: + return _nfp6000_decode_basic(addr, dest_island, cpp_tgt, mode, + addr40, isld1, isld0); + + case NFP6000_CPPTGT_MU: + return _nfp6000_decode_mu(addr, dest_island, mode, addr40, + isld1, isld0); + + case NFP6000_CPPTGT_CTXPB: + if (mode != 1 || addr40 != 0) + return -EINVAL; + *dest_island = (int)(addr >> 24) & 0x3F; + return 0; + default: + break; + } + + return -EINVAL; +} + +static inline int +_nfp6000_cppat_addr_iid_clear(uint64_t *addr, int cpp_tgt, int mode, int addr40) +{ + int iid_lsb, locality_lsb, da; + + switch (cpp_tgt) { + case NFP6000_CPPTGT_NBI: + case NFP6000_CPPTGT_VQDR: + case NFP6000_CPPTGT_ILA: + case NFP6000_CPPTGT_PCIE: + case NFP6000_CPPTGT_ARM: + case NFP6000_CPPTGT_CRYPTO: + case NFP6000_CPPTGT_CLS: + switch (mode) { + case 0: + iid_lsb = (addr40) ? 34 : 26; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + case 1: + iid_lsb = (addr40) ? 39 : 31; + *addr &= ~_nic_mask64(iid_lsb, iid_lsb, 0); + return 0; + case 2: + iid_lsb = (addr40) ? 38 : 30; + *addr &= ~_nic_mask64(iid_lsb + 1, iid_lsb, 0); + return 0; + case 3: + iid_lsb = (addr40) ? 
37 : 29; + *addr &= ~_nic_mask64(iid_lsb + 2, iid_lsb, 0); + return 0; + default: + break; + } + case NFP6000_CPPTGT_MU: + locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40); + da = (((*addr >> locality_lsb) & 3) == + _NIC_NFP6000_MU_LOCALITY_DIRECT); + switch (mode) { + case 0: + iid_lsb = (addr40) ? 32 : 24; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + case 1: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + } + iid_lsb = (addr40) ? 37 : 29; + *addr &= ~_nic_mask64(iid_lsb, iid_lsb, 0); + return 0; + case 2: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + } + + iid_lsb = (addr40) ? 36 : 28; + *addr &= ~_nic_mask64(iid_lsb + 1, iid_lsb, 0); + return 0; + case 3: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + } + + iid_lsb = (addr40) ? 35 : 27; + *addr &= ~_nic_mask64(iid_lsb + 2, iid_lsb, 0); + return 0; + default: + break; + } + case NFP6000_CPPTGT_CTXPB: + if (mode != 1 || addr40 != 0) + return 0; + *addr &= ~(UINT64_C(0x3F) << 24); + return 0; + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +#endif /* __NFP_CPPAT_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h new file mode 100644 index 00000000..d46574b1 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_PLATFORM_H__ +#define __NFP_PLATFORM_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef BIT_ULL +#define BIT(x) (1 << (x)) +#define BIT_ULL(x) (1ULL << (x)) +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define NFP_ERRNO(err) (errno = (err), -1) +#define NFP_ERRNO_RET(err, ret) (errno = (err), (ret)) +#define NFP_NOERR(errv) (errno) +#define NFP_ERRPTR(err) (errno = (err), NULL) +#define NFP_PTRERR(errv) (errno) + +#endif /* __NFP_PLATFORM_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h new file mode 100644 index 00000000..0e03948e --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h @@ -0,0 +1,592 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_RESID_H__ +#define __NFP_RESID_H__ + +#if (!defined(_NFP_RESID_NO_C_FUNC) && \ + (defined(__NFP_TOOL_NFCC) || defined(__NFP_TOOL_NFAS))) +#define _NFP_RESID_NO_C_FUNC +#endif + +#ifndef _NFP_RESID_NO_C_FUNC +#include "nfp_platform.h" +#endif + +/* + * NFP Chip Architectures + * + * These are semi-arbitrary values to indicate an NFP architecture. + * They serve as a software view of a group of chip families, not necessarily a + * direct mapping to actual hardware design. + */ +#define NFP_CHIP_ARCH_YD 1 +#define NFP_CHIP_ARCH_TH 2 + +/* + * NFP Chip Families. + * + * These are not enums, because they need to be microcode compatible. + * They are also not maskable. + * + * Note: The NFP-4xxx family is handled as NFP-6xxx in most software + * components. 
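A quick note on the NFP_ERRNO() convention defined in nfp_platform.h above: it sets errno and evaluates to -1 in a single expression, which is how the inline helpers in these headers report failure. A minimal usage sketch (function name hypothetical):

static int
example_check_mode(int mode)
{
	if (mode < 0 || mode > 3)
		return NFP_ERRNO(EINVAL);	/* errno = EINVAL, returns -1 */
	return 0;
}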
+ * + */ +#define NFP_CHIP_FAMILY_NFP6000 0x6000 /* ARCH_TH */ + +/* NFP Microengine/Flow Processing Core Versions */ +#define NFP_CHIP_ME_VERSION_2_7 0x0207 +#define NFP_CHIP_ME_VERSION_2_8 0x0208 +#define NFP_CHIP_ME_VERSION_2_9 0x0209 + +/* NFP Chip Base Revisions. Minor stepping can just be added to these */ +#define NFP_CHIP_REVISION_A0 0x00 +#define NFP_CHIP_REVISION_B0 0x10 +#define NFP_CHIP_REVISION_C0 0x20 +#define NFP_CHIP_REVISION_PF 0xff /* Maximum possible revision */ + +/* CPP Targets for each chip architecture */ +#define NFP6000_CPPTGT_NBI 1 +#define NFP6000_CPPTGT_VQDR 2 +#define NFP6000_CPPTGT_ILA 6 +#define NFP6000_CPPTGT_MU 7 +#define NFP6000_CPPTGT_PCIE 9 +#define NFP6000_CPPTGT_ARM 10 +#define NFP6000_CPPTGT_CRYPTO 12 +#define NFP6000_CPPTGT_CTXPB 14 +#define NFP6000_CPPTGT_CLS 15 + +/* + * Wildcard indicating a CPP read or write action + * + * The action used will be either read or write depending on whether a read or + * write instruction/call is performed on the NFP_CPP_ID. It is recommended that + * the RW action is used even if all actions to be performed on a NFP_CPP_ID are + * known to be only reads or writes. Doing so will in many cases save NFP CPP + * internal software resources. + */ +#define NFP_CPP_ACTION_RW 32 + +#define NFP_CPP_TARGET_ID_MASK 0x1f + +/* + * NFP_CPP_ID - pack target, token, and action into a CPP ID. + * + * Create a 32-bit CPP identifier representing the access to be made. + * These identifiers are used as parameters to other NFP CPP functions. Some + * CPP devices may allow wildcard identifiers to be specified. + * + * @param[in] target NFP CPP target id + * @param[in] action NFP CPP action id + * @param[in] token NFP CPP token id + * @return NFP CPP ID + */ +#define NFP_CPP_ID(target, action, token) \ + ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \ + (((action) & 0xff) << 8)) + +#define NFP_CPP_ISLAND_ID(target, action, token, island) \ + ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \ + (((action) & 0xff) << 8) | (((island) & 0xff) << 0)) + +#ifndef _NFP_RESID_NO_C_FUNC + +/** + * Return the NFP CPP target of a NFP CPP ID + * @param[in] id NFP CPP ID + * @return NFP CPP target + */ +static inline uint8_t +NFP_CPP_ID_TARGET_of(uint32_t id) +{ + return (id >> 24) & NFP_CPP_TARGET_ID_MASK; +} + +/* + * Return the NFP CPP token of a NFP CPP ID + * @param[in] id NFP CPP ID + * @return NFP CPP token + */ +static inline uint8_t +NFP_CPP_ID_TOKEN_of(uint32_t id) +{ + return (id >> 16) & 0xff; +} + +/* + * Return the NFP CPP action of a NFP CPP ID + * @param[in] id NFP CPP ID + * @return NFP CPP action + */ +static inline uint8_t +NFP_CPP_ID_ACTION_of(uint32_t id) +{ + return (id >> 8) & 0xff; +} + +/* + * Return the NFP CPP island of a NFP CPP ID + * @param[in] id NFP CPP ID + * @return NFP CPP island + */ +static inline uint8_t +NFP_CPP_ID_ISLAND_of(uint32_t id) +{ + return (id) & 0xff; +} + +#endif /* _NFP_RESID_NO_C_FUNC */ + +/* + * Check if @p chip_family is an ARCH_TH chip. + * @param chip_family One of NFP_CHIP_FAMILY_* + */ +#define NFP_FAMILY_IS_ARCH_TH(chip_family) \ + ((int)(chip_family) == (int)NFP_CHIP_FAMILY_NFP6000) + +/* + * Get the NFP_CHIP_ARCH_* of @p chip_family. + * @param chip_family One of NFP_CHIP_FAMILY_* + */ +#define NFP_FAMILY_ARCH(x) \ + (__extension__ ({ \ + typeof(x) _x = (x); \ + (NFP_FAMILY_IS_ARCH_TH(_x) ? NFP_CHIP_ARCH_TH : \ + NFP_FAMILY_IS_ARCH_YD(_x) ? NFP_CHIP_ARCH_YD : -1) \ + })) + +/* + * Check if @p chip_family is an NFP-6xxx chip.
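A worked example of the NFP_CPP_ID() packing above (editorial; the value follows directly from the macro):

	/* Read/write wildcard access to the MU target, token 0 */
	uint32_t mu_id = NFP_CPP_ID(NFP6000_CPPTGT_MU, NFP_CPP_ACTION_RW, 0);
	/* == (7 << 24) | (0 << 16) | (32 << 8) == 0x07002000 */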
+ * @param chip_family One of NFP_CHIP_FAMILY_* + */ +#define NFP_FAMILY_IS_NFP6000(chip_family) \ + ((int)(chip_family) == (int)NFP_CHIP_FAMILY_NFP6000) + +/* + * Make microengine ID for NFP-6xxx. + * @param island_id Island ID. + * @param menum ME number, 0 based, within island. + * + * NOTE: menum should really be unsigned - MSC compiler throws error (not + * warning) if a clause is always true i.e. menum >= 0 if cluster_num is type + * unsigned int hence the cast of the menum to an int in that particular clause + */ +#define NFP6000_MEID(a, b) \ + (__extension__ ({ \ + typeof(a) _a = (a); \ + typeof(b) _b = (b); \ + (((((int)(_a) & 0x3F) == (int)(_a)) && \ + (((int)(_b) >= 0) && ((int)(_b) < 12))) ? \ + (int)(((_a) << 4) | ((_b) + 4)) : -1) \ + })) + +/* + * Do a general sanity check on the ME ID. + * The check is on the highest possible island ID for the chip family and the + * microengine number must be a master ID. + * @param meid ME ID as created by NFP6000_MEID + */ +#define NFP6000_MEID_IS_VALID(meid) \ + (__extension__ ({ \ + typeof(meid) _a = (meid); \ + ((((_a) >> 4) < 64) && (((_a) >> 4) >= 0) && \ + (((_a) & 0xF) >= 4)) \ + })) + +/* + * Extract island ID from ME ID. + * @param meid ME ID as created by NFP6000_MEID + */ +#define NFP6000_MEID_ISLAND_of(meid) (((meid) >> 4) & 0x3F) + +/* + * Extract microengine number (0 based) from ME ID. + * @param meid ME ID as created by NFP6000_MEID + */ +#define NFP6000_MEID_MENUM_of(meid) (((meid) & 0xF) - 4) + +/* + * Extract microengine group number (0 based) from ME ID. + * The group is two code-sharing microengines, so group 0 refers to MEs 0,1, + * group 1 refers to MEs 2,3 etc. + * @param meid ME ID as created by NFP6000_MEID + */ +#define NFP6000_MEID_MEGRP_of(meid) (NFP6000_MEID_MENUM_of(meid) >> 1) + +#ifndef _NFP_RESID_NO_C_FUNC + +/* + * Convert a string to an ME ID. + * + * @param s A string of format iX.meY + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the ME ID part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return ME ID on success, -1 on error. + */ +int nfp6000_idstr2meid(const char *s, const char **endptr); + +/* + * Extract island ID from string. + * + * Example: + * char *c; + * int val = nfp6000_idstr2island("i32.me5", &c); + * // val == 32, c == "me5" + * val = nfp6000_idstr2island("i32", &c); + * // val == 32, c == "" + * + * @param s A string of format "iX.anything" or "iX" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the island part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the island ID, -1 on error. + */ +int nfp6000_idstr2island(const char *s, const char **endptr); + +/* + * Extract microengine number from string. + * + * Example: + * char *c; + * int menum = nfp6000_idstr2menum("me5.anything", &c); + * // menum == 5, c == "anything" + * menum = nfp6000_idstr2menum("me5", &c); + * // menum == 5, c == "" + * + * @param s A string of format "meX.anything" or "meX" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the ME number part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the ME number, -1 on error. + */ +int nfp6000_idstr2menum(const char *s, const char **endptr); + +/* + * Extract context number from string. 
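A worked example of the ME ID packing macros above (editorial):

	int meid = NFP6000_MEID(32, 5);		/* (32 << 4) | (5 + 4) == 0x209 */
	int isl = NFP6000_MEID_ISLAND_of(meid);	/* == 32 */
	int menum = NFP6000_MEID_MENUM_of(meid);	/* == 5 */
	int megrp = NFP6000_MEID_MEGRP_of(meid);	/* == 2, MEs 4 and 5 share group 2 */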
+ * + * Example: + * char *c; + * int val = nfp6000_idstr2ctxnum("ctx5.anything", &c); + * // val == 5, c == "anything" + * val = nfp6000_idstr2ctxnum("ctx5", &c); + * // val == 5, c == "" + * + * @param s A string of format "ctxN.anything" or "ctxN" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the context number part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the context number, -1 on error. + */ +int nfp6000_idstr2ctxnum(const char *s, const char **endptr); + +/* + * Extract microengine group number from string. + * + * Example: + * char *c; + * int val = nfp6000_idstr2megrp("tg2.anything", &c); + * // val == 2, c == "anything" + * val = nfp6000_idstr2megrp("tg5", &c); + * // val == 5, c == "" + * + * @param s A string of format "tgX.anything" or "tgX" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the ME group part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the ME group number, -1 on error. + */ +int nfp6000_idstr2megrp(const char *s, const char **endptr); + +/* + * Create ME ID string of format "iX[.meY]". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param meid Microengine ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_meid2str(char *s, int meid); + +/* + * Create ME ID string of format "name[.meY]" or "iX[.meY]". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param meid Microengine ID. + * @return Pointer to "s" on success, NULL on error. + * + * Similar to nfp6000_meid2str() except use an alias instead of "iX" + * if one exists for the island. + */ +const char *nfp6000_meid2altstr(char *s, int meid); + +/* + * Create string of format "iX". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param island_id Island ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_island2str(char *s, int island_id); + +/* + * Create string of format "name", an island alias. + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param island_id Island ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_island2altstr(char *s, int island_id); + +/* + * Create string of format "meY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param menum Microengine number within island. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_menum2str(char *s, int menum); + +/* + * Create string of format "ctxY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param ctxnum Context number within microengine. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_ctxnum2str(char *s, int ctxnum); + +/* + * Create string of format "tgY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param megrp Microengine group number within cluster. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_megrp2str(char *s, int megrp); + +/* + * Convert a string to an ME ID.
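Putting the parse and print helpers above together, a hedged round-trip sketch; the buffer size is illustrative since NFP_MEID_STR_SZ is not shown in this excerpt, and the packed value assumes the NFP6000_MEID() layout:

	const char *rest;
	char buf[16];	/* assumed >= NFP_MEID_STR_SZ */
	int meid = nfp6000_idstr2meid("i32.me5", &rest);	/* expect 0x209, rest == "" */
	nfp6000_meid2str(buf, meid);				/* expect buf == "i32.me5" */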
+ * + * @param chip_family Chip family ID + * @param s A string of format iX.meY (or clX.meY) + * @param endptr If non-NULL, *endptr will point to the trailing + * string after the ME ID part of the string, which + * is either an empty string or the first character + * after the separating period. + * @return ME ID on success, -1 on error. + */ +int nfp_idstr2meid(int chip_family, const char *s, const char **endptr); + +/* + * Extract island ID from string. + * + * Example: + * char *c; + * int val = nfp_idstr2island(chip, "i32.me5", &c); + * // val == 32, c == "me5" + * val = nfp_idstr2island(chip, "i32", &c); + * // val == 32, c == "" + * + * @param chip_family Chip family ID + * @param s A string of format "iX.anything" or "iX" + * @param endptr If non-NULL, *endptr will point to the trailing + * string after the island part of the string, which + * is either an empty string or the first character + * after the separating period. + * @return The island ID on success, -1 on error. + */ +int nfp_idstr2island(int chip_family, const char *s, const char **endptr); + +/* + * Extract microengine number from string. + * + * Example: + * char *c; + * int menum = nfp_idstr2menum("me5.anything", &c); + * // menum == 5, c == "anything" + * menum = nfp_idstr2menum("me5", &c); + * // menum == 5, c == "" + * + * @param chip_family Chip family ID + * @param s A string of format "meX.anything" or "meX" + * @param endptr If non-NULL, *endptr will point to the trailing + * string after the ME number part of the string, which + * is either an empty string or the first character + * after the separating period. + * @return The ME number on success, -1 on error. + */ +int nfp_idstr2menum(int chip_family, const char *s, const char **endptr); + +/* + * Extract context number from string. + * + * Example: + * char *c; + * int val = nfp_idstr2ctxnum("ctx5.anything", &c); + * // val == 5, c == "anything" + * val = nfp_idstr2ctxnum("ctx5", &c); + * // val == 5, c == "" + * + * @param s A string of format "ctxN.anything" or "ctxN" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the context number part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the context number, -1 on error. + */ +int nfp_idstr2ctxnum(int chip_family, const char *s, const char **endptr); + +/* + * Extract microengine group number from string. + * + * Example: + * char *c; + * int val = nfp_idstr2megrp("tg2.anything", &c); + * // val == 2, c == "anything" + * val = nfp_idstr2megrp("tg5", &c); + * // val == 5, c == "" + * + * @param s A string of format "tgX.anything" or "tgX" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the ME group part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the ME group number, -1 on error. + */ +int nfp_idstr2megrp(int chip_family, const char *s, const char **endptr); + +/* + * Create ME ID string of format "iX[.meY]". + * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param meid Microengine ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_meid2str(int chip_family, char *s, int meid); + +/* + * Create ME ID string of format "name[.meY]" or "iX[.meY]". + * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here. + * @param meid Microengine ID. + * @return Pointer to "s" on success, NULL on error. + * + * Similar to nfp_meid2str() except use an alias instead of "iX" + * if one exists for the island. + */ +const char *nfp_meid2altstr(int chip_family, char *s, int meid); + +/* + * Create string of format "iX". + * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param island_id Island ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_island2str(int chip_family, char *s, int island_id); + +/* + * Create string of format "name", an island alias. + * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param island_id Island ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_island2altstr(int chip_family, char *s, int island_id); + +/* + * Create string of format "meY". + * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param menum Microengine number within island. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_menum2str(int chip_family, char *s, int menum); + +/* + * Create string of format "ctxY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param ctxnum Context number within microengine. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_ctxnum2str(int chip_family, char *s, int ctxnum); + +/* + * Create string of format "tgY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param megrp Microengine group number within cluster. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_megrp2str(int chip_family, char *s, int megrp); + +/* + * Convert a two character string to revision number. + * + * Revision integer is 0x00 for A0, 0x11 for B1 etc. + * + * @param s Two character string. + * @return Revision number, -1 on error + */ +int nfp_idstr2rev(const char *s); + +/* + * Create string from revision number. + * + * String will be upper case. + * + * @param s Pointer to char buffer with size of at least 3 + * for 2 characters and string terminator. + * @param rev Revision number. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_rev2str(char *s, int rev); + +/* + * Get the NFP CPP address from a string + * + * String is in the format [island@]target[:[action:[token:]]address] + * + * @param chip_family Chip family ID + * @param tid Pointer to string to parse + * @param cpp_idp Pointer to CPP ID + * @param cpp_addrp Pointer to CPP address + * @return 0 on success, or -1 and errno + */ +int nfp_str2cpp(int chip_family, + const char *tid, + uint32_t *cpp_idp, + uint64_t *cpp_addrp); + + +#endif /* _NFP_RESID_NO_C_FUNC */ + +#endif /* __NFP_RESID_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h new file mode 100644 index 00000000..47e1ddae --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
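A hedged usage sketch for nfp_str2cpp() documented above; the numeric spelling of each field is an assumption read off the stated format [island@]target[:[action:[token:]]address]:

	uint32_t cpp_id;
	uint64_t cpp_addr;
	/* island 24, MU target (7), RW action (32), token 0, address 0x4000 */
	if (nfp_str2cpp(NFP_CHIP_FAMILY_NFP6000, "24@7:32:0:0x4000",
			&cpp_id, &cpp_addr) == 0) {
		/* cpp_id packs target/action/token/island; cpp_addr == 0x4000 */
	}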
+ */ + +#ifndef __NFP_NFP6000_H__ +#define __NFP_NFP6000_H__ + +/* CPP Target IDs */ +#define NFP_CPP_TARGET_INVALID 0 +#define NFP_CPP_TARGET_NBI 1 +#define NFP_CPP_TARGET_QDR 2 +#define NFP_CPP_TARGET_ILA 6 +#define NFP_CPP_TARGET_MU 7 +#define NFP_CPP_TARGET_PCIE 9 +#define NFP_CPP_TARGET_ARM 10 +#define NFP_CPP_TARGET_CRYPTO 12 +#define NFP_CPP_TARGET_ISLAND_XPB 14 /* Shared with CAP */ +#define NFP_CPP_TARGET_ISLAND_CAP 14 /* Shared with XPB */ +#define NFP_CPP_TARGET_CT_XPB 14 +#define NFP_CPP_TARGET_LOCAL_SCRATCH 15 +#define NFP_CPP_TARGET_CLS NFP_CPP_TARGET_LOCAL_SCRATCH + +#define NFP_ISL_EMEM0 24 + +#define NFP_MU_ADDR_ACCESS_TYPE_MASK 3ULL +#define NFP_MU_ADDR_ACCESS_TYPE_DIRECT 2ULL + +static inline int +nfp_cppat_mu_locality_lsb(int mode, int addr40) +{ + switch (mode) { + case 0 ... 3: + return addr40 ? 38 : 30; + default: + return -EINVAL; + } +} + +#endif /* NFP_NFP6000_H */ diff --git a/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h new file mode 100644 index 00000000..7ada1bb2 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_XPB_H__ +#define __NFP_XPB_H__ + +/* + * For use with NFP6000 Databook "XPB Addressing" section + */ +#define NFP_XPB_OVERLAY(island) (((island) & 0x3f) << 24) + +#define NFP_XPB_ISLAND(island) (NFP_XPB_OVERLAY(island) + 0x60000) + +#define NFP_XPB_ISLAND_of(offset) (((offset) >> 24) & 0x3F) + +/* + * For use with NFP6000 Databook "XPB Island and Device IDs" chapter + */ +#define NFP_XPB_DEVICE(island, slave, device) \ + (NFP_XPB_OVERLAY(island) | \ + (((slave) & 3) << 22) | \ + (((device) & 0x3f) << 16)) + +#endif /* NFP_XPB_H */ diff --git a/drivers/net/nfp/nfpcore/nfp_cpp.h b/drivers/net/nfp/nfpcore/nfp_cpp.h new file mode 100644 index 00000000..1427954c --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_cpp.h @@ -0,0 +1,781 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_CPP_H__ +#define __NFP_CPP_H__ + +#include + +#include "nfp-common/nfp_platform.h" +#include "nfp-common/nfp_resid.h" + +struct nfp_cpp_mutex; + +/* + * NFP CPP handle + */ +struct nfp_cpp { + uint32_t model; + uint32_t interface; + uint8_t *serial; + int serial_len; + void *priv; + + /* Mutex cache */ + struct nfp_cpp_mutex *mutex_cache; + const struct nfp_cpp_operations *op; + + /* + * NFP-6xxx originating island IMB CPP Address Translation. CPP Target + * ID is index into array. Values are obtained at runtime from local + * island XPB CSRs. + */ + uint32_t imb_cat_table[16]; + + int driver_lock_needed; +}; + +/* + * NFP CPP device area handle + */ +struct nfp_cpp_area { + struct nfp_cpp *cpp; + char *name; + unsigned long long offset; + unsigned long size; + /* Here follows the 'priv' part of nfp_cpp_area. */ +}; + +/* + * NFP CPP operations structure + */ +struct nfp_cpp_operations { + /* Size of priv area in struct nfp_cpp_area */ + size_t area_priv_size; + + /* Instance an NFP CPP */ + int (*init)(struct nfp_cpp *cpp, struct rte_pci_device *dev); + + /* + * Free the bus. 
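A worked example of the XPB addressing macros in nfp_xpb.h above (editorial; values follow from the macros):

	/* CSR device 0x2, slave 0, on island 32 */
	uint32_t xpb = NFP_XPB_DEVICE(32, 0, 0x2);
	/* == (32 << 24) | (0 << 22) | (0x2 << 16) == 0x20020000 */
	int isl = NFP_XPB_ISLAND_of(xpb);	/* == 32 */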
+ * Called only once, during nfp_cpp_unregister() + */ + void (*free)(struct nfp_cpp *cpp); + + /* + * Initialize a new NFP CPP area + * NOTE: This is _not_ serialized + */ + int (*area_init)(struct nfp_cpp_area *area, + uint32_t dest, + unsigned long long address, + unsigned long size); + /* + * Clean up a NFP CPP area before it is freed + * NOTE: This is _not_ serialized + */ + void (*area_cleanup)(struct nfp_cpp_area *area); + + /* + * Acquire resources for a NFP CPP area + * Serialized + */ + int (*area_acquire)(struct nfp_cpp_area *area); + /* + * Release resources for a NFP CPP area + * Serialized + */ + void (*area_release)(struct nfp_cpp_area *area); + /* + * Return a void IO pointer to a NFP CPP area + * NOTE: This is _not_ serialized + */ + + void *(*area_iomem)(struct nfp_cpp_area *area); + + void *(*area_mapped)(struct nfp_cpp_area *area); + /* + * Perform a read from a NFP CPP area + * Serialized + */ + int (*area_read)(struct nfp_cpp_area *area, + void *kernel_vaddr, + unsigned long offset, + unsigned int length); + /* + * Perform a write to a NFP CPP area + * Serialized + */ + int (*area_write)(struct nfp_cpp_area *area, + const void *kernel_vaddr, + unsigned long offset, + unsigned int length); +}; + +/* + * This should be the only external function the transport + * module supplies + */ +const struct nfp_cpp_operations *nfp_cpp_transport_operations(void); + +/* + * Set the model id + * + * @param cpp NFP CPP operations structure + * @param model Model ID + */ +void nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model); + +/* + * Set the private instance owned data of a nfp_cpp struct + * + * @param cpp NFP CPP operations structure + * @param interface Interface ID + */ +void nfp_cpp_interface_set(struct nfp_cpp *cpp, uint32_t interface); + +/* + * Set the private instance owned data of a nfp_cpp struct + * + * @param cpp NFP CPP operations structure + * @param serial NFP serial byte array + * @param len Length of the serial byte array + */ +int nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial, + size_t serial_len); + +/* + * Set the private data of the nfp_cpp instance + * + * @param cpp NFP CPP operations structure + * @return Opaque device pointer + */ +void nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv); + +/* + * Return the private data of the nfp_cpp instance + * + * @param cpp NFP CPP operations structure + * @return Opaque device pointer + */ +void *nfp_cpp_priv(struct nfp_cpp *cpp); + +/* + * Get the privately allocated portion of a NFP CPP area handle + * + * @param cpp_area NFP CPP area handle + * @return Pointer to the private area, or NULL on failure + */ +void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area); + +uint32_t __nfp_cpp_model_autodetect(struct nfp_cpp *cpp); + +/* + * NFP CPP core interface for CPP clients. + */ + +/* + * Open a NFP CPP handle to a CPP device + * + * @param[in] id 0-based ID for the CPP interface to use + * + * @return NFP CPP handle, or NULL on failure (and set errno accordingly). + */ +struct nfp_cpp *nfp_cpp_from_device_name(struct rte_pci_device *dev, + int driver_lock_needed); + +/* + * Free a NFP CPP handle + * + * @param[in] cpp NFP CPP handle + */ +void nfp_cpp_free(struct nfp_cpp *cpp); + +#define NFP_CPP_MODEL_INVALID 0xffffffff + +/* + * NFP_CPP_MODEL_CHIP_of - retrieve the chip ID from the model ID + * + * The chip ID is a 16-bit BCD+A-F encoding for the chip type. 
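A worked example of the model decoding documented here (the macro itself is defined just below; the model value is illustrative):

	uint32_t chip = NFP_CPP_MODEL_CHIP_of(0x62000010);	/* == 0x6200 */
	/* NFP_CPP_MODEL_IS_6000(0x62000010) is then nonzero,
	 * since 0x6200 lies in [0x4000, 0x7000) */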
+ * + * @param[in] model NFP CPP model id + * @return NFP CPP chip id + */ +#define NFP_CPP_MODEL_CHIP_of(model) (((model) >> 16) & 0xffff) + +/* + * NFP_CPP_MODEL_IS_6000 - Check for the NFP6000 family of devices + * + * NOTE: The NFP4000 series is considered as a NFP6000 series variant. + * + * @param[in] model NFP CPP model id + * @return true if model is in the NFP6000 family, false otherwise. + */ +#define NFP_CPP_MODEL_IS_6000(model) \ + ((NFP_CPP_MODEL_CHIP_of(model) >= 0x4000) && \ + (NFP_CPP_MODEL_CHIP_of(model) < 0x7000)) + +/* + * nfp_cpp_model - Retrieve the Model ID of the NFP + * + * @param[in] cpp NFP CPP handle + * @return NFP CPP Model ID + */ +uint32_t nfp_cpp_model(struct nfp_cpp *cpp); + +/* + * NFP Interface types - logical interface for this CPP connection 4 bits are + * reserved for interface type. + */ +#define NFP_CPP_INTERFACE_TYPE_INVALID 0x0 +#define NFP_CPP_INTERFACE_TYPE_PCI 0x1 +#define NFP_CPP_INTERFACE_TYPE_ARM 0x2 +#define NFP_CPP_INTERFACE_TYPE_RPC 0x3 +#define NFP_CPP_INTERFACE_TYPE_ILA 0x4 + +/* + * Construct a 16-bit NFP Interface ID + * + * Interface IDs consists of 4 bits of interface type, 4 bits of unit + * identifier, and 8 bits of channel identifier. + * + * The NFP Interface ID is used in the implementation of NFP CPP API mutexes, + * which use the MU Atomic CompareAndWrite operation - hence the limit to 16 + * bits to be able to use the NFP Interface ID as a lock owner. + * + * @param[in] type NFP Interface Type + * @param[in] unit Unit identifier for the interface type + * @param[in] channel Channel identifier for the interface unit + * @return Interface ID + */ +#define NFP_CPP_INTERFACE(type, unit, channel) \ + ((((type) & 0xf) << 12) | \ + (((unit) & 0xf) << 8) | \ + (((channel) & 0xff) << 0)) + +/* + * Get the interface type of a NFP Interface ID + * @param[in] interface NFP Interface ID + * @return NFP Interface ID's type + */ +#define NFP_CPP_INTERFACE_TYPE_of(interface) (((interface) >> 12) & 0xf) + +/* + * Get the interface unit of a NFP Interface ID + * @param[in] interface NFP Interface ID + * @return NFP Interface ID's unit + */ +#define NFP_CPP_INTERFACE_UNIT_of(interface) (((interface) >> 8) & 0xf) + +/* + * Get the interface channel of a NFP Interface ID + * @param[in] interface NFP Interface ID + * @return NFP Interface ID's channel + */ +#define NFP_CPP_INTERFACE_CHANNEL_of(interface) (((interface) >> 0) & 0xff) + +/* + * Retrieve the Interface ID of the NFP + * @param[in] cpp NFP CPP handle + * @return NFP CPP Interface ID + */ +uint16_t nfp_cpp_interface(struct nfp_cpp *cpp); + +/* + * Retrieve the NFP Serial Number (unique per NFP) + * @param[in] cpp NFP CPP handle + * @param[out] serial Pointer to reference the serial number array + * + * @return size of the NFP6000 serial number, in bytes + */ +int nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial); + +/* + * Allocate a NFP CPP area handle, as an offset into a CPP ID + * @param[in] cpp NFP CPP handle + * @param[in] cpp_id NFP CPP ID + * @param[in] address Offset into the NFP CPP ID address space + * @param[in] size Size of the area to reserve + * + * @return NFP CPP handle, or NULL on failure (and set errno accordingly). 
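A worked example of the interface ID packing above (editorial):

	/* PCI interface, unit 0, channel 0xff */
	uint16_t ifc = NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff);
	/* == (0x1 << 12) | (0 << 8) | 0xff == 0x10ff */
	int type = NFP_CPP_INTERFACE_TYPE_of(ifc);	/* == NFP_CPP_INTERFACE_TYPE_PCI */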
+/*
+ * Allocate a NFP CPP area handle, as an offset into a CPP ID
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] size Size of the area to reserve
+ *
+ * @return NFP CPP area handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t cpp_id,
+                                        unsigned long long address,
+                                        unsigned long size);
+
+/*
+ * Allocate a NFP CPP area handle, as an offset into a CPP ID, by a named owner
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] name Name of owner of the area
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] size Size of the area to reserve
+ *
+ * @return NFP CPP area handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
+                                                  uint32_t cpp_id,
+                                                  const char *name,
+                                                  unsigned long long address,
+                                                  unsigned long size);
+
+/*
+ * Free an allocated NFP CPP area handle
+ * @param[in] area NFP CPP area handle
+ */
+void nfp_cpp_area_free(struct nfp_cpp_area *area);
+
+/*
+ * Acquire the resources needed to access the NFP CPP area handle
+ *
+ * @param[in] area NFP CPP area handle
+ *
+ * @return 0 on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_acquire(struct nfp_cpp_area *area);
+
+/*
+ * Release the resources needed to access the NFP CPP area handle
+ *
+ * @param[in] area NFP CPP area handle
+ */
+void nfp_cpp_area_release(struct nfp_cpp_area *area);
+
+/*
+ * Allocate, then acquire the resources needed to access the NFP CPP area
+ * handle
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] size Size of the area to reserve
+ *
+ * @return NFP CPP area handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp_area *nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp,
+                                                uint32_t cpp_id,
+                                                unsigned long long address,
+                                                unsigned long size);
+
+/*
+ * Release the resources, then free the NFP CPP area handle
+ * @param[in] area NFP CPP area handle
+ */
+void nfp_cpp_area_release_free(struct nfp_cpp_area *area);
+
+uint8_t *nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target,
+                          uint64_t addr, unsigned long size,
+                          struct nfp_cpp_area **area);
+
+/*
+ * Return a pointer to the beginning of the mapped NFP CPP area. The area
+ * must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ *
+ * @param[in] area NFP CPP area handle
+ *
+ * @return Pointer to IO memory, or NULL on failure (and set errno accordingly).
+ */
+void *nfp_cpp_area_mapped(struct nfp_cpp_area *area);
+
+/*
+ * Read from a NFP CPP area handle into a buffer. The area must be acquired
+ * with 'nfp_cpp_area_acquire()' before calling this operation.
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the area
+ * @param[in] buffer Location of buffer to receive the data
+ * @param[in] length Length of the data to read
+ *
+ * @return bytes read on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
+                      void *buffer, size_t length);
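
Typical client code strings the area calls together as allocate + acquire, access, then tear down. A minimal sketch; the CPP target and address are placeholder values, and NFP_CPP_ID()/NFP_CPP_TARGET_MU come from elsewhere in this driver:

    /* Read one 32-bit word from a placeholder MU address. */
    struct nfp_cpp_area *area;
    uint32_t val;

    area = nfp_cpp_area_alloc_acquire(cpp,
                                      NFP_CPP_ID(NFP_CPP_TARGET_MU,
                                                 NFP_CPP_ACTION_RW, 0),
                                      0x100000, sizeof(val));
    if (area == NULL)
        return -1;

    if (nfp_cpp_area_readl(area, 0, &val) < 0) {
        nfp_cpp_area_release_free(area);
        return -1;
    }
    nfp_cpp_area_release_free(area);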
+/*
+ * Write to a NFP CPP area handle from a buffer. The area must be acquired
+ * with 'nfp_cpp_area_acquire()' before calling this operation.
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the area
+ * @param[in] buffer Location of buffer that holds the data
+ * @param[in] length Length of the data to write
+ *
+ * @return bytes written on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
+                       const void *buffer, size_t length);
+
+/*
+ * nfp_cpp_area_iomem() - get IOMEM region for CPP area
+ * @area: CPP area handle
+ *
+ * Returns an iomem pointer for use with readl()/writel() style operations.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: pointer to the area, or NULL
+ */
+void *nfp_cpp_area_iomem(struct nfp_cpp_area *area);
+
+/*
+ * Verify that IO can be performed on an offset in an area
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the area
+ * @param[in] size Size of region to validate
+ *
+ * @return 0 on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
+                             unsigned long long offset, unsigned long size);
+
+/*
+ * Get the NFP CPP handle that is the parent of a NFP CPP area handle
+ *
+ * @param cpp_area NFP CPP area handle
+ * @return NFP CPP handle
+ */
+struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
+
+/*
+ * Get the name passed during allocation of the NFP CPP area handle
+ *
+ * @param cpp_area NFP CPP area handle
+ * @return Pointer to the area's name
+ */
+const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
+
+/*
+ * Read a block of data from a NFP CPP ID
+ *
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] kernel_vaddr Buffer to copy read data to
+ * @param[in] length Length of the data to read
+ *
+ * @return bytes read on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_read(struct nfp_cpp *cpp, uint32_t cpp_id,
+                 unsigned long long address, void *kernel_vaddr,
+                 size_t length);
+
+/*
+ * Write a block of data to a NFP CPP ID
+ *
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] kernel_vaddr Buffer to copy write data from
+ * @param[in] length Length of the data to write
+ *
+ * @return bytes written on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_write(struct nfp_cpp *cpp, uint32_t cpp_id,
+                  unsigned long long address, const void *kernel_vaddr,
+                  size_t length);
+
+/*
+ * Fill a NFP CPP area handle and offset with a value
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the NFP CPP ID address space
+ * @param[in] value 32-bit value to fill area with
+ * @param[in] length Length of the area to fill
+ *
+ * @return bytes written on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset,
+                      uint32_t value, size_t length);
+
+/*
+ * Read a single 32-bit value from a NFP CPP area handle
+ *
+ * @param area NFP CPP area handle
+ * @param offset offset into NFP CPP area handle
+ * @param value output value
+ *
+ * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ *
+ * NOTE: offset must be 32-bit aligned.
+ *
+ * @return 0 on success, or -1 on error (and set errno accordingly).
+ */
+int nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset,
+                       uint32_t *value);
+
+/*
+ * Write a single 32-bit value to a NFP CPP area handle
+ *
+ * @param area NFP CPP area handle
+ * @param offset offset into NFP CPP area handle
+ * @param value value to write
+ *
+ * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ * + * NOTE: offset must be 32-bit aligned. + * + * @return 0 on success, or -1 on error (and set errno accordingly). + */ +int nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset, + uint32_t value); + +/* + * Read a single 64-bit value from a NFP CPP area handle + * + * @param area NFP CPP area handle + * @param offset offset into NFP CPP area handle + * @param value output value + * + * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this + * operation. + * + * NOTE: offset must be 64-bit aligned. + * + * @return 0 on success, or -1 on error (and set errno accordingly). + */ +int nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset, + uint64_t *value); + +/* + * Write a single 64-bit value to a NFP CPP area handle + * + * @param area NFP CPP area handle + * @param offset offset into NFP CPP area handle + * @param value value to write + * + * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this + * operation. + * + * NOTE: offset must be 64-bit aligned. + * + * @return 0 on success, or -1 on error (and set errno accordingly). + */ +int nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset, + uint64_t value); + +/* + * Write a single 32-bit value on the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param value value to write + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t value); + +/* + * Read a single 32-bit value from the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param value output value + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t *value); + +/* + * Modify bits of a 32-bit value from the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param mask mask of bits to alter + * @param value value to modify + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_xpb_writelm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, + uint32_t value); + +/* + * Modify bits of a 32-bit value from the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param mask mask of bits to alter + * @param value value to monitor for + * @param timeout_us maximum number of us to wait (-1 for forever) + * + * @return >= 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_xpb_waitlm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, + uint32_t value, int timeout_us); + +/* + * Read a 32-bit word from a NFP CPP ID + * + * @param cpp NFP CPP handle + * @param cpp_id NFP CPP ID + * @param address offset into the NFP CPP ID address space + * @param value output value + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_readl(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, uint32_t *value); + +/* + * Write a 32-bit value to a NFP CPP ID + * + * @param cpp NFP CPP handle + * @param cpp_id NFP CPP ID + * @param address offset into the NFP CPP ID address space + * @param value value to write + * + * @return 0 on success, or -1 on failure (and set errno accordingly). 
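
For example, a caller that wants to change one field of an XPB register without disturbing its neighbours would use the masked write; the register address below is a placeholder, not a real NFP CSR:

    /* Set bits 7:4 of a hypothetical XPB register to 0x3, keep the rest. */
    uint32_t xpb_addr = 0x000a0040;    /* placeholder XPB target+address */

    if (nfp_xpb_writelm(cpp, xpb_addr, 0x000000f0, 0x3 << 4) < 0)
        return -1;
    /* Internally: readl, clear the masked bits, OR in (mask & value), writel. */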
+ */
+int nfp_cpp_writel(struct nfp_cpp *cpp, uint32_t cpp_id,
+                   unsigned long long address, uint32_t value);
+
+/*
+ * Read a 64-bit word from a NFP CPP ID
+ *
+ * @param cpp NFP CPP handle
+ * @param cpp_id NFP CPP ID
+ * @param address offset into the NFP CPP ID address space
+ * @param value output value
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_readq(struct nfp_cpp *cpp, uint32_t cpp_id,
+                  unsigned long long address, uint64_t *value);
+
+/*
+ * Write a 64-bit value to a NFP CPP ID
+ *
+ * @param cpp NFP CPP handle
+ * @param cpp_id NFP CPP ID
+ * @param address offset into the NFP CPP ID address space
+ * @param value value to write
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_writeq(struct nfp_cpp *cpp, uint32_t cpp_id,
+                   unsigned long long address, uint64_t value);
+
+/*
+ * Initialize a mutex location
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and will
+ * initialize 64 bits of data at the location.
+ *
+ * This creates the initial mutex state, as locked by this nfp_cpp_interface().
+ *
+ * This function should only be called when setting up the initial lock state
+ * upon boot-up of the system.
+ *
+ * @param cpp NFP CPP handle
+ * @param target NFP CPP target ID
+ * @param address Offset into the address space of the NFP CPP target ID
+ * @param key_id Unique 32-bit value for this mutex
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target,
+                       unsigned long long address, uint32_t key_id);
+
+/*
+ * Create a mutex handle from an address controlled by a MU Atomic engine
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and reserve
+ * 64 bits of data at the location for use by the handle.
+ *
+ * Only target/address pairs that point to entities that support the MU Atomic
+ * Engine's CmpAndSwap32 command are supported.
+ *
+ * @param cpp NFP CPP handle
+ * @param target NFP CPP target ID
+ * @param address Offset into the address space of the NFP CPP target ID
+ * @param key_id 32-bit unique key (must match the key at this location)
+ *
+ * @return A non-NULL struct nfp_cpp_mutex * on success, NULL on
+ * failure.
+ */
+struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
+                                          unsigned long long address,
+                                          uint32_t key_id);
+
+/*
+ * Get the NFP CPP handle the mutex was created with
+ *
+ * @param mutex NFP mutex handle
+ * @return NFP CPP handle
+ */
+struct nfp_cpp *nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Get the mutex key
+ *
+ * @param mutex NFP mutex handle
+ * @return Mutex key
+ */
+uint32_t nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Get the mutex owner
+ *
+ * @param mutex NFP mutex handle
+ * @return Interface ID of the mutex owner
+ *
+ * NOTE: This is for debug purposes ONLY - the owner may change at any time,
+ * unless it has been locked by this NFP CPP handle.
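
Putting the mutex calls together, a client might guard a shared resource as sketched below; the target, address and key are placeholders, and the location must have been set up once with nfp_cpp_mutex_init():

    struct nfp_cpp_mutex *mutex;

    /* Placeholder MU offset and key; both must match the init-time values. */
    mutex = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU, 0x4000, 0x11223344);
    if (mutex == NULL)
        return -1;

    if (nfp_cpp_mutex_lock(mutex) == 0) {
        /* ... access the resource protected by this lock ... */
        nfp_cpp_mutex_unlock(mutex);
    }
    nfp_cpp_mutex_free(mutex);    /* frees the handle, lock state unchanged */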
+ */ +uint16_t nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex); + +/* + * Get the mutex target + * + * @param mutex NFP mutex handle + * @return Mutex CPP target (ie NFP_CPP_TARGET_MU) + */ +int nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex); + +/* + * Get the mutex address + * + * @param mutex NFP mutex handle + * @return Mutex CPP address + */ +uint64_t nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex); + +/* + * Free a mutex handle - does not alter the lock state + * + * @param mutex NFP CPP Mutex handle + */ +void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex); + +/* + * Lock a mutex handle, using the NFP MU Atomic Engine + * + * @param mutex NFP CPP Mutex handle + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex); + +/* + * Unlock a mutex handle, using the NFP MU Atomic Engine + * + * @param mutex NFP CPP Mutex handle + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex); + +/* + * Attempt to lock a mutex handle, using the NFP MU Atomic Engine + * + * @param mutex NFP CPP Mutex handle + * @return 0 if the lock succeeded, -1 on failure (and errno set + * appropriately). + */ +int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex); + +#endif /* !__NFP_CPP_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c new file mode 100644 index 00000000..c68d9400 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c @@ -0,0 +1,845 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +/* + * nfp_cpp_pcie_ops.c + * Authors: Vinayak Tammineedi + * + * Multiplexes the NFP BARs between NFP internal resources and + * implements the PCIe specific interface for generic CPP bus access. + * + * The BARs are managed and allocated if they are available. + * The generic CPP bus abstraction builds upon this BAR interface. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "nfp_cpp.h" +#include "nfp_target.h" +#include "nfp6000/nfp6000.h" + +#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0) + +#define NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(_x) (((_x) & 0x1f) << 16) +#define NFP_PCIE_BAR_PCIE2CPP_BASEADDRESS(_x) (((_x) & 0xffff) << 0) +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT(_x) (((_x) & 0x3) << 27) +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT 0 +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT 1 +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE 3 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE(_x) (((_x) & 0x7) << 29) +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(_x) (((_x) >> 29) & 0x7) +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED 0 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK 1 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_TARGET 2 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL 3 +#define NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(_x) (((_x) & 0xf) << 23) +#define NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(_x) (((_x) & 0x3) << 21) + +/* + * Minimal size of the PCIe cfg memory we depend on being mapped, + * queue controller and DMA controller don't have to be covered. 
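
As an illustration of how the PCIe2CPP fields pack into a single BAR config word, a bulk-type mapping for target 7, token 0 could be composed from the macros above like this (all values arbitrary):

    /* Hypothetical BARCFG word: bulk map, 64-bit length select, target 7. */
    uint32_t barcfg =
        NFP_PCIE_BAR_PCIE2CPP_MAPTYPE(NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK) |
        NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT(
            NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT) |
        NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(7) |
        NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(0) |
        NFP_PCIE_BAR_PCIE2CPP_BASEADDRESS(0x1234);

nfp_compute_bar() below builds exactly this kind of word, choosing between fixed and bulk map types depending on the requested action.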
+ */ +#define NFP_PCI_MIN_MAP_SIZE 0x080000 + +#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize) +#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize) +#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2)) +#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4)) +#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4)) + +#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar, slot) \ + (NFP_PCIE_BAR(0) + ((bar) * 8 + (slot)) * 4) + +#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPBAR(bar, slot) \ + (((bar) * 8 + (slot)) * 4) + +/* + * Define to enable a bit more verbose debug output. + * Set to 1 to enable a bit more verbose debug output. + */ +struct nfp_pcie_user; +struct nfp6000_area_priv; + +/* + * struct nfp_bar - describes BAR configuration and usage + * @nfp: backlink to owner + * @barcfg: cached contents of BAR config CSR + * @base: the BAR's base CPP offset + * @mask: mask for the BAR aperture (read only) + * @bitsize: bitsize of BAR aperture (read only) + * @index: index of the BAR + * @lock: lock to specify if bar is in use + * @refcnt: number of current users + * @iomem: mapped IO memory + */ +#define NFP_BAR_MAX 7 +struct nfp_bar { + struct nfp_pcie_user *nfp; + uint32_t barcfg; + uint64_t base; /* CPP address base */ + uint64_t mask; /* Bit mask of the bar */ + uint32_t bitsize; /* Bit size of the bar */ + int index; + int lock; + + char *csr; + char *iomem; +}; + +#define BUSDEV_SZ 13 +struct nfp_pcie_user { + struct nfp_bar bar[NFP_BAR_MAX]; + + int device; + int lock; + char busdev[BUSDEV_SZ]; + int barsz; + char *cfg; +}; + +static uint32_t +nfp_bar_maptype(struct nfp_bar *bar) +{ + return NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(bar->barcfg); +} + +#define TARGET_WIDTH_32 4 +#define TARGET_WIDTH_64 8 + +static int +nfp_compute_bar(const struct nfp_bar *bar, uint32_t *bar_config, + uint64_t *bar_base, int tgt, int act, int tok, + uint64_t offset, size_t size, int width) +{ + uint32_t bitsize; + uint32_t newcfg; + uint64_t mask; + + if (tgt >= 16) + return -EINVAL; + + switch (width) { + case 8: + newcfg = + NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT + (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT); + break; + case 4: + newcfg = + NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT + (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT); + break; + case 0: + newcfg = + NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT + (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE); + break; + default: + return -EINVAL; + } + + if (act != NFP_CPP_ACTION_RW && act != 0) { + /* Fixed CPP mapping with specific action */ + mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1); + + newcfg |= + NFP_PCIE_BAR_PCIE2CPP_MAPTYPE + (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(tgt); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(act); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(tok); + + if ((offset & mask) != ((offset + size - 1) & mask)) { + printf("BAR%d: Won't use for Fixed mapping\n", + bar->index); + printf("\t<%#llx,%#llx>, action=%d\n", + (unsigned long long)offset, + (unsigned long long)(offset + size), act); + printf("\tBAR too small (0x%llx).\n", + (unsigned long long)mask); + return -EINVAL; + } + offset &= mask; + +#ifdef DEBUG + printf("BAR%d: Created Fixed mapping\n", bar->index); + printf("\t%d:%d:%d:0x%#llx-0x%#llx>\n", tgt, act, tok, + (unsigned long long)offset, + (unsigned long long)(offset + mask)); +#endif + + bitsize = 40 - 16; + } else { + mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1); + + /* Bulk mapping */ + newcfg |= + 
NFP_PCIE_BAR_PCIE2CPP_MAPTYPE + (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK); + + newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(tgt); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(tok); + + if ((offset & mask) != ((offset + size - 1) & mask)) { + printf("BAR%d: Won't use for bulk mapping\n", + bar->index); + printf("\t<%#llx,%#llx>\n", (unsigned long long)offset, + (unsigned long long)(offset + size)); + printf("\ttarget=%d, token=%d\n", tgt, tok); + printf("\tBAR too small (%#llx) - (%#llx != %#llx).\n", + (unsigned long long)mask, + (unsigned long long)(offset & mask), + (unsigned long long)(offset + size - 1) & mask); + + return -EINVAL; + } + + offset &= mask; + +#ifdef DEBUG + printf("BAR%d: Created bulk mapping %d:x:%d:%#llx-%#llx\n", + bar->index, tgt, tok, (unsigned long long)offset, + (unsigned long long)(offset + ~mask)); +#endif + + bitsize = 40 - 21; + } + + if (bar->bitsize < bitsize) { + printf("BAR%d: Too small for %d:%d:%d\n", bar->index, tgt, tok, + act); + return -EINVAL; + } + + newcfg |= offset >> bitsize; + + if (bar_base) + *bar_base = offset; + + if (bar_config) + *bar_config = newcfg; + + return 0; +} + +static int +nfp_bar_write(struct nfp_pcie_user *nfp, struct nfp_bar *bar, + uint32_t newcfg) +{ + int base, slot; + + base = bar->index >> 3; + slot = bar->index & 7; + + if (!nfp->cfg) + return (-ENOMEM); + + bar->csr = nfp->cfg + + NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(base, slot); + + *(uint32_t *)(bar->csr) = newcfg; + + bar->barcfg = newcfg; +#ifdef DEBUG + printf("BAR%d: updated to 0x%08x\n", bar->index, newcfg); +#endif + + return 0; +} + +static int +nfp_reconfigure_bar(struct nfp_pcie_user *nfp, struct nfp_bar *bar, int tgt, + int act, int tok, uint64_t offset, size_t size, int width) +{ + uint64_t newbase; + uint32_t newcfg; + int err; + + err = nfp_compute_bar(bar, &newcfg, &newbase, tgt, act, tok, offset, + size, width); + if (err) + return err; + + bar->base = newbase; + + return nfp_bar_write(nfp, bar, newcfg); +} + +/* + * Map all PCI bars. We assume that the BAR with the PCIe config block is + * already mapped. + * + * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM) + */ +static int +nfp_enable_bars(struct nfp_pcie_user *nfp) +{ + struct nfp_bar *bar; + int x; + + for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) { + bar = &nfp->bar[x - 1]; + bar->barcfg = 0; + bar->nfp = nfp; + bar->index = x; + bar->mask = (1 << (nfp->barsz - 3)) - 1; + bar->bitsize = nfp->barsz - 3; + bar->base = 0; + bar->iomem = NULL; + bar->lock = 0; + bar->csr = nfp->cfg + + NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar->index >> 3, + bar->index & 7); + + bar->iomem = nfp->cfg + (bar->index << bar->bitsize); + } + return 0; +} + +static struct nfp_bar * +nfp_alloc_bar(struct nfp_pcie_user *nfp) +{ + struct nfp_bar *bar; + int x; + + for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) { + bar = &nfp->bar[x - 1]; + if (!bar->lock) { + bar->lock = 1; + return bar; + } + } + return NULL; +} + +static void +nfp_disable_bars(struct nfp_pcie_user *nfp) +{ + struct nfp_bar *bar; + int x; + + for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) { + bar = &nfp->bar[x - 1]; + if (bar->iomem) { + bar->iomem = NULL; + bar->lock = 0; + } + } +} + +/* + * Generic CPP bus access interface. 
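
The key test in nfp_compute_bar() is whether the requested window fits inside a single BAR aperture. Read in isolation, the check is this predicate (a sketch, with the aperture size expressed through its bit width):

    /* True when [offset, offset + size) stays inside one 2^bitsize aperture. */
    static int
    fits_one_aperture(uint64_t offset, size_t size, uint32_t bitsize)
    {
        uint64_t mask = ~((1ULL << bitsize) - 1);  /* aperture-aligned bits */

        return (offset & mask) == ((offset + size - 1) & mask);
    }

If the first and last byte of the window fall into different apertures, the BAR cannot cover the request and the function refuses the mapping.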
+ */ + +struct nfp6000_area_priv { + struct nfp_bar *bar; + uint32_t bar_offset; + + uint32_t target; + uint32_t action; + uint32_t token; + uint64_t offset; + struct { + int read; + int write; + int bar; + } width; + size_t size; + char *iomem; +}; + +static int +nfp6000_area_init(struct nfp_cpp_area *area, uint32_t dest, + unsigned long long address, unsigned long size) +{ + struct nfp_pcie_user *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area)); + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + uint32_t target = NFP_CPP_ID_TARGET_of(dest); + uint32_t action = NFP_CPP_ID_ACTION_of(dest); + uint32_t token = NFP_CPP_ID_TOKEN_of(dest); + int pp, ret = 0; + + pp = nfp6000_target_pushpull(NFP_CPP_ID(target, action, token), + address); + if (pp < 0) + return pp; + + priv->width.read = PUSH_WIDTH(pp); + priv->width.write = PULL_WIDTH(pp); + + if (priv->width.read > 0 && + priv->width.write > 0 && priv->width.read != priv->width.write) + return -EINVAL; + + if (priv->width.read > 0) + priv->width.bar = priv->width.read; + else + priv->width.bar = priv->width.write; + + priv->bar = nfp_alloc_bar(nfp); + if (priv->bar == NULL) + return -ENOMEM; + + priv->target = target; + priv->action = action; + priv->token = token; + priv->offset = address; + priv->size = size; + + ret = nfp_reconfigure_bar(nfp, priv->bar, priv->target, priv->action, + priv->token, priv->offset, priv->size, + priv->width.bar); + + return ret; +} + +static int +nfp6000_area_acquire(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + /* Calculate offset into BAR. */ + if (nfp_bar_maptype(priv->bar) == + NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL) { + priv->bar_offset = priv->offset & + (NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1); + priv->bar_offset += + NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(priv->bar, + priv->target); + priv->bar_offset += + NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(priv->bar, priv->token); + } else { + priv->bar_offset = priv->offset & priv->bar->mask; + } + + /* Must have been too big. Sub-allocate. */ + if (!priv->bar->iomem) + return (-ENOMEM); + + priv->iomem = priv->bar->iomem + priv->bar_offset; + + return 0; +} + +static void * +nfp6000_area_mapped(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *area_priv = nfp_cpp_area_priv(area); + + if (!area_priv->iomem) + return NULL; + + return area_priv->iomem; +} + +static void +nfp6000_area_release(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + priv->bar->lock = 0; + priv->bar = NULL; + priv->iomem = NULL; +} + +static void * +nfp6000_area_iomem(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + return priv->iomem; +} + +static int +nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr, + unsigned long offset, unsigned int length) +{ + uint64_t *wrptr64 = kernel_vaddr; + const volatile uint64_t *rdptr64; + struct nfp6000_area_priv *priv; + uint32_t *wrptr32 = kernel_vaddr; + const volatile uint32_t *rdptr32; + int width; + unsigned int n; + bool is_64; + + priv = nfp_cpp_area_priv(area); + rdptr64 = (uint64_t *)(priv->iomem + offset); + rdptr32 = (uint32_t *)(priv->iomem + offset); + + if (offset + length > priv->size) + return -EFAULT; + + width = priv->width.read; + + if (width <= 0) + return -EINVAL; + + /* Unaligned? 
Translate to an explicit access */
+    if ((priv->offset + offset) & (width - 1)) {
+        printf("nfp6000_area_read: unaligned access\n");
+        return -EINVAL;
+    }
+
+    is_64 = width == TARGET_WIDTH_64;
+
+    /* MU reads via a PCIe2CPP BAR support 32bit (and other) lengths */
+    if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+        priv->action == NFP_CPP_ACTION_RW) {
+        is_64 = false;
+    }
+
+    if (is_64) {
+        if (offset % sizeof(uint64_t) != 0 ||
+            length % sizeof(uint64_t) != 0)
+            return -EINVAL;
+    } else {
+        if (offset % sizeof(uint32_t) != 0 ||
+            length % sizeof(uint32_t) != 0)
+            return -EINVAL;
+    }
+
+    if (!priv->bar)
+        return -EFAULT;
+
+    if (is_64)
+        for (n = 0; n < length; n += sizeof(uint64_t)) {
+            *wrptr64 = *rdptr64;
+            wrptr64++;
+            rdptr64++;
+        }
+    else
+        for (n = 0; n < length; n += sizeof(uint32_t)) {
+            *wrptr32 = *rdptr32;
+            wrptr32++;
+            rdptr32++;
+        }
+
+    return n;
+}
+
+static int
+nfp6000_area_write(struct nfp_cpp_area *area, const void *kernel_vaddr,
+                   unsigned long offset, unsigned int length)
+{
+    const uint64_t *rdptr64 = kernel_vaddr;
+    uint64_t *wrptr64;
+    const uint32_t *rdptr32 = kernel_vaddr;
+    struct nfp6000_area_priv *priv;
+    uint32_t *wrptr32;
+    int width;
+    unsigned int n;
+    bool is_64;
+
+    priv = nfp_cpp_area_priv(area);
+    wrptr64 = (uint64_t *)(priv->iomem + offset);
+    wrptr32 = (uint32_t *)(priv->iomem + offset);
+
+    if (offset + length > priv->size)
+        return -EFAULT;
+
+    width = priv->width.write;
+
+    if (width <= 0)
+        return -EINVAL;
+
+    /* Unaligned? Translate to an explicit access */
+    if ((priv->offset + offset) & (width - 1))
+        return -EINVAL;
+
+    is_64 = width == TARGET_WIDTH_64;
+
+    /* MU writes via a PCIe2CPP BAR support 32bit (and other) lengths */
+    if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+        priv->action == NFP_CPP_ACTION_RW)
+        is_64 = false;
+
+    if (is_64) {
+        if (offset % sizeof(uint64_t) != 0 ||
+            length % sizeof(uint64_t) != 0)
+            return -EINVAL;
+    } else {
+        if (offset % sizeof(uint32_t) != 0 ||
+            length % sizeof(uint32_t) != 0)
+            return -EINVAL;
+    }
+
+    if (!priv->bar)
+        return -EFAULT;
+
+    if (is_64)
+        for (n = 0; n < length; n += sizeof(uint64_t)) {
+            *wrptr64 = *rdptr64;
+            wrptr64++;
+            rdptr64++;
+        }
+    else
+        for (n = 0; n < length; n += sizeof(uint32_t)) {
+            *wrptr32 = *rdptr32;
+            wrptr32++;
+            rdptr32++;
+        }
+
+    return n;
+}
+
+#define PCI_DEVICES "/sys/bus/pci/devices"
+
+static int
+nfp_acquire_process_lock(struct nfp_pcie_user *desc)
+{
+    int rc;
+    struct flock lock;
+    char lockname[30];
+
+    memset(&lock, 0, sizeof(lock));
+
+    snprintf(lockname, sizeof(lockname), "/var/lock/nfp_%s", desc->busdev);
+    desc->lock = open(lockname, O_RDWR | O_CREAT, 0666);
+    if (desc->lock < 0)
+        return desc->lock;
+
+    lock.l_type = F_WRLCK;
+    lock.l_whence = SEEK_SET;
+    rc = -1;
+    while (rc != 0) {
+        rc = fcntl(desc->lock, F_SETLKW, &lock);
+        if (rc < 0) {
+            if (errno != EAGAIN && errno != EACCES) {
+                close(desc->lock);
+                return rc;
+            }
+        }
+    }
+
+    return 0;
+}
+
+static int
+nfp6000_set_model(struct rte_pci_device *dev, struct nfp_cpp *cpp)
+{
+    uint32_t model;
+
+    if (rte_pci_read_config(dev, &model, 4, 0x2e) < 0) {
+        printf("nfp set model failed\n");
+        return -1;
+    }
+
+    model = model << 16;
+    nfp_cpp_model_set(cpp, model);
+
+    return 0;
+}
+
+static int
+nfp6000_set_interface(struct rte_pci_device *dev, struct nfp_cpp *cpp)
+{
+    uint16_t interface;
+
+    if (rte_pci_read_config(dev, &interface, 2, 0x154) < 0) {
+        printf("nfp set interface failed\n");
+        return -1;
+    }
+
+    nfp_cpp_interface_set(cpp, interface);
+
+    return 0;
+}
+
+#define
PCI_CFG_SPACE_SIZE 256 +#define PCI_CFG_SPACE_EXP_SIZE 4096 +#define PCI_EXT_CAP_ID(header) (int)(header & 0x0000ffff) +#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc) +#define PCI_EXT_CAP_ID_DSN 0x03 +static int +nfp_pci_find_next_ext_capability(struct rte_pci_device *dev, int cap) +{ + uint32_t header; + int ttl; + int pos = PCI_CFG_SPACE_SIZE; + + /* minimum 8 bytes per capability */ + ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; + + if (rte_pci_read_config(dev, &header, 4, pos) < 0) { + printf("nfp error reading extended capabilities\n"); + return -1; + } + + /* + * If we have no capabilities, this is indicated by cap ID, + * cap version and next pointer all being 0. + */ + if (header == 0) + return 0; + + while (ttl-- > 0) { + if (PCI_EXT_CAP_ID(header) == cap) + return pos; + + pos = PCI_EXT_CAP_NEXT(header); + if (pos < PCI_CFG_SPACE_SIZE) + break; + + if (rte_pci_read_config(dev, &header, 4, pos) < 0) { + printf("nfp error reading extended capabilities\n"); + return -1; + } + } + + return 0; +} + +static int +nfp6000_set_serial(struct rte_pci_device *dev, struct nfp_cpp *cpp) +{ + uint16_t tmp; + uint8_t serial[6]; + int serial_len = 6; + int pos; + + pos = nfp_pci_find_next_ext_capability(dev, PCI_EXT_CAP_ID_DSN); + if (pos <= 0) { + printf("PCI_EXT_CAP_ID_DSN not found. nfp set serial failed\n"); + return -1; + } else { + pos += 6; + } + + if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) { + printf("nfp set serial failed\n"); + return -1; + } + + serial[4] = (uint8_t)((tmp >> 8) & 0xff); + serial[5] = (uint8_t)(tmp & 0xff); + + pos += 2; + if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) { + printf("nfp set serial failed\n"); + return -1; + } + + serial[2] = (uint8_t)((tmp >> 8) & 0xff); + serial[3] = (uint8_t)(tmp & 0xff); + + pos += 2; + if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) { + printf("nfp set serial failed\n"); + return -1; + } + + serial[0] = (uint8_t)((tmp >> 8) & 0xff); + serial[1] = (uint8_t)(tmp & 0xff); + + nfp_cpp_serial_set(cpp, serial, serial_len); + + return 0; +} + +static int +nfp6000_set_barsz(struct rte_pci_device *dev, struct nfp_pcie_user *desc) +{ + unsigned long tmp; + int i = 0; + + tmp = dev->mem_resource[0].len; + + while (tmp >>= 1) + i++; + + desc->barsz = i; + return 0; +} + +static int +nfp6000_init(struct nfp_cpp *cpp, struct rte_pci_device *dev) +{ + int ret = 0; + uint32_t model; + struct nfp_pcie_user *desc; + + desc = malloc(sizeof(*desc)); + if (!desc) + return -1; + + + memset(desc->busdev, 0, BUSDEV_SZ); + strlcpy(desc->busdev, dev->device.name, sizeof(desc->busdev)); + + if (cpp->driver_lock_needed) { + ret = nfp_acquire_process_lock(desc); + if (ret) + return -1; + } + + if (nfp6000_set_model(dev, cpp) < 0) + return -1; + if (nfp6000_set_interface(dev, cpp) < 0) + return -1; + if (nfp6000_set_serial(dev, cpp) < 0) + return -1; + if (nfp6000_set_barsz(dev, desc) < 0) + return -1; + + desc->cfg = (char *)dev->mem_resource[0].addr; + + nfp_enable_bars(desc); + + nfp_cpp_priv_set(cpp, desc); + + model = __nfp_cpp_model_autodetect(cpp); + nfp_cpp_model_set(cpp, model); + + return ret; +} + +static void +nfp6000_free(struct nfp_cpp *cpp) +{ + struct nfp_pcie_user *desc = nfp_cpp_priv(cpp); + + nfp_disable_bars(desc); + if (cpp->driver_lock_needed) + close(desc->lock); + close(desc->device); + free(desc); +} + +static const struct nfp_cpp_operations nfp6000_pcie_ops = { + .init = nfp6000_init, + .free = nfp6000_free, + + .area_priv_size = sizeof(struct nfp6000_area_priv), + .area_init = nfp6000_area_init, + 
.area_acquire = nfp6000_area_acquire, + .area_release = nfp6000_area_release, + .area_mapped = nfp6000_area_mapped, + .area_read = nfp6000_area_read, + .area_write = nfp6000_area_write, + .area_iomem = nfp6000_area_iomem, +}; + +const struct +nfp_cpp_operations *nfp_cpp_transport_operations(void) +{ + return &nfp6000_pcie_ops; +} diff --git a/drivers/net/nfp/nfpcore/nfp_cppcore.c b/drivers/net/nfp/nfpcore/nfp_cppcore.c new file mode 100644 index 00000000..75d3c974 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_cppcore.c @@ -0,0 +1,858 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "nfp_cpp.h" +#include "nfp_target.h" +#include "nfp6000/nfp6000.h" +#include "nfp6000/nfp_xpb.h" +#include "nfp_nffw.h" + +#define NFP_PL_DEVICE_ID 0x00000004 +#define NFP_PL_DEVICE_ID_MASK 0xff + +#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144 + +void +nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv) +{ + cpp->priv = priv; +} + +void * +nfp_cpp_priv(struct nfp_cpp *cpp) +{ + return cpp->priv; +} + +void +nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model) +{ + cpp->model = model; +} + +uint32_t +nfp_cpp_model(struct nfp_cpp *cpp) +{ + if (!cpp) + return NFP_CPP_MODEL_INVALID; + + if (cpp->model == 0) + cpp->model = __nfp_cpp_model_autodetect(cpp); + + return cpp->model; +} + +void +nfp_cpp_interface_set(struct nfp_cpp *cpp, uint32_t interface) +{ + cpp->interface = interface; +} + +int +nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial) +{ + *serial = cpp->serial; + return cpp->serial_len; +} + +int +nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial, + size_t serial_len) +{ + if (cpp->serial_len) + free(cpp->serial); + + cpp->serial = malloc(serial_len); + if (!cpp->serial) + return -1; + + memcpy(cpp->serial, serial, serial_len); + cpp->serial_len = serial_len; + + return 0; +} + +uint16_t +nfp_cpp_interface(struct nfp_cpp *cpp) +{ + if (!cpp) + return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_INVALID, 0, 0); + + return cpp->interface; +} + +void * +nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area) +{ + return &cpp_area[1]; +} + +struct nfp_cpp * +nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area) +{ + return cpp_area->cpp; +} + +const char * +nfp_cpp_area_name(struct nfp_cpp_area *cpp_area) +{ + return cpp_area->name; +} + +/* + * nfp_cpp_area_alloc - allocate a new CPP area + * @cpp: CPP handle + * @dest: CPP id + * @address: start address on CPP target + * @size: size of area in bytes + * + * Allocate and initialize a CPP area structure. The area must later + * be locked down with an 'acquire' before it can be safely accessed. + * + * NOTE: @address and @size must be 32-bit aligned values. 
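
The allocation in the function below packs three things into one calloc() block: the area struct itself, the transport's private data, and the owner name string. Schematically:

    +---------------------+---------------------------+---------------+
    | struct nfp_cpp_area | transport private data    | name + '\0'   |
    +---------------------+---------------------------+---------------+
      ^area                 ^nfp_cpp_area_priv(area)    ^area->name
                             (i.e. &area[1])

A single free(area) then releases all three pieces at once.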
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, uint32_t dest,
+                             const char *name, unsigned long long address,
+                             unsigned long size)
+{
+    struct nfp_cpp_area *area;
+    uint64_t tmp64 = (uint64_t)address;
+    int tmp, err;
+
+    if (!cpp)
+        return NULL;
+
+    /* CPP bus uses only a 40-bit address */
+    if ((address + size) > (1ULL << 40))
+        return NFP_ERRPTR(EFAULT);
+
+    /* Remap from cpp_island to cpp_target */
+    err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table);
+    if (err < 0)
+        return NULL;
+
+    address = (unsigned long long)tmp64;
+
+    if (!name)
+        name = "";
+
+    area = calloc(1, sizeof(*area) + cpp->op->area_priv_size +
+                  strlen(name) + 1);
+    if (!area)
+        return NULL;
+
+    area->cpp = cpp;
+    area->name = ((char *)area) + sizeof(*area) + cpp->op->area_priv_size;
+    memcpy(area->name, name, strlen(name) + 1);
+
+    /*
+     * Preserve errno around the call to area_init, since most
+     * implementations will blindly call nfp_target_action_width() for both
+     * read and write modes, and that will set errno to EINVAL.
+     */
+    tmp = errno;
+
+    err = cpp->op->area_init(area, dest, address, size);
+    if (err < 0) {
+        free(area);
+        return NULL;
+    }
+
+    /* Restore errno */
+    errno = tmp;
+
+    area->offset = address;
+    area->size = size;
+
+    return area;
+}
+
+struct nfp_cpp_area *
+nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t dest,
+                   unsigned long long address, unsigned long size)
+{
+    return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size);
+}
+
+/*
+ * nfp_cpp_area_alloc_acquire - allocate a new CPP area and lock it down
+ *
+ * @cpp: CPP handle
+ * @dest: CPP id
+ * @address: start address on CPP target
+ * @size: size of area
+ *
+ * Allocate and initialize a CPP area structure, and lock it down so
+ * that it can be accessed directly.
+ *
+ * NOTE: @address and @size must be 32-bit aligned values.
+ *
+ * NOTE: The area must also be 'released' when the structure is freed.
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, uint32_t destination,
+                           unsigned long long address, unsigned long size)
+{
+    struct nfp_cpp_area *area;
+
+    area = nfp_cpp_area_alloc(cpp, destination, address, size);
+    if (!area)
+        return NULL;
+
+    if (nfp_cpp_area_acquire(area)) {
+        nfp_cpp_area_free(area);
+        return NULL;
+    }
+
+    return area;
+}
+
+/*
+ * nfp_cpp_area_free - free up the CPP area
+ * area: CPP area handle
+ *
+ * Frees up memory resources held by the CPP area.
+ */
+void
+nfp_cpp_area_free(struct nfp_cpp_area *area)
+{
+    if (area->cpp->op->area_cleanup)
+        area->cpp->op->area_cleanup(area);
+    free(area);
+}
+
+/*
+ * nfp_cpp_area_release_free - release CPP area and free it
+ * area: CPP area handle
+ *
+ * Releases the CPP area and frees up memory resources held by it.
+ */
+void
+nfp_cpp_area_release_free(struct nfp_cpp_area *area)
+{
+    nfp_cpp_area_release(area);
+    nfp_cpp_area_free(area);
+}
+
+/*
+ * nfp_cpp_area_acquire - lock down a CPP area for access
+ * @area: CPP area handle
+ *
+ * Locks down the CPP area for a potential long term activity. Area
+ * must always be locked down before being accessed.
+ */
+int
+nfp_cpp_area_acquire(struct nfp_cpp_area *area)
+{
+    if (area->cpp->op->area_acquire) {
+        int err = area->cpp->op->area_acquire(area);
+
+        if (err < 0)
+            return -1;
+    }
+
+    return 0;
+}
+
+/*
+ * nfp_cpp_area_release - release a locked down CPP area
+ * @area: CPP area handle
+ *
+ * Releases a previously locked down CPP area.
+ */ +void +nfp_cpp_area_release(struct nfp_cpp_area *area) +{ + if (area->cpp->op->area_release) + area->cpp->op->area_release(area); +} + +/* + * nfp_cpp_area_iomem() - get IOMEM region for CPP area + * + * @area: CPP area handle + * + * Returns an iomem pointer for use with readl()/writel() style operations. + * + * NOTE: Area must have been locked down with an 'acquire'. + * + * Return: pointer to the area, or NULL + */ +void * +nfp_cpp_area_iomem(struct nfp_cpp_area *area) +{ + void *iomem = NULL; + + if (area->cpp->op->area_iomem) + iomem = area->cpp->op->area_iomem(area); + + return iomem; +} + +/* + * nfp_cpp_area_read - read data from CPP area + * + * @area: CPP area handle + * @offset: offset into CPP area + * @kernel_vaddr: kernel address to put data into + * @length: number of bytes to read + * + * Read data from indicated CPP region. + * + * NOTE: @offset and @length must be 32-bit aligned values. + * + * NOTE: Area must have been locked down with an 'acquire'. + */ +int +nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset, + void *kernel_vaddr, size_t length) +{ + if ((offset + length) > area->size) + return NFP_ERRNO(EFAULT); + + return area->cpp->op->area_read(area, kernel_vaddr, offset, length); +} + +/* + * nfp_cpp_area_write - write data to CPP area + * + * @area: CPP area handle + * @offset: offset into CPP area + * @kernel_vaddr: kernel address to read data from + * @length: number of bytes to write + * + * Write data to indicated CPP region. + * + * NOTE: @offset and @length must be 32-bit aligned values. + * + * NOTE: Area must have been locked down with an 'acquire'. + */ +int +nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset, + const void *kernel_vaddr, size_t length) +{ + if ((offset + length) > area->size) + return NFP_ERRNO(EFAULT); + + return area->cpp->op->area_write(area, kernel_vaddr, offset, length); +} + +void * +nfp_cpp_area_mapped(struct nfp_cpp_area *area) +{ + if (area->cpp->op->area_mapped) + return area->cpp->op->area_mapped(area); + return NULL; +} + +/* + * nfp_cpp_area_check_range - check if address range fits in CPP area + * + * @area: CPP area handle + * @offset: offset into CPP area + * @length: size of address range in bytes + * + * Check if address range fits within CPP area. Return 0 if area fits + * or -1 on error. + */ +int +nfp_cpp_area_check_range(struct nfp_cpp_area *area, unsigned long long offset, + unsigned long length) +{ + if (((offset + length) > area->size)) + return NFP_ERRNO(EFAULT); + + return 0; +} + +/* + * Return the correct CPP address, and fixup xpb_addr as needed, + * based upon NFP model. + */ +static uint32_t +nfp_xpb_to_cpp(struct nfp_cpp *cpp, uint32_t *xpb_addr) +{ + uint32_t xpb; + int island; + + if (!NFP_CPP_MODEL_IS_6000(cpp->model)) + return 0; + + xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0); + + /* + * Ensure that non-local XPB accesses go out through the + * global XPBM bus. 
+ */ + island = ((*xpb_addr) >> 24) & 0x3f; + + if (!island) + return xpb; + + if (island == 1) { + /* + * Accesses to the ARM Island overlay uses Island 0 + * Global Bit + */ + (*xpb_addr) &= ~0x7f000000; + if (*xpb_addr < 0x60000) + *xpb_addr |= (1 << 30); + else + /* And only non-ARM interfaces use island id = 1 */ + if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp)) != + NFP_CPP_INTERFACE_TYPE_ARM) + *xpb_addr |= (1 << 24); + } else { + (*xpb_addr) |= (1 << 30); + } + + return xpb; +} + +int +nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset, + uint32_t *value) +{ + int sz; + uint32_t tmp = 0; + + sz = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + *value = rte_le_to_cpu_32(tmp); + + return (sz == sizeof(*value)) ? 0 : -1; +} + +int +nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset, + uint32_t value) +{ + int sz; + + value = rte_cpu_to_le_32(value); + sz = nfp_cpp_area_write(area, offset, &value, sizeof(value)); + return (sz == sizeof(value)) ? 0 : -1; +} + +int +nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset, + uint64_t *value) +{ + int sz; + uint64_t tmp = 0; + + sz = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + *value = rte_le_to_cpu_64(tmp); + + return (sz == sizeof(*value)) ? 0 : -1; +} + +int +nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset, + uint64_t value) +{ + int sz; + + value = rte_cpu_to_le_64(value); + sz = nfp_cpp_area_write(area, offset, &value, sizeof(value)); + + return (sz == sizeof(value)) ? 0 : -1; +} + +int +nfp_cpp_readl(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, + uint32_t *value) +{ + int sz; + uint32_t tmp; + + sz = nfp_cpp_read(cpp, cpp_id, address, &tmp, sizeof(tmp)); + *value = rte_le_to_cpu_32(tmp); + + return (sz == sizeof(*value)) ? 0 : -1; +} + +int +nfp_cpp_writel(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, + uint32_t value) +{ + int sz; + + value = rte_cpu_to_le_32(value); + sz = nfp_cpp_write(cpp, cpp_id, address, &value, sizeof(value)); + + return (sz == sizeof(value)) ? 0 : -1; +} + +int +nfp_cpp_readq(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, + uint64_t *value) +{ + int sz; + uint64_t tmp; + + sz = nfp_cpp_read(cpp, cpp_id, address, &tmp, sizeof(tmp)); + *value = rte_le_to_cpu_64(tmp); + + return (sz == sizeof(*value)) ? 0 : -1; +} + +int +nfp_cpp_writeq(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, + uint64_t value) +{ + int sz; + + value = rte_cpu_to_le_64(value); + sz = nfp_cpp_write(cpp, cpp_id, address, &value, sizeof(value)); + + return (sz == sizeof(value)) ? 
0 : -1; +} + +int +nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t value) +{ + uint32_t cpp_dest; + + cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr); + + return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value); +} + +int +nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t *value) +{ + uint32_t cpp_dest; + + cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr); + + return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value); +} + +static struct nfp_cpp * +nfp_cpp_alloc(struct rte_pci_device *dev, int driver_lock_needed) +{ + const struct nfp_cpp_operations *ops; + struct nfp_cpp *cpp; + int err; + + ops = nfp_cpp_transport_operations(); + + if (!ops || !ops->init) + return NFP_ERRPTR(EINVAL); + + cpp = calloc(1, sizeof(*cpp)); + if (!cpp) + return NULL; + + cpp->op = ops; + cpp->driver_lock_needed = driver_lock_needed; + + if (cpp->op->init) { + err = cpp->op->init(cpp, dev); + if (err < 0) { + free(cpp); + return NULL; + } + } + + if (NFP_CPP_MODEL_IS_6000(nfp_cpp_model(cpp))) { + uint32_t xpbaddr; + size_t tgt; + + for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) { + /* Hardcoded XPB IMB Base, island 0 */ + xpbaddr = 0x000a0000 + (tgt * 4); + err = nfp_xpb_readl(cpp, xpbaddr, + (uint32_t *)&cpp->imb_cat_table[tgt]); + if (err < 0) { + free(cpp); + return NULL; + } + } + } + + return cpp; +} + +/* + * nfp_cpp_free - free the CPP handle + * @cpp: CPP handle + */ +void +nfp_cpp_free(struct nfp_cpp *cpp) +{ + if (cpp->op && cpp->op->free) + cpp->op->free(cpp); + + if (cpp->serial_len) + free(cpp->serial); + + free(cpp); +} + +struct nfp_cpp * +nfp_cpp_from_device_name(struct rte_pci_device *dev, int driver_lock_needed) +{ + return nfp_cpp_alloc(dev, driver_lock_needed); +} + +/* + * Modify bits of a 32-bit value from the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param mask mask of bits to alter + * @param value value to modify + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int +nfp_xpb_writelm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, + uint32_t value) +{ + int err; + uint32_t tmp; + + err = nfp_xpb_readl(cpp, xpb_tgt, &tmp); + if (err < 0) + return err; + + tmp &= ~mask; + tmp |= (mask & value); + return nfp_xpb_writel(cpp, xpb_tgt, tmp); +} + +/* + * Modify bits of a 32-bit value from the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param mask mask of bits to alter + * @param value value to monitor for + * @param timeout_us maximum number of us to wait (-1 for forever) + * + * @return >= 0 on success, or -1 on failure (and set errno accordingly). 
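
A typical use of the wait variant is polling a ready bit with a bounded wait. A sketch, where the register address and bit are placeholders:

    /* Wait up to 1 ms for bit 0 of a hypothetical status register to set. */
    int ret = nfp_xpb_waitlm(cpp, 0x000a0100, 0x1, 0x1, 1000);
    if (ret < 0)
        return -1;    /* errno is ETIMEDOUT if the bit never matched */
    /* ret >= 0: remaining timeout budget, in microseconds, at match time */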
+ */ +int +nfp_xpb_waitlm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, + uint32_t value, int timeout_us) +{ + uint32_t tmp; + int err; + + do { + err = nfp_xpb_readl(cpp, xpb_tgt, &tmp); + if (err < 0) + goto exit; + + if ((tmp & mask) == (value & mask)) { + if (timeout_us < 0) + timeout_us = 0; + break; + } + + if (timeout_us < 0) + continue; + + timeout_us -= 100; + usleep(100); + } while (timeout_us >= 0); + + if (timeout_us < 0) + err = NFP_ERRNO(ETIMEDOUT); + else + err = timeout_us; + +exit: + return err; +} + +/* + * nfp_cpp_read - read from CPP target + * @cpp: CPP handle + * @destination: CPP id + * @address: offset into CPP target + * @kernel_vaddr: kernel buffer for result + * @length: number of bytes to read + */ +int +nfp_cpp_read(struct nfp_cpp *cpp, uint32_t destination, + unsigned long long address, void *kernel_vaddr, size_t length) +{ + struct nfp_cpp_area *area; + int err; + + area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length); + if (!area) { + printf("Area allocation/acquire failed\n"); + return -1; + } + + err = nfp_cpp_area_read(area, 0, kernel_vaddr, length); + + nfp_cpp_area_release_free(area); + return err; +} + +/* + * nfp_cpp_write - write to CPP target + * @cpp: CPP handle + * @destination: CPP id + * @address: offset into CPP target + * @kernel_vaddr: kernel buffer to read from + * @length: number of bytes to write + */ +int +nfp_cpp_write(struct nfp_cpp *cpp, uint32_t destination, + unsigned long long address, const void *kernel_vaddr, + size_t length) +{ + struct nfp_cpp_area *area; + int err; + + area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length); + if (!area) + return -1; + + err = nfp_cpp_area_write(area, 0, kernel_vaddr, length); + + nfp_cpp_area_release_free(area); + return err; +} + +/* + * nfp_cpp_area_fill - fill a CPP area with a value + * @area: CPP area + * @offset: offset into CPP area + * @value: value to fill with + * @length: length of area to fill + */ +int +nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset, + uint32_t value, size_t length) +{ + int err; + size_t i; + uint64_t value64; + + value = rte_cpu_to_le_32(value); + value64 = ((uint64_t)value << 32) | value; + + if ((offset + length) > area->size) + return NFP_ERRNO(EINVAL); + + if ((area->offset + offset) & 3) + return NFP_ERRNO(EINVAL); + + if (((area->offset + offset) & 7) == 4 && length >= 4) { + err = nfp_cpp_area_write(area, offset, &value, sizeof(value)); + if (err < 0) + return err; + if (err != sizeof(value)) + return NFP_ERRNO(ENOSPC); + offset += sizeof(value); + length -= sizeof(value); + } + + for (i = 0; (i + sizeof(value)) < length; i += sizeof(value64)) { + err = + nfp_cpp_area_write(area, offset + i, &value64, + sizeof(value64)); + if (err < 0) + return err; + if (err != sizeof(value64)) + return NFP_ERRNO(ENOSPC); + } + + if ((i + sizeof(value)) <= length) { + err = + nfp_cpp_area_write(area, offset + i, &value, sizeof(value)); + if (err < 0) + return err; + if (err != sizeof(value)) + return NFP_ERRNO(ENOSPC); + i += sizeof(value); + } + + return (int)i; +} + +/* + * NOTE: This code should not use nfp_xpb_* functions, + * as those are model-specific + */ +uint32_t +__nfp_cpp_model_autodetect(struct nfp_cpp *cpp) +{ + uint32_t arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0); + uint32_t model = 0; + + nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, &model); + + if (NFP_CPP_MODEL_IS_6000(model)) { + uint32_t tmp; + + nfp_cpp_model_set(cpp, model); + + /* The PL's PluDeviceID revision code is 
authoritative */
+        model &= ~0xff;
+        nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) +
+                      NFP_PL_DEVICE_ID, &tmp);
+        model |= (NFP_PL_DEVICE_ID_MASK & tmp) - 0x10;
+    }
+
+    return model;
+}
+
+/*
+ * nfp_cpp_map_area() - Helper function to map an area
+ * @cpp: NFP CPP handle
+ * @domain: CPP domain
+ * @target: CPP target
+ * @addr: CPP address
+ * @size: Size of the area
+ * @area: Area handle (output)
+ *
+ * Map an area for IOMEM access. To undo the effect of this function call
+ * @nfp_cpp_area_release_free(*area).
+ *
+ * Return: Pointer to memory mapped area, or NULL on failure
+ */
+uint8_t *
+nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target, uint64_t addr,
+                 unsigned long size, struct nfp_cpp_area **area)
+{
+    uint8_t *res;
+    uint32_t dest;
+
+    dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain);
+
+    *area = nfp_cpp_area_alloc_acquire(cpp, dest, addr, size);
+    if (!*area)
+        goto err_eio;
+
+    res = nfp_cpp_area_iomem(*area);
+    if (!res)
+        goto err_release_free;
+
+    return res;
+
+err_release_free:
+    nfp_cpp_area_release_free(*area);
+err_eio:
+    return NULL;
+}
diff --git a/drivers/net/nfp/nfpcore/nfp_crc.c b/drivers/net/nfp/nfpcore/nfp_crc.c
new file mode 100644
index 00000000..20431bf8
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_crc.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include
+#include
+
+#include "nfp_crc.h"
+
+static inline uint32_t
+nfp_crc32_be_generic(uint32_t crc, unsigned char const *p, size_t len,
+                     uint32_t polynomial)
+{
+    int i;
+    while (len--) {
+        crc ^= *p++ << 24;
+        for (i = 0; i < 8; i++)
+            crc = (crc << 1) ^ ((crc & 0x80000000) ? polynomial :
+                                0);
+    }
+    return crc;
+}
+
+static inline uint32_t
+nfp_crc32_be(uint32_t crc, unsigned char const *p, size_t len)
+{
+    return nfp_crc32_be_generic(crc, p, len, CRCPOLY_BE);
+}
+
+static uint32_t
+nfp_crc32_posix_end(uint32_t crc, size_t total_len)
+{
+    /* Extend with the length of the string. */
+    while (total_len != 0) {
+        uint8_t c = total_len & 0xff;
+
+        crc = nfp_crc32_be(crc, &c, 1);
+        total_len >>= 8;
+    }
+
+    return ~crc;
+}
+
+uint32_t
+nfp_crc32_posix(const void *buff, size_t len)
+{
+    return nfp_crc32_posix_end(nfp_crc32_be(0, buff, len), len);
+}
diff --git a/drivers/net/nfp/nfpcore/nfp_crc.h b/drivers/net/nfp/nfpcore/nfp_crc.h
new file mode 100644
index 00000000..f99c89fc
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_crc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_CRC_H__
+#define __NFP_CRC_H__
+
+/*
+ * There are multiple 16-bit CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRCPOLY_LE 0xedb88320
+#define CRCPOLY_BE 0x04c11db7
+
+uint32_t nfp_crc32_posix(const void *buff, size_t len);
+
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/nfp/nfpcore/nfp_hwinfo.c
new file mode 100644
index 00000000..c0516bf8
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_hwinfo.c
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM
+ * after chip reset.
+ * + * Examples of the fields: + * me.count = 40 + * me.mask = 0x7f_ffff_ffff + * + * me.count is the total number of MEs on the system. + * me.mask is the bitmask of MEs that are available for application usage. + * + * (ie, in this example, ME 39 has been reserved by boardconfig.) + */ + +#include +#include + +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" +#include "nfp_resource.h" +#include "nfp_hwinfo.h" +#include "nfp_crc.h" + +static int +nfp_hwinfo_is_updating(struct nfp_hwinfo *hwinfo) +{ + return hwinfo->version & NFP_HWINFO_VERSION_UPDATING; +} + +static int +nfp_hwinfo_db_walk(struct nfp_hwinfo *hwinfo, uint32_t size) +{ + const char *key, *val, *end = hwinfo->data + size; + + for (key = hwinfo->data; *key && key < end; + key = val + strlen(val) + 1) { + val = key + strlen(key) + 1; + if (val >= end) { + printf("Bad HWINFO - overflowing key\n"); + return -EINVAL; + } + + if (val + strlen(val) + 1 > end) { + printf("Bad HWINFO - overflowing value\n"); + return -EINVAL; + } + } + return 0; +} + +static int +nfp_hwinfo_db_validate(struct nfp_hwinfo *db, uint32_t len) +{ + uint32_t size, new_crc, *crc; + + size = db->size; + if (size > len) { + printf("Unsupported hwinfo size %u > %u\n", size, len); + return -EINVAL; + } + + size -= sizeof(uint32_t); + new_crc = nfp_crc32_posix((char *)db, size); + crc = (uint32_t *)(db->start + size); + if (new_crc != *crc) { + printf("Corrupt hwinfo table (CRC mismatch)\n"); + printf("\tcalculated 0x%x, expected 0x%x\n", new_crc, *crc); + return -EINVAL; + } + + return nfp_hwinfo_db_walk(db, size); +} + +static struct nfp_hwinfo * +nfp_hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size) +{ + struct nfp_hwinfo *header; + void *res; + uint64_t cpp_addr; + uint32_t cpp_id; + int err; + uint8_t *db; + + res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO); + if (res) { + cpp_id = nfp_resource_cpp_id(res); + cpp_addr = nfp_resource_address(res); + *cpp_size = nfp_resource_size(res); + + nfp_resource_release(res); + + if (*cpp_size < HWINFO_SIZE_MIN) + return NULL; + } else { + return NULL; + } + + db = malloc(*cpp_size + 1); + if (!db) + return NULL; + + err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size); + if (err != (int)*cpp_size) + goto exit_free; + + header = (void *)db; + printf("NFP HWINFO header: %08x\n", *(uint32_t *)header); + if (nfp_hwinfo_is_updating(header)) + goto exit_free; + + if (header->version != NFP_HWINFO_VERSION_2) { + printf("Unknown HWInfo version: 0x%08x\n", + header->version); + goto exit_free; + } + + /* NULL-terminate for safety */ + db[*cpp_size] = '\0'; + + return (void *)db; +exit_free: + free(db); + return NULL; +} + +static struct nfp_hwinfo * +nfp_hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size) +{ + struct timespec wait; + struct nfp_hwinfo *db; + int count; + + wait.tv_sec = 0; + wait.tv_nsec = 10000000; + count = 0; + + for (;;) { + db = nfp_hwinfo_try_fetch(cpp, hwdb_size); + if (db) + return db; + + nanosleep(&wait, NULL); + if (count++ > 200) { + printf("NFP access error\n"); + return NULL; + } + } +} + +struct nfp_hwinfo * +nfp_hwinfo_read(struct nfp_cpp *cpp) +{ + struct nfp_hwinfo *db; + size_t hwdb_size = 0; + int err; + + db = nfp_hwinfo_fetch(cpp, &hwdb_size); + if (!db) + return NULL; + + err = nfp_hwinfo_db_validate(db, hwdb_size); + if (err) { + free(db); + return NULL; + } + return db; +} + +/* + * nfp_hwinfo_lookup() - Find a value in the HWInfo table by name + * @hwinfo: NFP HWinfo table + * @lookup: HWInfo name to search for + * + * Return: Value of the HWInfo name, or NULL + 
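
The lookup below walks the HWInfo v2 packed 'key\0value\0' layout described in nfp_hwinfo.h. The same traversal in isolation, as a sketch over a well-formed, NUL-terminated blob (assumes <stdio.h> and <string.h>):

    /* Print every key/value pair from a packed HWInfo v2 data region. */
    static void
    dump_pairs(const char *data, size_t len)
    {
        const char *key, *val, *end = data + len;

        for (key = data; key < end && *key; key = val + strlen(val) + 1) {
            val = key + strlen(key) + 1;
            printf("%s=%s\n", key, val);
        }
    }

Each iteration hops over one key and its value; the walk stops at the first empty key or at the end of the region, mirroring nfp_hwinfo_db_walk() above.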
*/ +const char * +nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup) +{ + const char *key, *val, *end; + + if (!hwinfo || !lookup) + return NULL; + + end = hwinfo->data + hwinfo->size - sizeof(uint32_t); + + for (key = hwinfo->data; *key && key < end; + key = val + strlen(val) + 1) { + val = key + strlen(key) + 1; + + if (strcmp(key, lookup) == 0) + return val; + } + + return NULL; +} diff --git a/drivers/net/nfp/nfpcore/nfp_hwinfo.h b/drivers/net/nfp/nfpcore/nfp_hwinfo.h new file mode 100644 index 00000000..ccc61632 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_hwinfo.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_HWINFO_H__ +#define __NFP_HWINFO_H__ + +#include + +#define HWINFO_SIZE_MIN 0x100 + +/* + * The Hardware Info Table defines the properties of the system. + * + * HWInfo v1 Table (fixed size) + * + * 0x0000: uint32_t version Hardware Info Table version (1.0) + * 0x0004: uint32_t size Total size of the table, including the + * CRC32 (IEEE 802.3) + * 0x0008: uint32_t jumptab Offset of key/value table + * 0x000c: uint32_t keys Total number of keys in the key/value + * table + * NNNNNN: Key/value jump table and string data + * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) + * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE + * + * HWInfo v2 Table (variable size) + * + * 0x0000: uint32_t version Hardware Info Table version (2.0) + * 0x0004: uint32_t size Current size of the data area, excluding + * CRC32 + * 0x0008: uint32_t limit Maximum size of the table + * 0x000c: uint32_t reserved Unused, set to zero + * NNNNNN: Key/value data + * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) + * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE + * + * If the HWInfo table is in the process of being updated, the low bit of + * version will be set. + * + * HWInfo v1 Key/Value Table + * ------------------------- + * + * The key/value table is a set of offsets to ASCIIZ strings which have + * been strcmp(3) sorted (yes, please use bsearch(3) on the table). + * + * All keys are guaranteed to be unique. + * + * N+0: uint32_t key_1 Offset to the first key + * N+4: uint32_t val_1 Offset to the first value + * N+8: uint32_t key_2 Offset to the second key + * N+c: uint32_t val_2 Offset to the second value + * ... + * + * HWInfo v2 Key/Value Table + * ------------------------- + * + * Packed UTF8Z strings, ie 'key1\000value1\000key2\000value2\000' + * + * Unsorted. + */ + +#define NFP_HWINFO_VERSION_1 ('H' << 24 | 'I' << 16 | 1 << 8 | 0 << 1 | 0) +#define NFP_HWINFO_VERSION_2 ('H' << 24 | 'I' << 16 | 2 << 8 | 0 << 1 | 0) +#define NFP_HWINFO_VERSION_UPDATING BIT(0) + +struct nfp_hwinfo { + uint8_t start[0]; + + uint32_t version; + uint32_t size; + + /* v2 specific fields */ + uint32_t limit; + uint32_t resv; + + char data[]; +}; + +struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp); + +const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup); + +#endif diff --git a/drivers/net/nfp/nfpcore/nfp_mip.c b/drivers/net/nfp/nfpcore/nfp_mip.c new file mode 100644 index 00000000..c86966df --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_mip.c @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#include +#include + +#include "nfp_cpp.h" +#include "nfp_mip.h" +#include "nfp_nffw.h" + +#define NFP_MIP_SIGNATURE rte_cpu_to_le_32(0x0050494d) /* "MIP\0" */ +#define NFP_MIP_VERSION rte_cpu_to_le_32(1) +#define NFP_MIP_MAX_OFFSET (256 * 1024) + +struct nfp_mip { + uint32_t signature; + uint32_t mip_version; + uint32_t mip_size; + uint32_t first_entry; + + uint32_t version; + uint32_t buildnum; + uint32_t buildtime; + uint32_t loadtime; + + uint32_t symtab_addr; + uint32_t symtab_size; + uint32_t strtab_addr; + uint32_t strtab_size; + + char name[16]; + char toolchain[32]; +}; + +/* Read memory and check if it could be a valid MIP */ +static int +nfp_mip_try_read(struct nfp_cpp *cpp, uint32_t cpp_id, uint64_t addr, + struct nfp_mip *mip) +{ + int ret; + + ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip)); + if (ret != sizeof(*mip)) { + printf("Failed to read MIP data (%d, %zu)\n", + ret, sizeof(*mip)); + return -EIO; + } + if (mip->signature != NFP_MIP_SIGNATURE) { + printf("Incorrect MIP signature (0x%08x)\n", + rte_le_to_cpu_32(mip->signature)); + return -EINVAL; + } + if (mip->mip_version != NFP_MIP_VERSION) { + printf("Unsupported MIP version (%d)\n", + rte_le_to_cpu_32(mip->mip_version)); + return -EINVAL; + } + + return 0; +} + +/* Try to locate MIP using the resource table */ +static int +nfp_mip_read_resource(struct nfp_cpp *cpp, struct nfp_mip *mip) +{ + struct nfp_nffw_info *nffw_info; + uint32_t cpp_id; + uint64_t addr; + int err; + + nffw_info = nfp_nffw_info_open(cpp); + if (!nffw_info) + return -ENODEV; + + err = nfp_nffw_info_mip_first(nffw_info, &cpp_id, &addr); + if (err) + goto exit_close_nffw; + + err = nfp_mip_try_read(cpp, cpp_id, addr, mip); +exit_close_nffw: + nfp_nffw_info_close(nffw_info); + return err; +} + +/* + * nfp_mip_open() - Get device MIP structure + * @cpp: NFP CPP Handle + * + * Copy MIP structure from NFP device and return it. The returned + * structure is handled internally by the library and should be + * freed by calling nfp_mip_close(). + * + * Return: pointer to mip, NULL on failure. 
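+ *
+ * Example (minimal sketch, error handling elided):
+ *
+ *	struct nfp_mip *mip = nfp_mip_open(cpp);
+ *	uint32_t addr, size;
+ *
+ *	if (mip) {
+ *		nfp_mip_symtab(mip, &addr, &size);
+ *		printf("%s: symtab at 0x%x (%u bytes)\n",
+ *		       nfp_mip_name(mip), addr, size);
+ *		nfp_mip_close(mip);
+ *	}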
+ */ +struct nfp_mip * +nfp_mip_open(struct nfp_cpp *cpp) +{ + struct nfp_mip *mip; + int err; + + mip = malloc(sizeof(*mip)); + if (!mip) + return NULL; + + err = nfp_mip_read_resource(cpp, mip); + if (err) { + free(mip); + return NULL; + } + + mip->name[sizeof(mip->name) - 1] = 0; + + return mip; +} + +void +nfp_mip_close(struct nfp_mip *mip) +{ + free(mip); +} + +const char * +nfp_mip_name(const struct nfp_mip *mip) +{ + return mip->name; +} + +/* + * nfp_mip_symtab() - Get the address and size of the MIP symbol table + * @mip: MIP handle + * @addr: Location for NFP DDR address of MIP symbol table + * @size: Location for size of MIP symbol table + */ +void +nfp_mip_symtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size) +{ + *addr = rte_le_to_cpu_32(mip->symtab_addr); + *size = rte_le_to_cpu_32(mip->symtab_size); +} + +/* + * nfp_mip_strtab() - Get the address and size of the MIP symbol name table + * @mip: MIP handle + * @addr: Location for NFP DDR address of MIP symbol name table + * @size: Location for size of MIP symbol name table + */ +void +nfp_mip_strtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size) +{ + *addr = rte_le_to_cpu_32(mip->strtab_addr); + *size = rte_le_to_cpu_32(mip->strtab_size); +} diff --git a/drivers/net/nfp/nfpcore/nfp_mip.h b/drivers/net/nfp/nfpcore/nfp_mip.h new file mode 100644 index 00000000..d0919b58 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_mip.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_MIP_H__ +#define __NFP_MIP_H__ + +#include "nfp_nffw.h" + +struct nfp_mip; + +struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp); +void nfp_mip_close(struct nfp_mip *mip); + +const char *nfp_mip_name(const struct nfp_mip *mip); +void nfp_mip_symtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size); +void nfp_mip_strtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size); +int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id, + uint64_t *off); +#endif diff --git a/drivers/net/nfp/nfpcore/nfp_mutex.c b/drivers/net/nfp/nfpcore/nfp_mutex.c new file mode 100644 index 00000000..318c5800 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_mutex.c @@ -0,0 +1,424 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#include + +#include +#include +#include + +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +#define MUTEX_LOCKED(interface) ((((uint32_t)(interface)) << 16) | 0x000f) +#define MUTEX_UNLOCK(interface) (0 | 0x0000) + +#define MUTEX_IS_LOCKED(value) (((value) & 0xffff) == 0x000f) +#define MUTEX_IS_UNLOCKED(value) (((value) & 0xffff) == 0x0000) +#define MUTEX_INTERFACE(value) (((value) >> 16) & 0xffff) + +/* + * If you need more than 65536 recursive locks, please + * rethink your code. 
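+ *
+ * For reference, the 64-bit lock word manipulated throughout this file
+ * is laid out as two 32-bit halves:
+ *
+ *	address + 0: lock value; owner interface ID in the upper 16 bits,
+ *	             0x000f in the lower 16 bits when locked, 0x0000 when
+ *	             unlocked
+ *	address + 4: the 32-bit key that every handle for this mutex must
+ *	             match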
+ */ +#define MUTEX_DEPTH_MAX 0xffff + +struct nfp_cpp_mutex { + struct nfp_cpp *cpp; + uint8_t target; + uint16_t depth; + unsigned long long address; + uint32_t key; + unsigned int usage; + struct nfp_cpp_mutex *prev, *next; +}; + +static int +_nfp_cpp_mutex_validate(uint32_t model, int *target, unsigned long long address) +{ + /* Address must be 64-bit aligned */ + if (address & 7) + return NFP_ERRNO(EINVAL); + + if (NFP_CPP_MODEL_IS_6000(model)) { + if (*target != NFP_CPP_TARGET_MU) + return NFP_ERRNO(EINVAL); + } else { + return NFP_ERRNO(EINVAL); + } + + return 0; +} + +/* + * Initialize a mutex location + * + * The CPP target:address must point to a 64-bit aligned location, and + * will initialize 64 bits of data at the location. + * + * This creates the initial mutex state, as locked by this + * nfp_cpp_interface(). + * + * This function should only be called when setting up + * the initial lock state upon boot-up of the system. + * + * @param mutex NFP CPP Mutex handle + * @param target NFP CPP target ID (ie NFP_CPP_TARGET_CLS or + * NFP_CPP_TARGET_MU) + * @param address Offset into the address space of the NFP CPP target ID + * @param key Unique 32-bit value for this mutex + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int +nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, unsigned long long address, + uint32_t key) +{ + uint32_t model = nfp_cpp_model(cpp); + uint32_t muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ + int err; + + err = _nfp_cpp_mutex_validate(model, &target, address); + if (err < 0) + return err; + + err = nfp_cpp_writel(cpp, muw, address + 4, key); + if (err < 0) + return err; + + err = + nfp_cpp_writel(cpp, muw, address + 0, + MUTEX_LOCKED(nfp_cpp_interface(cpp))); + if (err < 0) + return err; + + return 0; +} + +/* + * Create a mutex handle from an address controlled by a MU Atomic engine + * + * The CPP target:address must point to a 64-bit aligned location, and + * reserve 64 bits of data at the location for use by the handle. + * + * Only target/address pairs that point to entities that support the + * MU Atomic Engine are supported. + * + * @param cpp NFP CPP handle + * @param target NFP CPP target ID (ie NFP_CPP_TARGET_CLS or + * NFP_CPP_TARGET_MU) + * @param address Offset into the address space of the NFP CPP target ID + * @param key 32-bit unique key (must match the key at this location) + * + * @return A non-NULL struct nfp_cpp_mutex * on success, NULL on failure. + */ +struct nfp_cpp_mutex * +nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + unsigned long long address, uint32_t key) +{ + uint32_t model = nfp_cpp_model(cpp); + struct nfp_cpp_mutex *mutex; + uint32_t mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ + int err; + uint32_t tmp; + + /* Look for cached mutex */ + for (mutex = cpp->mutex_cache; mutex; mutex = mutex->next) { + if (mutex->target == target && mutex->address == address) + break; + } + + if (mutex) { + if (mutex->key == key) { + mutex->usage++; + return mutex; + } + + /* If the key doesn't match... 
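+		 * the caller is trying to attach to the same lock word
+		 * with a different key; refuse it rather than alias the
+		 * mutex under two keys.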
*/ + return NFP_ERRPTR(EEXIST); + } + + err = _nfp_cpp_mutex_validate(model, &target, address); + if (err < 0) + return NULL; + + err = nfp_cpp_readl(cpp, mur, address + 4, &tmp); + if (err < 0) + return NULL; + + if (tmp != key) + return NFP_ERRPTR(EEXIST); + + mutex = calloc(sizeof(*mutex), 1); + if (!mutex) + return NFP_ERRPTR(ENOMEM); + + mutex->cpp = cpp; + mutex->target = target; + mutex->address = address; + mutex->key = key; + mutex->depth = 0; + mutex->usage = 1; + + /* Add mutex to the cache */ + if (cpp->mutex_cache) { + cpp->mutex_cache->prev = mutex; + mutex->next = cpp->mutex_cache; + cpp->mutex_cache = mutex; + } else { + cpp->mutex_cache = mutex; + } + + return mutex; +} + +struct nfp_cpp * +nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex) +{ + return mutex->cpp; +} + +uint32_t +nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex) +{ + return mutex->key; +} + +uint16_t +nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex) +{ + uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + uint32_t value, key; + int err; + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); + if (err < 0) + return err; + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key); + if (err < 0) + return err; + + if (key != mutex->key) + return NFP_ERRNO(EPERM); + + if (!MUTEX_IS_LOCKED(value)) + return 0; + + return MUTEX_INTERFACE(value); +} + +int +nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex) +{ + return mutex->target; +} + +uint64_t +nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex) +{ + return mutex->address; +} + +/* + * Free a mutex handle - does not alter the lock state + * + * @param mutex NFP CPP Mutex handle + */ +void +nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex) +{ + mutex->usage--; + if (mutex->usage > 0) + return; + + /* Remove mutex from the cache */ + if (mutex->next) + mutex->next->prev = mutex->prev; + if (mutex->prev) + mutex->prev->next = mutex->next; + + /* If mutex->cpp == NULL, something broke */ + if (mutex->cpp && mutex == mutex->cpp->mutex_cache) + mutex->cpp->mutex_cache = mutex->next; + + free(mutex); +} + +/* + * Lock a mutex handle, using the NFP MU Atomic Engine + * + * @param mutex NFP CPP Mutex handle + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int +nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) +{ + int err; + time_t warn_at = time(NULL) + 15; + + while ((err = nfp_cpp_mutex_trylock(mutex)) != 0) { + /* If errno != EBUSY, then the lock was damaged */ + if (err < 0 && errno != EBUSY) + return err; + if (time(NULL) >= warn_at) { + printf("Warning: waiting for NFP mutex\n"); + printf("\tusage:%u\n", mutex->usage); + printf("\tdepth:%hd]\n", mutex->depth); + printf("\ttarget:%d\n", mutex->target); + printf("\taddr:%llx\n", mutex->address); + printf("\tkey:%08x]\n", mutex->key); + warn_at = time(NULL) + 60; + } + sched_yield(); + } + return 0; +} + +/* + * Unlock a mutex handle, using the NFP MU Atomic Engine + * + * @param mutex NFP CPP Mutex handle + * + * @return 0 on success, or -1 on failure (and set errno accordingly). 
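+ *
+ * Typical pairing with the lock side (sketch; assumes a handle obtained
+ * from nfp_cpp_mutex_alloc()):
+ *
+ *	if (nfp_cpp_mutex_lock(mutex) == 0) {
+ *		...critical section touching the shared NFP resource...
+ *		nfp_cpp_mutex_unlock(mutex);
+ *	}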
+ */ +int +nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex) +{ + uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + struct nfp_cpp *cpp = mutex->cpp; + uint32_t key, value; + uint16_t interface = nfp_cpp_interface(cpp); + int err; + + if (mutex->depth > 1) { + mutex->depth--; + return 0; + } + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); + if (err < 0) + goto exit; + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key); + if (err < 0) + goto exit; + + if (key != mutex->key) { + err = NFP_ERRNO(EPERM); + goto exit; + } + + if (value != MUTEX_LOCKED(interface)) { + err = NFP_ERRNO(EACCES); + goto exit; + } + + err = nfp_cpp_writel(cpp, muw, mutex->address, MUTEX_UNLOCK(interface)); + if (err < 0) + goto exit; + + mutex->depth = 0; + +exit: + return err; +} + +/* + * Attempt to lock a mutex handle, using the NFP MU Atomic Engine + * + * Valid lock states: + * + * 0x....0000 - Unlocked + * 0x....000f - Locked + * + * @param mutex NFP CPP Mutex handle + * @return 0 if the lock succeeded, -1 on failure (and errno set + * appropriately). + */ +int +nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) +{ + uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + uint32_t mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */ + uint32_t key, value, tmp; + struct nfp_cpp *cpp = mutex->cpp; + int err; + + if (mutex->depth > 0) { + if (mutex->depth == MUTEX_DEPTH_MAX) + return NFP_ERRNO(E2BIG); + + mutex->depth++; + return 0; + } + + /* Verify that the lock marker is not damaged */ + err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key); + if (err < 0) + goto exit; + + if (key != mutex->key) { + err = NFP_ERRNO(EPERM); + goto exit; + } + + /* + * Compare against the unlocked state, and if true, + * write the interface id into the top 16 bits, and + * mark as locked. + */ + value = MUTEX_LOCKED(nfp_cpp_interface(cpp)); + + /* + * We use test_set_imm here, as it implies a read + * of the current state, and sets the bits in the + * bytemask of the command to 1s. Since the mutex + * is guaranteed to be 64-bit aligned, the bytemask + * of this 32-bit command is ensured to be 8'b00001111, + * which implies that the lower 4 bits will be set to + * ones regardless of the initial state. + * + * Since this is a 'Readback' operation, with no Pull + * data, we can treat this as a normal Push (read) + * atomic, which returns the original value. + */ + err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp); + if (err < 0) + goto exit; + + /* Was it unlocked? */ + if (MUTEX_IS_UNLOCKED(tmp)) { + /* + * The read value can only be 0x....0000 in the unlocked state. + * If there was another contending for this lock, then + * the lock state would be 0x....000f + * + * Write our owner ID into the lock + * While not strictly necessary, this helps with + * debug and bookkeeping. + */ + err = nfp_cpp_writel(cpp, muw, mutex->address, value); + if (err < 0) + goto exit; + + mutex->depth = 1; + goto exit; + } + + /* Already locked by us? Success! */ + if (tmp == value) { + mutex->depth = 1; + goto exit; + } + + err = NFP_ERRNO(MUTEX_IS_LOCKED(tmp) ? 
EBUSY : EINVAL); + +exit: + return err; +} diff --git a/drivers/net/nfp/nfpcore/nfp_nffw.c b/drivers/net/nfp/nfpcore/nfp_nffw.c new file mode 100644 index 00000000..8bec0e3c --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_nffw.c @@ -0,0 +1,235 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#include "nfp_cpp.h" +#include "nfp_nffw.h" +#include "nfp_mip.h" +#include "nfp6000/nfp6000.h" +#include "nfp_resource.h" + +/* + * flg_info_version = flags[0]<27:16> + * This is a small version counter intended only to detect if the current + * implementation can read the current struct. Struct changes should be very + * rare and as such a 12-bit counter should cover large spans of time. By the + * time it wraps around, we don't expect to have 4096 versions of this struct + * to be in use at the same time. + */ +static uint32_t +nffw_res_info_version_get(const struct nfp_nffw_info_data *res) +{ + return (res->flags[0] >> 16) & 0xfff; +} + +/* flg_init = flags[0]<0> */ +static uint32_t +nffw_res_flg_init_get(const struct nfp_nffw_info_data *res) +{ + return (res->flags[0] >> 0) & 1; +} + +/* loaded = loaded__mu_da__mip_off_hi<31:31> */ +static uint32_t +nffw_fwinfo_loaded_get(const struct nffw_fwinfo *fi) +{ + return (fi->loaded__mu_da__mip_off_hi >> 31) & 1; +} + +/* mip_cppid = mip_cppid */ +static uint32_t +nffw_fwinfo_mip_cppid_get(const struct nffw_fwinfo *fi) +{ + return fi->mip_cppid; +} + +/* loaded = loaded__mu_da__mip_off_hi<8:8> */ +static uint32_t +nffw_fwinfo_mip_mu_da_get(const struct nffw_fwinfo *fi) +{ + return (fi->loaded__mu_da__mip_off_hi >> 8) & 1; +} + +/* mip_offset = (loaded__mu_da__mip_off_hi<7:0> << 8) | mip_offset_lo */ +static uint64_t +nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi) +{ + uint64_t mip_off_hi = fi->loaded__mu_da__mip_off_hi; + + return (mip_off_hi & 0xFF) << 32 | fi->mip_offset_lo; +} + +#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0 +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12) + +static int +nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp) +{ + unsigned int mode, addr40; + uint32_t xpbaddr, imbcppat; + int err; + + /* Hardcoded XPB IMB Base, island 0 */ + xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4; + err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat); + if (err < 0) + return err; + + mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat); + addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE); + + return nfp_cppat_mu_locality_lsb(mode, addr40); +} + +static unsigned int +nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr) +{ + /* + * For the this code, version 0 is most likely to be version 1 in this + * case. Since the kernel driver does not take responsibility for + * initialising the nfp.nffw resource, any previous code (CA firmware or + * userspace) that left the version 0 and did set the init flag is going + * to be version 1. 
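+	 * In short: when the init flag is set, a version field of 0 is
+	 * treated exactly like version 1, which is why both cases share
+	 * a branch below.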
+ */ + switch (nffw_res_info_version_get(fwinf)) { + case 0: + case 1: + *arr = &fwinf->info.v1.fwinfo[0]; + return NFFW_FWINFO_CNT_V1; + case 2: + *arr = &fwinf->info.v2.fwinfo[0]; + return NFFW_FWINFO_CNT_V2; + default: + *arr = NULL; + return 0; + } +} + +/* + * nfp_nffw_info_open() - Acquire the lock on the NFFW table + * @cpp: NFP CPP handle + * + * Return: 0, or -ERRNO + */ +struct nfp_nffw_info * +nfp_nffw_info_open(struct nfp_cpp *cpp) +{ + struct nfp_nffw_info_data *fwinf; + struct nfp_nffw_info *state; + uint32_t info_ver; + int err; + + state = malloc(sizeof(*state)); + if (!state) + return NULL; + + memset(state, 0, sizeof(*state)); + + state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW); + if (!state->res) + goto err_free; + + fwinf = &state->fwinf; + + if (sizeof(*fwinf) > nfp_resource_size(state->res)) + goto err_release; + + err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res), + nfp_resource_address(state->res), + fwinf, sizeof(*fwinf)); + if (err < (int)sizeof(*fwinf)) + goto err_release; + + if (!nffw_res_flg_init_get(fwinf)) + goto err_release; + + info_ver = nffw_res_info_version_get(fwinf); + if (info_ver > NFFW_INFO_VERSION_CURRENT) + goto err_release; + + state->cpp = cpp; + return state; + +err_release: + nfp_resource_release(state->res); +err_free: + free(state); + return NULL; +} + +/* + * nfp_nffw_info_release() - Release the lock on the NFFW table + * @state: NFP FW info state + * + * Return: 0, or -ERRNO + */ +void +nfp_nffw_info_close(struct nfp_nffw_info *state) +{ + nfp_resource_release(state->res); + free(state); +} + +/* + * nfp_nffw_info_fwid_first() - Return the first firmware ID in the NFFW + * @state: NFP FW info state + * + * Return: First NFFW firmware info, NULL on failure + */ +static struct nffw_fwinfo * +nfp_nffw_info_fwid_first(struct nfp_nffw_info *state) +{ + struct nffw_fwinfo *fwinfo; + unsigned int cnt, i; + + cnt = nffw_res_fwinfos(&state->fwinf, &fwinfo); + if (!cnt) + return NULL; + + for (i = 0; i < cnt; i++) + if (nffw_fwinfo_loaded_get(&fwinfo[i])) + return &fwinfo[i]; + + return NULL; +} + +/* + * nfp_nffw_info_mip_first() - Retrieve the location of the first FW's MIP + * @state: NFP FW info state + * @cpp_id: Pointer to the CPP ID of the MIP + * @off: Pointer to the CPP Address of the MIP + * + * Return: 0, or -ERRNO + */ +int +nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id, + uint64_t *off) +{ + struct nffw_fwinfo *fwinfo; + + fwinfo = nfp_nffw_info_fwid_first(state); + if (!fwinfo) + return -EINVAL; + + *cpp_id = nffw_fwinfo_mip_cppid_get(fwinfo); + *off = nffw_fwinfo_mip_offset_get(fwinfo); + + if (nffw_fwinfo_mip_mu_da_get(fwinfo)) { + int locality_off; + + if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU) + return 0; + + locality_off = nfp_mip_mu_locality_lsb(state->cpp); + if (locality_off < 0) + return locality_off; + + *off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); + *off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; + } + + return 0; +} diff --git a/drivers/net/nfp/nfpcore/nfp_nffw.h b/drivers/net/nfp/nfpcore/nfp_nffw.h new file mode 100644 index 00000000..3bbdf1c1 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_nffw.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_NFFW_H__ +#define __NFP_NFFW_H__ + +#include "nfp-common/nfp_platform.h" +#include "nfp_cpp.h" + +/* + * Init-CSR owner IDs for firmware map to firmware IDs which start at 4. 
+ * Lower IDs are reserved for target and loader IDs.
+ */
+#define NFFW_FWID_EXT 3 /* For active MEs that we didn't load. */
+#define NFFW_FWID_BASE 4
+
+#define NFFW_FWID_ALL 255
+
+/**
+ * NFFW_INFO_VERSION history:
+ * 0: This was never actually used (before versioning), but it refers to
+ * the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later
+ * changed to 200.
+ * 1: First versioned struct, with
+ * FWINFO_CNT = 120
+ * MEINFO_CNT = 120
+ * 2: FWINFO_CNT = 200
+ * MEINFO_CNT = 200
+ */
+#define NFFW_INFO_VERSION_CURRENT 2
+
+/* Enough for all current chip families */
+#define NFFW_MEINFO_CNT_V1 120
+#define NFFW_FWINFO_CNT_V1 120
+#define NFFW_MEINFO_CNT_V2 200
+#define NFFW_FWINFO_CNT_V2 200
+
+struct nffw_meinfo {
+	uint32_t ctxmask__fwid__meid;
+};
+
+struct nffw_fwinfo {
+	uint32_t loaded__mu_da__mip_off_hi;
+	uint32_t mip_cppid; /* 0 means no MIP */
+	uint32_t mip_offset_lo;
+};
+
+struct nfp_nffw_info_v1 {
+	struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V1];
+	struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V1];
+};
+
+struct nfp_nffw_info_v2 {
+	struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V2];
+	struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V2];
+};
+
+struct nfp_nffw_info_data {
+	uint32_t flags[2];
+	union {
+		struct nfp_nffw_info_v1 v1;
+		struct nfp_nffw_info_v2 v2;
+	} info;
+};
+
+struct nfp_nffw_info {
+	struct nfp_cpp *cpp;
+	struct nfp_resource *res;
+
+	struct nfp_nffw_info_data fwinf;
+};
+
+struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp);
+void nfp_nffw_info_close(struct nfp_nffw_info *state);
+
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.c b/drivers/net/nfp/nfpcore/nfp_nsp.c
new file mode 100644
index 00000000..876a4017
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nsp.c
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */ + +#define NFP_SUBSYS "nfp_nsp" + +#include +#include + +#include + +#include "nfp_cpp.h" +#include "nfp_nsp.h" +#include "nfp_resource.h" + +int +nfp_nsp_config_modified(struct nfp_nsp *state) +{ + return state->modified; +} + +void +nfp_nsp_config_set_modified(struct nfp_nsp *state, int modified) +{ + state->modified = modified; +} + +void * +nfp_nsp_config_entries(struct nfp_nsp *state) +{ + return state->entries; +} + +unsigned int +nfp_nsp_config_idx(struct nfp_nsp *state) +{ + return state->idx; +} + +void +nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, unsigned int idx) +{ + state->entries = entries; + state->idx = idx; +} + +void +nfp_nsp_config_clear_state(struct nfp_nsp *state) +{ + state->entries = NULL; + state->idx = 0; +} + +static void +nfp_nsp_print_extended_error(uint32_t ret_val) +{ + int i; + + if (!ret_val) + return; + + for (i = 0; i < (int)ARRAY_SIZE(nsp_errors); i++) + if (ret_val == (uint32_t)nsp_errors[i].code) + printf("err msg: %s\n", nsp_errors[i].msg); +} + +static int +nfp_nsp_check(struct nfp_nsp *state) +{ + struct nfp_cpp *cpp = state->cpp; + uint64_t nsp_status, reg; + uint32_t nsp_cpp; + int err; + + nsp_cpp = nfp_resource_cpp_id(state->res); + nsp_status = nfp_resource_address(state->res) + NSP_STATUS; + + err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, ®); + if (err < 0) + return err; + + if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) { + printf("Cannot detect NFP Service Processor\n"); + return -ENODEV; + } + + state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg); + state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg); + + if (state->ver.major != NSP_MAJOR || state->ver.minor < NSP_MINOR) { + printf("Unsupported ABI %hu.%hu\n", state->ver.major, + state->ver.minor); + return -EINVAL; + } + + if (reg & NSP_STATUS_BUSY) { + printf("Service processor busy!\n"); + return -EBUSY; + } + + return 0; +} + +/* + * nfp_nsp_open() - Prepare for communication and lock the NSP resource. + * @cpp: NFP CPP Handle + */ +struct nfp_nsp * +nfp_nsp_open(struct nfp_cpp *cpp) +{ + struct nfp_resource *res; + struct nfp_nsp *state; + int err; + + res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP); + if (!res) + return NULL; + + state = malloc(sizeof(*state)); + if (!state) { + nfp_resource_release(res); + return NULL; + } + memset(state, 0, sizeof(*state)); + state->cpp = cpp; + state->res = res; + + err = nfp_nsp_check(state); + if (err) { + nfp_nsp_close(state); + return NULL; + } + + return state; +} + +/* + * nfp_nsp_close() - Clean up and unlock the NSP resource. 
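+ *
+ * Counterpart of nfp_nsp_open(): releases the NFP_RESOURCE_NSP lock so
+ * other NSP users can proceed.
+ *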
+ * @state: NFP SP state + */ +void +nfp_nsp_close(struct nfp_nsp *state) +{ + nfp_resource_release(state->res); + free(state); +} + +uint16_t +nfp_nsp_get_abi_ver_major(struct nfp_nsp *state) +{ + return state->ver.major; +} + +uint16_t +nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state) +{ + return state->ver.minor; +} + +static int +nfp_nsp_wait_reg(struct nfp_cpp *cpp, uint64_t *reg, uint32_t nsp_cpp, + uint64_t addr, uint64_t mask, uint64_t val) +{ + struct timespec wait; + int count; + int err; + + wait.tv_sec = 0; + wait.tv_nsec = 25000000; + count = 0; + + for (;;) { + err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg); + if (err < 0) + return err; + + if ((*reg & mask) == val) + return 0; + + nanosleep(&wait, 0); + if (count++ > 1000) + return -ETIMEDOUT; + } +} + +/* + * nfp_nsp_command() - Execute a command on the NFP Service Processor + * @state: NFP SP state + * @code: NFP SP Command Code + * @option: NFP SP Command Argument + * @buff_cpp: NFP SP Buffer CPP Address info + * @buff_addr: NFP SP Buffer Host address + * + * Return: 0 for success with no result + * + * positive value for NSP completion with a result code + * + * -EAGAIN if the NSP is not yet present + * -ENODEV if the NSP is not a supported model + * -EBUSY if the NSP is stuck + * -EINTR if interrupted while waiting for completion + * -ETIMEDOUT if the NSP took longer than 30 seconds to complete + */ +static int +nfp_nsp_command(struct nfp_nsp *state, uint16_t code, uint32_t option, + uint32_t buff_cpp, uint64_t buff_addr) +{ + uint64_t reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command; + struct nfp_cpp *cpp = state->cpp; + uint32_t nsp_cpp; + int err; + + nsp_cpp = nfp_resource_cpp_id(state->res); + nsp_base = nfp_resource_address(state->res); + nsp_status = nsp_base + NSP_STATUS; + nsp_command = nsp_base + NSP_COMMAND; + nsp_buffer = nsp_base + NSP_BUFFER; + + err = nfp_nsp_check(state); + if (err) + return err; + + if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) || + !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) { + printf("Host buffer out of reach %08x %" PRIx64 "\n", + buff_cpp, buff_addr); + return -EINVAL; + } + + err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer, + FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) | + FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr)); + if (err < 0) + return err; + + err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command, + FIELD_PREP(NSP_COMMAND_OPTION, option) | + FIELD_PREP(NSP_COMMAND_CODE, code) | + FIELD_PREP(NSP_COMMAND_START, 1)); + if (err < 0) + return err; + + /* Wait for NSP_COMMAND_START to go to 0 */ + err = nfp_nsp_wait_reg(cpp, ®, nsp_cpp, nsp_command, + NSP_COMMAND_START, 0); + if (err) { + printf("Error %d waiting for code 0x%04x to start\n", + err, code); + return err; + } + + /* Wait for NSP_STATUS_BUSY to go to 0 */ + err = nfp_nsp_wait_reg(cpp, ®, nsp_cpp, nsp_status, NSP_STATUS_BUSY, + 0); + if (err) { + printf("Error %d waiting for code 0x%04x to complete\n", + err, code); + return err; + } + + err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &ret_val); + if (err < 0) + return err; + ret_val = FIELD_GET(NSP_COMMAND_OPTION, ret_val); + + err = FIELD_GET(NSP_STATUS_RESULT, reg); + if (err) { + printf("Result (error) code set: %d (%d) command: %d\n", + -err, (int)ret_val, code); + nfp_nsp_print_extended_error(ret_val); + return -err; + } + + return ret_val; +} + +#define SZ_1M 0x00100000 + +static int +nfp_nsp_command_buf(struct nfp_nsp *nsp, uint16_t code, uint32_t option, + const void *in_buf, unsigned int in_size, void *out_buf, + unsigned int out_size) +{ + struct nfp_cpp *cpp = 
nsp->cpp; + unsigned int max_size; + uint64_t reg, cpp_buf; + int ret, err; + uint32_t cpp_id; + + if (nsp->ver.minor < 13) { + printf("NSP: Code 0x%04x with buffer not supported\n", code); + printf("\t(ABI %hu.%hu)\n", nsp->ver.major, nsp->ver.minor); + return -EOPNOTSUPP; + } + + err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), + nfp_resource_address(nsp->res) + + NSP_DFLT_BUFFER_CONFIG, + ®); + if (err < 0) + return err; + + max_size = RTE_MAX(in_size, out_size); + if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) { + printf("NSP: default buffer too small for command 0x%04x\n", + code); + printf("\t(%llu < %u)\n", + FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M, + max_size); + return -EINVAL; + } + + err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), + nfp_resource_address(nsp->res) + + NSP_DFLT_BUFFER, + ®); + if (err < 0) + return err; + + cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8; + cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg); + + if (in_buf && in_size) { + err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size); + if (err < 0) + return err; + } + /* Zero out remaining part of the buffer */ + if (out_buf && out_size && out_size > in_size) { + memset(out_buf, 0, out_size - in_size); + err = nfp_cpp_write(cpp, cpp_id, cpp_buf + in_size, out_buf, + out_size - in_size); + if (err < 0) + return err; + } + + ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf); + if (ret < 0) + return ret; + + if (out_buf && out_size) { + err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size); + if (err < 0) + return err; + } + + return ret; +} + +int +nfp_nsp_wait(struct nfp_nsp *state) +{ + struct timespec wait; + int count; + int err; + + wait.tv_sec = 0; + wait.tv_nsec = 25000000; + count = 0; + + for (;;) { + err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0); + if (err != -EAGAIN) + break; + + nanosleep(&wait, 0); + + if (count++ > 1000) { + err = -ETIMEDOUT; + break; + } + } + if (err) + printf("NSP failed to respond %d\n", err); + + return err; +} + +int +nfp_nsp_device_soft_reset(struct nfp_nsp *state) +{ + return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0); +} + +int +nfp_nsp_mac_reinit(struct nfp_nsp *state) +{ + return nfp_nsp_command(state, SPCODE_MAC_INIT, 0, 0, 0); +} + +int +nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, size, buf, size, + NULL, 0); +} + +int +nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0, + buf, size); +} + +int +nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf, + unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size, + NULL, 0); +} + +int +nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0, + buf, size); +} + +int +nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, void *buf, + unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_NSP_SENSORS, sensor_mask, NULL, + 0, buf, size); +} diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.h b/drivers/net/nfp/nfpcore/nfp_nsp.h new file mode 100644 index 00000000..c9c7b0d0 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_nsp.h @@ -0,0 +1,304 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#ifndef NSP_NSP_H +#define NSP_NSP_H 1 + +#include "nfp_cpp.h" +#include "nfp_nsp.h" + +#define GENMASK_ULL(h, l) \ + (((~0ULL) - (1ULL << (l)) + 1) & \ + (~0ULL >> (64 - 1 - (h)))) + +#define __bf_shf(x) (__builtin_ffsll(x) - 1) + +#define FIELD_GET(_mask, _reg) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + (typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \ + })) + +#define FIELD_FIT(_mask, _val) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + !((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \ + })) + +#define FIELD_PREP(_mask, _val) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + ((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \ + })) + +/* Offsets relative to the CSR base */ +#define NSP_STATUS 0x00 +#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48) +#define NSP_STATUS_MAJOR GENMASK_ULL(47, 44) +#define NSP_STATUS_MINOR GENMASK_ULL(43, 32) +#define NSP_STATUS_CODE GENMASK_ULL(31, 16) +#define NSP_STATUS_RESULT GENMASK_ULL(15, 8) +#define NSP_STATUS_BUSY BIT_ULL(0) + +#define NSP_COMMAND 0x08 +#define NSP_COMMAND_OPTION GENMASK_ULL(63, 32) +#define NSP_COMMAND_CODE GENMASK_ULL(31, 16) +#define NSP_COMMAND_START BIT_ULL(0) + +/* CPP address to retrieve the data from */ +#define NSP_BUFFER 0x10 +#define NSP_BUFFER_CPP GENMASK_ULL(63, 40) +#define NSP_BUFFER_PCIE GENMASK_ULL(39, 38) +#define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0) + +#define NSP_DFLT_BUFFER 0x18 + +#define NSP_DFLT_BUFFER_CONFIG 0x20 +#define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0) + +#define NSP_MAGIC 0xab10 +#define NSP_MAJOR 0 +#define NSP_MINOR 8 + +#define NSP_CODE_MAJOR GENMASK(15, 12) +#define NSP_CODE_MINOR GENMASK(11, 0) + +enum nfp_nsp_cmd { + SPCODE_NOOP = 0, /* No operation */ + SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */ + SPCODE_FW_DEFAULT = 2, /* Load default (UNDI) FW */ + SPCODE_PHY_INIT = 3, /* Initialize the PHY */ + SPCODE_MAC_INIT = 4, /* Initialize the MAC */ + SPCODE_PHY_RXADAPT = 5, /* Re-run PHY RX Adaptation */ + SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */ + SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */ + SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */ + SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */ + SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */ +}; + +static const struct { + int code; + const char *msg; +} nsp_errors[] = { + { 6010, "could not map to phy for port" }, + { 6011, "not an allowed rate/lanes for port" }, + { 6012, "not an allowed rate/lanes for port" }, + { 6013, "high/low error, change other port first" }, + { 6014, "config not found in flash" }, +}; + +struct nfp_nsp { + struct nfp_cpp *cpp; + struct nfp_resource *res; + struct { + uint16_t major; + uint16_t minor; + } ver; + + /* Eth table config state */ + int modified; + unsigned int idx; + void *entries; +}; + +struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp); +void nfp_nsp_close(struct nfp_nsp *state); +uint16_t nfp_nsp_get_abi_ver_major(struct nfp_nsp *state); +uint16_t nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); +int nfp_nsp_wait(struct nfp_nsp *state); +int nfp_nsp_device_soft_reset(struct nfp_nsp *state); +int nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_mac_reinit(struct nfp_nsp *state); +int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, + void *buf, unsigned int size); + +static inline int nfp_nsp_has_mac_reinit(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 20; 
+} + +enum nfp_eth_interface { + NFP_INTERFACE_NONE = 0, + NFP_INTERFACE_SFP = 1, + NFP_INTERFACE_SFPP = 10, + NFP_INTERFACE_SFP28 = 28, + NFP_INTERFACE_QSFP = 40, + NFP_INTERFACE_CXP = 100, + NFP_INTERFACE_QSFP28 = 112, +}; + +enum nfp_eth_media { + NFP_MEDIA_DAC_PASSIVE = 0, + NFP_MEDIA_DAC_ACTIVE, + NFP_MEDIA_FIBRE, +}; + +enum nfp_eth_aneg { + NFP_ANEG_AUTO = 0, + NFP_ANEG_SEARCH, + NFP_ANEG_25G_CONSORTIUM, + NFP_ANEG_25G_IEEE, + NFP_ANEG_DISABLED, +}; + +enum nfp_eth_fec { + NFP_FEC_AUTO_BIT = 0, + NFP_FEC_BASER_BIT, + NFP_FEC_REED_SOLOMON_BIT, + NFP_FEC_DISABLED_BIT, +}; + +#define NFP_FEC_AUTO BIT(NFP_FEC_AUTO_BIT) +#define NFP_FEC_BASER BIT(NFP_FEC_BASER_BIT) +#define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT) +#define NFP_FEC_DISABLED BIT(NFP_FEC_DISABLED_BIT) + +#define ETH_ALEN 6 + +/** + * struct nfp_eth_table - ETH table information + * @count: number of table entries + * @max_index: max of @index fields of all @ports + * @ports: table of ports + * + * @eth_index: port index according to legacy ethX numbering + * @index: chip-wide first channel index + * @nbi: NBI index + * @base: first channel index (within NBI) + * @lanes: number of channels + * @speed: interface speed (in Mbps) + * @interface: interface (module) plugged in + * @media: media type of the @interface + * @fec: forward error correction mode + * @aneg: auto negotiation mode + * @mac_addr: interface MAC address + * @label_port: port id + * @label_subport: id of interface within port (for split ports) + * @enabled: is enabled? + * @tx_enabled: is TX enabled? + * @rx_enabled: is RX enabled? + * @override_changed: is media reconfig pending? + * + * @port_type: one of %PORT_* defines for ethtool + * @port_lanes: total number of lanes on the port (sum of lanes of all subports) + * @is_split: is interface part of a split port + * @fec_modes_supported: bitmap of FEC modes supported + */ +struct nfp_eth_table { + unsigned int count; + unsigned int max_index; + struct nfp_eth_table_port { + unsigned int eth_index; + unsigned int index; + unsigned int nbi; + unsigned int base; + unsigned int lanes; + unsigned int speed; + + unsigned int interface; + enum nfp_eth_media media; + + enum nfp_eth_fec fec; + enum nfp_eth_aneg aneg; + + uint8_t mac_addr[ETH_ALEN]; + + uint8_t label_port; + uint8_t label_subport; + + int enabled; + int tx_enabled; + int rx_enabled; + + int override_changed; + + /* Computed fields */ + uint8_t port_type; + + unsigned int port_lanes; + + int is_split; + + unsigned int fec_modes_supported; + } ports[0]; +}; + +struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp); + +int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable); +int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, + int configed); +int +nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode); + +int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf, + unsigned int size); +void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, + unsigned int idx); +void nfp_nsp_config_clear_state(struct nfp_nsp *state); +void nfp_nsp_config_set_modified(struct nfp_nsp *state, int modified); +void *nfp_nsp_config_entries(struct nfp_nsp *state); +int nfp_nsp_config_modified(struct nfp_nsp *state); +unsigned int nfp_nsp_config_idx(struct nfp_nsp *state); + +static inline int nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port) +{ + return !!eth_port->fec_modes_supported; +} 
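+
+/*
+ * Example (sketch): combining the capability check above with the
+ * NFP_FEC_* masks, for @port pointing at a struct nfp_eth_table entry:
+ *
+ *	if (nfp_eth_can_support_fec(port) &&
+ *	    (port->fec_modes_supported & NFP_FEC_REED_SOLOMON))
+ *		...Reed-Solomon FEC can be requested via nfp_eth_set_fec()...
+ */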
+ +static inline unsigned int +nfp_eth_supported_fec_modes(struct nfp_eth_table_port *eth_port) +{ + return eth_port->fec_modes_supported; +} + +struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx); +int nfp_eth_config_commit_end(struct nfp_nsp *nsp); +void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp); + +int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode); +int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed); +int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes); + +/** + * struct nfp_nsp_identify - NSP static information + * @version: opaque version string + * @flags: version flags + * @br_primary: branch id of primary bootloader + * @br_secondary: branch id of secondary bootloader + * @br_nsp: branch id of NSP + * @primary: version of primarary bootloader + * @secondary: version id of secondary bootloader + * @nsp: version id of NSP + * @sensor_mask: mask of present sensors available on NIC + */ +struct nfp_nsp_identify { + char version[40]; + uint8_t flags; + uint8_t br_primary; + uint8_t br_secondary; + uint8_t br_nsp; + uint16_t primary; + uint16_t secondary; + uint16_t nsp; + uint64_t sensor_mask; +}; + +struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp); + +enum nfp_nsp_sensor_id { + NFP_SENSOR_CHIP_TEMPERATURE, + NFP_SENSOR_ASSEMBLY_POWER, + NFP_SENSOR_ASSEMBLY_12V_POWER, + NFP_SENSOR_ASSEMBLY_3V3_POWER, +}; + +int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id, + long *val); + +#endif diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c new file mode 100644 index 00000000..bfd1eddb --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#include +#include +#include "nfp_cpp.h" +#include "nfp_nsp.h" +#include "nfp_nffw.h" + +struct nsp_identify { + uint8_t version[40]; + uint8_t flags; + uint8_t br_primary; + uint8_t br_secondary; + uint8_t br_nsp; + uint16_t primary; + uint16_t secondary; + uint16_t nsp; + uint8_t reserved[6]; + uint64_t sensor_mask; +}; + +struct nfp_nsp_identify * +__nfp_nsp_identify(struct nfp_nsp *nsp) +{ + struct nfp_nsp_identify *nspi = NULL; + struct nsp_identify *ni; + int ret; + + if (nfp_nsp_get_abi_ver_minor(nsp) < 15) + return NULL; + + ni = malloc(sizeof(*ni)); + if (!ni) + return NULL; + + memset(ni, 0, sizeof(*ni)); + ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni)); + if (ret < 0) { + printf("reading bsp version failed %d\n", + ret); + goto exit_free; + } + + nspi = malloc(sizeof(*nspi)); + if (!nspi) + goto exit_free; + + memset(nspi, 0, sizeof(*nspi)); + memcpy(nspi->version, ni->version, sizeof(nspi->version)); + nspi->version[sizeof(nspi->version) - 1] = '\0'; + nspi->flags = ni->flags; + nspi->br_primary = ni->br_primary; + nspi->br_secondary = ni->br_secondary; + nspi->br_nsp = ni->br_nsp; + nspi->primary = rte_le_to_cpu_16(ni->primary); + nspi->secondary = rte_le_to_cpu_16(ni->secondary); + nspi->nsp = rte_le_to_cpu_16(ni->nsp); + nspi->sensor_mask = rte_le_to_cpu_64(ni->sensor_mask); + +exit_free: + free(ni); + return nspi; +} + +struct nfp_sensors { + uint32_t chip_temp; + uint32_t assembly_power; + uint32_t assembly_12v_power; + uint32_t assembly_3v3_power; +}; + +int +nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id, long *val) +{ + struct nfp_sensors s; + struct nfp_nsp *nsp; + int ret; + + nsp = nfp_nsp_open(cpp); + if (!nsp) + return -EIO; + + ret = nfp_nsp_read_sensors(nsp, BIT(id), &s, sizeof(s)); + nfp_nsp_close(nsp); + + if (ret < 0) + return ret; + + switch (id) { + case NFP_SENSOR_CHIP_TEMPERATURE: + *val = rte_le_to_cpu_32(s.chip_temp); + break; + case NFP_SENSOR_ASSEMBLY_POWER: + *val = rte_le_to_cpu_32(s.assembly_power); + break; + case NFP_SENSOR_ASSEMBLY_12V_POWER: + *val = rte_le_to_cpu_32(s.assembly_12v_power); + break; + case NFP_SENSOR_ASSEMBLY_3V3_POWER: + *val = rte_le_to_cpu_32(s.assembly_3v3_power); + break; + default: + return -EINVAL; + } + return 0; +} diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c new file mode 100644 index 00000000..67946891 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c @@ -0,0 +1,665 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#include +#include +#include +#include "nfp_cpp.h" +#include "nfp_nsp.h" +#include "nfp6000/nfp6000.h" + +#define GENMASK_ULL(h, l) \ + (((~0ULL) - (1ULL << (l)) + 1) & \ + (~0ULL >> (64 - 1 - (h)))) + +#define __bf_shf(x) (__builtin_ffsll(x) - 1) + +#define FIELD_GET(_mask, _reg) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + (typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \ + })) + +#define FIELD_FIT(_mask, _val) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + !((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \ + })) + +#define FIELD_PREP(_mask, _val) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + ((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \ + })) + +#define NSP_ETH_NBI_PORT_COUNT 24 +#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT) +#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * \ + sizeof(union eth_table_entry)) + +#define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0) +#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8) +#define NSP_ETH_PORT_LABEL GENMASK_ULL(53, 48) +#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54) +#define NSP_ETH_PORT_FEC_SUPP_BASER BIT_ULL(60) +#define NSP_ETH_PORT_FEC_SUPP_RS BIT_ULL(61) + +#define NSP_ETH_PORT_LANES_MASK rte_cpu_to_le_64(NSP_ETH_PORT_LANES) + +#define NSP_ETH_STATE_CONFIGURED BIT_ULL(0) +#define NSP_ETH_STATE_ENABLED BIT_ULL(1) +#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2) +#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3) +#define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8) +#define NSP_ETH_STATE_INTERFACE GENMASK_ULL(19, 12) +#define NSP_ETH_STATE_MEDIA GENMASK_ULL(21, 20) +#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22) +#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23) +#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26) + +#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0) +#define NSP_ETH_CTRL_ENABLED BIT_ULL(1) +#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2) +#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3) +#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4) +#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5) +#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6) +#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7) + +/* Which connector port. 
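+ * These values mirror the ethtool PORT_* connector defines, which is
+ * how @port_type of struct nfp_eth_table_port is reported.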
*/ +#define PORT_TP 0x00 +#define PORT_AUI 0x01 +#define PORT_MII 0x02 +#define PORT_FIBRE 0x03 +#define PORT_BNC 0x04 +#define PORT_DA 0x05 +#define PORT_NONE 0xef +#define PORT_OTHER 0xff + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define SPEED_5000 5000 +#define SPEED_10000 10000 +#define SPEED_14000 14000 +#define SPEED_20000 20000 +#define SPEED_25000 25000 +#define SPEED_40000 40000 +#define SPEED_50000 50000 +#define SPEED_56000 56000 +#define SPEED_100000 100000 + +enum nfp_eth_raw { + NSP_ETH_RAW_PORT = 0, + NSP_ETH_RAW_STATE, + NSP_ETH_RAW_MAC, + NSP_ETH_RAW_CONTROL, + + NSP_ETH_NUM_RAW +}; + +enum nfp_eth_rate { + RATE_INVALID = 0, + RATE_10M, + RATE_100M, + RATE_1G, + RATE_10G, + RATE_25G, +}; + +union eth_table_entry { + struct { + uint64_t port; + uint64_t state; + uint8_t mac_addr[6]; + uint8_t resv[2]; + uint64_t control; + }; + uint64_t raw[NSP_ETH_NUM_RAW]; +}; + +static const struct { + enum nfp_eth_rate rate; + unsigned int speed; +} nsp_eth_rate_tbl[] = { + { RATE_INVALID, 0, }, + { RATE_10M, SPEED_10, }, + { RATE_100M, SPEED_100, }, + { RATE_1G, SPEED_1000, }, + { RATE_10G, SPEED_10000, }, + { RATE_25G, SPEED_25000, }, +}; + +static unsigned int +nfp_eth_rate2speed(enum nfp_eth_rate rate) +{ + int i; + + for (i = 0; i < (int)ARRAY_SIZE(nsp_eth_rate_tbl); i++) + if (nsp_eth_rate_tbl[i].rate == rate) + return nsp_eth_rate_tbl[i].speed; + + return 0; +} + +static unsigned int +nfp_eth_speed2rate(unsigned int speed) +{ + int i; + + for (i = 0; i < (int)ARRAY_SIZE(nsp_eth_rate_tbl); i++) + if (nsp_eth_rate_tbl[i].speed == speed) + return nsp_eth_rate_tbl[i].rate; + + return RATE_INVALID; +} + +static void +nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src) +{ + int i; + + for (i = 0; i < (int)ETH_ALEN; i++) + dst[ETH_ALEN - i - 1] = src[i]; +} + +static void +nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src, + unsigned int index, struct nfp_eth_table_port *dst) +{ + unsigned int rate; + unsigned int fec; + uint64_t port, state; + + port = rte_le_to_cpu_64(src->port); + state = rte_le_to_cpu_64(src->state); + + dst->eth_index = FIELD_GET(NSP_ETH_PORT_INDEX, port); + dst->index = index; + dst->nbi = index / NSP_ETH_NBI_PORT_COUNT; + dst->base = index % NSP_ETH_NBI_PORT_COUNT; + dst->lanes = FIELD_GET(NSP_ETH_PORT_LANES, port); + + dst->enabled = FIELD_GET(NSP_ETH_STATE_ENABLED, state); + dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state); + dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state); + + rate = nfp_eth_rate2speed(FIELD_GET(NSP_ETH_STATE_RATE, state)); + dst->speed = dst->lanes * rate; + + dst->interface = FIELD_GET(NSP_ETH_STATE_INTERFACE, state); + dst->media = FIELD_GET(NSP_ETH_STATE_MEDIA, state); + + nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr); + + dst->label_port = FIELD_GET(NSP_ETH_PORT_PHYLABEL, port); + dst->label_subport = FIELD_GET(NSP_ETH_PORT_LABEL, port); + + if (nfp_nsp_get_abi_ver_minor(nsp) < 17) + return; + + dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state); + dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state); + + if (nfp_nsp_get_abi_ver_minor(nsp) < 22) + return; + + fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_BASER, port); + dst->fec_modes_supported |= fec << NFP_FEC_BASER_BIT; + fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_RS, port); + dst->fec_modes_supported |= fec << NFP_FEC_REED_SOLOMON_BIT; + if (dst->fec_modes_supported) + dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED; + + dst->fec = 1 << 
FIELD_GET(NSP_ETH_STATE_FEC, state); +} + +static void +nfp_eth_calc_port_geometry(struct nfp_eth_table *table) +{ + unsigned int i, j; + + for (i = 0; i < table->count; i++) { + table->max_index = RTE_MAX(table->max_index, + table->ports[i].index); + + for (j = 0; j < table->count; j++) { + if (table->ports[i].label_port != + table->ports[j].label_port) + continue; + table->ports[i].port_lanes += table->ports[j].lanes; + + if (i == j) + continue; + if (table->ports[i].label_subport == + table->ports[j].label_subport) + printf("Port %d subport %d is a duplicate\n", + table->ports[i].label_port, + table->ports[i].label_subport); + + table->ports[i].is_split = 1; + } + } +} + +static void +nfp_eth_calc_port_type(struct nfp_eth_table_port *entry) +{ + if (entry->interface == NFP_INTERFACE_NONE) { + entry->port_type = PORT_NONE; + return; + } + + if (entry->media == NFP_MEDIA_FIBRE) + entry->port_type = PORT_FIBRE; + else + entry->port_type = PORT_DA; +} + +static struct nfp_eth_table * +__nfp_eth_read_ports(struct nfp_nsp *nsp) +{ + union eth_table_entry *entries; + struct nfp_eth_table *table; + uint32_t table_sz; + int i, j, ret, cnt = 0; + + entries = malloc(NSP_ETH_TABLE_SIZE); + if (!entries) + return NULL; + + memset(entries, 0, NSP_ETH_TABLE_SIZE); + ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + if (ret < 0) { + printf("reading port table failed %d\n", ret); + goto err; + } + + for (i = 0; i < NSP_ETH_MAX_COUNT; i++) + if (entries[i].port & NSP_ETH_PORT_LANES_MASK) + cnt++; + + /* Some versions of flash will give us 0 instead of port count. For + * those that give a port count, verify it against the value calculated + * above. + */ + if (ret && ret != cnt) { + printf("table entry count (%d) unmatch entries present (%d)\n", + ret, cnt); + goto err; + } + + table_sz = sizeof(*table) + sizeof(struct nfp_eth_table_port) * cnt; + table = malloc(table_sz); + if (!table) + goto err; + + memset(table, 0, table_sz); + table->count = cnt; + for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++) + if (entries[i].port & NSP_ETH_PORT_LANES_MASK) + nfp_eth_port_translate(nsp, &entries[i], i, + &table->ports[j++]); + + nfp_eth_calc_port_geometry(table); + for (i = 0; i < (int)table->count; i++) + nfp_eth_calc_port_type(&table->ports[i]); + + free(entries); + + return table; + +err: + free(entries); + return NULL; +} + +/* + * nfp_eth_read_ports() - retrieve port information + * @cpp: NFP CPP handle + * + * Read the port information from the device. Returned structure should + * be freed with kfree() once no longer needed. + * + * Return: populated ETH table or NULL on error. 
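+ *
+ * Example (sketch; the table is allocated with malloc() in this file,
+ * so in this userspace port plain free() releases it):
+ *
+ *	struct nfp_eth_table *eth_tbl = nfp_eth_read_ports(cpp);
+ *	unsigned int i;
+ *
+ *	for (i = 0; eth_tbl && i < eth_tbl->count; i++)
+ *		printf("port %u: %u Mbps\n", eth_tbl->ports[i].eth_index,
+ *		       eth_tbl->ports[i].speed);
+ *	free(eth_tbl);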
+ */ +struct nfp_eth_table * +nfp_eth_read_ports(struct nfp_cpp *cpp) +{ + struct nfp_eth_table *ret; + struct nfp_nsp *nsp; + + nsp = nfp_nsp_open(cpp); + if (!nsp) + return NULL; + + ret = __nfp_eth_read_ports(nsp); + nfp_nsp_close(nsp); + + return ret; +} + +struct nfp_nsp * +nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + int ret; + + entries = malloc(NSP_ETH_TABLE_SIZE); + if (!entries) + return NULL; + + memset(entries, 0, NSP_ETH_TABLE_SIZE); + nsp = nfp_nsp_open(cpp); + if (!nsp) { + free(entries); + return nsp; + } + + ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + if (ret < 0) { + printf("reading port table failed %d\n", ret); + goto err; + } + + if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) { + printf("trying to set port state on disabled port %d\n", idx); + goto err; + } + + nfp_nsp_config_set_state(nsp, entries, idx); + return nsp; + +err: + nfp_nsp_close(nsp); + free(entries); + return NULL; +} + +void +nfp_eth_config_cleanup_end(struct nfp_nsp *nsp) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + + nfp_nsp_config_set_modified(nsp, 0); + nfp_nsp_config_clear_state(nsp); + nfp_nsp_close(nsp); + free(entries); +} + +/* + * nfp_eth_config_commit_end() - perform recorded configuration changes + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * + * Perform the configuration which was requested with __nfp_eth_set_*() + * helpers and recorded in @nsp state. If device was already configured + * as requested or no __nfp_eth_set_*() operations were made no NSP command + * will be performed. + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. + */ +int +nfp_eth_config_commit_end(struct nfp_nsp *nsp) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + int ret = 1; + + if (nfp_nsp_config_modified(nsp)) { + ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + ret = ret < 0 ? ret : 0; + } + + nfp_eth_config_cleanup_end(nsp); + + return ret; +} + +/* + * nfp_eth_set_mod_enable() - set PHY module enable control bit + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @enable: Desired state + * + * Enable or disable PHY module (this usually means setting the TX lanes + * disable bits). + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. + */ +int +nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + uint64_t reg; + + nsp = nfp_eth_config_start(cpp, idx); + if (!nsp) + return -1; + + entries = nfp_nsp_config_entries(nsp); + + /* Check if we are already in requested state */ + reg = rte_le_to_cpu_64(entries[idx].state); + if (enable != (int)FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) { + reg = rte_le_to_cpu_64(entries[idx].control); + reg &= ~NSP_ETH_CTRL_ENABLED; + reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable); + entries[idx].control = rte_cpu_to_le_64(reg); + + nfp_nsp_config_set_modified(nsp, 1); + } + + return nfp_eth_config_commit_end(nsp); +} + +/* + * nfp_eth_set_configured() - set PHY module configured control bit + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @configed: Desired state + * + * Set the ifup/ifdown state on the PHY. + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. 
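+ *
+ * Example (sketch): administratively bringing a port up
+ *
+ *	err = nfp_eth_set_configured(cpp, port->index, 1);
+ *	if (err < 0 && err != -EOPNOTSUPP)
+ *		...report failure; returns of 0 and 1 both mean success...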
+ */ +int +nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, int configed) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + uint64_t reg; + + nsp = nfp_eth_config_start(cpp, idx); + if (!nsp) + return -EIO; + + /* + * Older ABI versions did support this feature, however this has only + * been reliable since ABI 20. + */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 20) { + nfp_eth_config_cleanup_end(nsp); + return -EOPNOTSUPP; + } + + entries = nfp_nsp_config_entries(nsp); + + /* Check if we are already in requested state */ + reg = rte_le_to_cpu_64(entries[idx].state); + if (configed != (int)FIELD_GET(NSP_ETH_STATE_CONFIGURED, reg)) { + reg = rte_le_to_cpu_64(entries[idx].control); + reg &= ~NSP_ETH_CTRL_CONFIGURED; + reg |= FIELD_PREP(NSP_ETH_CTRL_CONFIGURED, configed); + entries[idx].control = rte_cpu_to_le_64(reg); + + nfp_nsp_config_set_modified(nsp, 1); + } + + return nfp_eth_config_commit_end(nsp); +} + +static int +nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx, + const uint64_t mask, const unsigned int shift, + unsigned int val, const uint64_t ctrl_bit) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + unsigned int idx = nfp_nsp_config_idx(nsp); + uint64_t reg; + + /* + * Note: set features were added in ABI 0.14 but the error + * codes were initially not populated correctly. + */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 17) { + printf("set operations not supported, please update flash\n"); + return -EOPNOTSUPP; + } + + /* Check if we are already in requested state */ + reg = rte_le_to_cpu_64(entries[idx].raw[raw_idx]); + if (val == (reg & mask) >> shift) + return 0; + + reg &= ~mask; + reg |= (val << shift) & mask; + entries[idx].raw[raw_idx] = rte_cpu_to_le_64(reg); + + entries[idx].control |= rte_cpu_to_le_64(ctrl_bit); + + nfp_nsp_config_set_modified(nsp, 1); + + return 0; +} + +#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \ + (__extension__ ({ \ + typeof(mask) _x = (mask); \ + nfp_eth_set_bit_config(nsp, raw_idx, _x, __bf_shf(_x), \ + val, ctrl_bit); \ + })) + +/* + * __nfp_eth_set_aneg() - set PHY autonegotiation control bit + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @mode: Desired autonegotiation mode + * + * Allow/disallow PHY module to advertise/perform autonegotiation. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int +__nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode) +{ + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_ANEG, mode, + NSP_ETH_CTRL_SET_ANEG); +} + +/* + * __nfp_eth_set_fec() - set PHY forward error correction control bit + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @mode: Desired fec mode + * + * Set the PHY module forward error correction mode. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +static int +__nfp_eth_set_fec(struct nfp_nsp *nsp, enum nfp_eth_fec mode) +{ + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_FEC, mode, + NSP_ETH_CTRL_SET_FEC); +} + +/* + * nfp_eth_set_fec() - set PHY forward error correction control mode + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @mode: Desired fec mode + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. 
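+ *
+ * Editor's illustrative sketch (not part of the upstream patch); the
+ * -EOPNOTSUPP case propagates from nfp_eth_set_bit_config() when the
+ * NSP ABI predates set operations:
+ *
+ *	int err = nfp_eth_set_fec(cpp, idx, mode);
+ *
+ *	if (err == -EOPNOTSUPP)
+ *		printf("FEC selection needs newer management FW\n");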
+ */ +int +nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode) +{ + struct nfp_nsp *nsp; + int err; + + nsp = nfp_eth_config_start(cpp, idx); + if (!nsp) + return -EIO; + + err = __nfp_eth_set_fec(nsp, mode); + if (err) { + nfp_eth_config_cleanup_end(nsp); + return err; + } + + return nfp_eth_config_commit_end(nsp); +} + +/* + * __nfp_eth_set_speed() - set interface speed/rate + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @speed: Desired speed (per lane) + * + * Set lane speed. Provided @speed value should be subport speed divided + * by number of lanes this subport is spanning (i.e. 10000 for 40G, 25000 for + * 50G, etc.) + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int +__nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed) +{ + enum nfp_eth_rate rate; + + rate = nfp_eth_speed2rate(speed); + if (rate == RATE_INVALID) { + printf("could not find matching lane rate for speed %u\n", + speed); + return -EINVAL; + } + + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_RATE, rate, + NSP_ETH_CTRL_SET_RATE); +} + +/* + * __nfp_eth_set_split() - set interface lane split + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @lanes: Desired lanes per port + * + * Set number of lanes in the port. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int +__nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes) +{ + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES, + lanes, NSP_ETH_CTRL_SET_LANES); +} diff --git a/drivers/net/nfp/nfpcore/nfp_resource.c b/drivers/net/nfp/nfpcore/nfp_resource.c new file mode 100644 index 00000000..dd41fa4d --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_resource.c @@ -0,0 +1,266 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#include +#include +#include + +#include + +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" +#include "nfp_resource.h" +#include "nfp_crc.h" + +#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU +#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL + +/* NFP Resource Table self-identifier */ +#define NFP_RESOURCE_TBL_NAME "nfp.res" +#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */ + +#define NFP_RESOURCE_ENTRY_NAME_SZ 8 + +/* + * struct nfp_resource_entry - Resource table entry + * @owner: NFP CPP Lock, interface owner + * @key: NFP CPP Lock, posix_crc32(name, 8) + * @region: Memory region descriptor + * @name: ASCII, zero padded name + * @reserved + * @cpp_action: CPP Action + * @cpp_token: CPP Token + * @cpp_target: CPP Target ID + * @page_offset: 256-byte page offset into target's CPP address + * @page_size: size, in 256-byte pages + */ +struct nfp_resource_entry { + struct nfp_resource_entry_mutex { + uint32_t owner; + uint32_t key; + } mutex; + struct nfp_resource_entry_region { + uint8_t name[NFP_RESOURCE_ENTRY_NAME_SZ]; + uint8_t reserved[5]; + uint8_t cpp_action; + uint8_t cpp_token; + uint8_t cpp_target; + uint32_t page_offset; + uint32_t page_size; + } region; +}; + +#define NFP_RESOURCE_TBL_SIZE 4096 +#define NFP_RESOURCE_TBL_ENTRIES (int)(NFP_RESOURCE_TBL_SIZE / \ + sizeof(struct nfp_resource_entry)) + +struct nfp_resource { + char name[NFP_RESOURCE_ENTRY_NAME_SZ + 1]; + uint32_t cpp_id; + uint64_t addr; + uint64_t size; + struct nfp_cpp_mutex *mutex; +}; + +static int +nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res) +{ + char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ + 2]; + struct nfp_resource_entry entry; + uint32_t cpp_id, key; + int ret, i; + + cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */ + + memset(name_pad, 0, sizeof(name_pad)); + strlcpy(name_pad, res->name, sizeof(name_pad)); + + /* Search for a matching entry */ + if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) { + printf("Grabbing device lock not supported\n"); + return -EOPNOTSUPP; + } + key = nfp_crc32_posix(name_pad, NFP_RESOURCE_ENTRY_NAME_SZ); + + for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) { + uint64_t addr = NFP_RESOURCE_TBL_BASE + + sizeof(struct nfp_resource_entry) * i; + + ret = nfp_cpp_read(cpp, cpp_id, addr, &entry, sizeof(entry)); + if (ret != sizeof(entry)) + return -EIO; + + if (entry.mutex.key != key) + continue; + + /* Found key! 
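Populate the resource handle from
+	 * this table entry; note that page_offset and page_size are
+	 * stored in units of 256-byte pages, hence the << 8
+	 * conversions just below.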
*/ + res->mutex = + nfp_cpp_mutex_alloc(cpp, + NFP_RESOURCE_TBL_TARGET, addr, key); + res->cpp_id = NFP_CPP_ID(entry.region.cpp_target, + entry.region.cpp_action, + entry.region.cpp_token); + res->addr = ((uint64_t)entry.region.page_offset) << 8; + res->size = (uint64_t)entry.region.page_size << 8; + return 0; + } + + return -ENOENT; +} + +static int +nfp_resource_try_acquire(struct nfp_cpp *cpp, struct nfp_resource *res, + struct nfp_cpp_mutex *dev_mutex) +{ + int err; + + if (nfp_cpp_mutex_lock(dev_mutex)) + return -EINVAL; + + err = nfp_cpp_resource_find(cpp, res); + if (err) + goto err_unlock_dev; + + err = nfp_cpp_mutex_trylock(res->mutex); + if (err) + goto err_res_mutex_free; + + nfp_cpp_mutex_unlock(dev_mutex); + + return 0; + +err_res_mutex_free: + nfp_cpp_mutex_free(res->mutex); +err_unlock_dev: + nfp_cpp_mutex_unlock(dev_mutex); + + return err; +} + +/* + * nfp_resource_acquire() - Acquire a resource handle + * @cpp: NFP CPP handle + * @name: Name of the resource + * + * NOTE: This function locks the acquired resource + * + * Return: NFP Resource handle, or NULL on error + */ +struct nfp_resource * +nfp_resource_acquire(struct nfp_cpp *cpp, const char *name) +{ + struct nfp_cpp_mutex *dev_mutex; + struct nfp_resource *res; + int err; + struct timespec wait; + int count; + + res = malloc(sizeof(*res)); + if (!res) + return NULL; + + memset(res, 0, sizeof(*res)); + + strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ); + + dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET, + NFP_RESOURCE_TBL_BASE, + NFP_RESOURCE_TBL_KEY); + if (!dev_mutex) { + free(res); + return NULL; + } + + wait.tv_sec = 0; + wait.tv_nsec = 1000000; + count = 0; + + for (;;) { + err = nfp_resource_try_acquire(cpp, res, dev_mutex); + if (!err) + break; + if (err != -EBUSY) + goto err_free; + + if (count++ > 1000) { + printf("Error: resource %s timed out\n", name); + err = -EBUSY; + goto err_free; + } + + nanosleep(&wait, NULL); + } + + nfp_cpp_mutex_free(dev_mutex); + + return res; + +err_free: + nfp_cpp_mutex_free(dev_mutex); + free(res); + return NULL; +} + +/* + * nfp_resource_release() - Release a NFP Resource handle + * @res: NFP Resource handle + * + * NOTE: This function implicitly unlocks the resource handle + */ +void +nfp_resource_release(struct nfp_resource *res) +{ + nfp_cpp_mutex_unlock(res->mutex); + nfp_cpp_mutex_free(res->mutex); + free(res); +} + +/* + * nfp_resource_cpp_id() - Return the cpp_id of a resource handle + * @res: NFP Resource handle + * + * Return: NFP CPP ID + */ +uint32_t +nfp_resource_cpp_id(const struct nfp_resource *res) +{ + return res->cpp_id; +} + +/* + * nfp_resource_name() - Return the name of a resource handle + * @res: NFP Resource handle + * + * Return: const char pointer to the name of the resource + */ +const char * +nfp_resource_name(const struct nfp_resource *res) +{ + return res->name; +} + +/* + * nfp_resource_address() - Return the address of a resource handle + * @res: NFP Resource handle + * + * Return: Address of the resource + */ +uint64_t +nfp_resource_address(const struct nfp_resource *res) +{ + return res->addr; +} + +/* + * nfp_resource_size() - Return the size in bytes of a resource handle + * @res: NFP Resource handle + * + * Return: Size of the resource in bytes + */ +uint64_t +nfp_resource_size(const struct nfp_resource *res) +{ + return res->size; +} diff --git a/drivers/net/nfp/nfpcore/nfp_resource.h b/drivers/net/nfp/nfpcore/nfp_resource.h new file mode 100644 index 00000000..06cc6f74 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_resource.h
@@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef NFP_RESOURCE_H +#define NFP_RESOURCE_H + +#include "nfp_cpp.h" + +#define NFP_RESOURCE_NFP_NFFW "nfp.nffw" +#define NFP_RESOURCE_NFP_HWINFO "nfp.info" +#define NFP_RESOURCE_NSP "nfp.sp" + +/** + * Opaque handle to a NFP Resource + */ +struct nfp_resource; + +struct nfp_resource *nfp_resource_acquire(struct nfp_cpp *cpp, + const char *name); + +/** + * Release a NFP Resource, and free the handle + * @param[in] res NFP Resource handle + */ +void nfp_resource_release(struct nfp_resource *res); + +/** + * Return the CPP ID of a NFP Resource + * @param[in] res NFP Resource handle + * @return CPP ID of the NFP Resource + */ +uint32_t nfp_resource_cpp_id(const struct nfp_resource *res); + +/** + * Return the name of a NFP Resource + * @param[in] res NFP Resource handle + * @return Name of the NFP Resource + */ +const char *nfp_resource_name(const struct nfp_resource *res); + +/** + * Return the target address of a NFP Resource + * @param[in] res NFP Resource handle + * @return Address of the NFP Resource + */ +uint64_t nfp_resource_address(const struct nfp_resource *res); + +uint64_t nfp_resource_size(const struct nfp_resource *res); + +#endif /* NFP_RESOURCE_H */ diff --git a/drivers/net/nfp/nfpcore/nfp_rtsym.c b/drivers/net/nfp/nfpcore/nfp_rtsym.c new file mode 100644 index 00000000..cb7d83db --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_rtsym.c @@ -0,0 +1,327 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +/* + * nfp_rtsym.c + * Interface for accessing run-time symbol table + */ + +#include +#include +#include "nfp_cpp.h" +#include "nfp_mip.h" +#include "nfp_rtsym.h" +#include "nfp6000/nfp6000.h" + +/* These need to match the linker */ +#define SYM_TGT_LMEM 0 +#define SYM_TGT_EMU_CACHE 0x17 + +struct nfp_rtsym_entry { + uint8_t type; + uint8_t target; + uint8_t island; + uint8_t addr_hi; + uint32_t addr_lo; + uint16_t name; + uint8_t menum; + uint8_t size_hi; + uint32_t size_lo; +}; + +struct nfp_rtsym_table { + struct nfp_cpp *cpp; + int num; + char *strtab; + struct nfp_rtsym symtab[]; +}; + +static int +nfp_meid(uint8_t island_id, uint8_t menum) +{ + return (island_id & 0x3F) == island_id && menum < 12 ? 
+ (island_id << 4) | (menum + 4) : -1; +} + +static void +nfp_rtsym_sw_entry_init(struct nfp_rtsym_table *cache, uint32_t strtab_size, + struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw) +{ + sw->type = fw->type; + sw->name = cache->strtab + rte_le_to_cpu_16(fw->name) % strtab_size; + sw->addr = ((uint64_t)fw->addr_hi << 32) | + rte_le_to_cpu_32(fw->addr_lo); + sw->size = ((uint64_t)fw->size_hi << 32) | + rte_le_to_cpu_32(fw->size_lo); + +#ifdef DEBUG + printf("rtsym_entry_init\n"); + printf("\tname=%s, addr=%" PRIx64 ", size=%" PRIu64 ",target=%d\n", + sw->name, sw->addr, sw->size, sw->target); +#endif + switch (fw->target) { + case SYM_TGT_LMEM: + sw->target = NFP_RTSYM_TARGET_LMEM; + break; + case SYM_TGT_EMU_CACHE: + sw->target = NFP_RTSYM_TARGET_EMU_CACHE; + break; + default: + sw->target = fw->target; + break; + } + + if (fw->menum != 0xff) + sw->domain = nfp_meid(fw->island, fw->menum); + else if (fw->island != 0xff) + sw->domain = fw->island; + else + sw->domain = -1; +} + +struct nfp_rtsym_table * +nfp_rtsym_table_read(struct nfp_cpp *cpp) +{ + struct nfp_rtsym_table *rtbl; + struct nfp_mip *mip; + + mip = nfp_mip_open(cpp); + rtbl = __nfp_rtsym_table_read(cpp, mip); + nfp_mip_close(mip); + + return rtbl; +} + +/* + * This looks more complex than it should be. But we need to get the type for + * the ~ right in round_down (it needs to be as wide as the result!), and we + * want to evaluate the macro arguments just once each. + */ +#define __round_mask(x, y) ((__typeof__(x))((y) - 1)) + +#define round_up(x, y) \ + (__extension__ ({ \ + typeof(x) _x = (x); \ + ((((_x) - 1) | __round_mask(_x, y)) + 1); \ + })) + +#define round_down(x, y) \ + (__extension__ ({ \ + typeof(x) _x = (x); \ + ((_x) & ~__round_mask(_x, y)); \ + })) + +struct nfp_rtsym_table * +__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip) +{ + uint32_t strtab_addr, symtab_addr, strtab_size, symtab_size; + struct nfp_rtsym_entry *rtsymtab; + struct nfp_rtsym_table *cache; + const uint32_t dram = + NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) | + NFP_ISL_EMEM0; + int err, n, size; + + if (!mip) + return NULL; + + nfp_mip_strtab(mip, &strtab_addr, &strtab_size); + nfp_mip_symtab(mip, &symtab_addr, &symtab_size); + + if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab)) + return NULL; + + /* Align to 64 bits */ + symtab_size = round_up(symtab_size, 8); + strtab_size = round_up(strtab_size, 8); + + rtsymtab = malloc(symtab_size); + if (!rtsymtab) + return NULL; + + size = sizeof(*cache); + size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym); + size += strtab_size + 1; + cache = malloc(size); + if (!cache) + goto exit_free_rtsym_raw; + + cache->cpp = cpp; + cache->num = symtab_size / sizeof(*rtsymtab); + cache->strtab = (void *)&cache->symtab[cache->num]; + + err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size); + if (err != (int)symtab_size) + goto exit_free_cache; + + err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size); + if (err != (int)strtab_size) + goto exit_free_cache; + cache->strtab[strtab_size] = '\0'; + + for (n = 0; n < cache->num; n++) + nfp_rtsym_sw_entry_init(cache, strtab_size, + &cache->symtab[n], &rtsymtab[n]); + + free(rtsymtab); + + return cache; + +exit_free_cache: + free(cache); +exit_free_rtsym_raw: + free(rtsymtab); + return NULL; +} + +/* + * nfp_rtsym_count() - Get the number of RTSYM descriptors + * @rtbl: NFP RTsym table + * + * Return: Number of RTSYM descriptors + */ +int +nfp_rtsym_count(struct 
nfp_rtsym_table *rtbl) +{ + if (!rtbl) + return -EINVAL; + + return rtbl->num; +} + +/* + * nfp_rtsym_get() - Get the Nth RTSYM descriptor + * @rtbl: NFP RTsym table + * @idx: Index (0-based) of the RTSYM descriptor + * + * Return: const pointer to a struct nfp_rtsym descriptor, or NULL + */ +const struct nfp_rtsym * +nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx) +{ + if (!rtbl) + return NULL; + + if (idx >= rtbl->num) + return NULL; + + return &rtbl->symtab[idx]; +} + +/* + * nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name + * @rtbl: NFP RTsym table + * @name: Symbol name + * + * Return: const pointer to a struct nfp_rtsym descriptor, or NULL + */ +const struct nfp_rtsym * +nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name) +{ + int n; + + if (!rtbl) + return NULL; + + for (n = 0; n < rtbl->num; n++) + if (strcmp(name, rtbl->symtab[n].name) == 0) + return &rtbl->symtab[n]; + + return NULL; +} + +/* + * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol + * @rtbl: NFP RTsym table + * @name: Symbol name + * @error: Pointer to error code (optional) + * + * Look up a symbol, map it, read it and return its value. The value of the + * symbol will be interpreted as a simple little-endian unsigned value. The + * symbol can be 4 or 8 bytes in size. + * + * Return: value read, on error sets the error and returns ~0ULL. + */ +uint64_t +nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error) +{ + const struct nfp_rtsym *sym; + uint32_t val32, id; + uint64_t val; + int err; + + sym = nfp_rtsym_lookup(rtbl, name); + if (!sym) { + err = -ENOENT; + goto exit; + } + + id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); + +#ifdef DEBUG + printf("Reading symbol %s with size %" PRIu64 " at %" PRIx64 "\n", + name, sym->size, sym->addr); +#endif + switch (sym->size) { + case 4: + err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32); + val = val32; + break; + case 8: + err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val); + break; + default: + printf("rtsym '%s' unsupported size: %" PRId64 "\n", + name, sym->size); + err = -EINVAL; + break; + } + + if (err) + err = -EIO; +exit: + if (error) + *error = err; + + if (err) + return ~0ULL; + + return val; +} + +uint8_t * +nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, + unsigned int min_size, struct nfp_cpp_area **area) +{ + const struct nfp_rtsym *sym; + uint8_t *mem; + +#ifdef DEBUG + printf("mapping symbol %s\n", name); +#endif + sym = nfp_rtsym_lookup(rtbl, name); + if (!sym) { + printf("symbol lookup failed for %s\n", name); + return NULL; + } + + if (sym->size < min_size) { + printf("Symbol %s too small (%" PRIu64 " < %u)\n", name, + sym->size, min_size); + return NULL; + } + + mem = nfp_cpp_map_area(rtbl->cpp, sym->domain, sym->target, sym->addr, + sym->size, area); + if (!mem) { + printf("Failed to map symbol %s\n", name); + return NULL; + } +#ifdef DEBUG + printf("symbol %s with address %p\n", name, mem); +#endif + + return mem; +} diff --git a/drivers/net/nfp/nfpcore/nfp_rtsym.h b/drivers/net/nfp/nfpcore/nfp_rtsym.h new file mode 100644 index 00000000..8b494211 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_rtsym.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved.
+ */ + +#ifndef __NFP_RTSYM_H__ +#define __NFP_RTSYM_H__ + +#define NFP_RTSYM_TYPE_NONE 0 +#define NFP_RTSYM_TYPE_OBJECT 1 +#define NFP_RTSYM_TYPE_FUNCTION 2 +#define NFP_RTSYM_TYPE_ABS 3 + +#define NFP_RTSYM_TARGET_NONE 0 +#define NFP_RTSYM_TARGET_LMEM -1 +#define NFP_RTSYM_TARGET_EMU_CACHE -7 + +/* + * Structure describing a run-time NFP symbol. + * + * The memory target of the symbol is generally the CPP target number and can be + * used directly by the nfp_cpp API calls. However, in some cases (i.e., for + * local memory or control store) the target is encoded using a negative number. + * + * When the target type can not be used to fully describe the location of a + * symbol the domain field is used to further specify the location (i.e., the + * specific ME or island number). + * + * For ME target resources, 'domain' is an MEID. + * For Island target resources, 'domain' is an island ID, with the one exception + * of "sram" symbols for backward compatibility, which are viewed as global. + */ +struct nfp_rtsym { + const char *name; + uint64_t addr; + uint64_t size; + int type; + int target; + int domain; +}; + +struct nfp_rtsym_table; + +struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp); + +struct nfp_rtsym_table * +__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip); + +int nfp_rtsym_count(struct nfp_rtsym_table *rtbl); + +const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx); + +const struct nfp_rtsym * +nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name); + +uint64_t nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, + int *error); +uint8_t * +nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, + unsigned int min_size, struct nfp_cpp_area **area); +#endif diff --git a/drivers/net/nfp/nfpcore/nfp_target.h b/drivers/net/nfp/nfpcore/nfp_target.h new file mode 100644 index 00000000..2884a003 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_target.h @@ -0,0 +1,579 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#ifndef NFP_TARGET_H +#define NFP_TARGET_H + +#include "nfp-common/nfp_resid.h" +#include "nfp-common/nfp_cppat.h" +#include "nfp-common/nfp_platform.h" +#include "nfp_cpp.h" + +#define P32 1 +#define P64 2 + +#define PUSHPULL(_pull, _push) (((_pull) << 4) | ((_push) << 0)) + +#ifndef NFP_ERRNO +#include +#define NFP_ERRNO(x) (errno = (x), -1) +#endif + +static inline int +pushpull_width(int pp) +{ + pp &= 0xf; + + if (pp == 0) + return NFP_ERRNO(EINVAL); + return (2 << pp); +} + +#define PUSH_WIDTH(_pushpull) pushpull_width((_pushpull) >> 0) +#define PULL_WIDTH(_pushpull) pushpull_width((_pushpull) >> 4) + +static inline int +target_rw(uint32_t cpp_id, int pp, int start, int len) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < start || island > (start + len))) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 0): + return PUSHPULL(0, pp); + case NFP_CPP_ID(0, 1, 0): + return PUSHPULL(pp, 0); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): + return PUSHPULL(pp, pp); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp6000_nbi_dma(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 0): /* ReadNbiDma */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 1, 0): /* WriteNbiDma */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): + return PUSHPULL(P64, P64); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp6000_nbi_stats(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 0): /* ReadNbiStats */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 1, 0): /* WriteNbiStats */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): + return PUSHPULL(P64, P64); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp6000_nbi_tm(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 0): /* ReadNbiTM */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 1, 0): /* WriteNbiTM */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): + return PUSHPULL(P64, P64); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp6000_nbi_ppc(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 0): /* ReadNbiPreclassifier */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 1, 0): /* WriteNbiPreclassifier */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): + return PUSHPULL(P64, P64); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp6000_nbi(uint32_t cpp_id, uint64_t address) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + uint64_t rel_addr = address & 0x3fFFFF; + + if (island && (island < 8 || island > 9)) + return NFP_ERRNO(EINVAL); + + if (rel_addr < (1 << 20)) + return nfp6000_nbi_dma(cpp_id); + if (rel_addr < (2 << 20)) + return nfp6000_nbi_stats(cpp_id); + if (rel_addr < (3 << 20)) + return nfp6000_nbi_tm(cpp_id); + return nfp6000_nbi_ppc(cpp_id); +} + +/* + * This structure ONLY includes items that can be done with a read or write of + * 32-bit or 64-bit words. All others are not listed. 
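+ *
+ * Editor's worked example (illustrative, not part of the upstream
+ * patch): the PUSHPULL() values returned by these helpers decode with
+ * the PUSH_WIDTH()/PULL_WIDTH() macros near the top of this header.
+ * With P32 == 1 and P64 == 2:
+ *
+ *	int pp = PUSHPULL(P64, P32);	so pp == (2 << 4) | 1 == 0x21
+ *
+ *	PUSH_WIDTH(pp) == (2 << P32) == 4 bytes, a 32-bit push
+ *	PULL_WIDTH(pp) == (2 << P64) == 8 bytes, a 64-bit pull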
+ */ +static inline int +nfp6000_mu_common(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): /* read_be/write_be */ + return PUSHPULL(P64, P64); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 1): /* read_le/write_le */ + return PUSHPULL(P64, P64); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 2): /* {read/write}_swap_be */ + return PUSHPULL(P64, P64); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 3): /* {read/write}_swap_le */ + return PUSHPULL(P64, P64); + case NFP_CPP_ID(0, 0, 0): /* read_be */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 0, 1): /* read_le */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 0, 2): /* read_swap_be */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 0, 3): /* read_swap_le */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 1, 0): /* write_be */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, 1, 1): /* write_le */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, 1, 2): /* write_swap_be */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, 1, 3): /* write_swap_le */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, 3, 0): /* atomic_read */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 3, 2): /* mask_compare_write */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 4, 0): /* atomic_write */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 4, 2): /* atomic_write_imm */ + return PUSHPULL(0, 0); + case NFP_CPP_ID(0, 4, 3): /* swap_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 5, 0): /* set */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 5, 3): /* test_set_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 6, 0): /* clr */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 6, 3): /* test_clr_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 7, 0): /* add */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 7, 3): /* test_add_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 8, 0): /* addsat */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 8, 3): /* test_subsat_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 9, 0): /* sub */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 9, 3): /* test_sub_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 10, 0): /* subsat */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 10, 3): /* test_subsat_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 13, 0): /* microq128_get */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 13, 1): /* microq128_pop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 13, 2): /* microq128_put */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 15, 0): /* xor */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 15, 3): /* test_xor_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 28, 0): /* read32_be */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 28, 1): /* read32_le */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 28, 2): /* read32_swap_be */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 28, 3): /* read32_swap_le */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 31, 0): /* write32_be */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 31, 1): /* write32_le */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 31, 2): /* write32_swap_be */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 31, 3): /* write32_swap_le */ + return PUSHPULL(P32, 0); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp6000_mu_ctm(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 16, 1): /* packet_read_packet_status */ + return PUSHPULL(0, P32); + default: + return nfp6000_mu_common(cpp_id); + } +} 
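+
+/*
+ * Editor's note (illustrative, not part of the upstream patch): the
+ * switch statements above compare cpp_id masked with
+ * NFP_CPP_ID(0, ~0, ~0), i.e. only the action and token fields, so the
+ * target and island bits of the ID never affect the match:
+ *
+ *	uint32_t id = NFP_CPP_ID(NFP_CPP_TARGET_MU, 16, 1);
+ *
+ *	nfp6000_mu_ctm(id) hits the packet_read_packet_status case and
+ *	PUSH_WIDTH() of the result is 4 bytes.
+ */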
+ +static inline int +nfp6000_mu_emu(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 18, 0): /* read_queue */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 18, 1): /* read_queue_ring */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 18, 2): /* write_queue */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 18, 3): /* write_queue_ring */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 20, 2): /* journal */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 21, 0): /* get */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 21, 1): /* get_eop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 21, 2): /* get_freely */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 22, 0): /* pop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 22, 1): /* pop_eop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 22, 2): /* pop_freely */ + return PUSHPULL(0, P32); + default: + return nfp6000_mu_common(cpp_id); + } +} + +static inline int +nfp6000_mu_imu(uint32_t cpp_id) +{ + return nfp6000_mu_common(cpp_id); +} + +static inline int +nfp6000_mu(uint32_t cpp_id, uint64_t address) +{ + int pp; + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island == 0) { + if (address < 0x2000000000ULL) + pp = nfp6000_mu_ctm(cpp_id); + else if (address < 0x8000000000ULL) + pp = nfp6000_mu_emu(cpp_id); + else if (address < 0x9800000000ULL) + pp = nfp6000_mu_ctm(cpp_id); + else if (address < 0x9C00000000ULL) + pp = nfp6000_mu_emu(cpp_id); + else if (address < 0xA000000000ULL) + pp = nfp6000_mu_imu(cpp_id); + else + pp = nfp6000_mu_ctm(cpp_id); + } else if (island >= 24 && island <= 27) { + pp = nfp6000_mu_emu(cpp_id); + } else if (island >= 28 && island <= 31) { + pp = nfp6000_mu_imu(cpp_id); + } else if (island == 1 || + (island >= 4 && island <= 7) || + (island >= 12 && island <= 13) || + (island >= 32 && island <= 47) || + (island >= 48 && island <= 51)) { + pp = nfp6000_mu_ctm(cpp_id); + } else { + pp = NFP_ERRNO(EINVAL); + } + + return pp; +} + +static inline int +nfp6000_ila(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 48 || island > 51)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 1): /* read_check_error */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 2, 0): /* read_int */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 3, 0): /* write_int */ + return PUSHPULL(P32, 0); + default: + return target_rw(cpp_id, P32, 48, 4); + } +} + +static inline int +nfp6000_pci(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 4 || island > 7)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 2, 0): + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 3, 0): + return PUSHPULL(P32, 0); + default: + return target_rw(cpp_id, P32, 4, 4); + } +} + +static inline int +nfp6000_crypto(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 12 || island > 15)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 2, 0): + return PUSHPULL(P64, 0); + default: + return target_rw(cpp_id, P64, 12, 4); + } +} + +static inline int +nfp6000_cap_xpb(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 1 || island > 63)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 1): /* RingGet */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 0, 2): /* Interthread Signal */ + 
return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 1, 1): /* RingPut */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 1, 2): /* CTNNWr */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 2, 0): /* ReflectRd, signal none */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 2, 1): /* ReflectRd, signal self */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 2, 2): /* ReflectRd, signal remote */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 2, 3): /* ReflectRd, signal both */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 3, 0): /* ReflectWr, signal none */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 3, 1): /* ReflectWr, signal self */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 3, 2): /* ReflectWr, signal remote */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 3, 3): /* ReflectWr, signal both */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 1): + return PUSHPULL(P32, P32); + default: + return target_rw(cpp_id, P32, 1, 63); + } +} + +static inline int +nfp6000_cls(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 1 || island > 63)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 3): /* xor */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 2, 0): /* set */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 2, 1): /* clr */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 4, 0): /* add */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 4, 1): /* add64 */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 6, 0): /* sub */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 6, 1): /* sub64 */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 6, 2): /* subsat */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 8, 2): /* hash_mask */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 8, 3): /* hash_clear */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 9, 0): /* ring_get */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 9, 1): /* ring_pop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 9, 2): /* ring_get_freely */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 9, 3): /* ring_pop_freely */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 10, 0): /* ring_put */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 10, 2): /* ring_journal */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 14, 0): /* reflect_write_sig_local */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 15, 1): /* reflect_read_sig_local */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 17, 2): /* statistic */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 24, 0): /* ring_read */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 24, 1): /* ring_write */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 25, 0): /* ring_workq_add_thread */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 25, 1): /* ring_workq_add_work */ + return PUSHPULL(P32, 0); + default: + return target_rw(cpp_id, P32, 0, 64); + } +} + +static inline int +nfp6000_target_pushpull(uint32_t cpp_id, uint64_t address) +{ + switch (NFP_CPP_ID_TARGET_of(cpp_id)) { + case NFP6000_CPPTGT_NBI: + return nfp6000_nbi(cpp_id, address); + case NFP6000_CPPTGT_VQDR: + return target_rw(cpp_id, P32, 24, 4); + case NFP6000_CPPTGT_ILA: + return nfp6000_ila(cpp_id); + case NFP6000_CPPTGT_MU: + return nfp6000_mu(cpp_id, address); + case NFP6000_CPPTGT_PCIE: + return nfp6000_pci(cpp_id); + case NFP6000_CPPTGT_ARM: + if (address < 0x10000) + return target_rw(cpp_id, P64, 1, 1); + else + return target_rw(cpp_id, P32, 1, 1); + case NFP6000_CPPTGT_CRYPTO: + return nfp6000_crypto(cpp_id); + 
case NFP6000_CPPTGT_CTXPB: + return nfp6000_cap_xpb(cpp_id); + case NFP6000_CPPTGT_CLS: + return nfp6000_cls(cpp_id); + case 0: + return target_rw(cpp_id, P32, 4, 4); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp_target_pushpull_width(int pp, int write_not_read) +{ + if (pp < 0) + return pp; + + if (write_not_read) + return PULL_WIDTH(pp); + else + return PUSH_WIDTH(pp); +} + +static inline int +nfp6000_target_action_width(uint32_t cpp_id, uint64_t address, + int write_not_read) +{ + int pp; + + pp = nfp6000_target_pushpull(cpp_id, address); + + return nfp_target_pushpull_width(pp, write_not_read); +} + +static inline int +nfp_target_action_width(uint32_t model, uint32_t cpp_id, uint64_t address, + int write_not_read) +{ + if (NFP_CPP_MODEL_IS_6000(model)) { + return nfp6000_target_action_width(cpp_id, address, + write_not_read); + } else { + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp_target_cpp(uint32_t cpp_island_id, uint64_t cpp_island_address, + uint32_t *cpp_target_id, uint64_t *cpp_target_address, + const uint32_t *imb_table) +{ + int err; + int island = NFP_CPP_ID_ISLAND_of(cpp_island_id); + int target = NFP_CPP_ID_TARGET_of(cpp_island_id); + uint32_t imb; + + if (target < 0 || target >= 16) + return NFP_ERRNO(EINVAL); + + if (island == 0) { + /* Already translated */ + *cpp_target_id = cpp_island_id; + *cpp_target_address = cpp_island_address; + return 0; + } + + if (!imb_table) { + /* CPP + Island only allowed on systems with IMB tables */ + return NFP_ERRNO(EINVAL); + } + + imb = imb_table[target]; + + *cpp_target_address = cpp_island_address; + err = _nfp6000_cppat_addr_encode(cpp_target_address, island, target, + ((imb >> 13) & 7), + ((imb >> 12) & 1), + ((imb >> 6) & 0x3f), + ((imb >> 0) & 0x3f)); + if (err == 0) { + *cpp_target_id = + NFP_CPP_ID(target, NFP_CPP_ID_ACTION_of(cpp_island_id), + NFP_CPP_ID_TOKEN_of(cpp_island_id)); + } + + return err; +} + +#endif /* NFP_TARGET_H */ diff --git a/drivers/net/null/meson.build b/drivers/net/null/meson.build index 68ac0d2a..60e2ce6c 100644 --- a/drivers/net/null/meson.build +++ b/drivers/net/null/meson.build @@ -1,4 +1,5 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation +version = 2 sources = files('rte_eth_null.c') diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c index d003b283..244f8654 100644 --- a/drivers/net/null/rte_eth_null.c +++ b/drivers/net/null/rte_eth_null.c @@ -73,6 +73,7 @@ struct pmd_internals { struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT]; struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT]; + struct ether_addr eth_addr; /** Bit mask of RSS offloads, the bit offset also means flow type */ uint64_t flow_type_rss_offloads; @@ -84,16 +85,19 @@ struct pmd_internals { uint8_t rss_key[40]; /**< 40-byte hash key. */ }; - - -static struct ether_addr eth_addr = { .addr_bytes = {0} }; static struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_AUTONEG, + .link_autoneg = ETH_LINK_FIXED, }; +static int eth_null_logtype; + +#define PMD_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, eth_null_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + static uint16_t eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) { @@ -105,10 +109,10 @@ eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) return 0; packet_size = h->internals->packet_size; + if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0) + return 0; + for (i = 0; i < nb_bufs; i++) { - bufs[i] = rte_pktmbuf_alloc(h->mb_pool); - if (!bufs[i]) - break; bufs[i]->data_len = (uint16_t)packet_size; bufs[i]->pkt_len = packet_size; bufs[i]->port = h->internals->port_id; @@ -130,10 +134,10 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) return 0; packet_size = h->internals->packet_size; + if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0) + return 0; + for (i = 0; i < nb_bufs; i++) { - bufs[i] = rte_pktmbuf_alloc(h->mb_pool); - if (!bufs[i]) - break; rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet, packet_size); bufs[i]->data_len = (uint16_t)packet_size; @@ -301,6 +305,7 @@ eth_dev_info(struct rte_eth_dev *dev, dev_info->min_rx_bufsize = 0; dev_info->reta_size = internals->reta_size; dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP; } static int @@ -461,10 +466,11 @@ eth_rss_hash_conf_get(struct rte_eth_dev *dev, return 0; } -static void +static int eth_mac_address_set(__rte_unused struct rte_eth_dev *dev, __rte_unused struct ether_addr *addr) { + return 0; } static const struct eth_dev_ops ops = { @@ -496,7 +502,7 @@ eth_dev_null_create(struct rte_vdev_device *dev, { const unsigned nb_rx_queues = 1; const unsigned nb_tx_queues = 1; - struct rte_eth_dev_data *data = NULL; + struct rte_eth_dev_data *data; struct pmd_internals *internals = NULL; struct rte_eth_dev *eth_dev = NULL; @@ -510,22 +516,12 @@ eth_dev_null_create(struct rte_vdev_device *dev, if (dev->device.numa_node == SOCKET_ID_ANY) dev->device.numa_node = rte_socket_id(); - RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n", - dev->device.numa_node); - - /* now do all data allocation - for eth_dev structure, dummy pci driver - * and internal (private) data - */ - data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0, + PMD_LOG(INFO, "Creating null ethdev on numa socket %u", dev->device.numa_node); - if (!data) - return -ENOMEM; eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals)); - if (!eth_dev) { - rte_free(data); + if (!eth_dev) return -ENOMEM; - } /* now put it all together * - store queue data in internals, @@ -540,19 +536,19 @@ eth_dev_null_create(struct rte_vdev_device *dev, internals->packet_size = packet_size; internals->packet_copy = packet_copy; internals->port_id = eth_dev->data->port_id; + eth_random_addr(internals->eth_addr.addr_bytes); internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK; internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE; rte_memcpy(internals->rss_key, default_rss_key, 40); - rte_memcpy(data, eth_dev->data, sizeof(*data)); + data = eth_dev->data; data->nb_rx_queues = (uint16_t)nb_rx_queues; data->nb_tx_queues = (uint16_t)nb_tx_queues; data->dev_link = pmd_link; - data->mac_addrs = ð_addr; + data->mac_addrs = &internals->eth_addr; - eth_dev->data = data; eth_dev->dev_ops = &ops; /* finally assign rx and tx ops */ @@ -564,6 +560,7 @@ eth_dev_null_create(struct rte_vdev_device *dev, eth_dev->tx_pkt_burst = eth_null_tx; } + rte_eth_dev_probing_finish(eth_dev); return 0; } @@ -608,6 +605,7 @@ 
rte_pmd_null_probe(struct rte_vdev_device *dev) unsigned packet_size = default_packet_size; unsigned packet_copy = default_packet_copy; struct rte_kvargs *kvlist = NULL; + struct rte_eth_dev *eth_dev; int ret; if (!dev) @@ -615,7 +613,21 @@ rte_pmd_null_probe(struct rte_vdev_device *dev) name = rte_vdev_device_name(dev); params = rte_vdev_device_args(dev); - RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name); + PMD_LOG(INFO, "Initializing pmd_null for %s", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(params) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + PMD_LOG(ERR, "Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &ops; + eth_dev->device = &dev->device; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } if (params != NULL) { kvlist = rte_kvargs_parse(params, valid_arguments); @@ -641,8 +653,8 @@ rte_pmd_null_probe(struct rte_vdev_device *dev) } } - RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, " - "packet copy is %s\n", packet_size, + PMD_LOG(INFO, "Configure pmd_null: packet size is %d, " + "packet copy is %s", packet_size, packet_copy ? "enabled" : "disabled"); ret = eth_dev_null_create(dev, packet_size, packet_copy); @@ -661,7 +673,7 @@ rte_pmd_null_remove(struct rte_vdev_device *dev) if (!dev) return -EINVAL; - RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n", + PMD_LOG(INFO, "Closing null ethdev on numa socket %u", rte_socket_id()); /* find the ethdev entry */ @@ -670,7 +682,6 @@ rte_pmd_null_remove(struct rte_vdev_device *dev) return -1; rte_free(eth_dev->data->dev_private); - rte_free(eth_dev->data); rte_eth_dev_release_port(eth_dev); @@ -687,3 +698,10 @@ RTE_PMD_REGISTER_ALIAS(net_null, eth_null); RTE_PMD_REGISTER_PARAM_STRING(net_null, "size= " "copy="); + +RTE_INIT(eth_null_init_log) +{ + eth_null_logtype = rte_log_register("pmd.net.null"); + if (eth_null_logtype >= 0) + rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/octeontx/Makefile b/drivers/net/octeontx/Makefile index 3e4a1066..885f1768 100644 --- a/drivers/net/octeontx/Makefile +++ b/drivers/net/octeontx/Makefile @@ -10,6 +10,7 @@ include $(RTE_SDK)/mk/rte.vars.mk LIB = librte_pmd_octeontx.a CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/ CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/ EXPORT_MAP := rte_pmd_octeontx_version.map @@ -46,7 +47,7 @@ endif CFLAGS_octeontx_ethdev.o += -DALLOW_EXPERIMENTAL_API LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_common_octeontx LDLIBS += -lrte_mempool_octeontx LDLIBS += -lrte_eventdev LDLIBS += -lrte_bus_pci diff --git a/drivers/net/octeontx/base/octeontx_bgx.c b/drivers/net/octeontx/base/octeontx_bgx.c index 8576d8ed..0e238826 100644 --- a/drivers/net/octeontx/base/octeontx_bgx.c +++ b/drivers/net/octeontx/base/octeontx_bgx.c @@ -19,7 +19,7 @@ octeontx_bgx_port_open(int port, octeontx_mbox_bgx_port_conf_t *conf) hdr.msg = MBOX_BGX_PORT_OPEN; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_conf, len); + res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_conf, len); if (res < 0) return -EACCES; @@ -49,7 +49,7 @@ octeontx_bgx_port_close(int port) hdr.msg = MBOX_BGX_PORT_CLOSE; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0); + res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); if (res < 0) return -EACCES; @@ 
-66,7 +66,7 @@ octeontx_bgx_port_start(int port) hdr.msg = MBOX_BGX_PORT_START; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0); + res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); if (res < 0) return -EACCES; @@ -83,7 +83,7 @@ octeontx_bgx_port_stop(int port) hdr.msg = MBOX_BGX_PORT_STOP; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0); + res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); if (res < 0) return -EACCES; @@ -103,7 +103,7 @@ octeontx_bgx_port_get_config(int port, octeontx_mbox_bgx_port_conf_t *conf) hdr.vfid = port; memset(&bgx_conf, 0, sizeof(octeontx_mbox_bgx_port_conf_t)); - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_conf, len); + res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_conf, len); if (res < 0) return -EACCES; @@ -135,7 +135,7 @@ octeontx_bgx_port_status(int port, octeontx_mbox_bgx_port_status_t *stat) hdr.msg = MBOX_BGX_PORT_GET_STATUS; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_stat, len); + res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_stat, len); if (res < 0) return -EACCES; @@ -156,7 +156,7 @@ octeontx_bgx_port_stats(int port, octeontx_mbox_bgx_port_stats_t *stats) hdr.msg = MBOX_BGX_PORT_GET_STATS; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_stats, len); + res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_stats, len); if (res < 0) return -EACCES; @@ -181,7 +181,7 @@ octeontx_bgx_port_stats_clr(int port) hdr.msg = MBOX_BGX_PORT_CLR_STATS; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0); + res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); if (res < 0) return -EACCES; @@ -200,7 +200,7 @@ octeontx_bgx_port_link_status(int port) hdr.msg = MBOX_BGX_PORT_GET_LINK_STATUS; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &link, len); + res = octeontx_mbox_send(&hdr, NULL, 0, &link, len); if (res < 0) return -EACCES; @@ -219,7 +219,7 @@ octeontx_bgx_port_promisc_set(int port, int en) hdr.vfid = port; prom = en ? 
1 : 0; - res = octeontx_ssovf_mbox_send(&hdr, &prom, sizeof(prom), NULL, 0); + res = octeontx_mbox_send(&hdr, &prom, sizeof(prom), NULL, 0); if (res < 0) return -EACCES; @@ -237,7 +237,7 @@ octeontx_bgx_port_mac_set(int port, uint8_t *mac_addr) hdr.msg = MBOX_BGX_PORT_SET_MACADDR; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, mac_addr, len, NULL, 0); + res = octeontx_mbox_send(&hdr, mac_addr, len, NULL, 0); if (res < 0) return -EACCES; diff --git a/drivers/net/octeontx/base/octeontx_pkivf.c b/drivers/net/octeontx/base/octeontx_pkivf.c index 58a7f110..1babea0e 100644 --- a/drivers/net/octeontx/base/octeontx_pkivf.c +++ b/drivers/net/octeontx/base/octeontx_pkivf.c @@ -19,7 +19,7 @@ octeontx_pki_port_open(int port) hdr.msg = MBOX_PKI_PORT_OPEN; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0); + res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); if (res < 0) return -EACCES; return res; @@ -38,7 +38,7 @@ octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg) hdr.msg = MBOX_PKI_PORT_HASH_CONFIG; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, &h_cfg, len, NULL, 0); + res = octeontx_mbox_send(&hdr, &h_cfg, len, NULL, 0); if (res < 0) return -EACCES; @@ -58,7 +58,7 @@ octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg) hdr.msg = MBOX_PKI_PORT_PKTBUF_CONFIG; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, &b_cfg, len, NULL, 0); + res = octeontx_mbox_send(&hdr, &b_cfg, len, NULL, 0); if (res < 0) return -EACCES; return res; @@ -77,7 +77,7 @@ octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg) hdr.msg = MBOX_PKI_PORT_CREATE_QOS; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0); + res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0); if (res < 0) return -EACCES; @@ -99,7 +99,7 @@ octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg) hdr.msg = MBOX_PKI_PORT_ERRCHK_CONFIG; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, &e_cfg, len, NULL, 0); + res = octeontx_mbox_send(&hdr, &e_cfg, len, NULL, 0); if (res < 0) return -EACCES; diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h index d036054c..764aff53 100644 --- a/drivers/net/octeontx/base/octeontx_pkivf.h +++ b/drivers/net/octeontx/base/octeontx_pkivf.h @@ -422,7 +422,7 @@ octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg) hdr.msg = MBOX_PKI_PORT_MODIFY_QOS; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0); + res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0); if (res < 0) return -EACCES; @@ -442,7 +442,7 @@ octeontx_pki_port_delete_qos(int port, pki_del_qos_t *qos_cfg) hdr.msg = MBOX_PKI_PORT_DELETE_QOS; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0); + res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0); if (res < 0) return -EACCES; @@ -464,7 +464,7 @@ octeontx_pki_port_close(int port) hdr.msg = MBOX_PKI_PORT_CLOSE; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0); + res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0); if (res < 0) return -EACCES; @@ -486,7 +486,7 @@ octeontx_pki_port_start(int port) hdr.msg = MBOX_PKI_PORT_START; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0); + res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0); if (res < 0) return -EACCES; @@ -508,7 +508,7 @@ octeontx_pki_port_stop(int port) hdr.msg = MBOX_PKI_PORT_STOP; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0); + res = 
octeontx_mbox_send(&hdr, &ptype, len, NULL, 0); if (res < 0) return -EACCES; diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c index b739c0b3..0f3d5d67 100644 --- a/drivers/net/octeontx/octeontx_ethdev.c +++ b/drivers/net/octeontx/octeontx_ethdev.c @@ -46,9 +46,7 @@ int otx_net_logtype_mbox; int otx_net_logtype_init; int otx_net_logtype_driver; -RTE_INIT(otx_net_init_log); -static void -otx_net_init_log(void) +RTE_INIT(otx_net_init_log) { otx_net_logtype_mbox = rte_log_register("pmd.net.octeontx.mbox"); if (otx_net_logtype_mbox >= 0) @@ -122,7 +120,7 @@ octeontx_port_open(struct octeontx_nic *nic) int res; res = 0; - + memset(&bgx_port_conf, 0x0, sizeof(bgx_port_conf)); PMD_INIT_FUNC_TRACE(); res = octeontx_bgx_port_open(nic->port_id, &bgx_port_conf); @@ -283,34 +281,17 @@ octeontx_dev_configure(struct rte_eth_dev *dev) return -EINVAL; } - if (!rxmode->hw_strip_crc) { + /* KEEP_CRC offload flag is not supported by PMD + * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed + */ + if (rte_eth_dev_must_keep_crc(rxmode->offloads)) { PMD_INIT_LOG(NOTICE, "can't disable hw crc strip"); - rxmode->hw_strip_crc = 1; - } - - if (rxmode->hw_ip_checksum) { - PMD_INIT_LOG(NOTICE, "rxcksum not supported"); - rxmode->hw_ip_checksum = 0; - } - - if (rxmode->split_hdr_size) { - octeontx_log_err("rxmode does not support split header"); - return -EINVAL; - } - - if (rxmode->hw_vlan_filter) { - octeontx_log_err("VLAN filter not supported"); - return -EINVAL; + rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP; } - if (rxmode->hw_vlan_extend) { - octeontx_log_err("VLAN extended not supported"); - return -EINVAL; - } - - if (rxmode->enable_lro) { - octeontx_log_err("LRO not supported"); - return -EINVAL; + if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) { + PMD_INIT_LOG(NOTICE, "cant disable lockfree tx"); + txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE; } if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { @@ -372,6 +353,9 @@ octeontx_dev_close(struct rte_eth_dev *dev) rte_free(txq); } + + dev->tx_pkt_burst = NULL; + dev->rx_pkt_burst = NULL; } static int @@ -465,9 +449,6 @@ octeontx_dev_stop(struct rte_eth_dev *dev) ret); return; } - - dev->tx_pkt_burst = NULL; - dev->rx_pkt_burst = NULL; } static void @@ -488,20 +469,6 @@ octeontx_dev_promisc_disable(struct rte_eth_dev *dev) octeontx_port_promisc_set(nic, 0); } -static inline int -octeontx_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &dev->data->dev_link; - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - static int octeontx_port_link_status(struct octeontx_nic *nic) { @@ -532,7 +499,6 @@ octeontx_dev_link_update(struct rte_eth_dev *dev, struct rte_eth_link link; int res; - res = 0; PMD_INIT_FUNC_TRACE(); res = octeontx_port_link_status(nic); @@ -566,6 +532,7 @@ octeontx_dev_link_update(struct rte_eth_dev *dev, case OCTEONTX_LINK_SPEED_RESERVE1: case OCTEONTX_LINK_SPEED_RESERVE2: default: + link.link_speed = ETH_SPEED_NUM_NONE; octeontx_log_err("incorrect link speed %d", nic->speed); break; } @@ -573,7 +540,7 @@ octeontx_dev_link_update(struct rte_eth_dev *dev, link.link_duplex = ETH_LINK_FULL_DUPLEX; link.link_autoneg = ETH_LINK_AUTONEG; - return octeontx_atomic_write_link_status(dev, &link); + return rte_eth_linkstatus_set(dev, &link); } static int @@ -594,7 +561,7 @@ octeontx_dev_stats_reset(struct rte_eth_dev *dev) 
octeontx_port_stats_clr(nic); } -static void +static int octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) { @@ -605,6 +572,8 @@ octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev, if (ret != 0) octeontx_log_err("failed to set MAC address on port %d", nic->port_id); + + return ret; } static void @@ -619,28 +588,25 @@ octeontx_dev_info(struct rte_eth_dev *dev, ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G; - dev_info->driver_name = RTE_STR(rte_octeontx_pmd); dev_info->max_mac_addrs = 1; dev_info->max_rx_pktlen = PKI_MAX_PKTLEN; dev_info->max_rx_queues = 1; dev_info->max_tx_queues = PKO_MAX_NUM_DQ; dev_info->min_rx_bufsize = 0; - dev_info->pci_dev = NULL; dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_free_thresh = 0, .rx_drop_en = 0, + .offloads = OCTEONTX_RX_OFFLOADS, }; dev_info->default_txconf = (struct rte_eth_txconf) { .tx_free_thresh = 0, - .txq_flags = - ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS | - ETH_TXQ_FLAGS_NOXSUMS, + .offloads = OCTEONTX_TX_OFFLOADS, }; - dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MT_LOCKFREE; + dev_info->rx_offload_capa = OCTEONTX_RX_OFFLOADS; + dev_info->tx_offload_capa = OCTEONTX_TX_OFFLOADS; } static void @@ -744,7 +710,7 @@ octeontx_dev_tx_queue_release(void *tx_queue) static int octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, uint16_t nb_desc, unsigned int socket_id, - const struct rte_eth_txconf *tx_conf) + const struct rte_eth_txconf *tx_conf __rte_unused) { struct octeontx_nic *nic = octeontx_pmd_priv(dev); struct octeontx_txq *txq = NULL; @@ -753,7 +719,6 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, RTE_SET_USED(nb_desc); RTE_SET_USED(socket_id); - RTE_SET_USED(tx_conf); dq_num = (nic->port_id * PKO_VF_NUM_DQ) + qidx; @@ -823,7 +788,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, pki_qos_cfg_t pki_qos; uintptr_t pool; int ret, port; - uint8_t gaura; + uint16_t gaura; unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx; unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx; @@ -934,8 +899,8 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, pool = (uintptr_t)mb_pool->pool_id; - /* Get the gpool Id */ - gaura = octeontx_fpa_bufpool_gpool(pool); + /* Get the gaura Id */ + gaura = octeontx_fpa_bufpool_gaura(pool); pki_qos.qpg_qos = PKI_QPG_QOS_NONE; pki_qos.num_entry = 1; @@ -1039,7 +1004,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev, char octtx_name[OCTEONTX_MAX_NAME_LEN]; struct octeontx_nic *nic = NULL; struct rte_eth_dev *eth_dev = NULL; - struct rte_eth_dev_data *data = NULL; + struct rte_eth_dev_data *data; const char *name = rte_vdev_device_name(dev); PMD_INIT_FUNC_TRACE(); @@ -1050,18 +1015,14 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev, if (eth_dev == NULL) return -ENODEV; + eth_dev->dev_ops = &octeontx_dev_ops; + eth_dev->device = &dev->device; eth_dev->tx_pkt_burst = octeontx_xmit_pkts; eth_dev->rx_pkt_burst = octeontx_recv_pkts; + rte_eth_dev_probing_finish(eth_dev); return 0; } - data = rte_zmalloc_socket(octtx_name, sizeof(*data), 0, socket_id); - if (data == NULL) { - octeontx_log_err("failed to allocate devdata"); - res = -ENOMEM; - goto err; - } - nic = rte_zmalloc_socket(octtx_name, sizeof(*nic), 0, socket_id); if (nic == NULL) { octeontx_log_err("failed to allocate nic structure"); @@ -1097,11 +1058,9 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev, eth_dev->data->kdrv = 
RTE_KDRV_NONE; eth_dev->data->numa_node = dev->device.numa_node; - rte_memcpy(data, (eth_dev)->data, sizeof(*data)); + data = eth_dev->data; data->dev_private = nic; - data->port_id = eth_dev->data->port_id; - snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name); nic->ev_queues = 1; nic->ev_ports = 1; @@ -1120,7 +1079,6 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev, goto err; } - eth_dev->data = data; eth_dev->dev_ops = &octeontx_dev_ops; /* Finally save ethdev pointer to the NIC structure */ @@ -1146,10 +1104,11 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev, rte_octeontx_pchan_map[(nic->base_ochan >> 8) & 0x7] [(nic->base_ochan >> 4) & 0xF] = data->port_id; + rte_eth_dev_probing_finish(eth_dev); return data->port_id; err: - if (port) + if (nic) octeontx_port_close(nic); if (eth_dev != NULL) { @@ -1188,7 +1147,6 @@ octeontx_remove(struct rte_vdev_device *dev) rte_free(eth_dev->data->mac_addrs); rte_free(eth_dev->data->dev_private); - rte_free(eth_dev->data); rte_eth_dev_release_port(eth_dev); rte_event_dev_close(nic->evdev); } @@ -1210,12 +1168,28 @@ octeontx_probe(struct rte_vdev_device *dev) struct rte_event_dev_config dev_conf; const char *eventdev_name = "event_octeontx"; struct rte_event_dev_info info; + struct rte_eth_dev *eth_dev; struct octeontx_vdev_init_params init_params = { OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT }; dev_name = rte_vdev_device_name(dev); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(dev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(dev_name); + if (!eth_dev) { + RTE_LOG(ERR, PMD, "Failed to probe %s\n", dev_name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &octeontx_dev_ops; + eth_dev->device = &dev->device; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + res = octeontx_parse_vdev_init_params(&init_params, dev); if (res < 0) return -EINVAL; diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h index 10e42e14..14f16969 100644 --- a/drivers/net/octeontx/octeontx_ethdev.h +++ b/drivers/net/octeontx/octeontx_ethdev.h @@ -28,6 +28,10 @@ #define OCTEONTX_MAX_BGX_PORTS 4 #define OCTEONTX_MAX_LMAC_PER_BGX 4 +#define OCTEONTX_RX_OFFLOADS (DEV_RX_OFFLOAD_CRC_STRIP \ + | DEV_RX_OFFLOAD_CHECKSUM) +#define OCTEONTX_TX_OFFLOADS DEV_TX_OFFLOAD_MT_LOCKFREE + static inline struct octeontx_nic * octeontx_pmd_priv(struct rte_eth_dev *dev) { diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c index 2502d90e..a9149b4e 100644 --- a/drivers/net/octeontx/octeontx_rxtx.c +++ b/drivers/net/octeontx/octeontx_rxtx.c @@ -31,7 +31,7 @@ __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va, return -ENOSPC; /* Get the gaura Id */ - gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)tx_pkt->pool->pool_id); + gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)tx_pkt->pool->pool_id); /* Setup PKO_SEND_HDR_S */ cmd_buf[0] = tx_pkt->data_len & 0xffff; diff --git a/drivers/net/pcap/meson.build b/drivers/net/pcap/meson.build index 8b81214e..0c4e0201 100644 --- a/drivers/net/pcap/meson.build +++ b/drivers/net/pcap/meson.build @@ -1,22 +1,12 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation -if meson.is_cross_build() - pcap_dep = cc.find_library('pcap', required: false) - if pcap_dep.found() - ext_deps += pcap_dep - else - build = false - endif +pcap_dep = cc.find_library('pcap', required: false) +if 
pcap_dep.found() and cc.has_header('pcap.h', dependencies: pcap_dep) + build = true else - pcap_dep = dependency('pcap', required: false) - if pcap_dep.found() == true - ext_deps += pcap_dep - elif find_program('pcap-config', required: false).found() == true - ext_deps += cc.find_library('pcap') - else - build = false - endif + build = false endif sources = files('rte_eth_pcap.c') +ext_deps += pcap_dep pkgconfig_extra_libs += '-lpcap' diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c index c1571e1f..e8810a17 100644 --- a/drivers/net/pcap/rte_eth_pcap.c +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -26,6 +26,7 @@ #define ETH_PCAP_RX_PCAP_ARG "rx_pcap" #define ETH_PCAP_TX_PCAP_ARG "tx_pcap" #define ETH_PCAP_RX_IFACE_ARG "rx_iface" +#define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in" #define ETH_PCAP_TX_IFACE_ARG "tx_iface" #define ETH_PCAP_IFACE_ARG "iface" @@ -83,6 +84,7 @@ static const char *valid_arguments[] = { ETH_PCAP_RX_PCAP_ARG, ETH_PCAP_TX_PCAP_ARG, ETH_PCAP_RX_IFACE_ARG, + ETH_PCAP_RX_IFACE_IN_ARG, ETH_PCAP_TX_IFACE_ARG, ETH_PCAP_IFACE_ARG, NULL @@ -96,9 +98,15 @@ static struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_AUTONEG, + .link_autoneg = ETH_LINK_FIXED, }; +static int eth_pcap_logtype; + +#define PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + static int eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf, const u_char *data, uint16_t data_len) @@ -256,8 +264,8 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) pcap_dump((u_char *)dumper_q->dumper, &header, tx_pcap_data); } else { - RTE_LOG(ERR, PMD, - "Dropping PCAP packet. Size (%d) > max jumbo size (%d).\n", + PMD_LOG(ERR, + "Dropping PCAP packet. Size (%d) > max jumbo size (%d).", mbuf->pkt_len, ETHER_MAX_JUMBO_FRAME_LEN); @@ -313,8 +321,8 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) ret = pcap_sendpacket(tx_queue->pcap, tx_pcap_data, mbuf->pkt_len); } else { - RTE_LOG(ERR, PMD, - "Dropping PCAP packet. Size (%d) > max jumbo size (%d).\n", + PMD_LOG(ERR, + "Dropping PCAP packet. 
Size (%d) > max jumbo size (%d).", mbuf->pkt_len, ETHER_MAX_JUMBO_FRAME_LEN); @@ -346,7 +354,7 @@ open_iface_live(const char *iface, pcap_t **pcap) { RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf); if (*pcap == NULL) { - RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf); + PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf); return -1; } @@ -357,7 +365,7 @@ static int open_single_iface(const char *iface, pcap_t **pcap) { if (open_iface_live(iface, pcap) < 0) { - RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface); + PMD_LOG(ERR, "Couldn't open interface %s", iface); return -1; } @@ -376,7 +384,7 @@ open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper) */ tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN); if (tx_pcap == NULL) { - RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n"); + PMD_LOG(ERR, "Couldn't create dead pcap"); return -1; } @@ -384,7 +392,7 @@ open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper) *dumper = pcap_dump_open(tx_pcap, pcap_filename); if (*dumper == NULL) { pcap_close(tx_pcap); - RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n", + PMD_LOG(ERR, "Couldn't open %s for writing.", pcap_filename); return -1; } @@ -398,7 +406,7 @@ open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap) { *pcap = pcap_open_offline(pcap_filename, errbuf); if (*pcap == NULL) { - RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename, + PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename, errbuf); return -1; } @@ -424,6 +432,7 @@ eth_dev_start(struct rte_eth_dev *dev) return -1; rx->pcap = tx->pcap; } + goto status_up; } @@ -459,6 +468,12 @@ eth_dev_start(struct rte_eth_dev *dev) } status_up: + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + dev->data->dev_link.link_status = ETH_LINK_UP; return 0; @@ -511,6 +526,12 @@ eth_dev_stop(struct rte_eth_dev *dev) } status_down: + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + dev->data->dev_link.link_status = ETH_LINK_DOWN; } @@ -532,6 +553,7 @@ eth_dev_info(struct rte_eth_dev *dev, dev_info->max_rx_queues = dev->data->nb_rx_queues; dev_info->max_tx_queues = dev->data->nb_tx_queues; dev_info->min_rx_bufsize = 0; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP; } static int @@ -637,6 +659,38 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, return 0; } +static int +eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static int +eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static int +eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +static int +eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + static const struct eth_dev_ops ops = { .dev_start = eth_dev_start, .dev_stop = eth_dev_stop, @@ -645,6 +699,10 @@ static const struct eth_dev_ops ops = { .dev_infos_get = eth_dev_info, .rx_queue_setup = 
eth_rx_queue_setup, .tx_queue_setup = eth_tx_queue_setup, + .rx_queue_start = eth_rx_queue_start, + .tx_queue_start = eth_tx_queue_start, + .rx_queue_stop = eth_rx_queue_stop, + .tx_queue_stop = eth_tx_queue_stop, .rx_queue_release = eth_queue_release, .tx_queue_release = eth_queue_release, .link_update = eth_link_update, @@ -652,6 +710,22 @@ static const struct eth_dev_ops ops = { .stats_reset = eth_stats_reset, }; +static int +add_queue(struct pmd_devargs *pmd, const char *name, const char *type, + pcap_t *pcap, pcap_dumper_t *dumper) +{ + if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES) + return -1; + if (pcap) + pmd->queue[pmd->num_of_queue].pcap = pcap; + if (dumper) + pmd->queue[pmd->num_of_queue].dumper = dumper; + pmd->queue[pmd->num_of_queue].name = name; + pmd->queue[pmd->num_of_queue].type = type; + pmd->num_of_queue++; + return 0; +} + /* * Function handler that opens the pcap file for reading and stores a * reference to it for later use. */ static int open_rx_pcap(const char *key, const char *value, void *extra_args) { - unsigned int i; const char *pcap_filename = value; struct pmd_devargs *rx = extra_args; pcap_t *pcap = NULL; - for (i = 0; i < rx->num_of_queue; i++) { - if (open_single_rx_pcap(pcap_filename, &pcap) < 0) - return -1; + if (open_single_rx_pcap(pcap_filename, &pcap) < 0) + return -1; - rx->queue[i].pcap = pcap; - rx->queue[i].name = pcap_filename; - rx->queue[i].type = key; + if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) { + pcap_close(pcap); + return -1; } return 0; @@ -683,18 +755,16 @@ open_rx_pcap(const char *key, const char *value, void *extra_args) static int open_tx_pcap(const char *key, const char *value, void *extra_args) { - unsigned int i; const char *pcap_filename = value; struct pmd_devargs *dumpers = extra_args; pcap_dumper_t *dumper; - for (i = 0; i < dumpers->num_of_queue; i++) { - if (open_single_tx_pcap(pcap_filename, &dumper) < 0) - return -1; + if (open_single_tx_pcap(pcap_filename, &dumper) < 0) + return -1; - dumpers->queue[i].dumper = dumper; - dumpers->queue[i].name = pcap_filename; - dumpers->queue[i].type = key; + if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) { + pcap_dump_close(dumper); + return -1; } return 0; @@ -720,48 +790,76 @@ open_rx_tx_iface(const char *key, const char *value, void *extra_args) return 0; } +static inline int +set_iface_direction(const char *iface, pcap_t *pcap, + pcap_direction_t direction) +{ + const char *direction_str = (direction == PCAP_D_IN) ? 
"IN" : "OUT"; + if (pcap_setdirection(pcap, direction) < 0) { + PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s\n", + iface, direction_str, pcap_geterr(pcap)); + return -1; + } + PMD_LOG(INFO, "Setting %s pcap direction %s\n", + iface, direction_str); + return 0; +} + +static inline int +open_iface(const char *key, const char *value, void *extra_args) +{ + const char *iface = value; + struct pmd_devargs *pmd = extra_args; + pcap_t *pcap = NULL; + + if (open_single_iface(iface, &pcap) < 0) + return -1; + if (add_queue(pmd, iface, key, pcap, NULL) < 0) { + pcap_close(pcap); + return -1; + } + + return 0; +} + /* * Opens a NIC for reading packets from it */ static inline int open_rx_iface(const char *key, const char *value, void *extra_args) { - unsigned int i; - const char *iface = value; - struct pmd_devargs *rx = extra_args; - pcap_t *pcap = NULL; + int ret = open_iface(key, value, extra_args); + if (ret < 0) + return ret; + if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) { + struct pmd_devargs *pmd = extra_args; + unsigned int qid = pmd->num_of_queue - 1; - for (i = 0; i < rx->num_of_queue; i++) { - if (open_single_iface(iface, &pcap) < 0) - return -1; - rx->queue[i].pcap = pcap; - rx->queue[i].name = iface; - rx->queue[i].type = key; + set_iface_direction(pmd->queue[qid].name, + pmd->queue[qid].pcap, + PCAP_D_IN); } return 0; } +static inline int +rx_iface_args_process(const char *key, const char *value, void *extra_args) +{ + if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 || + strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) + return open_rx_iface(key, value, extra_args); + + return 0; +} + /* * Opens a NIC for writing packets to it */ static int open_tx_iface(const char *key, const char *value, void *extra_args) { - unsigned int i; - const char *iface = value; - struct pmd_devargs *tx = extra_args; - pcap_t *pcap; - - for (i = 0; i < tx->num_of_queue; i++) { - if (open_single_iface(iface, &pcap) < 0) - return -1; - tx->queue[i].pcap = pcap; - tx->queue[i].name = iface; - tx->queue[i].type = key; - } - - return 0; + return open_iface(key, value, extra_args); } static struct rte_vdev_driver pmd_pcap_drv; @@ -773,27 +871,16 @@ pmd_init_internals(struct rte_vdev_device *vdev, struct pmd_internals **internals, struct rte_eth_dev **eth_dev) { - struct rte_eth_dev_data *data = NULL; + struct rte_eth_dev_data *data; unsigned int numa_node = vdev->device.numa_node; - const char *name; - name = rte_vdev_device_name(vdev); - RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %d\n", + PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d", numa_node); - /* now do all data allocation - for eth_dev structure - * and internal (private) data - */ - data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); - if (data == NULL) - return -1; - /* reserve an ethdev entry */ *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals)); - if (*eth_dev == NULL) { - rte_free(data); + if (!(*eth_dev)) return -1; - } /* now put it all together * - store queue data in internals, @@ -802,7 +889,7 @@ pmd_init_internals(struct rte_vdev_device *vdev, * - and point eth_dev structure to new eth_dev_data structure */ *internals = (*eth_dev)->data->dev_private; - rte_memcpy(data, (*eth_dev)->data, sizeof(*data)); + data = (*eth_dev)->data; data->nb_rx_queues = (uint16_t)nb_rx_queues; data->nb_tx_queues = (uint16_t)nb_tx_queues; data->dev_link = pmd_link; @@ -812,7 +899,6 @@ pmd_init_internals(struct rte_vdev_device *vdev, * NOTE: we'll replace the data element, of originally allocated * eth_dev so 
the rings are local per-process */ - (*eth_dev)->data = data; (*eth_dev)->dev_ops = &ops; return 0; @@ -899,6 +985,7 @@ eth_from_pcaps(struct rte_vdev_device *vdev, else eth_dev->tx_pkt_burst = eth_pcap_tx; + rte_eth_dev_probing_finish(eth_dev); return 0; } @@ -910,16 +997,31 @@ pmd_pcap_probe(struct rte_vdev_device *dev) struct rte_kvargs *kvlist; struct pmd_devargs pcaps = {0}; struct pmd_devargs dumpers = {0}; + struct rte_eth_dev *eth_dev; int single_iface = 0; int ret; name = rte_vdev_device_name(dev); - RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name); + PMD_LOG(INFO, "Initializing pmd_pcap for %s", name); gettimeofday(&start_time, NULL); start_cycles = rte_get_timer_cycles(); hz = rte_get_timer_hz(); + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(dev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + PMD_LOG(ERR, "Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &ops; + eth_dev->device = &dev->device; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); if (kvlist == NULL) return -1; @@ -949,22 +1051,16 @@ pmd_pcap_probe(struct rte_vdev_device *dev) * We check whether we want to open a RX stream from a real NIC or a * pcap file */ - pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG); - if (pcaps.num_of_queue) - is_rx_pcap = 1; - else - pcaps.num_of_queue = rte_kvargs_count(kvlist, - ETH_PCAP_RX_IFACE_ARG); - - if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES) - pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES; + is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0; + pcaps.num_of_queue = 0; - if (is_rx_pcap) + if (is_rx_pcap) { ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG, &open_rx_pcap, &pcaps); - else - ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG, - &open_rx_iface, &pcaps); + } else { + ret = rte_kvargs_process(kvlist, NULL, + &rx_iface_args_process, &pcaps); + } if (ret < 0) goto free_kvlist; @@ -973,15 +1069,8 @@ pmd_pcap_probe(struct rte_vdev_device *dev) * We check whether we want to open a TX stream to a real NIC or a * pcap file */ - dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG); - if (dumpers.num_of_queue) - is_tx_pcap = 1; - else - dumpers.num_of_queue = rte_kvargs_count(kvlist, - ETH_PCAP_TX_IFACE_ARG); - - if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES) - dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES; + is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 
1 : 0; + dumpers.num_of_queue = 0; if (is_tx_pcap) ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG, @@ -1008,7 +1097,7 @@ pmd_pcap_remove(struct rte_vdev_device *dev) { struct rte_eth_dev *eth_dev = NULL; - RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %d\n", + PMD_LOG(INFO, "Closing pcap ethdev on numa socket %d", rte_socket_id()); if (!dev) @@ -1020,7 +1109,6 @@ pmd_pcap_remove(struct rte_vdev_device *dev) return -1; rte_free(eth_dev->data->dev_private); - rte_free(eth_dev->data); rte_eth_dev_release_port(eth_dev); @@ -1038,5 +1126,13 @@ RTE_PMD_REGISTER_PARAM_STRING(net_pcap, ETH_PCAP_RX_PCAP_ARG "= " ETH_PCAP_TX_PCAP_ARG "= " ETH_PCAP_RX_IFACE_ARG "= " + ETH_PCAP_RX_IFACE_IN_ARG "= " ETH_PCAP_TX_IFACE_ARG "= " ETH_PCAP_IFACE_ARG "="); + +RTE_INIT(eth_pcap_init_log) +{ + eth_pcap_logtype = rte_log_register("pmd.net.pcap"); + if (eth_pcap_logtype >= 0) + rte_log_set_level(eth_pcap_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/qede/LICENSE.qede_pmd b/drivers/net/qede/LICENSE.qede_pmd deleted file mode 100644 index c7cbdccc..00000000 --- a/drivers/net/qede/LICENSE.qede_pmd +++ /dev/null @@ -1,28 +0,0 @@ -/* - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of QLogic Corporation nor the name of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written consent. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - */ diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile index ccbffa45..488ca1d9 100644 --- a/drivers/net/qede/Makefile +++ b/drivers/net/qede/Makefile @@ -1,8 +1,7 @@ -# Copyright (c) 2016 QLogic Corporation. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2016 - 2018 Cavium Inc. # All rights reserved. -# www.qlogic.com -# -# See LICENSE.qede_pmd for copyright and licensing details. 
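The pcap hunks above also swap every RTE_LOG(..., PMD, ...) call for a driver-local PMD_LOG() macro backed by a dynamically registered logtype, with the registration added at the end of the file. Restated on its own (all names exactly as in the diff):

#include <rte_common.h>
#include <rte_log.h>

static int eth_pcap_logtype;

/* Prefixes every message with the calling function and appends the
 * newline, so call sites drop their own trailing "\n".
 */
#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

RTE_INIT(eth_pcap_init_log)
{
	eth_pcap_logtype = rte_log_register("pmd.net.pcap");
	if (eth_pcap_logtype >= 0)
		rte_log_set_level(eth_pcap_logtype, RTE_LOG_NOTICE);
}

Once registered, the level can be tuned at runtime through the EAL --log-level option using the "pmd.net.pcap" name. The same block of changes registers the new rx_iface_in devarg, which behaves like rx_iface except that it additionally calls pcap_setdirection(pcap, PCAP_D_IN), so the port captures inbound traffic only.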
+# www.cavium.com include $(RTE_SDK)/mk/rte.vars.mk @@ -73,8 +72,7 @@ ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion endif else #ICC -CFLAGS_BASE_DRIVER += -wd188 #188: enumerated type mixed with another type -CFLAGS_qede_ethdev.o += -wd279 #279: controlling expression is constant +CFLAGS_qede_ethdev.o += -diag-disable 279 #279: controlling expression is constant endif # diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c index fe42f325..d5d6f8e2 100644 --- a/drivers/net/qede/base/bcm_osal.c +++ b/drivers/net/qede/base/bcm_osal.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include @@ -133,10 +131,10 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev, snprintf(mz_name, sizeof(mz_name) - 1, "%lx", (unsigned long)rte_get_timer_cycles()); if (core_id == (unsigned int)LCORE_ID_ANY) - core_id = 0; + core_id = rte_get_master_lcore(); socket_id = rte_lcore_to_socket_id(core_id); - mz = rte_memzone_reserve_aligned(mz_name, size, - socket_id, 0, RTE_CACHE_LINE_SIZE); + mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, + RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE); if (!mz) { DP_ERR(p_dev, "Unable to allocate DMA memory " "of size %zu bytes - %s\n", @@ -172,9 +170,10 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev, snprintf(mz_name, sizeof(mz_name) - 1, "%lx", (unsigned long)rte_get_timer_cycles()); if (core_id == (unsigned int)LCORE_ID_ANY) - core_id = 0; + core_id = rte_get_master_lcore(); socket_id = rte_lcore_to_socket_id(core_id); - mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, 0, align); + mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, + RTE_MEMZONE_IOVA_CONTIG, align); if (!mz) { DP_ERR(p_dev, "Unable to allocate DMA memory " "of size %zu bytes - %s\n", @@ -200,6 +199,11 @@ void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys) DP_VERBOSE(p_dev, ECORE_MSG_SP, "Free memzone %s\n", ecore_mz_mapping[j]->name); rte_memzone_free(ecore_mz_mapping[j]); + while (j < ecore_mz_count - 1) { + ecore_mz_mapping[j] = ecore_mz_mapping[j + 1]; + j++; + } + ecore_mz_count--; return; } } diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h index 52c2f0ec..630867fa 100644 --- a/drivers/net/qede/base/bcm_osal.h +++ b/drivers/net/qede/base/bcm_osal.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __BCM_OSAL_H diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h index 9a6059ac..ca8e59db 100644 --- a/drivers/net/qede/base/common_hsi.h +++ b/drivers/net/qede/base/common_hsi.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
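Two details in the bcm_osal.c hunks above deserve a note. DMA buffers are now reserved with RTE_MEMZONE_IOVA_CONTIG, making the IOVA-contiguity requirement explicit under the dynamic memory model introduced in 18.05, and they land on the master lcore's socket rather than socket 0 when the caller's lcore is LCORE_ID_ANY. In addition, osal_dma_free_mem() now compacts the ecore_mz_mapping[] tracking array so its first ecore_mz_count entries stay dense. A sketch of that compaction as a standalone helper (the helper name and signature are illustrative, not part of the driver):

#include <rte_memzone.h>

/* Free slot j of a dense memzone table and slide the tail down so
 * tbl[0 .. *cnt - 1] remains gap-free, mirroring the loop added in
 * osal_dma_free_mem().
 */
static void
mz_table_remove(const struct rte_memzone **tbl, unsigned int *cnt,
		unsigned int j)
{
	rte_memzone_free(tbl[j]);
	while (j < *cnt - 1) {
		tbl[j] = tbl[j + 1];
		j++;
	}
	(*cnt)--;
}

Without the shift, a freed slot would leave a hole that scans bounded by ecore_mz_count could stop short of, so zones stored past the hole might never be found and freed.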
+ * www.cavium.com */ #ifndef __COMMON_HSI__ @@ -96,10 +94,10 @@ /****************************************************************************/ -#define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 30 -#define FW_REVISION_VERSION 12 -#define FW_ENGINEERING_VERSION 0 +#define FW_MAJOR_VERSION 8 +#define FW_MINOR_VERSION 33 +#define FW_REVISION_VERSION 12 +#define FW_ENGINEERING_VERSION 0 /***********************/ /* COMMON HW CONSTANTS */ diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h index ce5f3a90..5d79fdf0 100644 --- a/drivers/net/qede/base/ecore.h +++ b/drivers/net/qede/base/ecore.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_H @@ -41,6 +39,9 @@ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION) +#define IS_ECORE_PACING(p_hwfn) \ + (!!(p_hwfn->b_en_pacing)) + #define MAX_HWFNS_PER_DEVICE 2 #define NAME_SIZE 128 /* @DPDK */ #define ECORE_WFQ_UNIT 100 @@ -432,8 +433,10 @@ struct ecore_hw_info { #define DMAE_MAX_RW_SIZE 0x2000 struct ecore_dmae_info { - /* Mutex for synchronizing access to functions */ - osal_mutex_t mutex; + /* Spinlock for synchronizing access to functions */ + osal_spinlock_t lock; + + bool b_mem_ready; u8 channel; @@ -534,6 +537,12 @@ enum ecore_mf_mode_bit { ECORE_MF_UFP_SPECIFIC, ECORE_MF_DISABLE_ARFS, + + /* Use vlan for steering */ + ECORE_MF_8021Q_TAGGING, + + /* Use stag for steering */ + ECORE_MF_8021AD_TAGGING, }; enum ecore_ufp_mode { @@ -672,6 +681,13 @@ struct ecore_hwfn { /* Mechanism for recovering from doorbell drop */ struct ecore_db_recovery_info db_recovery_info; + /* Enable/disable pacing, if request to enable then + * IOV and mcos configuration will be skipped. + * this actually reflects the value requested in + * struct ecore_hw_prepare_params by ecore client. + */ + bool b_en_pacing; + /* @DPDK */ struct ecore_ptt *p_arfs_ptt; }; @@ -924,12 +940,16 @@ void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb, #define PQ_FLAGS_ACK (1 << 4) #define PQ_FLAGS_OFLD (1 << 5) #define PQ_FLAGS_VFS (1 << 6) +#define PQ_FLAGS_LLT (1 << 7) /* physical queue index for cm context intialization */ u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags); u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc); u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf); -u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid); +u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl); + +/* qm vport for rate limit configuration */ +u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl); const char *ecore_hw_get_resc_name(enum ecore_resources res_id); diff --git a/drivers/net/qede/base/ecore_attn_values.h b/drivers/net/qede/base/ecore_attn_values.h index d8951bcc..ec773fbd 100644 --- a/drivers/net/qede/base/ecore_attn_values.h +++ b/drivers/net/qede/base/ecore_attn_values.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
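Beyond the relicensing, the ecore.h hunk above widens the rate-limit index in ecore_get_cm_pq_idx_rl() from u8 to u16 and declares a new ecore_get_qm_vport_idx_rl() helper, while common_hsi.h raises the firmware baseline from 8.30.12 to 8.33.12. The mapping the new helper performs is implemented in an ecore_dev.c hunk further down and amounts to the following fragment (CM_TX_PQ_BASE is an existing base-driver constant):

	/* Translate rate-limit index rl back to the vport it drives. */
	u16 pq = ecore_get_cm_pq_idx_rl(p_hwfn, rl);   /* CM PQ index */
	u16 qm_pq_idx = pq - p_hwfn->qm_info.start_pq - CM_TX_PQ_BASE;
	u16 vport_id = p_hwfn->qm_info.qm_pq_params[qm_pq_idx].vport_id;

That is, the rl-th rate-limited PQ is converted into a QM-local index, whose pq-params entry then yields the vport to configure.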
+ * www.cavium.com */ #ifndef __ATTN_VALUES_H__ diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h index d8f69ad6..6d0382d3 100644 --- a/drivers/net/qede/base/ecore_chain.h +++ b/drivers/net/qede/base/ecore_chain.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_CHAIN_H__ @@ -526,7 +524,7 @@ static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain) p_chain->p_prod_elem = p_chain->p_virt_addr; if (p_chain->mode == ECORE_CHAIN_MODE_PBL) { - /* Use (page_cnt - 1) as a reset value for the prod/cons page's + /* Use "page_cnt-1" as a reset value for the prod/cons page's * indices, to avoid unnecessary page advancing on the first * call to ecore_chain_produce/consume. Instead, the indices * will be advanced to page_cnt and then will be wrapped to 0. @@ -726,6 +724,21 @@ out: static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain, u32 prod_idx, void *p_prod_elem) { + if (p_chain->mode == ECORE_CHAIN_MODE_PBL) { + /* Use "prod_idx-1" since ecore_chain_produce() advances the + * page index before the producer index when getting to + * "next_page_mask". + */ + u32 elem_idx = + (prod_idx - 1 + p_chain->capacity) % p_chain->capacity; + u32 page_idx = elem_idx / p_chain->elem_per_page; + + if (is_chain_u16(p_chain)) + p_chain->pbl.c.u16.prod_page_idx = (u16)page_idx; + else + p_chain->pbl.c.u32.prod_page_idx = page_idx; + } + if (is_chain_u16(p_chain)) p_chain->u.chain16.prod_idx = (u16)prod_idx; else @@ -733,6 +746,38 @@ static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain, p_chain->p_prod_elem = p_prod_elem; } +/** + * @brief ecore_chain_set_cons - sets the cons to the given value + * + * @param cons_idx + * @param p_cons_elem + */ +static OSAL_INLINE void ecore_chain_set_cons(struct ecore_chain *p_chain, + u32 cons_idx, void *p_cons_elem) +{ + if (p_chain->mode == ECORE_CHAIN_MODE_PBL) { + /* Use "cons_idx-1" since ecore_chain_consume() advances the + * page index before the consumer index when getting to + * "next_page_mask". + */ + u32 elem_idx = + (cons_idx - 1 + p_chain->capacity) % p_chain->capacity; + u32 page_idx = elem_idx / p_chain->elem_per_page; + + if (is_chain_u16(p_chain)) + p_chain->pbl.c.u16.cons_page_idx = (u16)page_idx; + else + p_chain->pbl.c.u32.cons_page_idx = page_idx; + } + + if (is_chain_u16(p_chain)) + p_chain->u.chain16.cons_idx = (u16)cons_idx; + else + p_chain->u.chain32.cons_idx = cons_idx; + + p_chain->p_cons_elem = p_cons_elem; +} + /** * @brief ecore_chain_pbl_zero_mem - set chain memory to 0 * diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c index 50bd66da..bf36ce58 100644 --- a/drivers/net/qede/base/ecore_cxt.c +++ b/drivers/net/qede/base/ecore_cxt.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
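The two ecore_chain.h hunks above make direct producer/consumer updates keep the PBL page indices coherent. The arithmetic is easiest to check with numbers: for a chain with capacity = 1024 elements and elem_per_page = 256, setting prod_idx = 512 gives elem_idx = (512 - 1 + 1024) % 1024 = 511 and page_idx = 511 / 256 = 1, so the stored producer page index points at the page of the element just before prod_idx. That matches ecore_chain_produce()/ecore_chain_consume(), which advance the page index before the element index when crossing next_page_mask; the "+ capacity" term exists only so the subtraction cannot go negative when the index being set is 0.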
+ * www.cavium.com */ #include "bcm_osal.h" @@ -834,7 +832,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn) p_mngr->t2_num_pages * sizeof(struct ecore_dma_mem)); if (!p_mngr->t2) { - DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n"); rc = ECORE_NOMEM; goto t2_fail; } @@ -919,6 +917,9 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn) struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 ilt_size, i; + if (p_mngr->ilt_shadow == OSAL_NULL) + return; + ilt_size = ecore_cxt_ilt_shadow_size(p_cli); for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) { @@ -931,6 +932,7 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn) p_dma->p_virt = OSAL_NULL; } OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow); + p_mngr->ilt_shadow = OSAL_NULL; } static enum _ecore_status_t @@ -1000,8 +1002,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn) size * sizeof(struct ecore_dma_mem)); if (!p_mngr->ilt_shadow) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate ilt shadow table\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n"); rc = ECORE_NOMEM; goto ilt_shadow_fail; } @@ -1044,12 +1045,14 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn) for (type = 0; type < MAX_CONN_TYPES; type++) { OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map); + p_mngr->acquired[type].cid_map = OSAL_NULL; p_mngr->acquired[type].max_count = 0; p_mngr->acquired[type].start_cid = 0; for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) { OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired_vf[type][vf].cid_map); + p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL; p_mngr->acquired_vf[type][vf].max_count = 0; p_mngr->acquired_vf[type][vf].start_cid = 0; } @@ -1126,8 +1129,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn) p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr)); if (!p_mngr) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate `struct ecore_cxt_mngr'\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_cxt_mngr'\n"); return ECORE_NOMEM; } @@ -1189,21 +1191,21 @@ enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn) /* Allocate the ILT shadow table */ rc = ecore_ilt_shadow_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate ilt memory\n"); goto tables_alloc_fail; } /* Allocate the T2 table */ rc = ecore_cxt_src_t2_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n"); goto tables_alloc_fail; } /* Allocate and initialize the acquired cids bitmaps */ rc = ecore_cid_map_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate cid maps\n"); goto tables_alloc_fail; } @@ -1427,7 +1429,8 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn) } } -void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + bool is_pf_loading) { struct ecore_qm_info *qm_info = &p_hwfn->qm_info; struct ecore_mcp_link_state *p_link; @@ -1438,8 +1441,9 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output; - ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id, - p_hwfn->rel_pf_id, 
qm_info->max_phys_tcs_per_port, + ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->rel_pf_id, + qm_info->max_phys_tcs_per_port, + is_pf_loading, iids.cids, iids.vf_cids, iids.tids, qm_info->start_pq, qm_info->num_pqs - qm_info->num_vf_pqs, @@ -1797,7 +1801,7 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn) void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { - ecore_qm_init_pf(p_hwfn, p_ptt); + ecore_qm_init_pf(p_hwfn, p_ptt, true); ecore_cm_init_pf(p_hwfn); ecore_dq_init_pf(p_hwfn); ecore_cdu_init_pf(p_hwfn); diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h index 54761e4e..f8c955ca 100644 --- a/drivers/net/qede/base/ecore_cxt.h +++ b/drivers/net/qede/base/ecore_cxt.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef _ECORE_CID_ @@ -107,8 +105,10 @@ void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); * * @param p_hwfn * @param p_ptt + * @param is_pf_loading */ -void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); +void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + bool is_pf_loading); /** * @brief Reconfigures QM pf on the fly diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h index 6d87620d..6c8b2831 100644 --- a/drivers/net/qede/base/ecore_cxt_api.h +++ b/drivers/net/qede/base/ecore_cxt_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_CXT_API_H__ diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c index 21ddda92..96678745 100644 --- a/drivers/net/qede/base/ecore_dcbx.c +++ b/drivers/net/qede/base/ecore_dcbx.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
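ecore_qm_init_pf() now takes an is_pf_loading flag that is threaded down to ecore_qm_pf_rt_init(), which, as the hunk above shows, also loses its explicit port_id argument; each PQ instead carries its own port_id, set in an ecore_dev.c hunk further down. The two call sites visible in these hunks distinguish the initial load from a live reconfiguration (fragment, call sites as in the diff):

	ecore_qm_init_pf(p_hwfn, p_ptt, true);	/* ecore_cxt_hw_init_pf(): initial PF load */
	ecore_qm_init_pf(p_hwfn, p_ptt, false);	/* ecore_qm_reconf(): on-the-fly QM reconfig */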
+ * www.cavium.com */ #include "bcm_osal.h" @@ -149,6 +147,10 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data, } p_data->arr[type].update = UPDATE_DCB_DSCP; + /* Do not add vlan tag 0 when DCB is enabled and port is in UFP mode */ + if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) + p_data->arr[type].dont_add_vlan0 = true; + /* QM reconf data */ if (p_hwfn->hw_info.personality == personality) p_hwfn->hw_info.offload_tc = tc; @@ -910,7 +912,7 @@ enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn) p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_hwfn->p_dcbx_info)); if (!p_hwfn->p_dcbx_info) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_dcbx_info'"); return ECORE_NOMEM; } @@ -935,6 +937,7 @@ static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data, p_data->dcb_tc = p_src->arr[type].tc; p_data->dscp_enable_flag = p_src->arr[type].dscp_enable; p_data->dscp_val = p_src->arr[type].dscp_val; + p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0; } /* Set pf update ramrod command params */ diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h index 469e42dd..519e6cea 100644 --- a/drivers/net/qede/base/ecore_dcbx.h +++ b/drivers/net/qede/base/ecore_dcbx.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_DCBX_H__ diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h index 9ff4df4c..eaf8e082 100644 --- a/drivers/net/qede/base/ecore_dcbx_api.h +++ b/drivers/net/qede/base/ecore_dcbx_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_DCBX_API_H__ @@ -29,6 +27,7 @@ struct ecore_dcbx_app_data { u8 tc; /* Traffic Class */ bool dscp_enable; /* DSCP enabled */ u8 dscp_val; /* DSCP value */ + bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */ }; #ifndef __EXTRACT__LINUX__ diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c index 744d2043..31f1f3ee 100644 --- a/drivers/net/qede/base/ecore_dev.c +++ b/drivers/net/qede/base/ecore_dev.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include "bcm_osal.h" @@ -39,7 +37,7 @@ * there's more than a single compiled ecore component in system]. 
*/ static osal_spinlock_t qm_lock; -static bool qm_lock_init; +static u32 qm_lock_ref_cnt; /******************** Doorbell Recovery *******************/ /* The doorbell recovery mechanism consists of a list of entries which represent @@ -227,7 +225,8 @@ enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn) OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list); #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock)) + return ECORE_NOMEM; #endif OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock); p_hwfn->db_recovery_info.db_recovery_counter = 0; @@ -411,7 +410,7 @@ void ecore_init_dp(struct ecore_dev *p_dev, } } -void ecore_init_struct(struct ecore_dev *p_dev) +enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev) { u8 i; @@ -423,9 +422,10 @@ void ecore_init_struct(struct ecore_dev *p_dev) p_hwfn->b_active = false; #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock)) + goto handle_err; #endif - OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock); } /* hwfn 0 is always active */ @@ -433,6 +433,17 @@ void ecore_init_struct(struct ecore_dev *p_dev) /* set the default cache alignment to 128 (may be overridden later) */ p_dev->cache_shift = 7; + return ECORE_SUCCESS; +#ifdef CONFIG_ECORE_LOCK_ALLOC +handle_err: + while (--i) { + struct ecore_hwfn *p_hwfn = OSAL_NULL; + + p_hwfn = &p_dev->hwfns[i]; + OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock); + } + return ECORE_NOMEM; +#endif } static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn) @@ -500,11 +511,14 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) /* feature flags */ if (IS_ECORE_SRIOV(p_hwfn->p_dev)) flags |= PQ_FLAGS_VFS; + if (IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_RLS; /* protocol flags */ switch (p_hwfn->hw_info.personality) { case ECORE_PCI_ETH: - flags |= PQ_FLAGS_MCOS; + if (!IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_MCOS; break; case ECORE_PCI_FCOE: flags |= PQ_FLAGS_OFLD; @@ -513,11 +527,14 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; break; case ECORE_PCI_ETH_ROCE: - flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD; + flags |= PQ_FLAGS_OFLD | PQ_FLAGS_LLT; + if (!IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_MCOS; break; case ECORE_PCI_ETH_IWARP: - flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | - PQ_FLAGS_OFLD; + flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; + if (!IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_MCOS; break; default: DP_ERR(p_hwfn, "unknown personality %d\n", @@ -721,6 +738,7 @@ static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn, "pq overflow! 
pq %d, max pq %d\n", pq_idx, max_pq); /* init pq params */ + qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id; qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports; qm_info->qm_pq_params[pq_idx].tc_id = tc; @@ -823,7 +841,7 @@ u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf) return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; } -u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl) +u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl) { u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn); @@ -833,6 +851,23 @@ u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl) return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; } +u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl) +{ + u16 start_pq, pq, qm_pq_idx; + + pq = ecore_get_cm_pq_idx_rl(p_hwfn, rl); + start_pq = p_hwfn->qm_info.start_pq; + qm_pq_idx = pq - start_pq - CM_TX_PQ_BASE; + + if (qm_pq_idx > p_hwfn->qm_info.num_pqs) { + DP_ERR(p_hwfn, + "qm_pq_idx %d must be smaller than %d\n", + qm_pq_idx, p_hwfn->qm_info.num_pqs); + } + + return p_hwfn->qm_info.qm_pq_params[qm_pq_idx].vport_id; +} + /* Functions for creating specific types of pqs */ static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn) { @@ -1025,10 +1060,9 @@ static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn) for (i = 0; i < qm_info->num_pqs; i++) { pq = &qm_info->qm_pq_params[i]; DP_VERBOSE(p_hwfn, ECORE_MSG_HW, - "pq idx %d, vport_id %d, tc %d, wrr_grp %d," - " rl_valid %d\n", - qm_info->start_pq + i, pq->vport_id, pq->tc_id, - pq->wrr_group, pq->rl_valid); + "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", + qm_info->start_pq + i, pq->port_id, pq->vport_id, + pq->tc_id, pq->wrr_group, pq->rl_valid); } } @@ -1083,7 +1117,7 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, ecore_init_clear_rt_data(p_hwfn); /* prepare QM portion of runtime array */ - ecore_qm_init_pf(p_hwfn, p_ptt); + ecore_qm_init_pf(p_hwfn, p_ptt, false); /* activate init tool on runtime array */ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, @@ -1289,16 +1323,14 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) /* DMA info initialization */ rc = ecore_dmae_info_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate memory for dmae_info" - " structure\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate memory for dmae_info structure\n"); goto alloc_err; } /* DCBX initialization */ rc = ecore_dcbx_info_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate memory for dcbx structure\n"); goto alloc_err; } @@ -1307,7 +1339,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL, sizeof(*p_dev->reset_stats)); if (!p_dev->reset_stats) { - DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n"); + DP_NOTICE(p_dev, false, "Failed to allocate reset statistics\n"); goto alloc_no_mem; } @@ -1658,7 +1690,8 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, ecore_init_cache_line_size(p_hwfn, p_ptt); - rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); + rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ECORE_PATH_ID(p_hwfn), + hw_mode); if (rc != ECORE_SUCCESS) return rc; @@ -2160,6 +2193,11 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, /* perform debug configuration when chip is out of reset */ OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id); + /* Sanity 
check before the PF init sequence that uses DMAE */ + rc = ecore_dmae_sanity(p_hwfn, p_ptt, "pf_phase"); + if (rc) + return rc; + /* PF Init sequence */ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); if (rc) @@ -2205,42 +2243,43 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, DP_NOTICE(p_hwfn, true, "Function start ramrod failed\n"); } else { - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); - - if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { - ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, - (1 << 2)); - ecore_wr(p_hwfn, p_ptt, - PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, - 0x100); - } - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH registers after start PFn\n"); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_TCP: %x\n", prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_UDP: %x\n", prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_FCOE: %x\n", prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_ROCE: %x\n", prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, - PRS_REG_SEARCH_TCP_FIRST_FRAG); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", - prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); + return rc; } + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); + + if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, + (1 << 2)); + ecore_wr(p_hwfn, p_ptt, + PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, + 0x100); + } + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH registers after start PFn\n"); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TCP: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_UDP: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_FCOE: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_ROCE: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, + PRS_REG_SEARCH_TCP_FIRST_FRAG); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", + prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); } - return rc; + return ECORE_SUCCESS; } enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn, @@ -2283,14 +2322,15 @@ static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn, } static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt) + struct ecore_ptt *p_ptt) { ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id); } -static void -ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req, +static enum _ecore_status_t +ecore_fill_load_req_params(struct ecore_hwfn *p_hwfn, + struct 
ecore_load_req_params *p_load_req, struct ecore_drv_load_params *p_drv_load) { /* Make sure that if ecore-client didn't provide inputs, all the @@ -2302,15 +2342,51 @@ ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req, OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req)); - if (p_drv_load != OSAL_NULL) { - p_load_req->drv_role = p_drv_load->is_crash_kernel ? - ECORE_DRV_ROLE_KDUMP : - ECORE_DRV_ROLE_OS; + if (p_drv_load == OSAL_NULL) + goto out; + + p_load_req->drv_role = p_drv_load->is_crash_kernel ? + ECORE_DRV_ROLE_KDUMP : + ECORE_DRV_ROLE_OS; + p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; + p_load_req->override_force_load = p_drv_load->override_force_load; + + /* Old MFW versions don't support timeout values other than default and + * none, so these values are replaced according to the fall-back action. + */ + + if (p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT || + p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_NONE || + (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO)) { p_load_req->timeout_val = p_drv_load->mfw_timeout_val; - p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; - p_load_req->override_force_load = - p_drv_load->override_force_load; + goto out; + } + + switch (p_drv_load->mfw_timeout_fallback) { + case ECORE_TO_FALLBACK_TO_NONE: + p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_NONE; + break; + case ECORE_TO_FALLBACK_TO_DEFAULT: + p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT; + break; + case ECORE_TO_FALLBACK_FAIL_LOAD: + DP_NOTICE(p_hwfn, false, + "Received %d as a value for MFW timeout while the MFW supports only default [%d] or none [%d]. Abort.\n", + p_drv_load->mfw_timeout_val, + ECORE_LOAD_REQ_LOCK_TO_DEFAULT, + ECORE_LOAD_REQ_LOCK_TO_NONE); + return ECORE_ABORTED; } + + DP_INFO(p_hwfn, + "Modified the MFW timeout value from %d to %s [%d] due to lack of MFW support\n", + p_drv_load->mfw_timeout_val, + (p_load_req->timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT) ? + "default" : "none", + p_load_req->timeout_val); +out: + return ECORE_SUCCESS; } enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn, @@ -2366,12 +2442,17 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, if (rc != ECORE_SUCCESS) return rc; - ecore_fill_load_req_params(&load_req_params, - p_params->p_drv_load_params); + ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms); + + rc = ecore_fill_load_req_params(p_hwfn, &load_req_params, + p_params->p_drv_load_params); + if (rc != ECORE_SUCCESS) + return rc; + rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_req_params); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed sending a LOAD_REQ command\n"); return rc; } @@ -2404,10 +2485,17 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, p_hwfn->first_on_engine = (load_code == FW_MSG_CODE_DRV_LOAD_ENGINE); - if (!qm_lock_init) { + if (!qm_lock_ref_cnt) { +#ifdef CONFIG_ECORE_LOCK_ALLOC + rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock); + if (rc) { + DP_ERR(p_hwfn, "qm_lock allocation failed\n"); + goto qm_lock_fail; + } +#endif OSAL_SPIN_LOCK_INIT(&qm_lock); - qm_lock_init = true; } + ++qm_lock_ref_cnt; /* Clean up chip from previous driver if such remains exist. 
* This is not needed when the PF is the first one on the @@ -2423,9 +2511,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, } } - /* Log and clean previous pglue_b errors if such exist */ - ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); - ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); + /* Log and clear previous pglue_b errors if such exist */ + ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true); /* Enable the PF's internal FID_enable in the PXP */ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, @@ -2433,6 +2520,13 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, if (rc != ECORE_SUCCESS) goto load_err; + /* Clear the pglue_b was_error indication. + * In E4 it must be done after the BME and the internal + * FID_enable for the PF are set, since VDMs may cause the + * indication to be set again. + */ + ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); + switch (load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, @@ -2462,15 +2556,23 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, } if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "init phase failed for loadcode 0x%x (rc %d)\n", load_code, rc); goto load_err; } rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); - if (rc != ECORE_SUCCESS) + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Sending load done failed, rc = %d\n", rc); + if (rc == ECORE_NOMEM) { + DP_NOTICE(p_hwfn, false, + "Sending load done failed due to a memory allocation failure\n"); + goto load_err; + } return rc; + } /* send DCBX attention request command */ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, @@ -2480,7 +2582,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, 1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp, &param); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to send DCBX attention request\n"); return rc; } @@ -2513,6 +2615,12 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, return rc; load_err: + --qm_lock_ref_cnt; +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (!qm_lock_ref_cnt) + OSAL_SPIN_LOCK_DEALLOC(&qm_lock); +qm_lock_fail: +#endif /* The MFW load lock should be released regardless of success or failure * of initialization. * TODO: replace this with an attempt to send cancel_load. */ @@ -2547,8 +2655,8 @@ static void ecore_hw_timers_stop(struct ecore_dev *p_dev, if (i < ECORE_HW_STOP_RETRY_LIMIT) return; - DP_NOTICE(p_hwfn, true, "Timers linear scans are not over" - " [Connection %02x Tasks %02x]\n", + DP_NOTICE(p_hwfn, false, + "Timers linear scans are not over [Connection %02x Tasks %02x]\n", (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); } @@ -2613,7 +2721,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) if (!p_dev->recov_in_prog) { rc = ecore_mcp_unload_req(p_hwfn, p_ptt); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed sending a UNLOAD_REQ command. rc = %d.\n", rc); rc2 = ECORE_UNKNOWN_ERROR; @@ -2628,7 +2736,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) rc = ecore_sp_pf_stop(p_hwfn); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to close PF against FW [rc = %d]. 
Continue to stop HW to prevent illegal host access by the device.\n", rc); rc2 = ECORE_UNKNOWN_ERROR; @@ -2682,10 +2790,21 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); + --qm_lock_ref_cnt; +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (!qm_lock_ref_cnt) + OSAL_SPIN_LOCK_DEALLOC(&qm_lock); +#endif + if (!p_dev->recov_in_prog) { - ecore_mcp_unload_done(p_hwfn, p_ptt); + rc = ecore_mcp_unload_done(p_hwfn, p_ptt); + if (rc == ECORE_NOMEM) { + DP_NOTICE(p_hwfn, false, + "Failed sending an UNLOAD_DONE command due to a memory allocation failure. Resending.\n"); + rc = ecore_mcp_unload_done(p_hwfn, p_ptt); + } if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed sending a UNLOAD_DONE command. rc = %d.\n", rc); rc2 = ECORE_UNKNOWN_ERROR; @@ -2936,7 +3055,7 @@ __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, resc_max_val, p_mcp_resp); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "MFW response failure for a max value setting of resource %d [%s]\n", res_id, ecore_hw_get_resc_name(res_id)); return rc; @@ -3496,9 +3615,14 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, break; case NVM_CFG1_GLOB_MF_MODE_UFP: p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS | - 1 << ECORE_MF_UFP_SPECIFIC; + 1 << ECORE_MF_UFP_SPECIFIC | + 1 << ECORE_MF_8021Q_TAGGING; + break; + case NVM_CFG1_GLOB_MF_MODE_BD: + p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS | + 1 << ECORE_MF_LLH_PROTO_CLSS | + 1 << ECORE_MF_8021AD_TAGGING; break; - case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS | 1 << ECORE_MF_LLH_PROTO_CLSS | @@ -3527,6 +3651,7 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, */ switch (mf_mode) { case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: + case NVM_CFG1_GLOB_MF_MODE_BD: p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN; break; case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: @@ -3780,8 +3905,13 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, bool drv_resc_alloc = p_params->drv_resc_alloc; enum _ecore_status_t rc; + if (IS_ECORE_PACING(p_hwfn)) { + DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_IOV, + "Skipping IOV as packet pacing is requested\n"); + } + /* Since all information is common, only first hwfns should do this */ - if (IS_LEAD_HWFN(p_hwfn)) { + if (IS_LEAD_HWFN(p_hwfn) && !IS_ECORE_PACING(p_hwfn)) { rc = ecore_iov_hw_info(p_hwfn); if (rc != ECORE_SUCCESS) { if (p_params->b_relaxed_probe) @@ -3866,7 +3996,10 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, * that can result in performance penalty in some cases. 4 * represents a good tradeoff between performance and flexibility. */ - p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; + if (IS_ECORE_PACING(p_hwfn)) + p_hwfn->hw_info.num_hw_tc = 1; + else + p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; /* start out with a single active tc. 
This can be increased either * by dcbx negotiation or by upper layer driver @@ -4037,7 +4170,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, /* Allocate PTT pool */ rc = ecore_ptt_pool_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n"); + DP_NOTICE(p_hwfn, false, "Failed to prepare hwfn's hw\n"); if (p_params->b_relaxed_probe) p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; goto err0; @@ -4062,7 +4195,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, /* Initialize MCP structure */ rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n"); + DP_NOTICE(p_hwfn, false, "Failed initializing mcp command\n"); if (p_params->b_relaxed_probe) p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; goto err1; @@ -4072,7 +4205,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, p_params->personality, p_params); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to get HW information\n"); + DP_NOTICE(p_hwfn, false, "Failed to get HW information\n"); goto err2; } @@ -4115,7 +4248,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, /* Allocate the init RT array and initialize the init-ops engine */ rc = ecore_init_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n"); if (p_params->b_relaxed_probe) p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; goto err2; @@ -4153,6 +4286,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, p_dev->chk_reg_fifo = p_params->chk_reg_fifo; p_dev->allow_mdump = p_params->allow_mdump; + p_hwfn->b_en_pacing = p_params->b_en_pacing; if (p_params->b_relaxed_probe) p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS; @@ -4188,6 +4322,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, BAR_ID_1) / 2; p_doorbell = (void OSAL_IOMEM *)addr; + p_dev->hwfns[1].b_en_pacing = p_params->b_en_pacing; /* prepare second hw function */ rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview, p_doorbell, p_params); @@ -4205,8 +4340,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, ecore_mcp_free(p_hwfn); ecore_hw_hwfn_free(p_hwfn); } else { - DP_NOTICE(p_dev, true, - "What do we need to free when VF hwfn1 init fails\n"); + DP_NOTICE(p_dev, false, "What do we need to free when VF hwfn1 init fails\n"); } return rc; } @@ -4237,7 +4371,7 @@ void ecore_hw_remove(struct ecore_dev *p_dev) ecore_mcp_free(p_hwfn); #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock); #endif } @@ -4368,7 +4502,7 @@ ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain) p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); if (!p_virt) { - DP_NOTICE(p_dev, true, + DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n"); return ECORE_NOMEM; } @@ -4401,7 +4535,7 @@ ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); if (!p_virt) { - DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n"); + DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n"); return ECORE_NOMEM; } @@ -4425,7 +4559,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev, size = page_cnt * sizeof(*pp_virt_addr_tbl); pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size); if (!pp_virt_addr_tbl) { - 
DP_NOTICE(p_dev, true, + DP_NOTICE(p_dev, false, "Failed to allocate memory for the chain virtual addresses table\n"); return ECORE_NOMEM; } @@ -4449,7 +4583,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev, ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_virt_addr_tbl); if (!p_pbl_virt) { - DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n"); + DP_NOTICE(p_dev, false, "Failed to allocate chain pbl memory\n"); return ECORE_NOMEM; } @@ -4457,7 +4591,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev, p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); if (!p_virt) { - DP_NOTICE(p_dev, true, + DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n"); return ECORE_NOMEM; } @@ -4497,7 +4631,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev, rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size, page_cnt); if (rc) { - DP_NOTICE(p_dev, true, + DP_NOTICE(p_dev, false, "Cannot allocate a chain with the given arguments:\n" "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", intended_use, mode, cnt_type, num_elems, elem_size); diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h index 98bcabe8..02bacc22 100644 --- a/drivers/net/qede/base/ecore_dev_api.h +++ b/drivers/net/qede/base/ecore_dev_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_DEV_API_H__ @@ -32,7 +30,7 @@ void ecore_init_dp(struct ecore_dev *p_dev, * * @param p_dev */ -void ecore_init_struct(struct ecore_dev *p_dev); +enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev); /** * @brief ecore_resc_free - @@ -57,6 +55,12 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev); */ void ecore_resc_setup(struct ecore_dev *p_dev); +enum ecore_mfw_timeout_fallback { + ECORE_TO_FALLBACK_TO_NONE, + ECORE_TO_FALLBACK_TO_DEFAULT, + ECORE_TO_FALLBACK_FAIL_LOAD, +}; + enum ecore_override_force_load { ECORE_OVERRIDE_FORCE_LOAD_NONE, ECORE_OVERRIDE_FORCE_LOAD_ALWAYS, @@ -79,6 +83,11 @@ struct ecore_drv_load_params { #define ECORE_LOAD_REQ_LOCK_TO_DEFAULT 0 #define ECORE_LOAD_REQ_LOCK_TO_NONE 255 + /* Action to take in case the MFW doesn't support timeout values other + * than default and none. + */ + enum ecore_mfw_timeout_fallback mfw_timeout_fallback; + /* Avoid engine reset when first PF loads on it */ bool avoid_eng_reset; @@ -104,6 +113,9 @@ struct ecore_hw_init_params { /* Driver load parameters */ struct ecore_drv_load_params *p_drv_load_params; + + /* SPQ block timeout in msec */ + u32 spq_timeout_ms; }; /** @@ -256,6 +268,9 @@ struct ecore_hw_prepare_params { */ bool b_relaxed_probe; enum ecore_hw_prepare_result p_relaxed_res; + + /* Enable/disable request by ecore client for pacing */ + bool b_en_pacing; }; /** @@ -363,6 +378,7 @@ struct ecore_eth_stats_common { u64 tx_mac_mc_packets; u64 tx_mac_bc_packets; u64 tx_mac_ctrl_frames; + u64 link_change_count; }; struct ecore_eth_stats_bb { diff --git a/drivers/net/qede/base/ecore_gtt_reg_addr.h b/drivers/net/qede/base/ecore_gtt_reg_addr.h index 2acd864d..8c8fed4e 100644 --- a/drivers/net/qede/base/ecore_gtt_reg_addr.h +++ b/drivers/net/qede/base/ecore_gtt_reg_addr.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef GTT_REG_ADDR_H diff --git a/drivers/net/qede/base/ecore_gtt_values.h b/drivers/net/qede/base/ecore_gtt_values.h index 2ddc5f19..adc20c0c 100644 --- a/drivers/net/qede/base/ecore_gtt_values.h +++ b/drivers/net/qede/base/ecore_gtt_values.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __PREVENT_PXP_GLOBAL_WIN__ diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h index d8abd604..2d761b97 100644 --- a/drivers/net/qede/base/ecore_hsi_common.h +++ b/drivers/net/qede/base/ecore_hsi_common.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_HSI_COMMON__ @@ -381,7 +379,7 @@ struct e4_xstorm_core_conn_ag_ctx { __le16 reserved16 /* physical_q2 */; __le16 tx_bd_cons /* word3 */; __le16 tx_bd_or_spq_prod /* word4 */; - __le16 word5 /* word5 */; + __le16 updated_qm_pq_id /* word5 */; __le16 conn_dpi /* conn_dpi */; u8 byte3 /* byte3 */; u8 byte4 /* byte4 */; @@ -904,8 +902,10 @@ struct core_rx_start_ramrod_data { /* if set, 802.1q tags will be removed and copied to CQE */ /* if set, 802.1q tags will be removed and copied to CQE */ u8 inner_vlan_stripping_en; -/* if set, outer tag wont be stripped, valid only in MF OVLAN. */ - u8 outer_vlan_stripping_dis; +/* if set and inner vlan does not exist, the outer vlan will copied to CQE as + * inner vlan. should be used in MF_OVLAN mode only. + */ + u8 report_outer_vlan; u8 queue_id /* Light L2 RX Queue ID */; u8 main_func_queue /* Is this the main queue for the PF */; /* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if @@ -946,7 +946,9 @@ struct core_tx_bd_data { /* Do not allow additional VLAN manipulations on this packet (DCB) */ #define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1 #define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0 -/* Insert VLAN into packet */ +/* Insert VLAN into packet. Cannot be set for LB packets + * (tx_dst == CORE_TX_DEST_LB) + */ #define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1 #define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1 /* This is the first BD of the packet (for debug) */ @@ -1069,11 +1071,11 @@ struct core_tx_update_ramrod_data { * Enum flag for what type of dcb data to update */ enum dcb_dscp_update_mode { -/* use when no change should be done to dcb data */ +/* use when no change should be done to DCB data */ DONT_UPDATE_DCB_DSCP, - UPDATE_DCB /* use to update only l2 (vlan) priority */, - UPDATE_DSCP /* use to update only l3 dscp */, - UPDATE_DCB_DSCP /* update vlan pri and dscp */, + UPDATE_DCB /* use to update only L2 (vlan) priority */, + UPDATE_DSCP /* use to update only IP DSCP */, + UPDATE_DCB_DSCP /* update vlan pri and DSCP */, MAX_DCB_DSCP_UPDATE_FLAG }; @@ -1291,10 +1293,15 @@ enum fw_flow_ctrl_mode { * GFT profile type. */ enum gft_profile_type { - GFT_PROFILE_TYPE_4_TUPLE /* 4 tuple, IP type and L4 type match. */, -/* L4 destination port, IP type and L4 type match. 
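/* A worked illustration of the dcb_dscp_update_mode enum documented
 * earlier in this hunk (hypothetical helper, not driver code): choosing
 * the update mode for a ramrod from which of the two settings changed.
 */
static enum dcb_dscp_update_mode
choose_dcb_dscp_update(bool vlan_pri_changed, bool dscp_changed)
{
	if (vlan_pri_changed && dscp_changed)
		return UPDATE_DCB_DSCP;		/* VLAN priority and DSCP */
	if (vlan_pri_changed)
		return UPDATE_DCB;		/* L2 (VLAN) priority only */
	if (dscp_changed)
		return UPDATE_DSCP;		/* IP DSCP only */
	return DONT_UPDATE_DCB_DSCP;		/* leave DCB data unchanged */
}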
*/ +/* tunnel type, inner 4 tuple, IP type and L4 type match. */ + GFT_PROFILE_TYPE_4_TUPLE, +/* tunnel type, inner L4 destination port, IP type and L4 type match. */ GFT_PROFILE_TYPE_L4_DST_PORT, - GFT_PROFILE_TYPE_IP_DST_PORT /* IP destination port and IP type. */, +/* tunnel type, inner IP destination address and IP type match. */ + GFT_PROFILE_TYPE_IP_DST_ADDR, +/* tunnel type, inner IP source address and IP type match. */ + GFT_PROFILE_TYPE_IP_SRC_ADDR, + GFT_PROFILE_TYPE_TUNNEL_TYPE /* tunnel type and outer IP type match. */, MAX_GFT_PROFILE_TYPE }; @@ -1411,8 +1418,9 @@ struct vlan_header { * outer tag configurations */ struct outer_tag_config_struct { -/* Enables the STAG Priority Change , Should be 1 for Bette Davis and UFP with - * Host Control mode. Else - 0 +/* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette + * Davis, UFP with Host Control mode, and UFP with DCB over base interface. + * else - 0. */ u8 enable_stag_pri_change; /* If inner_to_outer_pri_map is initialize then set pri_map_valid */ @@ -1507,15 +1515,18 @@ struct pf_start_ramrod_data { /* - * Data for port update ramrod + * Per protocol DCB data */ struct protocol_dcb_data { - u8 dcb_enable_flag /* dcbEnable flag value */; - u8 dscp_enable_flag /* If set use dscp value */; - u8 dcb_priority /* dcbPri flag value */; - u8 dcb_tc /* dcb TC value */; - u8 dscp_val /* dscp value to write if dscp_enable_flag is set */; - u8 reserved0; + u8 dcb_enable_flag /* Enable DCB */; + u8 dscp_enable_flag /* Enable updating DSCP value */; + u8 dcb_priority /* DCB priority */; + u8 dcb_tc /* DCB TC */; + u8 dscp_val /* DSCP value to write if dscp_enable_flag is set */; +/* When DCB is enabled - if this flag is set, dont add VLAN 0 tag to untagged + * frames + */ + u8 dcb_dont_add_vlan0; }; /* @@ -1575,8 +1586,9 @@ struct pf_update_ramrod_data { /* core iwarp related fields */ struct protocol_dcb_data iwarp_dcb_data; __le16 mf_vlan /* new outer vlan id value */; -/* enables the inner to outer TAG priority mapping. Should be 1 for Bette Davis - * and UFP with Host Control mode, else - 0. +/* enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette + * Davis, UFP with Host Control mode, and UFP with DCB over base interface. 
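/* A minimal sketch (not driver code) of filling the per-protocol DCB
 * data shown above for a PF-update ramrod; the field names come from
 * struct protocol_dcb_data, the helper itself is hypothetical.
 */
static void fill_protocol_dcb_data(struct protocol_dcb_data *p_data,
				   u8 pri, u8 tc, u8 dscp_en, u8 dscp_val)
{
	p_data->dcb_enable_flag = 1;		/* enable DCB */
	p_data->dcb_priority = pri;		/* DCB priority */
	p_data->dcb_tc = tc;			/* DCB TC */
	p_data->dscp_enable_flag = dscp_en;	/* enable updating DSCP */
	p_data->dscp_val = dscp_val;	/* used only if dscp_enable_flag set */
	/* keep inserting VLAN 0 on untagged frames when DCB is enabled */
	p_data->dcb_dont_add_vlan0 = 0;
}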
+ * else - 0 */ u8 enable_stag_pri_change; u8 reserved; @@ -1739,6 +1751,7 @@ struct tstorm_per_port_stat { struct regpair eth_vxlan_tunn_filter_discard; /* GENEVE dropped packets */ struct regpair eth_geneve_tunn_filter_discard; + struct regpair eth_gft_drop_pkt /* GFT dropped packets */; }; @@ -2130,6 +2143,53 @@ struct e4_ystorm_core_conn_ag_ctx { }; +struct fw_asserts_ram_section { +/* The offset of the section in the RAM in RAM lines (64-bit units) */ + __le16 section_ram_line_offset; +/* The size of the section in RAM lines (64-bit units) */ + __le16 section_ram_line_size; +/* The offset of the asserts list within the section in dwords */ + u8 list_dword_offset; +/* The size of an assert list element in dwords */ + u8 list_element_dword_size; + u8 list_num_elements /* The number of elements in the asserts list */; +/* The offset of the next list index field within the section in dwords */ + u8 list_next_index_dword_offset; +}; + + +struct fw_ver_num { + u8 major /* Firmware major version number */; + u8 minor /* Firmware minor version number */; + u8 rev /* Firmware revision version number */; + u8 eng /* Firmware engineering version number (for bootleg versions) */; +}; + +struct fw_ver_info { + __le16 tools_ver /* Tools version number */; + u8 image_id /* FW image ID (e.g. main, l2b, kuku) */; + u8 reserved1; + struct fw_ver_num num /* FW version number */; + __le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */; + __le32 reserved2; +}; + +struct fw_info { + struct fw_ver_info ver /* FW version information */; +/* Info regarding the FW asserts section in the Storm RAM */ + struct fw_asserts_ram_section fw_asserts_section; +}; + + +struct fw_info_location { + __le32 grc_addr /* GRC address where the fw_info struct is located. */; +/* Size of the fw_info structure (thats located at the grc_addr). */ + __le32 size; +}; + + + + /* * IGU cleanup command */ diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h index ebb66482..bf548722 100644 --- a/drivers/net/qede/base/ecore_hsi_debug_tools.h +++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
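/* A sketch of how the fw_info/fw_info_location pair added above (and
 * relocated into ecore_hsi_common.h by this patch) is meant to be
 * consumed: a fixed Storm-RAM address holds the location descriptor,
 * which points at the fw_info blob. Assumes ecore_memcpy_from() from
 * ecore_hw.h; the helper and its loc_grc_addr parameter are
 * illustrative, and __le32 conversion is omitted (little-endian host
 * assumed).
 */
static enum _ecore_status_t
read_fw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
	     u32 loc_grc_addr, struct fw_info *p_fw_info)
{
	struct fw_info_location loc;

	/* Fetch the location descriptor, then the fw_info it points at */
	ecore_memcpy_from(p_hwfn, p_ptt, &loc, loc_grc_addr, sizeof(loc));
	if (loc.size < sizeof(*p_fw_info))
		return ECORE_INVAL;

	ecore_memcpy_from(p_hwfn, p_ptt, p_fw_info, loc.grc_addr,
			  sizeof(*p_fw_info));
	return ECORE_SUCCESS;
}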
+ * www.cavium.com */ #ifndef __ECORE_HSI_DEBUG_TOOLS__ @@ -225,7 +223,7 @@ enum bin_dbg_buffer_type { * Attention bit mapping */ struct dbg_attn_bit_mapping { - __le16 data; + u16 data; /* The index of an attention in the blocks attentions list * (if is_unused_bit_cnt=0), or a number of consecutive unused attention bits * (if is_unused_bit_cnt=1) @@ -247,14 +245,14 @@ struct dbg_attn_block_type_data { /* Offset of this block attention names in the debug attention name offsets * array */ - __le16 names_offset; - __le16 reserved1; + u16 names_offset; + u16 reserved1; u8 num_regs /* Number of attention registers in this block */; u8 reserved2; /* Offset of this blocks attention registers in the attention registers array * (in dbg_attn_reg units) */ - __le16 regs_offset; + u16 regs_offset; }; /* @@ -272,20 +270,20 @@ struct dbg_attn_block { * Attention register result */ struct dbg_attn_reg_result { - __le32 data; + u32 data; /* STS attention register GRC address (in dwords) */ #define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF #define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 /* Number of attention indexes in this register */ #define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF #define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24 -/* The offset of this registers attentions within the blocks attentions - * list (a value in the range 0..number of block attentions-1) +/* The offset of this registers attentions within the blocks attentions list + * (a value in the range 0..number of block attentions-1) */ - __le16 attn_idx_offset; - __le16 reserved; - __le32 sts_val /* Value read from the STS attention register */; - __le32 mask_val /* Value read from the MASK attention register */; + u16 block_attn_offset; + u16 reserved; + u32 sts_val /* Value read from the STS attention register */; + u32 mask_val /* Value read from the MASK attention register */; }; /* @@ -303,7 +301,7 @@ struct dbg_attn_block_result { /* Offset of this registers block attention names in the attention name offsets * array */ - __le16 names_offset; + u16 names_offset; /* result data for each register in the block in which at least one attention * bit is set */ @@ -316,7 +314,7 @@ struct dbg_attn_block_result { * mode header */ struct dbg_mode_hdr { - __le16 data; + u16 data; /* indicates if a mode expression should be evaluated (0/1) */ #define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 #define DBG_MODE_HDR_EVAL_MODE_SHIFT 0 @@ -331,12 +329,11 @@ struct dbg_mode_hdr { * Attention register */ struct dbg_attn_reg { - struct dbg_mode_hdr mode /* Mode header */; -/* The offset of this registers attentions within the blocks attentions - * list (a value in the range 0..number of block attentions-1) +/* The offset of this registers attentions within the blocks attentions list + * (a value in the range 0..number of block attentions-1) */ - __le16 attn_idx_offset; - __le32 data; + u16 block_attn_offset; + u32 data; /* STS attention register GRC address (in dwords) */ #define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF #define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 @@ -344,9 +341,8 @@ struct dbg_attn_reg { #define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF #define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24 /* STS_CLR attention register GRC address (in dwords) */ - __le32 sts_clr_address; -/* MASK attention register GRC address (in dwords) */ - __le32 mask_address; + u32 sts_clr_address; + u32 mask_address /* MASK attention register GRC address (in dwords) */; }; @@ -370,7 +366,7 @@ struct dbg_bus_block { /* Indicates if this block has a latency events debug line (0/1). 
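/* A note on the recurring type change in this file: these debug/tools
 * structures are built and parsed on the host (driver and dump tools),
 * not by the device, so the little-endian wrapper types (__le16/__le32)
 * give way to host-order u16/u32 and fields can be used directly, with
 * no endianness conversion. A tiny illustration (hypothetical accessor):
 */
static u16 dbg_block_names_offset(const struct dbg_attn_block_type_data *p_data)
{
	return p_data->names_offset;	/* host-order, no le16-to-cpu step */
}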
*/ u8 has_latency_events; /* Offset of this blocks lines in the Debug Bus lines array. */ - __le16 lines_offset; + u16 lines_offset; }; @@ -383,7 +379,7 @@ struct dbg_bus_block_user_data { /* Indicates if this block has a latency events debug line (0/1). */ u8 has_latency_events; /* Offset of this blocks lines in the debug bus line name offsets array. */ - __le16 names_offset; + u16 names_offset; }; @@ -422,13 +418,13 @@ struct dbg_dump_cond_hdr { * memory data for registers dump */ struct dbg_dump_mem { - __le32 dword0; + u32 dword0; /* register address (in dwords) */ #define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF #define DBG_DUMP_MEM_ADDRESS_SHIFT 0 #define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF /* memory group ID */ #define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 - __le32 dword1; + u32 dword1; /* register size (in dwords) */ #define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF #define DBG_DUMP_MEM_LENGTH_SHIFT 0 @@ -444,7 +440,7 @@ struct dbg_dump_mem { * register data for registers dump */ struct dbg_dump_reg { - __le32 data; + u32 data; /* register address (in dwords) */ #define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */ #define DBG_DUMP_REG_ADDRESS_SHIFT 0 @@ -460,7 +456,7 @@ struct dbg_dump_reg { * split header for registers dump */ struct dbg_dump_split_hdr { - __le32 hdr; + u32 hdr; /* size in dwords of the data following this header */ #define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF #define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 @@ -474,8 +470,7 @@ struct dbg_dump_split_hdr { */ struct dbg_idle_chk_cond_hdr { struct dbg_mode_hdr mode /* Mode header */; -/* size in dwords of the data following this header */ - __le16 data_size; + u16 data_size /* size in dwords of the data following this header */; }; @@ -483,7 +478,7 @@ struct dbg_idle_chk_cond_hdr { * Idle Check condition register */ struct dbg_idle_chk_cond_reg { - __le32 data; + u32 data; /* Register GRC address (in dwords) */ #define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF #define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 @@ -493,7 +488,7 @@ struct dbg_idle_chk_cond_reg { /* value from block_id enum */ #define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF #define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 - __le16 num_entries /* number of registers entries to check */; + u16 num_entries /* number of registers entries to check */; u8 entry_size /* size of registers entry (in dwords) */; u8 start_entry /* index of the first entry to check */; }; @@ -503,7 +498,7 @@ struct dbg_idle_chk_cond_reg { * Idle Check info register */ struct dbg_idle_chk_info_reg { - __le32 data; + u32 data; /* Register GRC address (in dwords) */ #define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF #define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 @@ -513,7 +508,7 @@ struct dbg_idle_chk_info_reg { /* value from block_id enum */ #define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF #define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 - __le16 size /* register size in dwords */; + u16 size /* register size in dwords */; struct dbg_mode_hdr mode /* Mode header */; }; @@ -531,8 +526,8 @@ union dbg_idle_chk_reg { * Idle Check result header */ struct dbg_idle_chk_result_hdr { - __le16 rule_id /* Failing rule index */; - __le16 mem_entry_id /* Failing memory entry index */; + u16 rule_id /* Failing rule index */; + u16 mem_entry_id /* Failing memory entry index */; u8 num_dumped_cond_regs /* number of dumped condition registers */; u8 num_dumped_info_regs /* number of dumped condition registers */; u8 severity /* from dbg_idle_chk_severity_types enum */; @@ -552,7 +547,7 @@ 
struct dbg_idle_chk_result_reg_hdr { #define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F #define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1 u8 start_entry /* index of the first checked entry */; - __le16 size /* register size in dwords */; + u16 size /* register size in dwords */; }; @@ -560,7 +555,7 @@ struct dbg_idle_chk_result_reg_hdr { * Idle Check rule */ struct dbg_idle_chk_rule { - __le16 rule_id /* Idle Check rule ID */; + u16 rule_id /* Idle Check rule ID */; u8 severity /* value from dbg_idle_chk_severity_types enum */; u8 cond_id /* Condition ID */; u8 num_cond_regs /* number of condition registers */; @@ -570,11 +565,11 @@ struct dbg_idle_chk_rule { /* offset of this rules registers in the idle check register array * (in dbg_idle_chk_reg units) */ - __le16 reg_offset; + u16 reg_offset; /* offset of this rules immediate values in the immediate values array * (in dwords) */ - __le16 imm_offset; + u16 imm_offset; }; @@ -582,7 +577,7 @@ struct dbg_idle_chk_rule { * Idle Check rule parsing data */ struct dbg_idle_chk_rule_parsing_data { - __le32 data; + u32 data; /* indicates if this register has a FW message */ #define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 #define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 @@ -693,8 +688,8 @@ struct dbg_bus_trigger_state_data { * Debug Bus memory address */ struct dbg_bus_mem_addr { - __le32 lo; - __le32 hi; + u32 lo; + u32 hi; }; /* @@ -703,7 +698,7 @@ struct dbg_bus_mem_addr { struct dbg_bus_pci_buf_data { struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */; struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */; - __le32 size /* PCI buffer size in bytes */; + u32 size /* PCI buffer size in bytes */; }; /* @@ -747,21 +742,20 @@ struct dbg_bus_storm_data { u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */; /* EID filter params to filter on. Valid only if eid_filter_en is set. */ union dbg_bus_storm_eid_params eid_filter_params; -/* CID to filter on. Valid only if cid_filter_en is set. */ - __le32 cid; + u32 cid /* CID to filter on. Valid only if cid_filter_en is set. */; }; /* * Debug Bus data */ struct dbg_bus_data { - __le32 app_version /* The tools version number of the application */; + u32 app_version /* The tools version number of the application */; u8 state /* The current debug bus state */; u8 hw_dwords /* HW dwords per cycle */; /* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the * HW ID of dword/qword i */ - __le16 hw_id_mask; + u16 hw_id_mask; u8 num_enabled_blocks /* Number of blocks enabled for recording */; u8 num_enabled_storms /* Number of Storms enabled for recording */; u8 target /* Output target */; @@ -783,7 +777,7 @@ struct dbg_bus_data { * Valid only if both filter and trigger are enabled (0/1) */ u8 filter_post_trigger; - __le16 reserved; + u16 reserved; /* Indicates if the recording trigger is enabled (0/1) */ u8 trigger_en; /* trigger states data */ @@ -933,9 +927,10 @@ struct dbg_grc_data { /* Indicates if the GRC parameters were initialized */ u8 params_initialized; u8 reserved1; - __le16 reserved2; -/* Value of each GRC parameter. Array size must match enum dbg_grc_params. */ - __le32 param_val[48]; + u16 reserved2; +/* Value of each GRC parameter. Array size must match the enum dbg_grc_params. 
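/* param_val[] in struct dbg_grc_data (converted to host-order u32 here)
 * is indexed by the dbg_grc_params enum, so a parameter such as the
 * MCP-trace meta data size introduced just below is set by plain array
 * access. Hypothetical helper for illustration:
 */
static void grc_set_param(struct dbg_grc_data *p_grc,
			  enum dbg_grc_params grc_param, u32 val)
{
	/* param_val[] length (48) must match the dbg_grc_params enum */
	p_grc->param_val[grc_param] = val;
}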
+ */ + u32 param_val[48]; }; @@ -960,7 +955,8 @@ enum dbg_grc_params { DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */, DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */, DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */, - DBG_GRC_PARAM_RESERVED /* reserved */, +/* MCP Trace meta data size in bytes */ + DBG_GRC_PARAM_MCP_TRACE_META_SIZE, DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */, DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */, DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */, @@ -1087,11 +1083,11 @@ enum dbg_storms { * Idle Check data */ struct idle_chk_data { - __le32 buf_size /* Idle check buffer size in dwords */; + u32 buf_size /* Idle check buffer size in dwords */; /* Indicates if the idle check buffer size was set (0/1) */ u8 buf_size_set; u8 reserved1; - __le16 reserved2; + u16 reserved2; }; /* @@ -1109,7 +1105,7 @@ struct dbg_tools_data { u8 initialized /* Indicates if the data was initialized */; u8 use_dmae /* Indicates if DMAE should be used */; /* Numbers of registers that were read since last log */ - __le32 num_regs_read; + u32 num_regs_read; }; diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h index ffbf5c71..6b512305 100644 --- a/drivers/net/qede/base/ecore_hsi_eth.h +++ b/drivers/net/qede/base/ecore_hsi_eth.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_HSI_ETH__ @@ -346,7 +344,7 @@ struct e4_xstorm_eth_conn_ag_ctx { __le16 edpm_num_bds /* physical_q2 */; __le16 tx_bd_cons /* word3 */; __le16 tx_bd_prod /* word4 */; - __le16 tx_class /* word5 */; + __le16 updated_qm_pq_id /* word5 */; __le16 conn_dpi /* conn_dpi */; u8 byte3 /* byte3 */; u8 byte4 /* byte4 */; @@ -1034,7 +1032,6 @@ struct eth_vport_rx_mode { #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 #define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF #define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6 - __le16 reserved2[3]; }; @@ -1046,11 +1043,11 @@ struct eth_vport_tpa_param { u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */; u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */; u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */; -/* If set, start each tpa segment on new SGE (GRO mode). One SGE per segment - * allowed +/* If set, start each TPA segment on new BD (GRO mode). One BD per segment + * allowed. */ u8 tpa_pkt_split_flg; -/* If set, put header of first TPA segment on bd and data on SGE */ +/* If set, put header of first TPA segment on first BD and data on second BD. */ u8 tpa_hdr_data_split_flg; /* If set, GRO data consistent will checked for TPA continue */ u8 tpa_gro_consistent_flg; @@ -1089,7 +1086,6 @@ struct eth_vport_tx_mode { #define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4 #define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF #define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5 - __le16 reserved2[3]; }; @@ -1216,7 +1212,9 @@ struct rx_queue_update_ramrod_data { u8 complete_cqe_flg /* post completion to the CQE ring if set */; u8 complete_event_flg /* post completion to the event ring if set */; u8 vport_id /* ID of virtual port */; - u8 reserved[4]; +/* If set, update default rss queue to this RX queue. */ + u8 set_default_rss_queue; + u8 reserved[3]; u8 reserved1 /* FW reserved. */; u8 reserved2 /* FW reserved. */; u8 reserved3 /* FW reserved. 
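/* An illustration (hypothetical helper) of the flag added just above: a
 * queue-update ramrod can now also nominate the updated RX queue as the
 * vport's default RSS queue.
 */
static void rx_queue_update_set_default(struct rx_queue_update_ramrod_data *p_ramrod,
					u8 vport_id)
{
	p_ramrod->vport_id = vport_id;
	p_ramrod->set_default_rss_queue = 1;	/* steer default RSS here */
}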
*/; @@ -1257,7 +1255,8 @@ struct rx_update_gft_filter_data { __le16 action_icid; __le16 rx_qid /* RX queue ID. Valid if rx_qid_valid set. */; __le16 flow_id /* RX flow ID. Valid if flow_id_valid set. */; - u8 vport_id /* RX vport Id. */; +/* RX vport Id. For drop flow, set to ETH_GFT_TRASHCAN_VPORT. */ + __le16 vport_id; /* If set, action_icid will used for GFT filter update. */ u8 action_icid_valid; /* If set, rx_qid will used for traffic steering, in additional to vport_id. @@ -1273,7 +1272,10 @@ struct rx_update_gft_filter_data { * case of error. */ u8 assert_on_error; - u8 reserved[2]; +/* If set, inner VLAN will be removed regardless to VPORT configuration. + * Supported by E4 only. + */ + u8 inner_vlan_removal_en; }; @@ -1403,7 +1405,7 @@ struct vport_start_ramrod_data { u8 ctl_frame_mac_check_en; /* If set, control frames will be filtered according to ethtype check. */ u8 ctl_frame_ethtype_check_en; - u8 reserved[5]; + u8 reserved[1]; }; @@ -1486,6 +1488,7 @@ struct vport_update_ramrod_data { struct vport_update_ramrod_data_cmn common; struct eth_vport_rx_mode rx_mode /* vport rx mode bitmap */; struct eth_vport_tx_mode tx_mode /* vport tx mode bitmap */; + __le32 reserved[3]; /* TPA configuration parameters */ struct eth_vport_tpa_param tpa_param; struct vport_update_ramrod_mcast approx_mcast; @@ -1809,7 +1812,7 @@ struct E4XstormEthConnAgCtxDqExtLdPart { __le16 edpm_num_bds /* physical_q2 */; __le16 tx_bd_cons /* word3 */; __le16 tx_bd_prod /* word4 */; - __le16 tx_class /* word5 */; + __le16 updated_qm_pq_id /* word5 */; __le16 conn_dpi /* conn_dpi */; u8 byte3 /* byte3 */; u8 byte4 /* byte4 */; @@ -2153,7 +2156,7 @@ struct e4_xstorm_eth_hw_conn_ag_ctx { __le16 edpm_num_bds /* physical_q2 */; __le16 tx_bd_cons /* word3 */; __le16 tx_bd_prod /* word4 */; - __le16 tx_class /* word5 */; + __le16 updated_qm_pq_id /* word5 */; __le16 conn_dpi /* conn_dpi */; }; diff --git a/drivers/net/qede/base/ecore_hsi_init_func.h b/drivers/net/qede/base/ecore_hsi_init_func.h index 48b0048f..d77edaa1 100644 --- a/drivers/net/qede/base/ecore_hsi_init_func.h +++ b/drivers/net/qede/base/ecore_hsi_init_func.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_HSI_INIT_FUNC__ @@ -24,10 +22,10 @@ * BRB RAM init requirements */ struct init_brb_ram_req { - __le32 guranteed_per_tc /* guaranteed size per TC, in bytes */; - __le32 headroom_per_tc /* headroom size per TC, in bytes */; - __le32 min_pkt_size /* min packet size, in bytes */; - __le32 max_ports_per_engine /* min packet size, in bytes */; + u32 guranteed_per_tc /* guaranteed size per TC, in bytes */; + u32 headroom_per_tc /* headroom size per TC, in bytes */; + u32 min_pkt_size /* min packet size, in bytes */; + u32 max_ports_per_engine /* min packet size, in bytes */; u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */; }; @@ -44,15 +42,14 @@ struct init_ets_tc_req { * (indicated by the weight field) */ u8 use_wfq; -/* An arbitration weight. Valid only if use_wfq is set. */ - __le16 weight; + u16 weight /* An arbitration weight. Valid only if use_wfq is set. */; }; /* * ETS init requirements */ struct init_ets_req { - __le32 mtu /* Max packet size (in bytes) */; + u32 mtu /* Max packet size (in bytes) */; /* ETS initialization requirements per TC. 
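/* An illustration (hypothetical helper) of the drop-flow convention in
 * the rx_update_gft_filter_data hunk above: steering a GFT filter to
 * ETH_GFT_TRASHCAN_VPORT discards matching packets. OSAL_CPU_TO_LE16 is
 * assumed to be the driver's usual cpu-to-LE16 wrapper.
 */
static void gft_filter_set_drop(struct rx_update_gft_filter_data *p_data)
{
	p_data->vport_id = OSAL_CPU_TO_LE16(ETH_GFT_TRASHCAN_VPORT);
	p_data->rx_qid_valid = 0;	/* no queue steering for a drop flow */
}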
*/ struct init_ets_tc_req tc_req[NUM_OF_TCS]; }; @@ -64,12 +61,12 @@ struct init_ets_req { */ struct init_nig_lb_rl_req { /* Global MAC+LB RL rate (in Mbps). If set to 0, the RL will be disabled. */ - __le16 lb_mac_rate; + u16 lb_mac_rate; /* Global LB RL rate (in Mbps). If set to 0, the RL will be disabled. */ - __le16 lb_rate; - __le32 mtu /* Max packet size (in bytes) */; + u16 lb_rate; + u32 mtu /* Max packet size (in bytes) */; /* RL rate per physical TC (in Mbps). If set to 0, the RL will be disabled. */ - __le16 tc_rate[NUM_OF_PHYS_TCS]; + u16 tc_rate[NUM_OF_PHYS_TCS]; }; @@ -98,10 +95,10 @@ struct init_qm_port_params { /* Vector of valid bits for active TCs used by this port */ u8 active_phys_tcs; /* number of PBF command lines that can be used by this port */ - __le16 num_pbf_cmd_lines; + u16 num_pbf_cmd_lines; /* number of BTB blocks that can be used by this port */ - __le16 num_btb_blocks; - __le16 reserved; + u16 num_btb_blocks; + u16 reserved; }; @@ -114,6 +111,9 @@ struct init_qm_pq_params { u8 wrr_group /* WRR group */; /* Indicates if a rate limiter should be allocated for the PQ (0/1) */ u8 rl_valid; + u8 port_id /* Port ID */; + u8 reserved0; + u16 reserved1; }; @@ -124,13 +124,13 @@ struct init_qm_vport_params { /* rate limit in Mb/sec units. a value of 0 means dont configure. ignored if * VPORT RL is globally disabled. */ - __le32 vport_rl; + u32 vport_rl; /* WFQ weight. A value of 0 means dont configure. ignored if VPORT WFQ is * globally disabled. */ - __le16 vport_wfq; + u16 vport_wfq; /* the first Tx PQ ID associated with this VPORT for each TC. */ - __le16 first_tx_pq_id[NUM_OF_TCS]; + u16 first_tx_pq_id[NUM_OF_TCS]; }; #endif /* __ECORE_HSI_INIT_FUNC__ */ diff --git a/drivers/net/qede/base/ecore_hsi_init_tool.h b/drivers/net/qede/base/ecore_hsi_init_tool.h index 1f57e9b2..0e157f9b 100644 --- a/drivers/net/qede/base/ecore_hsi_init_tool.h +++ b/drivers/net/qede/base/ecore_hsi_init_tool.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_HSI_INIT_TOOL__ @@ -30,59 +28,13 @@ enum chip_ids { }; -struct fw_asserts_ram_section { -/* The offset of the section in the RAM in RAM lines (64-bit units) */ - __le16 section_ram_line_offset; -/* The size of the section in RAM lines (64-bit units) */ - __le16 section_ram_line_size; -/* The offset of the asserts list within the section in dwords */ - u8 list_dword_offset; -/* The size of an assert list element in dwords */ - u8 list_element_dword_size; - u8 list_num_elements /* The number of elements in the asserts list */; -/* The offset of the next list index field within the section in dwords */ - u8 list_next_index_dword_offset; -}; - - -struct fw_ver_num { - u8 major /* Firmware major version number */; - u8 minor /* Firmware minor version number */; - u8 rev /* Firmware revision version number */; -/* Firmware engineering version number (for bootleg versions) */ - u8 eng; -}; - -struct fw_ver_info { - __le16 tools_ver /* Tools version number */; - u8 image_id /* FW image ID (e.g. main, l2b, kuku) */; - u8 reserved1; - struct fw_ver_num num /* FW version number */; - __le32 timestamp /* FW Timestamp in unix time (sec. 
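/* An example (a sketch only) built on the init_ets_tc_req/init_ets_req
 * structures completed above: one strict-priority TC plus two WFQ TCs
 * arbitrating 2:1. The use_sp flag of init_ets_tc_req is an assumption
 * here; it sits outside the lines shown in this diff.
 */
static void fill_example_ets_req(struct init_ets_req *p_req, u32 mtu)
{
	p_req->mtu = mtu;			/* max packet size, in bytes */
	p_req->tc_req[0].use_sp = 1;		/* TC0: strict priority */
	p_req->tc_req[1].use_wfq = 1;		/* TC1/TC2: weighted, 2:1 */
	p_req->tc_req[1].weight = 2;
	p_req->tc_req[2].use_wfq = 1;
	p_req->tc_req[2].weight = 1;
}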
since 1970) */; - __le32 reserved2; -}; - -struct fw_info { - struct fw_ver_info ver /* FW version information */; -/* Info regarding the FW asserts section in the Storm RAM */ - struct fw_asserts_ram_section fw_asserts_section; -}; - - -struct fw_info_location { -/* GRC address where the fw_info struct is located. */ - __le32 grc_addr; -/* Size of the fw_info structure (thats located at the grc_addr). */ - __le32 size; -}; - /* * Binary buffer header */ struct bin_buffer_hdr { /* buffer offset in bytes from the beginning of the binary file */ - __le32 offset; - __le32 length /* buffer length in bytes */; + u32 offset; + u32 length /* buffer length in bytes */; }; @@ -103,7 +55,7 @@ enum bin_init_buffer_type { * init array header: raw */ struct init_array_raw_hdr { - __le32 data; + u32 data; /* Init array type, from init_array_types enum */ #define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF #define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0 @@ -116,7 +68,7 @@ struct init_array_raw_hdr { * init array header: standard */ struct init_array_standard_hdr { - __le32 data; + u32 data; /* Init array type, from init_array_types enum */ #define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF #define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0 @@ -129,7 +81,7 @@ struct init_array_standard_hdr { * init array header: zipped */ struct init_array_zipped_hdr { - __le32 data; + u32 data; /* Init array type, from init_array_types enum */ #define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF #define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0 @@ -142,7 +94,7 @@ struct init_array_zipped_hdr { * init array header: pattern */ struct init_array_pattern_hdr { - __le32 data; + u32 data; /* Init array type, from init_array_types enum */ #define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF #define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0 @@ -223,14 +175,14 @@ enum init_array_types { * init operation: callback */ struct init_callback_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_CALLBACK_OP_OP_MASK 0xF #define INIT_CALLBACK_OP_OP_SHIFT 0 #define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF #define INIT_CALLBACK_OP_RESERVED_SHIFT 4 - __le16 callback_id /* Callback ID */; - __le16 block_id /* Blocks ID */; + u16 callback_id /* Callback ID */; + u16 block_id /* Blocks ID */; }; @@ -238,7 +190,7 @@ struct init_callback_op { * init operation: delay */ struct init_delay_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_DELAY_OP_OP_MASK 0xF #define INIT_DELAY_OP_OP_SHIFT 0 @@ -252,7 +204,7 @@ struct init_delay_op { * init operation: if_mode */ struct init_if_mode_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_IF_MODE_OP_OP_MASK 0xF #define INIT_IF_MODE_OP_OP_SHIFT 0 @@ -261,9 +213,8 @@ struct init_if_mode_op { /* Commands to skip if the modes dont match */ #define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF #define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16 - __le16 reserved2; -/* offset (in bytes) in modes expression buffer */ - __le16 modes_buf_offset; + u16 reserved2; + u16 modes_buf_offset /* offset (in bytes) in modes expression buffer */; }; @@ -271,7 +222,7 @@ struct init_if_mode_op { * init operation: if_phase */ struct init_if_phase_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_IF_PHASE_OP_OP_MASK 0xF #define INIT_IF_PHASE_OP_OP_SHIFT 0 @@ -283,7 +234,7 @@ struct init_if_phase_op { /* Commands to skip if the phases dont match */ #define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF #define 
INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 - __le32 phase_data; + u32 phase_data; #define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */ #define INIT_IF_PHASE_OP_PHASE_SHIFT 0 #define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF @@ -308,21 +259,21 @@ enum init_mode_ops { * init operation: raw */ struct init_raw_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_RAW_OP_OP_MASK 0xF #define INIT_RAW_OP_OP_SHIFT 0 #define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */ #define INIT_RAW_OP_PARAM1_SHIFT 4 - __le32 param2 /* Init param 2 */; + u32 param2 /* Init param 2 */; }; /* * init array params */ struct init_op_array_params { - __le16 size /* array size in dwords */; - __le16 offset /* array start offset in dwords */; + u16 size /* array size in dwords */; + u16 offset /* array start offset in dwords */; }; /* @@ -330,11 +281,11 @@ struct init_op_array_params { */ union init_write_args { /* value to write, used when init source is INIT_SRC_INLINE */ - __le32 inline_val; + u32 inline_val; /* number of zeros to write, used when init source is INIT_SRC_ZEROS */ - __le32 zeros_count; + u32 zeros_count; /* array offset to write, used when init source is INIT_SRC_ARRAY */ - __le32 array_offset; + u32 array_offset; /* runtime array params to write, used when init source is INIT_SRC_RUNTIME */ struct init_op_array_params runtime; }; @@ -343,7 +294,7 @@ union init_write_args { * init operation: write */ struct init_write_op { - __le32 data; + u32 data; /* init operation, from init_op_types enum */ #define INIT_WRITE_OP_OP_MASK 0xF #define INIT_WRITE_OP_OP_SHIFT 0 @@ -365,7 +316,7 @@ struct init_write_op { * init operation: read */ struct init_read_op { - __le32 op_data; + u32 op_data; /* init operation, from init_op_types enum */ #define INIT_READ_OP_OP_MASK 0xF #define INIT_READ_OP_OP_SHIFT 0 @@ -378,7 +329,7 @@ struct init_read_op { #define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF #define INIT_READ_OP_ADDRESS_SHIFT 9 /* expected polling value, used only when polling is done */ - __le32 expected_val; + u32 expected_val; }; /* @@ -444,11 +395,11 @@ enum init_source_types { * Internal RAM Offsets macro data */ struct iro { - __le32 base /* RAM field offset */; - __le16 m1 /* multiplier 1 */; - __le16 m2 /* multiplier 2 */; - __le16 m3 /* multiplier 3 */; - __le16 size /* RAM field size */; + u32 base /* RAM field offset */; + u16 m1 /* multiplier 1 */; + u16 m2 /* multiplier 2 */; + u16 m3 /* multiplier 3 */; + u16 size /* RAM field size */; }; #endif /* __ECORE_HSI_INIT_TOOL__ */ diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c index 84f273b0..51bba27e 100644 --- a/drivers/net/qede/base/ecore_hw.c +++ b/drivers/net/qede/base/ecore_hw.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
+ * www.cavium.com */ #include "bcm_osal.h" @@ -38,6 +36,12 @@ struct ecore_ptt_pool { struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM]; }; +void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn) +{ + OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool); + p_hwfn->p_ptt_pool = OSAL_NULL; +} + enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn) { struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev, @@ -65,10 +69,12 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn) p_hwfn->p_ptt_pool = p_pool; #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) { + __ecore_ptt_pool_free(p_hwfn); + return ECORE_NOMEM; + } #endif OSAL_SPIN_LOCK_INIT(&p_pool->lock); - return ECORE_SUCCESS; } @@ -89,7 +95,7 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn) if (p_hwfn->p_ptt_pool) OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock); #endif - OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool); + __ecore_ptt_pool_free(p_hwfn); } struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn) @@ -569,7 +575,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn) *p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32)); if (*p_comp == OSAL_NULL) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `p_completion_word'\n"); goto err; } @@ -578,7 +584,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn) *p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(struct dmae_cmd)); if (*p_cmd == OSAL_NULL) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct dmae_cmd'\n"); goto err; } @@ -587,12 +593,13 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn) *p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32) * DMAE_MAX_RW_SIZE); if (*p_buff == OSAL_NULL) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `intermediate_buffer'\n"); goto err; } - p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id; + p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id; + p_hwfn->dmae_info.b_mem_ready = true; return ECORE_SUCCESS; err: @@ -604,8 +611,9 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn) { dma_addr_t p_phys; - /* Just make sure no one is in the middle */ - OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); + p_hwfn->dmae_info.b_mem_ready = false; + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) { p_phys = p_hwfn->dmae_info.completion_word_phys_addr; @@ -630,8 +638,6 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn) p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE); p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL; } - - OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); } static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn) @@ -777,6 +783,15 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn, enum _ecore_status_t ecore_status = ECORE_SUCCESS; u32 offset = 0; + if (!p_hwfn->dmae_info.b_mem_ready) { + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n", + (unsigned long)src_addr, src_type, + (unsigned long)dst_addr, dst_type, + size_in_dwords); + return ECORE_NOMEM; + } + if (p_hwfn->p_dev->recov_in_prog) { DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Recovery is in progress. 
Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n", @@ -870,7 +885,7 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn, OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_dmae_params)); params.flags = flags; - OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr, grc_addr_in_dw, @@ -878,7 +893,7 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn, ECORE_DMAE_ADDRESS_GRC, size_in_dwords, ¶ms); - OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); return rc; } @@ -896,14 +911,14 @@ ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn, OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_dmae_params)); params.flags = flags; - OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw, dest_addr, ECORE_DMAE_ADDRESS_GRC, ECORE_DMAE_ADDRESS_HOST_VIRT, size_in_dwords, ¶ms); - OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); return rc; } @@ -917,7 +932,7 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn, { enum _ecore_status_t rc; - OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr, dest_addr, @@ -925,7 +940,7 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn, ECORE_DMAE_ADDRESS_HOST_PHYS, size_in_dwords, p_params); - OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); return rc; } @@ -944,3 +959,74 @@ void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn, OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type); } + +enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + const char *phase) +{ + u32 size = OSAL_PAGE_SIZE / 2, val; + struct ecore_dmae_params params; + enum _ecore_status_t rc = ECORE_SUCCESS; + dma_addr_t p_phys; + void *p_virt; + u32 *p_tmp; + + p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, 2 * size); + if (!p_virt) { + DP_NOTICE(p_hwfn, false, + "DMAE sanity [%s]: failed to allocate memory\n", + phase); + return ECORE_NOMEM; + } + + /* Fill the bottom half of the allocated memory with a known pattern */ + for (p_tmp = (u32 *)p_virt; + p_tmp < (u32 *)((u8 *)p_virt + size); + p_tmp++) { + /* Save the address itself as the value */ + val = (u32)(osal_uintptr_t)p_tmp; + *p_tmp = val; + } + + /* Zero the top half of the allocated memory */ + OSAL_MEM_ZERO((u8 *)p_virt + size, size); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "DMAE sanity [%s]: src_addr={phys 0x%lx, virt %p}, dst_addr={phys 0x%lx, virt %p}, size 0x%x\n", + phase, (unsigned long)p_phys, p_virt, + (unsigned long)(p_phys + size), + (u8 *)p_virt + size, size); + + OSAL_MEMSET(¶ms, 0, sizeof(params)); + rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size, + size / 4 /* size_in_dwords */, ¶ms); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "DMAE sanity [%s]: ecore_dmae_host2host() failed. 
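/* The DMAE sanity test above uses a self-describing pattern: each source
 * dword holds its own address, so after the copy every destination dword
 * must equal its own address minus the copy distance. A host-only
 * miniature of that check, with OSAL_MEMCPY standing in for the DMAE
 * engine (illustration only, not driver code):
 */
static enum _ecore_status_t dmae_pattern_check_sw(u32 *buf, u32 size_bytes)
{
	u32 half_dwords = size_bytes / 8, i;
	u32 *dst = buf + half_dwords;

	for (i = 0; i < half_dwords; i++)
		buf[i] = (u32)(osal_uintptr_t)&buf[i];	/* address-as-value */

	OSAL_MEMCPY(dst, buf, size_bytes / 2);		/* the "DMAE" step */

	for (i = 0; i < half_dwords; i++)
		if (dst[i] != (u32)(osal_uintptr_t)&dst[i] - size_bytes / 2)
			return ECORE_UNKNOWN_ERROR;	/* pattern mismatch */

	return ECORE_SUCCESS;
}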
rc = %d.\n", + phase, rc); + goto out; + } + + /* Verify that the top half of the allocated memory has the pattern */ + for (p_tmp = (u32 *)((u8 *)p_virt + size); + p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); + p_tmp++) { + /* The corresponding address in the bottom half */ + val = (u32)(osal_uintptr_t)p_tmp - size; + + if (*p_tmp != val) { + DP_NOTICE(p_hwfn, false, + "DMAE sanity [%s]: addr={phys 0x%lx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n", + phase, + (unsigned long)p_phys + + ((u8 *)p_tmp - (u8 *)p_virt), + p_tmp, *p_tmp, val); + rc = ECORE_UNKNOWN_ERROR; + goto out; + } + } + +out: + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size); + return rc; +} diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h index 0b9814f5..394207eb 100644 --- a/drivers/net/qede/base/ecore_hw.h +++ b/drivers/net/qede/base/ecore_hw.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_HW_H__ @@ -255,4 +253,8 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev, void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type); +enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + const char *phase); + #endif /* __ECORE_HW_H__ */ diff --git a/drivers/net/qede/base/ecore_hw_defs.h b/drivers/net/qede/base/ecore_hw_defs.h index 4456af43..b8c2686f 100644 --- a/drivers/net/qede/base/ecore_hw_defs.h +++ b/drivers/net/qede/base/ecore_hw_defs.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef _ECORE_IGU_DEF_H_ diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c index 1da80a65..b8496cb2 100644 --- a/drivers/net/qede/base/ecore_init_fw_funcs.c +++ b/drivers/net/qede/base/ecore_init_fw_funcs.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include "bcm_osal.h" @@ -76,12 +74,12 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { #define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) /* RL increment value - rate is specified in mbps. the factor of 1.01 was -* added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC -* 2544 test. In this scenario the PF RL was reducing the line rate to 99% -* although the credit increment value was the correct one and FW calculated -* correct packet sizes. The reason for the inaccuracy of the RL is unknown at -* this point. -*/ + * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC + * 2544 test. In this scenario the PF RL was reducing the line rate to 99% + * although the credit increment value was the correct one and FW calculated + * correct packet sizes. The reason for the inaccuracy of the RL is unknown at + * this point. + */ #define QM_RL_INC_VAL(rate) \ OSAL_MAX_T(u32, (u32)(((rate ? 
rate : 100000) * QM_RL_PERIOD * 101) / \ (8 * 100)), 1) @@ -182,7 +180,7 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \ ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24)) #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \ - (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + (pq_id) * 4) + (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4) /******************** INTERNAL IMPLEMENTATION *********************/ @@ -421,9 +419,9 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn, /* Prepare Tx PQ mapping runtime init values for the specified PF */ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, - u8 port_id, u8 pf_id, u8 max_phys_tcs_per_port, + bool is_pf_loading, u32 num_pf_cids, u32 num_vf_cids, u16 start_pq, @@ -437,7 +435,7 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, /* A bit per Tx PQ indicating if the PQ is associated with a VF */ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE; - u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group; + u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group; u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb; num_pqs = num_pf_pqs + num_vf_pqs; @@ -467,11 +465,11 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, bool is_vf_pq, rl_valid; u16 first_tx_pq_id; - ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, + ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id, + pq_params[i].tc_id, max_phys_tcs_per_port); is_vf_pq = (i >= num_pf_pqs); - rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id < - max_qm_global_rls; + rl_valid = pq_params[i].rl_valid > 0; /* Update first Tx PQ of VPORT/TC */ vport_id_in_pf = pq_params[i].vport_id - start_vport; @@ -492,28 +490,38 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, } /* Check RL ID */ - if (pq_params[i].rl_valid && pq_params[i].vport_id >= - max_qm_global_rls) + if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) { DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter config\n"); + rl_valid = false; + } /* Prepare PQ map entry */ struct qm_rf_pq_map_e4 tx_pq_map; + QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group); - /* Set base address */ + /* Set PQ base address */ STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb); + /* Clear PQ pointer table entry (64 bit) */ + if (is_pf_loading) + for (j = 0; j < 2; j++) + STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET + + (pq_id * 2) + j, 0); + /* Write PQ info to RAM */ if (WRITE_PQ_INFO_TO_RAM != 0) { u32 pq_info = 0; + pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id, - pq_params[i].tc_id, port_id, + pq_params[i].tc_id, + pq_params[i].port_id, rl_valid ? 1 : 0, rl_valid ? 
pq_params[i].vport_id : 0); ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), @@ -540,12 +548,13 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, /* Prepare Other PQ mapping runtime init values for the specified PF */ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, + bool is_pf_loading, u32 num_pf_cids, u32 num_tids, u32 base_mem_addr_4kb) { u32 pq_size, pq_mem_4kb, mem_addr_4kb; - u16 i, pq_id, pq_group; + u16 i, j, pq_id, pq_group; /* A single other PQ group is used in each PF, where PQ group i is used * in PF i. @@ -563,11 +572,19 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size)); - /* Set base address */ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) { + /* Set PQ base address */ STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, mem_addr_4kb); + + /* Clear PQ pointer table entry */ + if (is_pf_loading) + for (j = 0; j < 2; j++) + STORE_RT_REG(p_hwfn, + QM_REG_PTRTBLOTHER_RT_OFFSET + + (pq_id * 2) + j, 0); + mem_addr_4kb += pq_mem_4kb; } } @@ -576,7 +593,6 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn, * Return -1 on error. */ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn, - u8 port_id, u8 pf_id, u16 pf_wfq, u8 max_phys_tcs_per_port, @@ -595,7 +611,8 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn, } for (i = 0; i < num_tx_pqs; i++) { - ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, + ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id, + pq_params[i].tc_id, max_phys_tcs_per_port); crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET : @@ -604,12 +621,12 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn, (pf_id % MAX_NUM_PFS_BB); OVERWRITE_RT_REG(p_hwfn, crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id, - QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, - inc_val); } + STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + + pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val); + return 0; } @@ -820,9 +837,9 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn, int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, - u8 port_id, u8 pf_id, u8 max_phys_tcs_per_port, + bool is_pf_loading, u32 num_pf_cids, u32 num_vf_cids, u32 num_tids, @@ -850,20 +867,21 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn, /* Map Other PQs (if any) */ #if QM_OTHER_PQS_PER_PF > 0 - ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0); + ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids, + num_tids, 0); #endif /* Map Tx PQs */ - ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id, - max_phys_tcs_per_port, num_pf_cids, num_vf_cids, + ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port, + is_pf_loading, num_pf_cids, num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs, start_vport, other_mem_size_4kb, pq_params, vport_params); /* Init PF WFQ */ if (pf_wfq) - if (ecore_pf_wfq_rt_init - (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port, - num_pf_pqs + num_vf_pqs, pq_params)) + if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq, + max_phys_tcs_per_port, + num_pf_pqs + num_vf_pqs, pq_params)) return -1; /* Init PF RL */ @@ -1419,7 +1437,9 @@ void 
ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType) #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \ (var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0)) -#define PRS_ETH_TUNN_FIC_FORMAT -188897008 +#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008 +#define PRS_ETH_OUTPUT_FORMAT -46832 + void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 dest_port) { @@ -1444,9 +1464,14 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn, PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT, vxlan_enable); ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); - if (reg_val) { - ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, - (u32)PRS_ETH_TUNN_FIC_FORMAT); + if (reg_val) { /* TODO: handle E5 init */ + reg_val = ecore_rd(p_hwfn, p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } /* Update NIG register */ @@ -1476,9 +1501,14 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT, ip_gre_enable); ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); - if (reg_val) { - ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, - (u32)PRS_ETH_TUNN_FIC_FORMAT); + if (reg_val) { /* TODO: handle E5 init */ + reg_val = ecore_rd(p_hwfn, p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } /* Update NIG register */ @@ -1526,9 +1556,14 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT, ip_geneve_enable); ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); - if (reg_val) { - ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, - (u32)PRS_ETH_TUNN_FIC_FORMAT); + if (reg_val) { /* TODO: handle E5 init */ + reg_val = ecore_rd(p_hwfn, p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } /* Update NIG register */ @@ -1548,6 +1583,36 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn, ip_geneve_enable ? 
1 : 0); } +#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4 +#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512 + +void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool enable) +{ + u32 reg_val, cfg_mask; + + /* read PRS config register */ + reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO); + + /* set VXLAN_NO_L2_ENABLE mask */ + cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET); + + if (enable) { + /* set VXLAN_NO_L2_ENABLE flag */ + reg_val |= cfg_mask; + + /* update PRS FIC register */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT); + } else { + /* clear VXLAN_NO_L2_ENABLE flag */ + reg_val &= ~cfg_mask; + } + + /* write PRS config register */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val); +} #define T_ETH_PACKET_ACTION_GFT_EVENTID 23 #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272 @@ -1664,6 +1729,10 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn, ram_line_lo = 0; ram_line_hi = 0; + /* Tunnel type */ + SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); + if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) { SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1); SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1); @@ -1675,9 +1744,14 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn, SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1); - } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) { + } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) { SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) { + SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { + SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); } ecore_wr(p_hwfn, p_ptt, @@ -1921,3 +1995,53 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn, ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); } + +#define RSS_IND_TABLE_BASE_ADDR 4112 +#define RSS_IND_TABLE_VPORT_SIZE 16 +#define RSS_IND_TABLE_ENTRY_PER_LINE 8 + +/* Update RSS indirection table entry. 
*/ +void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 rss_id, + u8 ind_table_index, + u16 ind_table_value) +{ + u32 cnt, rss_addr; + u32 *reg_val; + u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE]; + u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE]; + + /* get entry address */ + rss_addr = RSS_IND_TABLE_BASE_ADDR + + RSS_IND_TABLE_VPORT_SIZE * rss_id + + ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE; + + /* prepare update command */ + ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE; + + for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++) { + if (cnt == ind_table_index) { + rss_ind_entry[cnt] = ind_table_value; + rss_ind_mask[cnt] = 0xFFFF; + } else { + rss_ind_entry[cnt] = 0; + rss_ind_mask[cnt] = 0; + } + } + + /* Update entry in HW*/ + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr); + + reg_val = (u32 *)rss_ind_mask; + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]); + + reg_val = (u32 *)rss_ind_entry; + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]); +} diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h index ab560e59..1024bb26 100644 --- a/drivers/net/qede/base/ecore_init_fw_funcs.h +++ b/drivers/net/qede/base/ecore_init_fw_funcs.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef _INIT_FW_FUNCS_H @@ -61,9 +59,10 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn, * * @param p_hwfn * @param p_ptt - ptt window used for writing the registers - * @param port_id - port ID * @param pf_id - PF ID * @param max_phys_tcs_per_port - max number of physical TCs per port in HW + * @param is_pf_loading - indicates if the PF is currently loading, + * i.e. it has no allocated QM resources. * @param num_pf_cids - number of connections used by this PF * @param num_vf_cids - number of connections used by VFs of this PF * @param num_tids - number of tasks used by this PF @@ -87,9 +86,9 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn, */ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, - u8 port_id, u8 pf_id, u8 max_phys_tcs_per_port, + bool is_pf_loading, u32 num_pf_cids, u32 num_vf_cids, u32 num_tids, @@ -259,6 +258,16 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn, struct init_brb_ram_req *req); #endif /* UNUSED_HSI_FUNC */ +/** + * @brief ecore_set_vxlan_no_l2_enable - enable or disable VXLAN no L2 parsing + * + * @param p_ptt - ptt window used for writing the registers. + * @param enable - VXLAN no L2 enable flag. 
+ */ +void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool enable); + #ifndef UNUSED_HSI_FUNC /** * @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to @@ -462,4 +471,22 @@ void ecore_memset_session_ctx(void *p_ctx_mem, void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); + +/** + * @brief ecore_update_eth_rss_ind_table_entry - Update RSS indirection table + * entry. + * The function must run in exclusive mode to prevent wrong RSS configuration. + * + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers. + * @param rss_id - RSS engine ID. + * @param ind_table_index - RSS indirect table index. + * @param ind_table_value - RSS indirect table new value. + */ +void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 rss_id, + u8 ind_table_index, + u16 ind_table_value); + #endif diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c index 91633c11..b7636f36 100644 --- a/drivers/net/qede/base/ecore_init_ops.c +++ b/drivers/net/qede/base/ecore_init_ops.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ /* include the precompiled configuration values - only once */ @@ -389,23 +387,29 @@ static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn, } if (i == ECORE_INIT_MAX_POLL_COUNT) - DP_ERR(p_hwfn, - "Timeout when polling reg: 0x%08x [ Waiting-for: %08x" - " Got: %08x (comparsion %08x)]\n", + DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n", addr, OSAL_LE32_TO_CPU(cmd->expected_val), val, OSAL_LE32_TO_CPU(cmd->op_data)); } -/* init_ops callbacks entry point. - * OSAL_UNUSED is temporary used to avoid unused-parameter compilation warnings. - * Should be removed when the function is actually used. - */ -static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn, - struct ecore_ptt OSAL_UNUSED * p_ptt, - struct init_callback_op OSAL_UNUSED * p_cmd) +/* init_ops callbacks entry point */ +static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_callback_op *p_cmd) { - DP_NOTICE(p_hwfn, true, - "Currently init values have no need of callbacks\n"); + enum _ecore_status_t rc; + + switch (p_cmd->callback_id) { + case DMAE_READY_CB: + rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase"); + break; + default: + DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n", + p_cmd->callback_id); + return ECORE_INVAL; + } + + return rc; } static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn, @@ -513,7 +517,7 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn, break; case INIT_OP_CALLBACK: - ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); + rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); break; } diff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h index e293a4a3..de7846d4 100644 --- a/drivers/net/qede/base/ecore_init_ops.h +++ b/drivers/net/qede/base/ecore_init_ops.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. 
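For reference, the indirection-table addressing used by ecore_update_eth_rss_ind_table_entry() above is plain index arithmetic: each RSS engine owns RSS_IND_TABLE_VPORT_SIZE RAM lines of RSS_IND_TABLE_ENTRY_PER_LINE 16-bit entries, so a divide selects the line and a modulo selects the slot whose mask word becomes 0xFFFF. A minimal standalone sketch with the constants from the patch (the main() harness is illustrative only, not part of ecore):

    #include <stdint.h>
    #include <stdio.h>

    #define RSS_IND_TABLE_BASE_ADDR       4112
    #define RSS_IND_TABLE_VPORT_SIZE      16
    #define RSS_IND_TABLE_ENTRY_PER_LINE  8

    int main(void)
    {
        uint8_t rss_id = 2, ind_table_index = 13;

        /* The divide picks the RAM line for this engine/entry pair... */
        uint32_t rss_addr = RSS_IND_TABLE_BASE_ADDR +
                            RSS_IND_TABLE_VPORT_SIZE * rss_id +
                            ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE;
        /* ...and the modulo picks the slot within the line; only that
         * slot gets mask 0xFFFF, so the other seven entries survive
         * the masked RAM write.
         */
        uint32_t slot = ind_table_index % RSS_IND_TABLE_ENTRY_PER_LINE;

        printf("RAM line %u, slot %u\n", rss_addr, slot); /* 4145, 5 */
        return 0;
    }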
- * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_INIT_OPS__ diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c index e6cef85b..4c271d35 100644 --- a/drivers/net/qede/base/ecore_int.c +++ b/drivers/net/qede/base/ecore_int.c @@ -1,11 +1,11 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ +#include + #include "bcm_osal.h" #include "ecore.h" #include "ecore_spq.h" @@ -231,15 +231,19 @@ static const char *grc_timeout_attn_master_to_str(u8 master) static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn) { + enum _ecore_status_t rc = ECORE_SUCCESS; u32 tmp, tmp2; /* We've already cleared the timeout interrupt register, so we learn - * of interrupts via the validity register + * of interrupts via the validity register. + * Any attention which is not for a timeout event is treated as fatal. */ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); - if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) + if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) { + rc = ECORE_INVAL; goto out; + } /* Read the GRC timeout information */ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, @@ -263,11 +267,11 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn) (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >> ECORE_GRC_ATTENTION_VF_SHIFT); -out: - /* Regardles of anything else, clean the validity bit */ + /* Clean the validity bit */ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); - return ECORE_SUCCESS; +out: + return rc; } #define ECORE_PGLUE_ATTENTION_VALID (1 << 29) @@ -285,9 +289,11 @@ out: #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23) enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt) + struct ecore_ptt *p_ptt, + bool is_hw_init) { u32 tmp; + char str[512] = {0}; tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); if (tmp & ECORE_PGLUE_ATTENTION_VALID) { @@ -299,9 +305,8 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, PGLUE_B_REG_TX_ERR_WR_ADD_63_32); details = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS); - - DP_NOTICE(p_hwfn, false, - "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + OSAL_SNPRINTF(str, 512, + "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", addr_hi, addr_lo, details, (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> @@ -318,6 +323,10 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, 1 : 0), (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
1 : 0)); + if (is_hw_init) + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "%s", str); + else + DP_NOTICE(p_hwfn, false, "%s", str); } tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); @@ -393,7 +402,7 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn) { - return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); + return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); } static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn) @@ -1104,9 +1113,9 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn, p_aeu->bit_name, num); else - OSAL_STRNCPY(bit_name, - p_aeu->bit_name, - 30); + strlcpy(bit_name, + p_aeu->bit_name, + sizeof(bit_name)); /* We now need to pass bitmask in its * correct position. @@ -1406,8 +1415,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn, /* SB struct */ p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb)); if (!p_sb) { - DP_NOTICE(p_dev, true, - "Failed to allocate `struct ecore_sb_attn_info'\n"); + DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n"); return ECORE_NOMEM; } @@ -1415,8 +1423,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn, p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, SB_ATTN_ALIGNED_SIZE(p_hwfn)); if (!p_virt) { - DP_NOTICE(p_dev, true, - "Failed to allocate status block (attentions)\n"); + DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n"); OSAL_FREE(p_dev, p_sb); return ECORE_NOMEM; } @@ -1795,8 +1802,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb)); if (!p_sb) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate `struct ecore_sb_info'\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n"); return ECORE_NOMEM; } @@ -1804,7 +1810,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, SB_ALIGNED_SIZE(p_hwfn)); if (!p_virt) { - DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n"); OSAL_FREE(p_hwfn->p_dev, p_sb); return ECORE_NOMEM; } diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h index 563051c3..041240d7 100644 --- a/drivers/net/qede/base/ecore_int.h +++ b/drivers/net/qede/base/ecore_int.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_INT_H__ @@ -256,6 +254,7 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn, #endif enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt); + struct ecore_ptt *p_ptt, + bool is_hw_init); #endif /* __ECORE_INT_H__ */ diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h index 24cdf5ed..aeaf469e 100644 --- a/drivers/net/qede/base/ecore_int_api.h +++ b/drivers/net/qede/base/ecore_int_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. 
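A note on the strlcpy() switch in the deassertion handler above: unlike the OSAL_STRNCPY it replaces, strlcpy() truncates to the destination size minus one and always writes a terminating NUL, so bit_name can no longer be left unterminated by a long attention name. A minimal local sketch of those semantics (illustrative only; the driver links against the libc- or DPDK-provided implementation, not this one):

    #include <stddef.h>
    #include <string.h>

    static size_t sketch_strlcpy(char *dst, const char *src, size_t dsize)
    {
        size_t len = strlen(src);

        if (dsize != 0) {
            size_t n = (len >= dsize) ? dsize - 1 : len;

            memcpy(dst, src, n);
            dst[n] = '\0';  /* unconditional termination */
        }
        return len;  /* full source length, so callers can detect truncation */
    }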
- * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_INT_API_H__ diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h index 218ef50b..29001d71 100644 --- a/drivers/net/qede/base/ecore_iov_api.h +++ b/drivers/net/qede/base/ecore_iov_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_SRIOV_API_H__ @@ -539,6 +537,17 @@ bool ecore_iov_is_valid_vfpf_msg_length(u32 length); */ u32 ecore_iov_pfvf_msg_length(void); +/** + * @brief Returns MAC address if one is configured + * + * @parm p_hwfn + * @parm rel_vf_id + * + * @return OSAL_NULL if mac isn't set; Otherwise, returns MAC. + */ +u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id); + /** * @brief Returns forced MAC address if one is configured * diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h index 360d7f88..05693029 100644 --- a/drivers/net/qede/base/ecore_iro.h +++ b/drivers/net/qede/base/ecore_iro.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __IRO_H__ diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h index 41532eeb..685fa2e8 100644 --- a/drivers/net/qede/base/ecore_iro_values.h +++ b/drivers/net/qede/base/ecore_iro_values.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
+ * www.cavium.com */ #ifndef __IRO_VALUES_H__ @@ -13,9 +11,9 @@ static const struct iro iro_arr[51] = { /* YSTORM_FLOW_CONTROL_MODE_OFFSET */ { 0x0, 0x0, 0x0, 0x0, 0x8}, /* TSTORM_PORT_STAT_OFFSET(port_id) */ - { 0x4cb0, 0x80, 0x0, 0x0, 0x80}, + { 0x4cb8, 0x88, 0x0, 0x0, 0x88}, /* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */ - { 0x6508, 0x20, 0x0, 0x0, 0x20}, + { 0x6530, 0x20, 0x0, 0x0, 0x20}, /* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */ { 0xb00, 0x8, 0x0, 0x0, 0x4}, /* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */ @@ -27,49 +25,49 @@ static const struct iro iro_arr[51] = { /* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) */ { 0x84, 0x8, 0x0, 0x0, 0x2}, /* XSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x4c40, 0x0, 0x0, 0x0, 0x78}, + { 0x4c48, 0x0, 0x0, 0x0, 0x78}, /* YSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x3e10, 0x0, 0x0, 0x0, 0x78}, + { 0x3e38, 0x0, 0x0, 0x0, 0x78}, /* PSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x2b50, 0x0, 0x0, 0x0, 0x78}, + { 0x2b78, 0x0, 0x0, 0x0, 0x78}, /* TSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x4c38, 0x0, 0x0, 0x0, 0x78}, + { 0x4c40, 0x0, 0x0, 0x0, 0x78}, /* MSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x4990, 0x0, 0x0, 0x0, 0x78}, + { 0x4998, 0x0, 0x0, 0x0, 0x78}, /* USTORM_INTEG_TEST_DATA_OFFSET */ - { 0x7f48, 0x0, 0x0, 0x0, 0x78}, + { 0x7f50, 0x0, 0x0, 0x0, 0x78}, /* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */ { 0xa28, 0x8, 0x0, 0x0, 0x8}, /* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */ - { 0x61e8, 0x10, 0x0, 0x0, 0x10}, + { 0x6210, 0x10, 0x0, 0x0, 0x10}, /* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */ { 0xb820, 0x30, 0x0, 0x0, 0x30}, /* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */ - { 0x96b8, 0x30, 0x0, 0x0, 0x30}, + { 0x96c0, 0x30, 0x0, 0x0, 0x30}, /* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */ - { 0x4b60, 0x80, 0x0, 0x0, 0x40}, + { 0x4b68, 0x80, 0x0, 0x0, 0x40}, /* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */ { 0x1f8, 0x4, 0x0, 0x0, 0x4}, /* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */ - { 0x53a0, 0x80, 0x4, 0x0, 0x4}, + { 0x53a8, 0x80, 0x4, 0x0, 0x4}, /* MSTORM_TPA_TIMEOUT_US_OFFSET */ - { 0xc7c8, 0x0, 0x0, 0x0, 0x4}, + { 0xc7d0, 0x0, 0x0, 0x0, 0x4}, /* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */ - { 0x4ba0, 0x80, 0x0, 0x0, 0x20}, + { 0x4ba8, 0x80, 0x0, 0x0, 0x20}, /* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */ - { 0x8150, 0x40, 0x0, 0x0, 0x30}, + { 0x8158, 0x40, 0x0, 0x0, 0x30}, /* USTORM_ETH_PF_STAT_OFFSET(pf_id) */ { 0xe770, 0x60, 0x0, 0x0, 0x60}, /* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */ - { 0x2ce8, 0x80, 0x0, 0x0, 0x38}, + { 0x2d10, 0x80, 0x0, 0x0, 0x38}, /* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */ - { 0xf2b0, 0x78, 0x0, 0x0, 0x78}, + { 0xf2b8, 0x78, 0x0, 0x0, 0x78}, /* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */ { 0x1f8, 0x4, 0x0, 0x0, 0x4}, /* TSTORM_ETH_PRS_INPUT_OFFSET */ - { 0xaef8, 0x0, 0x0, 0x0, 0xf0}, + { 0xaf20, 0x0, 0x0, 0x0, 0xf0}, /* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */ - { 0xafe8, 0x8, 0x0, 0x0, 0x8}, + { 0xb010, 0x8, 0x0, 0x0, 0x8}, /* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */ { 0x1f8, 0x8, 0x0, 0x0, 0x8}, /* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */ @@ -81,37 +79,37 @@ static const struct iro iro_arr[51] = { /* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */ { 0x0, 0x8, 0x0, 0x0, 0x8}, /* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */ - { 0x200, 0x18, 0x8, 0x0, 0x8}, + { 0x400, 0x18, 0x8, 0x0, 0x8}, /* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */ { 0xb78, 0x18, 0x8, 0x0, 0x2}, /* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */ - { 0xd878, 0x50, 0x0, 0x0, 0x3c}, + { 0xd898, 0x50, 0x0, 0x0, 0x3c}, /* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */ { 
0x12908, 0x18, 0x0, 0x0, 0x10}, /* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */ { 0x11aa8, 0x40, 0x0, 0x0, 0x18}, /* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */ - { 0xa580, 0x50, 0x0, 0x0, 0x20}, + { 0xa588, 0x50, 0x0, 0x0, 0x20}, /* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */ - { 0x86f8, 0x40, 0x0, 0x0, 0x28}, + { 0x8700, 0x40, 0x0, 0x0, 0x28}, /* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */ - { 0x102f8, 0x18, 0x0, 0x0, 0x10}, + { 0x10300, 0x18, 0x0, 0x0, 0x10}, /* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */ - { 0xde28, 0x48, 0x0, 0x0, 0x38}, + { 0xde48, 0x48, 0x0, 0x0, 0x38}, /* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */ - { 0x10760, 0x20, 0x0, 0x0, 0x20}, + { 0x10768, 0x20, 0x0, 0x0, 0x20}, /* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */ - { 0x2d20, 0x80, 0x0, 0x0, 0x10}, + { 0x2d48, 0x80, 0x0, 0x0, 0x10}, /* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */ - { 0x5020, 0x10, 0x0, 0x0, 0x10}, + { 0x5048, 0x10, 0x0, 0x0, 0x10}, /* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */ - { 0xc9b0, 0x30, 0x0, 0x0, 0x10}, + { 0xc9b8, 0x30, 0x0, 0x0, 0x10}, /* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */ - { 0xeec0, 0x10, 0x0, 0x0, 0x10}, + { 0xed90, 0x10, 0x0, 0x0, 0x10}, /* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) */ - { 0xa398, 0x10, 0x0, 0x0, 0x10}, + { 0xa520, 0x10, 0x0, 0x0, 0x10}, /* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) */ - { 0x13100, 0x8, 0x0, 0x0, 0x8}, + { 0x13108, 0x8, 0x0, 0x0, 0x8}, }; #endif /* __IRO_VALUES_H__ */ diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c index e3afc8a3..d71f4616 100644 --- a/drivers/net/qede/base/ecore_l2.c +++ b/drivers/net/qede/base/ecore_l2.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include "bcm_osal.h" @@ -77,7 +75,8 @@ enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn) } #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock); + if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock)) + return ECORE_NOMEM; #endif return ECORE_SUCCESS; @@ -110,6 +109,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn) break; OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage[i]); + p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL; } #ifdef CONFIG_ECORE_LOCK_ALLOC @@ -119,6 +119,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn) #endif OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage); + p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL; out_l2_info: OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info); @@ -687,7 +688,7 @@ ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod, p_ramrod->common.update_approx_mcast_flg = 1; for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { - u32 *p_bins = (u32 *)p_params->bins; + u32 *p_bins = p_params->bins; p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]); } @@ -1185,11 +1186,20 @@ ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM * *pp_doorbell) { enum _ecore_status_t rc; + u16 pq_id; + + /* TODO - set tc in the pq_params for multi-cos. + * If pacing is enabled then select queue according to + * rate limiter availability otherwise select queue based + * on multi cos. 
+ */ + if (IS_ECORE_PACING(p_hwfn)) + pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, p_cid->rel.queue_id); + else + pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc); - /* TODO - set tc in the pq_params for multi-cos */ - rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, - pbl_addr, pbl_size, - ecore_get_cm_pq_idx_mcos(p_hwfn, tc)); + rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr, + pbl_size, pq_id); if (rc != ECORE_SUCCESS) return rc; @@ -1556,8 +1566,8 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn, enum spq_mode comp_mode, struct ecore_spq_comp_cb *p_comp_data) { - unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; + u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; struct ecore_spq_entry *p_ent = OSAL_NULL; struct ecore_sp_init_data init_data; u8 abs_vport_id = 0; @@ -1596,8 +1606,7 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn, /* explicitly clear out the entire vector */ OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0, sizeof(p_ramrod->approx_mcast.bins)); - OSAL_MEMSET(bins, 0, sizeof(unsigned long) * - ETH_MULTICAST_MAC_BINS_IN_REGS); + OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); /* filter ADD op is explicit set op and it removes * any existing filters for the vport. */ @@ -1606,16 +1615,15 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn, u32 bit; bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); - OSAL_SET_BIT(bit, bins); + bins[bit / 32] |= 1 << (bit % 32); } /* Convert to correct endianity */ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { struct vport_update_ramrod_mcast *p_ramrod_bins; - u32 *p_bins = (u32 *)bins; p_ramrod_bins = &p_ramrod->approx_mcast; - p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]); + p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]); } } @@ -1945,6 +1953,11 @@ static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn, p_ah->tx_1519_to_max_byte_packets = port_stats.eth.u1.ah1.t1519_to_max; } + + p_common->link_change_count = ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, + link_change_count)); } void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn, @@ -2061,11 +2074,14 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev) /* PORT statistics are not necessarily reset, so we need to * read and create a baseline for future statistics. + * Link change stat is maintained by MFW, return its value as is. */ if (!p_dev->reset_stats) DP_INFO(p_dev, "Reset stats not allocated\n"); - else + else { _ecore_get_vport_stats(p_dev, p_dev->reset_stats); + p_dev->reset_stats->common.link_change_count = 0; + } } void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn, @@ -2150,7 +2166,7 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn, p_ramrod->flow_id_valid = 0; p_ramrod->flow_id = 0; - p_ramrod->vport_id = abs_vport_id; + p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id); p_ramrod->filter_action = b_is_add ? 
GFT_ADD_FILTER : GFT_DELETE_FILTER; @@ -2267,3 +2283,22 @@ out: return rc; } + +enum _ecore_status_t +ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_queue_cid *p_cid, u32 rate) +{ + struct ecore_mcp_link_state *p_link; + u8 vport; + + vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id); + p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output; + + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "About to rate limit qm vport %d for queue %d with rate %d\n", + vport, p_cid->rel.queue_id, rate); + + return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate, + p_link->speed); +} diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h index f4212cf2..8fa40302 100644 --- a/drivers/net/qede/base/ecore_l2.h +++ b/drivers/net/qede/base/ecore_l2.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_L2_H__ diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h index ed9837bf..575b9e3a 100644 --- a/drivers/net/qede/base/ecore_l2_api.h +++ b/drivers/net/qede/base/ecore_l2_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_L2_API_H__ @@ -332,7 +330,7 @@ struct ecore_sp_vport_update_params { u8 anti_spoofing_en; u8 update_accept_any_vlan_flg; u8 accept_any_vlan; - unsigned long bins[8]; + u32 bins[8]; struct ecore_rss_params *rss_params; struct ecore_filter_accept_flags accept_flags; struct ecore_sge_tpa_params *sge_tpa_params; diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c index 8edd2e96..ea14c172 100644 --- a/drivers/net/qede/base/ecore_mcp.c +++ b/drivers/net/qede/base/ecore_mcp.c @@ -1,14 +1,13 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
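On the bins conversion visible in the hunks above (unsigned long bins[8] becomes u32 bins[8]): with fixed 32-bit words the bin-to-word mapping no longer depends on sizeof(unsigned long), so the per-word OSAL_CPU_TO_LE32() pass is well defined on 32- and 64-bit builds alike. A short sketch of the resulting bit placement (the helper name is hypothetical; the statement mirrors the one in ecore_sp_eth_filter_mcast()):

    #include <stdint.h>

    #define BINS_IN_REGS 8  /* stands in for ETH_MULTICAST_MAC_BINS_IN_REGS */

    /* Mirrors bins[bit / 32] |= 1 << (bit % 32) from the patch; word index
     * and bit offset are now independent of the native word size. */
    static void set_mcast_bin(uint32_t bins[BINS_IN_REGS], uint32_t bit)
    {
        bins[bit / 32] |= 1u << (bit % 32);
    }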
+ * www.cavium.com */ #include "bcm_osal.h" #include "ecore.h" #include "ecore_status.h" +#include "nvm_cfg.h" #include "ecore_mcp.h" #include "mcp_public.h" #include "reg_addr.h" @@ -240,15 +239,24 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn, /* Allocate mcp_info structure */ p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, - sizeof(*p_hwfn->mcp_info)); - if (!p_hwfn->mcp_info) - goto err; + sizeof(*p_hwfn->mcp_info)); + if (!p_hwfn->mcp_info) { + DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n"); + return ECORE_NOMEM; + } p_info = p_hwfn->mcp_info; /* Initialize the MFW spinlocks */ #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock); - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) { + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); + return ECORE_NOMEM; + } + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) { + OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); + return ECORE_NOMEM; + } #endif OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock); OSAL_SPIN_LOCK_INIT(&p_info->link_lock); @@ -272,7 +280,7 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn, return ECORE_SUCCESS; err: - DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n"); ecore_mcp_free(p_hwfn); return ECORE_NOMEM; } @@ -593,7 +601,7 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, /* MCP not initialized */ if (!ecore_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n"); + DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n"); return ECORE_BUSY; } @@ -2121,19 +2129,20 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *p_media_type) { + enum _ecore_status_t rc = ECORE_SUCCESS; /* TODO - Add support for VFs */ if (IS_VF(p_hwfn->p_dev)) return ECORE_INVAL; if (!ecore_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n"); + DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); return ECORE_BUSY; } if (!p_ptt) { *p_media_type = MEDIA_UNSPECIFIED; - return ECORE_INVAL; + rc = ECORE_INVAL; } else { *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + @@ -2144,6 +2153,197 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, return ECORE_SUCCESS; } +enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_tranceiver_type) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* TODO - Add support for VFs */ + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + if (!ecore_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); + return ECORE_BUSY; + } + if (!p_ptt) { + *p_tranceiver_type = ETH_TRANSCEIVER_TYPE_NONE; + rc = ECORE_INVAL; + } else { + *p_tranceiver_type = ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + transceiver_data)); + } + + return rc; +} + +static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type) +{ + if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) && + ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) && + (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE)) + return 1; + + return 0; +} + +enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_speed_mask) +{ + u32 transceiver_data, transceiver_type, transceiver_state; + 
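GET_MFW_FIELD() used just below is the usual mask-and-shift accessor over an MFW register word; the real ETH_TRANSCEIVER_STATE and ETH_TRANSCEIVER_TYPE masks live in the HSI headers. A generic sketch of the pattern, with placeholder mask and offset values rather than the real ones:

    #include <stdint.h>

    #define XCVR_STATE_MASK    0x000000ffu  /* placeholder, not the HSI value */
    #define XCVR_STATE_OFFSET  0
    #define XCVR_TYPE_MASK     0x0000ff00u  /* placeholder, not the HSI value */
    #define XCVR_TYPE_OFFSET   8

    /* Same shape as GET_MFW_FIELD(word, FIELD): mask, then shift down. */
    static uint32_t get_field(uint32_t word, uint32_t mask, uint32_t offset)
    {
        return (word & mask) >> offset;
    }

    /* e.g. state = get_field(transceiver_data, XCVR_STATE_MASK,
     *                        XCVR_STATE_OFFSET); */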
+ ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data); + + transceiver_state = GET_MFW_FIELD(transceiver_data, + ETH_TRANSCEIVER_STATE); + + transceiver_type = GET_MFW_FIELD(transceiver_data, + ETH_TRANSCEIVER_TYPE); + + if (is_transceiver_ready(transceiver_state, transceiver_type) == 0) + return ECORE_INVAL; + + switch (transceiver_type) { + case ETH_TRANSCEIVER_TYPE_1G_LX: + case ETH_TRANSCEIVER_TYPE_1G_SX: + case ETH_TRANSCEIVER_TYPE_1G_PCC: + case ETH_TRANSCEIVER_TYPE_1G_ACC: + case ETH_TRANSCEIVER_TYPE_1000BASET: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_10G_SR: + case ETH_TRANSCEIVER_TYPE_10G_LR: + case ETH_TRANSCEIVER_TYPE_10G_LRM: + case ETH_TRANSCEIVER_TYPE_10G_ER: + case ETH_TRANSCEIVER_TYPE_10G_PCC: + case ETH_TRANSCEIVER_TYPE_10G_ACC: + case ETH_TRANSCEIVER_TYPE_4x10G: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + + case ETH_TRANSCEIVER_TYPE_40G_LR4: + case ETH_TRANSCEIVER_TYPE_40G_SR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + + case ETH_TRANSCEIVER_TYPE_100G_AOC: + case ETH_TRANSCEIVER_TYPE_100G_SR4: + case ETH_TRANSCEIVER_TYPE_100G_LR4: + case ETH_TRANSCEIVER_TYPE_100G_ER4: + case ETH_TRANSCEIVER_TYPE_100G_ACC: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + break; + + case ETH_TRANSCEIVER_TYPE_25G_SR: + case ETH_TRANSCEIVER_TYPE_25G_LR: + case ETH_TRANSCEIVER_TYPE_25G_AOC: + case ETH_TRANSCEIVER_TYPE_25G_ACC_S: + case ETH_TRANSCEIVER_TYPE_25G_ACC_M: + case ETH_TRANSCEIVER_TYPE_25G_ACC_L: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + break; + + case ETH_TRANSCEIVER_TYPE_25G_CA_N: + case ETH_TRANSCEIVER_TYPE_25G_CA_S: + case ETH_TRANSCEIVER_TYPE_25G_CA_L: + case ETH_TRANSCEIVER_TYPE_4x25G_CR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_40G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_100G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + + case ETH_TRANSCEIVER_TYPE_XLPPI: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + break; + + case ETH_TRANSCEIVER_TYPE_10G_BASET: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + default: + 
DP_INFO(p_hwfn, "Unknown transcevier type 0x%x\n", + transceiver_type); + *p_speed_mask = 0xff; + break; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_board_config) +{ + u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* TODO - Add support for VFs */ + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + if (!ecore_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); + return ECORE_BUSY; + } + if (!p_ptt) { + *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; + rc = ECORE_INVAL; + } else { + nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, + MISC_REG_GEN_PURP_CR0); + nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, + nvm_cfg_addr + 4); + port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); + *p_board_config = ecore_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, + board_cfg)); + } + + return rc; +} + /* @DPDK */ /* Old MFW has a global configuration for all PFs regarding RDMA support */ static void diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h index 6afaf7de..8e125310 100644 --- a/drivers/net/qede/base/ecore_mcp.h +++ b/drivers/net/qede/base/ecore_mcp.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_MCP_H__ diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h index 225890e2..cfb9f99d 100644 --- a/drivers/net/qede/base/ecore_mcp_api.h +++ b/drivers/net/qede/base/ecore_mcp_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_MCP_API_H__ @@ -594,6 +592,52 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *media_type); +/** + * @brief Get transceiver data of the port. + * + * @param p_dev - ecore dev pointer + * @param p_ptt + * @param p_transceiver_type - media type value + * + * @return enum _ecore_status_t - + * ECORE_SUCCESS - Operation was successful. + * ECORE_BUSY - Operation failed + */ +enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_tranceiver_type); + +/** + * @brief Get transceiver supported speed mask. + * + * @param p_dev - ecore dev pointer + * @param p_ptt + * @param p_speed_mask - Bit mask of all supported speeds. + * + * @return enum _ecore_status_t - + * ECORE_SUCCESS - Operation was successful. + * ECORE_BUSY - Operation failed + */ + +enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_speed_mask); + +/** + * @brief Get board configuration. + * + * @param p_dev - ecore dev pointer + * @param p_ptt + * @param p_board_config - Board config. + * + * @return enum _ecore_status_t - + * ECORE_SUCCESS - Operation was successful. 
+ * ECORE_BUSY - Operation failed + */ +enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_board_config); + /** * @brief - Sends a command to the MCP mailbox. * diff --git a/drivers/net/qede/base/ecore_mng_tlv.c b/drivers/net/qede/base/ecore_mng_tlv.c index 3a1de094..f7666472 100644 --- a/drivers/net/qede/base/ecore_mng_tlv.c +++ b/drivers/net/qede/base/ecore_mng_tlv.c @@ -1,3 +1,9 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + #include "bcm_osal.h" #include "ecore.h" #include "ecore_status.h" diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h index abca7408..f91b25e2 100644 --- a/drivers/net/qede/base/ecore_proto_if.h +++ b/drivers/net/qede/base/ecore_proto_if.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_PROTO_IF_H__ @@ -31,6 +29,9 @@ struct ecore_eth_pf_params { * This will set the maximal number of configured steering-filters. */ u32 num_arfs_filters; + + /* To allow the VF to change its MAC despite the PF having set a forced MAC. */ + bool allow_vf_mac_change; }; /* Most of the parameters below are described in the FW iSCSI / TCP HSI */ diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h index 1d085815..721b8c15 100644 --- a/drivers/net/qede/base/ecore_rt_defs.h +++ b/drivers/net/qede/base/ecore_rt_defs.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - - * See LICENSE.qede_pmd for copyright and licensing details.
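The board-config read added in ecore_mcp.c above resolves its address through two indirections: MISC_REG_GEN_PURP_CR0 yields the NVM config base, the second word there yields the nvm_cfg1 offset inside the MCP scratch-pad, and offsetof() finishes the walk to the per-port field. A standalone sketch of that pointer chase (reg_rd() and the struct layouts are stand-ins, not the real HSI definitions):

    #include <stdint.h>
    #include <stddef.h>

    struct nvm_cfg1_port { uint32_t board_cfg; };          /* stub layout */
    struct nvm_cfg1 { struct nvm_cfg1_port port[4]; };     /* stub layout */

    /* Stand-in for ecore_rd(); a real implementation reads a GRC window. */
    static uint32_t reg_rd(uint32_t addr) { return addr; }

    static uint32_t read_board_cfg(uint32_t gen_purp_cr0, uint32_t scratch_base,
                                   unsigned int port_id)
    {
        uint32_t nvm_cfg_addr = reg_rd(gen_purp_cr0);
        uint32_t nvm_cfg1_offset = reg_rd(nvm_cfg_addr + 4);  /* second word */
        uint32_t port_cfg_addr = scratch_base + nvm_cfg1_offset +
                                 offsetof(struct nvm_cfg1, port[port_id]);

        return reg_rd(port_cfg_addr +
                      offsetof(struct nvm_cfg1_port, board_cfg));
    }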
+ * www.cavium.com */ #ifndef __RT_DEFS_H__ @@ -205,329 +203,336 @@ #define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082 #define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083 #define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 -#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34211 -#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34212 -#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34213 -#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34214 -#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34215 -#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34216 -#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34217 -#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34218 -#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34219 -#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34220 -#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34221 -#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34222 -#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34223 -#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34224 -#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34225 -#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34226 -#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34227 -#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34228 -#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34229 -#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34230 -#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34231 -#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34232 -#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34233 -#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34234 -#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34235 -#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34236 -#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34237 -#define QM_REG_PQTX2PF_0_RT_OFFSET 34238 -#define QM_REG_PQTX2PF_1_RT_OFFSET 34239 -#define QM_REG_PQTX2PF_2_RT_OFFSET 34240 -#define QM_REG_PQTX2PF_3_RT_OFFSET 34241 -#define QM_REG_PQTX2PF_4_RT_OFFSET 34242 -#define QM_REG_PQTX2PF_5_RT_OFFSET 34243 -#define QM_REG_PQTX2PF_6_RT_OFFSET 34244 -#define QM_REG_PQTX2PF_7_RT_OFFSET 34245 -#define QM_REG_PQTX2PF_8_RT_OFFSET 34246 -#define QM_REG_PQTX2PF_9_RT_OFFSET 34247 -#define QM_REG_PQTX2PF_10_RT_OFFSET 34248 -#define QM_REG_PQTX2PF_11_RT_OFFSET 34249 -#define QM_REG_PQTX2PF_12_RT_OFFSET 34250 -#define QM_REG_PQTX2PF_13_RT_OFFSET 34251 -#define QM_REG_PQTX2PF_14_RT_OFFSET 34252 -#define QM_REG_PQTX2PF_15_RT_OFFSET 34253 -#define QM_REG_PQTX2PF_16_RT_OFFSET 34254 -#define QM_REG_PQTX2PF_17_RT_OFFSET 34255 -#define QM_REG_PQTX2PF_18_RT_OFFSET 34256 -#define QM_REG_PQTX2PF_19_RT_OFFSET 34257 -#define QM_REG_PQTX2PF_20_RT_OFFSET 34258 -#define QM_REG_PQTX2PF_21_RT_OFFSET 34259 -#define QM_REG_PQTX2PF_22_RT_OFFSET 34260 -#define QM_REG_PQTX2PF_23_RT_OFFSET 34261 -#define QM_REG_PQTX2PF_24_RT_OFFSET 34262 -#define QM_REG_PQTX2PF_25_RT_OFFSET 34263 -#define QM_REG_PQTX2PF_26_RT_OFFSET 34264 -#define QM_REG_PQTX2PF_27_RT_OFFSET 34265 -#define QM_REG_PQTX2PF_28_RT_OFFSET 34266 -#define QM_REG_PQTX2PF_29_RT_OFFSET 34267 -#define QM_REG_PQTX2PF_30_RT_OFFSET 34268 -#define QM_REG_PQTX2PF_31_RT_OFFSET 34269 -#define QM_REG_PQTX2PF_32_RT_OFFSET 34270 -#define QM_REG_PQTX2PF_33_RT_OFFSET 34271 -#define QM_REG_PQTX2PF_34_RT_OFFSET 34272 -#define QM_REG_PQTX2PF_35_RT_OFFSET 34273 -#define QM_REG_PQTX2PF_36_RT_OFFSET 34274 -#define QM_REG_PQTX2PF_37_RT_OFFSET 34275 -#define QM_REG_PQTX2PF_38_RT_OFFSET 34276 -#define QM_REG_PQTX2PF_39_RT_OFFSET 34277 -#define QM_REG_PQTX2PF_40_RT_OFFSET 34278 -#define QM_REG_PQTX2PF_41_RT_OFFSET 34279 -#define QM_REG_PQTX2PF_42_RT_OFFSET 34280 -#define QM_REG_PQTX2PF_43_RT_OFFSET 34281 -#define QM_REG_PQTX2PF_44_RT_OFFSET 34282 -#define QM_REG_PQTX2PF_45_RT_OFFSET 34283 -#define QM_REG_PQTX2PF_46_RT_OFFSET 34284 
-#define QM_REG_PQTX2PF_47_RT_OFFSET 34285 -#define QM_REG_PQTX2PF_48_RT_OFFSET 34286 -#define QM_REG_PQTX2PF_49_RT_OFFSET 34287 -#define QM_REG_PQTX2PF_50_RT_OFFSET 34288 -#define QM_REG_PQTX2PF_51_RT_OFFSET 34289 -#define QM_REG_PQTX2PF_52_RT_OFFSET 34290 -#define QM_REG_PQTX2PF_53_RT_OFFSET 34291 -#define QM_REG_PQTX2PF_54_RT_OFFSET 34292 -#define QM_REG_PQTX2PF_55_RT_OFFSET 34293 -#define QM_REG_PQTX2PF_56_RT_OFFSET 34294 -#define QM_REG_PQTX2PF_57_RT_OFFSET 34295 -#define QM_REG_PQTX2PF_58_RT_OFFSET 34296 -#define QM_REG_PQTX2PF_59_RT_OFFSET 34297 -#define QM_REG_PQTX2PF_60_RT_OFFSET 34298 -#define QM_REG_PQTX2PF_61_RT_OFFSET 34299 -#define QM_REG_PQTX2PF_62_RT_OFFSET 34300 -#define QM_REG_PQTX2PF_63_RT_OFFSET 34301 -#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34302 -#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34303 -#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34304 -#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34305 -#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34306 -#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34307 -#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34308 -#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34309 -#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34310 -#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34311 -#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34312 -#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34313 -#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34314 -#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34315 -#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34316 -#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34317 -#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34318 -#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34319 -#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34320 -#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34321 -#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34322 -#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34323 -#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34324 -#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34325 -#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34326 -#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34327 -#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34328 -#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34329 -#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34330 +#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211 +#define QM_REG_PTRTBLOTHER_RT_SIZE 256 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493 +#define 
QM_REG_PQTX2PF_0_RT_OFFSET 34494 +#define QM_REG_PQTX2PF_1_RT_OFFSET 34495 +#define QM_REG_PQTX2PF_2_RT_OFFSET 34496 +#define QM_REG_PQTX2PF_3_RT_OFFSET 34497 +#define QM_REG_PQTX2PF_4_RT_OFFSET 34498 +#define QM_REG_PQTX2PF_5_RT_OFFSET 34499 +#define QM_REG_PQTX2PF_6_RT_OFFSET 34500 +#define QM_REG_PQTX2PF_7_RT_OFFSET 34501 +#define QM_REG_PQTX2PF_8_RT_OFFSET 34502 +#define QM_REG_PQTX2PF_9_RT_OFFSET 34503 +#define QM_REG_PQTX2PF_10_RT_OFFSET 34504 +#define QM_REG_PQTX2PF_11_RT_OFFSET 34505 +#define QM_REG_PQTX2PF_12_RT_OFFSET 34506 +#define QM_REG_PQTX2PF_13_RT_OFFSET 34507 +#define QM_REG_PQTX2PF_14_RT_OFFSET 34508 +#define QM_REG_PQTX2PF_15_RT_OFFSET 34509 +#define QM_REG_PQTX2PF_16_RT_OFFSET 34510 +#define QM_REG_PQTX2PF_17_RT_OFFSET 34511 +#define QM_REG_PQTX2PF_18_RT_OFFSET 34512 +#define QM_REG_PQTX2PF_19_RT_OFFSET 34513 +#define QM_REG_PQTX2PF_20_RT_OFFSET 34514 +#define QM_REG_PQTX2PF_21_RT_OFFSET 34515 +#define QM_REG_PQTX2PF_22_RT_OFFSET 34516 +#define QM_REG_PQTX2PF_23_RT_OFFSET 34517 +#define QM_REG_PQTX2PF_24_RT_OFFSET 34518 +#define QM_REG_PQTX2PF_25_RT_OFFSET 34519 +#define QM_REG_PQTX2PF_26_RT_OFFSET 34520 +#define QM_REG_PQTX2PF_27_RT_OFFSET 34521 +#define QM_REG_PQTX2PF_28_RT_OFFSET 34522 +#define QM_REG_PQTX2PF_29_RT_OFFSET 34523 +#define QM_REG_PQTX2PF_30_RT_OFFSET 34524 +#define QM_REG_PQTX2PF_31_RT_OFFSET 34525 +#define QM_REG_PQTX2PF_32_RT_OFFSET 34526 +#define QM_REG_PQTX2PF_33_RT_OFFSET 34527 +#define QM_REG_PQTX2PF_34_RT_OFFSET 34528 +#define QM_REG_PQTX2PF_35_RT_OFFSET 34529 +#define QM_REG_PQTX2PF_36_RT_OFFSET 34530 +#define QM_REG_PQTX2PF_37_RT_OFFSET 34531 +#define QM_REG_PQTX2PF_38_RT_OFFSET 34532 +#define QM_REG_PQTX2PF_39_RT_OFFSET 34533 +#define QM_REG_PQTX2PF_40_RT_OFFSET 34534 +#define QM_REG_PQTX2PF_41_RT_OFFSET 34535 +#define QM_REG_PQTX2PF_42_RT_OFFSET 34536 +#define QM_REG_PQTX2PF_43_RT_OFFSET 34537 +#define QM_REG_PQTX2PF_44_RT_OFFSET 34538 +#define QM_REG_PQTX2PF_45_RT_OFFSET 34539 +#define QM_REG_PQTX2PF_46_RT_OFFSET 34540 +#define QM_REG_PQTX2PF_47_RT_OFFSET 34541 +#define QM_REG_PQTX2PF_48_RT_OFFSET 34542 +#define QM_REG_PQTX2PF_49_RT_OFFSET 34543 +#define QM_REG_PQTX2PF_50_RT_OFFSET 34544 +#define QM_REG_PQTX2PF_51_RT_OFFSET 34545 +#define QM_REG_PQTX2PF_52_RT_OFFSET 34546 +#define QM_REG_PQTX2PF_53_RT_OFFSET 34547 +#define QM_REG_PQTX2PF_54_RT_OFFSET 34548 +#define QM_REG_PQTX2PF_55_RT_OFFSET 34549 +#define QM_REG_PQTX2PF_56_RT_OFFSET 34550 +#define QM_REG_PQTX2PF_57_RT_OFFSET 34551 +#define QM_REG_PQTX2PF_58_RT_OFFSET 34552 +#define QM_REG_PQTX2PF_59_RT_OFFSET 34553 +#define QM_REG_PQTX2PF_60_RT_OFFSET 34554 +#define QM_REG_PQTX2PF_61_RT_OFFSET 34555 +#define QM_REG_PQTX2PF_62_RT_OFFSET 34556 +#define QM_REG_PQTX2PF_63_RT_OFFSET 34557 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575 
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586 #define QM_REG_RLGLBLINCVAL_RT_SIZE 256 -#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34586 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842 #define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 -#define QM_REG_RLGLBLCRD_RT_OFFSET 34842 +#define QM_REG_RLGLBLCRD_RT_OFFSET 35098 #define QM_REG_RLGLBLCRD_RT_SIZE 256 -#define QM_REG_RLGLBLENABLE_RT_OFFSET 35098 -#define QM_REG_RLPFPERIOD_RT_OFFSET 35099 -#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35100 -#define QM_REG_RLPFINCVAL_RT_OFFSET 35101 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354 +#define QM_REG_RLPFPERIOD_RT_OFFSET 35355 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356 +#define QM_REG_RLPFINCVAL_RT_OFFSET 35357 #define QM_REG_RLPFINCVAL_RT_SIZE 16 -#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35117 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373 #define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_RLPFCRD_RT_OFFSET 35133 +#define QM_REG_RLPFCRD_RT_OFFSET 35389 #define QM_REG_RLPFCRD_RT_SIZE 16 -#define QM_REG_RLPFENABLE_RT_OFFSET 35149 -#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35150 -#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35151 +#define QM_REG_RLPFENABLE_RT_OFFSET 35405 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407 #define QM_REG_WFQPFWEIGHT_RT_SIZE 16 -#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35167 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423 #define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_WFQPFCRD_RT_OFFSET 35183 +#define QM_REG_WFQPFCRD_RT_OFFSET 35439 #define QM_REG_WFQPFCRD_RT_SIZE 256 -#define QM_REG_WFQPFENABLE_RT_OFFSET 35439 -#define QM_REG_WFQVPENABLE_RT_OFFSET 35440 -#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35441 +#define QM_REG_WFQPFENABLE_RT_OFFSET 35695 +#define QM_REG_WFQVPENABLE_RT_OFFSET 35696 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697 #define QM_REG_BASEADDRTXPQ_RT_SIZE 512 -#define QM_REG_TXPQMAP_RT_OFFSET 35953 +#define QM_REG_TXPQMAP_RT_OFFSET 36209 #define QM_REG_TXPQMAP_RT_SIZE 512 -#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36465 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721 #define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 36977 +#define QM_REG_WFQVPCRD_RT_OFFSET 37233 #define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 37489 +#define QM_REG_WFQVPMAP_RT_OFFSET 37745 #define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 38001 +#define QM_REG_PTRTBLTX_RT_OFFSET 38257 +#define QM_REG_PTRTBLTX_RT_SIZE 1024 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281 #define QM_REG_WFQPFCRD_MSB_RT_SIZE 320 -#define QM_REG_VOQCRDLINE_RT_OFFSET 38321 +#define QM_REG_VOQCRDLINE_RT_OFFSET 39601 #define QM_REG_VOQCRDLINE_RT_SIZE 36 -#define QM_REG_VOQINITCRDLINE_RT_OFFSET 38357 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637 #define QM_REG_VOQINITCRDLINE_RT_SIZE 36 -#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 38393 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 38394 -#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 38395 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 38396 -#define 
NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 38397 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 38398 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 38399 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 38400 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 38401 +#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674 +#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681 #define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 38405 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685 #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 38409 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689 #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 38441 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721 #define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 38457 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737 #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 38473 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753 #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 38489 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 38505 -#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 38506 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 38507 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785 +#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787 #define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 38515 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 39539 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 40051 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 40563 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 41075 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 41587 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867 #define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 41619 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 41620 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 41621 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 41622 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 41623 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 41624 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 41625 -#define 
CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 41626 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 41627 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 41628 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 41629 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 41630 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 41631 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 41632 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 41633 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 41634 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 41635 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 41636 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 41637 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 41638 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 41639 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 41640 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 41641 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 41642 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 41643 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 41644 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 41645 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 41646 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 41647 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 41648 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 41649 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 41650 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 41651 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 41652 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 41653 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 41654 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 41655 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 41656 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 41657 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 41658 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 41659 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 41660 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 41661 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 41662 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 41663 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 41664 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 41665 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 41666 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 41667 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 41668 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 41669 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 41670 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 41671 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 41672 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 41673 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 41674 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 41675 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 41676 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 41677 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 41678 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 41679 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 41680 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 41681 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 41682 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 41683 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 41684 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 41685 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 41686 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 41687 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 41688 
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 41689 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 41690 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 41691 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 41692 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 41693 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 41694 -#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 41695 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 41696 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 41697 -#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 41698 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 41699 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 41700 -#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 41701 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 41702 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 41703 -#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 41704 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 41705 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 41706 -#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 41707 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 41708 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 41709 -#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 41710 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 41711 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 41712 -#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 41713 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 41714 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 41715 -#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 41716 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 41717 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 41718 -#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 41719 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 41720 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 41721 -#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 41722 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 41723 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 41724 -#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 41725 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 41726 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 41727 -#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 41728 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 41729 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 41730 -#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 41731 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 41732 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 41733 -#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 41734 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 41735 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 41736 -#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 41737 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 41738 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 41739 -#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 41740 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 41741 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 41742 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907 
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 
42970 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974 +#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977 +#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980 +#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983 +#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986 +#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989 +#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992 +#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995 +#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998 +#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001 +#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004 +#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007 +#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010 +#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013 +#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016 +#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019 +#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022 -#define RUNTIME_ARRAY_SIZE 41743 +#define RUNTIME_ARRAY_SIZE 43023 + +/* Init Callbacks */ +#define DMAE_READY_CB 0 #endif /* __RT_DEFS_H__ */ diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h index 86e84964..4633dbeb 100644 --- a/drivers/net/qede/base/ecore_sp_api.h +++ b/drivers/net/qede/base/ecore_sp_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. 
- * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_SP_API_H__ diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c index 7598e7a6..b43baf9d 100644 --- a/drivers/net/qede/base/ecore_sp_commands.c +++ b/drivers/net/qede/base/ecore_sp_commands.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include "bcm_osal.h" @@ -295,6 +293,7 @@ ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn, } #define ETH_P_8021Q 0x8100 +#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, @@ -308,7 +307,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn, struct ecore_sp_init_data init_data; enum _ecore_status_t rc = ECORE_NOTIMPL; u8 page_cnt; - int i; + u8 i; /* update initial eq producer */ ecore_eq_prod_update(p_hwfn, @@ -343,18 +342,27 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn, p_ramrod->outer_tag_config.outer_tag.tci = OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan); + if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, &p_hwfn->p_dev->mf_bits)) { + p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q; + } else if (OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING, + &p_hwfn->p_dev->mf_bits)) { + p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD; + p_ramrod->outer_tag_config.enable_stag_pri_change = 1; + } + + p_ramrod->outer_tag_config.pri_map_valid = 1; + for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) + p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i; + /* enable_stag_pri_change should be set if the port is in BD mode, * UFP with Host Control mode, or UFP with DCB over the base interface. */ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) { - p_ramrod->outer_tag_config.outer_tag.tpid = - OSAL_CPU_TO_LE16(ETH_P_8021Q); - if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) + if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) || + (p_hwfn->p_dcbx_info->results.dcbx_enabled)) p_ramrod->outer_tag_config.enable_stag_pri_change = 1; else p_ramrod->outer_tag_config.enable_stag_pri_change = 0; - p_ramrod->outer_tag_config.pri_map_valid = 1; - for (i = 0; i < 8; i++) - p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = - (u8)i; } /* Place EQ address in RAMROD */ @@ -451,7 +459,8 @@ enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn) return rc; p_ent->ramrod.pf_update.update_enable_stag_pri_change = true; - if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) + if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) || + (p_hwfn->p_dcbx_info->results.dcbx_enabled)) p_ent->ramrod.pf_update.enable_stag_pri_change = 1; else p_ent->ramrod.pf_update.enable_stag_pri_change = 0; diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h index 98009c65..e57414cf 100644 --- a/drivers/net/qede/base/ecore_sp_commands.h +++ b/drivers/net/qede/base/ecore_sp_commands.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com */ #ifndef __ECORE_SP_COMMANDS_H__ diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c index 70ffa8cd..776c86f7 100644 --- a/drivers/net/qede/base/ecore_spq.c +++ b/drivers/net/qede/base/ecore_spq.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include "bcm_osal.h" @@ -30,7 +28,7 @@ #define SPQ_BLOCK_DELAY_MAX_ITER (10) #define SPQ_BLOCK_DELAY_US (10) -#define SPQ_BLOCK_SLEEP_MAX_ITER (1000) +#define SPQ_BLOCK_SLEEP_MAX_ITER (200) #define SPQ_BLOCK_SLEEP_MS (5) /*************************************************************************** @@ -60,8 +58,12 @@ static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn, u32 iter_cnt; comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie; - iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER + iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter : SPQ_BLOCK_DELAY_MAX_ITER; +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter) + iter_cnt *= 5; +#endif while (iter_cnt--) { OSAL_POLL_MODE_DPC(p_hwfn); @@ -138,6 +140,14 @@ err: return ECORE_BUSY; } +void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn, + u32 spq_timeout_ms) +{ + p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ? + spq_timeout_ms / SPQ_BLOCK_SLEEP_MS : + SPQ_BLOCK_SLEEP_MAX_ITER; +} + /*************************************************************************** * SPQ entries inner API ***************************************************************************/ @@ -389,7 +399,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem) /* Allocate EQ struct */ p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq)); if (!p_eq) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_eq'\n"); return ECORE_NOMEM; } @@ -402,7 +412,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem) num_elem, sizeof(union event_ring_element), &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n"); goto eq_allocate_fail; } @@ -547,8 +557,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn) p_spq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq)); if (!p_spq) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate `struct ecore_spq'\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n"); return ECORE_NOMEM; } @@ -560,7 +569,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn) 0, /* N/A when the mode is SINGLE */ sizeof(struct slow_path_element), &p_spq->chain, OSAL_NULL)) { - DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n"); goto spq_allocate_fail; } @@ -576,7 +585,8 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn) p_spq->p_phys = p_phys; #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock)) + goto spq_allocate_fail; #endif p_hwfn->p_spq = p_spq; @@ -630,9 +640,7 @@ ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent) if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) { p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, 
sizeof(*p_ent)); if (!p_ent) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate an SPQ entry for a pending" - " ramrod\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n"); rc = ECORE_NOMEM; goto out_unlock; } @@ -1013,7 +1021,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn) p_consq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq)); if (!p_consq) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_consq'\n"); return ECORE_NOMEM; } @@ -1026,7 +1034,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn) ECORE_CHAIN_PAGE_SIZE / 0x80, 0x80, &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain"); + DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain"); goto consq_allocate_fail; } diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h index 526cff08..6142c399 100644 --- a/drivers/net/qede/base/ecore_spq.h +++ b/drivers/net/qede/base/ecore_spq.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_SPQ_H__ @@ -116,6 +114,9 @@ struct ecore_spq { dma_addr_t p_phys; struct ecore_spq_entry *p_virt; + /* SPQ max sleep iterations used in __ecore_spq_block() */ + u32 block_sleep_max_iter; + /* Bitmap for handling out-of-order completions */ #define SPQ_RING_SIZE \ (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element)) @@ -149,6 +150,16 @@ struct ecore_spq { struct ecore_port; struct ecore_hwfn; +/** + * @brief ecore_set_spq_block_timeout - calculates the maximum sleep + * iterations used in __ecore_spq_block(); + * + * @param p_hwfn + * @param spq_timeout_ms + */ +void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn, + u32 spq_timeout_ms); + /** * @brief ecore_spq_post - Posts a Slow hwfn request to FW, or lacking that * Pends it to the future list. diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c index b1e26d6f..f7ebf7ad 100644 --- a/drivers/net/qede/base/ecore_sriov.c +++ b/drivers/net/qede/base/ecore_sriov.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
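The block-timeout plumbing added by the ecore_spq.c and ecore_spq.h hunks above reduces to a small conversion that is easy to check in isolation. The standalone sketch below restates it outside the driver; the helper name and the standalone form are mine, while the constants are the ones the patch sets.

#include <stdint.h>

#define SPQ_BLOCK_SLEEP_MS       5   /* sleep per polling iteration */
#define SPQ_BLOCK_SLEEP_MAX_ITER 200 /* default cap: 200 * 5 ms = 1 s */

/* Mirrors ecore_set_spq_block_timeout(): a timeout of 0 selects the
 * default cap, otherwise the timeout is split into 5 ms iterations.
 */
static uint32_t spq_timeout_to_iterations(uint32_t spq_timeout_ms)
{
	return spq_timeout_ms ? spq_timeout_ms / SPQ_BLOCK_SLEEP_MS
			      : SPQ_BLOCK_SLEEP_MAX_ITER;
}

/* Examples: 0 -> 200 iterations (1 s), 1000 -> 200, 50 -> 10. */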
+ * www.cavium.com */ #include "bcm_osal.h" @@ -61,6 +59,8 @@ const char *ecore_channel_tlvs_string[] = { "CHANNEL_TLV_COALESCE_UPDATE", "CHANNEL_TLV_QID", "CHANNEL_TLV_COALESCE_READ", + "CHANNEL_TLV_BULLETIN_UPDATE_MAC", + "CHANNEL_TLV_UPDATE_MTU", "CHANNEL_TLV_MAX" }; @@ -590,8 +590,7 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn) p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov)); if (!p_sriov) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate `struct ecore_sriov'\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n"); return ECORE_NOMEM; } @@ -648,7 +647,7 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn) p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL, sizeof(*p_dev->p_iov_info)); if (!p_dev->p_iov_info) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Can't support IOV due to lack of memory\n"); return ECORE_NOMEM; } @@ -1968,7 +1967,8 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn, if (!p_vf->vport_instance) return ECORE_INVAL; - if (events & (1 << MAC_ADDR_FORCED)) { + if ((events & (1 << MAC_ADDR_FORCED)) || + p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) { /* Since there's no way [currently] of removing the MAC, * we can always assume this means we need to force it. */ @@ -1989,7 +1989,11 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn, return rc; } - p_vf->configured_features |= 1 << MAC_ADDR_FORCED; + if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) + p_vf->configured_features |= + 1 << VFPF_BULLETIN_MAC_ADDR; + else + p_vf->configured_features |= 1 << MAC_ADDR_FORCED; } if (events & (1 << VLAN_ADDR_FORCED)) { @@ -2854,6 +2858,45 @@ out: length, status); } +static enum _ecore_status_t +ecore_iov_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *p_vf) +{ + struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct ecore_sp_vport_update_params params; + enum _ecore_status_t rc = ECORE_SUCCESS; + struct vfpf_update_mtu_tlv *p_req; + u8 status = PFVF_STATUS_SUCCESS; + + /* Validate PF can send such a request */ + if (!p_vf->vport_instance) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "No VPORT instance available for VF[%d], failing MTU update\n", + p_vf->abs_vf_id); + status = PFVF_STATUS_FAILURE; + goto send_status; + } + + p_req = &mbx->req_virt->update_mtu; + + OSAL_MEMSET(&params, 0, sizeof(params)); + params.opaque_fid = p_vf->opaque_fid; + params.vport_id = p_vf->vport_id; + params.mtu = p_req->mtu; + rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK, + OSAL_NULL); + + if (rc) + status = PFVF_STATUS_FAILURE; +send_status: + ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + CHANNEL_TLV_UPDATE_MTU, + sizeof(struct pfvf_def_resp_tlv), + status); + return rc; +} + void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn, void *p_tlvs_list, u16 req_type) { @@ -2975,8 +3018,7 @@ ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn, p_data->update_approx_mcast_flg = 1; OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins, - sizeof(unsigned long) * - ETH_MULTICAST_MAC_BINS_IN_REGS); + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST; } @@ -4137,6 +4179,9 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, case CHANNEL_TLV_COALESCE_READ: ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); break; + case CHANNEL_TLV_UPDATE_MTU: + ecore_iov_vf_pf_update_mtu(p_hwfn, p_ptt, p_vf); + break; } } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) { /* If we've
received a message from a VF we consider malicious @@ -4370,7 +4415,11 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn, return; } - feature = 1 << MAC_ADDR_FORCED; + if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) + feature = 1 << VFPF_BULLETIN_MAC_ADDR; + else + feature = 1 << MAC_ADDR_FORCED; + OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); vf_info->bulletin.p_virt->valid_bitmap |= feature; @@ -4411,9 +4460,13 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn, vf_info->bulletin.p_virt->valid_bitmap |= feature; + if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) + ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature); + return ECORE_SUCCESS; } +#ifndef LINUX_REMOVE enum _ecore_status_t ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn, bool b_untagged_only, int vfid) @@ -4470,6 +4523,7 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid, *opaque_fid = vf_info->opaque_fid; } +#endif void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn, u16 pvid, int vfid) @@ -4657,6 +4711,22 @@ u32 ecore_iov_pfvf_msg_length(void) return sizeof(union pfvf_tlvs); } +u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return OSAL_NULL; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & + (1 << VFPF_BULLETIN_MAC_ADDR))) + return OSAL_NULL; + + return p_vf->bulletin.p_virt->mac; +} + u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) { struct ecore_vf_info *p_vf; diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h index 850b1052..50c7d2c9 100644 --- a/drivers/net/qede/base/ecore_sriov.h +++ b/drivers/net/qede/base/ecore_sriov.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_SRIOV_H__ diff --git a/drivers/net/qede/base/ecore_status.h b/drivers/net/qede/base/ecore_status.h index c77ec260..b893f1d4 100644 --- a/drivers/net/qede/base/ecore_status.h +++ b/drivers/net/qede/base/ecore_status.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_STATUS_H__ diff --git a/drivers/net/qede/base/ecore_utils.h b/drivers/net/qede/base/ecore_utils.h index 034cf1eb..249136b0 100644 --- a/drivers/net/qede/base/ecore_utils.h +++ b/drivers/net/qede/base/ecore_utils.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_UTILS_H__ diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c index e0f2dd5a..d2213f79 100644 --- a/drivers/net/qede/base/ecore_vf.c +++ b/drivers/net/qede/base/ecore_vf.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. 
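The allow_vf_mac_change handling above comes down to publishing the MAC under one of two bulletin feature bits. A minimal sketch of that choice follows; the bit positions here are placeholders, the real values come from the driver's bulletin definitions.

#include <stdint.h>

/* Placeholder bit positions; the driver takes the real ones from its
 * bulletin board definitions.
 */
#define MAC_ADDR_FORCED        0
#define VFPF_BULLETIN_MAC_ADDR 1

/* When the PF allows VFs to change their MAC, the address is published
 * as a plain bulletin MAC (which the VF may later override); otherwise
 * it is published as a forced MAC that the VF must keep.
 */
static uint64_t qede_mac_feature(int allow_vf_mac_change)
{
	return allow_vf_mac_change ? (1ULL << VFPF_BULLETIN_MAC_ADDR)
				   : (1ULL << MAC_ADDR_FORCED);
}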
* All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include "bcm_osal.h" @@ -1275,8 +1273,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn, resp_size += sizeof(struct pfvf_def_resp_tlv); OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins, - sizeof(unsigned long) * - ETH_MULTICAST_MAC_BINS_IN_REGS); + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); } update_rx = p_params->accept_flags.update_rx_mode_config; @@ -1473,7 +1470,7 @@ void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn, u32 bit; bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); - OSAL_SET_BIT(bit, sp_params.bins); + sp_params.bins[bit / 32] |= 1 << (bit % 32); } } @@ -1629,6 +1626,39 @@ exit: return rc; } +enum _ecore_status_t +ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_update_mtu_tlv *p_req; + struct pfvf_def_resp_tlv *p_resp; + enum _ecore_status_t rc; + + if (!mtu) + return ECORE_INVAL; + + /* clear mailbox and prep header tlv */ + p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_MTU, + sizeof(*p_req)); + p_req->mtu = mtu; + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Requesting MTU update to %d\n", mtu); + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + p_resp = &p_iov->pf2vf_reply->default_resp; + rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp)); + if (p_resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) + rc = ECORE_INVAL; + + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) { diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h index de2758cb..a07f82eb 100644 --- a/drivers/net/qede/base/ecore_vf.h +++ b/drivers/net/qede/base/ecore_vf.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_VF_H__ @@ -319,5 +317,14 @@ void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun); u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id); + +/** + * @brief - ecore_vf_pf_update_mtu Update MTU for VF. + * + * @param p_hwfn + * @param - mtu + */ +enum _ecore_status_t +ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu); #endif #endif /* __ECORE_VF_H__ */ diff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h index 9815cf8a..1a9fb3b1 100644 --- a/drivers/net/qede/base/ecore_vf_api.h +++ b/drivers/net/qede/base/ecore_vf_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_VF_API_H__ diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h index ecb00649..c30677ab 100644 --- a/drivers/net/qede/base/ecore_vfpf_if.h +++ b/drivers/net/qede/base/ecore_vfpf_if.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. 
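On the VF side shown above, the outcome of the new CHANNEL_TLV_UPDATE_MTU exchange is reported through the generic response status. The sketch below isolates that mapping; the status values are placeholders standing in for the ecore_vfpf_if.h definitions.

#include <errno.h>
#include <stdint.h>

/* Placeholder status codes standing in for the PFVF_STATUS_* values. */
#define PFVF_STATUS_SUCCESS       1
#define PFVF_STATUS_NOT_SUPPORTED 2

/* An old PF that does not recognize CHANNEL_TLV_UPDATE_MTU answers
 * NOT_SUPPORTED, which the VF reports as an invalid request rather
 * than as a mailbox failure.
 */
static int update_mtu_status_to_rc(uint8_t status)
{
	return (status == PFVF_STATUS_NOT_SUPPORTED) ? -EINVAL : 0;
}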
- * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_VF_PF_IF_H__ @@ -396,7 +394,13 @@ struct vfpf_vport_update_mcast_bin_tlv { struct channel_tlv tl; u8 padding[4]; - u64 bins[8]; + /* This was a mistake; there are only 256 approx bins, + * and in the HSI they are divided into 32-bit values. + * As old VFs set bits in these values on their side, + * the upper half of the array is never expected to contain any data. + */ + u64 bins[4]; + u64 obsolete_bins[4]; }; struct vfpf_vport_update_accept_param_tlv { @@ -525,6 +529,18 @@ struct pfvf_read_coal_resp_tlv { u8 padding[6]; }; +struct vfpf_bulletin_update_mac_tlv { + struct vfpf_first_tlv first_tlv; + u8 mac[ETH_ALEN]; + u8 padding[2]; +}; + +struct vfpf_update_mtu_tlv { + struct vfpf_first_tlv first_tlv; + u16 mtu; + u8 padding[6]; +}; + union vfpf_tlvs { struct vfpf_first_tlv first_tlv; struct vfpf_acquire_tlv acquire; @@ -539,6 +555,8 @@ union vfpf_tlvs { struct vfpf_update_tunn_param_tlv tunn_param_update; struct vfpf_update_coalesce update_coalesce; struct vfpf_read_coal_req_tlv read_coal_req; + struct vfpf_bulletin_update_mac_tlv bulletin_update_mac; + struct vfpf_update_mtu_tlv update_mtu; struct tlv_buffer_size tlv_buf_size; }; @@ -669,6 +687,8 @@ enum { CHANNEL_TLV_COALESCE_UPDATE, CHANNEL_TLV_QID, CHANNEL_TLV_COALESCE_READ, + CHANNEL_TLV_BULLETIN_UPDATE_MAC, + CHANNEL_TLV_UPDATE_MTU, CHANNEL_TLV_MAX, /* Required for iterating over vport-update tlvs. diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h index 45a0356d..abfa6854 100644 --- a/drivers/net/qede/base/eth_common.h +++ b/drivers/net/qede/base/eth_common.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ETH_COMMON__ @@ -119,6 +117,9 @@ /* Number of etherType values configured by driver for control frame check */ #define ETH_CTL_FRAME_ETH_TYPE_NUM 4 +/* GFS constants */ +#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */ + /* diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h index 81ca6634..81aa88e7 100644 --- a/drivers/net/qede/base/mcp_public.h +++ b/drivers/net/qede/base/mcp_public.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved.
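The "this was a mistake" comment above is easier to follow in code: the 256 approximate-match multicast bins are eight 32-bit words, so only the lower half of the legacy u64 bins[8] array ever carried data. Below is a standalone sketch of the indexing, matching the set-bit arithmetic in the ecore_vf.c hunk; the helper name is mine.

#include <stdint.h>

#define ETH_MULTICAST_MAC_BINS         256
#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) /* 8 */

/* Bin N lives in 32-bit word N / 32 at bit N % 32, exactly as in
 * "sp_params.bins[bit / 32] |= 1 << (bit % 32)" above.
 */
static void mcast_bin_set(uint32_t bins[ETH_MULTICAST_MAC_BINS_IN_REGS],
			  uint32_t bin)
{
	bins[bin / 32] |= 1u << (bin % 32);
}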
+ * www.cavium.com */ /**************************************************************************** @@ -800,6 +798,7 @@ struct public_port { #define ETH_TRANSCEIVER_TYPE_4x10G 0x1f #define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20 #define ETH_TRANSCEIVER_TYPE_1000BASET 0x21 +#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22 #define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30 #define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 #define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 @@ -1777,6 +1776,8 @@ struct public_drv_mb { #define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 /* MFW supports EEE */ #define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 +/* MFW supports DRV_LOAD Timeout */ +#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO 0x00000004 /* MFW supports virtual link */ #define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h index c99e805d..ab86260e 100644 --- a/drivers/net/qede/base/nvm_cfg.h +++ b/drivers/net/qede/base/nvm_cfg.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ /**************************************************************************** diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h index ad15d28a..402f6204 100644 --- a/drivers/net/qede/base/reg_addr.h +++ b/drivers/net/qede/base/reg_addr.h @@ -1,17 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. - */ - -/* - * Copyright (c) 2016 QLogic Corporation. - * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \ @@ -1222,3 +1212,5 @@ #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1 << 10) #define PRS_REG_SEARCH_TENANT_ID 0x1f044cUL #define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL + +#define RSS_REG_RSS_RAM_MASK 0x238c10UL diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c index a91f4368..df52ea92 100644 --- a/drivers/net/qede/qede_ethdev.c +++ b/drivers/net/qede/qede_ethdev.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
+ * www.cavium.com */ #include "qede_ethdev.h" @@ -16,7 +14,7 @@ int qede_logtype_init; int qede_logtype_driver; static const struct qed_eth_ops *qed_ops; -static int64_t timer_period = 1; +#define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */ /* VXLAN tunnel classification mapping */ const struct _qede_udp_tunn_types { @@ -338,6 +336,24 @@ static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn)); } +static void +qede_interrupt_handler_intx(void *param) +{ + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + u64 status; + + /* Check if our device actually raised an interrupt */ + status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev)); + if (status & 0x1) { + qede_interrupt_action(ECORE_LEADING_HWFN(edev)); + + if (rte_intr_enable(eth_dev->intr_handle)) + DP_ERR(edev, "rte_intr_enable failed\n"); + } +} + static void qede_interrupt_handler(void *param) { @@ -499,6 +515,9 @@ qede_start_vport(struct qede_dev *qdev, uint16_t mtu) return 0; } +#define QEDE_NPAR_TX_SWITCHING "npar_tx_switching" +#define QEDE_VF_TX_SWITCHING "vf_tx_switching" + /* Activate or deactivate vport via vport-update */ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg) { @@ -515,12 +534,9 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg) params.update_vport_active_tx_flg = 1; params.vport_active_rx_flg = flg; params.vport_active_tx_flg = flg; - if (!qdev->enable_tx_switching) { - if (IS_VF(edev)) { - params.update_tx_switching_flg = 1; - params.tx_switching_flg = !flg; - DP_INFO(edev, "VF tx-switching is disabled\n"); - } + if (~qdev->enable_tx_switching & flg) { + params.update_tx_switching_flg = 1; + params.tx_switching_flg = !flg; } for_each_hwfn(edev, i) { p_hwfn = &edev->hwfns[i]; @@ -591,38 +607,9 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg) } } qdev->enable_lro = flg; - DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled"); - - return 0; -} - -/* Update MTU via vport-update without doing port restart. - * The vport must be deactivated before calling this API. - */ -int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu) -{ - struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); - struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); - struct ecore_sp_vport_update_params params; - struct ecore_hwfn *p_hwfn; - int rc; - int i; + eth_dev->data->lro = flg; - memset(&params, 0, sizeof(struct ecore_sp_vport_update_params)); - params.vport_id = 0; - params.mtu = mtu; - params.vport_id = 0; - for_each_hwfn(edev, i) { - p_hwfn = &edev->hwfns[i]; - params.opaque_fid = p_hwfn->hw_info.opaque_fid; - rc = ecore_sp_vport_update(p_hwfn, &params, - ECORE_SPQ_MODE_EBLOCK, NULL); - if (rc != ECORE_SUCCESS) { - DP_ERR(edev, "Failed to update MTU\n"); - return -1; - } - } - DP_INFO(edev, "MTU updated to %u\n", mtu); + DP_INFO(edev, "LRO is %s\n", flg ?
"enabled" : "disabled"); return 0; } @@ -779,6 +766,36 @@ qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss, return rc; } +static int +qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss, + bool enable) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + enum _ecore_status_t rc = ECORE_INVAL; + struct ecore_tunnel_info tunn; + + memset(&tunn, 0, sizeof(struct ecore_tunnel_info)); + tunn.ip_gre.b_update_mode = true; + tunn.ip_gre.b_mode_enabled = enable; + tunn.ip_gre.tun_cls = clss; + tunn.ip_gre.tun_cls = clss; + tunn.b_update_rx_cls = true; + tunn.b_update_tx_cls = true; + + rc = qede_tunnel_update(qdev, &tunn); + if (rc == ECORE_SUCCESS) { + qdev->ipgre.enable = enable; + DP_INFO(edev, "IPGRE is %s\n", + enable ? "enabled" : "disabled"); + } else { + DP_ERR(edev, "Failed to update tunn_clss %u\n", + clss); + } + + return rc; +} + static int qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss, enum rte_eth_tunnel_type tunn_type, bool enable) @@ -792,6 +809,9 @@ qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss, case RTE_TUNNEL_TYPE_GENEVE: rc = qede_geneve_enable(eth_dev, clss, enable); break; + case RTE_TUNNEL_TYPE_IP_IN_GRE: + rc = qede_ipgre_enable(eth_dev, clss, enable); + break; default: rc = -EINVAL; break; @@ -817,10 +837,10 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, ETHER_ADDR_LEN) == 0) && ucast->vni == tmp->vni && ucast->vlan == tmp->vlan) { - DP_ERR(edev, "Unicast MAC is already added" - " with vlan = %u, vni = %u\n", - ucast->vlan, ucast->vni); - return -EEXIST; + DP_INFO(edev, "Unicast MAC is already added" + " with vlan = %u, vni = %u\n", + ucast->vlan, ucast->vni); + return 0; } } u = rte_malloc(NULL, sizeof(struct qede_ucast_entry), @@ -854,47 +874,69 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, } static int -qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast, - bool add) +qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs, + uint32_t mc_addrs_num) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); - struct ether_addr *mac_addr; - struct qede_mcast_entry *tmp = NULL; - struct qede_mcast_entry *m; + struct ecore_filter_mcast mcast; + struct qede_mcast_entry *m = NULL; + uint8_t i; + int rc; - mac_addr = (struct ether_addr *)mcast->mac; - if (add) { - SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { - if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) { - DP_ERR(edev, - "Multicast MAC is already added\n"); - return -EEXIST; - } - } + for (i = 0; i < mc_addrs_num; i++) { m = rte_malloc(NULL, sizeof(struct qede_mcast_entry), - RTE_CACHE_LINE_SIZE); + RTE_CACHE_LINE_SIZE); if (!m) { - DP_ERR(edev, - "Did not allocate memory for mcast\n"); + DP_ERR(edev, "Did not allocate memory for mcast\n"); return -ENOMEM; } - ether_addr_copy(mac_addr, &m->mac); + ether_addr_copy(&mc_addrs[i], &m->mac); SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list); - qdev->num_mc_addr++; - } else { - SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { - if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) - break; - } - if (tmp == NULL) { - DP_INFO(edev, "Multicast mac is not found\n"); - return -EINVAL; - } - SLIST_REMOVE(&qdev->mc_list_head, tmp, - qede_mcast_entry, list); - qdev->num_mc_addr--; } + memset(&mcast, 0, sizeof(mcast)); + mcast.num_mc_addrs = mc_addrs_num; + mcast.opcode = ECORE_FILTER_ADD; + for (i = 0; i < mc_addrs_num; 
i++) + ether_addr_copy(&mc_addrs[i], (struct ether_addr *) + &mcast.mac[i]); + rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc); + return -1; + } + + return 0; +} + +static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qede_mcast_entry *tmp = NULL; + struct ecore_filter_mcast mcast; + int j; + int rc; + + memset(&mcast, 0, sizeof(mcast)); + mcast.num_mc_addrs = qdev->num_mc_addr; + mcast.opcode = ECORE_FILTER_REMOVE; + j = 0; + SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { + ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]); + j++; + } + rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to delete multicast filter\n"); + return -1; + } + /* Init the list */ + while (!SLIST_EMPTY(&qdev->mc_list_head)) { + tmp = SLIST_FIRST(&qdev->mc_list_head); + SLIST_REMOVE_HEAD(&qdev->mc_list_head, list); + } + SLIST_INIT(&qdev->mc_list_head); return 0; } @@ -905,59 +947,25 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); - enum _ecore_status_t rc; - struct ecore_filter_mcast mcast; - struct qede_mcast_entry *tmp; - uint16_t j = 0; + enum _ecore_status_t rc = ECORE_INVAL; - /* Multicast */ - if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) { - if (add) { - if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) { - DP_ERR(edev, - "Mcast filter table limit exceeded, " - "Please enable mcast promisc mode\n"); - return -ECORE_INVAL; - } - } - rc = qede_mcast_filter(eth_dev, ucast, add); - if (rc == 0) { - DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr); - memset(&mcast, 0, sizeof(mcast)); - mcast.num_mc_addrs = qdev->num_mc_addr; - mcast.opcode = ECORE_FILTER_ADD; - SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { - ether_addr_copy(&tmp->mac, - (struct ether_addr *)&mcast.mac[j]); - j++; - } - rc = ecore_filter_mcast_cmd(edev, &mcast, - ECORE_SPQ_MODE_CB, NULL); - } - if (rc != ECORE_SUCCESS) { - DP_ERR(edev, "Failed to add multicast filter" - " rc = %d, op = %d\n", rc, add); - } - } else { /* Unicast */ - if (add) { - if (qdev->num_uc_addr >= - qdev->dev_info.num_mac_filters) { - DP_ERR(edev, - "Ucast filter table limit exceeded," - " Please enable promisc mode\n"); - return -ECORE_INVAL; - } - } - rc = qede_ucast_filter(eth_dev, ucast, add); - if (rc == 0) - rc = ecore_filter_ucast_cmd(edev, ucast, - ECORE_SPQ_MODE_CB, NULL); - if (rc != ECORE_SUCCESS) { - DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n", - rc, add); - } + if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) { + DP_ERR(edev, "Ucast filter table limit exceeded," + " Please enable promisc mode\n"); + return ECORE_INVAL; } + rc = qede_ucast_filter(eth_dev, ucast, add); + if (rc == 0) + rc = ecore_filter_ucast_cmd(edev, ucast, + ECORE_SPQ_MODE_CB, NULL); + /* Indicate error only for add filter operation. + * Delete filter operations are not severe.
+ */ + if ((rc != ECORE_SUCCESS) && add) + DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n", + rc, add); + return rc; } @@ -968,7 +976,11 @@ qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr, struct ecore_filter_ucast ucast; int re; + if (!is_valid_assigned_ether_addr(mac_addr)) + return -EINVAL; + qede_set_ucast_cmn_params(&ucast); + ucast.opcode = ECORE_FILTER_ADD; ucast.type = ECORE_FILTER_MAC; ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac); re = (int)qede_mac_int_ops(eth_dev, &ucast, 1); @@ -990,6 +1002,9 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) return; } + if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index])) + return; + qede_set_ucast_cmn_params(&ucast); ucast.opcode = ECORE_FILTER_REMOVE; ucast.type = ECORE_FILTER_MAC; @@ -998,10 +1013,10 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) ether_addr_copy(&eth_dev->data->mac_addrs[index], (struct ether_addr *)&ucast.mac); - ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL); + qede_mac_int_ops(eth_dev, &ucast, false); } -static void +static int qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); @@ -1010,12 +1025,12 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr) if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev), mac_addr->addr_bytes)) { DP_ERR(edev, "Setting MAC address is not allowed\n"); - ether_addr_copy(&qdev->primary_mac, - &eth_dev->data->mac_addrs[0]); - return; + return -EPERM; } - qede_mac_addr_add(eth_dev, mac_addr, 0, 0); + qede_mac_addr_remove(eth_dev, 0); + + return qede_mac_addr_add(eth_dev, mac_addr, 0, 0); } static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg) @@ -1093,9 +1108,9 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { if (tmp->vid == vlan_id) { - DP_ERR(edev, "VLAN %u already configured\n", - vlan_id); - return -EEXIST; + DP_INFO(edev, "VLAN %u already configured\n", - vlan_id); + return 0; } } @@ -1166,10 +1181,10 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); - struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode; + uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; if (mask & ETH_VLAN_STRIP_MASK) { - if (rxmode->hw_vlan_strip) + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) (void)qede_vlan_stripping(eth_dev, 1); else (void)qede_vlan_stripping(eth_dev, 0); @@ -1177,7 +1192,7 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) if (mask & ETH_VLAN_FILTER_MASK) { /* VLAN filtering kicks in when a VLAN is added */ - if (rxmode->hw_vlan_filter) { + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { qede_vlan_filter_set(eth_dev, 0, 1); } else { if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ @@ -1187,7 +1202,8 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) /* Signal app that VLAN filtering is still * enabled */ - rxmode->hw_vlan_filter = true; + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_VLAN_FILTER; } else { qede_vlan_filter_set(eth_dev, 0, 0); } @@ -1195,13 +1211,11 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) } if (mask & ETH_VLAN_EXTEND_MASK) - DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q" - " and classification is based on outer tag only\n"); +
DP_ERR(edev, "Extend VLAN not supported\n"); qdev->vlan_offload_mask = mask; - DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n", - mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter); + DP_INFO(edev, "VLAN offload mask %d\n", mask); return 0; } @@ -1267,19 +1281,25 @@ static void qede_fastpath_start(struct ecore_dev *edev) static int qede_dev_start(struct rte_eth_dev *eth_dev) { - struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; PMD_INIT_FUNC_TRACE(edev); + /* Update MTU only if it has changed */ + if (eth_dev->data->mtu != qdev->mtu) { + if (qede_update_mtu(eth_dev, qdev->mtu)) + goto err; + } + /* Configure TPA parameters */ - if (rxmode->enable_lro) { + if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) { if (qede_enable_tpa(eth_dev, true)) return -EINVAL; /* Enable scatter mode for LRO */ - if (!rxmode->enable_scatter) - eth_dev->data->scattered_rx = 1; + if (!eth_dev->data->scattered_rx) + rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER; } /* Start queues */ @@ -1294,7 +1314,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) * Also, we would like to retain similar behavior in PF case, so we * don't do PF/VF specific check here. */ - if (rxmode->mq_mode == ETH_MQ_RX_RSS) + if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) if (qede_config_rss(eth_dev)) goto err; @@ -1339,10 +1359,9 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev) DP_INFO(edev, "Device is stopped\n"); } -#define QEDE_TX_SWITCHING "vf_txswitch" - const char *valid_args[] = { - QEDE_TX_SWITCHING, + QEDE_NPAR_TX_SWITCHING, + QEDE_VF_TX_SWITCHING, NULL, }; @@ -1361,8 +1380,13 @@ static int qede_args_check(const char *key, const char *val, void *opaque) return errno; } - if (strcmp(QEDE_TX_SWITCHING, key) == 0) + if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) || + ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) { qdev->enable_tx_switching = !!tmp; + DP_INFO(edev, "Disabling %s tx-switching\n", + strcmp(QEDE_NPAR_TX_SWITCHING, key) ? + "VF" : "NPAR"); + } return ret; } @@ -1411,15 +1435,15 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) /* Check requirements for 100G mode */ if (ECORE_IS_CMT(edev)) { if (eth_dev->data->nb_rx_queues < 2 || - eth_dev->data->nb_tx_queues < 2) { + eth_dev->data->nb_tx_queues < 2) { DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n"); return -EINVAL; } if ((eth_dev->data->nb_rx_queues % 2 != 0) || - (eth_dev->data->nb_tx_queues % 2 != 0)) { + (eth_dev->data->nb_tx_queues % 2 != 0)) { DP_ERR(edev, - "100G mode needs even no. of RX/TX queues\n"); + "100G mode needs even no. 
of RX/TX queues\n"); return -EINVAL; } } @@ -1437,22 +1461,11 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) /* Parse devargs and fix up rxmode */ if (qede_args(eth_dev)) - return -ENOTSUP; + DP_NOTICE(edev, false, + "Invalid devargs supplied, requested change will not take effect\n"); - /* Sanity checks and throw warnings */ - if (rxmode->enable_scatter) - eth_dev->data->scattered_rx = 1; - - if (!rxmode->hw_strip_crc) - DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n"); - - if (!rxmode->hw_ip_checksum) - DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled " - "in hw\n"); - if (rxmode->header_split) - DP_INFO(edev, "Header split enable is not supported\n"); - if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode == - ETH_MQ_RX_RSS)) { + if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || + rxmode->mq_mode == ETH_MQ_RX_RSS)) { DP_ERR(edev, "Unsupported multi-queue mode\n"); return -ENOTSUP; } @@ -1467,19 +1480,22 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) return -ENOMEM; /* If jumbo enabled adjust MTU */ - if (eth_dev->data->dev_conf.rxmode.jumbo_frame) + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) eth_dev->data->mtu = - eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - - ETHER_HDR_LEN - ETHER_CRC_LEN; + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - + ETHER_HDR_LEN - ETHER_CRC_LEN; + + if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) + eth_dev->data->scattered_rx = 1; if (qede_start_vport(qdev, eth_dev->data->mtu)) return -1; + qdev->mtu = eth_dev->data->mtu; /* Enable VLAN offloads by default */ ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK | - ETH_VLAN_FILTER_MASK | - ETH_VLAN_EXTEND_MASK); + ETH_VLAN_FILTER_MASK); if (ret) return ret; @@ -1515,7 +1531,6 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev, PMD_INIT_FUNC_TRACE(edev); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE; dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; dev_info->rx_desc_lim = qede_rx_desc_lim; @@ -1534,26 +1549,42 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev, dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE; dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL; - - dev_info->default_txconf = (struct rte_eth_txconf) { - .txq_flags = QEDE_TXQ_FLAGS, - }; - - dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | + dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_RX_OFFLOAD_TCP_LRO); - + DEV_RX_OFFLOAD_TCP_LRO | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_STRIP); + dev_info->rx_queue_offload_capa = 0; + + /* TX offloads are on a per-packet basis, so it is applicable + * to both at port and queue levels. 
+ */ dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO); + dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .offloads = DEV_TX_OFFLOAD_MULTI_SEGS, + }; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + /* Packets are always dropped if no descriptors are available */ + .rx_drop_en = 1, + .offloads = 0, + }; memset(&link, 0, sizeof(struct qed_link_output)); qdev->ops->common->get_link(edev, &link); @@ -1578,18 +1609,20 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; + struct qed_link_output q_link; + struct rte_eth_link link; uint16_t link_duplex; - struct qed_link_output link; - struct rte_eth_link *curr = ð_dev->data->dev_link; - memset(&link, 0, sizeof(struct qed_link_output)); - qdev->ops->common->get_link(edev, &link); + memset(&q_link, 0, sizeof(q_link)); + memset(&link, 0, sizeof(link)); + + qdev->ops->common->get_link(edev, &q_link); /* Link Speed */ - curr->link_speed = link.speed; + link.link_speed = q_link.speed; /* Link Mode */ - switch (link.duplex) { + switch (q_link.duplex) { case QEDE_DUPLEX_HALF: link_duplex = ETH_LINK_HALF_DUPLEX; break; @@ -1600,21 +1633,20 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) default: link_duplex = -1; } - curr->link_duplex = link_duplex; + link.link_duplex = link_duplex; /* Link Status */ - curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN; + link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN; /* AN */ - curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ? + link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ? ETH_LINK_AUTONEG : ETH_LINK_FIXED; DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n", - curr->link_speed, curr->link_duplex, - curr->link_autoneg, curr->link_status); + link.link_speed, link.link_duplex, + link.link_autoneg, link.link_status); - /* return 0 means link status changed, -1 means not changed */ - return ((curr->link_status == link.link_up) ? 
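[Sketch, not patch content: qede_link_update() is rewritten above to fill a local rte_eth_link and hand it to rte_eth_linkstatus_set(), which updates dev->data->dev_link atomically and reports whether the status changed. A minimal version of the same pattern; the link values are hard-coded where a real PMD would query hardware.]

    #include <string.h>
    #include <rte_ethdev_driver.h>

    static int
    my_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
    {
        struct rte_eth_link link;

        memset(&link, 0, sizeof(link));
        link.link_speed = ETH_SPEED_NUM_10G;     /* from hw in a real PMD */
        link.link_duplex = ETH_LINK_FULL_DUPLEX;
        link.link_status = ETH_LINK_UP;
        link.link_autoneg = ETH_LINK_AUTONEG;

        /* 0 if the status changed, negative if it stayed the same */
        return rte_eth_linkstatus_set(dev, &link);
    }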
-1 : 0); + return rte_eth_linkstatus_set(eth_dev, &link); } static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev) @@ -1661,7 +1693,7 @@ static void qede_poll_sp_sb_cb(void *param) qede_interrupt_action(ECORE_LEADING_HWFN(edev)); qede_interrupt_action(&edev->hwfns[1]); - rc = rte_eal_alarm_set(timer_period * US_PER_S, + rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, qede_poll_sp_sb_cb, (void *)eth_dev); if (rc != 0) { @@ -1700,8 +1732,20 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev) qdev->ops->common->slowpath_stop(edev); qdev->ops->common->remove(edev); rte_intr_disable(&pci_dev->intr_handle); - rte_intr_callback_unregister(&pci_dev->intr_handle, - qede_interrupt_handler, (void *)eth_dev); + + switch (pci_dev->intr_handle.type) { + case RTE_INTR_HANDLE_UIO_INTX: + case RTE_INTR_HANDLE_VFIO_LEGACY: + rte_intr_callback_unregister(&pci_dev->intr_handle, + qede_interrupt_handler_intx, + (void *)eth_dev); + break; + default: + rte_intr_callback_unregister(&pci_dev->intr_handle, + qede_interrupt_handler, + (void *)eth_dev); + } + if (ECORE_IS_CMT(edev)) rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); } @@ -1990,6 +2034,99 @@ static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev) QED_FILTER_RX_MODE_TYPE_REGULAR); } +static int +qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs, + uint32_t mc_addrs_num) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + uint8_t i; + + if (mc_addrs_num > ECORE_MAX_MC_ADDRS) { + DP_ERR(edev, "Reached max multicast filters limit," + "Please enable multicast promisc mode\n"); + return -ENOSPC; + } + + for (i = 0; i < mc_addrs_num; i++) { + if (!is_multicast_ether_addr(&mc_addrs[i])) { + DP_ERR(edev, "Not a valid multicast MAC\n"); + return -EINVAL; + } + } + + /* Flush all existing entries */ + if (qede_del_mcast_filters(eth_dev)) + return -1; + + /* Set new mcast list */ + return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num); +} + +/* Update MTU via vport-update without doing port restart. + * The vport must be deactivated before calling this API. + */ +int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_hwfn *p_hwfn; + int rc; + int i; + + if (IS_PF(edev)) { + struct ecore_sp_vport_update_params params; + + memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); + params.vport_id = 0; + params.mtu = mtu; + params.vport_id = 0; + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_update(p_hwfn, ¶ms, + ECORE_SPQ_MODE_EBLOCK, NULL); + if (rc != ECORE_SUCCESS) + goto err; + } + } else { + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + rc = ecore_vf_pf_update_mtu(p_hwfn, mtu); + if (rc == ECORE_INVAL) { + DP_INFO(edev, "VF MTU Update TLV not supported\n"); + /* Recreate vport */ + rc = qede_start_vport(qdev, mtu); + if (rc != ECORE_SUCCESS) + goto err; + + /* Restore config lost due to vport stop */ + if (eth_dev->data->promiscuous) + qede_promiscuous_enable(eth_dev); + else + qede_promiscuous_disable(eth_dev); + + if (eth_dev->data->all_multicast) + qede_allmulticast_enable(eth_dev); + else + qede_allmulticast_disable(eth_dev); + + qede_vlan_offload_set(eth_dev, + qdev->vlan_offload_mask); + } else if (rc != ECORE_SUCCESS) { + goto err; + } + } + } + DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? 
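[Sketch, not patch content: qede_set_mc_addr_list() above validates the entire list before flushing and reprogramming the multicast filters, so one bad entry cannot leave the port half-configured. The validation step in isolation; the limit constant is illustrative, not the qede value.]

    #include <errno.h>
    #include <rte_ether.h>

    #define MAX_MC_ADDRS 64   /* illustrative limit only */

    static int
    validate_mc_list(const struct ether_addr *addrs, uint32_t num)
    {
        uint32_t i;

        if (num > MAX_MC_ADDRS)
            return -ENOSPC;   /* fall back to multicast promisc mode */
        for (i = 0; i < num; i++)
            if (!is_multicast_ether_addr(&addrs[i]))
                return -EINVAL;
        return 0;
    }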
"PF" : "VF", mtu); + + return 0; + +err: + DP_ERR(edev, "Failed to update MTU\n"); + return -1; +} + static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev, struct rte_eth_fc_conf *fc_conf) { @@ -2065,6 +2202,7 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) RTE_PTYPE_TUNNEL_VXLAN, RTE_PTYPE_L4_FRAG, RTE_PTYPE_TUNNEL_GENEVE, + RTE_PTYPE_TUNNEL_GRE, /* Inner */ RTE_PTYPE_INNER_L2_ETHER, RTE_PTYPE_INNER_L2_ETHER_VLAN, @@ -2143,7 +2281,7 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev, vport_update_params.vport_id = 0; /* pass the L2 handles instead of qids */ for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) { - idx = qdev->rss_ind_table[i]; + idx = i % QEDE_RSS_COUNT(qdev); rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle; } vport_update_params.rss_params = &rss_params; @@ -2393,7 +2531,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) restart = true; } rte_delay_ms(1000); - qede_start_vport(qdev, mtu); /* Recreate vport */ qdev->mtu = mtu; /* Fix up RX buf size for all queues of the port */ @@ -2413,23 +2550,9 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) } } if (max_rx_pkt_len > ETHER_MAX_LEN) - dev->data->dev_conf.rxmode.jumbo_frame = 1; - else - dev->data->dev_conf.rxmode.jumbo_frame = 0; - - /* Restore config lost due to vport stop */ - qede_mac_addr_set(dev, &qdev->primary_mac); - if (dev->data->promiscuous) - qede_promiscuous_enable(dev); - else - qede_promiscuous_disable(dev); - - if (dev->data->all_multicast) - qede_allmulticast_enable(dev); + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; else - qede_allmulticast_disable(dev); - - qede_vlan_offload_set(dev, qdev->vlan_offload_mask); + dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; if (!dev->data->dev_started && restart) { qede_dev_start(dev); @@ -2488,7 +2611,6 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev, ECORE_TUNN_CLSS_MAC_VLAN, false); break; - case RTE_TUNNEL_TYPE_GENEVE: if (qdev->geneve.udp_port != tunnel_udp->udp_port) { DP_ERR(edev, "UDP port %u doesn't exist\n", @@ -2578,7 +2700,6 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev, qdev->vxlan.udp_port = udp_port; break; - case RTE_TUNNEL_TYPE_GENEVE: if (qdev->geneve.udp_port == tunnel_udp->udp_port) { DP_INFO(edev, @@ -2616,7 +2737,6 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev, qdev->geneve.udp_port = udp_port; break; - default: return ECORE_INVAL; } @@ -2782,7 +2902,8 @@ qede_tunn_filter_config(struct rte_eth_dev *eth_dev, qdev->geneve.filter_type = conf->filter_type; } - if (!qdev->vxlan.enable || !qdev->geneve.enable) + if (!qdev->vxlan.enable || !qdev->geneve.enable || + !qdev->ipgre.enable) return qede_tunn_enable(eth_dev, clss, conf->tunnel_type, true); @@ -2818,15 +2939,14 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev, switch (filter_conf->tunnel_type) { case RTE_TUNNEL_TYPE_VXLAN: case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_IP_IN_GRE: DP_INFO(edev, "Packet steering to the specified Rx queue" " is not supported with UDP tunneling"); return(qede_tunn_filter_config(eth_dev, filter_op, filter_conf)); - /* Place holders for future tunneling support */ case RTE_TUNNEL_TYPE_TEREDO: case RTE_TUNNEL_TYPE_NVGRE: - case RTE_TUNNEL_TYPE_IP_IN_GRE: case RTE_L2_TUNNEL_TYPE_E_TAG: DP_ERR(edev, "Unsupported tunnel type %d\n", filter_conf->tunnel_type); @@ -2871,6 +2991,7 @@ static const struct eth_dev_ops qede_eth_dev_ops = { .promiscuous_disable = qede_promiscuous_disable, .allmulticast_enable = qede_allmulticast_enable, 
.allmulticast_disable = qede_allmulticast_disable, + .set_mc_addr_list = qede_set_mc_addr_list, .dev_stop = qede_dev_stop, .dev_close = qede_dev_close, .stats_get = qede_get_stats, @@ -2911,6 +3032,7 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = { .promiscuous_disable = qede_promiscuous_disable, .allmulticast_enable = qede_allmulticast_enable, .allmulticast_disable = qede_allmulticast_disable, + .set_mc_addr_list = qede_set_mc_addr_list, .dev_stop = qede_dev_stop, .dev_close = qede_dev_close, .stats_get = qede_get_stats, @@ -2928,6 +3050,9 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = { .mtu_set = qede_set_mtu, .udp_tunnel_port_add = qede_udp_dst_port_add, .udp_tunnel_port_del = qede_udp_dst_port_del, + .mac_addr_add = qede_mac_addr_add, + .mac_addr_remove = qede_mac_addr_remove, + .mac_addr_set = qede_mac_addr_set, }; static void qede_update_pf_params(struct ecore_dev *edev) @@ -2956,6 +3081,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) /* Fix up ecore debug level */ uint32_t dp_module = ~0 & ~ECORE_MSG_HW; uint8_t dp_level = ECORE_LEVEL_VERBOSE; + uint32_t int_mode; int rc; /* Extract key data structures */ @@ -3000,8 +3126,22 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) return -ENODEV; } qede_update_pf_params(edev); - rte_intr_callback_register(&pci_dev->intr_handle, - qede_interrupt_handler, (void *)eth_dev); + + switch (pci_dev->intr_handle.type) { + case RTE_INTR_HANDLE_UIO_INTX: + case RTE_INTR_HANDLE_VFIO_LEGACY: + int_mode = ECORE_INT_MODE_INTA; + rte_intr_callback_register(&pci_dev->intr_handle, + qede_interrupt_handler_intx, + (void *)eth_dev); + break; + default: + int_mode = ECORE_INT_MODE_MSIX; + rte_intr_callback_register(&pci_dev->intr_handle, + qede_interrupt_handler, + (void *)eth_dev); + } + if (rte_intr_enable(&pci_dev->intr_handle)) { DP_ERR(edev, "rte_intr_enable() failed\n"); return -ENODEV; @@ -3009,7 +3149,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) /* Start the Slowpath-process */ memset(¶ms, 0, sizeof(struct qed_slowpath_params)); - params.int_mode = ECORE_INT_MODE_MSIX; + + params.int_mode = int_mode; params.drv_major = QEDE_PMD_VERSION_MAJOR; params.drv_minor = QEDE_PMD_VERSION_MINOR; params.drv_rev = QEDE_PMD_VERSION_REVISION; @@ -3022,7 +3163,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) * interrupt vector but we need one for each engine. 
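[Sketch, not patch content: both the close path above and the init path below branch on the interrupt handle type, so legacy INTx (UIO/VFIO) gets its own handler and ECORE_INT_MODE_INTA while everything else stays on MSI-X. The selection predicate on its own:]

    #include <stdbool.h>
    #include <rte_interrupts.h>

    static bool
    uses_legacy_intx(const struct rte_intr_handle *ih)
    {
        switch (ih->type) {
        case RTE_INTR_HANDLE_UIO_INTX:
        case RTE_INTR_HANDLE_VFIO_LEGACY:
            return true;      /* register the INTx handler, INTA mode */
        default:
            return false;     /* MSI-X handler and mode */
        }
    }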
*/ if (ECORE_IS_CMT(edev) && IS_PF(edev)) { - rc = rte_eal_alarm_set(timer_period * US_PER_S, + rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, qede_poll_sp_sb_cb, (void *)eth_dev); if (rc != 0) { @@ -3092,7 +3233,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) ECORE_LEADING_HWFN(edev), vf_mac, &is_mac_forced); - if (is_mac_exist && is_mac_forced) { + if (is_mac_exist) { DP_INFO(edev, "VF macaddr received from PF\n"); ether_addr_copy((struct ether_addr *)&vf_mac, ð_dev->data->mac_addrs[0]); @@ -3119,25 +3260,30 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) SLIST_INIT(&adapter->fdir_info.fdir_list_head); SLIST_INIT(&adapter->vlan_list_head); SLIST_INIT(&adapter->uc_list_head); + SLIST_INIT(&adapter->mc_list_head); adapter->mtu = ETHER_MTU; adapter->vport_started = false; /* VF tunnel offloads is enabled by default in PF driver */ adapter->vxlan.num_filters = 0; adapter->geneve.num_filters = 0; + adapter->ipgre.num_filters = 0; if (is_vf) { adapter->vxlan.enable = true; adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN; adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT; adapter->geneve.enable = true; - adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN; adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT; + adapter->ipgre.enable = true; + adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC | + ETH_TUNNEL_FILTER_IVLAN; } else { adapter->vxlan.enable = false; adapter->geneve.enable = false; + adapter->ipgre.enable = false; } DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n", @@ -3295,9 +3441,7 @@ RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map); RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci"); -RTE_INIT(qede_init_log); -static void -qede_init_log(void) +RTE_INIT(qede_init_log) { qede_logtype_init = rte_log_register("pmd.net.qede.init"); if (qede_logtype_init >= 0) diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h index 23f7e0e2..6e9a5b4b 100644 --- a/drivers/net/qede/qede_ethdev.h +++ b/drivers/net/qede/qede_ethdev.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
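[Sketch, not patch content: the RTE_INIT(qede_init_log) change further down reflects the 18.08 macro, which now expands to the full constructor definition, making the separate prototype and static function body redundant. The pattern for a new driver; the logtype name is hypothetical.]

    #include <rte_log.h>

    static int my_pmd_logtype;

    RTE_INIT(my_pmd_init_log)
    {
        my_pmd_logtype = rte_log_register("pmd.net.mydrv");
        if (my_pmd_logtype >= 0)
            rte_log_set_level(my_pmd_logtype, RTE_LOG_NOTICE);
    }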
+ * www.cavium.com */ @@ -34,6 +32,7 @@ #include "base/nvm_cfg.h" #include "base/ecore_sp_commands.h" #include "base/ecore_l2.h" +#include "base/ecore_vf.h" #include "qede_logs.h" #include "qede_if.h" @@ -45,7 +44,7 @@ /* Driver versions */ #define QEDE_PMD_VER_PREFIX "QEDE PMD" #define QEDE_PMD_VERSION_MAJOR 2 -#define QEDE_PMD_VERSION_MINOR 7 +#define QEDE_PMD_VERSION_MINOR 9 #define QEDE_PMD_VERSION_REVISION 0 #define QEDE_PMD_VERSION_PATCH 1 @@ -170,7 +169,7 @@ struct qede_fdir_info { #define QEDE_VXLAN_DEF_PORT (4789) #define QEDE_GENEVE_DEF_PORT (6081) -struct qede_udp_tunn { +struct qede_tunn_params { bool enable; uint16_t num_filters; uint16_t filter_type; @@ -205,8 +204,9 @@ struct qede_dev { SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head; uint16_t num_uc_addr; bool handle_hw_err; - struct qede_udp_tunn vxlan; - struct qede_udp_tunn geneve; + struct qede_tunn_params vxlan; + struct qede_tunn_params geneve; + struct qede_tunn_params ipgre; struct qede_fdir_info fdir_info; bool vlan_strip_flg; char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE]; diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c index da6364ee..83580d04 100644 --- a/drivers/net/qede/qede_fdir.c +++ b/drivers/net/qede/qede_fdir.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2017 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2017 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include @@ -141,8 +139,8 @@ qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev, if (add) { SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) { if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) { - DP_ERR(edev, "flowdir filter exist\n"); - rc = -EEXIST; + DP_INFO(edev, "flowdir filter exist\n"); + rc = 0; goto err2; } } @@ -465,5 +463,8 @@ int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev, udpv4_flow->src_port = ntuple->src_port; udpv4_flow->dst_port = ntuple->dst_port; } + + fdir_entry.action.rx_queue = ntuple->queue; + return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add); } diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h index 246f0fd3..ee5e54c1 100644 --- a/drivers/net/qede/qede_if.h +++ b/drivers/net/qede/qede_if.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef _QEDE_IF_H diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h index 159315e7..3187d97b 100644 --- a/drivers/net/qede/qede_logs.h +++ b/drivers/net/qede/qede_logs.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef _QEDE_LOGS_H_ diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c index ae187321..46fa8371 100644 --- a/drivers/net/qede/qede_main.c +++ b/drivers/net/qede/qede_main.c @@ -1,14 +1,13 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
+ * www.cavium.com */ #include #include #include +#include #include "qede_ethdev.h" @@ -19,7 +18,7 @@ char fw_file[PATH_MAX]; const char *QEDE_DEFAULT_FIRMWARE = - "/lib/firmware/qed/qed_init_values-8.30.12.0.bin"; + "/lib/firmware/qed/qed_init_values-8.33.12.0.bin"; static void qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params) @@ -62,6 +61,7 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev, hw_prepare_params.chk_reg_fifo = false; hw_prepare_params.initiate_pf_flr = true; hw_prepare_params.allow_mdump = false; + hw_prepare_params.b_en_pacing = false; hw_prepare_params.epoch = (u32)time(NULL); rc = ecore_hw_prepare(edev, &hw_prepare_params); if (rc) { @@ -279,7 +279,7 @@ static int qed_slowpath_start(struct ecore_dev *edev, /* Start the slowpath */ memset(&hw_init_params, 0, sizeof(hw_init_params)); hw_init_params.b_hw_start = true; - hw_init_params.int_mode = ECORE_INT_MODE_MSIX; + hw_init_params.int_mode = params->int_mode; hw_init_params.allow_npar_tx_switch = true; hw_init_params.bin_fw_data = data; @@ -302,9 +302,8 @@ static int qed_slowpath_start(struct ecore_dev *edev, drv_version.version = (params->drv_major << 24) | (params->drv_minor << 16) | (params->drv_rev << 8) | (params->drv_eng); - /* TBD: strlcpy() */ - strncpy((char *)drv_version.name, (const char *)params->name, - MCP_DRV_VER_STR_SIZE - 4); + strlcpy((char *)drv_version.name, (const char *)params->name, + sizeof(drv_version.name)); rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, &drv_version); if (rc) { @@ -633,8 +632,11 @@ void qed_link_update(struct ecore_hwfn *hwfn) { struct ecore_dev *edev = hwfn->p_dev; struct qede_dev *qdev = (struct qede_dev *)edev; + struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev; - qede_link_update((struct rte_eth_dev *)qdev->ethdev, 0); + if (!qede_link_update(dev, 0)) + _rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_INTR_LSC, NULL); } static int qed_drain(struct ecore_dev *edev) diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c index 0de7c6b8..0f157ded 100644 --- a/drivers/net/qede/qede_rxtx.c +++ b/drivers/net/qede/qede_rxtx.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
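[Sketch, not patch content: the strncpy()-to-strlcpy() swap above closes the classic unterminated-copy hole. rte_string_fns.h supplies strlcpy() (as rte_strlcpy()) where libc lacks it.]

    #include <stddef.h>
    #include <rte_string_fns.h>

    /* Bounded copy that always NUL-terminates, unlike strncpy(), which
     * leaves dst unterminated when src fills the whole buffer. */
    static void
    set_drv_name(char *dst, size_t dst_size, const char *src)
    {
        strlcpy(dst, src, dst_size);
    }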
+ * www.cavium.com */ #include @@ -87,7 +85,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, /* Fix up RX buffer size */ bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; - if ((rxmode->enable_scatter) || + if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) || (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) { if (!dev->data->scattered_rx) { DP_INFO(edev, "Forcing scatter-gather mode\n"); @@ -192,9 +190,15 @@ static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq) void qede_rx_queue_release(void *rx_queue) { struct qede_rx_queue *rxq = rx_queue; + struct qede_dev *qdev = rxq->qdev; + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + + PMD_INIT_FUNC_TRACE(edev); if (rxq) { qede_rx_queue_release_mbufs(rxq); + qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring); + qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring); rte_free(rxq->sw_rx_ring); rte_free(rxq); } @@ -350,9 +354,14 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq) void qede_tx_queue_release(void *tx_queue) { struct qede_tx_queue *txq = tx_queue; + struct qede_dev *qdev = txq->qdev; + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + + PMD_INIT_FUNC_TRACE(edev); if (txq) { qede_tx_queue_release_mbufs(txq); + qdev->ops->common->chain_free(edev, &txq->tx_pbl); rte_free(txq->sw_tx_ring); rte_free(txq); } @@ -441,8 +450,6 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev) struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct qede_fastpath *fp; - struct qede_rx_queue *rxq; - struct qede_tx_queue *txq; uint16_t sb_idx; uint8_t i; @@ -467,21 +474,13 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev) for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { if (eth_dev->data->rx_queues[i]) { qede_rx_queue_release(eth_dev->data->rx_queues[i]); - rxq = eth_dev->data->rx_queues[i]; - qdev->ops->common->chain_free(edev, - &rxq->rx_bd_ring); - qdev->ops->common->chain_free(edev, - &rxq->rx_comp_ring); eth_dev->data->rx_queues[i] = NULL; } } for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { if (eth_dev->data->tx_queues[i]) { - txq = eth_dev->data->tx_queues[i]; qede_tx_queue_release(eth_dev->data->tx_queues[i]); - qdev->ops->common->chain_free(edev, - &txq->tx_pbl); eth_dev->data->tx_queues[i] = NULL; } } @@ -1466,6 +1465,8 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) */ rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM; packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb); + } else { + packet_type |= qede_rx_cqe_to_pkt_type(parse_flag); } /* Common handling for non-tunnel packets and for inner @@ -1487,7 +1488,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) ol_flags |= PKT_RX_IP_CKSUM_BAD; } else { ol_flags |= PKT_RX_IP_CKSUM_GOOD; - packet_type |= qede_rx_cqe_to_pkt_type(parse_flag); } if (CQE_HAS_VLAN(parse_flag) || @@ -1631,6 +1631,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg, QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len); PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len); } + start_seg++; m_seg = m_seg->next; } @@ -1837,17 +1838,14 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) * offloads. Don't rely on pkt_type marked by Rx, instead use * tx_ol_flags to decide. 
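[Sketch, not patch content: in qede_rx_queue_setup() above, scatter-gather Rx is forced whenever the SCATTER offload is requested or a max-size frame plus the driver's per-packet overhead cannot fit one mbuf data room. The decision condensed, with an illustrative overhead constant standing in for QEDE_ETH_OVERHEAD:]

    #include <stdbool.h>
    #include <rte_ethdev.h>

    #define ETH_RX_OVERHEAD 24   /* illustrative value only */

    static bool
    need_scatter_rx(uint64_t rx_offloads, uint32_t max_rx_pkt_len,
                    uint16_t bufsz)
    {
        return (rx_offloads & DEV_RX_OFFLOAD_SCATTER) ||
               (max_rx_pkt_len + ETH_RX_OVERHEAD > bufsz);
    }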
*/ - if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) == - PKT_TX_TUNNEL_VXLAN) || - ((tx_ol_flags & PKT_TX_TUNNEL_MASK) == - PKT_TX_TUNNEL_MPLSINUDP) || - ((tx_ol_flags & PKT_TX_TUNNEL_MASK) == - PKT_TX_TUNNEL_GENEVE)) { + tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK); + + if (tunn_flg) { /* Check against max which is Tunnel IPv6 + ext */ if (unlikely(txq->nb_tx_avail < ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT)) break; - tunn_flg = true; + /* First indicate its a tunnel pkt */ bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; @@ -1971,7 +1969,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) } /* Descriptor based VLAN insertion */ - if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) { + if (tx_ol_flags & PKT_TX_VLAN_PKT) { vlan = rte_cpu_to_le_16(mbuf->vlan_tci); bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT; @@ -1986,7 +1984,8 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) * csum offload is requested then we need to force * recalculation of L4 tunnel header csum also. */ - if (tunn_flg) { + if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) != + PKT_TX_TUNNEL_GRE)) { bd1_bd_flags_bf |= ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK << ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h index f1d36661..e710fbae 100644 --- a/drivers/net/qede/qede_rxtx.h +++ b/drivers/net/qede/qede_rxtx.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ @@ -76,8 +74,6 @@ ETH_RSS_VXLAN |\ ETH_RSS_GENEVE) -#define QEDE_TXQ_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS) - #define for_each_rss(i) for (i = 0; i < qdev->num_rx_queues; i++) #define for_each_tss(i) for (i = 0; i < qdev->num_tx_queues; i++) #define QEDE_RXTX_MAX(qdev) \ @@ -149,11 +145,11 @@ PKT_TX_TCP_SEG) #define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \ - PKT_TX_QINQ_PKT | \ PKT_TX_VLAN_PKT | \ PKT_TX_TUNNEL_VXLAN | \ PKT_TX_TUNNEL_GENEVE | \ - PKT_TX_TUNNEL_MPLSINUDP) + PKT_TX_TUNNEL_MPLSINUDP | \ + PKT_TX_TUNNEL_GRE) #define QEDE_TX_OFFLOAD_NOTSUP_MASK \ (PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK) diff --git a/drivers/net/ring/meson.build b/drivers/net/ring/meson.build index e877a4b4..7659b04f 100644 --- a/drivers/net/ring/meson.build +++ b/drivers/net/ring/meson.build @@ -1,5 +1,6 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation +version = 2 sources = files('rte_eth_ring.c') install_headers('rte_eth_ring.h') diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c index df13c44b..791deb0b 100644 --- a/drivers/net/ring/rte_eth_ring.c +++ b/drivers/net/ring/rte_eth_ring.c @@ -60,9 +60,15 @@ static struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_AUTONEG + .link_autoneg = ETH_LINK_FIXED, }; +static int eth_ring_logtype; + +#define PMD_LOG(level, fmt, args...) 
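[Sketch, not patch content: the Tx rework above replaces the per-type tunnel comparisons with a single PKT_TX_TUNNEL_MASK test and then exempts GRE from the tunnel L4 checksum recalculation. The predicate in isolation:]

    #include <stdbool.h>
    #include <rte_mbuf.h>

    static bool
    needs_tunn_l4_csum(uint64_t ol_flags)
    {
        if (!(ol_flags & PKT_TX_TUNNEL_MASK))
            return false;                       /* not tunnelled */
        /* GRE has no outer UDP header, so no tunnel L4 csum to redo */
        return (ol_flags & PKT_TX_TUNNEL_MASK) != PKT_TX_TUNNEL_GRE;
    }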
\ + rte_log(RTE_LOG_ ## level, eth_ring_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + static uint16_t eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) { @@ -158,6 +164,7 @@ eth_dev_info(struct rte_eth_dev *dev, dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues; dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues; dev_info->min_rx_bufsize = 0; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP; } static int @@ -256,18 +263,9 @@ do_eth_dev_ring_create(const char *name, void **tx_queues_local = NULL; unsigned i; - RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n", + PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u", numa_node); - /* now do all data allocation - for eth_dev structure, dummy pci driver - * and internal (private) data - */ - data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); - if (data == NULL) { - rte_errno = ENOMEM; - goto error; - } - rx_queues_local = rte_zmalloc_socket(name, sizeof(void *) * nb_rx_queues, 0, numa_node); if (rx_queues_local == NULL) { @@ -301,10 +299,8 @@ do_eth_dev_ring_create(const char *name, * - point eth_dev_data to internals * - and point eth_dev structure to new eth_dev_data structure */ - /* NOTE: we'll replace the data element, of originally allocated eth_dev - * so the rings are local per-process */ - rte_memcpy(data, eth_dev->data, sizeof(*data)); + data = eth_dev->data; data->rx_queues = rx_queues_local; data->tx_queues = tx_queues_local; @@ -326,7 +322,6 @@ do_eth_dev_ring_create(const char *name, data->dev_link = pmd_link; data->mac_addrs = &internals->address; - eth_dev->data = data; eth_dev->dev_ops = &ops; data->kdrv = RTE_KDRV_NONE; data->numa_node = numa_node; @@ -335,6 +330,7 @@ do_eth_dev_ring_create(const char *name, eth_dev->rx_pkt_burst = eth_ring_rx; eth_dev->tx_pkt_burst = eth_ring_tx; + rte_eth_dev_probing_finish(eth_dev); *eth_dev_p = eth_dev; return data->port_id; @@ -342,7 +338,6 @@ do_eth_dev_ring_create(const char *name, error: rte_free(rx_queues_local); rte_free(tx_queues_local); - rte_free(data); rte_free(internals); return -1; @@ -459,13 +454,13 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void * ret = -EINVAL; if (!name) { - RTE_LOG(WARNING, PMD, "command line parameter is empty for ring pmd!\n"); + PMD_LOG(WARNING, "command line parameter is empty for ring pmd!"); goto out; } node = strchr(name, ':'); if (!node) { - RTE_LOG(WARNING, PMD, "could not parse node value from %s\n", + PMD_LOG(WARNING, "could not parse node value from %s", name); goto out; } @@ -475,7 +470,7 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void * action = strchr(node, ':'); if (!action) { - RTE_LOG(WARNING, PMD, "could not parse action value from %s\n", + PMD_LOG(WARNING, "could not parse action value from %s", node); goto out; } @@ -498,7 +493,8 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void * info->list[info->count].node = strtol(node, &end, 10); if ((errno != 0) || (*end != '\0')) { - RTE_LOG(WARNING, PMD, "node value %s is unparseable as a number\n", node); + PMD_LOG(WARNING, + "node value %s is unparseable as a number", node); goto out; } @@ -542,14 +538,14 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) name = rte_vdev_device_name(dev); params = rte_vdev_device_args(dev); - RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name); + PMD_LOG(INFO, "Initializing pmd_ring for %s", name); if (params == NULL || params[0] == '\0') { ret = 
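[Sketch, not patch content: parse_kvlist() above splits each devarg value as "name:node:action" with strchr() and validates the node with strtol(). The split on its own, with error handling condensed:]

    #include <stdlib.h>
    #include <string.h>

    /* On success, *name and *action point into buf and *node is parsed;
     * returns -1 on any malformed input. */
    static int
    split_ring_arg(char *buf, char **name, long *node, char **action)
    {
        char *sep, *end;

        *name = buf;
        sep = strchr(buf, ':');
        if (sep == NULL)
            return -1;                /* missing node */
        *sep++ = '\0';
        *node = strtol(sep, &end, 10);
        if (end == NULL || *end != ':')
            return -1;                /* node not numeric, or no action */
        *action = end + 1;
        return 0;
    }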
eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE, ð_dev); if (ret == -1) { - RTE_LOG(INFO, PMD, - "Attach to pmd_ring for %s\n", name); + PMD_LOG(INFO, + "Attach to pmd_ring for %s", name); ret = eth_dev_ring_create(name, rte_socket_id(), DEV_ATTACH, ð_dev); } @@ -557,13 +553,13 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) kvlist = rte_kvargs_parse(params, valid_arguments); if (!kvlist) { - RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating" - " rings-backed ethernet device\n"); + PMD_LOG(INFO, "Ignoring unsupported parameters when creating" + " rings-backed ethernet device"); ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE, ð_dev); if (ret == -1) { - RTE_LOG(INFO, PMD, - "Attach to pmd_ring for %s\n", + PMD_LOG(INFO, + "Attach to pmd_ring for %s", name); ret = eth_dev_ring_create(name, rte_socket_id(), DEV_ATTACH, ð_dev); @@ -617,8 +613,8 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) ð_dev); if ((ret == -1) && (info->list[info->count].action == DEV_CREATE)) { - RTE_LOG(INFO, PMD, - "Attach to pmd_ring for %s\n", + PMD_LOG(INFO, + "Attach to pmd_ring for %s", name); ret = eth_dev_ring_create(name, info->list[info->count].node, @@ -647,7 +643,7 @@ rte_pmd_ring_remove(struct rte_vdev_device *dev) struct ring_queue *r = NULL; uint16_t i; - RTE_LOG(INFO, PMD, "Un-Initializing pmd_ring for %s\n", name); + PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name); if (name == NULL) return -EINVAL; @@ -675,8 +671,6 @@ rte_pmd_ring_remove(struct rte_vdev_device *dev) rte_free(eth_dev->data->tx_queues); rte_free(eth_dev->data->dev_private); - rte_free(eth_dev->data); - rte_eth_dev_release_port(eth_dev); return 0; } @@ -690,3 +684,10 @@ RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv); RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring); RTE_PMD_REGISTER_PARAM_STRING(net_ring, ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)"); + +RTE_INIT(eth_ring_init_log) +{ + eth_ring_logtype = rte_log_register("pmd.net.ring"); + if (eth_ring_logtype >= 0) + rte_log_set_level(eth_ring_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile index 8a671dd2..3bb41a00 100644 --- a/drivers/net/sfc/Makefile +++ b/drivers/net/sfc/Makefile @@ -46,11 +46,11 @@ else ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable # Suppress ICC false positive warning on 'bulk' may be used before its # value is set -CFLAGS_sfc_ef10_tx.o += -wd3656 +CFLAGS_sfc_ef10_tx.o += -diag-disable 3656 endif LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -LDLIBS += -lrte_bus_pci +LDLIBS += -lrte_bus_pci -lrte_pci # # List of base driver object files for which @@ -81,6 +81,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_filter.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_flow.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_dp.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_rx.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_essb_rx.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_tx.c VPATH += $(SRCDIR)/base @@ -115,6 +116,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_vpd.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_ev.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_filter.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_intr.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_image.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mac.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mcdi.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_nic.c @@ -125,5 +127,6 @@ 
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_tx.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_vpd.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += hunt_nic.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford_nic.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford2_nic.c include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/sfc/base/ef10_ev.c b/drivers/net/sfc/base/ef10_ev.c index 05700c5c..7f89a7bf 100644 --- a/drivers/net/sfc/base/ef10_ev.c +++ b/drivers/net/sfc/base/ef10_ev.c @@ -10,7 +10,7 @@ #include "mcdi_mon.h" #endif -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 #if EFSYS_OPT_QSTATS #define EFX_EV_QSTAT_INCR(_eep, _stat) \ @@ -549,7 +549,8 @@ ef10_ev_qdestroy( efx_nic_t *enp = eep->ee_enp; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); (void) efx_mcdi_fini_evq(enp, eep->ee_index); } @@ -576,7 +577,7 @@ ef10_ev_qprime( EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, ERF_DD_EVQ_IND_RPTR, (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH)); - EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, + EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); EFX_POPULATE_DWORD_2(dword, @@ -584,11 +585,11 @@ ef10_ev_qprime( EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, ERF_DD_EVQ_IND_RPTR, rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); - EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, + EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); } else { EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr); - EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index, + EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index, &dword, B_FALSE); } @@ -701,13 +702,19 @@ ef10_ev_qmoderate( EFE_DD_EVQ_IND_TIMER_FLAGS, ERF_DD_EVQ_IND_TIMER_MODE, mode, ERF_DD_EVQ_IND_TIMER_VAL, ticks); - EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, + EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, 0); } else { - EFX_POPULATE_DWORD_2(dword, + /* + * NOTE: The TMR_REL field introduced in Medford2 is + * ignored on earlier EF10 controllers. See bug66418 + * comment 9 for details. + */ + EFX_POPULATE_DWORD_3(dword, ERF_DZ_TC_TIMER_MODE, mode, - ERF_DZ_TC_TIMER_VAL, ticks); - EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG, + ERF_DZ_TC_TIMER_VAL, ticks, + ERF_FZ_TC_TMR_REL_VAL, ticks); + EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG, eep->ee_index, &dword, 0); } } @@ -742,7 +749,7 @@ ef10_ev_qstats_update( } #endif /* EFSYS_OPT_QSTATS */ -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER static __checkReturn boolean_t ef10_ev_rx_packed_stream( @@ -781,14 +788,25 @@ ef10_ev_rx_packed_stream( if (new_buffer) { flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER; +#if EFSYS_OPT_RX_PACKED_STREAM + /* + * If both packed stream and equal stride super-buffer + * modes are compiled in, in theory credits should be + * be maintained for packed stream only, but right now + * these modes are not distinguished in the event queue + * Rx queue state and it is OK to increment the counter + * regardless (it might be event cheaper than branching + * since neighbour structure member are updated as well). 
+ */ eersp->eers_rx_packed_stream_credits++; +#endif eersp->eers_rx_read_ptr++; } current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask; /* Check for errors that invalidate checksum and L3/L4 fields */ - if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) { - /* RX frame truncated (error flag is misnamed) */ + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) { + /* RX frame truncated */ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); flags |= EFX_DISCARD; goto deliver; @@ -823,7 +841,7 @@ deliver: return (should_abort); } -#endif /* EFSYS_OPT_RX_PACKED_STREAM */ +#endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */ static __checkReturn boolean_t ef10_ev_rx( @@ -857,7 +875,7 @@ ef10_ev_rx( label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL); eersp = &eep->ee_rxq_state[label]; -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER /* * Packed stream events are very different, * so handle them separately @@ -867,12 +885,23 @@ ef10_ev_rx( #endif size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES); + cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT); next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS); eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS); mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS); l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS); - l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS); - cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT); + + /* + * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only + * 2 bits wide on Medford2. Check it is safe to use the Medford2 field + * and values for all EF10 controllers. + */ + EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN); + + l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) { /* Drop this event */ @@ -914,8 +943,8 @@ ef10_ev_rx( last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask; /* Check for errors that invalidate checksum and L3/L4 fields */ - if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) { - /* RX frame truncated (error flag is misnamed) */ + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) { + /* RX frame truncated */ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); flags |= EFX_DISCARD; goto deliver; @@ -951,10 +980,22 @@ ef10_ev_rx( flags |= EFX_CKSUM_IPV4; } - if (l4_class == ESE_DZ_L4_CLASS_TCP) { + /* + * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is + * only 2 bits wide on Medford2. Check it is safe to use the + * Medford2 field and values for all EF10 controllers. 
+ */ + EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == + ESF_DE_RX_L4_CLASS_LBN); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == + ESE_DE_L4_CLASS_UNKNOWN); + + if (l4_class == ESE_FZ_L4_CLASS_TCP) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); flags |= EFX_PKT_TCP; - } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { + } else if (l4_class == ESE_FZ_L4_CLASS_UDP) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); flags |= EFX_PKT_UDP; } else { @@ -966,10 +1007,22 @@ ef10_ev_rx( case ESE_DZ_L3_CLASS_IP6_FRAG: flags |= EFX_PKT_IPV6; - if (l4_class == ESE_DZ_L4_CLASS_TCP) { + /* + * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is + * only 2 bits wide on Medford2. Check it is safe to use the + * Medford2 field and values for all EF10 controllers. + */ + EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == + ESF_DE_RX_L4_CLASS_LBN); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == + ESE_DE_L4_CLASS_UNKNOWN); + + if (l4_class == ESE_FZ_L4_CLASS_TCP) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); flags |= EFX_PKT_TCP; - } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { + } else if (l4_class == ESE_FZ_L4_CLASS_UDP) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); flags |= EFX_PKT_UDP; } else { @@ -1322,8 +1375,9 @@ ef10_ev_rxlabel_init( __in efx_rxq_type_t type) { efx_evq_rxq_state_t *eersp; -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM); + boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER); #endif _NOTE(ARGUNUSED(type)) @@ -1345,9 +1399,11 @@ ef10_ev_rxlabel_init( eersp->eers_rx_read_ptr = 0; #endif eersp->eers_rx_mask = erp->er_mask; -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER eersp->eers_rx_stream_npackets = 0; - eersp->eers_rx_packed_stream = packed_stream; + eersp->eers_rx_packed_stream = packed_stream || es_super_buffer; +#endif +#if EFSYS_OPT_RX_PACKED_STREAM if (packed_stream) { eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) / EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT, @@ -1381,11 +1437,13 @@ ef10_ev_rxlabel_fini( eersp->eers_rx_read_ptr = 0; eersp->eers_rx_mask = 0; -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER eersp->eers_rx_stream_npackets = 0; eersp->eers_rx_packed_stream = B_FALSE; +#endif +#if EFSYS_OPT_RX_PACKED_STREAM eersp->eers_rx_packed_stream_credits = 0; #endif } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_filter.c b/drivers/net/sfc/base/ef10_filter.c index 27b59987..ae872853 100644 --- a/drivers/net/sfc/base/ef10_filter.c +++ b/drivers/net/sfc/base/ef10_filter.c @@ -7,7 +7,7 @@ #include "efx.h" #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 #if EFSYS_OPT_FILTER @@ -95,7 +95,8 @@ ef10_filter_init( ef10_filter_table_t *eftp; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); #define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match)) 
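[Sketch, not patch content: ef10_ev_rxlabel_init() above sizes the packed-stream credit pool as the event-queue depth divided by the worst-case number of packets one credit's buffer can produce. The arithmetic in isolation, with generic names for the hardware constants:]

    #define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))

    static unsigned int
    packed_stream_credits(unsigned int evq_entries,
                          unsigned int mem_per_credit,
                          unsigned int min_packet_space)
    {
        /* events-per-credit = ceil(mem_per_credit / min_packet_space) */
        return evq_entries / DIV_ROUND_UP(mem_per_credit,
                                          min_packet_space);
    }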
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST == @@ -118,6 +119,10 @@ ef10_filter_init( MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_VNI_OR_VSID == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_LOC_MAC == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST == @@ -150,7 +155,8 @@ ef10_filter_fini( __in efx_nic_t *enp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); if (enp->en_filter.ef_ef10_filter_table != NULL) { EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t), @@ -166,17 +172,24 @@ efx_mcdi_filter_op_add( __inout ef10_filter_handle_t *handle) { efx_mcdi_req_t req; - uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN, + uint8_t payload[MAX(MC_CMD_FILTER_OP_V3_IN_LEN, MC_CMD_FILTER_OP_EXT_OUT_LEN)]; + efx_filter_match_flags_t match_flags; efx_rc_t rc; memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FILTER_OP; req.emr_in_buf = payload; - req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN; + req.emr_in_length = MC_CMD_FILTER_OP_V3_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN; + /* + * Remove match flag for encapsulated filters that does not correspond + * to the MCDI match flags + */ + match_flags = spec->efs_match_flags & ~EFX_FILTER_MATCH_ENCAP_TYPE; + switch (filter_op) { case MC_CMD_FILTER_OP_IN_OP_REPLACE: MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO, @@ -197,11 +210,16 @@ efx_mcdi_filter_op_add( MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_MATCH_FIELDS, - spec->efs_match_flags); - MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST, - MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST); - MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE, - spec->efs_dmaq_id); + match_flags); + if (spec->efs_dmaq_id == EFX_FILTER_SPEC_RX_DMAQ_ID_DROP) { + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST, + MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP); + } else { + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST, + MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST); + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE, + spec->efs_dmaq_id); + } #if EFSYS_OPT_RX_SCALE if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) { @@ -290,18 +308,45 @@ efx_mcdi_filter_op_add( rc = EINVAL; goto fail2; } + + memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_VNI_OR_VSID), + spec->efs_vni_or_vsid, EFX_VNI_OR_VSID_LEN); + + memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_IFRM_DST_MAC), + spec->efs_ifrm_loc_mac, EFX_MAC_ADDR_LEN); + } + + /* + * Set the "MARK" or "FLAG" action for all packets matching this filter + * if necessary (only useful with equal stride packed stream Rx mode + * which provide the information in pseudo-header). + * These actions require MC_CMD_FILTER_OP_V3_IN msgrequest. 
+ */ + if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) && + (spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG)) { + rc = EINVAL; + goto fail3; + } + if (spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) { + MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_ACTION, + MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK); + MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_MARK_VALUE, + spec->efs_mark); + } else if (spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG) { + MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_ACTION, + MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG); } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; - goto fail3; + goto fail4; } if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) { rc = EMSGSIZE; - goto fail4; + goto fail5; } handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_LO); @@ -309,6 +354,8 @@ efx_mcdi_filter_op_add( return (0); +fail5: + EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: @@ -413,6 +460,12 @@ ef10_filter_equal( return (B_FALSE); if (left->efs_encap_type != right->efs_encap_type) return (B_FALSE); + if (memcmp(left->efs_vni_or_vsid, right->efs_vni_or_vsid, + EFX_VNI_OR_VSID_LEN)) + return (B_FALSE); + if (memcmp(left->efs_ifrm_loc_mac, right->efs_ifrm_loc_mac, + EFX_MAC_ADDR_LEN)) + return (B_FALSE); return (B_TRUE); @@ -495,7 +548,8 @@ ef10_filter_restore( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) { @@ -570,7 +624,8 @@ ef10_filter_add_internal( boolean_t locked = B_FALSE; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); hash = ef10_filter_hash(spec); @@ -842,7 +897,8 @@ ef10_filter_delete( boolean_t locked = B_FALSE; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); hash = ef10_filter_hash(spec); @@ -890,6 +946,7 @@ efx_mcdi_get_parser_disp_info( __in efx_nic_t *enp, __out_ecount(buffer_length) uint32_t *buffer, __in size_t buffer_length, + __in boolean_t encap, __out size_t *list_lengthp) { efx_mcdi_req_t req; @@ -906,7 +963,8 @@ efx_mcdi_get_parser_disp_info( req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX; - MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP, + MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP, encap ? 
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES : MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); efx_mcdi_execute(enp, &req); @@ -966,28 +1024,76 @@ ef10_filter_supported_filters( __in size_t buffer_length, __out size_t *list_lengthp) { - + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); size_t mcdi_list_length; + size_t mcdi_encap_list_length; size_t list_length; uint32_t i; + uint32_t next_buf_idx; + size_t next_buf_length; efx_rc_t rc; + boolean_t no_space = B_FALSE; efx_filter_match_flags_t all_filter_flags = (EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_REM_PORT | EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_INNER_VID | EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_VNI_OR_VSID | + EFX_FILTER_MATCH_IFRM_LOC_MAC | + EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST | + EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST | + EFX_FILTER_MATCH_ENCAP_TYPE | EFX_FILTER_MATCH_UNKNOWN_MCAST_DST | EFX_FILTER_MATCH_UNKNOWN_UCAST_DST); - rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length, - &mcdi_list_length); + /* + * Two calls to MC_CMD_GET_PARSER_DISP_INFO are needed: one to get the + * list of supported filters for ordinary packets, and then another to + * get the list of supported filters for encapsulated packets. To + * distinguish the second list from the first, the + * EFX_FILTER_MATCH_ENCAP_TYPE flag is added to each filter for + * encapsulated packets. + */ + rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length, B_FALSE, + &mcdi_list_length); if (rc != 0) { - if (rc == ENOSPC) { - /* Pass through mcdi_list_length for the list length */ - *list_lengthp = mcdi_list_length; + if (rc == ENOSPC) + no_space = B_TRUE; + else + goto fail1; + } + + if (no_space) { + next_buf_idx = 0; + next_buf_length = 0; + } else { + EFSYS_ASSERT(mcdi_list_length <= buffer_length); + next_buf_idx = mcdi_list_length; + next_buf_length = buffer_length - mcdi_list_length; + } + + if (encp->enc_tunnel_encapsulations_supported != 0) { + rc = efx_mcdi_get_parser_disp_info(enp, &buffer[next_buf_idx], + next_buf_length, B_TRUE, &mcdi_encap_list_length); + if (rc != 0) { + if (rc == ENOSPC) + no_space = B_TRUE; + else + goto fail2; + } else { + for (i = next_buf_idx; + i < next_buf_idx + mcdi_encap_list_length; i++) + buffer[i] |= EFX_FILTER_MATCH_ENCAP_TYPE; } - goto fail1; + } else { + mcdi_encap_list_length = 0; + } + + if (no_space) { + *list_lengthp = mcdi_list_length + mcdi_encap_list_length; + rc = ENOSPC; + goto fail3; } /* @@ -1000,9 +1106,10 @@ ef10_filter_supported_filters( * of the matches is preserved as they are ordered from highest to * lowest priority. 
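[Sketch, not patch content: ef10_filter_supported_filters() above now issues the MCDI query twice — plain matches first, then encapsulated matches appended to the tail of the same buffer and tagged with EFX_FILTER_MATCH_ENCAP_TYPE — and when either call hits ENOSPC it still accumulates both lengths so the caller learns the total buffer size required. A condensed version of that two-pass pattern; get_list() is a stand-in for the MCDI call and error handling is simplified.]

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static int
    collect_matches(uint32_t *buf, size_t buf_len, size_t *lenp,
                    int (*get_list)(uint32_t *, size_t, bool, size_t *),
                    uint32_t encap_flag)
    {
        size_t n1 = 0, n2 = 0, i;
        bool no_space = false;

        /* Pass 1: ordinary matches at the start of the buffer. */
        if (get_list(buf, buf_len, false, &n1) == -ENOSPC)
            no_space = true;
        /* Pass 2: encapsulated matches appended after them. */
        if (get_list(no_space ? NULL : buf + n1,
                     no_space ? 0 : buf_len - n1, true, &n2) == -ENOSPC)
            no_space = true;
        *lenp = n1 + n2;
        if (no_space)
            return -ENOSPC;     /* *lenp is the total length required */
        for (i = n1; i < n1 + n2; i++)
            buf[i] |= encap_flag;  /* distinguish the second list */
        return 0;
    }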
*/ - EFSYS_ASSERT(mcdi_list_length <= buffer_length); + EFSYS_ASSERT(mcdi_list_length + mcdi_encap_list_length <= + buffer_length); list_length = 0; - for (i = 0; i < mcdi_list_length; i++) { + for (i = 0; i < mcdi_list_length + mcdi_encap_list_length; i++) { if ((buffer[i] & ~all_filter_flags) == 0) { buffer[list_length] = buffer[i]; list_length++; @@ -1013,6 +1120,10 @@ ef10_filter_supported_filters( return (0); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); @@ -1636,4 +1747,4 @@ ef10_filter_default_rxq_clear( #endif /* EFSYS_OPT_FILTER */ -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_image.c b/drivers/net/sfc/base/ef10_image.c new file mode 100644 index 00000000..6fb7e476 --- /dev/null +++ b/drivers/net/sfc/base/ef10_image.c @@ -0,0 +1,885 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2017-2018 Solarflare Communications Inc. + * All rights reserved. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 + +#if EFSYS_OPT_IMAGE_LAYOUT + +/* + * Utility routines to support limited parsing of ASN.1 tags. This is not a + * general purpose ASN.1 parser, but is sufficient to locate the required + * objects in a signed image with CMS headers. + */ + +/* DER encodings for ASN.1 tags (see ITU-T X.690) */ +#define ASN1_TAG_INTEGER (0x02) +#define ASN1_TAG_OCTET_STRING (0x04) +#define ASN1_TAG_OBJ_ID (0x06) +#define ASN1_TAG_SEQUENCE (0x30) +#define ASN1_TAG_SET (0x31) + +#define ASN1_TAG_IS_PRIM(tag) ((tag & 0x20) == 0) + +#define ASN1_TAG_PRIM_CONTEXT(n) (0x80 + (n)) +#define ASN1_TAG_CONS_CONTEXT(n) (0xA0 + (n)) + +typedef struct efx_asn1_cursor_s { + uint8_t *buffer; + uint32_t length; + + uint8_t tag; + uint32_t hdr_size; + uint32_t val_size; +} efx_asn1_cursor_t; + + +/* Parse header of DER encoded ASN.1 TLV and match tag */ +static __checkReturn efx_rc_t +efx_asn1_parse_header_match_tag( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag) +{ + efx_rc_t rc; + + if (cursor == NULL || cursor->buffer == NULL || cursor->length < 2) { + rc = EINVAL; + goto fail1; + } + + cursor->tag = cursor->buffer[0]; + if (cursor->tag != tag) { + /* Tag not matched */ + rc = ENOENT; + goto fail2; + } + + if ((cursor->tag & 0x1F) == 0x1F) { + /* Long tag format not used in CMS syntax */ + rc = EINVAL; + goto fail3; + } + + if ((cursor->buffer[1] & 0x80) == 0) { + /* Short form: length is 0..127 */ + cursor->hdr_size = 2; + cursor->val_size = cursor->buffer[1]; + } else { + /* Long form: length encoded as [0x80+nbytes][length bytes] */ + uint32_t nbytes = cursor->buffer[1] & 0x7F; + uint32_t offset; + + if (nbytes == 0) { + /* Indefinite length not allowed in DER encoding */ + rc = EINVAL; + goto fail4; + } + if (2 + nbytes > cursor->length) { + /* Header length overflows image buffer */ + rc = EINVAL; + goto fail6; + } + if (nbytes > sizeof (uint32_t)) { + /* Length encoding too big */ + rc = E2BIG; + goto fail5; + } + cursor->hdr_size = 2 + nbytes; + cursor->val_size = 0; + for (offset = 2; offset < cursor->hdr_size; offset++) { + cursor->val_size = + (cursor->val_size << 8) | cursor->buffer[offset]; + } + } + + if ((cursor->hdr_size + cursor->val_size) > cursor->length) { + /* Length overflows image buffer */ + rc = E2BIG; + goto fail7; + } + + return (0); + +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + 
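[Sketch, not patch content: efx_asn1_parse_header_match_tag() above implements DER definite-length decoding per ITU-T X.690 — a single byte 0..127 in short form, or 0x80|n followed by n big-endian length bytes in long form, with indefinite lengths rejected. The core of that decoding as a standalone function:]

    #include <stddef.h>
    #include <stdint.h>

    static int
    der_read_length(const uint8_t *p, size_t avail,
                    uint32_t *hdr_size, uint32_t *val_size)
    {
        if (avail < 2)
            return -1;
        if ((p[1] & 0x80) == 0) {          /* short form: 0..127 */
            *hdr_size = 2;
            *val_size = p[1];
        } else {                           /* long form: 0x80|n, n bytes */
            uint32_t n = p[1] & 0x7F, i, v = 0;

            if (n == 0 || n > sizeof(v) || 2 + n > avail)
                return -1;  /* indefinite, oversized, or truncated */
            for (i = 0; i < n; i++)
                v = (v << 8) | p[2 + i];
            *hdr_size = 2 + n;
            *val_size = v;
        }
        return 0;
    }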
EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Enter nested ASN.1 TLV (contained in value of current TLV) */ +static __checkReturn efx_rc_t +efx_asn1_enter_tag( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag) +{ + efx_rc_t rc; + + if (cursor == NULL) { + rc = EINVAL; + goto fail1; + } + + if (ASN1_TAG_IS_PRIM(tag)) { + /* Cannot enter a primitive tag */ + rc = ENOTSUP; + goto fail2; + } + rc = efx_asn1_parse_header_match_tag(cursor, tag); + if (rc != 0) { + /* Invalid TLV or wrong tag */ + goto fail3; + } + + /* Limit cursor range to nested TLV */ + cursor->buffer += cursor->hdr_size; + cursor->length = cursor->val_size; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Check that the current ASN.1 TLV matches the given tag and value. + * Advance cursor to next TLV on a successful match. + */ +static __checkReturn efx_rc_t +efx_asn1_match_tag_value( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag, + __in const void *valp, + __in uint32_t val_size) +{ + efx_rc_t rc; + + if (cursor == NULL) { + rc = EINVAL; + goto fail1; + } + rc = efx_asn1_parse_header_match_tag(cursor, tag); + if (rc != 0) { + /* Invalid TLV or wrong tag */ + goto fail2; + } + if (cursor->val_size != val_size) { + /* Value size is different */ + rc = EINVAL; + goto fail3; + } + if (memcmp(cursor->buffer + cursor->hdr_size, valp, val_size) != 0) { + /* Value content is different */ + rc = EINVAL; + goto fail4; + } + cursor->buffer += cursor->hdr_size + cursor->val_size; + cursor->length -= cursor->hdr_size + cursor->val_size; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Advance cursor to next TLV */ +static __checkReturn efx_rc_t +efx_asn1_skip_tag( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag) +{ + efx_rc_t rc; + + if (cursor == NULL) { + rc = EINVAL; + goto fail1; + } + + rc = efx_asn1_parse_header_match_tag(cursor, tag); + if (rc != 0) { + /* Invalid TLV or wrong tag */ + goto fail2; + } + cursor->buffer += cursor->hdr_size + cursor->val_size; + cursor->length -= cursor->hdr_size + cursor->val_size; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Return pointer to value octets and value size from current TLV */ +static __checkReturn efx_rc_t +efx_asn1_get_tag_value( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag, + __out uint8_t **valp, + __out uint32_t *val_sizep) +{ + efx_rc_t rc; + + if (cursor == NULL || valp == NULL || val_sizep == NULL) { + rc = EINVAL; + goto fail1; + } + + rc = efx_asn1_parse_header_match_tag(cursor, tag); + if (rc != 0) { + /* Invalid TLV or wrong tag */ + goto fail2; + } + *valp = cursor->buffer + cursor->hdr_size; + *val_sizep = cursor->val_size; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +/* + * Utility routines for parsing CMS headers (see RFC2315, PKCS#7) + */ + +/* OID 1.2.840.113549.1.7.2 */ +static const uint8_t PKCS7_SignedData[] = +{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07, 0x02 }; + +/* OID 1.2.840.113549.1.7.1 */ +static const uint8_t PKCS7_Data[] = +{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07, 0x01 }; + +/* SignedData structure version */ +static const 
uint8_t SignedData_Version[] = +{ 0x03 }; + +/* + * Check for a valid image in signed image format. This uses CMS syntax + * (see RFC2315, PKCS#7) to provide signatures, and certificates required + * to validate the signatures. The encapsulated content is in unsigned image + * format (reflash header, image code, trailer checksum). + */ +static __checkReturn efx_rc_t +efx_check_signed_image_header( + __in void *bufferp, + __in uint32_t buffer_size, + __out uint32_t *content_offsetp, + __out uint32_t *content_lengthp) +{ + efx_asn1_cursor_t cursor; + uint8_t *valp; + uint32_t val_size; + efx_rc_t rc; + + if (content_offsetp == NULL || content_lengthp == NULL) { + rc = EINVAL; + goto fail1; + } + cursor.buffer = (uint8_t *)bufferp; + cursor.length = buffer_size; + + /* ContextInfo */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE); + if (rc != 0) + goto fail2; + + /* ContextInfo.contentType */ + rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_OBJ_ID, + PKCS7_SignedData, sizeof (PKCS7_SignedData)); + if (rc != 0) + goto fail3; + + /* ContextInfo.content */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_CONS_CONTEXT(0)); + if (rc != 0) + goto fail4; + + /* SignedData */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE); + if (rc != 0) + goto fail5; + + /* SignedData.version */ + rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_INTEGER, + SignedData_Version, sizeof (SignedData_Version)); + if (rc != 0) + goto fail6; + + /* SignedData.digestAlgorithms */ + rc = efx_asn1_skip_tag(&cursor, ASN1_TAG_SET); + if (rc != 0) + goto fail7; + + /* SignedData.encapContentInfo */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE); + if (rc != 0) + goto fail8; + + /* SignedData.encapContentInfo.econtentType */ + rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_OBJ_ID, + PKCS7_Data, sizeof (PKCS7_Data)); + if (rc != 0) + goto fail9; + + /* SignedData.encapContentInfo.econtent */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_CONS_CONTEXT(0)); + if (rc != 0) + goto fail10; + + /* + * The octet string contains the image header, image code bytes and + * image trailer CRC (same as unsigned image layout). 
+ */ + valp = NULL; + val_size = 0; + rc = efx_asn1_get_tag_value(&cursor, ASN1_TAG_OCTET_STRING, + &valp, &val_size); + if (rc != 0) + goto fail11; + + if ((valp == NULL) || (val_size == 0)) { + rc = EINVAL; + goto fail12; + } + if (valp < (uint8_t *)bufferp) { + rc = EINVAL; + goto fail13; + } + if ((valp + val_size) > ((uint8_t *)bufferp + buffer_size)) { + rc = EINVAL; + goto fail14; + } + + *content_offsetp = (uint32_t)(valp - (uint8_t *)bufferp); + *content_lengthp = val_size; + + return (0); + +fail14: + EFSYS_PROBE(fail14); +fail13: + EFSYS_PROBE(fail13); +fail12: + EFSYS_PROBE(fail12); +fail11: + EFSYS_PROBE(fail11); +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_check_unsigned_image( + __in void *bufferp, + __in uint32_t buffer_size) +{ + efx_image_header_t *header; + efx_image_trailer_t *trailer; + uint32_t crc; + efx_rc_t rc; + + EFX_STATIC_ASSERT(sizeof (*header) == EFX_IMAGE_HEADER_SIZE); + EFX_STATIC_ASSERT(sizeof (*trailer) == EFX_IMAGE_TRAILER_SIZE); + + /* Must have at least enough space for required image header fields */ + if (buffer_size < (EFX_FIELD_OFFSET(efx_image_header_t, eih_size) + + sizeof (header->eih_size))) { + rc = ENOSPC; + goto fail1; + } + header = (efx_image_header_t *)bufferp; + + if (header->eih_magic != EFX_IMAGE_HEADER_MAGIC) { + rc = EINVAL; + goto fail2; + } + + /* + * Check image header version is same or higher than lowest required + * version. + */ + if (header->eih_version < EFX_IMAGE_HEADER_VERSION) { + rc = EINVAL; + goto fail3; + } + + /* Buffer must have space for image header, code and image trailer. */ + if (buffer_size < (header->eih_size + header->eih_code_size + + EFX_IMAGE_TRAILER_SIZE)) { + rc = ENOSPC; + goto fail4; + } + + /* Check CRC from image buffer matches computed CRC. */ + trailer = (efx_image_trailer_t *)((uint8_t *)header + + header->eih_size + header->eih_code_size); + + crc = efx_crc32_calculate(0, (uint8_t *)header, + (header->eih_size + header->eih_code_size)); + + if (trailer->eit_crc != crc) { + rc = EINVAL; + goto fail5; + } + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_check_reflash_image( + __in void *bufferp, + __in uint32_t buffer_size, + __out efx_image_info_t *infop) +{ + efx_image_format_t format = EFX_IMAGE_FORMAT_NO_IMAGE; + uint32_t image_offset; + uint32_t image_size; + void *imagep; + efx_rc_t rc; + + + EFSYS_ASSERT(infop != NULL); + if (infop == NULL) { + rc = EINVAL; + goto fail1; + } + memset(infop, 0, sizeof (*infop)); + + if (bufferp == NULL || buffer_size == 0) { + rc = EINVAL; + goto fail2; + } + + /* + * Check if the buffer contains an image in signed format, and if so, + * locate the image header. + */ + rc = efx_check_signed_image_header(bufferp, buffer_size, + &image_offset, &image_size); + if (rc == 0) { + /* + * Buffer holds signed image format. Check that the encapsulated + * content is in unsigned image format. 
+ */ + format = EFX_IMAGE_FORMAT_SIGNED; + } else { + /* Check if the buffer holds image in unsigned image format */ + format = EFX_IMAGE_FORMAT_UNSIGNED; + image_offset = 0; + image_size = buffer_size; + } + if (image_offset + image_size > buffer_size) { + rc = E2BIG; + goto fail3; + } + imagep = (uint8_t *)bufferp + image_offset; + + /* Check unsigned image layout (image header, code, image trailer) */ + rc = efx_check_unsigned_image(imagep, image_size); + if (rc != 0) + goto fail4; + + /* Return image details */ + infop->eii_format = format; + infop->eii_imagep = bufferp; + infop->eii_image_size = buffer_size; + infop->eii_headerp = (efx_image_header_t *)imagep; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); + infop->eii_format = EFX_IMAGE_FORMAT_INVALID; + infop->eii_imagep = NULL; + infop->eii_image_size = 0; + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_build_signed_image_write_buffer( + __out uint8_t *bufferp, + __in uint32_t buffer_size, + __in efx_image_info_t *infop, + __out efx_image_header_t **headerpp) +{ + signed_image_chunk_hdr_t chunk_hdr; + uint32_t hdr_offset; + struct { + uint32_t offset; + uint32_t size; + } cms_header, image_header, code, image_trailer, signature; + efx_rc_t rc; + + EFSYS_ASSERT((infop != NULL) && (headerpp != NULL)); + + if ((bufferp == NULL) || (buffer_size == 0) || + (infop == NULL) || (headerpp == NULL)) { + /* Invalid arguments */ + rc = EINVAL; + goto fail1; + } + if ((infop->eii_format != EFX_IMAGE_FORMAT_SIGNED) || + (infop->eii_imagep == NULL) || + (infop->eii_headerp == NULL) || + ((uint8_t *)infop->eii_headerp < (uint8_t *)infop->eii_imagep) || + (infop->eii_image_size < EFX_IMAGE_HEADER_SIZE) || + ((size_t)((uint8_t *)infop->eii_headerp - infop->eii_imagep) > + (infop->eii_image_size - EFX_IMAGE_HEADER_SIZE))) { + /* Invalid image info */ + rc = EINVAL; + goto fail2; + } + + /* Locate image chunks in original signed image */ + cms_header.offset = 0; + cms_header.size = + (uint32_t)((uint8_t *)infop->eii_headerp - infop->eii_imagep); + if ((cms_header.size > buffer_size) || + (cms_header.offset > (buffer_size - cms_header.size))) { + rc = EINVAL; + goto fail3; + } + + image_header.offset = cms_header.offset + cms_header.size; + image_header.size = infop->eii_headerp->eih_size; + if ((image_header.size > buffer_size) || + (image_header.offset > (buffer_size - image_header.size))) { + rc = EINVAL; + goto fail4; + } + + code.offset = image_header.offset + image_header.size; + code.size = infop->eii_headerp->eih_code_size; + if ((code.size > buffer_size) || + (code.offset > (buffer_size - code.size))) { + rc = EINVAL; + goto fail5; + } + + image_trailer.offset = code.offset + code.size; + image_trailer.size = EFX_IMAGE_TRAILER_SIZE; + if ((image_trailer.size > buffer_size) || + (image_trailer.offset > (buffer_size - image_trailer.size))) { + rc = EINVAL; + goto fail6; + } + + signature.offset = image_trailer.offset + image_trailer.size; + signature.size = (uint32_t)(infop->eii_image_size - signature.offset); + if ((signature.size > buffer_size) || + (signature.offset > (buffer_size - signature.size))) { + rc = EINVAL; + goto fail7; + } + + EFSYS_ASSERT3U(infop->eii_image_size, ==, cms_header.size + + image_header.size + code.size + image_trailer.size + + signature.size); + + /* BEGIN CSTYLED */ + /* + * Build signed image partition, inserting chunk headers. 
+	 *
+	 *  Signed Image:           Image in NVRAM partition:
+	 *
+	 *  +-----------------+     +-----------------+
+	 *  | CMS header      |     |  mcfw.update    |<----+
+	 *  +-----------------+     |                 |     |
+	 *  | reflash header  |     +-----------------+     |
+	 *  +-----------------+     | chunk header:   |-->--|-+
+	 *  | mcfw.update     |     | REFLASH_TRAILER |     | |
+	 *  |                 |     +-----------------+     | |
+	 *  +-----------------+ +-->| CMS header      |     | |
+	 *  | reflash trailer | |   +-----------------+     | |
+	 *  +-----------------+ |   | chunk header:   |->-+ | |
+	 *  | signature       | |   | REFLASH_HEADER  |   | | |
+	 *  +-----------------+ |   +-----------------+   | | |
+	 *                      |   | reflash header  |<--+ | |
+	 *                      |   +-----------------+     | |
+	 *                      |   | chunk header:   |-->--+ |
+	 *                      |   | IMAGE           |       |
+	 *                      |   +-----------------+       |
+	 *                      |   | reflash trailer |<------+
+	 *                      |   +-----------------+
+	 *                      |   | chunk header:   |
+	 *                      |   | SIGNATURE       |->-+
+	 *                      |   +-----------------+   |
+	 *                      |   | signature       |<--+
+	 *                      |   +-----------------+
+	 *                      |   | ...unused...    |
+	 *                      |   +-----------------+
+	 *                      +-<-| chunk header:   |
+	 *                      >-->| CMS_HEADER      |
+	 *                          +-----------------+
+	 *
+	 * Each chunk header gives the partition offset and length of the image
+	 * chunk's data. The image chunk data is immediately followed by the
+	 * chunk header for the next chunk.
+	 *
+	 * The data chunk for the firmware code must be at the start of the
+	 * partition (needed for the bootloader). The first chunk header in the
+	 * chain (for the CMS header) is stored at the end of the partition. The
+	 * chain of chunk headers maintains the same logical order of image
+	 * chunks as the original signed image file. This set of constraints
+	 * results in the layout used for the data chunks and chunk headers.
+	 */
+	/* END CSTYLED */
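The constraints above pin down every location: the firmware code chunk's data must start at partition offset 0, and the chain's first header (CMS_HEADER) always sits in the last SIGNED_IMAGE_CHUNK_HDR_LEN bytes of the partition. As a minimal sketch (not part of this patch; `walk_chunk_chain` is an illustrative name, and the surrounding file's headers are assumed for `signed_image_chunk_hdr_t`, `SIGNED_IMAGE_CHUNK_HDR_MAGIC` and `efx_rc_t`), a consumer could follow the chain like this:

```c
static	__checkReturn	efx_rc_t
walk_chunk_chain(
	__in		const uint8_t *partition,
	__in		uint32_t partition_size)
{
	signed_image_chunk_hdr_t hdr;
	uint32_t hdr_offset;
	unsigned int n;

	if (partition_size < SIGNED_IMAGE_CHUNK_HDR_LEN)
		return (EINVAL);

	/* The first header in the chain sits at the end of the partition */
	hdr_offset = partition_size - SIGNED_IMAGE_CHUNK_HDR_LEN;

	/* CMS_HEADER, REFLASH_HEADER, IMAGE, REFLASH_TRAILER, SIGNATURE */
	for (n = 0; n < 5; n++) {
		if (hdr_offset > partition_size - SIGNED_IMAGE_CHUNK_HDR_LEN)
			return (EINVAL);
		memcpy(&hdr, partition + hdr_offset, sizeof (hdr));

		if ((hdr.magic != SIGNED_IMAGE_CHUNK_HDR_MAGIC) ||
		    (hdr.len > partition_size) ||
		    (hdr.offset > partition_size - hdr.len))
			return (EINVAL);

		/*
		 * hdr.offset/hdr.len locate this chunk's data; the header
		 * for the next chunk immediately follows that data.
		 */
		hdr_offset = hdr.offset + hdr.len;
	}

	return (0);
}
```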
+	memset(bufferp, 0xFF, buffer_size);
+
+	EFX_STATIC_ASSERT(sizeof (chunk_hdr) == SIGNED_IMAGE_CHUNK_HDR_LEN);
+	memset(&chunk_hdr, 0, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+	/*
+	 * CMS header
+	 */
+	if (buffer_size < SIGNED_IMAGE_CHUNK_HDR_LEN) {
+		rc = ENOSPC;
+		goto fail8;
+	}
+	hdr_offset = buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN;
+
+	chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+	chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+	chunk_hdr.id = SIGNED_IMAGE_CHUNK_CMS_HEADER;
+	chunk_hdr.offset = code.size + SIGNED_IMAGE_CHUNK_HDR_LEN;
+	chunk_hdr.len = cms_header.size;
+
+	memcpy(bufferp + hdr_offset, &chunk_hdr, sizeof (chunk_hdr));
+
+	if ((chunk_hdr.len > buffer_size) ||
+	    (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+		rc = ENOSPC;
+		goto fail9;
+	}
+	memcpy(bufferp + chunk_hdr.offset,
+	    infop->eii_imagep + cms_header.offset,
+	    cms_header.size);
+
+	/*
+	 * Image header
+	 */
+	hdr_offset = chunk_hdr.offset + chunk_hdr.len;
+	if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+		rc = ENOSPC;
+		goto fail10;
+	}
+	chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+	chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+	chunk_hdr.id = SIGNED_IMAGE_CHUNK_REFLASH_HEADER;
+	chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN;
+	chunk_hdr.len = image_header.size;
+
+	memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+	if ((chunk_hdr.len > buffer_size) ||
+	    (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+		rc = ENOSPC;
+		goto fail11;
+	}
+	memcpy(bufferp + chunk_hdr.offset,
+	    infop->eii_imagep + image_header.offset,
+	    image_header.size);
+
+	*headerpp = (efx_image_header_t *)(bufferp + chunk_hdr.offset);
+
+	/*
+	 * Firmware code
+	 */
+	hdr_offset = chunk_hdr.offset + chunk_hdr.len;
+	if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+		rc = ENOSPC;
+		goto fail12;
+	}
+	chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+	chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+	chunk_hdr.id = SIGNED_IMAGE_CHUNK_IMAGE;
+	chunk_hdr.offset = 0;
+	chunk_hdr.len = code.size;
+
+	memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+	if ((chunk_hdr.len > buffer_size) ||
+	    (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+		rc = ENOSPC;
+		goto fail13;
+	}
+	memcpy(bufferp + chunk_hdr.offset,
+	    infop->eii_imagep + code.offset,
+	    code.size);
+
+	/*
+	 * Image trailer (CRC)
+	 */
+	chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+	chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+	chunk_hdr.id = SIGNED_IMAGE_CHUNK_REFLASH_TRAILER;
+	chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN;
+	chunk_hdr.len = image_trailer.size;
+
+	hdr_offset = code.size;
+	if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+		rc = ENOSPC;
+		goto fail14;
+	}
+
+	memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+	if ((chunk_hdr.len > buffer_size) ||
+	    (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+		rc = ENOSPC;
+		goto fail15;
+	}
+	memcpy((uint8_t *)bufferp + chunk_hdr.offset,
+	    infop->eii_imagep + image_trailer.offset,
+	    image_trailer.size);
+
+	/*
+	 * Signature
+	 */
+	hdr_offset = chunk_hdr.offset + chunk_hdr.len;
+	if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+		rc = ENOSPC;
+		goto fail16;
+	}
+	chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+	chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+	chunk_hdr.id = SIGNED_IMAGE_CHUNK_SIGNATURE;
+	chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN;
+	chunk_hdr.len = signature.size;
+
+	memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+	if ((chunk_hdr.len > buffer_size) ||
+	    (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+		rc = ENOSPC;
+		goto fail17;
+	}
+	memcpy(bufferp + chunk_hdr.offset,
+	    infop->eii_imagep + signature.offset,
+	    signature.size);
+
+	return (0);
+
+fail17:
+	EFSYS_PROBE(fail17);
+fail16:
+	EFSYS_PROBE(fail16);
+fail15:
+	EFSYS_PROBE(fail15);
+fail14:
+	EFSYS_PROBE(fail14);
+fail13:
+	EFSYS_PROBE(fail13);
+fail12:
+	EFSYS_PROBE(fail12);
+fail11:
+	EFSYS_PROBE(fail11);
+fail10:
+	EFSYS_PROBE(fail10);
+fail9:
+	EFSYS_PROBE(fail9);
+fail8:
+	EFSYS_PROBE(fail8);
+fail7:
+	EFSYS_PROBE(fail7);
+fail6:
+	EFSYS_PROBE(fail6);
+fail5:
+	EFSYS_PROBE(fail5);
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+
+
+#endif /* EFSYS_OPT_IMAGE_LAYOUT */
+
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_impl.h b/drivers/net/sfc/base/ef10_impl.h
index e79f4d53..4751faf1 100644
--- a/drivers/net/sfc/base/ef10_impl.h
+++ b/drivers/net/sfc/base/ef10_impl.h
@@ -11,13 +11,27 @@ extern "C" {
 #endif
 
-#if (EFSYS_OPT_HUNTINGTON && EFSYS_OPT_MEDFORD)
-#define	EF10_MAX_PIOBUF_NBUFS	MAX(HUNT_PIOBUF_NBUFS, MEDFORD_PIOBUF_NBUFS)
-#elif EFSYS_OPT_HUNTINGTON
-#define	EF10_MAX_PIOBUF_NBUFS	HUNT_PIOBUF_NBUFS
-#elif EFSYS_OPT_MEDFORD
-#define	EF10_MAX_PIOBUF_NBUFS	MEDFORD_PIOBUF_NBUFS
-#endif
+
+/* Number of hardware PIO buffers (for compile-time resource dimensions) */
+#define	EF10_MAX_PIOBUF_NBUFS	(16)
+
+#if EFSYS_OPT_HUNTINGTON
+# if (EF10_MAX_PIOBUF_NBUFS < HUNT_PIOBUF_NBUFS)
+#  error "EF10_MAX_PIOBUF_NBUFS too small"
+# endif
+#endif /* EFSYS_OPT_HUNTINGTON */
+#if EFSYS_OPT_MEDFORD
+# if (EF10_MAX_PIOBUF_NBUFS < MEDFORD_PIOBUF_NBUFS)
+#  error "EF10_MAX_PIOBUF_NBUFS too small"
+# endif
+#endif /* 
EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 +# if (EF10_MAX_PIOBUF_NBUFS < MEDFORD2_PIOBUF_NBUFS) +# error "EF10_MAX_PIOBUF_NBUFS too small" +# endif +#endif /* EFSYS_OPT_MEDFORD2 */ + + /* * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could @@ -742,6 +756,7 @@ extern void ef10_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, + __in uint16_t outer_ipv4_id, __in uint32_t tcp_seq, __in uint16_t tcp_mss, __out_ecount(count) efx_desc_t *edp, @@ -753,6 +768,11 @@ ef10_tx_qdesc_vlantci_create( __in uint16_t vlan_tci, __out efx_desc_t *edp); +extern void +ef10_tx_qdesc_checksum_create( + __in efx_txq_t *etp, + __in uint16_t flags, + __out efx_desc_t *edp); #if EFSYS_OPT_QSTATS @@ -947,13 +967,15 @@ extern void ef10_rx_qenable( __in efx_rxq_t *erp); +union efx_rxq_type_data_u; + extern __checkReturn efx_rc_t ef10_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, - __in uint32_t type_data, + __in const union efx_rxq_type_data_u *type_data, __in efsys_mem_t *esmp, __in size_t ndescs, __in uint32_t id, @@ -1130,6 +1152,11 @@ efx_mcdi_get_clock( __out uint32_t *dpcpu_freqp); +extern __checkReturn efx_rc_t +efx_mcdi_get_rxdp_config( + __in efx_nic_t *enp, + __out uint32_t *end_paddingp); + extern __checkReturn efx_rc_t efx_mcdi_get_vector_cfg( __in efx_nic_t *enp, @@ -1137,20 +1164,27 @@ efx_mcdi_get_vector_cfg( __out_opt uint32_t *pf_nvecp, __out_opt uint32_t *vf_nvecp); -extern __checkReturn efx_rc_t -ef10_get_datapath_caps( - __in efx_nic_t *enp); - extern __checkReturn efx_rc_t ef10_get_privilege_mask( __in efx_nic_t *enp, __out uint32_t *maskp); +#if EFSYS_OPT_FW_SUBVARIANT_AWARE + extern __checkReturn efx_rc_t -ef10_external_port_mapping( +efx_mcdi_get_nic_global( __in efx_nic_t *enp, - __in uint32_t port, - __out uint8_t *external_portp); + __in uint32_t key, + __out uint32_t *valuep); + +extern __checkReturn efx_rc_t +efx_mcdi_set_nic_global( + __in efx_nic_t *enp, + __in uint32_t key, + __in uint32_t value); + +#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */ + #if EFSYS_OPT_RX_PACKED_STREAM @@ -1182,6 +1216,16 @@ ef10_external_port_mapping( #endif /* EFSYS_OPT_RX_PACKED_STREAM */ +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + +/* + * Maximum DMA length and buffer stride alignment. + * (see SF-119419-TC, 3.2) + */ +#define EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT 64 + +#endif + #ifdef __cplusplus } #endif diff --git a/drivers/net/sfc/base/ef10_intr.c b/drivers/net/sfc/base/ef10_intr.c index f79c44e3..1ffe266b 100644 --- a/drivers/net/sfc/base/ef10_intr.c +++ b/drivers/net/sfc/base/ef10_intr.c @@ -8,7 +8,7 @@ #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 __checkReturn efx_rc_t ef10_intr_init( @@ -56,7 +56,8 @@ efx_mcdi_trigger_interrupt( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); if (level >= enp->en_nic_cfg.enc_intr_limit) { rc = EINVAL; @@ -129,7 +130,8 @@ ef10_intr_status_line( efx_dword_t dword; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* Read the queue mask and implicitly acknowledge the interrupt. 
*/ EFX_BAR_READD(enp, ER_DZ_BIU_INT_ISR_REG, &dword, B_FALSE); @@ -147,7 +149,8 @@ ef10_intr_status_message( __out boolean_t *fatalp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); _NOTE(ARGUNUSED(enp, message)) @@ -170,4 +173,4 @@ ef10_intr_fini( _NOTE(ARGUNUSED(enp)) } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_mac.c b/drivers/net/sfc/base/ef10_mac.c index db7692ee..1031e836 100644 --- a/drivers/net/sfc/base/ef10_mac.c +++ b/drivers/net/sfc/base/ef10_mac.c @@ -8,7 +8,7 @@ #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 __checkReturn efx_rc_t ef10_mac_poll( @@ -356,7 +356,8 @@ ef10_mac_multicast_list_set( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail1; @@ -522,8 +523,44 @@ ef10_mac_stats_get_mask( goto fail6; } + if (encp->enc_fec_counters) { + const struct efx_mac_stats_range ef10_fec[] = { + { EFX_MAC_FEC_UNCORRECTED_ERRORS, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3 }, + }; + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + ef10_fec, EFX_ARRAY_SIZE(ef10_fec))) != 0) + goto fail7; + } + + if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V4) { + const struct efx_mac_stats_range ef10_rxdp_sdt[] = { + { EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC, + EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC }, + }; + + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + ef10_rxdp_sdt, EFX_ARRAY_SIZE(ef10_rxdp_sdt))) != 0) + goto fail8; + } + + if (encp->enc_hlb_counters) { + const struct efx_mac_stats_range ef10_hlb[] = { + { EFX_MAC_RXDP_HLB_IDLE, EFX_MAC_RXDP_HLB_TIMEOUT }, + }; + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + ef10_hlb, EFX_ARRAY_SIZE(ef10_hlb))) != 0) + goto fail9; + } + return (0); +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: @@ -551,16 +588,45 @@ ef10_mac_stats_update( __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp) { - efx_qword_t value; + const efx_nic_cfg_t *encp = &enp->en_nic_cfg; efx_qword_t generation_start; efx_qword_t generation_end; + efx_qword_t value; + efx_rc_t rc; - _NOTE(ARGUNUSED(enp)) + /* + * The MAC_STATS contain start and end generation counters used to + * detect when the DMA buffer has been updated during stats decode. + * All stats counters are 64bit unsigned values. + * + * Siena-compatible MAC stats contain MC_CMD_MAC_NSTATS 64bit counters. + * The generation end counter is at index MC_CMD_MAC_GENERATION_END + * (same as MC_CMD_MAC_NSTATS-1). + * + * Medford2 and later use a larger DMA buffer: MAC_STATS_NUM_STATS from + * MC_CMD_GET_CAPABILITIES_V4_OUT reports the number of 64bit counters. + * + * Firmware writes the generation end counter as the last counter in the + * DMA buffer. Do not use MC_CMD_MAC_GENERATION_END, as that is only + * correct for legacy Siena-compatible MAC stats. 
+ */ + + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) { + /* MAC stats count too small for legacy MAC stats */ + rc = ENOSPC; + goto fail1; + } + if (EFSYS_MEM_SIZE(esmp) < + (encp->enc_mac_stats_nstats * sizeof (efx_qword_t))) { + /* DMA buffer too small */ + rc = ENOSPC; + goto fail2; + } /* Read END first so we don't race with the MC */ - EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE); - EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END, - &generation_end); + EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp)); + EF10_MAC_STAT_READ(esmp, (encp->enc_mac_stats_nstats - 1), + &generation_end); EFSYS_MEM_READ_BARRIER(); /* TX */ @@ -851,7 +917,109 @@ ef10_mac_stats_update( EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_OVERFLOW]), &value); - EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE); + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V2) + goto done; + + /* FEC */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_UNCORRECTED_ERRORS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_UNCORRECTED_ERRORS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_ERRORS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_ERRORS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE0]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE1]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE2]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3]), + &value); + + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V3) + goto done; + + /* CTPIO exceptions */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_VI_BUSY_FALLBACK]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_LONG_WRITE_SUCCESS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_MISSING_DBELL_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_OVERFLOW_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_OVERFLOW_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_UNDERFLOW_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_TIMEOUT_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_TIMEOUT_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_NONCONTIG_WR_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_FRM_CLOBBER_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_INVALID_WR_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_INVALID_WR_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_VI_CLOBBER_FALLBACK]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_UNQUALIFIED_FALLBACK]), + &value); + + 
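The generation counters described at the top of this function only let the decoder detect a torn snapshot; recovery is left to the caller. A minimal caller-side sketch (not part of this patch; `poll_mac_stats` is an illustrative name, and it assumes the function's existing tail reports a torn read with EAGAIN, as other efx-family ports do):

```c
static	__checkReturn	efx_rc_t
poll_mac_stats(
	__in				efx_nic_t *enp,
	__in				efsys_mem_t *esmp,
	__inout_ecount(EFX_MAC_NSTATS)	efsys_stat_t *stat)
{
	uint32_t generation;
	efx_rc_t rc;

	/* Retry while firmware is rewriting the DMA buffer under us */
	do {
		rc = efx_mac_stats_update(enp, esmp, stat, &generation);
	} while (rc == EAGAIN);

	return (rc);
}
```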
EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_RUNT_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_RUNT_FALLBACK]), &value); + + /* CTPIO per-port stats */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_SUCCESS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_SUCCESS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_FALLBACK]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_POISON, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_POISON]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_ERASE, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_ERASE]), &value); + + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V4) + goto done; + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC]), + &value); + + /* Head-of-line blocking */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_IDLE, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_IDLE]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_TIMEOUT, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_TIMEOUT]), &value); + +done: + /* Read START generation counter */ + EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp)); EFSYS_MEM_READ_BARRIER(); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START, &generation_start); @@ -866,8 +1034,15 @@ ef10_mac_stats_update( *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0); return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); } #endif /* EFSYS_OPT_MAC_STATS */ -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_mcdi.c b/drivers/net/sfc/base/ef10_mcdi.c index 1f9e573f..8a3fc3b4 100644 --- a/drivers/net/sfc/base/ef10_mcdi.c +++ b/drivers/net/sfc/base/ef10_mcdi.c @@ -8,7 +8,7 @@ #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 #if EFSYS_OPT_MCDI @@ -28,7 +28,8 @@ ef10_mcdi_init( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); EFSYS_ASSERT(enp->en_features & EFX_FEATURE_MCDI_DMA); /* @@ -135,7 +136,8 @@ ef10_mcdi_send_request( unsigned int pos; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* Write the header */ for (pos = 0; pos < hdr_len; pos += sizeof (efx_dword_t)) { @@ -186,13 +188,17 @@ ef10_mcdi_read_response( { const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; efsys_mem_t *esmp = emtp->emt_dma_mem; - unsigned int pos; + unsigned int pos = 0; efx_dword_t data; + size_t remaining = length; + + while (remaining > 0) { + size_t chunk = MIN(remaining, sizeof (data)); - for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) { EFSYS_MEM_READD(esmp, offset + pos, &data); - memcpy((uint8_t *)bufferp + pos, &data, - MIN(sizeof (data), length - pos)); + memcpy((uint8_t *)bufferp + pos, &data, chunk); + pos += chunk; + remaining -= chunk; } } @@ -254,7 +260,8 @@ ef10_mcdi_feature_supported( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + 
enp->en_family == EFX_FAMILY_MEDFORD2); /* * Use privilege mask state at MCDI attach. @@ -315,4 +322,4 @@ fail1: #endif /* EFSYS_OPT_MCDI */ -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_nic.c b/drivers/net/sfc/base/ef10_nic.c index eb9ec2be..7dbf843b 100644 --- a/drivers/net/sfc/base/ef10_nic.c +++ b/drivers/net/sfc/base/ef10_nic.c @@ -10,7 +10,7 @@ #include "mcdi_mon.h" #endif -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 #include "ef10_tlv_layout.h" @@ -25,7 +25,8 @@ efx_mcdi_get_port_assignment( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT; @@ -70,7 +71,8 @@ efx_mcdi_get_port_modes( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PORT_MODES; @@ -250,7 +252,8 @@ efx_mcdi_get_mac_address_pf( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES; @@ -308,7 +311,8 @@ efx_mcdi_get_mac_address_vf( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES; @@ -372,7 +376,8 @@ efx_mcdi_get_clock( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_CLOCK; @@ -415,6 +420,64 @@ fail2: fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_get_rxdp_config( + __in efx_nic_t *enp, + __out uint32_t *end_paddingp) +{ + efx_mcdi_req_t req; + uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN, + MC_CMD_GET_RXDP_CONFIG_OUT_LEN)]; + uint32_t end_padding; + efx_rc_t rc; + + memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_GET_RXDP_CONFIG; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN; + + efx_mcdi_execute(enp, &req); + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA, + GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) { + /* RX DMA end padding is disabled */ + end_padding = 0; + } else { + switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA, + GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) { + case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64: + end_padding = 64; + break; + case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128: + end_padding = 128; + break; + case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256: + end_padding = 256; + break; + default: + rc = ENOTSUP; + goto fail2; + } + } + + *end_paddingp = end_padding; + 
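The value decoded above is the number of bytes the RX datapath may append past each received packet when padding host DMA writes, so a driver sizing receive buffers has to reserve it. A hedged sketch of such a consumer (not part of this patch; `rx_buf_reserve` and its sizing rule are illustrative only):

```c
static	__checkReturn	efx_rc_t
rx_buf_reserve(
	__in	efx_nic_t *enp,
	__in	size_t mtu,
	__out	size_t *buf_sizep)
{
	uint32_t end_padding;
	efx_rc_t rc;

	if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0)
		return (rc);

	/* Illustrative rule: packet bytes plus trailing DMA end padding */
	*buf_sizep = mtu + end_padding;
	return (0);
}
```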
+ return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); } @@ -783,7 +846,8 @@ ef10_nic_pio_alloc( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); EFSYS_ASSERT(bufnump); EFSYS_ASSERT(handlep); EFSYS_ASSERT(blknump); @@ -925,62 +989,103 @@ fail1: return (rc); } - __checkReturn efx_rc_t +static __checkReturn efx_rc_t ef10_get_datapath_caps( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); - uint32_t flags; - uint32_t flags2; - uint32_t tso2nc; + efx_mcdi_req_t req; + uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN, + MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)]; efx_rc_t rc; - if ((rc = efx_mcdi_get_capabilities(enp, &flags, NULL, NULL, - &flags2, &tso2nc)) != 0) - goto fail1; - if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0) goto fail1; -#define CAP_FLAG(flags1, field) \ - ((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN))) -#define CAP_FLAG2(flags2, field) \ - ((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN))) + (void) memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_GET_CAPABILITIES; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_CAPABILITIES_V5_OUT_LEN; + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) { + rc = EMSGSIZE; + goto fail3; + } + +#define CAP_FLAGS1(_req, _flag) \ + (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) & \ + (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))) + +#define CAP_FLAGS2(_req, _flag) \ + (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \ + (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \ + (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))) /* * Huntington RXDP firmware inserts a 0 or 14 byte prefix. * We only support the 14 byte prefix here. */ - if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) { + if (CAP_FLAGS1(req, RX_PREFIX_LEN_14) == 0) { rc = ENOTSUP; - goto fail2; + goto fail4; } encp->enc_rx_prefix_size = 14; + /* Check if the firmware supports additional RSS modes */ + if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES)) + encp->enc_rx_scale_additional_modes_supported = B_TRUE; + else + encp->enc_rx_scale_additional_modes_supported = B_FALSE; + /* Check if the firmware supports TSO */ - encp->enc_fw_assisted_tso_enabled = - CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, TX_TSO)) + encp->enc_fw_assisted_tso_enabled = B_TRUE; + else + encp->enc_fw_assisted_tso_enabled = B_FALSE; /* Check if the firmware supports FATSOv2 */ - encp->enc_fw_assisted_tso_v2_enabled = - CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE; + if (CAP_FLAGS2(req, TX_TSO_V2)) { + encp->enc_fw_assisted_tso_v2_enabled = B_TRUE; + encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req, + GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS); + } else { + encp->enc_fw_assisted_tso_v2_enabled = B_FALSE; + encp->enc_fw_assisted_tso_v2_n_contexts = 0; + } - /* Get the number of TSO contexts (FATSOv2) */ - encp->enc_fw_assisted_tso_v2_n_contexts = - CAP_FLAG2(flags2, TX_TSO_V2) ? 
tso2nc : 0; + /* Check if the firmware supports FATSOv2 encap */ + if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP)) + encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE; + else + encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE; /* Check if the firmware has vadapter/vport/vswitch support */ - encp->enc_datapath_cap_evb = - CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, EVB)) + encp->enc_datapath_cap_evb = B_TRUE; + else + encp->enc_datapath_cap_evb = B_FALSE; /* Check if the firmware supports VLAN insertion */ - encp->enc_hw_tx_insert_vlan_enabled = - CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, TX_VLAN_INSERTION)) + encp->enc_hw_tx_insert_vlan_enabled = B_TRUE; + else + encp->enc_hw_tx_insert_vlan_enabled = B_FALSE; /* Check if the firmware supports RX event batching */ - encp->enc_rx_batching_enabled = - CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, RX_BATCHING)) + encp->enc_rx_batching_enabled = B_TRUE; + else + encp->enc_rx_batching_enabled = B_FALSE; /* * Even if batching isn't reported as supported, we may still get @@ -989,38 +1094,61 @@ ef10_get_datapath_caps( encp->enc_rx_batch_max = 16; /* Check if the firmware supports disabling scatter on RXQs */ - encp->enc_rx_disable_scatter_supported = - CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, RX_DISABLE_SCATTER)) + encp->enc_rx_disable_scatter_supported = B_TRUE; + else + encp->enc_rx_disable_scatter_supported = B_FALSE; /* Check if the firmware supports packed stream mode */ - encp->enc_rx_packed_stream_supported = - CAP_FLAG(flags, RX_PACKED_STREAM) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, RX_PACKED_STREAM)) + encp->enc_rx_packed_stream_supported = B_TRUE; + else + encp->enc_rx_packed_stream_supported = B_FALSE; /* * Check if the firmware supports configurable buffer sizes * for packed stream mode (otherwise buffer size is 1Mbyte) */ - encp->enc_rx_var_packed_stream_supported = - CAP_FLAG(flags, RX_PACKED_STREAM_VAR_BUFFERS) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS)) + encp->enc_rx_var_packed_stream_supported = B_TRUE; + else + encp->enc_rx_var_packed_stream_supported = B_FALSE; + + /* Check if the firmware supports equal stride super-buffer mode */ + if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER)) + encp->enc_rx_es_super_buffer_supported = B_TRUE; + else + encp->enc_rx_es_super_buffer_supported = B_FALSE; + + /* Check if the firmware supports FW subvariant w/o Tx checksumming */ + if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM)) + encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE; + else + encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE; /* Check if the firmware supports set mac with running filters */ - encp->enc_allow_set_mac_with_installed_filters = - CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ? - B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED)) + encp->enc_allow_set_mac_with_installed_filters = B_TRUE; + else + encp->enc_allow_set_mac_with_installed_filters = B_FALSE; /* * Check if firmware supports the extended MC_CMD_SET_MAC, which allows * specifying which parameters to configure. */ - encp->enc_enhanced_set_mac_supported = - CAP_FLAG(flags, SET_MAC_ENHANCED) ? 
B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, SET_MAC_ENHANCED)) + encp->enc_enhanced_set_mac_supported = B_TRUE; + else + encp->enc_enhanced_set_mac_supported = B_FALSE; /* * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows * us to let the firmware choose the settings to use on an EVQ. */ - encp->enc_init_evq_v2_supported = - CAP_FLAG2(flags2, INIT_EVQ_V2) ? B_TRUE : B_FALSE; + if (CAP_FLAGS2(req, INIT_EVQ_V2)) + encp->enc_init_evq_v2_supported = B_TRUE; + else + encp->enc_init_evq_v2_supported = B_FALSE; /* * Check if firmware-verified NVRAM updates must be used. @@ -1030,29 +1158,34 @@ ef10_get_datapath_caps( * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated * partition and report the result). */ - encp->enc_nvram_update_verify_result_supported = - CAP_FLAG2(flags2, NVRAM_UPDATE_REPORT_VERIFY_RESULT) ? - B_TRUE : B_FALSE; + if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT)) + encp->enc_nvram_update_verify_result_supported = B_TRUE; + else + encp->enc_nvram_update_verify_result_supported = B_FALSE; /* * Check if firmware provides packet memory and Rx datapath * counters. */ - encp->enc_pm_and_rxdp_counters = - CAP_FLAG(flags, PM_AND_RXDP_COUNTERS) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS)) + encp->enc_pm_and_rxdp_counters = B_TRUE; + else + encp->enc_pm_and_rxdp_counters = B_FALSE; /* * Check if the 40G MAC hardware is capable of reporting * statistics for Tx size bins. */ - encp->enc_mac_stats_40g_tx_size_bins = - CAP_FLAG2(flags2, MAC_STATS_40G_TX_SIZE_BINS) ? B_TRUE : B_FALSE; + if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS)) + encp->enc_mac_stats_40g_tx_size_bins = B_TRUE; + else + encp->enc_mac_stats_40g_tx_size_bins = B_FALSE; /* * Check if firmware supports VXLAN and NVGRE tunnels. * The capability indicates Geneve protocol support as well. */ - if (CAP_FLAG(flags, VXLAN_NVGRE)) { + if (CAP_FLAGS1(req, VXLAN_NVGRE)) { encp->enc_tunnel_encapsulations_supported = (1u << EFX_TUNNEL_PROTOCOL_VXLAN) | (1u << EFX_TUNNEL_PROTOCOL_GENEVE) | @@ -1066,11 +1199,136 @@ ef10_get_datapath_caps( encp->enc_tunnel_config_udp_entries_max = 0; } -#undef CAP_FLAG -#undef CAP_FLAG2 + /* + * Check if firmware reports the VI window mode. + * Medford2 has a variable VI window size (8K, 16K or 64K). + * Medford and Huntington have a fixed 8K VI window size. + */ + if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) { + uint8_t mode = + MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE); + + switch (mode) { + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K: + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K; + break; + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K: + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K; + break; + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K: + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K; + break; + default: + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID; + break; + } + } else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) || + (enp->en_family == EFX_FAMILY_MEDFORD)) { + /* Huntington and Medford have fixed 8K window size */ + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K; + } else { + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID; + } + + /* Check if firmware supports extended MAC stats. 
*/ + if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) { + /* Extended stats buffer supported */ + encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req, + GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS); + } else { + /* Use Siena-compatible legacy MAC stats */ + encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS; + } + + if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2) + encp->enc_fec_counters = B_TRUE; + else + encp->enc_fec_counters = B_FALSE; + + /* Check if the firmware provides head-of-line blocking counters */ + if (CAP_FLAGS2(req, RXDP_HLB_IDLE)) + encp->enc_hlb_counters = B_TRUE; + else + encp->enc_hlb_counters = B_FALSE; + + if (CAP_FLAGS1(req, RX_RSS_LIMITED)) { + /* Only one exclusive RSS context is available per port. */ + encp->enc_rx_scale_max_exclusive_contexts = 1; + + switch (enp->en_family) { + case EFX_FAMILY_MEDFORD2: + encp->enc_rx_scale_hash_alg_mask = + (1U << EFX_RX_HASHALG_TOEPLITZ); + break; + + case EFX_FAMILY_MEDFORD: + case EFX_FAMILY_HUNTINGTON: + /* + * Packed stream firmware variant maintains a + * non-standard algorithm for hash computation. + * It implies explicit XORing together + * source + destination IP addresses (or last + * four bytes in the case of IPv6) and using the + * resulting value as the input to a Toeplitz hash. + */ + encp->enc_rx_scale_hash_alg_mask = + (1U << EFX_RX_HASHALG_PACKED_STREAM); + break; + + default: + rc = EINVAL; + goto fail5; + } + + /* Port numbers cannot contribute to the hash value */ + encp->enc_rx_scale_l4_hash_supported = B_FALSE; + } else { + /* + * Maximum number of exclusive RSS contexts. + * EF10 hardware supports 64 in total, but 6 are reserved + * for shared contexts. They are a global resource so + * not all may be available. + */ + encp->enc_rx_scale_max_exclusive_contexts = 64 - 6; + + encp->enc_rx_scale_hash_alg_mask = + (1U << EFX_RX_HASHALG_TOEPLITZ); + + /* + * It is possible to use port numbers as + * the input data for hash computation. + */ + encp->enc_rx_scale_l4_hash_supported = B_TRUE; + } + /* Check if the firmware supports "FLAG" and "MARK" filter actions */ + if (CAP_FLAGS2(req, FILTER_ACTION_FLAG)) + encp->enc_filter_action_flag_supported = B_TRUE; + else + encp->enc_filter_action_flag_supported = B_FALSE; + + if (CAP_FLAGS2(req, FILTER_ACTION_MARK)) + encp->enc_filter_action_mark_supported = B_TRUE; + else + encp->enc_filter_action_mark_supported = B_FALSE; + + /* Get maximum supported value for "MARK" filter action */ + if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN) + encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req, + GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX); + else + encp->enc_filter_action_mark_max = 0; + +#undef CAP_FLAGS1 +#undef CAP_FLAGS2 return (0); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: @@ -1132,78 +1390,238 @@ fail1: /* - * Table of mapping schemes from port number to the number of the external - * connector on the board. The external numbering does not distinguish - * off-board separated outputs such as from multi-headed cables. + * Table of mapping schemes from port number to external number. + * + * Each port number ultimately corresponds to a connector: either as part of + * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on + * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T + * "Salina"). 
In general: * - * The count of adjacent port numbers that map to each external port + * Port number (0-based) + * | + * port mapping (n:1) + * | + * v + * External port number (normally 1-based) + * | + * fixed (1:1) or cable assembly (1:m) + * | + * v + * Connector + * + * The external numbering refers to the cages or magjacks on the board, + * as visibly annotated on the board or back panel. This table describes + * how to determine which external cage/magjack corresponds to the port + * numbers used by the driver. + * + * The count of adjacent port numbers that map to each external number, * and the offset in the numbering, is determined by the chip family and * current port mode. * * For the Huntington family, the current port mode cannot be discovered, + * but a single mapping is used by all modes for a given chip variant, * so the mapping used is instead the last match in the table to the full * set of port modes to which the NIC can be configured. Therefore the * ordering of entries in the mapping table is significant. */ -static struct { +static struct ef10_external_port_map_s { efx_family_t family; uint32_t modes_mask; int32_t count; int32_t offset; } __ef10_external_port_mappings[] = { - /* Supported modes with 1 output per external port */ + /* + * Modes used by Huntington family controllers where each port + * number maps to a separate cage. + * SFN7x22F (Torino): + * port 0 -> cage 1 + * port 1 -> cage 2 + * SFN7xx4F (Pavia): + * port 0 -> cage 1 + * port 1 -> cage 2 + * port 2 -> cage 3 + * port 3 -> cage 4 + */ { EFX_FAMILY_HUNTINGTON, - (1 << TLV_PORT_MODE_10G) | - (1 << TLV_PORT_MODE_10G_10G) | - (1 << TLV_PORT_MODE_10G_10G_10G_10G), - 1, - 1 + (1U << TLV_PORT_MODE_10G) | /* mode 0 */ + (1U << TLV_PORT_MODE_10G_10G) | /* mode 2 */ + (1U << TLV_PORT_MODE_10G_10G_10G_10G), /* mode 4 */ + 1, /* ports per cage */ + 1 /* first cage */ }, + /* + * Modes which for Huntington identify a chip variant where 2 + * adjacent port numbers map to each cage. + * SFN7x42Q (Monza): + * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ { - EFX_FAMILY_MEDFORD, - (1 << TLV_PORT_MODE_10G) | - (1 << TLV_PORT_MODE_10G_10G), - 1, - 1 + EFX_FAMILY_HUNTINGTON, + (1U << TLV_PORT_MODE_40G) | /* mode 1 */ + (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */ + (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */ + (1U << TLV_PORT_MODE_10G_10G_40G), /* mode 7 */ + 2, /* ports per cage */ + 1 /* first cage */ }, - /* Supported modes with 2 outputs per external port */ + /* + * Modes that on Medford allocate each port number to a separate + * cage. + * port 0 -> cage 1 + * port 1 -> cage 2 + * port 2 -> cage 3 + * port 3 -> cage 4 + */ { - EFX_FAMILY_HUNTINGTON, - (1 << TLV_PORT_MODE_40G) | - (1 << TLV_PORT_MODE_40G_40G) | - (1 << TLV_PORT_MODE_40G_10G_10G) | - (1 << TLV_PORT_MODE_10G_10G_40G), - 2, - 1 + EFX_FAMILY_MEDFORD, + (1U << TLV_PORT_MODE_10G) | /* mode 0 */ + (1U << TLV_PORT_MODE_10G_10G), /* mode 2 */ + 1, /* ports per cage */ + 1 /* first cage */ }, + /* + * Modes that on Medford allocate 2 adjacent port numbers to each + * cage. 
+ * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ { EFX_FAMILY_MEDFORD, - (1 << TLV_PORT_MODE_40G) | - (1 << TLV_PORT_MODE_40G_40G) | - (1 << TLV_PORT_MODE_40G_10G_10G) | - (1 << TLV_PORT_MODE_10G_10G_40G) | - (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), - 2, - 1 + (1U << TLV_PORT_MODE_40G) | /* mode 1 */ + (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */ + (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */ + (1U << TLV_PORT_MODE_10G_10G_40G) | /* mode 7 */ + /* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */ + (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), /* mode 9 */ + 2, /* ports per cage */ + 1 /* first cage */ }, - /* Supported modes with 4 outputs per external port */ + /* + * Modes that on Medford allocate 4 adjacent port numbers to each + * connector, starting on cage 1. + * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 1 + * port 3 -> cage 1 + */ { EFX_FAMILY_MEDFORD, - (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) | - (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1), - 4, - 1, + (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q) | /* mode 5 */ + /* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */ + (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1), /* mode 4 */ + 4, /* ports per cage */ + 1 /* first cage */ }, + /* + * Modes that on Medford allocate 4 adjacent port numbers to each + * connector, starting on cage 2. + * port 0 -> cage 2 + * port 1 -> cage 2 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ { EFX_FAMILY_MEDFORD, - (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2), - 4, - 2 + (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q2), /* mode 8 */ + 4, /* ports per cage */ + 2 /* first cage */ + }, + /* + * Modes that on Medford2 allocate each port number to a separate + * cage. + * port 0 -> cage 1 + * port 1 -> cage 2 + * port 2 -> cage 3 + * port 3 -> cage 4 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */ + (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */ + (1U << TLV_PORT_MODE_1x1_1x1) | /* mode 2 */ + (1U << TLV_PORT_MODE_1x2_NA) | /* mode 10 */ + (1U << TLV_PORT_MODE_1x2_1x2) | /* mode 12 */ + (1U << TLV_PORT_MODE_1x4_1x2) | /* mode 15 */ + (1U << TLV_PORT_MODE_1x2_1x4), /* mode 16 */ + 1, /* ports per cage */ + 1 /* first cage */ + }, + /* + * FIXME: Some port modes are not representable in this mapping: + * - TLV_PORT_MODE_1x2_2x1 (mode 17): + * port 0 -> cage 1 + * port 1 -> cage 2 + * port 2 -> cage 2 + */ + /* + * Modes that on Medford2 allocate 2 adjacent port numbers to each + * cage, starting on cage 1. + * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */ + (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 4 */ + (1U << TLV_PORT_MODE_1x4_2x1) | /* mode 6 */ + (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */ + (1U << TLV_PORT_MODE_2x2_NA) | /* mode 13 */ + (1U << TLV_PORT_MODE_2x1_1x2), /* mode 18 */ + 2, /* ports per cage */ + 1 /* first cage */ + }, + /* + * Modes that on Medford2 allocate 2 adjacent port numbers to each + * cage, starting on cage 2. + * port 0 -> cage 2 + * port 1 -> cage 2 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_NA_2x2), /* mode 14 */ + 2, /* ports per cage */ + 2 /* first cage */ + }, + /* + * Modes that on Medford2 allocate 4 adjacent port numbers to each + * connector, starting on cage 1. 
+ * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 1 + * port 3 -> cage 1 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_4x1_NA), /* mode 5 */ + 4, /* ports per cage */ + 1 /* first cage */ + }, + /* + * Modes that on Medford2 allocate 4 adjacent port numbers to each + * connector, starting on cage 2. + * port 0 -> cage 2 + * port 1 -> cage 2 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_NA_4x1) | /* mode 8 */ + (1U << TLV_PORT_MODE_NA_1x2), /* mode 11 */ + 4, /* ports per cage */ + 2 /* first cage */ }, }; - __checkReturn efx_rc_t +static __checkReturn efx_rc_t ef10_external_port_mapping( __in efx_nic_t *enp, __in uint32_t port, @@ -1219,7 +1637,7 @@ ef10_external_port_mapping( if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, ¤t)) != 0) { /* - * No current port mode information + * No current port mode information (i.e. Huntington) * - infer mapping from available modes */ if ((rc = efx_mcdi_get_port_modes(enp, @@ -1236,18 +1654,23 @@ ef10_external_port_mapping( } /* - * Infer the internal port -> external port mapping from + * Infer the internal port -> external number mapping from * the possible port modes for this NIC. */ for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) { - if (__ef10_external_port_mappings[i].family != - enp->en_family) + struct ef10_external_port_map_s *eepmp = + &__ef10_external_port_mappings[i]; + if (eepmp->family != enp->en_family) continue; - matches = (__ef10_external_port_mappings[i].modes_mask & - port_modes); + matches = (eepmp->modes_mask & port_modes); if (matches != 0) { - count = __ef10_external_port_mappings[i].count; - offset = __ef10_external_port_mappings[i].offset; + /* + * Some modes match. For some Huntington boards + * there will be multiple matches. The mapping on the + * last match is used. + */ + count = eepmp->count; + offset = eepmp->offset; port_modes &= ~matches; } } @@ -1272,18 +1695,193 @@ fail1: return (rc); } +static __checkReturn efx_rc_t +ef10_nic_board_cfg( + __in efx_nic_t *enp) +{ + const efx_nic_ops_t *enop = enp->en_enop; + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + ef10_link_state_t els; + efx_port_t *epp = &(enp->en_port); + uint32_t board_type = 0; + uint32_t base, nvec; + uint32_t port; + uint32_t mask; + uint32_t pf; + uint32_t vf; + uint8_t mac_addr[6] = { 0 }; + efx_rc_t rc; + + /* Get the (zero-based) MCDI port number */ + if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0) + goto fail1; + + /* EFX MCDI interface uses one-based port numbers */ + emip->emi_port = port + 1; + + if ((rc = ef10_external_port_mapping(enp, port, + &encp->enc_external_port)) != 0) + goto fail2; + + /* + * Get PCIe function number from firmware (used for + * per-function privilege and dynamic config info). + * - PCIe PF: pf = PF number, vf = 0xffff. + * - PCIe VF: pf = parent PF, vf = VF number. + */ + if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0) + goto fail3; + + encp->enc_pf = pf; + encp->enc_vf = vf; + + /* MAC address for this function */ + if (EFX_PCI_FUNCTION_IS_PF(encp)) { + rc = efx_mcdi_get_mac_address_pf(enp, mac_addr); +#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC + /* + * Disable static config checking, ONLY for manufacturing test + * and setup at the factory, to allow the static config to be + * installed. 
+ */ +#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ + if ((rc == 0) && (mac_addr[0] & 0x02)) { + /* + * If the static config does not include a global MAC + * address pool then the board may return a locally + * administered MAC address (this should only happen on + * incorrectly programmed boards). + */ + rc = EINVAL; + } +#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ + } else { + rc = efx_mcdi_get_mac_address_vf(enp, mac_addr); + } + if (rc != 0) + goto fail4; + + EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); + + /* Board configuration (legacy) */ + rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL); + if (rc != 0) { + /* Unprivileged functions may not be able to read board cfg */ + if (rc == EACCES) + board_type = 0; + else + goto fail5; + } + + encp->enc_board_type = board_type; + encp->enc_clk_mult = 1; /* not used for EF10 */ + + /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ + if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) + goto fail6; + + /* Obtain the default PHY advertised capabilities */ + if ((rc = ef10_phy_get_link(enp, &els)) != 0) + goto fail7; + epp->ep_default_adv_cap_mask = els.els_adv_cap_mask; + epp->ep_adv_cap_mask = els.els_adv_cap_mask; + + /* Check capabilities of running datapath firmware */ + if ((rc = ef10_get_datapath_caps(enp)) != 0) + goto fail8; + + /* Alignment for WPTR updates */ + encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN; + + encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT); + /* No boundary crossing limits */ + encp->enc_tx_dma_desc_boundary = 0; + + /* + * Maximum number of bytes into the frame the TCP header can start for + * firmware assisted TSO to work. + */ + encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT; + + /* + * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use + * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available + * resources (allocated to this PCIe function), which is zero until + * after we have allocated VIs. + */ + encp->enc_evq_limit = 1024; + encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET; + encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET; + + encp->enc_buftbl_limit = 0xFFFFFFFF; + + /* Get interrupt vector limits */ + if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) { + if (EFX_PCI_FUNCTION_IS_PF(encp)) + goto fail9; + + /* Ignore error (cannot query vector limits from a VF). */ + base = 0; + nvec = 1024; + } + encp->enc_intr_vec_base = base; + encp->enc_intr_limit = nvec; + + /* + * Get the current privilege mask. Note that this may be modified + * dynamically, so this value is informational only. DO NOT use + * the privilege mask to check for sufficient privileges, as that + * can result in time-of-check/time-of-use bugs. 
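+ * For example, a privilege revoked after this read would go unnoticed
+ * by any later check made against the cached mask.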
+ */ + if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0) + goto fail10; + encp->enc_privilege_mask = mask; + + /* Get remaining controller-specific board config */ + if ((rc = enop->eno_board_cfg(enp)) != 0) + if (rc != EACCES) + goto fail11; + + return (0); + +fail11: + EFSYS_PROBE(fail11); +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} __checkReturn efx_rc_t ef10_nic_probe( __in efx_nic_t *enp) { - const efx_nic_ops_t *enop = enp->en_enop; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_drv_cfg_t *edcp = &(enp->en_drv_cfg); efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* Read and clear any assertion state */ if ((rc = efx_mcdi_read_assertion(enp)) != 0) @@ -1297,9 +1895,8 @@ ef10_nic_probe( if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0) goto fail3; - if ((rc = enop->eno_board_cfg(enp)) != 0) - if (rc != EACCES) - goto fail4; + if ((rc = ef10_nic_board_cfg(enp)) != 0) + goto fail4; /* * Set default driver config limits (based on board config). @@ -1494,10 +2091,12 @@ ef10_nic_init( uint32_t i; uint32_t retry; uint32_t delay_us; + uint32_t vi_window_size; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* Enable reporting of some events (e.g. link change) */ if ((rc = efx_mcdi_log_ctrl(enp)) != 0) @@ -1555,15 +2154,21 @@ ef10_nic_init( enp->en_arch.ef10.ena_pio_write_vi_base = vi_count - enp->en_arch.ef10.ena_piobuf_count; + EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=, + EFX_VI_WINDOW_SHIFT_INVALID); + EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=, + EFX_VI_WINDOW_SHIFT_64K); + vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift; + /* Save UC memory mapping details */ enp->en_arch.ef10.ena_uc_mem_map_offset = 0; if (enp->en_arch.ef10.ena_piobuf_count > 0) { enp->en_arch.ef10.ena_uc_mem_map_size = - (ER_DZ_TX_PIOBUF_STEP * + (vi_window_size * enp->en_arch.ef10.ena_pio_write_vi_base); } else { enp->en_arch.ef10.ena_uc_mem_map_size = - (ER_DZ_TX_PIOBUF_STEP * + (vi_window_size * enp->en_arch.ef10.ena_vi_count); } @@ -1573,7 +2178,7 @@ ef10_nic_init( enp->en_arch.ef10.ena_uc_mem_map_size; enp->en_arch.ef10.ena_wc_mem_map_size = - (ER_DZ_TX_PIOBUF_STEP * + (vi_window_size * enp->en_arch.ef10.ena_piobuf_count); /* Link piobufs to extra VIs in WC mapping */ @@ -1653,7 +2258,8 @@ ef10_nic_get_vi_pool( __out uint32_t *vi_countp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* * Report VIs that the client driver can use. 
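A minimal, self-contained sketch of the two computations established above, assuming only integer arithmetic: the port-to-cage translation implied by the __ef10_external_port_mappings table, and the UC/WC memory-map sizing derived from the per-VI window shift in ef10_nic_init(). Plain stdint types stand in for the efx typedefs, and the function names are illustrative only.

#include <stdint.h>

/*
 * With count = 2 and offset = 1 (e.g. SFN7x42Q/Monza), ports 0 and 1
 * land on cage 1 and ports 2 and 3 on cage 2.
 */
static uint32_t
sketch_external_port(uint32_t port, uint32_t count, uint32_t offset)
{
	return ((port / count) + offset);
}

/*
 * UC/WC sub-mapping sizes when PIO buffers are allocated, mirroring
 * ef10_nic_init(): ordinary VIs are mapped uncached, the trailing
 * PIO-backed VIs write-combined.
 */
static void
sketch_vi_map_sizes(uint32_t vi_window_shift, uint32_t pio_write_vi_base,
    uint32_t piobuf_count, uint64_t *uc_sizep, uint64_t *wc_sizep)
{
	uint64_t vi_window_size = 1ULL << vi_window_shift;

	*uc_sizep = vi_window_size * pio_write_vi_base;
	*wc_sizep = vi_window_size * piobuf_count;
}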
@@ -1674,7 +2280,8 @@ ef10_nic_get_bar_region( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* * TODO: Specify host memory mapping alignment and granularity @@ -1770,5 +2377,87 @@ fail1: #endif /* EFSYS_OPT_DIAG */ +#if EFSYS_OPT_FW_SUBVARIANT_AWARE + + __checkReturn efx_rc_t +efx_mcdi_get_nic_global( + __in efx_nic_t *enp, + __in uint32_t key, + __out uint32_t *valuep) +{ + efx_mcdi_req_t req; + uint8_t payload[MAX(MC_CMD_GET_NIC_GLOBAL_IN_LEN, + MC_CMD_GET_NIC_GLOBAL_OUT_LEN)]; + efx_rc_t rc; + + (void) memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_GET_NIC_GLOBAL; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN; + + MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + *valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_set_nic_global( + __in efx_nic_t *enp, + __in uint32_t key, + __in uint32_t value) +{ + efx_mcdi_req_t req; + uint8_t payload[MC_CMD_SET_NIC_GLOBAL_IN_LEN]; + efx_rc_t rc; + + (void) memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_SET_NIC_GLOBAL; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN; + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key); + MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */ -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_nvram.c b/drivers/net/sfc/base/ef10_nvram.c index 1904597c..2883ec8f 100644 --- a/drivers/net/sfc/base/ef10_nvram.c +++ b/drivers/net/sfc/base/ef10_nvram.c @@ -7,7 +7,7 @@ #include "efx.h" #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM @@ -1349,12 +1349,16 @@ ef10_nvram_partn_read_tlv( */ retry = 10; do { - rc = ef10_nvram_read_tlv_segment(enp, partn, 0, - seg_data, partn_size); - } while ((rc == EAGAIN) && (--retry > 0)); + if ((rc = ef10_nvram_read_tlv_segment(enp, partn, 0, + seg_data, partn_size)) != 0) + --retry; + } while ((rc == EAGAIN) && (retry > 0)); if (rc != 0) { /* Failed to obtain consistent segment data */ + if (rc == EAGAIN) + rc = EIO; + goto fail4; } @@ -2152,6 +2156,20 @@ static ef10_parttbl_entry_t medford_parttbl[] = { PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE), }; +static ef10_parttbl_entry_t medford2_parttbl[] = { + /* partn ports nvtype */ + PARTN_MAP_ENTRY(MC_FIRMWARE, ALL, MC_FIRMWARE), + PARTN_MAP_ENTRY(MC_FIRMWARE_BACKUP, ALL, MC_GOLDEN), + PARTN_MAP_ENTRY(EXPANSION_ROM, ALL, BOOTROM), + PARTN_MAP_ENTRY(EXPROM_CONFIG, ALL, BOOTROM_CFG), + PARTN_MAP_ENTRY(DYNAMIC_CONFIG, ALL, DYNAMIC_CFG), + 
PARTN_MAP_ENTRY(FPGA, ALL, FPGA), + PARTN_MAP_ENTRY(FPGA_BACKUP, ALL, FPGA_BACKUP), + PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE), + PARTN_MAP_ENTRY(EXPANSION_UEFI, ALL, UEFIROM), + PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE), +}; + static __checkReturn efx_rc_t ef10_parttbl_get( __in efx_nic_t *enp, @@ -2169,6 +2187,11 @@ ef10_parttbl_get( *parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl); break; + case EFX_FAMILY_MEDFORD2: + *parttblp = medford2_parttbl; + *parttbl_rowsp = EFX_ARRAY_SIZE(medford2_parttbl); + break; + default: EFSYS_ASSERT(B_FALSE); return (EINVAL); @@ -2362,4 +2385,4 @@ fail1: #endif /* EFSYS_OPT_NVRAM */ -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_phy.c b/drivers/net/sfc/base/ef10_phy.c index aa8d6a2b..84acb70a 100644 --- a/drivers/net/sfc/base/ef10_phy.c +++ b/drivers/net/sfc/base/ef10_phy.c @@ -7,7 +7,7 @@ #include "efx.h" #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static void mcdi_phy_decode_cap( @@ -16,6 +16,32 @@ mcdi_phy_decode_cap( { uint32_t mask; +#define CHECK_CAP(_cap) \ + EFX_STATIC_ASSERT(EFX_PHY_CAP_##_cap == MC_CMD_PHY_CAP_##_cap##_LBN) + + CHECK_CAP(10HDX); + CHECK_CAP(10FDX); + CHECK_CAP(100HDX); + CHECK_CAP(100FDX); + CHECK_CAP(1000HDX); + CHECK_CAP(1000FDX); + CHECK_CAP(10000FDX); + CHECK_CAP(25000FDX); + CHECK_CAP(40000FDX); + CHECK_CAP(50000FDX); + CHECK_CAP(100000FDX); + CHECK_CAP(PAUSE); + CHECK_CAP(ASYM); + CHECK_CAP(AN); + CHECK_CAP(DDM); + CHECK_CAP(BASER_FEC); + CHECK_CAP(BASER_FEC_REQUESTED); + CHECK_CAP(RS_FEC); + CHECK_CAP(RS_FEC_REQUESTED); + CHECK_CAP(25G_BASER_FEC); + CHECK_CAP(25G_BASER_FEC_REQUESTED); +#undef CHECK_CAP + mask = 0; if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) mask |= (1 << EFX_PHY_CAP_10HDX); @@ -31,8 +57,15 @@ mcdi_phy_decode_cap( mask |= (1 << EFX_PHY_CAP_1000FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) mask |= (1 << EFX_PHY_CAP_10000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_25000FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) mask |= (1 << EFX_PHY_CAP_40000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_50000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_100000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) mask |= (1 << EFX_PHY_CAP_PAUSE); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) @@ -40,6 +73,22 @@ mcdi_phy_decode_cap( if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) mask |= (1 << EFX_PHY_CAP_AN); + /* FEC caps (supported on Medford2 and later) */ + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN)) + mask |= (1 << EFX_PHY_CAP_BASER_FEC); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN)) + mask |= (1 << EFX_PHY_CAP_BASER_FEC_REQUESTED); + + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN)) + mask |= (1 << EFX_PHY_CAP_RS_FEC); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN)) + mask |= (1 << EFX_PHY_CAP_RS_FEC_REQUESTED); + + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)) + mask |= (1 << EFX_PHY_CAP_25G_BASER_FEC); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)) + mask |= (1 << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED); + *maskp = mask; } @@ -61,8 +110,14 @@ mcdi_phy_decode_link_mode( if (!up) *link_modep = EFX_LINK_DOWN; + else if (speed == 100000 && fd) + *link_modep = EFX_LINK_100000FDX; + 
else if (speed == 50000 && fd) + *link_modep = EFX_LINK_50000FDX; else if (speed == 40000 && fd) *link_modep = EFX_LINK_40000FDX; + else if (speed == 25000 && fd) + *link_modep = EFX_LINK_25000FDX; else if (speed == 10000 && fd) *link_modep = EFX_LINK_10000FDX; else if (speed == 1000) @@ -116,9 +171,18 @@ ef10_phy_link_ev( case MCDI_EVENT_LINKCHANGE_SPEED_10G: speed = 10000; break; + case MCDI_EVENT_LINKCHANGE_SPEED_25G: + speed = 25000; + break; case MCDI_EVENT_LINKCHANGE_SPEED_40G: speed = 40000; break; + case MCDI_EVENT_LINKCHANGE_SPEED_50G: + speed = 50000; + break; + case MCDI_EVENT_LINKCHANGE_SPEED_100G: + speed = 100000; + break; default: speed = 0; break; @@ -212,26 +276,10 @@ ef10_phy_get_link( &elsp->els_link_mode, &elsp->els_fcntl); #if EFSYS_OPT_LOOPBACK - /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */ - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD); - + /* + * MC_CMD_LOOPBACK and EFX_LOOPBACK names are equivalent, so use the + * MCDI value directly. Agreement is checked in efx_loopback_mask(). 
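+ * For example, one compile-time check per name of the form
+ * EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA)
+ * is sufficient to keep the two namespaces aligned.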
+ */ elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE); #endif /* EFSYS_OPT_LOOPBACK */ @@ -288,8 +336,33 @@ ef10_phy_reconfigure( PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1, PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1); /* Too many fields for POPULATE macros, so insert this afterwards */ + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_25000FDX, (cap_mask >> EFX_PHY_CAP_25000FDX) & 0x1); MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_50000FDX, (cap_mask >> EFX_PHY_CAP_50000FDX) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_100000FDX, (cap_mask >> EFX_PHY_CAP_100000FDX) & 0x1); + + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_BASER_FEC, (cap_mask >> EFX_PHY_CAP_BASER_FEC) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_BASER_FEC_REQUESTED, + (cap_mask >> EFX_PHY_CAP_BASER_FEC_REQUESTED) & 0x1); + + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_RS_FEC, (cap_mask >> EFX_PHY_CAP_RS_FEC) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_RS_FEC_REQUESTED, + (cap_mask >> EFX_PHY_CAP_RS_FEC_REQUESTED) & 0x1); + + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_25G_BASER_FEC, + (cap_mask >> EFX_PHY_CAP_25G_BASER_FEC) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_25G_BASER_FEC_REQUESTED, + (cap_mask >> EFX_PHY_CAP_25G_BASER_FEC_REQUESTED) & 0x1); #if EFSYS_OPT_LOOPBACK MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, @@ -304,9 +377,18 @@ ef10_phy_reconfigure( case EFX_LINK_10000FDX: speed = 10000; break; + case EFX_LINK_25000FDX: + speed = 25000; + break; case EFX_LINK_40000FDX: speed = 40000; break; + case EFX_LINK_50000FDX: + speed = 50000; + break; + case EFX_LINK_100000FDX: + speed = 100000; + break; default: speed = 0; } @@ -606,4 +688,4 @@ ef10_bist_stop( #endif /* EFSYS_OPT_BIST */ -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_rx.c b/drivers/net/sfc/base/ef10_rx.c index 2bb6705d..313a3691 100644 --- a/drivers/net/sfc/base/ef10_rx.c +++ b/drivers/net/sfc/base/ef10_rx.c @@ -8,7 +8,7 @@ #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static __checkReturn efx_rc_t @@ -21,12 +21,16 @@ efx_mcdi_init_rxq( __in efsys_mem_t *esmp, __in boolean_t disable_scatter, __in boolean_t want_inner_classes, - __in uint32_t ps_bufsize) + __in uint32_t ps_bufsize, + __in uint32_t es_bufs_per_desc, + __in uint32_t es_max_dma_len, + __in uint32_t es_buf_stride, + __in uint32_t hol_block_timeout) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mcdi_req_t req; - uint8_t payload[MAX(MC_CMD_INIT_RXQ_EXT_IN_LEN, - MC_CMD_INIT_RXQ_EXT_OUT_LEN)]; + uint8_t payload[MAX(MC_CMD_INIT_RXQ_V3_IN_LEN, + MC_CMD_INIT_RXQ_V3_OUT_LEN)]; int npages = EFX_RXQ_NBUFS(ndescs); int i; efx_qword_t *dma_addr; @@ -37,8 +41,15 @@ efx_mcdi_init_rxq( EFSYS_ASSERT3U(ndescs, <=, EFX_RXQ_MAXNDESCS); + if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_RXQ_SIZE(ndescs))) { + rc = EINVAL; + goto fail1; + } + if (ps_bufsize > 0) dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM; + else if (es_bufs_per_desc > 0) + dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER; else dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET; @@ -65,9 +76,9 @@ efx_mcdi_init_rxq( (void)
memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_RXQ; req.emr_in_buf = payload; - req.emr_in_length = MC_CMD_INIT_RXQ_EXT_IN_LEN; + req.emr_in_length = MC_CMD_INIT_RXQ_V3_IN_LEN; req.emr_out_buf = payload; - req.emr_out_length = MC_CMD_INIT_RXQ_EXT_OUT_LEN; + req.emr_out_length = MC_CMD_INIT_RXQ_V3_OUT_LEN; MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs); MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq); @@ -87,6 +98,19 @@ efx_mcdi_init_rxq( MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0); MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + if (es_bufs_per_desc > 0) { + MCDI_IN_SET_DWORD(req, + INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET, + es_bufs_per_desc); + MCDI_IN_SET_DWORD(req, + INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, es_max_dma_len); + MCDI_IN_SET_DWORD(req, + INIT_RXQ_V3_IN_ES_PACKET_STRIDE, es_buf_stride); + MCDI_IN_SET_DWORD(req, + INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT, + hol_block_timeout); + } + dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); @@ -103,11 +127,13 @@ efx_mcdi_init_rxq( if (req.emr_rc != 0) { rc = req.emr_rc; - goto fail1; + goto fail2; } return (0); +fail2: + EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); @@ -291,11 +317,34 @@ efx_mcdi_rss_context_set_flags( __in uint32_t rss_context, __in efx_rx_hash_type_t type) { + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + efx_rx_hash_type_t type_ipv4; + efx_rx_hash_type_t type_ipv4_tcp; + efx_rx_hash_type_t type_ipv6; + efx_rx_hash_type_t type_ipv6_tcp; + efx_rx_hash_type_t modes; efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN, MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)]; efx_rc_t rc; + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_LBN == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_WIDTH == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_LBN == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_WIDTH == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_LBN == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_WIDTH == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_LBN == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_WIDTH == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH); + if (rss_context == EF10_RSS_CONTEXT_INVALID) { rc = EINVAL; goto fail1; @@ -311,15 +360,57 @@ efx_mcdi_rss_context_set_flags( MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, rss_context); - MCDI_IN_POPULATE_DWORD_4(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, + type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE) | EFX_RX_HASH(IPV4_TCP, 2TUPLE) | + EFX_RX_HASH(IPV4_UDP, 2TUPLE); + type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE); + type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE) | EFX_RX_HASH(IPV6_TCP, 2TUPLE) | + EFX_RX_HASH(IPV6_UDP, 2TUPLE); + type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE); + + /* + * Create a copy of the original hash type. + * The copy will be used to fill in RSS_MODE bits and + * may be cleared beforehand. The original variable + * and, thus, EN bits will remain unaffected. + */ + modes = type; + + /* + * If the firmware lacks support for additional modes, RSS_MODE + * fields must contain zeros, otherwise the operation will fail. 
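+ * For example, EFX_RX_HASH(IPV4_TCP, 4TUPLE) selects a 4-tuple hash
+ * for the TCP/IPv4 class; on firmware without this support the copy
+ * is cleared so that only the legacy EN bits are programmed.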
+ */ + if (encp->enc_rx_scale_additional_modes_supported == B_FALSE) + modes = 0; + +#define EXTRACT_RSS_MODE(_type, _class) \ + (EFX_EXTRACT_NATIVE(_type, 0, 31, \ + EFX_LOW_BIT(EFX_RX_CLASS_##_class), \ + EFX_HIGH_BIT(EFX_RX_CLASS_##_class)) & \ + EFX_MASK32(EFX_RX_CLASS_##_class)) + + MCDI_IN_POPULATE_DWORD_10(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN, - (type & EFX_RX_HASH_IPV4) ? 1 : 0, + ((type & type_ipv4) == type_ipv4) ? 1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN, - (type & EFX_RX_HASH_TCPIPV4) ? 1 : 0, + ((type & type_ipv4_tcp) == type_ipv4_tcp) ? 1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN, - (type & EFX_RX_HASH_IPV6) ? 1 : 0, + ((type & type_ipv6) == type_ipv6) ? 1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN, - (type & EFX_RX_HASH_TCPIPV6) ? 1 : 0); + ((type & type_ipv6_tcp) == type_ipv6_tcp) ? 1 : 0, + RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE, + EXTRACT_RSS_MODE(modes, IPV4_TCP), + RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE, + EXTRACT_RSS_MODE(modes, IPV4_UDP), + RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE, + EXTRACT_RSS_MODE(modes, IPV4), + RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE, + EXTRACT_RSS_MODE(modes, IPV6_TCP), + RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE, + EXTRACT_RSS_MODE(modes, IPV6_UDP), + RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE, + EXTRACT_RSS_MODE(modes, IPV6)); + +#undef EXTRACT_RSS_MODE efx_mcdi_execute(enp, &req); @@ -544,12 +635,13 @@ ef10_rx_scale_mode_set( __in efx_rx_hash_type_t type, __in boolean_t insert) { + efx_nic_cfg_t *encp = &enp->en_nic_cfg; efx_rc_t rc; - EFSYS_ASSERT3U(alg, ==, EFX_RX_HASHALG_TOEPLITZ); EFSYS_ASSERT3U(insert, ==, B_TRUE); - if ((alg != EFX_RX_HASHALG_TOEPLITZ) || (insert == B_FALSE)) { + if ((encp->enc_rx_scale_hash_alg_mask & (1U << alg)) == 0 || + insert == B_FALSE) { rc = EINVAL; goto fail1; } @@ -698,6 +790,7 @@ ef10_rx_prefix_hash( _NOTE(ARGUNUSED(enp)) switch (func) { + case EFX_RX_HASHALG_PACKED_STREAM: case EFX_RX_HASHALG_TOEPLITZ: return (buffer[0] | (buffer[1] << 8) | @@ -795,8 +888,8 @@ ef10_rx_qpush( EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1, wptr, pushed & erp->er_mask); EFSYS_PIO_WRITE_BARRIER(); - EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, - erp->er_index, &dword, B_FALSE); + EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, + erp->er_index, &dword, B_FALSE); } #if EFSYS_OPT_RX_PACKED_STREAM @@ -827,7 +920,7 @@ ef10_rx_qpush_ps_credits( ERF_DZ_RX_DESC_MAGIC_CMD, ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS, ERF_DZ_RX_DESC_MAGIC_DATA, credits); - EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, + EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, erp->er_index, &dword, B_FALSE); rxq_state->eers_rx_packed_stream_credits = 0; @@ -926,7 +1019,7 @@ ef10_rx_qcreate( __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, - __in uint32_t type_data, + __in const efx_rxq_type_data_t *type_data, __in efsys_mem_t *esmp, __in size_t ndescs, __in uint32_t id, @@ -939,6 +1032,10 @@ ef10_rx_qcreate( boolean_t disable_scatter; boolean_t want_inner_classes; unsigned int ps_buf_size; + uint32_t es_bufs_per_desc = 0; + uint32_t es_max_dma_len = 0; + uint32_t es_buf_stride = 0; + uint32_t hol_block_timeout = 0; _NOTE(ARGUNUSED(id, erp, type_data)) @@ -965,7 +1062,7 @@ ef10_rx_qcreate( break; #if EFSYS_OPT_RX_PACKED_STREAM case EFX_RXQ_TYPE_PACKED_STREAM: - switch (type_data) { + switch (type_data->ertd_packed_stream.eps_buf_size) { case EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M: ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M; break; @@ -987,6 +1084,19 @@ 
ef10_rx_qcreate( } break; #endif /* EFSYS_OPT_RX_PACKED_STREAM */ +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + case EFX_RXQ_TYPE_ES_SUPER_BUFFER: + ps_buf_size = 0; + es_bufs_per_desc = + type_data->ertd_es_super_buffer.eessb_bufs_per_desc; + es_max_dma_len = + type_data->ertd_es_super_buffer.eessb_max_dma_len; + es_buf_stride = + type_data->ertd_es_super_buffer.eessb_buf_stride; + hol_block_timeout = + type_data->ertd_es_super_buffer.eessb_hol_block_timeout; + break; +#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */ default: rc = ENOTSUP; goto fail4; @@ -1010,6 +1120,27 @@ ef10_rx_qcreate( EFSYS_ASSERT(ps_buf_size == 0); #endif /* EFSYS_OPT_RX_PACKED_STREAM */ +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + if (es_bufs_per_desc > 0) { + if (encp->enc_rx_es_super_buffer_supported == B_FALSE) { + rc = ENOTSUP; + goto fail7; + } + if (!IS_P2ALIGNED(es_max_dma_len, + EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) { + rc = EINVAL; + goto fail8; + } + if (!IS_P2ALIGNED(es_buf_stride, + EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) { + rc = EINVAL; + goto fail9; + } + } +#else /* EFSYS_OPT_RX_ES_SUPER_BUFFER */ + EFSYS_ASSERT(es_bufs_per_desc == 0); +#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */ + /* Scatter can only be disabled if the firmware supports doing so */ if (flags & EFX_RXQ_FLAG_SCATTER) disable_scatter = B_FALSE; @@ -1023,8 +1154,9 @@ ef10_rx_qcreate( if ((rc = efx_mcdi_init_rxq(enp, ndescs, eep->ee_index, label, index, esmp, disable_scatter, want_inner_classes, - ps_buf_size)) != 0) - goto fail7; + ps_buf_size, es_bufs_per_desc, es_max_dma_len, + es_buf_stride, hol_block_timeout)) != 0) + goto fail10; erp->er_eep = eep; erp->er_label = label; @@ -1035,8 +1167,16 @@ ef10_rx_qcreate( return (0); +fail10: + EFSYS_PROBE(fail10); +#if EFSYS_OPT_RX_ES_SUPER_BUFFER +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); +#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */ #if EFSYS_OPT_RX_PACKED_STREAM fail6: EFSYS_PROBE(fail6); @@ -1087,4 +1227,4 @@ ef10_rx_fini( #endif /* EFSYS_OPT_RX_SCALE */ } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_signed_image_layout.h b/drivers/net/sfc/base/ef10_signed_image_layout.h new file mode 100644 index 00000000..a35d1601 --- /dev/null +++ b/drivers/net/sfc/base/ef10_signed_image_layout.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2016-2018 Solarflare Communications Inc. + * All rights reserved. + */ + +/* These structures define the layouts for the signed firmware image binary + * saved in NVRAM. The original image is in the Cryptographic message + * syntax (CMS) format which contains the bootable firmware binary plus the + * signatures. The entire image is written into NVRAM to enable the firmware + * to validate the signatures. However, the bootrom still requires the + * bootable-image to start at offset 0 of the NVRAM partition. Hence the image + * is parsed upfront by host utilities (sfupdate) and written into nvram as + * 'signed_image_chunks' described by a header. + * + * This file is used by the MC as well as host-utilities (sfupdate). 
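+ * For illustration: a reader walks the partition one chunk at a time,
+ * validating signed_image_chunk_hdr_t.magic against
+ * SIGNED_IMAGE_CHUNK_HDR_MAGIC before trusting offset/len (see the
+ * structure below).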
+ */ + + +#ifndef CI_MGMT_SIGNED_IMAGE_LAYOUT_H +#define CI_MGMT_SIGNED_IMAGE_LAYOUT_H + +/* Signed image chunk type identifiers */ +enum { + SIGNED_IMAGE_CHUNK_CMS_HEADER, /* CMS header describing the signed data */ + SIGNED_IMAGE_CHUNK_REFLASH_HEADER, /* Reflash header */ + SIGNED_IMAGE_CHUNK_IMAGE, /* Bootable binary image */ + SIGNED_IMAGE_CHUNK_REFLASH_TRAILER, /* Reflash trailer */ + SIGNED_IMAGE_CHUNK_SIGNATURE, /* Remaining contents of the signed image, + * including the certificates and signature */ + NUM_SIGNED_IMAGE_CHUNKS, +}; + +/* Magic */ +#define SIGNED_IMAGE_CHUNK_HDR_MAGIC 0xEF105161 /* EF10 SIGned Image */ + +/* Initial version definition - version 1 */ +#define SIGNED_IMAGE_CHUNK_HDR_VERSION 0x1 + +/* Header length is 32 bytes */ +#define SIGNED_IMAGE_CHUNK_HDR_LEN 32 +/* Structure describing the header of each chunk of signed image + * as stored in nvram + */ +typedef struct signed_image_chunk_hdr_e { + /* Magic field to recognise a valid entry + * should match SIGNED_IMAGE_CHUNK_HDR_MAGIC + */ + uint32_t magic; + /* Version number of this header */ + uint32_t version; + /* Chunk type identifier */ + uint32_t id; + /* Chunk offset */ + uint32_t offset; + /* Chunk length */ + uint32_t len; + /* Reserved for future expansion of this structure - always set to zeros */ + uint32_t reserved[3]; +} signed_image_chunk_hdr_t; + +#endif /* CI_MGMT_SIGNED_IMAGE_LAYOUT_H */ diff --git a/drivers/net/sfc/base/ef10_tlv_layout.h b/drivers/net/sfc/base/ef10_tlv_layout.h index 2473a66a..56cffaee 100644 --- a/drivers/net/sfc/base/ef10_tlv_layout.h +++ b/drivers/net/sfc/base/ef10_tlv_layout.h @@ -4,6 +4,14 @@ * All rights reserved. */ +/* + * This is NOT the original source file. Do NOT edit it. + * To update the tlv layout, please edit the copy in + * the sfregistry repo and then, in that repo, + * "make tlv_headers" or "make export" to + * regenerate and export all types of headers. + */ + /* These structures define the layouts for the TLV items stored in static and * dynamic configuration partitions in NVRAM for EF10 (Huntington etc.). * @@ -32,6 +40,7 @@ * 1: dynamic configuration * 2: firmware internal use * 3: license partition + * 4: tsa configuration * * - TTT is a type, which is just a unique value. The same type value * might appear in both locations, indicating a relationship between @@ -407,6 +416,8 @@ struct tlv_firmware_options { #define TLV_FIRMWARE_VARIANT_PACKED_STREAM_HASH_MODE_1 \ MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 #define TLV_FIRMWARE_VARIANT_RULES_ENGINE MC_CMD_FW_RULES_ENGINE +#define TLV_FIRMWARE_VARIANT_DPDK MC_CMD_FW_DPDK +#define TLV_FIRMWARE_VARIANT_L3XUDP MC_CMD_FW_L3XUDP }; /* Voltage settings * @@ -525,6 +536,17 @@ struct tlv_pcie_config_r2 { * number of externally visible ports (and, hence, PF to port mapping), so must * be done at boot time. * + * Port mode naming convention is + * + * [nports_on_cage0]x[port_lane_width]_[nports_on_cage1]x[port_lane_width] + * + * Port lane width determines the capabilities (speeds) of the ports, subject + * to architecture capabilities (e.g. 25G support) and switch bandwidth + * constraints: + * - single lane ports can do 25G/10G/1G + * - dual lane ports can do 50G/25G/10G/1G (with fallback to 1 lane) + * - quad lane ports can do 100G/40G/50G/25G/10G/1G (with fallback to 2 or 1 lanes) + * This tag supersedes tlv_global_port_config.
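+ * Worked example: TLV_PORT_MODE_2x1_1x4 is two single-lane (10G/25G)
+ * ports on the first cage plus one quad-lane (40G) port on the second.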
*/ @@ -535,18 +557,58 @@ struct tlv_global_port_mode { uint32_t length; uint32_t port_mode; #define TLV_PORT_MODE_DEFAULT (0xffffffff) /* Default for given platform */ -#define TLV_PORT_MODE_10G (0) /* 10G, single SFP/10G-KR */ -#define TLV_PORT_MODE_40G (1) /* 40G, single QSFP/40G-KR */ -#define TLV_PORT_MODE_10G_10G (2) /* 2x10G, dual SFP/10G-KR or single QSFP */ -#define TLV_PORT_MODE_40G_40G (3) /* 40G + 40G, dual QSFP/40G-KR (Greenport, Medford) */ -#define TLV_PORT_MODE_10G_10G_10G_10G (4) /* 2x10G + 2x10G, quad SFP/10G-KR or dual QSFP (Greenport) */ -#define TLV_PORT_MODE_10G_10G_10G_10G_Q1 (4) /* 4x10G, single QSFP, cage 0 (Medford) */ -#define TLV_PORT_MODE_10G_10G_10G_10G_Q (5) /* 4x10G, single QSFP, cage 0 (Medford) OBSOLETE DO NOT USE */ -#define TLV_PORT_MODE_40G_10G_10G (6) /* 1x40G + 2x10G, dual QSFP (Greenport, Medford) */ -#define TLV_PORT_MODE_10G_10G_40G (7) /* 2x10G + 1x40G, dual QSFP (Greenport, Medford) */ -#define TLV_PORT_MODE_10G_10G_10G_10G_Q2 (8) /* 4x10G, single QSFP, cage 1 (Medford) */ -#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2 (9) /* 2x10G + 2x10G, dual QSFP (Medford) */ -#define TLV_PORT_MODE_MAX TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2 + +/* Huntington port modes */ +#define TLV_PORT_MODE_10G (0) +#define TLV_PORT_MODE_40G (1) +#define TLV_PORT_MODE_10G_10G (2) +#define TLV_PORT_MODE_40G_40G (3) +#define TLV_PORT_MODE_10G_10G_10G_10G (4) +#define TLV_PORT_MODE_40G_10G_10G (6) +#define TLV_PORT_MODE_10G_10G_40G (7) + +/* Medford (and later) port modes */ +#define TLV_PORT_MODE_1x1_NA (0) /* Single 10G/25G on mdi0 */ +#define TLV_PORT_MODE_1x4_NA (1) /* Single 100G/40G on mdi0 */ +#define TLV_PORT_MODE_NA_1x4 (22) /* Single 100G/40G on mdi1 */ +#define TLV_PORT_MODE_1x2_NA (10) /* Single 50G on mdi0 */ +#define TLV_PORT_MODE_NA_1x2 (11) /* Single 50G on mdi1 */ +#define TLV_PORT_MODE_1x1_1x1 (2) /* Single 10G/25G on mdi0, single 10G/25G on mdi1 */ +#define TLV_PORT_MODE_1x4_1x4 (3) /* Single 40G on mdi0, single 40G on mdi1 */ +#define TLV_PORT_MODE_2x1_2x1 (5) /* Dual 10G/25G on mdi0, dual 10G/25G on mdi1 */ +#define TLV_PORT_MODE_4x1_NA (4) /* Quad 10G/25G on mdi0 */ +#define TLV_PORT_MODE_NA_4x1 (8) /* Quad 10G/25G on mdi1 */ +#define TLV_PORT_MODE_1x4_2x1 (6) /* Single 40G on mdi0, dual 10G/25G on mdi1 */ +#define TLV_PORT_MODE_2x1_1x4 (7) /* Dual 10G/25G on mdi0, single 40G on mdi1 */ +#define TLV_PORT_MODE_1x2_1x2 (12) /* Single 50G on mdi0, single 50G on mdi1 */ +#define TLV_PORT_MODE_2x2_NA (13) /* Dual 50G on mdi0 */ +#define TLV_PORT_MODE_NA_2x2 (14) /* Dual 50G on mdi1 */ +#define TLV_PORT_MODE_1x4_1x2 (15) /* Single 40G on mdi0, single 50G on mdi1 */ +#define TLV_PORT_MODE_1x2_1x4 (16) /* Single 50G on mdi0, single 40G on mdi1 */ +#define TLV_PORT_MODE_1x2_2x1 (17) /* Single 50G on mdi0, dual 10G/25G on mdi1 */ +#define TLV_PORT_MODE_2x1_1x2 (18) /* Dual 10G/25G on mdi0, single 50G on mdi1 */ + +/* Snapper-only Medford2 port modes. + * These modes are eftest only, to allow snapper explicit + * selection between multi-channel and LLPCS. In production, + * this selection is automatic and outside world should not + * care about LLPCS. 
+ */ +#define TLV_PORT_MODE_2x1_2x1_LL (19) /* Dual 10G/25G on mdi0, dual 10G/25G on mdi1, low-latency PCS */ +#define TLV_PORT_MODE_4x1_NA_LL (20) /* Quad 10G/25G on mdi0, low-latency PCS */ +#define TLV_PORT_MODE_NA_4x1_LL (21) /* Quad 10G/25G on mdi1, low-latency PCS */ +#define TLV_PORT_MODE_1x1_NA_LL (23) /* Single 10G/25G on mdi0, low-latency PCS */ +#define TLV_PORT_MODE_1x1_1x1_LL (24) /* Single 10G/25G on mdi0, single 10G/25G on mdi1, low-latency PCS */ +#define TLV_PORT_MODE_BUG63720_DO_NOT_USE (9) /* bug63720: Do not use */ +#define TLV_PORT_MODE_MAX TLV_PORT_MODE_1x1_1x1_LL + +/* Deprecated Medford aliases - DO NOT USE IN NEW CODE */ +#define TLV_PORT_MODE_10G_10G_10G_10G_Q (5) +#define TLV_PORT_MODE_10G_10G_10G_10G_Q1 (4) +#define TLV_PORT_MODE_10G_10G_10G_10G_Q2 (8) +#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2 (9) + +#define TLV_PORT_MODE_MAX TLV_PORT_MODE_1x1_1x1_LL }; /* Type of the v-switch created implicitly by the firmware */ @@ -791,7 +853,7 @@ typedef struct tlv_license { uint8_t data[]; } tlv_license_t; -/* TSA NIC IP address configuration +/* TSA NIC IP address configuration (DEPRECATED) * * Sets the TSA NIC IP address statically via configuration tool or dynamically * via DHCP via snooping based on the mode selection (0=Static, 1=DHCP, 2=Snoop) @@ -801,7 +863,7 @@ typedef struct tlv_license { * released code yet. */ -#define TLV_TAG_TMP_TSAN_CONFIG (0x10220000) +#define TLV_TAG_TMP_TSAN_CONFIG (0x10220000) /* DEPRECATED */ #define TLV_TSAN_IP_MODE_STATIC (0) #define TLV_TSAN_IP_MODE_DHCP (1) @@ -818,7 +880,7 @@ typedef struct tlv_tsan_config { uint32_t bind_bkout; /* DEPRECATED */ } tlv_tsan_config_t; -/* TSA Controller IP address configuration +/* TSA Controller IP address configuration (DEPRECATED) * * Sets the TSA Controller IP address statically via configuration tool * @@ -827,7 +889,7 @@ typedef struct tlv_tsan_config { * released code yet. */ -#define TLV_TAG_TMP_TSAC_CONFIG (0x10230000) +#define TLV_TAG_TMP_TSAC_CONFIG (0x10230000) /* DEPRECATED */ #define TLV_MAX_TSACS (4) typedef struct tlv_tsac_config { @@ -838,7 +900,7 @@ typedef struct tlv_tsac_config { uint32_t port[TLV_MAX_TSACS]; } tlv_tsac_config_t; -/* Binding ticket +/* Binding ticket (DEPRECATED) * * Sets the TSA NIC binding ticket used for binding process between the TSA NIC * and the TSA Controller @@ -848,7 +910,7 @@ typedef struct tlv_tsac_config { * released code yet. */ -#define TLV_TAG_TMP_BINDING_TICKET (0x10240000) +#define TLV_TAG_TMP_BINDING_TICKET (0x10240000) /* DEPRECATED */ typedef struct tlv_binding_ticket { uint32_t tag; @@ -873,7 +935,7 @@ typedef struct tlv_pik_sf { uint8_t bytes[]; } tlv_pik_sf_t; -/* CA root certificate +/* CA root certificate (DEPRECATED) * * Sets the CA root certificate used for TSA Controller verification during * TLS connection setup between the TSA NIC and the TSA Controller @@ -883,7 +945,7 @@ typedef struct tlv_pik_sf { * released code yet. */ -#define TLV_TAG_TMP_CA_ROOT_CERT (0x10260000) +#define TLV_TAG_TMP_CA_ROOT_CERT (0x10260000) /* DEPRECATED */ typedef struct tlv_ca_root_cert { uint32_t tag; @@ -933,4 +995,17 @@ struct tlv_fastpd_mode { #define TLV_FASTPD_MODE_FAST_SUPPORTED 2 /* Supported packet types to the FastPD; everything else to the SoftPD */ }; +/* L3xUDP datapath firmware UDP port configuration + * + * Sets the list of UDP ports on which the encapsulation will be handled. + * The number of ports in the list is implied by the length of the TLV item.
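+ * For example, assuming the length field counts the bytes of ports[],
+ * a 32-byte item carries 16 port entries, matching the
+ * TLV_TAG_L3XUDP_PORTS_MAX_NUM_PORTS limit below.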
+ */ +#define TLV_TAG_L3XUDP_PORTS (0x102a0000) +struct tlv_l3xudp_ports { + uint32_t tag; + uint32_t length; + uint16_t ports[]; +#define TLV_TAG_L3XUDP_PORTS_MAX_NUM_PORTS 16 +}; + #endif /* CI_MGMT_TLV_LAYOUT_H */ diff --git a/drivers/net/sfc/base/ef10_tx.c b/drivers/net/sfc/base/ef10_tx.c index 69c75700..7d27f710 100644 --- a/drivers/net/sfc/base/ef10_tx.c +++ b/drivers/net/sfc/base/ef10_tx.c @@ -8,7 +8,7 @@ #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 #if EFSYS_OPT_QSTATS #define EFX_TX_QSTAT_INCR(_etp, _stat) \ @@ -42,10 +42,15 @@ efx_mcdi_init_txq( EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >= EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs)); + if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_TXQ_SIZE(ndescs))) { + rc = EINVAL; + goto fail1; + } + npages = EFX_TXQ_NBUFS(ndescs); if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) { rc = EINVAL; - goto fail1; + goto fail2; } (void) memset(payload, 0, sizeof (payload)); @@ -94,11 +99,13 @@ efx_mcdi_init_txq( if (req.emr_rc != 0) { rc = req.emr_rc; - goto fail2; + goto fail3; } return (0); +fail3: + EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: @@ -176,7 +183,7 @@ ef10_tx_qcreate( { efx_nic_cfg_t *encp = &enp->en_nic_cfg; uint16_t inner_csum; - efx_qword_t desc; + efx_desc_t desc; efx_rc_t rc; _NOTE(ARGUNUSED(id)) @@ -201,19 +208,9 @@ ef10_tx_qcreate( * a no-op TX option descriptor. See bug29981 for details. */ *addedp = 1; - EFX_POPULATE_QWORD_6(desc, - ESF_DZ_TX_DESC_IS_OPT, 1, - ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM, - ESF_DZ_TX_OPTION_UDP_TCP_CSUM, - (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0, - ESF_DZ_TX_OPTION_IP_CSUM, - (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0, - ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, - (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, - ESF_DZ_TX_OPTION_INNER_IP_CSUM, - (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 
1 : 0); + ef10_tx_qdesc_checksum_create(etp, flags, &desc); - EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc); + EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc.ed_eq); ef10_tx_qpush(etp, *addedp, 0); return (0); @@ -511,8 +508,8 @@ ef10_tx_qpush( EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id); EFSYS_PIO_WRITE_BARRIER(); - EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, - etp->et_index, &oword); + EFX_BAR_VI_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, + etp->et_index, &oword); } else { efx_dword_t dword; @@ -527,8 +524,8 @@ ef10_tx_qpush( EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id); EFSYS_PIO_WRITE_BARRIER(); - EFX_BAR_TBL_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG, - etp->et_index, &dword, B_FALSE); + EFX_BAR_VI_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG, + etp->et_index, &dword, B_FALSE); } } @@ -626,6 +623,7 @@ ef10_tx_qdesc_tso_create( ef10_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, + __in uint16_t outer_ipv4_id, __in uint32_t tcp_seq, __in uint16_t tcp_mss, __out_ecount(count) efx_desc_t *edp, @@ -639,13 +637,14 @@ ef10_tx_qdesc_tso2_create( EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS); - EFX_POPULATE_QWORD_5(edp[0].ed_eq, + EFX_POPULATE_QWORD_6(edp[0].ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, ESF_DZ_TX_TSO_OPTION_TYPE, ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, ESF_DZ_TX_TSO_IP_ID, ipv4_id, + ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id, ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq); EFX_POPULATE_QWORD_4(edp[1].ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, @@ -675,6 +674,30 @@ ef10_tx_qdesc_vlantci_create( ESF_DZ_TX_VLAN_TAG1, tci); } + void +ef10_tx_qdesc_checksum_create( + __in efx_txq_t *etp, + __in uint16_t flags, + __out efx_desc_t *edp) +{ + _NOTE(ARGUNUSED(etp)); + + EFSYS_PROBE2(tx_desc_checksum_create, unsigned int, etp->et_index, + uint32_t, flags); + + EFX_POPULATE_QWORD_6(edp->ed_eq, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM, + ESF_DZ_TX_OPTION_UDP_TCP_CSUM, + (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0, + ESF_DZ_TX_OPTION_IP_CSUM, + (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0, + ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, + (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, + ESF_DZ_TX_OPTION_INNER_IP_CSUM, + (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 
1 : 0); +} + __checkReturn efx_rc_t ef10_tx_qpace( @@ -752,4 +775,4 @@ ef10_tx_qstats_update( #endif /* EFSYS_OPT_QSTATS */ -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_vpd.c b/drivers/net/sfc/base/ef10_vpd.c index ad522e82..097fe1d4 100644 --- a/drivers/net/sfc/base/ef10_vpd.c +++ b/drivers/net/sfc/base/ef10_vpd.c @@ -10,7 +10,7 @@ #if EFSYS_OPT_VPD -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 #include "ef10_tlv_layout.h" @@ -26,7 +26,8 @@ ef10_vpd_init( EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); if (enp->en_nic_cfg.enc_vpd_is_global) { tag = TLV_TAG_GLOBAL_STATIC_VPD; @@ -82,7 +83,8 @@ ef10_vpd_size( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* * This function returns the total size the user should allocate @@ -115,7 +117,8 @@ ef10_vpd_read( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); if (enp->en_nic_cfg.enc_vpd_is_global) { tag = TLV_TAG_GLOBAL_DYNAMIC_VPD; @@ -133,19 +136,22 @@ ef10_vpd_read( rc = ENOSPC; goto fail2; } - memcpy(data, dvpd, dvpd_size); + if (dvpd != NULL) + memcpy(data, dvpd, dvpd_size); /* Pad data with all-1s, consistent with update operations */ memset(data + dvpd_size, 0xff, size - dvpd_size); - EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd); + if (dvpd != NULL) + EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd); return (0); fail2: EFSYS_PROBE(fail2); - EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd); + if (dvpd != NULL) + EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); @@ -167,7 +173,8 @@ ef10_vpd_verify( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* * Strictly you could take the view that dynamic vpd is optional. 
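The TX hunks above factor the open-coded EFX_POPULATE_QWORD_6() option descriptor out of ef10_tx_qcreate() into the new ef10_tx_qdesc_checksum_create() helper. A minimal sketch of the resulting call pattern, assuming driver context (efx_impl.h) and a caller-held queue, flags and 'added' count:

static void
sketch_initial_csum_desc(efx_txq_t *etp, uint16_t flags, unsigned int added)
{
	efx_desc_t desc;

	/* Build a checksum option descriptor for the queue's flags */
	ef10_tx_qdesc_checksum_create(etp, flags, &desc);

	/* Write it as descriptor 0 and push, as ef10_tx_qcreate() now does */
	EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc.ed_eq);
	ef10_tx_qpush(etp, added, 0);
}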
@@ -288,7 +295,8 @@ ef10_vpd_get( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* Attempt to satisfy the request from svpd first */ if (enp->en_arch.ef10.ena_svpd_length > 0) { @@ -334,7 +342,8 @@ ef10_vpd_set( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* If the provided (tag,keyword) exists in svpd, then it is readonly */ if (enp->en_arch.ef10.ena_svpd_length > 0) { @@ -387,7 +396,8 @@ ef10_vpd_write( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); if (enp->en_nic_cfg.enc_vpd_is_global) { tag = TLV_TAG_GLOBAL_DYNAMIC_VPD; @@ -423,7 +433,8 @@ ef10_vpd_fini( __in efx_nic_t *enp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); if (enp->en_arch.ef10.ena_svpd_length > 0) { EFSYS_KMEM_FREE(enp->en_esip, enp->en_arch.ef10.ena_svpd_length, @@ -434,6 +445,6 @@ ef10_vpd_fini( } } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ #endif /* EFSYS_OPT_VPD */ diff --git a/drivers/net/sfc/base/efx.h b/drivers/net/sfc/base/efx.h index fe996e7c..5108b9b1 100644 --- a/drivers/net/sfc/base/efx.h +++ b/drivers/net/sfc/base/efx.h @@ -40,6 +40,7 @@ typedef enum efx_family_e { EFX_FAMILY_SIENA, EFX_FAMILY_HUNTINGTON, EFX_FAMILY_MEDFORD, + EFX_FAMILY_MEDFORD2, EFX_FAMILY_NTYPES } efx_family_t; @@ -47,7 +48,8 @@ extern __checkReturn efx_rc_t efx_family( __in uint16_t venid, __in uint16_t devid, - __out efx_family_t *efp); + __out efx_family_t *efp, + __out unsigned int *membarp); #define EFX_PCI_VENID_SFC 0x1924 @@ -69,7 +71,21 @@ efx_family( #define EFX_PCI_DEVID_MEDFORD 0x0A03 /* SFC9240 PF */ #define EFX_PCI_DEVID_MEDFORD_VF 0x1A03 /* SFC9240 VF */ -#define EFX_MEM_BAR 2 +#define EFX_PCI_DEVID_MEDFORD2_PF_UNINIT 0x0B13 +#define EFX_PCI_DEVID_MEDFORD2 0x0B03 /* SFC9250 PF */ +#define EFX_PCI_DEVID_MEDFORD2_VF 0x1B03 /* SFC9250 VF */ + + +#define EFX_MEM_BAR_SIENA 2 + +#define EFX_MEM_BAR_HUNTINGTON_PF 2 +#define EFX_MEM_BAR_HUNTINGTON_VF 0 + +#define EFX_MEM_BAR_MEDFORD_PF 2 +#define EFX_MEM_BAR_MEDFORD_VF 0 + +#define EFX_MEM_BAR_MEDFORD2 0 + /* Error codes */ @@ -113,9 +129,22 @@ efx_nic_create( __in efsys_lock_t *eslp, __deref_out efx_nic_t **enpp); +/* EFX_FW_VARIANT codes map one to one on MC_CMD_FW codes */ +typedef enum efx_fw_variant_e { + EFX_FW_VARIANT_FULL_FEATURED, + EFX_FW_VARIANT_LOW_LATENCY, + EFX_FW_VARIANT_PACKED_STREAM, + EFX_FW_VARIANT_HIGH_TX_RATE, + EFX_FW_VARIANT_PACKED_STREAM_HASH_MODE_1, + EFX_FW_VARIANT_RULES_ENGINE, + EFX_FW_VARIANT_DPDK, + EFX_FW_VARIANT_DONT_CARE = 0xffffffff +} efx_fw_variant_t; + extern __checkReturn efx_rc_t efx_nic_probe( - __in efx_nic_t *enp); + __in efx_nic_t *enp, + __in efx_fw_variant_t efv); extern __checkReturn efx_rc_t efx_nic_init( @@ -171,7 +200,7 @@ efx_nic_check_pcie_link_speed( #if EFSYS_OPT_MCDI -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 /* Huntington and Medford require MCDIv2 commands */ #define WITH_MCDI_V2 1 #endif @@ 
-307,7 +336,7 @@ efx_intr_fini( #if EFSYS_OPT_MAC_STATS -/* START MKCONFIG GENERATED EfxHeaderMacBlock e323546097fd7c65 */ +/* START MKCONFIG GENERATED EfxHeaderMacBlock ea466a9bc8789994 */ typedef enum efx_mac_stat_e { EFX_MAC_RX_OCTETS, EFX_MAC_RX_PKTS, @@ -390,6 +419,31 @@ typedef enum efx_mac_stat_e { EFX_MAC_VADAPTER_TX_BAD_PACKETS, EFX_MAC_VADAPTER_TX_BAD_BYTES, EFX_MAC_VADAPTER_TX_OVERFLOW, + EFX_MAC_FEC_UNCORRECTED_ERRORS, + EFX_MAC_FEC_CORRECTED_ERRORS, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE0, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE1, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE2, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3, + EFX_MAC_CTPIO_VI_BUSY_FALLBACK, + EFX_MAC_CTPIO_LONG_WRITE_SUCCESS, + EFX_MAC_CTPIO_MISSING_DBELL_FAIL, + EFX_MAC_CTPIO_OVERFLOW_FAIL, + EFX_MAC_CTPIO_UNDERFLOW_FAIL, + EFX_MAC_CTPIO_TIMEOUT_FAIL, + EFX_MAC_CTPIO_NONCONTIG_WR_FAIL, + EFX_MAC_CTPIO_FRM_CLOBBER_FAIL, + EFX_MAC_CTPIO_INVALID_WR_FAIL, + EFX_MAC_CTPIO_VI_CLOBBER_FALLBACK, + EFX_MAC_CTPIO_UNQUALIFIED_FALLBACK, + EFX_MAC_CTPIO_RUNT_FALLBACK, + EFX_MAC_CTPIO_SUCCESS, + EFX_MAC_CTPIO_FALLBACK, + EFX_MAC_CTPIO_POISON, + EFX_MAC_CTPIO_ERASE, + EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC, + EFX_MAC_RXDP_HLB_IDLE, + EFX_MAC_RXDP_HLB_TIMEOUT, EFX_MAC_NSTATS } efx_mac_stat_t; @@ -408,11 +462,16 @@ typedef enum efx_link_mode_e { EFX_LINK_1000FDX, EFX_LINK_10000FDX, EFX_LINK_40000FDX, + EFX_LINK_25000FDX, + EFX_LINK_50000FDX, + EFX_LINK_100000FDX, EFX_LINK_NMODES } efx_link_mode_t; #define EFX_MAC_ADDR_LEN 6 +#define EFX_VNI_OR_VSID_LEN 3 + #define EFX_MAC_ADDR_IS_MULTICAST(_address) (((uint8_t *)_address)[0] & 0x01) #define EFX_MAC_MULTICAST_LIST_MAX 256 @@ -536,7 +595,6 @@ efx_mac_stats_get_mask( ((_mask)[(_stat) / EFX_MAC_STATS_MASK_BITS_PER_PAGE] & \ (1ULL << ((_stat) & (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1)))) -#define EFX_MAC_STATS_SIZE 0x400 extern __checkReturn efx_rc_t efx_mac_stats_clear( @@ -545,8 +603,8 @@ efx_mac_stats_clear( /* * Upload mac statistics supported by the hardware into the given buffer. * - * The reference buffer must be at least %EFX_MAC_STATS_SIZE bytes, - * and page aligned. + * The DMA buffer must be 4Kbyte aligned and sized to hold at least + * efx_nic_cfg_t::enc_mac_stats_nstats 64bit counters. * * The hardware will only DMA statistics that it understands (of course). 
* Drivers should not make any assumptions about which statistics are @@ -603,7 +661,7 @@ efx_mon_init( #define EFX_MON_STATS_PAGE_SIZE 0x100 #define EFX_MON_MASK_ELEMENT_SIZE 32 -/* START MKCONFIG GENERATED MonitorHeaderStatsBlock aa0233c80156308e */ +/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 400fdb0517af1fca */ typedef enum efx_mon_stat_e { EFX_MON_STAT_2_5V, EFX_MON_STAT_VCCP1, @@ -684,6 +742,10 @@ typedef enum efx_mon_stat_e { EFX_MON_STAT_BOARD_BACK_TEMP, EFX_MON_STAT_I1V8, EFX_MON_STAT_I2V5, + EFX_MON_STAT_I3V3, + EFX_MON_STAT_I12V0, + EFX_MON_STAT_1_3V, + EFX_MON_STAT_I1V3, EFX_MON_NSTATS } efx_mon_stat_t; @@ -788,6 +850,9 @@ typedef enum efx_loopback_type_e { EFX_LOOPBACK_SD_FEP1_5_WS = 32, EFX_LOOPBACK_SD_FEP_WS = 33, EFX_LOOPBACK_SD_FES_WS = 34, + EFX_LOOPBACK_AOE_INT_NEAR = 35, + EFX_LOOPBACK_DATA_WS = 36, + EFX_LOOPBACK_FORCE_EXT_LINK = 37, EFX_LOOPBACK_NTYPES } efx_loopback_type_t; @@ -843,6 +908,16 @@ typedef enum efx_phy_cap_type_e { EFX_PHY_CAP_ASYM, EFX_PHY_CAP_AN, EFX_PHY_CAP_40000FDX, + EFX_PHY_CAP_DDM, + EFX_PHY_CAP_100000FDX, + EFX_PHY_CAP_25000FDX, + EFX_PHY_CAP_50000FDX, + EFX_PHY_CAP_BASER_FEC, + EFX_PHY_CAP_BASER_FEC_REQUESTED, + EFX_PHY_CAP_RS_FEC, + EFX_PHY_CAP_RS_FEC_REQUESTED, + EFX_PHY_CAP_25G_BASER_FEC, + EFX_PHY_CAP_25G_BASER_FEC_REQUESTED, EFX_PHY_CAP_NTYPES } efx_phy_cap_type_t; @@ -1080,6 +1155,13 @@ typedef enum efx_tunnel_protocol_e { EFX_TUNNEL_NPROTOS } efx_tunnel_protocol_t; +typedef enum efx_vi_window_shift_e { + EFX_VI_WINDOW_SHIFT_INVALID = 0, + EFX_VI_WINDOW_SHIFT_8K = 13, + EFX_VI_WINDOW_SHIFT_16K = 14, + EFX_VI_WINDOW_SHIFT_64K = 16, +} efx_vi_window_shift_t; + typedef struct efx_nic_cfg_s { uint32_t enc_board_type; uint32_t enc_phy_type; @@ -1093,6 +1175,7 @@ typedef struct efx_nic_cfg_s { uint32_t enc_mon_stat_mask[(EFX_MON_NSTATS + 31) / 32]; #endif unsigned int enc_features; + efx_vi_window_shift_t enc_vi_window_shift; uint8_t enc_mac_addr[6]; uint8_t enc_port; /* PHY port number */ uint32_t enc_intr_vec_base; @@ -1112,6 +1195,17 @@ typedef struct efx_nic_cfg_s { uint32_t enc_rx_buf_align_start; uint32_t enc_rx_buf_align_end; uint32_t enc_rx_scale_max_exclusive_contexts; + /* + * Mask of supported hash algorithms. + * Hash algorithm types are used as the bit indices. + */ + uint32_t enc_rx_scale_hash_alg_mask; + /* + * Indicates whether port numbers can be included to the + * input data for hash computation. 
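+ * For example, with L4 hashing a TCP/IPv4 flow is spread over the full
+ * 4-tuple (addresses and ports); without it only the 2-tuple of IP
+ * addresses contributes to the hash.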
+ */ + boolean_t enc_rx_scale_l4_hash_supported; + boolean_t enc_rx_scale_additional_modes_supported; #if EFSYS_OPT_LOOPBACK efx_qword_t enc_loopback_types[EFX_LINK_NMODES]; #endif /* EFSYS_OPT_LOOPBACK */ @@ -1137,11 +1231,11 @@ typedef struct efx_nic_cfg_s { #if EFSYS_OPT_BIST uint32_t enc_bist_mask; #endif /* EFSYS_OPT_BIST */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 uint32_t enc_pf; uint32_t enc_vf; uint32_t enc_privilege_mask; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ boolean_t enc_bug26807_workaround; boolean_t enc_bug35388_workaround; boolean_t enc_bug41750_workaround; @@ -1165,6 +1259,7 @@ typedef struct efx_nic_cfg_s { uint32_t enc_tx_tso_tcp_header_offset_limit; boolean_t enc_fw_assisted_tso_enabled; boolean_t enc_fw_assisted_tso_v2_enabled; + boolean_t enc_fw_assisted_tso_v2_encap_enabled; /* Number of TSO contexts on the NIC (FATSOv2) */ uint32_t enc_fw_assisted_tso_v2_n_contexts; boolean_t enc_hw_tx_insert_vlan_enabled; @@ -1178,6 +1273,8 @@ typedef struct efx_nic_cfg_s { boolean_t enc_init_evq_v2_supported; boolean_t enc_rx_packed_stream_supported; boolean_t enc_rx_var_packed_stream_supported; + boolean_t enc_rx_es_super_buffer_supported; + boolean_t enc_fw_subvariant_no_tx_csum_supported; boolean_t enc_pm_and_rxdp_counters; boolean_t enc_mac_stats_40g_tx_size_bins; uint32_t enc_tunnel_encapsulations_supported; @@ -1196,6 +1293,14 @@ typedef struct efx_nic_cfg_s { uint32_t enc_max_pcie_link_gen; /* Firmware verifies integrity of NVRAM updates */ uint32_t enc_nvram_update_verify_result_supported; + /* Firmware support for extended MAC_STATS buffer */ + uint32_t enc_mac_stats_nstats; + boolean_t enc_fec_counters; + boolean_t enc_hlb_counters; + /* Firmware support for "FLAG" and "MARK" filter actions */ + boolean_t enc_filter_action_flag_supported; + boolean_t enc_filter_action_mark_supported; + uint32_t enc_filter_action_mark_max; } efx_nic_cfg_t; #define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff) @@ -1210,6 +1315,13 @@ extern const efx_nic_cfg_t * efx_nic_cfg_get( __in efx_nic_t *enp); +/* RxDPCPU firmware id values by which FW variant can be identified */ +#define EFX_RXDP_FULL_FEATURED_FW_ID 0x0 +#define EFX_RXDP_LOW_LATENCY_FW_ID 0x1 +#define EFX_RXDP_PACKED_STREAM_FW_ID 0x2 +#define EFX_RXDP_RULES_ENGINE_FW_ID 0x5 +#define EFX_RXDP_DPDK_FW_ID 0x6 + typedef struct efx_nic_fw_info_s { /* Basic FW version information */ uint16_t enfi_mc_fw_version[4]; @@ -1498,6 +1610,92 @@ efx_bootcfg_write( #endif /* EFSYS_OPT_BOOTCFG */ +#if EFSYS_OPT_IMAGE_LAYOUT + +#include "ef10_signed_image_layout.h" + +/* + * Image header used in unsigned and signed image layouts (see SF-102785-PS). + * + * NOTE: + * The image header format is extensible. However, older drivers require an + * exact match of image header version and header length when validating and + * writing firmware images. + * + * To avoid breaking backward compatibility, we use the upper bits of the + * controller version fields to contain an extra version number used for + * combined bootROM and UEFI ROM images on EF10 and later (to hold the UEFI ROM + * version). See bug39254 and SF-102785-PS for details. 
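As a usage sketch for the image-layout declarations that follow (image_buf, image_len, write_buf and write_len are hypothetical names for buffers obtained from the caller's NVRAM update path):

	efx_image_info_t info;
	efx_image_header_t *hdrp;
	efx_rc_t rc;

	/* Classify and validate a candidate reflash image */
	rc = efx_check_reflash_image(image_buf, image_len, &info);
	if ((rc == 0) && (info.eii_format == EFX_IMAGE_FORMAT_SIGNED)) {
		/* Signed images must be repacked into a write buffer */
		rc = efx_build_signed_image_write_buffer(write_buf,
		    write_len, &info, &hdrp);
	}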
+ */ +typedef struct efx_image_header_s { + uint32_t eih_magic; + uint32_t eih_version; + uint32_t eih_type; + uint32_t eih_subtype; + uint32_t eih_code_size; + uint32_t eih_size; + union { + uint32_t eih_controller_version_min; + struct { + uint16_t eih_controller_version_min_short; + uint8_t eih_extra_version_a; + uint8_t eih_extra_version_b; + }; + }; + union { + uint32_t eih_controller_version_max; + struct { + uint16_t eih_controller_version_max_short; + uint8_t eih_extra_version_c; + uint8_t eih_extra_version_d; + }; + }; + uint16_t eih_code_version_a; + uint16_t eih_code_version_b; + uint16_t eih_code_version_c; + uint16_t eih_code_version_d; +} efx_image_header_t; + +#define EFX_IMAGE_HEADER_SIZE (40) +#define EFX_IMAGE_HEADER_VERSION (4) +#define EFX_IMAGE_HEADER_MAGIC (0x106F1A5) + + +typedef struct efx_image_trailer_s { + uint32_t eit_crc; +} efx_image_trailer_t; + +#define EFX_IMAGE_TRAILER_SIZE (4) + +typedef enum efx_image_format_e { + EFX_IMAGE_FORMAT_NO_IMAGE, + EFX_IMAGE_FORMAT_INVALID, + EFX_IMAGE_FORMAT_UNSIGNED, + EFX_IMAGE_FORMAT_SIGNED, +} efx_image_format_t; + +typedef struct efx_image_info_s { + efx_image_format_t eii_format; + uint8_t * eii_imagep; + size_t eii_image_size; + efx_image_header_t * eii_headerp; +} efx_image_info_t; + +extern __checkReturn efx_rc_t +efx_check_reflash_image( + __in void *bufferp, + __in uint32_t buffer_size, + __out efx_image_info_t *infop); + +extern __checkReturn efx_rc_t +efx_build_signed_image_write_buffer( + __out uint8_t *bufferp, + __in uint32_t buffer_size, + __in efx_image_info_t *infop, + __out efx_image_header_t **headerpp); + +#endif /* EFSYS_OPT_IMAGE_LAYOUT */ + #if EFSYS_OPT_DIAG typedef enum efx_pattern_type_t { @@ -1674,7 +1872,7 @@ typedef __checkReturn boolean_t __in uint32_t size, __in uint16_t flags); -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER /* * Packed stream mode is documented in SF-112241-TC. @@ -1684,6 +1882,13 @@ typedef __checkReturn boolean_t * packets are put there in a continuous stream. * The main advantage of such an approach is that RX queue refilling * happens much less frequently. + * + * Equal stride packed stream mode is documented in SF-119419-TC. + * The general idea is to utilize the advantages of the packed stream, + * but avoid indirection in packet representation. + * The main advantage of such an approach is that RX queue refilling + * happens much less frequently and packet buffers are independent + * from the upper layers' point of view. */ typedef __checkReturn boolean_t @@ -1784,7 +1989,7 @@ typedef __checkReturn boolean_t typedef struct efx_ev_callbacks_s { efx_initialized_ev_t eec_initialized; efx_rx_ev_t eec_rx; -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER efx_rx_ps_ev_t eec_rx_ps; #endif efx_tx_ev_t eec_tx; @@ -1888,14 +2093,35 @@ efx_rx_scatter_enable( typedef enum efx_rx_hash_alg_e { EFX_RX_HASHALG_LFSR = 0, - EFX_RX_HASHALG_TOEPLITZ + EFX_RX_HASHALG_TOEPLITZ, + EFX_RX_HASHALG_PACKED_STREAM, + EFX_RX_NHASHALGS } efx_rx_hash_alg_t; +/* + * Legacy hash type flags. + * + * They represent standard tuples for distinct traffic classes.
+ */ #define EFX_RX_HASH_IPV4 (1U << 0) #define EFX_RX_HASH_TCPIPV4 (1U << 1) #define EFX_RX_HASH_IPV6 (1U << 2) #define EFX_RX_HASH_TCPIPV6 (1U << 3) +#define EFX_RX_HASH_LEGACY_MASK \ + (EFX_RX_HASH_IPV4 | \ + EFX_RX_HASH_TCPIPV4 | \ + EFX_RX_HASH_IPV6 | \ + EFX_RX_HASH_TCPIPV6) + +/* + * The type of the argument used by efx_rx_scale_mode_set() to + * provide a means for the client drivers to configure hashing. + * + * A properly constructed value can either be: + * - a combination of legacy flags + * - a combination of EFX_RX_HASH() flags + */ typedef unsigned int efx_rx_hash_type_t; typedef enum efx_rx_hash_support_e { @@ -1914,6 +2140,92 @@ typedef enum efx_rx_scale_context_type_e { EFX_RX_SCALE_SHARED /* Read-only key/indirection table */ } efx_rx_scale_context_type_t; +/* + * Traffic classes eligible for hash computation. + * + * Select packet headers used in computing the receive hash. + * This uses the same encoding as the RSS_MODES field of + * MC_CMD_RSS_CONTEXT_SET_FLAGS. + */ +#define EFX_RX_CLASS_IPV4_TCP_LBN 8 +#define EFX_RX_CLASS_IPV4_TCP_WIDTH 4 +#define EFX_RX_CLASS_IPV4_UDP_LBN 12 +#define EFX_RX_CLASS_IPV4_UDP_WIDTH 4 +#define EFX_RX_CLASS_IPV4_LBN 16 +#define EFX_RX_CLASS_IPV4_WIDTH 4 +#define EFX_RX_CLASS_IPV6_TCP_LBN 20 +#define EFX_RX_CLASS_IPV6_TCP_WIDTH 4 +#define EFX_RX_CLASS_IPV6_UDP_LBN 24 +#define EFX_RX_CLASS_IPV6_UDP_WIDTH 4 +#define EFX_RX_CLASS_IPV6_LBN 28 +#define EFX_RX_CLASS_IPV6_WIDTH 4 + +#define EFX_RX_NCLASSES 6 + +/* + * Ancillary flags used to construct generic hash tuples. + * This uses the same encoding as RSS_MODE_HASH_SELECTOR. + */ +#define EFX_RX_CLASS_HASH_SRC_ADDR (1U << 0) +#define EFX_RX_CLASS_HASH_DST_ADDR (1U << 1) +#define EFX_RX_CLASS_HASH_SRC_PORT (1U << 2) +#define EFX_RX_CLASS_HASH_DST_PORT (1U << 3) + +/* + * Generic hash tuples. + * + * They express combinations of packet fields + * which can contribute to the hash value for + * a particular traffic class. + */ +#define EFX_RX_CLASS_HASH_DISABLE 0 + +#define EFX_RX_CLASS_HASH_1TUPLE_SRC EFX_RX_CLASS_HASH_SRC_ADDR +#define EFX_RX_CLASS_HASH_1TUPLE_DST EFX_RX_CLASS_HASH_DST_ADDR + +#define EFX_RX_CLASS_HASH_2TUPLE \ + (EFX_RX_CLASS_HASH_SRC_ADDR | \ + EFX_RX_CLASS_HASH_DST_ADDR) + +#define EFX_RX_CLASS_HASH_2TUPLE_SRC \ + (EFX_RX_CLASS_HASH_SRC_ADDR | \ + EFX_RX_CLASS_HASH_SRC_PORT) + +#define EFX_RX_CLASS_HASH_2TUPLE_DST \ + (EFX_RX_CLASS_HASH_DST_ADDR | \ + EFX_RX_CLASS_HASH_DST_PORT) + +#define EFX_RX_CLASS_HASH_4TUPLE \ + (EFX_RX_CLASS_HASH_SRC_ADDR | \ + EFX_RX_CLASS_HASH_DST_ADDR | \ + EFX_RX_CLASS_HASH_SRC_PORT | \ + EFX_RX_CLASS_HASH_DST_PORT) + +#define EFX_RX_CLASS_HASH_NTUPLES 7 + +/* + * Hash flag constructor. + * + * Resulting flags encode hash tuples for specific traffic classes. + * The client drivers are encouraged to use these flags to form + * a hash type value. + */ +#define EFX_RX_HASH(_class, _tuple) \ + EFX_INSERT_FIELD_NATIVE32(0, 31, \ + EFX_RX_CLASS_##_class, EFX_RX_CLASS_HASH_##_tuple) + +/* + * The maximum number of EFX_RX_HASH() flags. 
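As a usage sketch, a hash type enabling 4-tuple hashing for TCP/IPv4 and 2-tuple (addresses only) hashing for other IPv4 traffic could be built as below; the resulting value is of the kind efx_rx_scale_mode_set() accepts, though that call itself is not shown here:

	/* 4-tuple hash for TCP/IPv4 traffic, 2-tuple for other IPv4 */
	efx_rx_hash_type_t hash_type =
	    EFX_RX_HASH(IPV4_TCP, 4TUPLE) |
	    EFX_RX_HASH(IPV4, 2TUPLE);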
+ */ +#define EFX_RX_HASH_NFLAGS (EFX_RX_NCLASSES * EFX_RX_CLASS_HASH_NTUPLES) + +extern __checkReturn efx_rc_t +efx_rx_scale_hash_flags_get( + __in efx_nic_t *enp, + __in efx_rx_hash_alg_t hash_alg, + __inout_ecount(EFX_RX_HASH_NFLAGS) unsigned int *flags, + __out unsigned int *nflagsp); + extern __checkReturn efx_rc_t efx_rx_hash_default_support_get( __in efx_nic_t *enp, @@ -1984,6 +2296,7 @@ efx_pseudo_hdr_pkt_length_get( typedef enum efx_rxq_type_e { EFX_RXQ_TYPE_DEFAULT, EFX_RXQ_TYPE_PACKED_STREAM, + EFX_RXQ_TYPE_ES_SUPER_BUFFER, EFX_RXQ_NTYPES } efx_rxq_type_t; @@ -2037,6 +2350,28 @@ efx_rx_qcreate_packed_stream( #endif +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + +/* Maximum head-of-line block timeout in nanoseconds */ +#define EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX (400U * 1000 * 1000) + +extern __checkReturn efx_rc_t +efx_rx_qcreate_es_super_buffer( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in uint32_t n_bufs_per_desc, + __in uint32_t max_dma_len, + __in uint32_t buf_stride, + __in uint32_t hol_block_timeout, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in unsigned int flags, + __in efx_evq_t *eep, + __deref_out efx_rxq_t **erpp); + +#endif + typedef struct efx_buffer_s { efsys_dma_addr_t eb_addr; size_t eb_size; @@ -2226,6 +2561,7 @@ extern void efx_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, + __in uint16_t outer_ipv4_id, __in uint32_t tcp_seq, __in uint16_t tcp_mss, __out_ecount(count) efx_desc_t *edp, @@ -2237,6 +2573,12 @@ efx_tx_qdesc_vlantci_create( __in uint16_t tci, __out efx_desc_t *edp); +extern void +efx_tx_qdesc_checksum_create( + __in efx_txq_t *etp, + __in uint16_t flags, + __out efx_desc_t *edp); + #if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES @@ -2285,6 +2627,10 @@ efx_tx_qdestroy( #define EFX_FILTER_FLAG_RX 0x08 /* Filter is for TX */ #define EFX_FILTER_FLAG_TX 0x10 +/* Set match flag on the received packet */ +#define EFX_FILTER_FLAG_ACTION_FLAG 0x20 +/* Set match mark on the received packet */ +#define EFX_FILTER_FLAG_ACTION_MARK 0x40 typedef uint8_t efx_filter_flags_t; @@ -2313,10 +2659,19 @@ typedef uint8_t efx_filter_flags_t; #define EFX_FILTER_MATCH_OUTER_VID 0x00000100 /* Match by IP transport protocol */ #define EFX_FILTER_MATCH_IP_PROTO 0x00000200 +/* Match by VNI or VSID */ +#define EFX_FILTER_MATCH_VNI_OR_VSID 0x00000800 +/* For encapsulated packets, match by inner frame local MAC address */ +#define EFX_FILTER_MATCH_IFRM_LOC_MAC 0x00010000 /* For encapsulated packets, match all multicast inner frames */ #define EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST 0x01000000 /* For encapsulated packets, match all unicast inner frames */ #define EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST 0x02000000 +/* + * Match by encap type, this flag does not correspond to + * the MCDI match flags and any unoccupied value may be used + */ +#define EFX_FILTER_MATCH_ENCAP_TYPE 0x20000000 /* Match otherwise-unmatched multicast and broadcast packets */ #define EFX_FILTER_MATCH_UNKNOWN_MCAST_DST 0x40000000 /* Match otherwise-unmatched unicast packets */ @@ -2359,6 +2714,9 @@ typedef struct efx_filter_spec_s { uint16_t efs_rem_port; efx_oword_t efs_rem_host; efx_oword_t efs_loc_host; + uint8_t efs_vni_or_vsid[EFX_VNI_OR_VSID_LEN]; + uint8_t efs_ifrm_loc_mac[EFX_MAC_ADDR_LEN]; + uint32_t efs_mark; } efx_filter_spec_t; @@ -2454,6 +2812,13 @@ efx_filter_spec_set_encap_type( __in efx_tunnel_protocol_t encap_type, __in efx_filter_inner_frame_match_t inner_frame_match); +extern __checkReturn efx_rc_t +efx_filter_spec_set_vxlan_full( + __inout 
efx_filter_spec_t *spec, + __in const uint8_t *vxlan_id, + __in const uint8_t *inner_addr, + __in const uint8_t *outer_addr); + #if EFSYS_OPT_RX_SCALE extern __checkReturn efx_rc_t efx_filter_spec_set_rss_context( @@ -2659,6 +3024,38 @@ efx_tunnel_reconfigure( #endif /* EFSYS_OPT_TUNNEL */ +#if EFSYS_OPT_FW_SUBVARIANT_AWARE + +/** + * Firmware subvariant choice options. + * + * It may be switched to no Tx checksum if attached drivers are either + * preboot or firmware subvariant aware and no VIs are allocated. + * It may always be switched to default, explicitly using a set request or + * implicitly if an unaware driver is attaching. If switching is done when + * a driver is attached, it gets an MC_REBOOT event and should recreate its + * datapath. + * + * See SF-119419-TC DPDK Firmware Driver Interface and + * SF-109306-TC EF10 for Driver Writers for details. + */ +typedef enum efx_nic_fw_subvariant_e { + EFX_NIC_FW_SUBVARIANT_DEFAULT = 0, + EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM = 1, + EFX_NIC_FW_SUBVARIANT_NTYPES +} efx_nic_fw_subvariant_t; + +extern __checkReturn efx_rc_t +efx_nic_get_fw_subvariant( + __in efx_nic_t *enp, + __out efx_nic_fw_subvariant_t *subvariantp); + +extern __checkReturn efx_rc_t +efx_nic_set_fw_subvariant( + __in efx_nic_t *enp, + __in efx_nic_fw_subvariant_t subvariant); + +#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */ #ifdef __cplusplus } diff --git a/drivers/net/sfc/base/efx_bootcfg.c b/drivers/net/sfc/base/efx_bootcfg.c index 0f71936f..715e18e8 100644 --- a/drivers/net/sfc/base/efx_bootcfg.c +++ b/drivers/net/sfc/base/efx_bootcfg.c @@ -68,6 +68,20 @@ efx_bootcfg_sector_info( } #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: { + /* Shared partition (array indexed by PF) */ + max_size = BOOTCFG_PER_PF; + count = BOOTCFG_PF_COUNT; + if (pf >= count) { + rc = EINVAL; + goto fail3; + } + offset = max_size * pf; + break; + } +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; @@ -82,6 +96,10 @@ efx_bootcfg_sector_info( return (0); +#if EFSYS_OPT_MEDFORD2 +fail3: + EFSYS_PROBE(fail3); +#endif #if EFSYS_OPT_MEDFORD fail2: EFSYS_PROBE(fail2); @@ -191,19 +209,25 @@ efx_bootcfg_copy_sector( size_t used_bytes; efx_rc_t rc; + /* Minimum buffer is checksum byte and DHCP_END terminator */ + if (data_size < 2) { + rc = ENOSPC; + goto fail1; + } + /* Verify that the area is correctly formatted and checksummed */ rc = efx_bootcfg_verify(enp, sector, sector_length, &used_bytes); if (!handle_format_errors) { if (rc != 0) - goto fail1; + goto fail2; if ((used_bytes < 2) || (sector[used_bytes - 1] != DHCP_END)) { /* Block too short, or DHCP_END missing */ rc = ENOENT; - goto fail2; + goto fail3; } } @@ -237,9 +261,13 @@ efx_bootcfg_copy_sector( */ if (used_bytes > data_size) { rc = ENOSPC; - goto fail3; + goto fail4; } - memcpy(data, sector, used_bytes); + + data[0] = 0; /* checksum, updated below */ + + /* Copy all after the checksum to the target buffer */ + memcpy(data + 1, sector + 1, used_bytes - 1); /* Zero out the unused portion of the target buffer */ if (used_bytes < data_size) @@ -253,6 +281,8 @@ efx_bootcfg_copy_sector( return (0); +fail4: + EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: @@ -277,20 +307,31 @@ efx_bootcfg_read( efx_rc_t rc; uint32_t sector_number; -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD + /* Minimum buffer is checksum byte and DHCP_END terminator */ + if (size < 2) { + rc = ENOSPC; + goto fail1; + } + +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 sector_number =
enp->en_nic_cfg.enc_pf; #else sector_number = 0; #endif rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length); if (rc != 0) - goto fail1; + goto fail2; /* The bootcfg sector may be stored in a (larger) shared partition */ rc = efx_bootcfg_sector_info(enp, sector_number, NULL, §or_offset, §or_length); if (rc != 0) - goto fail2; + goto fail3; + + if (sector_length < 2) { + rc = EINVAL; + goto fail4; + } if (sector_length > BOOTCFG_MAX_SIZE) sector_length = BOOTCFG_MAX_SIZE; @@ -298,7 +339,7 @@ efx_bootcfg_read( if (sector_offset + sector_length > partn_length) { /* Partition is too small */ rc = EFBIG; - goto fail3; + goto fail5; } /* @@ -311,28 +352,28 @@ efx_bootcfg_read( EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload); if (payload == NULL) { rc = ENOMEM; - goto fail4; + goto fail6; } } else payload = (uint8_t *)data; if ((rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0) - goto fail5; + goto fail7; if ((rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG, sector_offset, (caddr_t)payload, sector_length)) != 0) { (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL); - goto fail6; + goto fail8; } if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0) - goto fail7; + goto fail9; /* Verify that the area is correctly formatted and checksummed */ rc = efx_bootcfg_verify(enp, payload, sector_length, &used_bytes); if (rc != 0 || used_bytes == 0) { - payload[0] = (uint8_t)(~DHCP_END & 0xff); + payload[0] = 0; payload[1] = DHCP_END; used_bytes = 2; } @@ -347,10 +388,8 @@ efx_bootcfg_read( * so reinitialise the sector if there isn't room for the character. */ if (payload[used_bytes - 1] != DHCP_END) { - if (used_bytes + 1 > sector_length) { - payload[0] = 0; + if (used_bytes >= sector_length) used_bytes = 1; - } payload[used_bytes] = DHCP_END; ++used_bytes; @@ -362,10 +401,14 @@ efx_bootcfg_read( */ if (used_bytes > size) { rc = ENOSPC; - goto fail8; + goto fail10; } + + data[0] = 0; /* checksum, updated below */ + if (sector_length > size) { - memcpy(data, payload, used_bytes); + /* Copy all after the checksum to the target buffer */ + memcpy(data + 1, payload + 1, used_bytes - 1); EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload); } @@ -381,16 +424,20 @@ efx_bootcfg_read( return (0); +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); + if (sector_length > size) + EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); - if (sector_length > size) - EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload); fail4: EFSYS_PROBE(fail4); fail3: @@ -418,7 +465,7 @@ efx_bootcfg_write( efx_rc_t rc; uint32_t sector_number; -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 sector_number = enp->en_nic_cfg.enc_pf; #else sector_number = 0; diff --git a/drivers/net/sfc/base/efx_check.h b/drivers/net/sfc/base/efx_check.h index 58377759..ef5eadc6 100644 --- a/drivers/net/sfc/base/efx_check.h +++ b/drivers/net/sfc/base/efx_check.h @@ -30,8 +30,9 @@ #if EFSYS_OPT_CHECK_REG /* Verify chip implements accessed registers */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_CHECK_REG */ @@ -44,15 +45,17 @@ #if 
EFSYS_OPT_DIAG /* Support diagnostic hardware tests */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "DIAG requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "DIAG requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_DIAG */ #if EFSYS_OPT_EV_PREFETCH /* Support optimized EVQ data access */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_EV_PREFETCH */ @@ -62,21 +65,23 @@ #if EFSYS_OPT_FILTER /* Support hardware packet filters */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "FILTER requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "FILTER requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_FILTER */ -#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) +#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) # if !EFSYS_OPT_FILTER -# error "HUNTINGTON or MEDFORD requires FILTER" +# error "HUNTINGTON or MEDFORD or MEDFORD2 requires FILTER" # endif #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_LOOPBACK /* Support hardware loopback modes */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_LOOPBACK */ @@ -90,21 +95,24 @@ #if EFSYS_OPT_MAC_STATS /* Support MAC statistics */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_MAC_STATS */ #if EFSYS_OPT_MCDI /* Support management controller messages */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "MCDI requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "MCDI requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_MCDI */ -#if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) +#if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) # if !EFSYS_OPT_MCDI -# error "SIENA or HUNTINGTON or MEDFORD requires MCDI" +# error "SIENA or HUNTINGTON or MEDFORD or MEDFORD2 requires MCDI" # endif #endif @@ -144,15 +152,17 @@ #if EFSYS_OPT_MON_STATS /* Support monitor statistics (voltage/temperature) */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_MON_STATS */ #if EFSYS_OPT_MON_MCDI /* Support Monitor via mcdi */ -# if !(EFSYS_OPT_SIENA || 
EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "MON_MCDI requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "MON_MCDI requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_MON_MCDI*/ @@ -166,11 +176,19 @@ #if EFSYS_OPT_NVRAM /* Support non volatile configuration */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "NVRAM requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "NVRAM requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_NVRAM */ +#if EFSYS_OPT_IMAGE_LAYOUT +/* Support signed image layout handling */ +# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "IMAGE_LAYOUT requires MEDFORD or MEDFORD2" +# endif +#endif /* EFSYS_OPT_IMAGE_LAYOUT */ + #ifdef EFSYS_OPT_NVRAM_FALCON_BOOTROM # error "NVRAM_FALCON_BOOTROM is obsolete and is not supported." #endif @@ -200,8 +218,9 @@ #if EFSYS_OPT_PHY_LED_CONTROL /* Support for PHY LED control */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "PHY_LED_CONTROL requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "PHY_LED_CONTROL requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_PHY_LED_CONTROL */ @@ -246,8 +265,9 @@ #if EFSYS_OPT_QSTATS /* Support EVQ/RXQ/TXQ statistics */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "QSTATS requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "QSTATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_QSTATS */ @@ -257,15 +277,17 @@ #if EFSYS_OPT_RX_SCALE /* Support receive scaling (RSS) */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCATTER /* Support receive scatter DMA */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_RX_SCATTER */ @@ -275,8 +297,9 @@ #if EFSYS_OPT_VPD /* Support PCI Vital Product Data (VPD) */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "VPD requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "VPD requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_VPD */ @@ -290,8 +313,9 @@ #if EFSYS_OPT_BIST /* Support BIST */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "BIST requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "BIST requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_BIST */ @@ -307,23 +331,37 @@ #if 
EFSYS_OPT_ALLOW_UNCONFIGURED_NIC /* Support adapters with missing static config (for factory use only) */ -# if !EFSYS_OPT_MEDFORD -# error "ALLOW_UNCONFIGURED_NIC requires MEDFORD" +# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "ALLOW_UNCONFIGURED_NIC requires MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ #if EFSYS_OPT_RX_PACKED_STREAM /* Support packed stream mode */ -# if !(EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "PACKED_STREAM requires HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "PACKED_STREAM requires HUNTINGTON or MEDFORD or MEDFORD2" +# endif +#endif + +#if EFSYS_OPT_RX_ES_SUPER_BUFFER +/* Support equal stride super-buffer mode */ +# if !(EFSYS_OPT_MEDFORD2) +# error "ES_SUPER_BUFFER requires MEDFORD2" # endif #endif /* Support hardware assistance for tunnels */ #if EFSYS_OPT_TUNNEL -# if !EFSYS_OPT_MEDFORD -# error "TUNNEL requires MEDFORD" +# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "TUNNEL requires MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_TUNNEL */ +#if EFSYS_OPT_FW_SUBVARIANT_AWARE +/* Advertise that the driver is firmware subvariant aware */ +# if !(EFSYS_OPT_MEDFORD2) +# error "FW_SUBVARIANT_AWARE requires MEDFORD2" +# endif +#endif + #endif /* _SYS_EFX_CHECK_H */ diff --git a/drivers/net/sfc/base/efx_ev.c b/drivers/net/sfc/base/efx_ev.c index 949d352a..1139cc26 100644 --- a/drivers/net/sfc/base/efx_ev.c +++ b/drivers/net/sfc/base/efx_ev.c @@ -91,7 +91,7 @@ static const efx_ev_ops_t __efx_ev_siena_ops = { }; #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_ev_ops_t __efx_ev_ef10_ops = { ef10_ev_init, /* eevo_init */ ef10_ev_fini, /* eevo_fini */ @@ -104,7 +104,7 @@ static const efx_ev_ops_t __efx_ev_ef10_ops = { ef10_ev_qstats_update, /* eevo_qstats_update */ #endif }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t @@ -141,6 +141,12 @@ efx_ev_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + eevop = &__efx_ev_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; diff --git a/drivers/net/sfc/base/efx_filter.c b/drivers/net/sfc/base/efx_filter.c index b92541aa..412298ac 100644 --- a/drivers/net/sfc/base/efx_filter.c +++ b/drivers/net/sfc/base/efx_filter.c @@ -56,7 +56,7 @@ static const efx_filter_ops_t __efx_filter_siena_ops = { }; #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_filter_ops_t __efx_filter_ef10_ops = { ef10_filter_init, /* efo_init */ ef10_filter_fini, /* efo_fini */ @@ -66,7 +66,7 @@ static const efx_filter_ops_t __efx_filter_ef10_ops = { ef10_filter_supported_filters, /* efo_supported_filters */ ef10_filter_reconfigure, /* efo_reconfigure */ }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_filter_insert( @@ -74,12 +74,33 @@ efx_filter_insert( __inout efx_filter_spec_t *spec) { const efx_filter_ops_t *efop = enp->en_efop; + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3U(spec->efs_flags, &, 
EFX_FILTER_FLAG_RX); + if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) && + !encp->enc_filter_action_mark_supported) { + rc = ENOTSUP; + goto fail1; + } + + if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG) && + !encp->enc_filter_action_flag_supported) { + rc = ENOTSUP; + goto fail2; + } + return (efop->efo_add(enp, spec, B_FALSE)); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); } __checkReturn efx_rc_t @@ -145,6 +166,12 @@ efx_filter_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + efop = &__efx_filter_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; @@ -412,7 +439,7 @@ efx_filter_spec_set_encap_type( __in efx_tunnel_protocol_t encap_type, __in efx_filter_inner_frame_match_t inner_frame_match) { - uint32_t match_flags = 0; + uint32_t match_flags = EFX_FILTER_MATCH_ENCAP_TYPE; uint8_t ip_proto; efx_rc_t rc; @@ -462,6 +489,43 @@ fail1: return (rc); } +/* + * Specify inner and outer Ethernet address and VXLAN ID in filter + * specification. + */ + __checkReturn efx_rc_t +efx_filter_spec_set_vxlan_full( + __inout efx_filter_spec_t *spec, + __in const uint8_t *vxlan_id, + __in const uint8_t *inner_addr, + __in const uint8_t *outer_addr) +{ + EFSYS_ASSERT3P(spec, !=, NULL); + EFSYS_ASSERT3P(vxlan_id, !=, NULL); + EFSYS_ASSERT3P(inner_addr, !=, NULL); + EFSYS_ASSERT3P(outer_addr, !=, NULL); + + if ((inner_addr == NULL) && (outer_addr == NULL)) + return (EINVAL); + + if (vxlan_id != NULL) { + spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID; + memcpy(spec->efs_vni_or_vsid, vxlan_id, EFX_VNI_OR_VSID_LEN); + } + if (outer_addr != NULL) { + spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC; + memcpy(spec->efs_loc_mac, outer_addr, EFX_MAC_ADDR_LEN); + } + if (inner_addr != NULL) { + spec->efs_match_flags |= EFX_FILTER_MATCH_IFRM_LOC_MAC; + memcpy(spec->efs_ifrm_loc_mac, inner_addr, EFX_MAC_ADDR_LEN); + } + spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN; + + return (0); +} + #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t efx_filter_spec_set_rss_context( @@ -953,6 +1017,7 @@ siena_filter_build( default: EFSYS_ASSERT(B_FALSE); + EFX_ZERO_OWORD(*filter); return (0); } diff --git a/drivers/net/sfc/base/efx_impl.h b/drivers/net/sfc/base/efx_impl.h index ed685cba..548834f9 100644 --- a/drivers/net/sfc/base/efx_impl.h +++ b/drivers/net/sfc/base/efx_impl.h @@ -29,9 +29,13 @@ #include "medford_impl.h" #endif /* EFSYS_OPT_MEDFORD */ -#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) +#if EFSYS_OPT_MEDFORD2 +#include "medford2_impl.h" +#endif /* EFSYS_OPT_MEDFORD2 */ + +#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) #include "ef10_impl.h" -#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */ +#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) */ #ifdef __cplusplus extern "C" { @@ -61,6 +65,7 @@ typedef enum efx_mac_type_e { EFX_MAC_SIENA, EFX_MAC_HUNTINGTON, EFX_MAC_MEDFORD, + EFX_MAC_MEDFORD2, EFX_MAC_NTYPES } efx_mac_type_t; @@ -112,16 +117,36 @@ typedef struct efx_tx_ops_s { uint32_t, uint8_t, efx_desc_t *); void (*etxo_qdesc_tso2_create)(efx_txq_t *, uint16_t, - uint32_t, uint16_t, + uint16_t, uint32_t, uint16_t, efx_desc_t *, int); void (*etxo_qdesc_vlantci_create)(efx_txq_t *, uint16_t, efx_desc_t *); + void (*etxo_qdesc_checksum_create)(efx_txq_t *, uint16_t, + efx_desc_t *); #if EFSYS_OPT_QSTATS void (*etxo_qstats_update)(efx_txq_t *, efsys_stat_t 
*); #endif } efx_tx_ops_t; +typedef union efx_rxq_type_data_u { + /* Dummy member to have non-empty union if no options are enabled */ + uint32_t ertd_dummy; +#if EFSYS_OPT_RX_PACKED_STREAM + struct { + uint32_t eps_buf_size; + } ertd_packed_stream; +#endif +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + struct { + uint32_t eessb_bufs_per_desc; + uint32_t eessb_max_dma_len; + uint32_t eessb_buf_stride; + uint32_t eessb_hol_block_timeout; + } ertd_es_super_buffer; +#endif +} efx_rxq_type_data_t; + typedef struct efx_rx_ops_s { efx_rc_t (*erxo_init)(efx_nic_t *); void (*erxo_fini)(efx_nic_t *); @@ -158,7 +183,8 @@ typedef struct efx_rx_ops_s { efx_rc_t (*erxo_qflush)(efx_rxq_t *); void (*erxo_qenable)(efx_rxq_t *); efx_rc_t (*erxo_qcreate)(efx_nic_t *enp, unsigned int, - unsigned int, efx_rxq_type_t, uint32_t, + unsigned int, efx_rxq_type_t, + const efx_rxq_type_data_t *, efsys_mem_t *, size_t, uint32_t, unsigned int, efx_evq_t *, efx_rxq_t *); @@ -398,9 +424,9 @@ typedef struct efx_filter_s { #if EFSYS_OPT_SIENA siena_filter_t *ef_siena_filter; #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 ef10_filter_table_t *ef_ef10_filter_table; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ } efx_filter_t; #if EFSYS_OPT_SIENA @@ -640,6 +666,7 @@ struct efx_nic_s { const efx_ev_ops_t *en_eevop; const efx_tx_ops_t *en_etxop; const efx_rx_ops_t *en_erxop; + efx_fw_variant_t efv; #if EFSYS_OPT_FILTER efx_filter_t en_filter; const efx_filter_ops_t *en_efop; @@ -683,7 +710,7 @@ struct efx_nic_s { #endif /* EFSYS_OPT_SIENA */ int enu_unused; } en_u; -#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) +#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) union en_arch { struct { int ena_vi_base; @@ -704,7 +731,7 @@ struct efx_nic_s { size_t ena_wc_mem_map_size; } ef10; } en_arch; -#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */ +#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) */ }; @@ -716,9 +743,11 @@ typedef boolean_t (*efx_ev_handler_t)(efx_evq_t *, efx_qword_t *, typedef struct efx_evq_rxq_state_s { unsigned int eers_rx_read_ptr; unsigned int eers_rx_mask; -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER unsigned int eers_rx_stream_npackets; boolean_t eers_rx_packed_stream; +#endif +#if EFSYS_OPT_RX_PACKED_STREAM unsigned int eers_rx_packed_stream_credits; #endif } efx_evq_rxq_state_t; @@ -825,6 +854,10 @@ struct efx_txq_s { rev = 'E'; \ break; \ \ + case EFX_FAMILY_MEDFORD2: \ + rev = 'F'; \ + break; \ + \ default: \ rev = '?'; \ break; \ @@ -915,6 +948,15 @@ struct efx_txq_s { _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) +/* + * Accessors for memory BAR non-VI tables. + * + * Code used on EF10 *must* use EFX_BAR_VI_*() macros for per-VI registers, + * to ensure the correct runtime VI window size is used on Medford2. + * + * Siena-only code may continue using EFX_BAR_TBL_*() macros for VI registers. 
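In effect, the EFX_BAR_VI_*() accessors defined below locate a per-VI register as in this simplified sketch (reg_ofst and vi_index are illustrative names; probes and locking are omitted):

	/* Per-VI register offset: register base plus the VI index scaled
	 * by the runtime VI window size (8KB, 16KB or 64KB on Medford2) */
	uint32_t vi_ofst = reg_ofst +
	    (vi_index << enp->en_nic_cfg.enc_vi_window_shift);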
+ */ + #define EFX_BAR_TBL_READD(_enp, _reg, _index, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_BAR_READD((_enp)->en_esbp, \ (_reg ## _OFST + \ ((_index) * _reg ## _STEP)), \ (_edp), (_lock)); \ EFSYS_PROBE4(efx_bar_tbl_readd, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_edp)->ed_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) @@ -941,21 +983,6 @@ struct efx_txq_s { _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) -#define EFX_BAR_TBL_WRITED2(_enp, _reg, _index, _edp, _lock) \ - do { \ - EFX_CHECK_REG((_enp), (_reg)); \ - EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \ - uint32_t, (_index), \ - uint32_t, _reg ## _OFST, \ - uint32_t, (_edp)->ed_u32[0]); \ - EFSYS_BAR_WRITED((_enp)->en_esbp, \ - (_reg ## _OFST + \ - (2 * sizeof (efx_dword_t)) + \ - ((_index) * _reg ## _STEP)), \ - (_edp), (_lock)); \ - _NOTE(CONSTANTCONDITION) \ - } while (B_FALSE) - #define EFX_BAR_TBL_WRITED3(_enp, _reg, _index, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ @@ -1032,16 +1059,66 @@ struct efx_txq_s { } while (B_FALSE) /* - * Allow drivers to perform optimised 128-bit doorbell writes. + * Accessors for memory BAR per-VI registers. + * + * The VI window size is 8KB for Medford and all earlier controllers. + * For Medford2, the VI window size can be 8KB, 16KB or 64KB. + */ + +#define EFX_BAR_VI_READD(_enp, _reg, _index, _edp, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_BAR_READD((_enp)->en_esbp, \ + ((_reg ## _OFST) + \ + ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \ + (_edp), (_lock)); \ + EFSYS_PROBE4(efx_bar_vi_readd, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_edp)->ed_u32[0]); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_VI_WRITED(_enp, _reg, _index, _edp, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_PROBE4(efx_bar_vi_writed, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_edp)->ed_u32[0]); \ + EFSYS_BAR_WRITED((_enp)->en_esbp, \ + ((_reg ## _OFST) + \ + ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \ + (_edp), (_lock)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_VI_WRITED2(_enp, _reg, _index, _edp, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_PROBE4(efx_bar_vi_writed, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_edp)->ed_u32[0]); \ + EFSYS_BAR_WRITED((_enp)->en_esbp, \ + ((_reg ## _OFST) + \ + (2 * sizeof (efx_dword_t)) + \ + ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \ + (_edp), (_lock)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +/* + * Allow drivers to perform optimised 128-bit VI doorbell writes. + * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are * special-cased in the BIU on the Falcon/Siena and EF10 architectures to avoid * the need for locking in the host, and are the only ones known to be safe to * use 128-bit writes with.
*/ -#define EFX_BAR_TBL_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \ +#define EFX_BAR_VI_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ - EFSYS_PROBE7(efx_bar_tbl_doorbell_writeo, \ + EFSYS_PROBE7(efx_bar_vi_doorbell_writeo, \ const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ @@ -1050,7 +1127,8 @@ struct efx_txq_s { uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ EFSYS_BAR_DOORBELL_WRITEO((_enp)->en_esbp, \ - (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ + (_reg ## _OFST + \ + ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \ (_eop)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) diff --git a/drivers/net/sfc/base/efx_intr.c b/drivers/net/sfc/base/efx_intr.c index 83ca177d..b518916d 100644 --- a/drivers/net/sfc/base/efx_intr.c +++ b/drivers/net/sfc/base/efx_intr.c @@ -75,7 +75,7 @@ static const efx_intr_ops_t __efx_intr_siena_ops = { }; #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_intr_ops_t __efx_intr_ef10_ops = { ef10_intr_init, /* eio_init */ ef10_intr_enable, /* eio_enable */ @@ -87,7 +87,7 @@ static const efx_intr_ops_t __efx_intr_ef10_ops = { ef10_intr_fatal, /* eio_fatal */ ef10_intr_fini, /* eio_fini */ }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_intr_init( @@ -132,6 +132,12 @@ efx_intr_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + eiop = &__efx_intr_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(B_FALSE); rc = ENOTSUP; @@ -283,6 +289,12 @@ siena_intr_init( { efx_intr_t *eip = &(enp->en_intr); efx_oword_t oword; + efx_rc_t rc; + + if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_INTR_SIZE)) { + rc = EINVAL; + goto fail1; + } /* * bug17213 workaround. @@ -314,6 +326,11 @@ siena_intr_init( EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword); return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); } static void diff --git a/drivers/net/sfc/base/efx_lic.c b/drivers/net/sfc/base/efx_lic.c index ad4d2211..49c00347 100644 --- a/drivers/net/sfc/base/efx_lic.c +++ b/drivers/net/sfc/base/efx_lic.c @@ -10,6 +10,9 @@ #if EFSYS_OPT_LICENSING #include "ef10_tlv_layout.h" +#if EFSYS_OPT_SIENA +#include "efx_regs_mcdi_aoe.h" +#endif #if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON @@ -162,7 +165,7 @@ static const efx_lic_ops_t __efx_lic_v2_ops = { #endif /* EFSYS_OPT_HUNTINGTON */ -#if EFSYS_OPT_MEDFORD +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static __checkReturn efx_rc_t efx_mcdi_licensing_v3_update_licenses( @@ -286,7 +289,7 @@ static const efx_lic_ops_t __efx_lic_v3_ops = { efx_lic_v3_finish_partition, /* elo_finish_partition */ }; -#endif /* EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ /* V1 Licensing - used in Siena Modena only */ @@ -819,7 +822,7 @@ fail1: /* V3 Licensing - used starting from Medford family. 
See SF-114884-SW */ -#if EFSYS_OPT_MEDFORD +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static __checkReturn efx_rc_t efx_mcdi_licensing_v3_update_licenses( @@ -829,7 +832,8 @@ efx_mcdi_licensing_v3_update_licenses( uint8_t payload[MC_CMD_LICENSING_V3_IN_LEN]; efx_rc_t rc; - EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD); + EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) || + (enp->en_family == EFX_FAMILY_MEDFORD2)); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LICENSING_V3; @@ -866,7 +870,8 @@ efx_mcdi_licensing_v3_report_license( MC_CMD_LICENSING_V3_OUT_LEN)]; efx_rc_t rc; - EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD); + EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) || + (enp->en_family == EFX_FAMILY_MEDFORD2)); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LICENSING_V3; @@ -930,7 +935,8 @@ efx_mcdi_licensing_v3_app_state( uint32_t app_state; efx_rc_t rc; - EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD); + EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) || + (enp->en_family == EFX_FAMILY_MEDFORD2)); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE; @@ -1262,7 +1268,7 @@ fail1: } -#endif /* EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_lic_init( @@ -1296,6 +1302,12 @@ efx_lic_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + elop = &__efx_lic_v3_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; diff --git a/drivers/net/sfc/base/efx_mac.c b/drivers/net/sfc/base/efx_mac.c index 511f3eb5..57436b95 100644 --- a/drivers/net/sfc/base/efx_mac.c +++ b/drivers/net/sfc/base/efx_mac.c @@ -39,7 +39,7 @@ static const efx_mac_ops_t __efx_mac_siena_ops = { }; #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_mac_ops_t __efx_mac_ef10_ops = { ef10_mac_poll, /* emo_poll */ ef10_mac_up, /* emo_up */ @@ -62,7 +62,7 @@ static const efx_mac_ops_t __efx_mac_ef10_ops = { ef10_mac_stats_update /* emo_stats_update */ #endif /* EFSYS_OPT_MAC_STATS */ }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_mac_pdu_set( @@ -492,7 +492,7 @@ efx_mac_filter_default_rxq_clear( #if EFSYS_OPT_NAMES -/* START MKCONFIG GENERATED EfxMacStatNamesBlock c11b91b42f922516 */ +/* START MKCONFIG GENERATED EfxMacStatNamesBlock 1a45a82fcfb30c1b */ static const char * const __efx_mac_stat_name[] = { "rx_octets", "rx_pkts", @@ -575,6 +575,31 @@ static const char * const __efx_mac_stat_name[] = { "vadapter_tx_bad_packets", "vadapter_tx_bad_bytes", "vadapter_tx_overflow", + "fec_uncorrected_errors", + "fec_corrected_errors", + "fec_corrected_symbols_lane0", + "fec_corrected_symbols_lane1", + "fec_corrected_symbols_lane2", + "fec_corrected_symbols_lane3", + "ctpio_vi_busy_fallback", + "ctpio_long_write_success", + "ctpio_missing_dbell_fail", + "ctpio_overflow_fail", + "ctpio_underflow_fail", + "ctpio_timeout_fail", + "ctpio_noncontig_wr_fail", + "ctpio_frm_clobber_fail", + "ctpio_invalid_wr_fail", + "ctpio_vi_clobber_fallback", + "ctpio_unqualified_fallback", + "ctpio_runt_fallback", + "ctpio_success", + "ctpio_fallback", + "ctpio_poison", + "ctpio_erase", + "rxdp_scatter_disabled_trunc", + "rxdp_hlb_idle", + "rxdp_hlb_timeout", }; /* END MKCONFIG GENERATED 
EfxMacStatNamesBlock */ @@ -826,6 +851,13 @@ efx_mac_select( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + emop = &__efx_mac_ef10_ops; + type = EFX_MAC_MEDFORD2; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: rc = EINVAL; goto fail1; diff --git a/drivers/net/sfc/base/efx_mcdi.c b/drivers/net/sfc/base/efx_mcdi.c index 347a5b35..d4ebcf26 100644 --- a/drivers/net/sfc/base/efx_mcdi.c +++ b/drivers/net/sfc/base/efx_mcdi.c @@ -45,7 +45,7 @@ static const efx_mcdi_ops_t __efx_mcdi_siena_ops = { #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = { ef10_mcdi_init, /* emco_init */ @@ -58,7 +58,7 @@ static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = { ef10_mcdi_get_timeout, /* emco_get_timeout */ }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ @@ -92,6 +92,12 @@ efx_mcdi_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + emcop = &__efx_mcdi_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; @@ -1258,13 +1264,21 @@ efx_mcdi_drv_attach( req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN; /* - * Use DONT_CARE for the datapath firmware type to ensure that the - * driver can attach to an unprivileged function. The datapath firmware - * type to use is controlled by the 'sfboot' utility. + * Typically, client drivers use DONT_CARE for the datapath firmware + * type to ensure that the driver can attach to an unprivileged + * function. The datapath firmware type to use is controlled by the + * 'sfboot' utility. + * If a client driver wishes to attach with a specific datapath firmware + * type, that can be passed in the second argument of the efx_nic_probe + * API. One such example is the ESXi native driver, which attempts to + * attach with the FULL_FEATURED datapath firmware type first and falls + * back to the DONT_CARE datapath firmware type if MC_CMD_DRV_ATTACH + * fails. */ - MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_NEW_STATE, attach ? 1 : 0); + MCDI_IN_POPULATE_DWORD_2(req, DRV_ATTACH_IN_NEW_STATE, + DRV_ATTACH_IN_ATTACH, attach ?
1 : 0, + DRV_ATTACH_IN_SUBVARIANT_AWARE, EFSYS_OPT_FW_SUBVARIANT_AWARE); MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1); - MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_DONT_CARE); + MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, enp->efv); efx_mcdi_execute(enp, &req); @@ -1426,6 +1440,11 @@ efx_mcdi_get_phy_cfg( efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PHY_CFG_IN_LEN, MC_CMD_GET_PHY_CFG_OUT_LEN)]; +#if EFSYS_OPT_NAMES + const char *namep; + size_t namelen; +#endif + uint32_t phy_media_type; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); @@ -1449,10 +1468,12 @@ efx_mcdi_get_phy_cfg( encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE); #if EFSYS_OPT_NAMES - (void) strncpy(encp->enc_phy_name, - MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME), - MIN(sizeof (encp->enc_phy_name) - 1, - MC_CMD_GET_PHY_CFG_OUT_NAME_LEN)); + namep = MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME); + namelen = MIN(sizeof (encp->enc_phy_name) - 1, + strnlen(namep, MC_CMD_GET_PHY_CFG_OUT_NAME_LEN)); + (void) memset(encp->enc_phy_name, 0, + sizeof (encp->enc_phy_name)); + memcpy(encp->enc_phy_name, namep, namelen); #endif /* EFSYS_OPT_NAMES */ (void) memset(encp->enc_phy_revision, 0, sizeof (encp->enc_phy_revision)); @@ -1474,8 +1495,8 @@ efx_mcdi_get_phy_cfg( EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS); EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T); EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS); - epp->ep_fixed_port_type = - (efx_phy_media_type_t) MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE); + phy_media_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE); + epp->ep_fixed_port_type = (efx_phy_media_type_t)phy_media_type; if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES) epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID; @@ -1621,7 +1642,7 @@ fail1: #if EFSYS_OPT_BIST -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 /* * Enter bist offline mode. This is a fw mode which puts the NIC into a state * where memory BIST tests can be run and not much else can interfere or happen. @@ -1657,7 +1678,7 @@ fail1: return (rc); } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_mcdi_bist_start( @@ -1778,7 +1799,7 @@ efx_mcdi_mac_stats( { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN, - MC_CMD_MAC_STATS_OUT_DMA_LEN)]; + MC_CMD_MAC_STATS_V2_OUT_DMA_LEN)]; int clear = (action == EFX_STATS_CLEAR); int upload = (action == EFX_STATS_UPLOAD); int enable = (action == EFX_STATS_ENABLE_NOEVENTS); @@ -1791,7 +1812,7 @@ efx_mcdi_mac_stats( req.emr_in_buf = payload; req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN; req.emr_out_buf = payload; - req.emr_out_length = MC_CMD_MAC_STATS_OUT_DMA_LEN; + req.emr_out_length = MC_CMD_MAC_STATS_V2_OUT_DMA_LEN; MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD, MAC_STATS_IN_DMA, upload, @@ -1801,19 +1822,35 @@ efx_mcdi_mac_stats( MAC_STATS_IN_PERIODIC_NOEVENT, !events, MAC_STATS_IN_PERIOD_MS, (enable | events) ? 
period_ms : 0); - if (esmp != NULL) { - int bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t); + if (enable || events || upload) { + const efx_nic_cfg_t *encp = &enp->en_nic_cfg; + uint32_t bytes; + + /* Periodic stats or stats upload require a DMA buffer */ + if (esmp == NULL) { + rc = EINVAL; + goto fail1; + } + + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) { + /* MAC stats count too small for legacy MAC stats */ + rc = ENOSPC; + goto fail2; + } + + bytes = encp->enc_mac_stats_nstats * sizeof (efx_qword_t); - EFX_STATIC_ASSERT(MC_CMD_MAC_NSTATS * sizeof (uint64_t) <= - EFX_MAC_STATS_SIZE); + if (EFSYS_MEM_SIZE(esmp) < bytes) { + /* DMA buffer too small */ + rc = ENOSPC; + goto fail3; + } MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO, EFSYS_MEM_ADDR(esmp) & 0xffffffff); MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI, EFSYS_MEM_ADDR(esmp) >> 32); MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes); - } else { - EFSYS_ASSERT(!upload && !enable && !events); } /* @@ -1831,12 +1868,18 @@ efx_mcdi_mac_stats( if ((req.emr_rc != ENOENT) || (enp->en_rx_qcount + enp->en_tx_qcount != 0)) { rc = req.emr_rc; - goto fail1; + goto fail4; } } return (0); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); @@ -1921,7 +1964,7 @@ fail1: #endif /* EFSYS_OPT_MAC_STATS */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 /* * This function returns the pf and vf number of a function. If it is a pf the @@ -2020,7 +2063,7 @@ fail1: return (rc); } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_mcdi_set_workaround( diff --git a/drivers/net/sfc/base/efx_mcdi.h b/drivers/net/sfc/base/efx_mcdi.h index 4e69f048..253a9e60 100644 --- a/drivers/net/sfc/base/efx_mcdi.h +++ b/drivers/net/sfc/base/efx_mcdi.h @@ -166,11 +166,11 @@ efx_mcdi_mac_spoofing_supported( #if EFSYS_OPT_BIST -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 extern __checkReturn efx_rc_t efx_mcdi_bist_enable_offline( __in efx_nic_t *enp); -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ extern __checkReturn efx_rc_t efx_mcdi_bist_start( __in efx_nic_t *enp, diff --git a/drivers/net/sfc/base/efx_mon.c b/drivers/net/sfc/base/efx_mon.c index 234a4201..9fc268ec 100644 --- a/drivers/net/sfc/base/efx_mon.c +++ b/drivers/net/sfc/base/efx_mon.c @@ -99,7 +99,7 @@ fail1: #if EFSYS_OPT_NAMES -/* START MKCONFIG GENERATED MonitorStatNamesBlock d92af1538001301f */ +/* START MKCONFIG GENERATED MonitorStatNamesBlock 8150a068198c0f96 */ static const char * const __mon_stat_name[] = { "value_2_5v", "value_vccp1", @@ -180,6 +180,10 @@ static const char * const __mon_stat_name[] = { "board_back_temp", "i1v8", "i2v5", + "i3v3", + "i12v0", + "1v3", + "i1v3", }; /* END MKCONFIG GENERATED MonitorStatNamesBlock */ diff --git a/drivers/net/sfc/base/efx_nic.c b/drivers/net/sfc/base/efx_nic.c index e318c17d..6c162e03 100644 --- a/drivers/net/sfc/base/efx_nic.c +++ b/drivers/net/sfc/base/efx_nic.c @@ -7,11 +7,13 @@ #include "efx.h" #include "efx_impl.h" + __checkReturn efx_rc_t efx_family( __in uint16_t venid, __in uint16_t devid, - __out efx_family_t *efp) + __out efx_family_t *efp, + __out unsigned int *membarp) { if (venid == EFX_PCI_VENID_SFC) { switch (devid) { @@ 
-21,12 +23,10 @@ efx_family( * Hardware default for PF0 of uninitialised Siena. * manftest must be able to cope with this device id. */ - *efp = EFX_FAMILY_SIENA; - return (0); - case EFX_PCI_DEVID_BETHPAGE: case EFX_PCI_DEVID_SIENA: *efp = EFX_FAMILY_SIENA; + *membarp = EFX_MEM_BAR_SIENA; return (0); #endif /* EFSYS_OPT_SIENA */ @@ -36,17 +36,16 @@ efx_family( * Hardware default for PF0 of uninitialised Huntington. * manftest must be able to cope with this device id. */ - *efp = EFX_FAMILY_HUNTINGTON; - return (0); - case EFX_PCI_DEVID_FARMINGDALE: case EFX_PCI_DEVID_GREENPORT: *efp = EFX_FAMILY_HUNTINGTON; + *membarp = EFX_MEM_BAR_HUNTINGTON_PF; return (0); case EFX_PCI_DEVID_FARMINGDALE_VF: case EFX_PCI_DEVID_GREENPORT_VF: *efp = EFX_FAMILY_HUNTINGTON; + *membarp = EFX_MEM_BAR_HUNTINGTON_VF; return (0); #endif /* EFSYS_OPT_HUNTINGTON */ @@ -56,18 +55,30 @@ efx_family( * Hardware default for PF0 of uninitialised Medford. * manftest must be able to cope with this device id. */ - *efp = EFX_FAMILY_MEDFORD; - return (0); - case EFX_PCI_DEVID_MEDFORD: *efp = EFX_FAMILY_MEDFORD; + *membarp = EFX_MEM_BAR_MEDFORD_PF; return (0); case EFX_PCI_DEVID_MEDFORD_VF: *efp = EFX_FAMILY_MEDFORD; + *membarp = EFX_MEM_BAR_MEDFORD_VF; return (0); #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_PCI_DEVID_MEDFORD2_PF_UNINIT: + /* + * Hardware default for PF0 of uninitialised Medford2. + * manftest must be able to cope with this device id. + */ + case EFX_PCI_DEVID_MEDFORD2: + case EFX_PCI_DEVID_MEDFORD2_VF: + *efp = EFX_FAMILY_MEDFORD2; + *membarp = EFX_MEM_BAR_MEDFORD2; + return (0); +#endif /* EFSYS_OPT_MEDFORD2 */ + case EFX_PCI_DEVID_FALCON: /* Obsolete, not supported */ default: break; @@ -78,6 +89,7 @@ efx_family( return (ENOTSUP); } + #if EFSYS_OPT_SIENA static const efx_nic_ops_t __efx_nic_siena_ops = { @@ -135,6 +147,25 @@ static const efx_nic_ops_t __efx_nic_medford_ops = { #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + +static const efx_nic_ops_t __efx_nic_medford2_ops = { + ef10_nic_probe, /* eno_probe */ + medford2_board_cfg, /* eno_board_cfg */ + ef10_nic_set_drv_limits, /* eno_set_drv_limits */ + ef10_nic_reset, /* eno_reset */ + ef10_nic_init, /* eno_init */ + ef10_nic_get_vi_pool, /* eno_get_vi_pool */ + ef10_nic_get_bar_region, /* eno_get_bar_region */ +#if EFSYS_OPT_DIAG + ef10_nic_register_test, /* eno_register_test */ +#endif /* EFSYS_OPT_DIAG */ + ef10_nic_fini, /* eno_fini */ + ef10_nic_unprobe, /* eno_unprobe */ +}; + +#endif /* EFSYS_OPT_MEDFORD2 */ + __checkReturn efx_rc_t efx_nic_create( @@ -213,6 +244,22 @@ efx_nic_create( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + enp->en_enop = &__efx_nic_medford2_ops; + enp->en_features = + EFX_FEATURE_IPV6 | + EFX_FEATURE_LINK_EVENTS | + EFX_FEATURE_PERIODIC_MAC_STATS | + EFX_FEATURE_MCDI | + EFX_FEATURE_MAC_HEADER_FILTERS | + EFX_FEATURE_MCDI_DMA | + EFX_FEATURE_PIO_BUFFERS | + EFX_FEATURE_FW_ASSISTED_TSO_V2 | + EFX_FEATURE_PACKED_STREAM; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: rc = ENOTSUP; goto fail2; @@ -243,7 +290,8 @@ fail1: __checkReturn efx_rc_t efx_nic_probe( - __in efx_nic_t *enp) + __in efx_nic_t *enp, + __in efx_fw_variant_t efv) { const efx_nic_ops_t *enop; efx_rc_t rc; @@ -254,7 +302,27 @@ efx_nic_probe( #endif /* EFSYS_OPT_MCDI */ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE)); + /* Ensure FW variant codes match with MC_CMD_FW codes */ + EFX_STATIC_ASSERT(EFX_FW_VARIANT_FULL_FEATURED == + MC_CMD_FW_FULL_FEATURED); + 
EFX_STATIC_ASSERT(EFX_FW_VARIANT_LOW_LATENCY == + MC_CMD_FW_LOW_LATENCY); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM == + MC_CMD_FW_PACKED_STREAM); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_HIGH_TX_RATE == + MC_CMD_FW_HIGH_TX_RATE); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM_HASH_MODE_1 == + MC_CMD_FW_PACKED_STREAM_HASH_MODE_1); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_RULES_ENGINE == + MC_CMD_FW_RULES_ENGINE); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_DPDK == + MC_CMD_FW_DPDK); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_DONT_CARE == + (int)MC_CMD_FW_DONT_CARE); + enop = enp->en_enop; + enp->efv = efv; + if ((rc = enop->eno_probe(enp)) != 0) goto fail1; @@ -536,6 +604,18 @@ efx_nic_get_fw_version( EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); + /* Ensure RXDP_FW_ID codes match with MC_CMD_GET_CAPABILITIES codes */ + EFX_STATIC_ASSERT(EFX_RXDP_FULL_FEATURED_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP); + EFX_STATIC_ASSERT(EFX_RXDP_LOW_LATENCY_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY); + EFX_STATIC_ASSERT(EFX_RXDP_PACKED_STREAM_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM); + EFX_STATIC_ASSERT(EFX_RXDP_RULES_ENGINE_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE); + EFX_STATIC_ASSERT(EFX_RXDP_DPDK_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK); + rc = efx_mcdi_version(enp, mc_fw_version, NULL, NULL); if (rc != 0) goto fail2; @@ -607,48 +687,49 @@ efx_loopback_mask( EFSYS_ASSERT3U(loopback_kind, <, EFX_LOOPBACK_NKINDS); EFSYS_ASSERT(maskp != NULL); - /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */ - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XPORT == EFX_LOOPBACK_XPORT); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII_WS == EFX_LOOPBACK_XGMII_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS == EFX_LOOPBACK_XAUI_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_FAR == - EFX_LOOPBACK_XAUI_WS_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_NEAR == - EFX_LOOPBACK_XAUI_WS_NEAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_WS == EFX_LOOPBACK_GMII_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS == EFX_LOOPBACK_XFI_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS_FAR == - EFX_LOOPBACK_XFI_WS_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS_WS == EFX_LOOPBACK_PHYXS_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT == EFX_LOOPBACK_PMA_INT); - 
EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_NEAR == EFX_LOOPBACK_SD_NEAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FAR == EFX_LOOPBACK_SD_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT_WS == - EFX_LOOPBACK_PMA_INT_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP2_WS == - EFX_LOOPBACK_SD_FEP2_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP1_5_WS == - EFX_LOOPBACK_SD_FEP1_5_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP_WS == EFX_LOOPBACK_SD_FEP_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FES_WS == EFX_LOOPBACK_SD_FES_WS); + /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */ +#define LOOPBACK_CHECK(_mcdi, _efx) \ + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_##_mcdi == EFX_LOOPBACK_##_efx) + + LOOPBACK_CHECK(NONE, OFF); + LOOPBACK_CHECK(DATA, DATA); + LOOPBACK_CHECK(GMAC, GMAC); + LOOPBACK_CHECK(XGMII, XGMII); + LOOPBACK_CHECK(XGXS, XGXS); + LOOPBACK_CHECK(XAUI, XAUI); + LOOPBACK_CHECK(GMII, GMII); + LOOPBACK_CHECK(SGMII, SGMII); + LOOPBACK_CHECK(XGBR, XGBR); + LOOPBACK_CHECK(XFI, XFI); + LOOPBACK_CHECK(XAUI_FAR, XAUI_FAR); + LOOPBACK_CHECK(GMII_FAR, GMII_FAR); + LOOPBACK_CHECK(SGMII_FAR, SGMII_FAR); + LOOPBACK_CHECK(XFI_FAR, XFI_FAR); + LOOPBACK_CHECK(GPHY, GPHY); + LOOPBACK_CHECK(PHYXS, PHY_XS); + LOOPBACK_CHECK(PCS, PCS); + LOOPBACK_CHECK(PMAPMD, PMA_PMD); + LOOPBACK_CHECK(XPORT, XPORT); + LOOPBACK_CHECK(XGMII_WS, XGMII_WS); + LOOPBACK_CHECK(XAUI_WS, XAUI_WS); + LOOPBACK_CHECK(XAUI_WS_FAR, XAUI_WS_FAR); + LOOPBACK_CHECK(XAUI_WS_NEAR, XAUI_WS_NEAR); + LOOPBACK_CHECK(GMII_WS, GMII_WS); + LOOPBACK_CHECK(XFI_WS, XFI_WS); + LOOPBACK_CHECK(XFI_WS_FAR, XFI_WS_FAR); + LOOPBACK_CHECK(PHYXS_WS, PHYXS_WS); + LOOPBACK_CHECK(PMA_INT, PMA_INT); + LOOPBACK_CHECK(SD_NEAR, SD_NEAR); + LOOPBACK_CHECK(SD_FAR, SD_FAR); + LOOPBACK_CHECK(PMA_INT_WS, PMA_INT_WS); + LOOPBACK_CHECK(SD_FEP2_WS, SD_FEP2_WS); + LOOPBACK_CHECK(SD_FEP1_5_WS, SD_FEP1_5_WS); + LOOPBACK_CHECK(SD_FEP_WS, SD_FEP_WS); + LOOPBACK_CHECK(SD_FES_WS, SD_FES_WS); + LOOPBACK_CHECK(AOE_INT_NEAR, AOE_INT_NEAR); + LOOPBACK_CHECK(DATA_WS, DATA_WS); + LOOPBACK_CHECK(FORCE_EXT_LINK, FORCE_EXT_LINK); +#undef LOOPBACK_CHECK /* Build bitmask of possible loopback types */ EFX_ZERO_QWORD(mask); @@ -706,7 +787,7 @@ efx_mcdi_get_loopback_modes( efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_LOOPBACK_MODES_IN_LEN, - MC_CMD_GET_LOOPBACK_MODES_OUT_LEN)]; + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN)]; efx_qword_t mask; efx_qword_t modes; efx_rc_t rc; @@ -716,7 +797,7 @@ efx_mcdi_get_loopback_modes( req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN; req.emr_out_buf = payload; - req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_LEN; + req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN; efx_mcdi_execute(enp, &req); @@ -757,18 +838,51 @@ efx_mcdi_get_loopback_modes( MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST + MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN) { /* Response includes 40G loopback modes */ - modes = - *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_40G); + modes = *MCDI_OUT2(req, efx_qword_t, + GET_LOOPBACK_MODES_OUT_40G); EFX_AND_QWORD(modes, mask); encp->enc_loopback_types[EFX_LINK_40000FDX] = modes; } + if (req.emr_out_length_used >= + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN) { + /* Response includes 25G loopback modes */ + modes = *MCDI_OUT2(req, efx_qword_t, + GET_LOOPBACK_MODES_OUT_V2_25G); + EFX_AND_QWORD(modes, mask); + encp->enc_loopback_types[EFX_LINK_25000FDX] = modes; + } + + if (req.emr_out_length_used >= + 
MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN) { + /* Response includes 50G loopback modes */ + modes = *MCDI_OUT2(req, efx_qword_t, + GET_LOOPBACK_MODES_OUT_V2_50G); + EFX_AND_QWORD(modes, mask); + encp->enc_loopback_types[EFX_LINK_50000FDX] = modes; + } + + if (req.emr_out_length_used >= + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN) { + /* Response includes 100G loopback modes */ + modes = *MCDI_OUT2(req, efx_qword_t, + GET_LOOPBACK_MODES_OUT_V2_100G); + EFX_AND_QWORD(modes, mask); + encp->enc_loopback_types[EFX_LINK_100000FDX] = modes; + } + EFX_ZERO_QWORD(modes); EFX_SET_QWORD_BIT(modes, EFX_LOOPBACK_OFF); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100FDX]); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_1000FDX]); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_10000FDX]); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_40000FDX]); + EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_25000FDX]); + EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_50000FDX]); + EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100000FDX]); encp->enc_loopback_types[EFX_LINK_UNKNOWN] = modes; return (0); @@ -830,6 +944,82 @@ fail1: return (rc); } +#if EFSYS_OPT_FW_SUBVARIANT_AWARE + + __checkReturn efx_rc_t +efx_nic_get_fw_subvariant( + __in efx_nic_t *enp, + __out efx_nic_fw_subvariant_t *subvariantp) +{ + efx_rc_t rc; + uint32_t value; + + rc = efx_mcdi_get_nic_global(enp, + MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT, &value); + if (rc != 0) + goto fail1; + + /* Mapping is not required since values match MCDI */ + EFX_STATIC_ASSERT(EFX_NIC_FW_SUBVARIANT_DEFAULT == + MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT); + EFX_STATIC_ASSERT(EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM == + MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM); + + switch (value) { + case MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT: + case MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM: + *subvariantp = value; + break; + default: + rc = EINVAL; + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_nic_set_fw_subvariant( + __in efx_nic_t *enp, + __in efx_nic_fw_subvariant_t subvariant) +{ + efx_rc_t rc; + + switch (subvariant) { + case EFX_NIC_FW_SUBVARIANT_DEFAULT: + case EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM: + /* Mapping is not required since values match MCDI */ + break; + default: + rc = EINVAL; + goto fail1; + } + + rc = efx_mcdi_set_nic_global(enp, + MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT, subvariant); + if (rc != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */ __checkReturn efx_rc_t efx_nic_check_pcie_link_speed( diff --git a/drivers/net/sfc/base/efx_nvram.c b/drivers/net/sfc/base/efx_nvram.c index c2cc9ad3..be409c3a 100644 --- a/drivers/net/sfc/base/efx_nvram.c +++ b/drivers/net/sfc/base/efx_nvram.c @@ -30,7 +30,7 @@ static const efx_nvram_ops_t __efx_nvram_siena_ops = { #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_nvram_ops_t __efx_nvram_ef10_ops = { #if EFSYS_OPT_DIAG @@ -49,7 +49,7 @@ static const efx_nvram_ops_t __efx_nvram_ef10_ops = { ef10_nvram_buffer_validate, /* envo_buffer_validate */ }; -#endif /* EFSYS_OPT_HUNTINGTON || 
EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_nvram_init( @@ -81,6 +81,12 @@ efx_nvram_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + envop = &__efx_nvram_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; diff --git a/drivers/net/sfc/base/efx_phy.c b/drivers/net/sfc/base/efx_phy.c index 069c2836..ba2f51c1 100644 --- a/drivers/net/sfc/base/efx_phy.c +++ b/drivers/net/sfc/base/efx_phy.c @@ -27,7 +27,7 @@ static const efx_phy_ops_t __efx_phy_siena_ops = { }; #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_phy_ops_t __efx_phy_ef10_ops = { ef10_phy_power, /* epo_power */ NULL, /* epo_reset */ @@ -44,7 +44,7 @@ static const efx_phy_ops_t __efx_phy_ef10_ops = { ef10_bist_stop, /* epo_bist_stop */ #endif /* EFSYS_OPT_BIST */ }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_phy_probe( @@ -67,16 +67,25 @@ efx_phy_probe( epop = &__efx_phy_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ + #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: epop = &__efx_phy_ef10_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ + #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: epop = &__efx_phy_ef10_ops; break; #endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + epop = &__efx_phy_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: rc = ENOTSUP; goto fail1; @@ -176,6 +185,7 @@ efx_phy_adv_cap_get( break; default: EFSYS_ASSERT(B_FALSE); + *maskp = 0; break; } } diff --git a/drivers/net/sfc/base/efx_port.c b/drivers/net/sfc/base/efx_port.c index a792a9ef..33a1a084 100644 --- a/drivers/net/sfc/base/efx_port.c +++ b/drivers/net/sfc/base/efx_port.c @@ -120,7 +120,7 @@ efx_port_loopback_set( EFSYS_ASSERT(link_mode < EFX_LINK_NMODES); if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[link_mode], - loopback_type) == 0) { + (int)loopback_type) == 0) { rc = ENOTSUP; goto fail1; } @@ -180,6 +180,9 @@ static const char * const __efx_loopback_type_name[] = { "SD_FEP1_5_WS", "SD_FEP_WS", "SD_FES_WS", + "AOE_INT_NEAR", + "DATA_WS", + "FORCE_EXT_LINK", }; __checkReturn const char * diff --git a/drivers/net/sfc/base/efx_regs_ef10.h b/drivers/net/sfc/base/efx_regs_ef10.h index 5f978305..968aaaca 100644 --- a/drivers/net/sfc/base/efx_regs_ef10.h +++ b/drivers/net/sfc/base/efx_regs_ef10.h @@ -24,7 +24,7 @@ extern "C" { */ #define ER_DZ_BIU_HW_REV_ID_REG_OFST 0x00000000 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_BIU_HW_REV_ID_REG_RESET 0xeb14face @@ -38,7 +38,7 @@ extern "C" { */ #define ER_DZ_BIU_MC_SFT_STATUS_REG_OFST 0x00000010 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_BIU_MC_SFT_STATUS_REG_STEP 4 #define ER_DZ_BIU_MC_SFT_STATUS_REG_ROWS 8 #define ER_DZ_BIU_MC_SFT_STATUS_REG_RESET 0x1111face @@ -54,7 +54,7 @@ extern "C" { */ #define ER_DZ_BIU_INT_ISR_REG_OFST 0x00000090 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_BIU_INT_ISR_REG_RESET 0x0 @@ -68,7 +68,7 @@ extern "C" { */ #define ER_DZ_MC_DB_LWRD_REG_OFST 0x00000200 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_MC_DB_LWRD_REG_RESET 0x0 
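A brief orientation for the register blocks that follow (an editor's note, not part of the patch; values are taken from the defines in this header): each per-queue register is replicated ROWS times at intervals of STEP bytes from OFST, and the new ER_FZ_*_16K/_64K variants added for Medford2 describe the same registers under larger VI strides. A hedged sketch of the address arithmetic, using the EVQ_RPTR register as the example:

#include <stdint.h>

/*
 * Editor's sketch: doorbell offset for event queue 'evq_index'.
 * 0x400 is ER_DZ_EVQ_RPTR_REG_OFST from this header; vi_stride is
 * 8192 for the legacy ER_DZ layout, or 16384/65536 when the Medford2
 * ER_FZ_EVQ_RPTR_REG_16K/_64K layouts are in use.
 */
static inline uint32_t
evq_rptr_reg_offset(uint32_t evq_index, uint32_t vi_stride)
{
	return (0x00000400 + (evq_index * vi_stride));
}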
@@ -82,7 +82,7 @@ extern "C" { */ #define ER_DZ_MC_DB_HWRD_REG_OFST 0x00000204 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_MC_DB_HWRD_REG_RESET 0x0 @@ -96,7 +96,7 @@ extern "C" { */ #define ER_DZ_EVQ_RPTR_REG_OFST 0x00000400 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_EVQ_RPTR_REG_STEP 8192 #define ER_DZ_EVQ_RPTR_REG_ROWS 2048 #define ER_DZ_EVQ_RPTR_REG_RESET 0x0 @@ -108,31 +108,125 @@ extern "C" { #define ERF_DZ_EVQ_RPTR_WIDTH 15 +/* + * EVQ_RPTR_REG_64K(32bit): + * + */ + +#define ER_FZ_EVQ_RPTR_REG_64K_OFST 0x00000400 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_EVQ_RPTR_REG_64K_STEP 65536 +#define ER_FZ_EVQ_RPTR_REG_64K_ROWS 2048 +#define ER_FZ_EVQ_RPTR_REG_64K_RESET 0x0 + + +#define ERF_FZ_EVQ_RPTR_VLD_LBN 15 +#define ERF_FZ_EVQ_RPTR_VLD_WIDTH 1 +#define ERF_FZ_EVQ_RPTR_LBN 0 +#define ERF_FZ_EVQ_RPTR_WIDTH 15 + + +/* + * EVQ_RPTR_REG_16K(32bit): + * + */ + +#define ER_FZ_EVQ_RPTR_REG_16K_OFST 0x00000400 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_EVQ_RPTR_REG_16K_STEP 16384 +#define ER_FZ_EVQ_RPTR_REG_16K_ROWS 2048 +#define ER_FZ_EVQ_RPTR_REG_16K_RESET 0x0 + + +/* defined as ERF_FZ_EVQ_RPTR_VLD_LBN 15; */ +/* defined as ERF_FZ_EVQ_RPTR_VLD_WIDTH 1 */ +/* defined as ERF_FZ_EVQ_RPTR_LBN 0; */ +/* defined as ERF_FZ_EVQ_RPTR_WIDTH 15 */ + + +/* + * EVQ_TMR_REG_64K(32bit): + * + */ + +#define ER_FZ_EVQ_TMR_REG_64K_OFST 0x00000420 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_EVQ_TMR_REG_64K_STEP 65536 +#define ER_FZ_EVQ_TMR_REG_64K_ROWS 2048 +#define ER_FZ_EVQ_TMR_REG_64K_RESET 0x0 + + +#define ERF_FZ_TC_TMR_REL_VAL_LBN 16 +#define ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 +#define ERF_FZ_TC_TIMER_MODE_LBN 14 +#define ERF_FZ_TC_TIMER_MODE_WIDTH 2 +#define ERF_FZ_TC_TIMER_VAL_LBN 0 +#define ERF_FZ_TC_TIMER_VAL_WIDTH 14 + + +/* + * EVQ_TMR_REG_16K(32bit): + * + */ + +#define ER_FZ_EVQ_TMR_REG_16K_OFST 0x00000420 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_EVQ_TMR_REG_16K_STEP 16384 +#define ER_FZ_EVQ_TMR_REG_16K_ROWS 2048 +#define ER_FZ_EVQ_TMR_REG_16K_RESET 0x0 + + +/* defined as ERF_FZ_TC_TMR_REL_VAL_LBN 16; */ +/* defined as ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 */ +/* defined as ERF_FZ_TC_TIMER_MODE_LBN 14; */ +/* defined as ERF_FZ_TC_TIMER_MODE_WIDTH 2 */ +/* defined as ERF_FZ_TC_TIMER_VAL_LBN 0; */ +/* defined as ERF_FZ_TC_TIMER_VAL_WIDTH 14 */ + + /* * EVQ_TMR_REG(32bit): * */ #define ER_DZ_EVQ_TMR_REG_OFST 0x00000420 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_EVQ_TMR_REG_STEP 8192 #define ER_DZ_EVQ_TMR_REG_ROWS 2048 #define ER_DZ_EVQ_TMR_REG_RESET 0x0 +/* defined as ERF_FZ_TC_TMR_REL_VAL_LBN 16; */ +/* defined as ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 */ #define ERF_DZ_TC_TIMER_MODE_LBN 14 #define ERF_DZ_TC_TIMER_MODE_WIDTH 2 #define ERF_DZ_TC_TIMER_VAL_LBN 0 #define ERF_DZ_TC_TIMER_VAL_WIDTH 14 +/* + * RX_DESC_UPD_REG_16K(32bit): + * + */ + +#define ER_FZ_RX_DESC_UPD_REG_16K_OFST 0x00000830 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_RX_DESC_UPD_REG_16K_STEP 16384 +#define ER_FZ_RX_DESC_UPD_REG_16K_ROWS 2048 +#define ER_FZ_RX_DESC_UPD_REG_16K_RESET 0x0 + + +#define ERF_FZ_RX_DESC_WPTR_LBN 0 +#define ERF_FZ_RX_DESC_WPTR_WIDTH 12 + + /* * RX_DESC_UPD_REG(32bit): * */ #define ER_DZ_RX_DESC_UPD_REG_OFST 0x00000830 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_RX_DESC_UPD_REG_STEP 8192 #define ER_DZ_RX_DESC_UPD_REG_ROWS 2048 #define ER_DZ_RX_DESC_UPD_REG_RESET 0x0 @@ -141,13 +235,74 @@ 
extern "C" { #define ERF_DZ_RX_DESC_WPTR_LBN 0 #define ERF_DZ_RX_DESC_WPTR_WIDTH 12 + +/* + * RX_DESC_UPD_REG_64K(32bit): + * + */ + +#define ER_FZ_RX_DESC_UPD_REG_64K_OFST 0x00000830 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_RX_DESC_UPD_REG_64K_STEP 65536 +#define ER_FZ_RX_DESC_UPD_REG_64K_ROWS 2048 +#define ER_FZ_RX_DESC_UPD_REG_64K_RESET 0x0 + + +/* defined as ERF_FZ_RX_DESC_WPTR_LBN 0; */ +/* defined as ERF_FZ_RX_DESC_WPTR_WIDTH 12 */ + + +/* + * TX_DESC_UPD_REG_64K(96bit): + * + */ + +#define ER_FZ_TX_DESC_UPD_REG_64K_OFST 0x00000a10 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_TX_DESC_UPD_REG_64K_STEP 65536 +#define ER_FZ_TX_DESC_UPD_REG_64K_ROWS 2048 +#define ER_FZ_TX_DESC_UPD_REG_64K_RESET 0x0 + + +#define ERF_FZ_RSVD_LBN 76 +#define ERF_FZ_RSVD_WIDTH 20 +#define ERF_FZ_TX_DESC_WPTR_LBN 64 +#define ERF_FZ_TX_DESC_WPTR_WIDTH 12 +#define ERF_FZ_TX_DESC_HWORD_LBN 32 +#define ERF_FZ_TX_DESC_HWORD_WIDTH 32 +#define ERF_FZ_TX_DESC_LWORD_LBN 0 +#define ERF_FZ_TX_DESC_LWORD_WIDTH 32 + + +/* + * TX_DESC_UPD_REG_16K(96bit): + * + */ + +#define ER_FZ_TX_DESC_UPD_REG_16K_OFST 0x00000a10 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_TX_DESC_UPD_REG_16K_STEP 16384 +#define ER_FZ_TX_DESC_UPD_REG_16K_ROWS 2048 +#define ER_FZ_TX_DESC_UPD_REG_16K_RESET 0x0 + + +/* defined as ERF_FZ_RSVD_LBN 76; */ +/* defined as ERF_FZ_RSVD_WIDTH 20 */ +/* defined as ERF_FZ_TX_DESC_WPTR_LBN 64; */ +/* defined as ERF_FZ_TX_DESC_WPTR_WIDTH 12 */ +/* defined as ERF_FZ_TX_DESC_HWORD_LBN 32; */ +/* defined as ERF_FZ_TX_DESC_HWORD_WIDTH 32 */ +/* defined as ERF_FZ_TX_DESC_LWORD_LBN 0; */ +/* defined as ERF_FZ_TX_DESC_LWORD_WIDTH 32 */ + + /* * TX_DESC_UPD_REG(96bit): * */ #define ER_DZ_TX_DESC_UPD_REG_OFST 0x00000a10 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_TX_DESC_UPD_REG_STEP 8192 #define ER_DZ_TX_DESC_UPD_REG_ROWS 2048 #define ER_DZ_TX_DESC_UPD_REG_RESET 0x0 @@ -233,16 +388,24 @@ extern "C" { #define ESF_DZ_RX_EV_SOFT2_WIDTH 2 #define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48 #define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4 -#define ESF_DZ_RX_L4_CLASS_LBN 45 -#define ESF_DZ_RX_L4_CLASS_WIDTH 3 -#define ESE_DZ_L4_CLASS_RSVD7 7 -#define ESE_DZ_L4_CLASS_RSVD6 6 -#define ESE_DZ_L4_CLASS_RSVD5 5 -#define ESE_DZ_L4_CLASS_RSVD4 4 -#define ESE_DZ_L4_CLASS_RSVD3 3 -#define ESE_DZ_L4_CLASS_UDP 2 -#define ESE_DZ_L4_CLASS_TCP 1 -#define ESE_DZ_L4_CLASS_UNKNOWN 0 +#define ESF_DE_RX_L4_CLASS_LBN 45 +#define ESF_DE_RX_L4_CLASS_WIDTH 3 +#define ESE_DE_L4_CLASS_RSVD7 7 +#define ESE_DE_L4_CLASS_RSVD6 6 +#define ESE_DE_L4_CLASS_RSVD5 5 +#define ESE_DE_L4_CLASS_RSVD4 4 +#define ESE_DE_L4_CLASS_RSVD3 3 +#define ESE_DE_L4_CLASS_UDP 2 +#define ESE_DE_L4_CLASS_TCP 1 +#define ESE_DE_L4_CLASS_UNKNOWN 0 +#define ESF_FZ_RX_FASTPD_INDCTR_LBN 47 +#define ESF_FZ_RX_FASTPD_INDCTR_WIDTH 1 +#define ESF_FZ_RX_L4_CLASS_LBN 45 +#define ESF_FZ_RX_L4_CLASS_WIDTH 2 +#define ESE_FZ_L4_CLASS_RSVD3 3 +#define ESE_FZ_L4_CLASS_UDP 2 +#define ESE_FZ_L4_CLASS_TCP 1 +#define ESE_FZ_L4_CLASS_UNKNOWN 0 #define ESF_DZ_RX_L3_CLASS_LBN 42 #define ESF_DZ_RX_L3_CLASS_WIDTH 3 #define ESE_DZ_L3_CLASS_RSVD7 7 @@ -289,6 +452,8 @@ extern "C" { #define ESF_EZ_RX_ABORT_WIDTH 1 #define ESF_DZ_RX_ECC_ERR_LBN 29 #define ESF_DZ_RX_ECC_ERR_WIDTH 1 +#define ESF_DZ_RX_TRUNC_ERR_LBN 29 +#define ESF_DZ_RX_TRUNC_ERR_WIDTH 1 #define ESF_DZ_RX_CRC1_ERR_LBN 28 #define ESF_DZ_RX_CRC1_ERR_WIDTH 1 #define ESF_DZ_RX_CRC0_ERR_LBN 27 @@ -419,6 +584,8 @@ extern "C" { #define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 #define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56 #define 
ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
 #define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
 #define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
 #define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
@@ -429,7 +596,7 @@ extern "C" {
 #define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-/* TX_TSO_FATSO2A_DESC */
+/* ES_TX_TSO_V2_DESC_A */
 #define ESF_DZ_TX_DESC_IS_OPT_LBN 63
 #define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
 #define ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -449,7 +616,7 @@ extern "C" {
 #define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-/* TX_TSO_FATSO2B_DESC */
+/* ES_TX_TSO_V2_DESC_B */
 #define ESF_DZ_TX_DESC_IS_OPT_LBN 63
 #define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
 #define ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -463,12 +630,10 @@ extern "C" {
 #define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
 #define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
 #define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
-#define ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 16
-#define ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
 #define ESF_DZ_TX_TSO_TCP_MSS_LBN 32
 #define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
-#define ESF_DZ_TX_TSO_INNER_PE_CSUM_LBN 0
-#define ESF_DZ_TX_TSO_INNER_PE_CSUM_WIDTH 16
+#define ESF_DZ_TX_TSO_OUTER_IPID_LBN 0
+#define ESF_DZ_TX_TSO_OUTER_IPID_WIDTH 16
 /* ES_TX_VLAN_DESC */
@@ -533,6 +698,21 @@ extern "C" {
 #define ES_DZ_PS_RX_PREFIX_ORIG_LEN_LBN 48
 #define ES_DZ_PS_RX_PREFIX_ORIG_LEN_WIDTH 16
+/* Equal stride super-buffer RX packet prefix (see SF-119419-TC) */
+#define ES_EZ_ESSB_RX_PREFIX_LEN 8
+#define ES_EZ_ESSB_RX_PREFIX_DATA_LEN_LBN 0
+#define ES_EZ_ESSB_RX_PREFIX_DATA_LEN_WIDTH 16
+#define ES_EZ_ESSB_RX_PREFIX_MARK_LBN 16
+#define ES_EZ_ESSB_RX_PREFIX_MARK_WIDTH 8
+#define ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN 28
+#define ES_EZ_ESSB_RX_PREFIX_HASH_VALID_WIDTH 1
+#define ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN 29
+#define ES_EZ_ESSB_RX_PREFIX_MARK_VALID_WIDTH 1
+#define ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN 30
+#define ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_WIDTH 1
+#define ES_EZ_ESSB_RX_PREFIX_HASH_LBN 32
+#define ES_EZ_ESSB_RX_PREFIX_HASH_WIDTH 32
+
 /*
  * An extra flag for the packed stream mode,
  * signalling the start of a new buffer
diff --git a/drivers/net/sfc/base/efx_regs_mcdi.h b/drivers/net/sfc/base/efx_regs_mcdi.h
index 7389877a..cf8a7936 100644
--- a/drivers/net/sfc/base/efx_regs_mcdi.h
+++ b/drivers/net/sfc/base/efx_regs_mcdi.h
@@ -280,7 +280,8 @@
 #define MC_CMD_ERR_NO_PRIVILEGE 0x1013
 /* Workaround 26807 could not be turned on/off because some functions
  * have already installed filters. See the comment at
- * MC_CMD_WORKAROUND_BUG26807. */
+ * MC_CMD_WORKAROUND_BUG26807.
+ * May also be returned for other operations such as sub-variant switching. */
 #define MC_CMD_ERR_FILTERS_PRESENT 0x1014
 /* The clock whose frequency you've attempted to set
  * doesn't exist on this NIC */
@@ -291,6 +292,18 @@
 /* This command needs to be processed in the background but there were no
  * resources to do so. Send it again after a command has completed. */
 #define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary. */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
+/* The operation could not complete because some VIs are allocated */
+#define MC_CMD_ERR_VIS_PRESENT 0x101a
+/* The operation could not complete because some PIO buffers are allocated */
+#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
 #define MC_CMD_ERR_CODE_OFST 0
@@ -311,10 +324,17 @@
 #define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
 #define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
 #define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
-/* Points to the recovery mode entry point. */
+/* Points to the recovery mode entry point. Misnamed but kept for compatibility. */
 #define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
 #define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
 #define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
+/* Points to the recovery mode entry point. Same as above, but the right name. */
+#define SIENA_MC_BOOTROM_RECOVERY_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_RECOVERY_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_RECOVERY_VEC (0x10000 - 2 * 0x4)
+
+/* Points to noflash mode entry point. */
+#define MEDFORD_MC_BOOTROM_REAL_NOFLASH_VEC (0x10000 - 4 * 0x4)
 /* The command set exported by the boot ROM (MCDI v0) */
 #define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
@@ -368,7 +388,7 @@
 #define MCDI_EVENT_LEVEL_LBN 33
 #define MCDI_EVENT_LEVEL_WIDTH 3
 /* enum: Info. */
-#define MCDI_EVENT_LEVEL_INFO 0x0
+#define MCDI_EVENT_LEVEL_INFO 0x0
 /* enum: Warning. */
 #define MCDI_EVENT_LEVEL_WARN 0x1
 /* enum: Error. */
@@ -376,6 +396,7 @@
 /* enum: Fatal. */
 #define MCDI_EVENT_LEVEL_FATAL 0x3
 #define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_DATA_LEN 4
 #define MCDI_EVENT_CMDDONE_SEQ_LBN 0
 #define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
 #define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
@@ -386,14 +407,22 @@
 #define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
 #define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
 #define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: Link is down or link speed could not be determined */
+#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0
 /* enum: 100Mbs */
-#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
 /* enum: 1Gbs */
-#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
 /* enum: 10Gbs */
-#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
 /* enum: 40Gbs */
-#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+/* enum: 25Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5
+/* enum: 50Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6
+/* enum: 100Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7
 #define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
 #define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
 #define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
@@ -482,8 +511,23 @@
 #define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
 /* enum: Notify that the attempt to run FPGA Controller firmware timed out */
 #define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+/* enum: Failure to probe one or more FPGA boot flash chips */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
+/* enum: FPGA boot-flash contains an invalid image header */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12
+/* enum: Failed to program clocks required by the FPGA */
+#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
+/* enum: Notify that FPGA Controller is alive to serve MCDI requests */
+#define MCDI_EVENT_AOE_FC_RUNNING 0x14
 #define MCDI_EVENT_AOE_ERR_DATA_LBN 8
 #define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
+/* enum: FC Assert happened, but the register information is not available */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
+/* enum: The register information for FC Assert is ready for reading by driver
+ */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
 #define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
 #define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
 /* enum: Reading from NV failed */
@@ -536,6 +580,22 @@
 #define MCDI_EVENT_MUM_WATCHDOG 0x3
 #define MCDI_EVENT_MUM_ERR_DATA_LBN 8
 #define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_DBRET_SEQ_LBN 0
+#define MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0
+#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
+/* enum: Corrupted or bad SUC application. */
+#define MCDI_EVENT_SUC_BAD_APP 0x1
+/* enum: SUC application reported an assert. */
+#define MCDI_EVENT_SUC_ASSERT 0x2
+/* enum: SUC application reported an exception. */
+#define MCDI_EVENT_SUC_EXCEPTION 0x3
+/* enum: SUC watchdog timer expired. */
+#define MCDI_EVENT_SUC_WATCHDOG 0x4
+#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
+#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define MCDI_EVENT_SUC_ERR_DATA_LBN 8
+#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
 #define MCDI_EVENT_DATA_LBN 0
 #define MCDI_EVENT_DATA_WIDTH 32
 #define MCDI_EVENT_SRC_LBN 36
@@ -569,23 +629,23 @@
 /* enum: Transmit error */
 #define MCDI_EVENT_CODE_TX_ERR 0xb
 /* enum: Tx flush has completed */
-#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
 /* enum: PTP packet received timestamp */
-#define MCDI_EVENT_CODE_PTP_RX 0xd
+#define MCDI_EVENT_CODE_PTP_RX 0xd
 /* enum: PTP NIC failure */
-#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
 /* enum: PTP PPS event */
-#define MCDI_EVENT_CODE_PTP_PPS 0xf
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
 /* enum: Rx flush has completed */
-#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
 /* enum: Receive error */
 #define MCDI_EVENT_CODE_RX_ERR 0x11
 /* enum: AOE fault */
-#define MCDI_EVENT_CODE_AOE 0x12
+#define MCDI_EVENT_CODE_AOE 0x12
 /* enum: Network port calibration failed (VCAL). */
-#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
 /* enum: HW PPS event */
-#define MCDI_EVENT_CODE_HW_PPS 0x14
+#define MCDI_EVENT_CODE_HW_PPS 0x14
 /* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
  * a different format) */
@@ -608,73 +668,99 @@
 * been processed and it may now resend the command */
 #define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: MCDI command accepted. New commands can be issued but this command is
+ * not done yet.
+ */
+#define MCDI_EVENT_CODE_DBRET 0x1e
+/* enum: The MC has detected a fault on the SUC */
+#define MCDI_EVENT_CODE_SUC 0x1f
 /* enum: Artificial event generated by host and posted via MC for test
 * purposes.
*/ -#define MCDI_EVENT_CODE_TESTGEN 0xfa +#define MCDI_EVENT_CODE_TESTGEN 0xfa #define MCDI_EVENT_CMDDONE_DATA_OFST 0 +#define MCDI_EVENT_CMDDONE_DATA_LEN 4 #define MCDI_EVENT_CMDDONE_DATA_LBN 0 #define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 #define MCDI_EVENT_LINKCHANGE_DATA_OFST 0 +#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4 #define MCDI_EVENT_LINKCHANGE_DATA_LBN 0 #define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32 #define MCDI_EVENT_SENSOREVT_DATA_OFST 0 +#define MCDI_EVENT_SENSOREVT_DATA_LEN 4 #define MCDI_EVENT_SENSOREVT_DATA_LBN 0 #define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32 #define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4 #define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0 #define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32 #define MCDI_EVENT_TX_ERR_DATA_OFST 0 +#define MCDI_EVENT_TX_ERR_DATA_LEN 4 #define MCDI_EVENT_TX_ERR_DATA_LBN 0 #define MCDI_EVENT_TX_ERR_DATA_WIDTH 32 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of * timestamp */ #define MCDI_EVENT_PTP_SECONDS_OFST 0 +#define MCDI_EVENT_PTP_SECONDS_LEN 4 #define MCDI_EVENT_PTP_SECONDS_LBN 0 #define MCDI_EVENT_PTP_SECONDS_WIDTH 32 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of * timestamp */ #define MCDI_EVENT_PTP_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_MAJOR_LEN 4 #define MCDI_EVENT_PTP_MAJOR_LBN 0 #define MCDI_EVENT_PTP_MAJOR_WIDTH 32 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field * of timestamp */ #define MCDI_EVENT_PTP_NANOSECONDS_OFST 0 +#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4 #define MCDI_EVENT_PTP_NANOSECONDS_LBN 0 #define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of * timestamp */ #define MCDI_EVENT_PTP_MINOR_OFST 0 +#define MCDI_EVENT_PTP_MINOR_LEN 4 #define MCDI_EVENT_PTP_MINOR_LBN 0 #define MCDI_EVENT_PTP_MINOR_WIDTH 32 /* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet */ #define MCDI_EVENT_PTP_UUID_OFST 0 +#define MCDI_EVENT_PTP_UUID_LEN 4 #define MCDI_EVENT_PTP_UUID_LBN 0 #define MCDI_EVENT_PTP_UUID_WIDTH 32 #define MCDI_EVENT_RX_ERR_DATA_OFST 0 +#define MCDI_EVENT_RX_ERR_DATA_LEN 4 #define MCDI_EVENT_RX_ERR_DATA_LBN 0 #define MCDI_EVENT_RX_ERR_DATA_WIDTH 32 #define MCDI_EVENT_PAR_ERR_DATA_OFST 0 +#define MCDI_EVENT_PAR_ERR_DATA_LEN 4 #define MCDI_EVENT_PAR_ERR_DATA_LBN 0 #define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32 #define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4 #define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0 #define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32 #define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4 #define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0 #define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32 /* For CODE_PTP_TIME events, the major value of the PTP clock */ #define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4 #define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0 #define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32 /* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */ #define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36 #define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8 +/* For CODE_PTP_TIME events, most significant bits of the minor value of the + * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19. 
+ */ +#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36 +#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8 /* For CODE_PTP_TIME events where report sync status is enabled, indicates * whether the NIC clock has ever been set */ @@ -690,10 +776,17 @@ */ #define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38 #define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6 +/* For CODE_PTP_TIME events, most significant bits of the minor value of the + * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21. + */ +#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6 #define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4 #define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0 #define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32 #define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4 #define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0 #define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32 /* Zero means that the request has been completed or authorized, and the driver @@ -702,6 +795,10 @@ */ #define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36 #define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8 +#define MCDI_EVENT_DBRET_DATA_OFST 0 +#define MCDI_EVENT_DBRET_DATA_LEN 4 +#define MCDI_EVENT_DBRET_DATA_LBN 0 +#define MCDI_EVENT_DBRET_DATA_WIDTH 32 /* FCDI_EVENT structuredef */ #define FCDI_EVENT_LEN 8 @@ -710,7 +807,7 @@ #define FCDI_EVENT_LEVEL_LBN 33 #define FCDI_EVENT_LEVEL_WIDTH 3 /* enum: Info. */ -#define FCDI_EVENT_LEVEL_INFO 0x0 +#define FCDI_EVENT_LEVEL_INFO 0x0 /* enum: Warning. */ #define FCDI_EVENT_LEVEL_WARN 0x1 /* enum: Error. */ @@ -718,6 +815,7 @@ /* enum: Fatal. */ #define FCDI_EVENT_LEVEL_FATAL 0x3 #define FCDI_EVENT_DATA_OFST 0 +#define FCDI_EVENT_DATA_LEN 4 #define FCDI_EVENT_LINK_STATE_STATUS_LBN 0 #define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1 #define FCDI_EVENT_LINK_DOWN 0x0 /* enum */ @@ -757,6 +855,7 @@ #define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */ #define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */ #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 #define FCDI_EVENT_ASSERT_TYPE_LBN 36 @@ -764,12 +863,15 @@ #define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36 #define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8 #define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4 #define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0 #define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32 #define FCDI_EVENT_LINK_STATE_DATA_OFST 0 +#define FCDI_EVENT_LINK_STATE_DATA_LEN 4 #define FCDI_EVENT_LINK_STATE_DATA_LBN 0 #define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32 #define FCDI_EVENT_PTP_STATE_OFST 0 +#define FCDI_EVENT_PTP_STATE_LEN 4 #define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */ #define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */ #define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */ @@ -778,6 +880,7 @@ #define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36 #define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32 /* Index of MC port being referred to */ @@ -785,9 +888,11 @@ #define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8 /* FC Port index that matches the MC port index in SRC */ #define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0 +#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4 
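/*
 * [Editor's aside, not part of the patch] The pattern running through these
 * additions: _OFST/_LEN give a field's byte offset and byte length within an
 * MCDI message (the many new _LEN 4 lines make the dword size explicit),
 * while _LBN/_WIDTH give its bit position and bit width within an event or
 * bitfield. A hedged sketch of extracting such a bitfield from a 64-bit
 * event word:
 *
 *	static inline uint32_t
 *	event_field(uint64_t ev, unsigned int lbn, unsigned int width)
 *	{
 *		return ((uint32_t)((ev >> lbn) &
 *		    ((((uint64_t)1) << width) - 1)));
 *	}
 *
 * e.g. event_field(ev, 36, 8) recovers FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE
 * per its _LBN/_WIDTH values above.
 */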
#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0 #define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32 #define FCDI_EVENT_BOOT_RESULT_OFST 0 +#define FCDI_EVENT_BOOT_RESULT_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */ #define FCDI_EVENT_BOOT_RESULT_LBN 0 @@ -804,14 +909,17 @@ #define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num)) /* Number of timestamps following */ #define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0 +#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4 #define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0 #define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32 /* Seconds field of a timestamp record */ #define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4 #define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64 #define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32 /* Nanoseconds field of a timestamp record */ #define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12 +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4 #define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96 #define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32 /* Timestamp records comprising the event */ @@ -831,7 +939,7 @@ #define MUM_EVENT_LEVEL_LBN 33 #define MUM_EVENT_LEVEL_WIDTH 3 /* enum: Info. */ -#define MUM_EVENT_LEVEL_INFO 0x0 +#define MUM_EVENT_LEVEL_INFO 0x0 /* enum: Warning. */ #define MUM_EVENT_LEVEL_WARN 0x1 /* enum: Error. */ @@ -839,6 +947,7 @@ /* enum: Fatal. */ #define MUM_EVENT_LEVEL_FATAL 0x3 #define MUM_EVENT_DATA_OFST 0 +#define MUM_EVENT_DATA_LEN 4 #define MUM_EVENT_SENSOR_ID_LBN 0 #define MUM_EVENT_SENSOR_ID_WIDTH 8 /* Enum values, see field(s): */ @@ -876,18 +985,23 @@ /* enum: Link fault has been asserted, or has cleared. */ #define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4 #define MUM_EVENT_SENSOR_DATA_OFST 0 +#define MUM_EVENT_SENSOR_DATA_LEN 4 #define MUM_EVENT_SENSOR_DATA_LBN 0 #define MUM_EVENT_SENSOR_DATA_WIDTH 32 #define MUM_EVENT_PORT_PHY_FLAGS_OFST 0 +#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4 #define MUM_EVENT_PORT_PHY_FLAGS_LBN 0 #define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32 #define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4 #define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0 #define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32 #define MUM_EVENT_PORT_PHY_CAPS_OFST 0 +#define MUM_EVENT_PORT_PHY_CAPS_LEN 4 #define MUM_EVENT_PORT_PHY_CAPS_LBN 0 #define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32 #define MUM_EVENT_PORT_PHY_TECH_OFST 0 +#define MUM_EVENT_PORT_PHY_TECH_LEN 4 #define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */ #define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */ #define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */ @@ -911,7 +1025,9 @@ /***********************************/ /* MC_CMD_READ32 - * Read multiple 32byte words from MC memory. + * Read multiple 32byte words from MC memory. Note - this command really + * belongs to INSECURE category but is required by shmboot. The command handler + * has additional checks to reject insecure calls. 
*/ #define MC_CMD_READ32 0x1 #undef MC_CMD_0x1_PRIVILEGE_CTG @@ -921,7 +1037,9 @@ /* MC_CMD_READ32_IN msgrequest */ #define MC_CMD_READ32_IN_LEN 8 #define MC_CMD_READ32_IN_ADDR_OFST 0 +#define MC_CMD_READ32_IN_ADDR_LEN 4 #define MC_CMD_READ32_IN_NUMWORDS_OFST 4 +#define MC_CMD_READ32_IN_NUMWORDS_LEN 4 /* MC_CMD_READ32_OUT msgresponse */ #define MC_CMD_READ32_OUT_LENMIN 4 @@ -940,13 +1058,14 @@ #define MC_CMD_WRITE32 0x2 #undef MC_CMD_0x2_PRIVILEGE_CTG -#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_WRITE32_IN msgrequest */ #define MC_CMD_WRITE32_IN_LENMIN 8 #define MC_CMD_WRITE32_IN_LENMAX 252 #define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num)) #define MC_CMD_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_WRITE32_IN_ADDR_LEN 4 #define MC_CMD_WRITE32_IN_BUFFER_OFST 4 #define MC_CMD_WRITE32_IN_BUFFER_LEN 4 #define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1 @@ -958,12 +1077,14 @@ /***********************************/ /* MC_CMD_COPYCODE - * Copy MC code between two locations and jump. + * Copy MC code between two locations and jump. Note - this command really + * belongs to INSECURE category but is required by shmboot. The command handler + * has additional checks to reject insecure calls. */ #define MC_CMD_COPYCODE 0x3 #undef MC_CMD_0x3_PRIVILEGE_CTG -#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_COPYCODE_IN msgrequest */ #define MC_CMD_COPYCODE_IN_LEN 16 @@ -974,6 +1095,7 @@ * is a bitfield, with each bit as documented below. */ #define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 +#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4 /* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */ #define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000 /* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and @@ -999,9 +1121,12 @@ #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1 /* Destination address */ #define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 +#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4 #define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 +#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4 /* Address of where to jump after copy. */ #define MC_CMD_COPYCODE_IN_JUMP_OFST 12 +#define MC_CMD_COPYCODE_IN_JUMP_LEN 4 /* enum: Control should return to the caller rather than jumping */ #define MC_CMD_COPYCODE_JUMP_NONE 0x1 @@ -1016,12 +1141,13 @@ #define MC_CMD_SET_FUNC 0x4 #undef MC_CMD_0x4_PRIVILEGE_CTG -#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SET_FUNC_IN msgrequest */ #define MC_CMD_SET_FUNC_IN_LEN 4 /* Set function */ #define MC_CMD_SET_FUNC_IN_FUNC_OFST 0 +#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4 /* MC_CMD_SET_FUNC_OUT msgresponse */ #define MC_CMD_SET_FUNC_OUT_LEN 0 @@ -1034,7 +1160,7 @@ #define MC_CMD_GET_BOOT_STATUS 0x5 #undef MC_CMD_0x5_PRIVILEGE_CTG -#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_GET_BOOT_STATUS_IN msgrequest */ #define MC_CMD_GET_BOOT_STATUS_IN_LEN 0 @@ -1043,9 +1169,11 @@ #define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8 /* ?? 
*/ #define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0 +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4 /* enum: indicates that the MC wasn't flash booted */ -#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1 @@ -1069,11 +1197,13 @@ #define MC_CMD_GET_ASSERTS_IN_LEN 4 /* Set to clear assertion */ #define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0 +#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4 /* MC_CMD_GET_ASSERTS_OUT msgresponse */ #define MC_CMD_GET_ASSERTS_OUT_LEN 140 /* Assertion status flag. */ #define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4 /* enum: No assertions have failed. */ #define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 /* enum: A system-level assertion has failed. */ @@ -1086,6 +1216,7 @@ #define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 /* Failing PC value */ #define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4 /* Saved GP regs */ #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 @@ -1096,7 +1227,9 @@ #define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 /* Failing thread address */ #define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4 #define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4 /***********************************/ @@ -1113,12 +1246,14 @@ #define MC_CMD_LOG_CTRL_IN_LEN 8 /* Log destination */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4 /* enum: UART. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum: Event queue. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 /* Legacy argument. Must be zero. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4 /* MC_CMD_LOG_CTRL_OUT msgresponse */ #define MC_CMD_LOG_CTRL_OUT_LEN 0 @@ -1140,23 +1275,29 @@ #define MC_CMD_GET_VERSION_EXT_IN_LEN 4 /* placeholder, set to 0 */ #define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0 +#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4 /* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */ #define MC_CMD_GET_VERSION_V0_OUT_LEN 4 #define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 /* enum: Reserved version number to indicate "any" version. */ #define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff /* enum: Bootrom version value for Siena. */ #define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000 /* enum: Bootrom version value for Huntington. */ #define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001 +/* enum: Bootrom version value for Medford2. 
*/ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002 /* MC_CMD_GET_VERSION_OUT msgresponse */ #define MC_CMD_GET_VERSION_OUT_LEN 32 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ #define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4 /* 128bit mask of functions supported by the current firmware */ #define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8 #define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16 @@ -1168,9 +1309,11 @@ /* MC_CMD_GET_VERSION_EXT_OUT msgresponse */ #define MC_CMD_GET_VERSION_EXT_OUT_LEN 48 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ #define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4 /* 128bit mask of functions supported by the current firmware */ #define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8 #define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16 @@ -1183,2421 +1326,6 @@ #define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16 -/***********************************/ -/* MC_CMD_FC - * Perform an FC operation - */ -#define MC_CMD_FC 0x9 - -/* MC_CMD_FC_IN msgrequest */ -#define MC_CMD_FC_IN_LEN 4 -#define MC_CMD_FC_IN_OP_HDR_OFST 0 -#define MC_CMD_FC_IN_OP_LBN 0 -#define MC_CMD_FC_IN_OP_WIDTH 8 -/* enum: NULL MCDI command to FC. */ -#define MC_CMD_FC_OP_NULL 0x1 -/* enum: Unused opcode */ -#define MC_CMD_FC_OP_UNUSED 0x2 -/* enum: MAC driver commands */ -#define MC_CMD_FC_OP_MAC 0x3 -/* enum: Read FC memory */ -#define MC_CMD_FC_OP_READ32 0x4 -/* enum: Write to FC memory */ -#define MC_CMD_FC_OP_WRITE32 0x5 -/* enum: Read FC memory */ -#define MC_CMD_FC_OP_TRC_READ 0x6 -/* enum: Write to FC memory */ -#define MC_CMD_FC_OP_TRC_WRITE 0x7 -/* enum: FC firmware Version */ -#define MC_CMD_FC_OP_GET_VERSION 0x8 -/* enum: Read FC memory */ -#define MC_CMD_FC_OP_TRC_RX_READ 0x9 -/* enum: Write to FC memory */ -#define MC_CMD_FC_OP_TRC_RX_WRITE 0xa -/* enum: SFP parameters */ -#define MC_CMD_FC_OP_SFP 0xb -/* enum: DDR3 test */ -#define MC_CMD_FC_OP_DDR_TEST 0xc -/* enum: Get Crash context from FC */ -#define MC_CMD_FC_OP_GET_ASSERT 0xd -/* enum: Get FPGA Build registers */ -#define MC_CMD_FC_OP_FPGA_BUILD 0xe -/* enum: Read map support commands */ -#define MC_CMD_FC_OP_READ_MAP 0xf -/* enum: FC Capabilities */ -#define MC_CMD_FC_OP_CAPABILITIES 0x10 -/* enum: FC Global flags */ -#define MC_CMD_FC_OP_GLOBAL_FLAGS 0x11 -/* enum: FC IO using relative addressing modes */ -#define MC_CMD_FC_OP_IO_REL 0x12 -/* enum: FPGA link information */ -#define MC_CMD_FC_OP_UHLINK 0x13 -/* enum: Configure loopbacks and link on FPGA ports */ -#define MC_CMD_FC_OP_SET_LINK 0x14 -/* enum: Licensing operations relating to AOE */ -#define MC_CMD_FC_OP_LICENSE 0x15 -/* enum: Startup information to the FC */ -#define MC_CMD_FC_OP_STARTUP 0x16 -/* enum: Configure a DMA read */ -#define MC_CMD_FC_OP_DMA 0x17 -/* enum: Configure a timed read */ -#define MC_CMD_FC_OP_TIMED_READ 0x18 -/* enum: Control UART logging */ -#define MC_CMD_FC_OP_LOG 0x19 -/* enum: Get the value of a given clock_id */ -#define MC_CMD_FC_OP_CLOCK 0x1a -/* enum: DDR3/QDR3 parameters */ -#define MC_CMD_FC_OP_DDR 0x1b -/* enum: PTP and timestamp control */ -#define MC_CMD_FC_OP_TIMESTAMP 0x1c -/* enum: Commands for SPI Flash interface */ -#define MC_CMD_FC_OP_SPI 
0x1d -/* enum: Commands for diagnostic components */ -#define MC_CMD_FC_OP_DIAG 0x1e -/* enum: External AOE port. */ -#define MC_CMD_FC_IN_PORT_EXT_OFST 0x0 -/* enum: Internal AOE port. */ -#define MC_CMD_FC_IN_PORT_INT_OFST 0x40 - -/* MC_CMD_FC_IN_NULL msgrequest */ -#define MC_CMD_FC_IN_NULL_LEN 4 -#define MC_CMD_FC_IN_CMD_OFST 0 - -/* MC_CMD_FC_IN_PHY msgrequest */ -#define MC_CMD_FC_IN_PHY_LEN 5 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* FC PHY driver operation code */ -#define MC_CMD_FC_IN_PHY_OP_OFST 4 -#define MC_CMD_FC_IN_PHY_OP_LEN 1 -/* enum: PHY init handler */ -#define MC_CMD_FC_OP_PHY_OP_INIT 0x1 -/* enum: PHY reconfigure handler */ -#define MC_CMD_FC_OP_PHY_OP_RECONFIGURE 0x2 -/* enum: PHY reboot handler */ -#define MC_CMD_FC_OP_PHY_OP_REBOOT 0x3 -/* enum: PHY get_supported_cap handler */ -#define MC_CMD_FC_OP_PHY_OP_GET_SUPPORTED_CAP 0x4 -/* enum: PHY get_config handler */ -#define MC_CMD_FC_OP_PHY_OP_GET_CONFIG 0x5 -/* enum: PHY get_media_info handler */ -#define MC_CMD_FC_OP_PHY_OP_GET_MEDIA_INFO 0x6 -/* enum: PHY set_led handler */ -#define MC_CMD_FC_OP_PHY_OP_SET_LED 0x7 -/* enum: PHY lasi_interrupt handler */ -#define MC_CMD_FC_OP_PHY_OP_LASI_INTERRUPT 0x8 -/* enum: PHY check_link handler */ -#define MC_CMD_FC_OP_PHY_OP_CHECK_LINK 0x9 -/* enum: PHY fill_stats handler */ -#define MC_CMD_FC_OP_PHY_OP_FILL_STATS 0xa -/* enum: PHY bpx_link_state_changed handler */ -#define MC_CMD_FC_OP_PHY_OP_BPX_LINK_STATE_CHANGED 0xb -/* enum: PHY get_state handler */ -#define MC_CMD_FC_OP_PHY_OP_GET_STATE 0xc -/* enum: PHY start_bist handler */ -#define MC_CMD_FC_OP_PHY_OP_START_BIST 0xd -/* enum: PHY poll_bist handler */ -#define MC_CMD_FC_OP_PHY_OP_POLL_BIST 0xe -/* enum: PHY nvram_test handler */ -#define MC_CMD_FC_OP_PHY_OP_NVRAM_TEST 0xf -/* enum: PHY relinquish handler */ -#define MC_CMD_FC_OP_PHY_OP_RELINQUISH_SPI 0x10 -/* enum: PHY read connection from FC - may be not required */ -#define MC_CMD_FC_OP_PHY_OP_GET_CONNECTION 0x11 -/* enum: PHY read flags from FC - may be not required */ -#define MC_CMD_FC_OP_PHY_OP_GET_FLAGS 0x12 - -/* MC_CMD_FC_IN_PHY_INIT msgrequest */ -#define MC_CMD_FC_IN_PHY_INIT_LEN 4 -#define MC_CMD_FC_IN_PHY_CMD_OFST 0 - -/* MC_CMD_FC_IN_MAC msgrequest */ -#define MC_CMD_FC_IN_MAC_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_MAC_HEADER_OFST 4 -#define MC_CMD_FC_IN_MAC_OP_LBN 0 -#define MC_CMD_FC_IN_MAC_OP_WIDTH 8 -/* enum: MAC reconfigure handler */ -#define MC_CMD_FC_OP_MAC_OP_RECONFIGURE 0x1 -/* enum: MAC Set command - same as MC_CMD_SET_MAC */ -#define MC_CMD_FC_OP_MAC_OP_SET_LINK 0x2 -/* enum: MAC statistics */ -#define MC_CMD_FC_OP_MAC_OP_GET_STATS 0x3 -/* enum: MAC RX statistics */ -#define MC_CMD_FC_OP_MAC_OP_GET_RX_STATS 0x6 -/* enum: MAC TX statistics */ -#define MC_CMD_FC_OP_MAC_OP_GET_TX_STATS 0x7 -/* enum: MAC Read status */ -#define MC_CMD_FC_OP_MAC_OP_READ_STATUS 0x8 -#define MC_CMD_FC_IN_MAC_PORT_TYPE_LBN 8 -#define MC_CMD_FC_IN_MAC_PORT_TYPE_WIDTH 8 -/* enum: External FPGA port. */ -#define MC_CMD_FC_PORT_EXT 0x0 -/* enum: Internal Siena-facing FPGA ports. */ -#define MC_CMD_FC_PORT_INT 0x1 -#define MC_CMD_FC_IN_MAC_PORT_IDX_LBN 16 -#define MC_CMD_FC_IN_MAC_PORT_IDX_WIDTH 8 -#define MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN 24 -#define MC_CMD_FC_IN_MAC_CMD_FORMAT_WIDTH 8 -/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are - * irrelevant. Port number is derived from pci_fn; passed in FC header. - */ -#define MC_CMD_FC_OP_MAC_CMD_FORMAT_DEFAULT 0x0 -/* enum: Override default port number. 
Port number determined by fields - * PORT_TYPE and PORT_IDX. - */ -#define MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE 0x1 - -/* MC_CMD_FC_IN_MAC_RECONFIGURE msgrequest */ -#define MC_CMD_FC_IN_MAC_RECONFIGURE_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ - -/* MC_CMD_FC_IN_MAC_SET_LINK msgrequest */ -#define MC_CMD_FC_IN_MAC_SET_LINK_LEN 32 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ -/* MTU size */ -#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_OFST 8 -/* Drain Tx FIFO */ -#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_OFST 12 -#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_OFST 16 -#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LEN 8 -#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LO_OFST 16 -#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_HI_OFST 20 -#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_OFST 24 -#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_LBN 0 -#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_WIDTH 1 -#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_LBN 1 -#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_WIDTH 1 -#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_OFST 28 - -/* MC_CMD_FC_IN_MAC_READ_STATUS msgrequest */ -#define MC_CMD_FC_IN_MAC_READ_STATUS_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ - -/* MC_CMD_FC_IN_MAC_GET_RX_STATS msgrequest */ -#define MC_CMD_FC_IN_MAC_GET_RX_STATS_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ - -/* MC_CMD_FC_IN_MAC_GET_TX_STATS msgrequest */ -#define MC_CMD_FC_IN_MAC_GET_TX_STATS_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ - -/* MC_CMD_FC_IN_MAC_GET_STATS msgrequest */ -#define MC_CMD_FC_IN_MAC_GET_STATS_LEN 20 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ -/* MC Statistics index */ -#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_OFST 8 -#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_OFST 12 -#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_LBN 0 -#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_WIDTH 1 -#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_LBN 1 -#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_WIDTH 1 -#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_LBN 2 -#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_WIDTH 1 -/* Number of statistics to read */ -#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_OFST 16 -#define MC_CMD_FC_MAC_NSTATS_PER_BLOCK 0x1e /* enum */ -#define MC_CMD_FC_MAC_NBYTES_PER_STAT 0x8 /* enum */ - -/* MC_CMD_FC_IN_READ32 msgrequest */ -#define MC_CMD_FC_IN_READ32_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_READ32_ADDR_HI_OFST 4 -#define MC_CMD_FC_IN_READ32_ADDR_LO_OFST 8 -#define MC_CMD_FC_IN_READ32_NUMWORDS_OFST 12 - -/* MC_CMD_FC_IN_WRITE32 msgrequest */ -#define MC_CMD_FC_IN_WRITE32_LENMIN 16 -#define MC_CMD_FC_IN_WRITE32_LENMAX 252 -#define MC_CMD_FC_IN_WRITE32_LEN(num) (12+4*(num)) -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_WRITE32_ADDR_HI_OFST 4 -#define MC_CMD_FC_IN_WRITE32_ADDR_LO_OFST 8 -#define MC_CMD_FC_IN_WRITE32_BUFFER_OFST 12 -#define MC_CMD_FC_IN_WRITE32_BUFFER_LEN 4 -#define MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM 1 -#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM 60 - -/* MC_CMD_FC_IN_TRC_READ msgrequest */ -#define MC_CMD_FC_IN_TRC_READ_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_TRC_READ_TRC_OFST 4 -#define MC_CMD_FC_IN_TRC_READ_CHANNEL_OFST 8 - -/* MC_CMD_FC_IN_TRC_WRITE msgrequest */ -#define MC_CMD_FC_IN_TRC_WRITE_LEN 28 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_TRC_WRITE_TRC_OFST 4 -#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_OFST 8 -#define MC_CMD_FC_IN_TRC_WRITE_DATA_OFST 12 -#define 
MC_CMD_FC_IN_TRC_WRITE_DATA_LEN 4
-#define MC_CMD_FC_IN_TRC_WRITE_DATA_NUM 4
-
-/* MC_CMD_FC_IN_GET_VERSION msgrequest */
-#define MC_CMD_FC_IN_GET_VERSION_LEN 4
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-
-/* MC_CMD_FC_IN_TRC_RX_READ msgrequest */
-#define MC_CMD_FC_IN_TRC_RX_READ_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TRC_RX_READ_TRC_OFST 4
-#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_OFST 8
-
-/* MC_CMD_FC_IN_TRC_RX_WRITE msgrequest */
-#define MC_CMD_FC_IN_TRC_RX_WRITE_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_OFST 4
-#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_OFST 8
-#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_OFST 12
-#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_LEN 4
-#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_NUM 2
-
-/* MC_CMD_FC_IN_SFP msgrequest */
-#define MC_CMD_FC_IN_SFP_LEN 28
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* Link speed is 100, 1000, 10000, 40000 */
-#define MC_CMD_FC_IN_SFP_SPEED_OFST 4
-/* Length of copper cable - zero when not relevant (e.g. if cable is fibre) */
-#define MC_CMD_FC_IN_SFP_COPPER_LEN_OFST 8
-/* Not relevant for cards with QSFP modules. For older cards, true if module is
- * a dual speed SFP+ module.
- */
-#define MC_CMD_FC_IN_SFP_DUAL_SPEED_OFST 12
-/* True if an SFP Module is present (other fields valid when true) */
-#define MC_CMD_FC_IN_SFP_PRESENT_OFST 16
-/* The type of the SFP+ Module. For later cards with QSFP modules, this field
- * is unused and the type is communicated by other means.
- */
-#define MC_CMD_FC_IN_SFP_TYPE_OFST 20
-/* Capabilities corresponding to 1 bits. */
-#define MC_CMD_FC_IN_SFP_CAPS_OFST 24
-
-/* MC_CMD_FC_IN_DDR_TEST msgrequest */
-#define MC_CMD_FC_IN_DDR_TEST_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4
-#define MC_CMD_FC_IN_DDR_TEST_OP_LBN 0
-#define MC_CMD_FC_IN_DDR_TEST_OP_WIDTH 8
-/* enum: DRAM Test Start */
-#define MC_CMD_FC_OP_DDR_TEST_START 0x1
-/* enum: DRAM Test Poll */
-#define MC_CMD_FC_OP_DDR_TEST_POLL 0x2
-
-/* MC_CMD_FC_IN_DDR_TEST_START msgrequest */
-#define MC_CMD_FC_IN_DDR_TEST_START_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
-#define MC_CMD_FC_IN_DDR_TEST_START_MASK_OFST 8
-#define MC_CMD_FC_IN_DDR_TEST_START_T0_LBN 0
-#define MC_CMD_FC_IN_DDR_TEST_START_T0_WIDTH 1
-#define MC_CMD_FC_IN_DDR_TEST_START_T1_LBN 1
-#define MC_CMD_FC_IN_DDR_TEST_START_T1_WIDTH 1
-#define MC_CMD_FC_IN_DDR_TEST_START_B0_LBN 2
-#define MC_CMD_FC_IN_DDR_TEST_START_B0_WIDTH 1
-#define MC_CMD_FC_IN_DDR_TEST_START_B1_LBN 3
-#define MC_CMD_FC_IN_DDR_TEST_START_B1_WIDTH 1
-
-/* MC_CMD_FC_IN_DDR_TEST_POLL msgrequest */
-#define MC_CMD_FC_IN_DDR_TEST_POLL_LEN 12
-#define MC_CMD_FC_IN_DDR_TEST_CMD_OFST 0
-/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
-/* Clear previous test result and prepare for restarting DDR test */
-#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_OFST 8
-
-/* MC_CMD_FC_IN_GET_ASSERT msgrequest */
-#define MC_CMD_FC_IN_GET_ASSERT_LEN 4
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-
-/* MC_CMD_FC_IN_FPGA_BUILD msgrequest */
-#define MC_CMD_FC_IN_FPGA_BUILD_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* FPGA build info operation code */
-#define MC_CMD_FC_IN_FPGA_BUILD_OP_OFST 4
-/* enum: Get the build registers */
-#define MC_CMD_FC_IN_FPGA_BUILD_BUILD 0x1
-/* enum: Get the services registers */
-#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES 0x2
-/* enum: Get the BSP version */
-#define MC_CMD_FC_IN_FPGA_BUILD_BSP_VERSION 0x3
-/* enum: Get build register for V2 (SFA974X) */
-#define MC_CMD_FC_IN_FPGA_BUILD_BUILD_V2 0x4
-/* enum: Get
the services register for V2 (SFA974X) */ -#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES_V2 0x5 - -/* MC_CMD_FC_IN_READ_MAP msgrequest */ -#define MC_CMD_FC_IN_READ_MAP_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 -#define MC_CMD_FC_IN_READ_MAP_OP_LBN 0 -#define MC_CMD_FC_IN_READ_MAP_OP_WIDTH 8 -/* enum: Get the number of map regions */ -#define MC_CMD_FC_OP_READ_MAP_COUNT 0x1 -/* enum: Get the specified map */ -#define MC_CMD_FC_OP_READ_MAP_INDEX 0x2 - -/* MC_CMD_FC_IN_READ_MAP_COUNT msgrequest */ -#define MC_CMD_FC_IN_READ_MAP_COUNT_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */ - -/* MC_CMD_FC_IN_READ_MAP_INDEX msgrequest */ -#define MC_CMD_FC_IN_READ_MAP_INDEX_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */ -#define MC_CMD_FC_IN_MAP_INDEX_OFST 8 - -/* MC_CMD_FC_IN_CAPABILITIES msgrequest */ -#define MC_CMD_FC_IN_CAPABILITIES_LEN 4 -/* MC_CMD_FC_IN_CMD_OFST 0 */ - -/* MC_CMD_FC_IN_GLOBAL_FLAGS msgrequest */ -#define MC_CMD_FC_IN_GLOBAL_FLAGS_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_OFST 4 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_LBN 0 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_WIDTH 1 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_LBN 1 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_WIDTH 1 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_LBN 2 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_WIDTH 1 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_LBN 3 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_WIDTH 1 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_LBN 4 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_WIDTH 1 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_LBN 5 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_WIDTH 1 - -/* MC_CMD_FC_IN_IO_REL msgrequest */ -#define MC_CMD_FC_IN_IO_REL_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 -#define MC_CMD_FC_IN_IO_REL_OP_LBN 0 -#define MC_CMD_FC_IN_IO_REL_OP_WIDTH 8 -/* enum: Get the base address that the FC applies to relative commands */ -#define MC_CMD_FC_IN_IO_REL_GET_ADDR 0x1 -/* enum: Read data */ -#define MC_CMD_FC_IN_IO_REL_READ32 0x2 -/* enum: Write data */ -#define MC_CMD_FC_IN_IO_REL_WRITE32 0x3 -#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_LBN 8 -#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_WIDTH 8 -/* enum: Application address space */ -#define MC_CMD_FC_COMP_TYPE_APP_ADDR_SPACE 0x1 -/* enum: Flash address space */ -#define MC_CMD_FC_COMP_TYPE_FLASH 0x2 - -/* MC_CMD_FC_IN_IO_REL_GET_ADDR msgrequest */ -#define MC_CMD_FC_IN_IO_REL_GET_ADDR_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ - -/* MC_CMD_FC_IN_IO_REL_READ32 msgrequest */ -#define MC_CMD_FC_IN_IO_REL_READ32_LEN 20 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ -#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_OFST 8 -#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_OFST 12 -#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_OFST 16 - -/* MC_CMD_FC_IN_IO_REL_WRITE32 msgrequest */ -#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMIN 20 -#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX 252 -#define MC_CMD_FC_IN_IO_REL_WRITE32_LEN(num) (16+4*(num)) -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ -#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_OFST 8 -#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_OFST 12 -#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_OFST 16 -#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_LEN 4 
-#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MINNUM 1 -#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM 59 - -/* MC_CMD_FC_IN_UHLINK msgrequest */ -#define MC_CMD_FC_IN_UHLINK_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 -#define MC_CMD_FC_IN_UHLINK_OP_LBN 0 -#define MC_CMD_FC_IN_UHLINK_OP_WIDTH 8 -/* enum: Get PHY configuration info */ -#define MC_CMD_FC_OP_UHLINK_PHY 0x1 -/* enum: Get MAC configuration info */ -#define MC_CMD_FC_OP_UHLINK_MAC 0x2 -/* enum: Get Rx eye table */ -#define MC_CMD_FC_OP_UHLINK_RX_EYE 0x3 -/* enum: Get Rx eye plot */ -#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT 0x4 -/* enum: Get Rx eye plot */ -#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT 0x5 -/* enum: Retune Rx settings */ -#define MC_CMD_FC_OP_UHLINK_RX_TUNE 0x6 -/* enum: Set loopback mode on fpga port */ -#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET 0x7 -/* enum: Get loopback mode config state on fpga port */ -#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET 0x8 -#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN 8 -#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_WIDTH 8 -#define MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN 16 -#define MC_CMD_FC_IN_UHLINK_PORT_IDX_WIDTH 8 -#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN 24 -#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_WIDTH 8 -/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are - * irrelevant. Port number is derived from pci_fn; passed in FC header. - */ -#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_DEFAULT 0x0 -/* enum: Override default port number. Port number determined by fields - * PORT_TYPE and PORT_IDX. - */ -#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE 0x1 - -/* MC_CMD_FC_OP_UHLINK_PHY msgrequest */ -#define MC_CMD_FC_OP_UHLINK_PHY_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ - -/* MC_CMD_FC_OP_UHLINK_MAC msgrequest */ -#define MC_CMD_FC_OP_UHLINK_MAC_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ - -/* MC_CMD_FC_OP_UHLINK_RX_EYE msgrequest */ -#define MC_CMD_FC_OP_UHLINK_RX_EYE_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ -#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_OFST 8 -#define MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK 0x30 /* enum */ - -/* MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT msgrequest */ -#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ - -/* MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT msgrequest */ -#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_LEN 20 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ -#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_OFST 8 -#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_OFST 12 -#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_OFST 16 -#define MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK 0x1e /* enum */ - -/* MC_CMD_FC_OP_UHLINK_RX_TUNE msgrequest */ -#define MC_CMD_FC_OP_UHLINK_RX_TUNE_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ - -/* MC_CMD_FC_OP_UHLINK_LOOPBACK_SET msgrequest */ -#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ -#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_OFST 8 -#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PCS_SERIAL 0x0 /* enum */ -#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_PRE_CDR 0x1 /* enum */ -#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_POST_CDR 0x2 /* enum */ -#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_OFST 12 -#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_OFF 0x0 /* enum */ -#define 
MC_CMD_FC_UHLINK_LOOPBACK_STATE_ON 0x1 /* enum */ - -/* MC_CMD_FC_OP_UHLINK_LOOPBACK_GET msgrequest */ -#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ -#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_OFST 8 - -/* MC_CMD_FC_IN_SET_LINK msgrequest */ -#define MC_CMD_FC_IN_SET_LINK_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* See MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ -#define MC_CMD_FC_IN_SET_LINK_MODE_OFST 4 -#define MC_CMD_FC_IN_SET_LINK_SPEED_OFST 8 -#define MC_CMD_FC_IN_SET_LINK_FLAGS_OFST 12 -#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN 0 -#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH 1 -#define MC_CMD_FC_IN_SET_LINK_POWEROFF_LBN 1 -#define MC_CMD_FC_IN_SET_LINK_POWEROFF_WIDTH 1 -#define MC_CMD_FC_IN_SET_LINK_TXDIS_LBN 2 -#define MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH 1 - -/* MC_CMD_FC_IN_LICENSE msgrequest */ -#define MC_CMD_FC_IN_LICENSE_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_LICENSE_OP_OFST 4 -#define MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE 0x0 /* enum */ -#define MC_CMD_FC_IN_LICENSE_GET_KEY_STATS 0x1 /* enum */ - -/* MC_CMD_FC_IN_STARTUP msgrequest */ -#define MC_CMD_FC_IN_STARTUP_LEN 40 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_STARTUP_BASE_OFST 4 -#define MC_CMD_FC_IN_STARTUP_LENGTH_OFST 8 -/* Length of identifier */ -#define MC_CMD_FC_IN_STARTUP_IDLENGTH_OFST 12 -/* Identifier for AOE FPGA */ -#define MC_CMD_FC_IN_STARTUP_ID_OFST 16 -#define MC_CMD_FC_IN_STARTUP_ID_LEN 1 -#define MC_CMD_FC_IN_STARTUP_ID_NUM 24 - -/* MC_CMD_FC_IN_DMA msgrequest */ -#define MC_CMD_FC_IN_DMA_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DMA_OP_OFST 4 -#define MC_CMD_FC_IN_DMA_STOP 0x0 /* enum */ -#define MC_CMD_FC_IN_DMA_READ 0x1 /* enum */ - -/* MC_CMD_FC_IN_DMA_STOP msgrequest */ -#define MC_CMD_FC_IN_DMA_STOP_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_DMA_OP_OFST 4 */ -/* FC supplied handle */ -#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_OFST 8 - -/* MC_CMD_FC_IN_DMA_READ msgrequest */ -#define MC_CMD_FC_IN_DMA_READ_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_DMA_OP_OFST 4 */ -#define MC_CMD_FC_IN_DMA_READ_OFFSET_OFST 8 -#define MC_CMD_FC_IN_DMA_READ_LENGTH_OFST 12 - -/* MC_CMD_FC_IN_TIMED_READ msgrequest */ -#define MC_CMD_FC_IN_TIMED_READ_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 -#define MC_CMD_FC_IN_TIMED_READ_SET 0x0 /* enum */ -#define MC_CMD_FC_IN_TIMED_READ_GET 0x1 /* enum */ -#define MC_CMD_FC_IN_TIMED_READ_CLEAR 0x2 /* enum */ - -/* MC_CMD_FC_IN_TIMED_READ_SET msgrequest */ -#define MC_CMD_FC_IN_TIMED_READ_SET_LEN 52 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ -/* Host supplied handle (unique) */ -#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST 8 -/* Address into which to transfer data in host */ -#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_OFST 12 -#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LEN 8 -#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST 12 -#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST 16 -/* AOE address from which to transfer data */ -#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_OFST 20 -#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LEN 8 -#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST 20 -#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST 24 -/* Length of AOE transfer (total) */ -#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST 28 -/* Length of host transfer (total) */ -#define 
MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST 32 -/* Offset back from aoe_address to apply operation to */ -#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_OFST 36 -/* Data to apply at offset */ -#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_OFST 40 -#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST 44 -#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN 0 -#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_WIDTH 1 -#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN 1 -#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_WIDTH 1 -#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN 2 -#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_WIDTH 1 -#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN 3 -#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_WIDTH 2 -#define MC_CMD_FC_IN_TIMED_READ_SET_NONE 0x0 /* enum */ -#define MC_CMD_FC_IN_TIMED_READ_SET_READ 0x1 /* enum */ -#define MC_CMD_FC_IN_TIMED_READ_SET_WRITE 0x2 /* enum */ -#define MC_CMD_FC_IN_TIMED_READ_SET_READWRITE 0x3 /* enum */ -/* Period at which reads are performed (100ms units) */ -#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST 48 - -/* MC_CMD_FC_IN_TIMED_READ_GET msgrequest */ -#define MC_CMD_FC_IN_TIMED_READ_GET_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ -/* FC supplied handle */ -#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_OFST 8 - -/* MC_CMD_FC_IN_TIMED_READ_CLEAR msgrequest */ -#define MC_CMD_FC_IN_TIMED_READ_CLEAR_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ -/* FC supplied handle */ -#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_OFST 8 - -/* MC_CMD_FC_IN_LOG msgrequest */ -#define MC_CMD_FC_IN_LOG_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_LOG_OP_OFST 4 -#define MC_CMD_FC_IN_LOG_ADDR_RANGE 0x0 /* enum */ -#define MC_CMD_FC_IN_LOG_JTAG_UART 0x1 /* enum */ - -/* MC_CMD_FC_IN_LOG_ADDR_RANGE msgrequest */ -#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LEN 20 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_LOG_OP_OFST 4 */ -/* Partition offset into flash */ -#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_OFST 8 -/* Partition length */ -#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_OFST 12 -/* Partition erase size */ -#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_OFST 16 - -/* MC_CMD_FC_IN_LOG_JTAG_UART msgrequest */ -#define MC_CMD_FC_IN_LOG_JTAG_UART_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_LOG_OP_OFST 4 */ -/* Enable/disable printing to JTAG UART */ -#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_OFST 8 - -/* MC_CMD_FC_IN_CLOCK msgrequest */ -#define MC_CMD_FC_IN_CLOCK_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_CLOCK_OP_OFST 4 -#define MC_CMD_FC_IN_CLOCK_GET_TIME 0x0 /* enum */ -#define MC_CMD_FC_IN_CLOCK_SET_TIME 0x1 /* enum */ -/* Perform a clock operation */ -#define MC_CMD_FC_IN_CLOCK_ID_OFST 8 -#define MC_CMD_FC_IN_CLOCK_STATS 0x0 /* enum */ -#define MC_CMD_FC_IN_CLOCK_MAC 0x1 /* enum */ - -/* MC_CMD_FC_IN_CLOCK_GET_TIME msgrequest */ -#define MC_CMD_FC_IN_CLOCK_GET_TIME_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */ -/* Retrieve the clock value of the specified clock */ -/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */ - -/* MC_CMD_FC_IN_CLOCK_SET_TIME msgrequest */ -#define MC_CMD_FC_IN_CLOCK_SET_TIME_LEN 24 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */ -/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */ -#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_OFST 12 -#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LEN 8 -#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST 12 -#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST 16 -/* Set the clock value of the specified clock */ 
-#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_OFST 20 - -/* MC_CMD_FC_IN_DDR msgrequest */ -#define MC_CMD_FC_IN_DDR_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DDR_OP_OFST 4 -#define MC_CMD_FC_IN_DDR_SET_SPD 0x0 /* enum */ -#define MC_CMD_FC_IN_DDR_GET_STATUS 0x1 /* enum */ -#define MC_CMD_FC_IN_DDR_SET_INFO 0x2 /* enum */ -#define MC_CMD_FC_IN_DDR_BANK_OFST 8 -#define MC_CMD_FC_IN_DDR_BANK_B0 0x0 /* enum */ -#define MC_CMD_FC_IN_DDR_BANK_B1 0x1 /* enum */ -#define MC_CMD_FC_IN_DDR_BANK_T0 0x2 /* enum */ -#define MC_CMD_FC_IN_DDR_BANK_T1 0x3 /* enum */ -#define MC_CMD_FC_IN_DDR_NUM_BANKS 0x4 /* enum */ - -/* MC_CMD_FC_IN_DDR_SET_SPD msgrequest */ -#define MC_CMD_FC_IN_DDR_SET_SPD_LEN 148 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ -/* Affected bank */ -/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ -/* Flags */ -#define MC_CMD_FC_IN_DDR_FLAGS_OFST 12 -#define MC_CMD_FC_IN_DDR_SET_SPD_ACTIVE 0x1 /* enum */ -/* 128-byte page of serial presence detect data read from module's EEPROM */ -#define MC_CMD_FC_IN_DDR_SPD_OFST 16 -#define MC_CMD_FC_IN_DDR_SPD_LEN 1 -#define MC_CMD_FC_IN_DDR_SPD_NUM 128 -/* Page index of the spd data copied into MC_CMD_FC_IN_DDR_SPD */ -#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST 144 - -/* MC_CMD_FC_IN_DDR_SET_INFO msgrequest */ -#define MC_CMD_FC_IN_DDR_SET_INFO_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ -/* Affected bank */ -/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ -/* Size of DDR */ -#define MC_CMD_FC_IN_DDR_SIZE_OFST 12 - -/* MC_CMD_FC_IN_DDR_GET_STATUS msgrequest */ -#define MC_CMD_FC_IN_DDR_GET_STATUS_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ -/* Affected bank */ -/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ - -/* MC_CMD_FC_IN_TIMESTAMP msgrequest */ -#define MC_CMD_FC_IN_TIMESTAMP_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* FC timestamp operation code */ -#define MC_CMD_FC_IN_TIMESTAMP_OP_OFST 4 -/* enum: Read transmit timestamp(s) */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT 0x0 -/* enum: Read snapshot timestamps */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT 0x1 -/* enum: Clear all transmit timestamps */ -#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT 0x2 - -/* MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT msgrequest */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN 28 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST 4 -/* Control filtering of the returned timestamp and sequence number specified - * here - */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST 8 -/* enum: Return most recent timestamp. 
No filtering */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LATEST 0x0 -/* enum: Match timestamp against the PTP clock ID, port number and sequence - * number specified - */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH 0x1 -/* Clock identity of PTP packet for which timestamp required */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_OFST 12 -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LEN 8 -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST 12 -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST 16 -/* Port number of PTP packet for which timestamp required */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST 20 -/* Sequence number of PTP packet for which timestamp required */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST 24 - -/* MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT msgrequest */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_OFST 4 - -/* MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT msgrequest */ -#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_OFST 4 - -/* MC_CMD_FC_IN_SPI msgrequest */ -#define MC_CMD_FC_IN_SPI_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* Basic commands for SPI Flash. */ -#define MC_CMD_FC_IN_SPI_OP_OFST 4 -/* enum: SPI Flash read */ -#define MC_CMD_FC_IN_SPI_READ 0x0 -/* enum: SPI Flash write */ -#define MC_CMD_FC_IN_SPI_WRITE 0x1 -/* enum: SPI Flash erase */ -#define MC_CMD_FC_IN_SPI_ERASE 0x2 - -/* MC_CMD_FC_IN_SPI_READ msgrequest */ -#define MC_CMD_FC_IN_SPI_READ_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_SPI_READ_OP_OFST 4 -#define MC_CMD_FC_IN_SPI_READ_ADDR_OFST 8 -#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_OFST 12 - -/* MC_CMD_FC_IN_SPI_WRITE msgrequest */ -#define MC_CMD_FC_IN_SPI_WRITE_LENMIN 16 -#define MC_CMD_FC_IN_SPI_WRITE_LENMAX 252 -#define MC_CMD_FC_IN_SPI_WRITE_LEN(num) (12+4*(num)) -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_SPI_WRITE_OP_OFST 4 -#define MC_CMD_FC_IN_SPI_WRITE_ADDR_OFST 8 -#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_OFST 12 -#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_LEN 4 -#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM 1 -#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM 60 - -/* MC_CMD_FC_IN_SPI_ERASE msgrequest */ -#define MC_CMD_FC_IN_SPI_ERASE_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_SPI_ERASE_OP_OFST 4 -#define MC_CMD_FC_IN_SPI_ERASE_ADDR_OFST 8 -#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_OFST 12 - -/* MC_CMD_FC_IN_DIAG msgrequest */ -#define MC_CMD_FC_IN_DIAG_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* Operation code indicating component type */ -#define MC_CMD_FC_IN_DIAG_OP_OFST 4 -/* enum: Power noise generator. */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE 0x0 -/* enum: DDR soak test component. */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK 0x1 -/* enum: Diagnostics datapath control component. 
*/ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL 0x2 - -/* MC_CMD_FC_IN_DIAG_POWER_NOISE msgrequest */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_OFST 4 -/* Sub-opcode describing the operation to be carried out */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_OFST 8 -/* enum: Read the configuration (the 32-bit values in each of the clock enable - * count and toggle count registers) - */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG 0x0 -/* enum: Write a new configuration to the clock enable count and toggle count - * registers - */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG 0x1 - -/* MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG msgrequest */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_OFST 8 - -/* MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG msgrequest */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 20 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_OFST 8 -/* The 32-bit value to be written to the toggle count register */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_OFST 12 -/* The 32-bit value to be written to the clock enable count register */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_OFST 16 - -/* MC_CMD_FC_IN_DIAG_DDR_SOAK msgrequest */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_OFST 4 -/* Sub-opcode describing the operation to be carried out */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_OFST 8 -/* enum: Starts DDR soak test on selected banks */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START 0x0 -/* enum: Read status of DDR soak test */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT 0x1 -/* enum: Stop test */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP 0x2 -/* enum: Set or clear bit that triggers fake errors. These cause subsequent - * tests to fail until the bit is cleared. 
- */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR 0x3 - -/* MC_CMD_FC_IN_DIAG_DDR_SOAK_START msgrequest */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_LEN 24 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_OFST 8 -/* Mask of DDR banks to be tested */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_OFST 12 -/* Pattern to use in the soak test */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_OFST 16 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ZEROS 0x0 /* enum */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONES 0x1 /* enum */ -/* Either multiple automatic tests until a STOP command is issued, or one - * single test - */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_OFST 20 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONGOING_TEST 0x0 /* enum */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SINGLE_TEST 0x1 /* enum */ - -/* MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT msgrequest */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_OFST 8 -/* DDR bank to read status from */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_OFST 12 -#define MC_CMD_FC_DDR_BANK0 0x0 /* enum */ -#define MC_CMD_FC_DDR_BANK1 0x1 /* enum */ -#define MC_CMD_FC_DDR_BANK2 0x2 /* enum */ -#define MC_CMD_FC_DDR_BANK3 0x3 /* enum */ -#define MC_CMD_FC_DDR_AOEMEM_MAX_BANKS 0x4 /* enum */ - -/* MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP msgrequest */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_OFST 8 -/* Mask of DDR banks to be tested */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_OFST 12 - -/* MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR msgrequest */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_LEN 20 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_OFST 8 -/* Mask of DDR banks to set/clear error flag on */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_OFST 12 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_OFST 16 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_CLEAR 0x0 /* enum */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SET 0x1 /* enum */ - -/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL msgrequest */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_OFST 4 -/* Sub-opcode describing the operation to be carried out */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_OFST 8 -/* enum: Set a known datapath configuration */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE 0x0 -/* enum: Apply raw config to datapath control registers */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG 0x1 - -/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE msgrequest */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_OFST 8 -/* Datapath configuration identifier */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_OFST 12 -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_PASSTHROUGH 0x0 /* enum */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SNAKE 0x1 /* enum */ - -/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG msgrequest */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 24 -/* MC_CMD_FC_IN_CMD_OFST 0 */ 
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_OFST 8 -/* Value to write into control register 1 */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_OFST 12 -/* Value to write into control register 2 */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_OFST 16 -/* Value to write into control register 3 */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_OFST 20 - -/* MC_CMD_FC_OUT msgresponse */ -#define MC_CMD_FC_OUT_LEN 0 - -/* MC_CMD_FC_OUT_NULL msgresponse */ -#define MC_CMD_FC_OUT_NULL_LEN 0 - -/* MC_CMD_FC_OUT_READ32 msgresponse */ -#define MC_CMD_FC_OUT_READ32_LENMIN 4 -#define MC_CMD_FC_OUT_READ32_LENMAX 252 -#define MC_CMD_FC_OUT_READ32_LEN(num) (0+4*(num)) -#define MC_CMD_FC_OUT_READ32_BUFFER_OFST 0 -#define MC_CMD_FC_OUT_READ32_BUFFER_LEN 4 -#define MC_CMD_FC_OUT_READ32_BUFFER_MINNUM 1 -#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM 63 - -/* MC_CMD_FC_OUT_WRITE32 msgresponse */ -#define MC_CMD_FC_OUT_WRITE32_LEN 0 - -/* MC_CMD_FC_OUT_TRC_READ msgresponse */ -#define MC_CMD_FC_OUT_TRC_READ_LEN 16 -#define MC_CMD_FC_OUT_TRC_READ_DATA_OFST 0 -#define MC_CMD_FC_OUT_TRC_READ_DATA_LEN 4 -#define MC_CMD_FC_OUT_TRC_READ_DATA_NUM 4 - -/* MC_CMD_FC_OUT_TRC_WRITE msgresponse */ -#define MC_CMD_FC_OUT_TRC_WRITE_LEN 0 - -/* MC_CMD_FC_OUT_GET_VERSION msgresponse */ -#define MC_CMD_FC_OUT_GET_VERSION_LEN 12 -#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_OFST 0 -#define MC_CMD_FC_OUT_GET_VERSION_VERSION_OFST 4 -#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LEN 8 -#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LO_OFST 4 -#define MC_CMD_FC_OUT_GET_VERSION_VERSION_HI_OFST 8 - -/* MC_CMD_FC_OUT_TRC_RX_READ msgresponse */ -#define MC_CMD_FC_OUT_TRC_RX_READ_LEN 8 -#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_OFST 0 -#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_LEN 4 -#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_NUM 2 - -/* MC_CMD_FC_OUT_TRC_RX_WRITE msgresponse */ -#define MC_CMD_FC_OUT_TRC_RX_WRITE_LEN 0 - -/* MC_CMD_FC_OUT_MAC_RECONFIGURE msgresponse */ -#define MC_CMD_FC_OUT_MAC_RECONFIGURE_LEN 0 - -/* MC_CMD_FC_OUT_MAC_SET_LINK msgresponse */ -#define MC_CMD_FC_OUT_MAC_SET_LINK_LEN 0 - -/* MC_CMD_FC_OUT_MAC_READ_STATUS msgresponse */ -#define MC_CMD_FC_OUT_MAC_READ_STATUS_LEN 4 -#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_OFST 0 - -/* MC_CMD_FC_OUT_MAC_GET_RX_STATS msgresponse */ -#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_RX_NSTATS))+1))>>3) -#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST 0 -#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN 8 -#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LO_OFST 0 -#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_HI_OFST 4 -#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_RX_NSTATS -#define MC_CMD_FC_MAC_RX_STATS_OCTETS 0x0 /* enum */ -#define MC_CMD_FC_MAC_RX_OCTETS_OK 0x1 /* enum */ -#define MC_CMD_FC_MAC_RX_ALIGNMENT_ERRORS 0x2 /* enum */ -#define MC_CMD_FC_MAC_RX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */ -#define MC_CMD_FC_MAC_RX_FRAMES_OK 0x4 /* enum */ -#define MC_CMD_FC_MAC_RX_CRC_ERRORS 0x5 /* enum */ -#define MC_CMD_FC_MAC_RX_VLAN_OK 0x6 /* enum */ -#define MC_CMD_FC_MAC_RX_ERRORS 0x7 /* enum */ -#define MC_CMD_FC_MAC_RX_UCAST_PKTS 0x8 /* enum */ -#define MC_CMD_FC_MAC_RX_MULTICAST_PKTS 0x9 /* enum */ -#define MC_CMD_FC_MAC_RX_BROADCAST_PKTS 0xa /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_DROP_EVENTS 0xb /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS 0xc /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_UNDERSIZE_PKTS 0xd /* 
enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS_64 0xe /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS_65_127 0xf /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS_128_255 0x10 /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS_256_511 0x11 /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS_512_1023 0x12 /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS_1024_1518 0x13 /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS_1519_MAX 0x14 /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_OVERSIZE_PKTS 0x15 /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_JABBERS 0x16 /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_FRAGMENTS 0x17 /* enum */ -#define MC_CMD_FC_MAC_RX_MAC_CONTROL_FRAMES 0x18 /* enum */ -/* enum: (Last entry) */ -#define MC_CMD_FC_MAC_RX_NSTATS 0x19 - -/* MC_CMD_FC_OUT_MAC_GET_TX_STATS msgresponse */ -#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_TX_NSTATS))+1))>>3) -#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_OFST 0 -#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LEN 8 -#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LO_OFST 0 -#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_HI_OFST 4 -#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_TX_NSTATS -#define MC_CMD_FC_MAC_TX_STATS_OCTETS 0x0 /* enum */ -#define MC_CMD_FC_MAC_TX_OCTETS_OK 0x1 /* enum */ -#define MC_CMD_FC_MAC_TX_ALIGNMENT_ERRORS 0x2 /* enum */ -#define MC_CMD_FC_MAC_TX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */ -#define MC_CMD_FC_MAC_TX_FRAMES_OK 0x4 /* enum */ -#define MC_CMD_FC_MAC_TX_CRC_ERRORS 0x5 /* enum */ -#define MC_CMD_FC_MAC_TX_VLAN_OK 0x6 /* enum */ -#define MC_CMD_FC_MAC_TX_ERRORS 0x7 /* enum */ -#define MC_CMD_FC_MAC_TX_UCAST_PKTS 0x8 /* enum */ -#define MC_CMD_FC_MAC_TX_MULTICAST_PKTS 0x9 /* enum */ -#define MC_CMD_FC_MAC_TX_BROADCAST_PKTS 0xa /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_DROP_EVENTS 0xb /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS 0xc /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_UNDERSIZE_PKTS 0xd /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS_64 0xe /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS_65_127 0xf /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS_128_255 0x10 /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS_256_511 0x11 /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS_512_1023 0x12 /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS_1024_1518 0x13 /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS_1519_TX_MTU 0x14 /* enum */ -#define MC_CMD_FC_MAC_TX_MAC_CONTROL_FRAMES 0x15 /* enum */ -/* enum: (Last entry) */ -#define MC_CMD_FC_MAC_TX_NSTATS 0x16 - -/* MC_CMD_FC_OUT_MAC_GET_STATS msgresponse */ -#define MC_CMD_FC_OUT_MAC_GET_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_NSTATS_PER_BLOCK))+1))>>3) -/* MAC Statistics */ -#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_OFST 0 -#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LEN 8 -#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LO_OFST 0 -#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_HI_OFST 4 -#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_NUM MC_CMD_FC_MAC_NSTATS_PER_BLOCK - -/* MC_CMD_FC_OUT_MAC msgresponse */ -#define MC_CMD_FC_OUT_MAC_LEN 0 - -/* MC_CMD_FC_OUT_SFP msgresponse */ -#define MC_CMD_FC_OUT_SFP_LEN 0 - -/* MC_CMD_FC_OUT_DDR_TEST_START msgresponse */ -#define MC_CMD_FC_OUT_DDR_TEST_START_LEN 0 - -/* MC_CMD_FC_OUT_DDR_TEST_POLL msgresponse */ -#define MC_CMD_FC_OUT_DDR_TEST_POLL_LEN 8 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST 0 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN 0 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH 8 -/* enum: Test not yet initiated */ -#define MC_CMD_FC_OP_DDR_TEST_NONE 0x0 -/* enum: Test is in 
progress */
-#define MC_CMD_FC_OP_DDR_TEST_INPROGRESS 0x1
-/* enum: Test completed */
-#define MC_CMD_FC_OP_DDR_TEST_SUCCESS 0x2
-/* enum: Test did not complete in specified time */
-#define MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED 0x3
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN 11
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_LBN 10
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_LBN 9
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_LBN 8
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_WIDTH 1
-/* Test result from FPGA */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_OFST 4
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_LBN 31
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_LBN 30
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_LBN 29
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_LBN 28
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN 15
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_LBN 10
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_WIDTH 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_LBN 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_WIDTH 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_LBN 0
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_WIDTH 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_COMPLETE 0x0 /* enum */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_FAIL 0x1 /* enum */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS 0x2 /* enum */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_FAIL 0x3 /* enum */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_SUCCESS 0x4 /* enum */
-
-/* MC_CMD_FC_OUT_DDR_TEST msgresponse */
-#define MC_CMD_FC_OUT_DDR_TEST_LEN 0
-
-/* MC_CMD_FC_OUT_GET_ASSERT msgresponse */
-#define MC_CMD_FC_OUT_GET_ASSERT_LEN 144
-/* Assertion status flag. */
-#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST 0
-#define MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN 8
-#define MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH 8
-/* enum: No crash data available */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0
-/* enum: New crash data available */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1
-/* enum: Crash data has been sent */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2
-#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN 0
-#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH 8
-/* enum: No crash has been recorded. */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0
-/* enum: Crash due to exception. */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1
-/* enum: Crash due to assertion.
*/ -#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2 -/* Failing PC value */ -#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_OFST 4 -/* Saved GP regs */ -#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_OFST 8 -#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_LEN 4 -#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_NUM 31 -/* Exception Type */ -#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_OFST 132 -/* Instruction at which exception occurred */ -#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_OFST 136 -/* BAD Address that triggered address-based exception */ -#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_OFST 140 - -/* MC_CMD_FC_OUT_FPGA_BUILD msgresponse */ -#define MC_CMD_FC_OUT_FPGA_BUILD_LEN 32 -#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_LBN 31 -#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_LBN 30 -#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_WIDTH 14 -#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN 12 -#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH 4 -#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN 4 -#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH 8 -#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH 4 -/* Build timestamp (seconds since epoch) */ -#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_OFST 4 -#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_OFST 8 -#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_WIDTH 8 -#define MC_CMD_FC_FPGA_TYPE_A7 0xa7 /* enum */ -#define MC_CMD_FC_FPGA_TYPE_A5 0xa5 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_LBN 8 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_WIDTH 10 -#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_LBN 18 -#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_LBN 19 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_LBN 20 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_LBN 21 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_LBN 22 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_LBN 23 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_LBN 24 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_LBN 25 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_LBN 26 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_LBN 27 -#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_LBN 28 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_LBN 29 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_WIDTH 2 -#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_LBN 31 -#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_OFST 12 -#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_LBN 0 -#define 
MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_WIDTH 1 -#define MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 /* enum */ -#define MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_LBN 17 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_WIDTH 15 -#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_OFST 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_OFST 20 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_OFST 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LEN 8 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LO_OFST 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_HI_OFST 20 -#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_OFST 24 -#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_OFST 28 -#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_WIDTH 16 - -/* MC_CMD_FC_OUT_FPGA_BUILD_V2 msgresponse */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_LEN 32 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_OFST 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_LBN 31 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_LBN 30 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_WIDTH 14 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_LBN 12 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_WIDTH 4 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_LBN 4 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_WIDTH 8 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_WIDTH 4 -/* Build timestamp (seconds since epoch) */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_OFST 4 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_OFST 8 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_LBN 31 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_LBN 29 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_LBN 28 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_LBN 27 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_LBN 26 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_LBN 25 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_LBN 24 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_LBN 23 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_LBN 22 -#define 
MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_LBN 21 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_LBN 20 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_LBN 19 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_LBN 18 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_10G 0x0 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_40G 0x1 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_LBN 17 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_10G 0x0 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_40G 0x1 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_10G 0x0 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_40G 0x1 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_LBN 15 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_LBN 14 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_LBN 13 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_LBN 12 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_LBN 11 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_LBN 10 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_LBN 9 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_LBN 8 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_LBN 7 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_LBN 6 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_LBN 5 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_LBN 4 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_WIDTH 4 -#define MC_CMD_FC_FPGA_V2_TYPE_A3 0x0 /* enum */ -#define MC_CMD_FC_FPGA_V2_TYPE_A4 0x1 /* enum */ -#define MC_CMD_FC_FPGA_V2_TYPE_A5 0x2 /* enum */ -#define MC_CMD_FC_FPGA_V2_TYPE_A7 0x3 /* enum */ -#define MC_CMD_FC_FPGA_V2_TYPE_D3 0x8 /* enum */ -#define MC_CMD_FC_FPGA_V2_TYPE_D4 0x9 /* enum */ -#define MC_CMD_FC_FPGA_V2_TYPE_D5 0xa /* enum */ -#define MC_CMD_FC_FPGA_V2_TYPE_D7 0xb /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_OFST 12 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_WIDTH 1 -/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */ -/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_OFST 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_WIDTH 16 
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_OFST 20 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_OFST 24 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_OFST 28 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_WIDTH 16 - -/* MC_CMD_FC_OUT_FPGA_SERVICES msgresponse */ -#define MC_CMD_FC_OUT_FPGA_SERVICES_LEN 32 -#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_LBN 31 -#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_LBN 30 -#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH 14 -#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_LBN 12 -#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_WIDTH 4 -#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_LBN 4 -#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_WIDTH 8 -#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_WIDTH 4 -/* Build timestamp (seconds since epoch) */ -#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_OFST 4 -#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_OFST 8 -#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_LBN 8 -#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_LBN 27 -#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_LBN 28 -#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_LBN 29 -#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_LBN 30 -#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_LBN 31 -#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_OFST 12 -#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_LBN 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_OFST 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_LBN 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_OFST 20 -#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_LBN 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_OFST 24 -#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_OFST 28 -#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_WIDTH 16 - -/* MC_CMD_FC_OUT_FPGA_SERVICES_V2 msgresponse */ -#define 
MC_CMD_FC_OUT_FPGA_SERVICES_V2_LEN 32 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_OFST 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_LBN 31 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_LBN 30 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_LBN 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_WIDTH 14 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_LBN 12 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_WIDTH 4 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_LBN 4 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_WIDTH 8 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_WIDTH 4 -/* Build timestamp (seconds since epoch) */ -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_OFST 4 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_OFST 8 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_LBN 8 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_OFST 12 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_LBN 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_WIDTH 1 -/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */ -/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */ -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_OFST 24 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_OFST 28 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_WIDTH 16 - -/* MC_CMD_FC_OUT_BSP_VERSION msgresponse */ -#define MC_CMD_FC_OUT_BSP_VERSION_LEN 4 -/* Qsys system ID */ -#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_OFST 0 -#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_LBN 12 -#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_WIDTH 4 -#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_LBN 4 -#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_WIDTH 8 -#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_LBN 0 -#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_WIDTH 4 - -/* MC_CMD_FC_OUT_READ_MAP_COUNT msgresponse */ -#define MC_CMD_FC_OUT_READ_MAP_COUNT_LEN 4 -/* Number of maps */ -#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_OFST 0 - -/* MC_CMD_FC_OUT_READ_MAP_INDEX msgresponse */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN 164 -/* Index of the map */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_OFST 0 -/* Options for the map */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST 4 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_8 0x0 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_16 0x1 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_32 0x2 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_64 0x3 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK 0x3 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_FC 0x4 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_MEM 0x8 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ 0x10 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE 0x20 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_FREE 0x0 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED 0x40 /* enum */ -/* Address of start of map */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_OFST 8 -#define 
MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LEN 8 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LO_OFST 8 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_HI_OFST 12 -/* Length of address map */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_OFST 16 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LEN 8 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LO_OFST 16 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_HI_OFST 20 -/* Component information field */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_OFST 24 -/* License expiry data for map */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_OFST 28 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LEN 8 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LO_OFST 28 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_HI_OFST 32 -/* Name of the component */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_OFST 36 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_LEN 1 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_NUM 128 - -/* MC_CMD_FC_OUT_READ_MAP msgresponse */ -#define MC_CMD_FC_OUT_READ_MAP_LEN 0 - -/* MC_CMD_FC_OUT_CAPABILITIES msgresponse */ -#define MC_CMD_FC_OUT_CAPABILITIES_LEN 8 -/* Number of internal ports */ -#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_OFST 0 -/* Number of external ports */ -#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_OFST 4 - -/* MC_CMD_FC_OUT_GLOBAL_FLAGS msgresponse */ -#define MC_CMD_FC_OUT_GLOBAL_FLAGS_LEN 4 -#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_OFST 0 - -/* MC_CMD_FC_OUT_IO_REL msgresponse */ -#define MC_CMD_FC_OUT_IO_REL_LEN 0 - -/* MC_CMD_FC_OUT_IO_REL_GET_ADDR msgresponse */ -#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_LEN 8 -#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_OFST 0 -#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_OFST 4 - -/* MC_CMD_FC_OUT_IO_REL_READ32 msgresponse */ -#define MC_CMD_FC_OUT_IO_REL_READ32_LENMIN 4 -#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX 252 -#define MC_CMD_FC_OUT_IO_REL_READ32_LEN(num) (0+4*(num)) -#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST 0 -#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN 4 -#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM 1 -#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM 63 - -/* MC_CMD_FC_OUT_IO_REL_WRITE32 msgresponse */ -#define MC_CMD_FC_OUT_IO_REL_WRITE32_LEN 0 - -/* MC_CMD_FC_OUT_UHLINK_PHY msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_PHY_LEN 48 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_OFST 0 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_LBN 0 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_WIDTH 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_LBN 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_WIDTH 16 -/* Transceiver Transmit settings */ -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_OFST 4 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_LBN 0 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_WIDTH 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_LBN 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_WIDTH 16 -/* Transceiver Receive settings */ -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_OFST 8 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_LBN 0 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_WIDTH 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_LBN 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_WIDTH 16 -/* Rx eye opening */ -#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_OFST 12 -#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_LBN 0 -#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_WIDTH 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_LBN 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_WIDTH 
16 -/* PCS status word */ -#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_OFST 16 -/* Link status word */ -#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_OFST 20 -#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_LBN 0 -#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WIDTH 1 -#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_LBN 1 -#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_WIDTH 1 -/* Current SFp parameters applied */ -#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_OFST 24 -#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_LEN 20 -/* Link speed is 100, 1000, 10000 */ -#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_OFST 24 -/* Length of copper cable - zero when not relevant */ -#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_OFST 28 -/* True if a dual speed SFP+ module */ -#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_OFST 32 -/* True if an SFP Module is present (other fields valid when true) */ -#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_OFST 36 -/* The type of the SFP+ Module */ -#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_OFST 40 -/* PHY config flags */ -#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_OFST 44 -#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_LBN 0 -#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_WIDTH 1 -#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_LBN 1 -#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_WIDTH 1 -#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_LBN 2 -#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_WIDTH 1 - -/* MC_CMD_FC_OUT_UHLINK_MAC msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_MAC_LEN 20 -/* MAC configuration applied */ -#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_OFST 0 -/* MTU size */ -#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_OFST 4 -/* IF Mode status */ -#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_OFST 8 -/* MAC address configured */ -#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_OFST 12 -#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LEN 8 -#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST 12 -#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST 16 - -/* MC_CMD_FC_OUT_UHLINK_RX_EYE msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_RX_EYE_LEN ((((0-1+(32*MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK))+1))>>3) -/* Rx Eye measurements */ -#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_OFST 0 -#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_LEN 4 -#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_NUM MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK - -/* MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT_LEN 0 - -/* MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_LEN ((((32-1+(64*MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK))+1))>>3) -/* Has the eye plot dump completed and data returned is valid? 
*/ -#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_OFST 0 -/* Rx Eye binary plot */ -#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_OFST 4 -#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LEN 8 -#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LO_OFST 4 -#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_HI_OFST 8 -#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_NUM MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK - -/* MC_CMD_FC_OUT_UHLINK_RX_TUNE msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_RX_TUNE_LEN 0 - -/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET_LEN 0 - -/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_LEN 4 -#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_OFST 0 - -/* MC_CMD_FC_OUT_UHLINK msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_LEN 0 - -/* MC_CMD_FC_OUT_SET_LINK msgresponse */ -#define MC_CMD_FC_OUT_SET_LINK_LEN 0 - -/* MC_CMD_FC_OUT_LICENSE msgresponse */ -#define MC_CMD_FC_OUT_LICENSE_LEN 12 -/* Count of valid keys */ -#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_OFST 0 -/* Count of invalid keys */ -#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_OFST 4 -/* Count of blacklisted keys */ -#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_OFST 8 - -/* MC_CMD_FC_OUT_STARTUP msgresponse */ -#define MC_CMD_FC_OUT_STARTUP_LEN 4 -/* Capabilities of the FPGA/FC */ -#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST 0 -#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN 0 -#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH 1 - -/* MC_CMD_FC_OUT_DMA_READ msgresponse */ -#define MC_CMD_FC_OUT_DMA_READ_LENMIN 1 -#define MC_CMD_FC_OUT_DMA_READ_LENMAX 252 -#define MC_CMD_FC_OUT_DMA_READ_LEN(num) (0+1*(num)) -/* The data read */ -#define MC_CMD_FC_OUT_DMA_READ_DATA_OFST 0 -#define MC_CMD_FC_OUT_DMA_READ_DATA_LEN 1 -#define MC_CMD_FC_OUT_DMA_READ_DATA_MINNUM 1 -#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM 252 - -/* MC_CMD_FC_OUT_TIMED_READ_SET msgresponse */ -#define MC_CMD_FC_OUT_TIMED_READ_SET_LEN 4 -/* Timer handle */ -#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_OFST 0 - -/* MC_CMD_FC_OUT_TIMED_READ_GET msgresponse */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_LEN 52 -/* Host supplied handle (unique) */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_OFST 0 -/* Address into which to transfer data in host */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_OFST 4 -#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LEN 8 -#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST 4 -#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST 8 -/* AOE address from which to transfer data */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_OFST 12 -#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LEN 8 -#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LO_OFST 12 -#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_HI_OFST 16 -/* Length of AOE transfer (total) */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_OFST 20 -/* Length of host transfer (total) */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_OFST 24 -/* See FLAGS entry for MC_CMD_FC_IN_TIMED_READ_SET */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_OFST 28 -#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_OFST 32 -/* When active, start read time */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_OFST 36 -#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LEN 8 -#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LO_OFST 36 -#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_HI_OFST 40 -/* When active, end read time */ -#define 
MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_OFST 44 -#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LEN 8 -#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LO_OFST 44 -#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_HI_OFST 48 - -/* MC_CMD_FC_OUT_LOG_ADDR_RANGE msgresponse */ -#define MC_CMD_FC_OUT_LOG_ADDR_RANGE_LEN 0 - -/* MC_CMD_FC_OUT_LOG msgresponse */ -#define MC_CMD_FC_OUT_LOG_LEN 0 - -/* MC_CMD_FC_OUT_CLOCK_GET_TIME msgresponse */ -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_LEN 24 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_OFST 0 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_OFST 4 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LEN 8 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LO_OFST 4 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_HI_OFST 8 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_OFST 12 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_OFST 16 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_OFST 20 - -/* MC_CMD_FC_OUT_CLOCK_SET_TIME msgresponse */ -#define MC_CMD_FC_OUT_CLOCK_SET_TIME_LEN 0 - -/* MC_CMD_FC_OUT_DDR_SET_SPD msgresponse */ -#define MC_CMD_FC_OUT_DDR_SET_SPD_LEN 0 - -/* MC_CMD_FC_OUT_DDR_SET_INFO msgresponse */ -#define MC_CMD_FC_OUT_DDR_SET_INFO_LEN 0 - -/* MC_CMD_FC_OUT_DDR_GET_STATUS msgresponse */ -#define MC_CMD_FC_OUT_DDR_GET_STATUS_LEN 4 -#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_OFST 0 -#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN 0 -#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH 1 -#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_LBN 1 -#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_WIDTH 1 - -/* MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT msgresponse */ -#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_LEN 8 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_OFST 0 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_OFST 4 - -/* MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT msgresponse */ -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMIN 8 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX 248 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LEN(num) (0+8*(num)) -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_OFST 0 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_OFST 4 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_OFST 0 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LEN 8 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LO_OFST 0 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_HI_OFST 4 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MINNUM 0 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM 31 - -/* MC_CMD_FC_OUT_SPI_READ msgresponse */ -#define MC_CMD_FC_OUT_SPI_READ_LENMIN 4 -#define MC_CMD_FC_OUT_SPI_READ_LENMAX 252 -#define MC_CMD_FC_OUT_SPI_READ_LEN(num) (0+4*(num)) -#define MC_CMD_FC_OUT_SPI_READ_BUFFER_OFST 0 -#define MC_CMD_FC_OUT_SPI_READ_BUFFER_LEN 4 -#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MINNUM 1 -#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM 63 - -/* MC_CMD_FC_OUT_SPI_WRITE msgresponse */ -#define MC_CMD_FC_OUT_SPI_WRITE_LEN 0 - -/* MC_CMD_FC_OUT_SPI_ERASE msgresponse */ -#define MC_CMD_FC_OUT_SPI_ERASE_LEN 0 - -/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG msgresponse */ -#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_LEN 8 -/* The 32-bit value read from the toggle count register */ -#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_OFST 0 -/* The 32-bit value read from the clock enable count register */ -#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_OFST 4 - -/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG msgresponse */ -#define 
MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 0 - -/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_START msgresponse */ -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_START_LEN 0 - -/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT msgresponse */ -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_LEN 8 -/* DDR soak test status word; bits [4:0] are relevant. */ -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_OFST 0 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_LBN 0 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_WIDTH 1 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_LBN 1 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_WIDTH 1 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_LBN 2 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_WIDTH 1 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_LBN 3 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_WIDTH 1 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_LBN 4 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_WIDTH 1 -/* DDR soak test error count */ -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_OFST 4 - -/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP msgresponse */ -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP_LEN 0 - -/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR msgresponse */ -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR_LEN 0 - -/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE msgresponse */ -#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE_LEN 0 - -/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG msgresponse */ -#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 0 - - -/***********************************/ -/* MC_CMD_AOE - * AOE operations on MC - */ -#define MC_CMD_AOE 0xa - -/* MC_CMD_AOE_IN msgrequest */ -#define MC_CMD_AOE_IN_LEN 4 -#define MC_CMD_AOE_IN_OP_HDR_OFST 0 -#define MC_CMD_AOE_IN_OP_LBN 0 -#define MC_CMD_AOE_IN_OP_WIDTH 8 -/* enum: FPGA and CPLD information */ -#define MC_CMD_AOE_OP_INFO 0x1 -/* enum: Currents and voltages read from MCP3424s; DEBUG */ -#define MC_CMD_AOE_OP_CURRENTS 0x2 -/* enum: Temperatures at locations around the PCB; DEBUG */ -#define MC_CMD_AOE_OP_TEMPERATURES 0x3 -/* enum: Set CPLD to idle */ -#define MC_CMD_AOE_OP_CPLD_IDLE 0x4 -/* enum: Read from CPLD register */ -#define MC_CMD_AOE_OP_CPLD_READ 0x5 -/* enum: Write to CPLD register */ -#define MC_CMD_AOE_OP_CPLD_WRITE 0x6 -/* enum: Execute CPLD instruction */ -#define MC_CMD_AOE_OP_CPLD_INSTRUCTION 0x7 -/* enum: Reprogram the CPLD on the AOE device */ -#define MC_CMD_AOE_OP_CPLD_REPROGRAM 0x8 -/* enum: AOE power control */ -#define MC_CMD_AOE_OP_POWER 0x9 -/* enum: AOE image loading */ -#define MC_CMD_AOE_OP_LOAD 0xa -/* enum: Fan monitoring */ -#define MC_CMD_AOE_OP_FAN_CONTROL 0xb -/* enum: Fan failures since last reset */ -#define MC_CMD_AOE_OP_FAN_FAILURES 0xc -/* enum: Get generic AOE MAC statistics */ -#define MC_CMD_AOE_OP_MAC_STATS 0xd -/* enum: Retrieve PHY specific information */ -#define MC_CMD_AOE_OP_GET_PHY_MEDIA_INFO 0xe -/* enum: Write a number of JTAG primitive commands, return will give data */ -#define MC_CMD_AOE_OP_JTAG_WRITE 0xf -/* enum: Control access to the FPGA via the Siena JTAG Chain */ -#define MC_CMD_AOE_OP_FPGA_ACCESS 0x10 -/* enum: Set the MTU offset between Siena and AOE MACs */ -#define MC_CMD_AOE_OP_SET_MTU_OFFSET 0x11 -/* enum: How link state is handled */ -#define MC_CMD_AOE_OP_LINK_STATE 0x12 -/* enum: How Siena MAC statistics are reported (deprecated - use - * MC_CMD_AOE_OP_ASIC_STATS) - */ -#define MC_CMD_AOE_OP_SIENA_STATS 0x13 -/* enum: How native ASIC MAC statistics are reported - replaces the deprecated - * command MC_CMD_AOE_OP_SIENA_STATS - */ 
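Throughout this header, each register-style field is described by an *_LBN/*_WIDTH pair: LBN is the least significant bit number of the field within its 32-bit word, and WIDTH is the field's width in bits. As a hedged illustration of that convention only (the helper below is hypothetical and not part of the MCDI headers), a host-side decode of the DDR soak test status word defined above might look like this:

#include <stdint.h>

/* Hypothetical helper: extract a field from an MCDI word given its
 * LBN/WIDTH pair (assumes width < 32, as for all the fields above). */
static inline uint32_t mcdi_field(uint32_t word, unsigned int lbn,
				  unsigned int width)
{
	return (word >> lbn) & ((1u << width) - 1u);
}

/* Sketch: test the MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT status word,
 * whose bits [4:0] are relevant per the comment above. */
static int ddr_soak_passed(uint32_t status)
{
	uint32_t completed = mcdi_field(status, 2, 1); /* ..._COMPLETED_LBN */
	uint32_t passed = mcdi_field(status, 0, 1);    /* ..._PASSED_LBN */

	return completed && passed;
}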
-#define MC_CMD_AOE_OP_ASIC_STATS 0x13 -/* enum: DDR memory information */ -#define MC_CMD_AOE_OP_DDR 0x14 -/* enum: FC control */ -#define MC_CMD_AOE_OP_FC 0x15 -/* enum: DDR ECC status reads */ -#define MC_CMD_AOE_OP_DDR_ECC_STATUS 0x16 -/* enum: Commands for MC-SPI Master emulation */ -#define MC_CMD_AOE_OP_MC_SPI_MASTER 0x17 -/* enum: Commands for FC boot control */ -#define MC_CMD_AOE_OP_FC_BOOT 0x18 - -/* MC_CMD_AOE_OUT msgresponse */ -#define MC_CMD_AOE_OUT_LEN 0 - -/* MC_CMD_AOE_IN_INFO msgrequest */ -#define MC_CMD_AOE_IN_INFO_LEN 4 -#define MC_CMD_AOE_IN_CMD_OFST 0 - -/* MC_CMD_AOE_IN_CURRENTS msgrequest */ -#define MC_CMD_AOE_IN_CURRENTS_LEN 4 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ - -/* MC_CMD_AOE_IN_TEMPERATURES msgrequest */ -#define MC_CMD_AOE_IN_TEMPERATURES_LEN 4 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ - -/* MC_CMD_AOE_IN_CPLD_IDLE msgrequest */ -#define MC_CMD_AOE_IN_CPLD_IDLE_LEN 4 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ - -/* MC_CMD_AOE_IN_CPLD_READ msgrequest */ -#define MC_CMD_AOE_IN_CPLD_READ_LEN 12 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_OFST 4 -#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_OFST 8 - -/* MC_CMD_AOE_IN_CPLD_WRITE msgrequest */ -#define MC_CMD_AOE_IN_CPLD_WRITE_LEN 16 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_OFST 4 -#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_OFST 8 -#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_OFST 12 - -/* MC_CMD_AOE_IN_CPLD_INSTRUCTION msgrequest */ -#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_OFST 4 - -/* MC_CMD_AOE_IN_CPLD_REPROGRAM msgrequest */ -#define MC_CMD_AOE_IN_CPLD_REPROGRAM_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_OFST 4 -/* enum: Reprogram CPLD, poll for completion */ -#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM 0x1 -/* enum: Reprogram CPLD, send event on completion */ -#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM_EVENT 0x3 -/* enum: Get status of reprogramming operation */ -#define MC_CMD_AOE_IN_CPLD_REPROGRAM_STATUS 0x4 - -/* MC_CMD_AOE_IN_POWER msgrequest */ -#define MC_CMD_AOE_IN_POWER_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* Turn on or off AOE power */ -#define MC_CMD_AOE_IN_POWER_OP_OFST 4 -/* enum: Turn off FPGA power */ -#define MC_CMD_AOE_IN_POWER_OFF 0x0 -/* enum: Turn on FPGA power */ -#define MC_CMD_AOE_IN_POWER_ON 0x1 -/* enum: Clear peak power measurement */ -#define MC_CMD_AOE_IN_POWER_CLEAR 0x2 -/* enum: Show current power in sensors output */ -#define MC_CMD_AOE_IN_POWER_SHOW_CURRENT 0x3 -/* enum: Show peak power in sensors output */ -#define MC_CMD_AOE_IN_POWER_SHOW_PEAK 0x4 -/* enum: Show current DDR current */ -#define MC_CMD_AOE_IN_POWER_DDR_LAST 0x5 -/* enum: Show peak DDR current */ -#define MC_CMD_AOE_IN_POWER_DDR_PEAK 0x6 -/* enum: Clear peak DDR current */ -#define MC_CMD_AOE_IN_POWER_DDR_CLEAR 0x7 - -/* MC_CMD_AOE_IN_LOAD msgrequest */ -#define MC_CMD_AOE_IN_LOAD_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* Image to be loaded (0 - main or 1 - diagnostic) to load in normal sequence - */ -#define MC_CMD_AOE_IN_LOAD_IMAGE_OFST 4 - -/* MC_CMD_AOE_IN_FAN_CONTROL msgrequest */ -#define MC_CMD_AOE_IN_FAN_CONTROL_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* If non zero report measured fan RPM rather than nominal */ -#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_OFST 4 - -/* MC_CMD_AOE_IN_FAN_FAILURES msgrequest */ -#define MC_CMD_AOE_IN_FAN_FAILURES_LEN 4 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ - -/* MC_CMD_AOE_IN_MAC_STATS msgrequest */ -#define 
MC_CMD_AOE_IN_MAC_STATS_LEN 24 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* AOE port */ -#define MC_CMD_AOE_IN_MAC_STATS_PORT_OFST 4 -/* Host memory address for statistics */ -#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_OFST 8 -#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LEN 8 -#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST 8 -#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST 12 -#define MC_CMD_AOE_IN_MAC_STATS_CMD_OFST 16 -#define MC_CMD_AOE_IN_MAC_STATS_DMA_LBN 0 -#define MC_CMD_AOE_IN_MAC_STATS_DMA_WIDTH 1 -#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_LBN 1 -#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_WIDTH 1 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN 2 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_WIDTH 1 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN 3 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_WIDTH 1 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_LBN 4 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_WIDTH 1 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_LBN 5 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_WIDTH 1 -#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN 16 -#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_WIDTH 16 -/* Length of DMA data (optional) */ -#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST 20 - -/* MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO msgrequest */ -#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_LEN 12 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* AOE port */ -#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_OFST 4 -#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_OFST 8 - -/* MC_CMD_AOE_IN_JTAG_WRITE msgrequest */ -#define MC_CMD_AOE_IN_JTAG_WRITE_LENMIN 12 -#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX 252 -#define MC_CMD_AOE_IN_JTAG_WRITE_LEN(num) (8+4*(num)) -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_OFST 4 -#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_OFST 8 -#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_LEN 4 -#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MINNUM 1 -#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM 61 - -/* MC_CMD_AOE_IN_FPGA_ACCESS msgrequest */ -#define MC_CMD_AOE_IN_FPGA_ACCESS_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* Enable or disable access */ -#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_OFST 4 -/* enum: Enable access */ -#define MC_CMD_AOE_IN_FPGA_ACCESS_ENABLE 0x1 -/* enum: Disable access */ -#define MC_CMD_AOE_IN_FPGA_ACCESS_DISABLE 0x2 - -/* MC_CMD_AOE_IN_SET_MTU_OFFSET msgrequest */ -#define MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN 12 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* AOE port - when not ALL_EXTERNAL or ALL_INTERNAL specifies port number */ -#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST 4 -/* enum: Apply to all external ports */ -#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL 0x8000 -/* enum: Apply to all internal ports */ -#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_INTERNAL 0x4000 -/* The MTU offset to be applied to the external ports */ -#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST 8 - -/* MC_CMD_AOE_IN_LINK_STATE msgrequest */ -#define MC_CMD_AOE_IN_LINK_STATE_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_LINK_STATE_MODE_OFST 4 -#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_LBN 0 -#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_WIDTH 8 -/* enum: AOE and associated external port */ -#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_SEPARATE 0x0 -/* enum: AOE and OR of all external ports */ -#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_COMBINED 0x1 -/* enum: Individual ports */ -#define MC_CMD_AOE_IN_LINK_STATE_DIAGNOSTIC 0x2 -/* enum: Configure link state mode on given AOE port */ -#define MC_CMD_AOE_IN_LINK_STATE_CUSTOM 0x3 -#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_LBN 8 
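The MC_CMD_AOE_IN_MAC_STATS command word listed above packs single-bit flags (DMA, CLEAR, the PERIODIC_* controls) in bits 0-5 and a 16-bit reporting period in milliseconds starting at bit 16. A hedged sketch of composing that word from the LBN values above follows; the function and the particular flag selection are illustrative, not part of these headers:

#include <stdint.h>

/* Sketch: request periodic DMA of AOE MAC statistics every period_ms
 * milliseconds. Bit positions follow MC_CMD_AOE_IN_MAC_STATS_*_LBN. */
static uint32_t aoe_mac_stats_cmd_word(uint16_t period_ms)
{
	uint32_t cmd = 0;

	cmd |= 1u << 0;				/* ..._DMA_LBN: enable DMA */
	cmd |= 1u << 3;				/* ..._PERIODIC_ENABLE_LBN */
	cmd |= (uint32_t)period_ms << 16;	/* ..._PERIOD_MS_LBN, width 16 */
	return cmd;
}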
-#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_WIDTH 8 -/* enum: No-op */ -#define MC_CMD_AOE_IN_LINK_STATE_OP_NONE 0x0 -/* enum: logical OR of all SFP ports link status */ -#define MC_CMD_AOE_IN_LINK_STATE_OP_OR 0x1 -/* enum: logical AND of all SFP ports link status */ -#define MC_CMD_AOE_IN_LINK_STATE_OP_AND 0x2 -#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_LBN 16 -#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_WIDTH 16 - -/* MC_CMD_AOE_IN_SIENA_STATS msgrequest */ -#define MC_CMD_AOE_IN_SIENA_STATS_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* How MAC statistics are reported */ -#define MC_CMD_AOE_IN_SIENA_STATS_MODE_OFST 4 -/* enum: Statistics from Siena (default) */ -#define MC_CMD_AOE_IN_SIENA_STATS_STATS_SIENA 0x0 -/* enum: Statistics from AOE external ports */ -#define MC_CMD_AOE_IN_SIENA_STATS_STATS_AOE 0x1 - -/* MC_CMD_AOE_IN_ASIC_STATS msgrequest */ -#define MC_CMD_AOE_IN_ASIC_STATS_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* How MAC statistics are reported */ -#define MC_CMD_AOE_IN_ASIC_STATS_MODE_OFST 4 -/* enum: Statistics from the ASIC (default) */ -#define MC_CMD_AOE_IN_ASIC_STATS_STATS_ASIC 0x0 -/* enum: Statistics from AOE external ports */ -#define MC_CMD_AOE_IN_ASIC_STATS_STATS_AOE 0x1 - -/* MC_CMD_AOE_IN_DDR msgrequest */ -#define MC_CMD_AOE_IN_DDR_LEN 12 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_DDR_BANK_OFST 4 -/* Enum values, see field(s): */ -/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */ -/* Page index of SPD data */ -#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_OFST 8 - -/* MC_CMD_AOE_IN_FC msgrequest */ -#define MC_CMD_AOE_IN_FC_LEN 4 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ - -/* MC_CMD_AOE_IN_DDR_ECC_STATUS msgrequest */ -#define MC_CMD_AOE_IN_DDR_ECC_STATUS_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_OFST 4 -/* Enum values, see field(s): */ -/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */ - -/* MC_CMD_AOE_IN_MC_SPI_MASTER msgrequest */ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* Basic commands for MC SPI Master emulation. 
*/ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_OFST 4 -/* enum: MC SPI read */ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ 0x0 -/* enum: MC SPI write */ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE 0x1 - -/* MC_CMD_AOE_IN_MC_SPI_MASTER_READ msgrequest */ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_LEN 12 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_OFST 4 -#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_OFST 8 - -/* MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE msgrequest */ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_LEN 16 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_OFST 4 -#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_OFST 8 -#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_OFST 12 - -/* MC_CMD_AOE_IN_FC_BOOT msgrequest */ -#define MC_CMD_AOE_IN_FC_BOOT_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* FC boot control flags */ -#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_OFST 4 -#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_LBN 0 -#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_WIDTH 1 - -/* MC_CMD_AOE_OUT_INFO msgresponse */ -#define MC_CMD_AOE_OUT_INFO_LEN 44 -/* JTAG IDCODE of CPLD */ -#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_OFST 0 -/* Version of CPLD */ -#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_OFST 4 -/* JTAG IDCODE of FPGA */ -#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_OFST 8 -/* JTAG USERCODE of FPGA */ -#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_OFST 12 -/* FPGA type - read from CPLD straps */ -#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_OFST 16 -#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A5_C2 0x1 /* enum */ -#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A7_C2 0x2 /* enum */ -/* FPGA state (debug) */ -#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_OFST 20 -/* FPGA image - partition from which loaded */ -#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_OFST 24 -/* FC state */ -#define MC_CMD_AOE_OUT_INFO_FC_STATE_OFST 28 -/* enum: Set if watchdog working */ -#define MC_CMD_AOE_OUT_INFO_WATCHDOG 0x1 -/* enum: Set if MC-FC communications working */ -#define MC_CMD_AOE_OUT_INFO_COMMS 0x2 -/* Random pieces of information */ -#define MC_CMD_AOE_OUT_INFO_FLAGS_OFST 32 -/* enum: Power to FPGA supplied by PEG connector, not PCIe bus */ -#define MC_CMD_AOE_OUT_INFO_PEG_POWER 0x1 -/* enum: CPLD apparently good */ -#define MC_CMD_AOE_OUT_INFO_CPLD_GOOD 0x2 -/* enum: FPGA working normally */ -#define MC_CMD_AOE_OUT_INFO_FPGA_GOOD 0x4 -/* enum: FPGA is powered */ -#define MC_CMD_AOE_OUT_INFO_FPGA_POWER 0x8 -/* enum: Board has incompatible SODIMMs fitted */ -#define MC_CMD_AOE_OUT_INFO_BAD_SODIMM 0x10 -/* enum: Board has ByteBlaster connected */ -#define MC_CMD_AOE_OUT_INFO_HAS_BYTEBLASTER 0x20 -/* enum: FPGA Boot flash has an invalid header. */ -#define MC_CMD_AOE_OUT_INFO_FPGA_BAD_BOOT_HDR 0x40 -/* enum: FPGA Application flash is accessible. */ -#define MC_CMD_AOE_OUT_INFO_FPGA_APP_FLASH_GOOD 0x80 -/* Revision of Modena and Sorrento boards. Sorrento can be R1_2 or R1_3. */ -#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_OFST 36 -#define MC_CMD_AOE_OUT_INFO_UNKNOWN 0x0 /* enum */ -#define MC_CMD_AOE_OUT_INFO_R1_0 0x10 /* enum */ -#define MC_CMD_AOE_OUT_INFO_R1_1 0x11 /* enum */ -#define MC_CMD_AOE_OUT_INFO_R1_2 0x12 /* enum */ -#define MC_CMD_AOE_OUT_INFO_R1_3 0x13 /* enum */ -/* Result of FC booting - not valid while a ByteBlaster is connected. 
*/ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_OFST 40 -/* enum: No error */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_NO_ERROR 0x0 -/* enum: Bad address set in CPLD */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_ADDRESS 0x1 -/* enum: Bad header */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_MAGIC 0x2 -/* enum: Bad text section details */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_TEXT 0x3 -/* enum: Bad checksum */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4 -/* enum: Bad BSP */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5 -/* enum: Flash mode is invalid */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6 -/* enum: FC application loaded and execution attempted */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80 -/* enum: FC application Started */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_STARTED 0x81 -/* enum: No bootrom in FPGA */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_NO_BOOTROM 0xff - -/* MC_CMD_AOE_OUT_CURRENTS msgresponse */ -#define MC_CMD_AOE_OUT_CURRENTS_LEN 68 -/* Set of currents and voltages (mA or mV as appropriate) */ -#define MC_CMD_AOE_OUT_CURRENTS_VALUES_OFST 0 -#define MC_CMD_AOE_OUT_CURRENTS_VALUES_LEN 4 -#define MC_CMD_AOE_OUT_CURRENTS_VALUES_NUM 17 -#define MC_CMD_AOE_OUT_CURRENTS_I_2V5 0x0 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_1V8 0x1 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_GXB 0x2 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_PGM 0x3 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_XCVR 0x4 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_1V5 0x5 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_V_3V3 0x6 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_V_1V5 0x7 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_IN 0x8 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_OUT 0x9 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_V_IN 0xa /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR1 0xb /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR1 0xc /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR2 0xd /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR2 0xe /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR3 0xf /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR3 0x10 /* enum */ - -/* MC_CMD_AOE_OUT_TEMPERATURES msgresponse */ -#define MC_CMD_AOE_OUT_TEMPERATURES_LEN 40 -/* Set of temperatures */ -#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_OFST 0 -#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_LEN 4 -#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_NUM 10 -/* enum: The first set of enum values are for Modena code. */ -#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_0 0x0 -#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_1 0x1 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_IND_0 0x2 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_IND_1 0x3 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO1 0x4 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO2 0x5 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO3 0x6 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_PSU 0x7 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_FPGA 0x8 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SIENA 0x9 /* enum */ -/* enum: The second set of enum values are for Sorrento code. 
*/ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_0 0x0 -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_1 0x1 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_0 0x2 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_1 0x3 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_0 0x4 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_1 0x5 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_FPGA 0x6 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY0 0x7 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY1 0x8 /* enum */ - -/* MC_CMD_AOE_OUT_CPLD_READ msgresponse */ -#define MC_CMD_AOE_OUT_CPLD_READ_LEN 4 -/* The value read from the CPLD */ -#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_OFST 0 - -/* MC_CMD_AOE_OUT_FAN_FAILURES msgresponse */ -#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMIN 4 -#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX 252 -#define MC_CMD_AOE_OUT_FAN_FAILURES_LEN(num) (0+4*(num)) -/* Failure counts for each fan */ -#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_OFST 0 -#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_LEN 4 -#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MINNUM 1 -#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM 63 - -/* MC_CMD_AOE_OUT_CPLD_REPROGRAM msgresponse */ -#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_LEN 4 -/* Results of status command (only) */ -#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_OFST 0 - -/* MC_CMD_AOE_OUT_POWER_OFF msgresponse */ -#define MC_CMD_AOE_OUT_POWER_OFF_LEN 0 - -/* MC_CMD_AOE_OUT_POWER_ON msgresponse */ -#define MC_CMD_AOE_OUT_POWER_ON_LEN 0 - -/* MC_CMD_AOE_OUT_LOAD msgresponse */ -#define MC_CMD_AOE_OUT_LOAD_LEN 0 - -/* MC_CMD_AOE_OUT_MAC_STATS_DMA msgresponse */ -#define MC_CMD_AOE_OUT_MAC_STATS_DMA_LEN 0 - -/* MC_CMD_AOE_OUT_MAC_STATS_NO_DMA msgresponse: See MC_CMD_MAC_STATS_OUT_NO_DMA - * for details - */ -#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3) -#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_OFST 0 -#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN 8 -#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST 0 -#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST 4 -#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS - -/* MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO msgresponse */ -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMIN 5 -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX 252 -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LEN(num) (4+1*(num)) -/* in bytes */ -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_OFST 0 -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_OFST 4 -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_LEN 1 -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MINNUM 1 -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM 248 - -/* MC_CMD_AOE_OUT_JTAG_WRITE msgresponse */ -#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMIN 12 -#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX 252 -#define MC_CMD_AOE_OUT_JTAG_WRITE_LEN(num) (8+4*(num)) -/* Used to align the in and out data blocks so the MC can re-use the cmd */ -#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_OFST 0 -/* out bytes */ -#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_OFST 4 -#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_OFST 8 -#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_LEN 4 -#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MINNUM 1 -#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM 61 - -/* MC_CMD_AOE_OUT_FPGA_ACCESS msgresponse */ -#define MC_CMD_AOE_OUT_FPGA_ACCESS_LEN 0 - -/* MC_CMD_AOE_OUT_DDR msgresponse */ -#define MC_CMD_AOE_OUT_DDR_LENMIN 17 -#define MC_CMD_AOE_OUT_DDR_LENMAX 252 -#define 
MC_CMD_AOE_OUT_DDR_LEN(num) (16+1*(num)) -/* Information on the module. */ -#define MC_CMD_AOE_OUT_DDR_FLAGS_OFST 0 -#define MC_CMD_AOE_OUT_DDR_PRESENT_LBN 0 -#define MC_CMD_AOE_OUT_DDR_PRESENT_WIDTH 1 -#define MC_CMD_AOE_OUT_DDR_POWERED_LBN 1 -#define MC_CMD_AOE_OUT_DDR_POWERED_WIDTH 1 -#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_LBN 2 -#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_WIDTH 1 -#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_LBN 3 -#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_WIDTH 1 -/* Memory size, in MB. */ -#define MC_CMD_AOE_OUT_DDR_CAPACITY_OFST 4 -/* The memory type, as reported from SPD information */ -#define MC_CMD_AOE_OUT_DDR_TYPE_OFST 8 -/* Nominal voltage of the module (as applied) */ -#define MC_CMD_AOE_OUT_DDR_VOLTAGE_OFST 12 -/* SPD data read from the module */ -#define MC_CMD_AOE_OUT_DDR_SPD_OFST 16 -#define MC_CMD_AOE_OUT_DDR_SPD_LEN 1 -#define MC_CMD_AOE_OUT_DDR_SPD_MINNUM 1 -#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM 236 - -/* MC_CMD_AOE_OUT_SET_MTU_OFFSET msgresponse */ -#define MC_CMD_AOE_OUT_SET_MTU_OFFSET_LEN 0 - -/* MC_CMD_AOE_OUT_LINK_STATE msgresponse */ -#define MC_CMD_AOE_OUT_LINK_STATE_LEN 0 - -/* MC_CMD_AOE_OUT_SIENA_STATS msgresponse */ -#define MC_CMD_AOE_OUT_SIENA_STATS_LEN 0 - -/* MC_CMD_AOE_OUT_ASIC_STATS msgresponse */ -#define MC_CMD_AOE_OUT_ASIC_STATS_LEN 0 - -/* MC_CMD_AOE_OUT_FC msgresponse */ -#define MC_CMD_AOE_OUT_FC_LEN 0 - -/* MC_CMD_AOE_OUT_DDR_ECC_STATUS msgresponse */ -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_LEN 8 -/* Flags describing status info on the module. */ -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_OFST 0 -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_LBN 0 -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_WIDTH 1 -/* DDR ECC status on the module. */ -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_OFST 4 -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN 0 -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_WIDTH 1 -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_LBN 1 -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_WIDTH 1 -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_LBN 2 -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_WIDTH 1 -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN 8 -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH 8 -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_LBN 16 -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_WIDTH 8 -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_LBN 24 -#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_WIDTH 8 - -/* MC_CMD_AOE_OUT_MC_SPI_MASTER_READ msgresponse */ -#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_LEN 4 -#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_OFST 0 - -/* MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE msgresponse */ -#define MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE_LEN 0 - -/* MC_CMD_AOE_OUT_MC_SPI_MASTER msgresponse */ -#define MC_CMD_AOE_OUT_MC_SPI_MASTER_LEN 0 - -/* MC_CMD_AOE_OUT_FC_BOOT msgresponse */ -#define MC_CMD_AOE_OUT_FC_BOOT_LEN 0 - - /***********************************/ /* MC_CMD_PTP * Perform PTP operation @@ -3616,41 +1344,54 @@ #define MC_CMD_PTP_OP_ENABLE 0x1 /* enum: Disable PTP packet timestamping operation. */ #define MC_CMD_PTP_OP_DISABLE 0x2 -/* enum: Send a PTP packet. */ +/* enum: Send a PTP packet. This operation is used on Siena and Huntington. + * From Medford onwards it is not supported: on those platforms PTP transmit + * timestamping is done using the fast path. + */ #define MC_CMD_PTP_OP_TRANSMIT 0x3 /* enum: Read the current NIC time. */ #define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 -/* enum: Get the current PTP status. */ +/* enum: Get the current PTP status. 
Note that the clock frequency returned (in + * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666). + */ #define MC_CMD_PTP_OP_STATUS 0x5 /* enum: Adjust the PTP NIC's time. */ #define MC_CMD_PTP_OP_ADJUST 0x6 /* enum: Synchronize host and NIC time. */ #define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 -/* enum: Basic manufacturing tests. */ +/* enum: Basic manufacturing tests. Siena PTP adapters only. */ #define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 -/* enum: Packet based manufacturing tests. */ +/* enum: Packet based manufacturing tests. Siena PTP adapters only. */ #define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 /* enum: Reset some of the PTP related statistics */ #define MC_CMD_PTP_OP_RESET_STATS 0xa /* enum: Debug operations to MC. */ #define MC_CMD_PTP_OP_DEBUG 0xb -/* enum: Read an FPGA register */ +/* enum: Read an FPGA register. Siena PTP adapters only. */ #define MC_CMD_PTP_OP_FPGAREAD 0xc -/* enum: Write an FPGA register */ +/* enum: Write an FPGA register. Siena PTP adapters only. */ #define MC_CMD_PTP_OP_FPGAWRITE 0xd /* enum: Apply an offset to the NIC clock */ #define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe -/* enum: Change Apply an offset to the NIC clock */ +/* enum: Change the frequency correction applied to the NIC clock */ #define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf -/* enum: Set the MC packet filter VLAN tags for received PTP packets */ +/* enum: Set the MC packet filter VLAN tags for received PTP packets. + * Deprecated for Huntington onwards. + */ #define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10 -/* enum: Set the MC packet filter UUID for received PTP packets */ +/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for + * Huntington onwards. + */ #define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11 -/* enum: Set the MC packet filter Domain for received PTP packets */ +/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated + * for Huntington onwards. + */ #define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12 -/* enum: Set the clock source */ +/* enum: Set the clock source. Required for snapper tests on Huntington and + * Medford. Not implemented for Siena or Medford2. + */ #define MC_CMD_PTP_OP_SET_CLK_SRC 0x13 -/* enum: Reset value of Timer Reg. */ +/* enum: Reset value of Timer Reg. Not implemented. */ #define MC_CMD_PTP_OP_RST_CLK 0x14 /* enum: Enable the forwarding of PPS events to the host */ #define MC_CMD_PTP_OP_PPS_ENABLE 0x15 @@ -3671,7 +1412,7 @@ /* enum: Unsubscribe to stop receiving time events */ #define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19 /* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS - * input on the same NIC. + * input on the same NIC. Siena PTP adapters only. */ #define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a /* enum: Set the PTP sync status. Status is used by firmware to report to event @@ -3684,11 +1425,15 @@ /* MC_CMD_PTP_IN_ENABLE msgrequest */ #define MC_CMD_PTP_IN_ENABLE_LEN 16 #define MC_CMD_PTP_IN_CMD_OFST 0 +#define MC_CMD_PTP_IN_CMD_LEN 4 #define MC_CMD_PTP_IN_PERIPH_ID_OFST 4 -/* Event queue for PTP events */ +#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4 +/* Not used. Events are always sent to function relative queue 0. */ #define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8 -/* PTP timestamping mode */ +#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4 +/* PTP timestamping mode. Not used from Huntington onwards.
*/ #define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12 +#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4 /* enum: PTP, version 1 */ #define MC_CMD_PTP_MODE_V1 0x0 /* enum: PTP, version 1, with VLAN headers - deprecated */ @@ -3705,16 +1450,21 @@ /* MC_CMD_PTP_IN_DISABLE msgrequest */ #define MC_CMD_PTP_IN_DISABLE_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_TRANSMIT msgrequest */ #define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13 #define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252 #define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num)) /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Transmit packet length */ #define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8 +#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4 /* Transmit packet data */ #define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12 #define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1 @@ -3724,17 +1474,30 @@ /* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */ #define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */ +#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_STATUS msgrequest */ #define MC_CMD_PTP_IN_STATUS_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_ADJUST msgrequest */ #define MC_CMD_PTP_IN_ADJUST_LEN 24 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Frequency adjustment 40 bit fixed point ns */ #define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8 #define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8 @@ -3742,21 +1505,67 @@ #define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12 /* enum: Number of fractional bits in frequency adjustment */ #define MC_CMD_PTP_IN_ADJUST_BITS 0x28 +/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ + * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES + * field. 
+ */ +#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c /* Time adjustment in seconds */ #define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4 /* Time adjustment major value */ #define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4 /* Time adjustment in nanoseconds */ #define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4 /* Time adjustment minor value */ #define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4 + +/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */ +#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12 +/* enum: Number of fractional bits in frequency adjustment */ +/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */ +/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ + * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES + * field. + */ +/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4 +/* Upper 32bits of major time offset adjustment */ +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24 +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4 /* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */ #define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Number of time readings to capture */ #define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8 +#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4 /* Host address in which to write "synchronization started" indication (64 * bits) */ @@ -3768,42 +1577,58 @@ /* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */ #define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */ #define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Enable or disable packet testing */ #define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4 -/* MC_CMD_PTP_IN_RESET_STATS msgrequest */ +/* MC_CMD_PTP_IN_RESET_STATS msgrequest: Reset PTP statistics */ #define MC_CMD_PTP_IN_RESET_STATS_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ -/* Reset PTP statistics */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_DEBUG msgrequest */ #define MC_CMD_PTP_IN_DEBUG_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 
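The FREQ field of MC_CMD_PTP_IN_ADJUST above is a signed 64-bit fixed-point value with MC_CMD_PTP_IN_ADJUST_BITS (0x28 = 40) fractional bits, or MC_CMD_PTP_IN_ADJUST_BITS_FP44 (0x2c = 44) when the FP44_FREQ_ADJ capability is reported by MC_CMD_PTP_OUT_GET_ATTRIBUTES. Reading it as a fractional rate offset, a correction of ppb parts per billion maps to ppb * 2^bits / 10^9. The conversion below is a hedged sketch under that interpretation (the helper name is hypothetical); it splits 10^9 into 2^9 * 1953125 so the intermediate shift stays within int64_t for any realistic |ppb|:

#include <stdint.h>

/* Sketch: convert an adjustment in parts per billion to the fixed-point
 * FREQ word. fp_bits is 40 (MC_CMD_PTP_IN_ADJUST_BITS) or 44 (..._FP44).
 * 10^9 == 2^9 * 1953125, so shifting by (fp_bits - 9) and dividing by
 * 1953125 computes ppb * 2^fp_bits / 10^9 without overflowing int64_t
 * for |ppb| up to roughly 2^28. The shift is done on the magnitude to
 * avoid left-shifting a negative value, which C leaves undefined. */
static int64_t ptp_ppb_to_freq_word(int64_t ppb, unsigned int fp_bits)
{
	int64_t mag = (ppb < 0) ? -ppb : ppb;
	int64_t word = (mag << (fp_bits - 9)) / 1953125LL;

	return (ppb < 0) ? -word : word;
}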
4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Debug operations */ #define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8 +#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4 /* MC_CMD_PTP_IN_FPGAREAD msgrequest */ #define MC_CMD_PTP_IN_FPGAREAD_LEN 16 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ #define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4 #define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12 +#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4 /* MC_CMD_PTP_IN_FPGAWRITE msgrequest */ #define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13 #define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252 #define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num)) /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ #define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4 #define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12 #define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1 #define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1 @@ -3812,34 +1637,67 @@ /* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */ #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Time adjustment in seconds */ #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4 /* Time adjustment major value */ #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4 /* Time adjustment in nanoseconds */ #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4 /* Time adjustment minor value */ #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4 + +/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4 +/* Upper 32bits of major time offset adjustment */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4 /* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */ #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Frequency adjustment 40 bit fixed point ns */ #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8 #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8 #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8 #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12 -/* enum: Number of fractional bits in frequency 
adjustment */ -/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */ +/* Enum values, see field(s): */ +/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */ /* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */ #define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Number of VLAN tags, 0 if not VLAN */ #define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4 /* Set of VLAN tags to filter against */ #define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12 #define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4 @@ -3848,9 +1706,12 @@ /* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */ #define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* 1 to enable UUID filtering, 0 to disable */ #define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4 /* UUID to filter against */ #define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12 #define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8 @@ -3860,62 +1721,82 @@ /* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */ #define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* 1 to enable Domain filtering, 0 to disable */ #define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4 /* Domain number to filter against */ #define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4 /* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */ #define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Set the clock source. */ #define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8 +#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4 /* enum: Internal. */ #define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0 /* enum: External. */ #define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1 -/* MC_CMD_PTP_IN_RST_CLK msgrequest */ +/* MC_CMD_PTP_IN_RST_CLK msgrequest: Reset value of Timer Reg. */ #define MC_CMD_PTP_IN_RST_CLK_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ -/* Reset value of Timer Reg. */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */ #define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* Enable or disable */ #define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4 +#define MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4 /* enum: Enable */ #define MC_CMD_PTP_ENABLE_PPS 0x0 /* enum: Disable */ #define MC_CMD_PTP_DISABLE_PPS 0x1 -/* Queue id to send events back */ +/* Not used. Events are always sent to function relative queue 0. 
*/ #define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8 +#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4 /* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */ #define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */ #define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */ #define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */ #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Original field containing queue ID. Now extended to include flags. */ #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31 @@ -3924,29 +1805,39 @@ /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Unsubscribe options */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4 /* enum: Unsubscribe a single queue */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0 /* enum: Unsubscribe all queues */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1 /* Event queue ID */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12 +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4 /* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */ #define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* 1 to enable PPS test mode, 0 to disable and return result. */ #define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4 /* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */ #define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* NIC - Host System Clock Synchronization status */ #define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4 /* enum: Host System clock and NIC clock are not in sync */ #define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0 /* enum: Host System clock and NIC clock are synchronized */ @@ -3955,8 +1846,11 @@ * no longer in sync. 
*/ #define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4 #define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4 #define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4 /* MC_CMD_PTP_OUT msgresponse */ #define MC_CMD_PTP_OUT_LEN 0 @@ -3965,12 +1859,16 @@ #define MC_CMD_PTP_OUT_TRANSMIT_LEN 8 /* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4 /* Timestamp major value */ #define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4 /* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4 /* Timestamp minor value */ #define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4 /* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */ #define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0 @@ -3982,47 +1880,85 @@ #define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8 /* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4 /* Timestamp major value */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4 /* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4 /* Timestamp minor value */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4 + +/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4 +/* Upper 32bits of major timestamp value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4 /* MC_CMD_PTP_OUT_STATUS msgresponse */ #define MC_CMD_PTP_OUT_STATUS_LEN 64 /* Frequency of NIC's hardware clock */ #define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0 +#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4 /* Number of packets transmitted and timestamped */ #define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4 +#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4 /* Number of packets received and timestamped */ #define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8 +#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4 /* Number of packets timestamped by the FPGA */ #define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12 +#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4 /* Number of packets filter matched */ #define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16 +#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4 /* Number of packets not filter matched */ #define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20 +#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4 /* Number of PPS overflows (noise on input?) 
*/ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4 /* Number of PPS bad periods */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4 /* Minimum period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4 /* Maximum period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4 /* Last period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4 /* Mean period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4 /* Minimum offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4 /* Maximum offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4 /* Last offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4 /* Mean offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4 /* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20 @@ -4035,23 +1971,31 @@ #define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12 /* Host time immediately before NIC's hardware clock read */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4 /* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4 /* Timestamp major value */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4 /* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4 /* Timestamp minor value */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4 /* Host time immediately after NIC's hardware clock read */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4 /* Number of nanoseconds waited after reading NIC's hardware clock */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4 /* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */ #define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8 /* Results of testing */ #define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0 +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4 /* enum: Successful test */ #define MC_CMD_PTP_MANF_SUCCESS 0x0 /* enum: FPGA load failed */ @@ -4084,15 +2028,19 @@ #define MC_CMD_PTP_MANF_CLOCK_READ 0xe /* Presence of external oscillator */ #define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4 +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4 /* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */ #define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12 /* Results of testing */ #define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4 /* Number of packets received by 
FPGA */ #define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4 /* Number of packets received by Siena filters */ #define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4 /* MC_CMD_PTP_OUT_FPGAREAD msgresponse */ #define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1 @@ -4108,9 +2056,11 @@ /* Time format required/used by for this NIC. Applies to all PTP MCDI * operations that pass times between the host and firmware. If this operation * is not supported (older firmware) a format of seconds and nanoseconds should - * be assumed. + * be assumed. Note this enum is deprecated. Do not add to it- use the + * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead. */ #define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4 /* enum: Times are in seconds and nanoseconds */ #define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0 /* enum: Major register has units of 16 second per tick, minor 8 ns per tick */ @@ -4126,12 +2076,16 @@ * be assumed. */ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4 /* enum: Times are in seconds and nanoseconds */ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0 /* enum: Major register has units of 16 second per tick, minor 8 ns per tick */ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1 /* enum: Major register has units of seconds, minor 2^-27s per tick */ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2 +/* enum: Major register units are seconds, minor units are quarter nanoseconds + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3 /* Minimum acceptable value for a corrected synchronization timeset. When * comparing host and NIC clock times, the MC returns a set of samples that * contain the host start and end time, the MC time when the host start was @@ -4140,46 +2094,66 @@ * end and start times minus the time that the MC waited for host end. 
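The corrected-window rule described above reduces to one subtraction per 20-byte TIMESET sample. A sketch, assuming a get_le32() little-endian reader and host timestamps that fit the 32-bit HOSTSTART/HOSTEND fields:

#include <stdint.h>

/* Assumed helper: read a little-endian dword from an MCDI payload. */
static uint32_t get_le32(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

/* Accept one SYNCHRONIZE timeset sample iff its corrected window
 * (HOSTEND - HOSTSTART - WAITNS) is at least the SYNC_WINDOW_MIN
 * reported by GET_ATTRIBUTES. Unsigned wrap-around is ignored here. */
static int ptp_sample_ok(const uint8_t *timeset, uint32_t sync_window_min)
{
	uint32_t start = get_le32(timeset, MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST);
	uint32_t end = get_le32(timeset, MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST);
	uint32_t waitns = get_le32(timeset, MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST);

	return (end - start - waitns) >= sync_window_min;
}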
*/ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4 /* Various PTP capabilities */ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 /* Uncorrected error on PTP transmit timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4 /* Uncorrected error on PTP receive timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4 /* Uncorrected error on PPS output in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4 /* Uncorrected error on PPS input in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24 /* Uncorrected error on PTP transmit timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4 /* Uncorrected error on PTP receive timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4 /* Uncorrected error on PPS output in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4 /* Uncorrected error on PPS input in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4 /* Uncorrected error on non-PTP transmit timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4 /* Uncorrected error on non-PTP receive timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4 /* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */ #define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4 /* Results of testing */ #define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0 +#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4 /* Enum values, 
see field(s): */ /* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */ @@ -4194,14 +2168,17 @@ #define MC_CMD_CSR_READ32 0xc #undef MC_CMD_0xc_PRIVILEGE_CTG -#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_CSR_READ32_IN msgrequest */ #define MC_CMD_CSR_READ32_IN_LEN 12 /* Address */ #define MC_CMD_CSR_READ32_IN_ADDR_OFST 0 +#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4 #define MC_CMD_CSR_READ32_IN_STEP_OFST 4 +#define MC_CMD_CSR_READ32_IN_STEP_LEN 4 #define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8 +#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4 /* MC_CMD_CSR_READ32_OUT msgresponse */ #define MC_CMD_CSR_READ32_OUT_LENMIN 4 @@ -4221,7 +2198,7 @@ #define MC_CMD_CSR_WRITE32 0xd #undef MC_CMD_0xd_PRIVILEGE_CTG -#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_CSR_WRITE32_IN msgrequest */ #define MC_CMD_CSR_WRITE32_IN_LENMIN 12 @@ -4229,7 +2206,9 @@ #define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num)) /* Address */ #define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4 #define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4 +#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4 #define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8 #define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4 #define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1 @@ -4238,6 +2217,7 @@ /* MC_CMD_CSR_WRITE32_OUT msgresponse */ #define MC_CMD_CSR_WRITE32_OUT_LEN 4 #define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0 +#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4 /***********************************/ @@ -4248,7 +2228,7 @@ #define MC_CMD_HP 0x54 #undef MC_CMD_0x54_PRIVILEGE_CTG -#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_HP_IN msgrequest */ #define MC_CMD_HP_IN_LEN 16 @@ -4259,6 +2239,7 @@ * sensors. */ #define MC_CMD_HP_IN_SUBCMD_OFST 0 +#define MC_CMD_HP_IN_SUBCMD_LEN 4 /* enum: OCSD (Option Card Sensor Data) sub-command. */ #define MC_CMD_HP_IN_OCSD_SUBCMD 0x0 /* enum: Last known valid HP sub-command. */ @@ -4273,10 +2254,12 @@ * NULL.) */ #define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12 +#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4 /* MC_CMD_HP_OUT msgresponse */ #define MC_CMD_HP_OUT_LEN 4 #define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0 +#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4 /* enum: OCSD stopped for this card. */ #define MC_CMD_HP_OUT_OCSD_STOPPED 0x1 /* enum: OCSD was successfully started with the address provided. */ @@ -4323,29 +2306,35 @@ * external devices. */ #define MC_CMD_MDIO_READ_IN_BUS_OFST 0 +#define MC_CMD_MDIO_READ_IN_BUS_LEN 4 /* enum: Internal. */ #define MC_CMD_MDIO_BUS_INTERNAL 0x0 /* enum: External. */ #define MC_CMD_MDIO_BUS_EXTERNAL 0x1 /* Port address */ #define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4 +#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4 /* Device Address or clause 22. */ #define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8 +#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4 /* enum: By default all the MCDI MDIO operations perform clause45 mode. If you * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. */ #define MC_CMD_MDIO_CLAUSE22 0x20 /* Address */ #define MC_CMD_MDIO_READ_IN_ADDR_OFST 12 +#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4 /* MC_CMD_MDIO_READ_OUT msgresponse */ #define MC_CMD_MDIO_READ_OUT_LEN 8 /* Value */ #define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0 +#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4 /* Status the MDIO commands return the raw status bits from the MDIO block. A * "good" transaction should have the DONE bit set and all other bits clear. 
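Because a good transaction is exactly the DONE bit and nothing else, validating an MDIO_READ response is an equality test against MC_CMD_MDIO_STATUS_GOOD rather than a bit test. A sketch, reusing the assumed get_le32() helper from the earlier example:

#include <stdint.h>

/* Extract the register value from an MDIO_READ response, or return -1
 * if the raw status is anything other than DONE with all else clear. */
static int mdio_read_result(const uint8_t *resp, uint16_t *valuep)
{
	if (get_le32(resp, MC_CMD_MDIO_READ_OUT_STATUS_OFST) !=
	    MC_CMD_MDIO_STATUS_GOOD)
		return -1;
	*valuep = (uint16_t)get_le32(resp, MC_CMD_MDIO_READ_OUT_VALUE_OFST);
	return 0;
}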
*/ #define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4 +#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4 /* enum: Good. */ #define MC_CMD_MDIO_STATUS_GOOD 0x8 @@ -4365,22 +2354,27 @@ * external devices. */ #define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0 +#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4 /* enum: Internal. */ /* MC_CMD_MDIO_BUS_INTERNAL 0x0 */ /* enum: External. */ /* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */ /* Port address */ #define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4 +#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4 /* Device Address or clause 22. */ #define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8 +#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4 /* enum: By default all the MCDI MDIO operations perform clause45 mode. If you * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. */ /* MC_CMD_MDIO_CLAUSE22 0x20 */ /* Address */ #define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12 +#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4 /* Value */ #define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16 +#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4 /* MC_CMD_MDIO_WRITE_OUT msgresponse */ #define MC_CMD_MDIO_WRITE_OUT_LEN 4 @@ -4388,6 +2382,7 @@ * "good" transaction should have the DONE bit set and all other bits clear. */ #define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0 +#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4 /* enum: Good. */ /* MC_CMD_MDIO_STATUS_GOOD 0x8 */ @@ -4399,7 +2394,7 @@ #define MC_CMD_DBI_WRITE 0x12 #undef MC_CMD_0x12_PRIVILEGE_CTG -#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DBI_WRITE_IN msgrequest */ #define MC_CMD_DBI_WRITE_IN_LENMIN 12 @@ -4419,9 +2414,11 @@ /* MC_CMD_DBIWROP_TYPEDEF structuredef */ #define MC_CMD_DBIWROP_TYPEDEF_LEN 12 #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4 #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0 #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32 #define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4 #define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16 #define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16 #define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15 @@ -4431,6 +2428,7 @@ #define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32 #define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32 @@ -4446,13 +2444,16 @@ #define MC_CMD_PORT_READ32_IN_LEN 4 /* Address */ #define MC_CMD_PORT_READ32_IN_ADDR_OFST 0 +#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4 /* MC_CMD_PORT_READ32_OUT msgresponse */ #define MC_CMD_PORT_READ32_OUT_LEN 8 /* Value */ #define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0 +#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4 /* Status */ #define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4 +#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4 /***********************************/ @@ -4466,13 +2467,16 @@ #define MC_CMD_PORT_WRITE32_IN_LEN 8 /* Address */ #define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4 /* Value */ #define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 +#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4 /* MC_CMD_PORT_WRITE32_OUT msgresponse */ #define MC_CMD_PORT_WRITE32_OUT_LEN 4 /* Status */ #define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 +#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4 /***********************************/ @@ -4486,6 +2490,7 @@ #define MC_CMD_PORT_READ128_IN_LEN 4 /* Address */ #define MC_CMD_PORT_READ128_IN_ADDR_OFST 0 +#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4 /* MC_CMD_PORT_READ128_OUT msgresponse */ #define 
MC_CMD_PORT_READ128_OUT_LEN 20 @@ -4494,6 +2499,7 @@ #define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16 /* Status */ #define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16 +#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4 /***********************************/ @@ -4507,6 +2513,7 @@ #define MC_CMD_PORT_WRITE128_IN_LEN 20 /* Address */ #define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0 +#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4 /* Value */ #define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4 #define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16 @@ -4515,6 +2522,7 @@ #define MC_CMD_PORT_WRITE128_OUT_LEN 4 /* Status */ #define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0 +#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4 /* MC_CMD_CAPABILITIES structuredef */ #define MC_CMD_CAPABILITIES_LEN 4 @@ -4560,24 +2568,54 @@ #define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136 #define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num)) #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0 +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4 #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4 #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32 -/* See MC_CMD_CAPABILITIES */ +/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on + * EF10 and later (use MC_CMD_GET_CAPABILITIES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36 -/* See MC_CMD_CAPABILITIES */ +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4 +/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on + * EF10 and later (use MC_CMD_GET_CAPABILITIES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40 +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4 +/* Base MAC address for Siena Port0. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6 +/* Base MAC address for Siena Port1. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6 +/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4 +/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4 +/* Increment between addresses in MAC address pool for Siena Port0. Unused on + * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4 +/* Increment between addresses in MAC address pool for Siena Port1. Unused on + * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68 -/* This field contains a 16-bit value for each of the types of NVRAM area. The - * values are defined in the firmware/mc/platform/.c file for a specific board - * type, but otherwise have no meaning to the MC; they are used by the driver - * to manage selection of appropriate firmware updates. +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4 +/* Siena only. This field contains a 16-bit value for each of the types of + * NVRAM area. 
The values are defined in the firmware/mc/platform/.c file for a + * specific board type, but otherwise have no meaning to the MC; they are used + * by the driver to manage selection of appropriate firmware updates. Unused on + * EF10 and later (use MC_CMD_NVRAM_METADATA). */ #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72 #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2 @@ -4592,7 +2630,7 @@ #define MC_CMD_DBI_READX 0x19 #undef MC_CMD_0x19_PRIVILEGE_CTG -#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DBI_READX_IN msgrequest */ #define MC_CMD_DBI_READX_IN_LENMIN 8 @@ -4619,9 +2657,11 @@ /* MC_CMD_DBIRDOP_TYPEDEF structuredef */ #define MC_CMD_DBIRDOP_TYPEDEF_LEN 8 #define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4 #define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0 #define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32 #define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4 #define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16 #define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16 #define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15 @@ -4639,7 +2679,7 @@ #define MC_CMD_SET_RAND_SEED 0x1a #undef MC_CMD_0x1a_PRIVILEGE_CTG -#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SET_RAND_SEED_IN msgrequest */ #define MC_CMD_SET_RAND_SEED_IN_LEN 16 @@ -4689,14 +2729,25 @@ #define MC_CMD_DRV_ATTACH_IN_LEN 12 /* new state to set if UPDATE=1 */ #define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4 #define MC_CMD_DRV_ATTACH_LBN 0 #define MC_CMD_DRV_ATTACH_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1 #define MC_CMD_DRV_PREBOOT_LBN 1 #define MC_CMD_DRV_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1 /* 1 to set new state, or 0 to just report the existing state */ #define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 +#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4 /* preferred datapath firmware (for Huntington; ignored for Siena) */ #define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8 +#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4 /* enum: Prefer to use full featured firmware */ #define MC_CMD_FW_FULL_FEATURED 0x0 /* enum: Prefer to use firmware with fewer features but lower latency */ @@ -4713,20 +2764,35 @@ * support */ #define MC_CMD_FW_RULES_ENGINE 0x5 +/* enum: Prefer to use firmware with additional DPDK support */ +#define MC_CMD_FW_DPDK 0x6 +/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and + * bug69716) + */ +#define MC_CMD_FW_L3XUDP 0x7 +/* enum: Requests that the MC keep whatever datapath firmware is currently + * running. It's used for test purposes, where we want to be able to shmboot + * special test firmware variants. This option is only recognised in eftest + * (i.e. non-production) builds. 
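The Siena MAC pool fields above describe COUNT addresses starting at BASE and spaced STRIDE apart. A sketch of deriving the n-th address, under the assumption (not spelled out in this header) that the 6-byte address increments as a big-endian 48-bit integer:

#include <stdint.h>

/* Compute the n'th MAC address in a per-port pool: base + n * stride,
 * treating the 6-byte address as a big-endian 48-bit integer. */
static int board_cfg_nth_mac(const uint8_t base[6], uint32_t count,
			     uint32_t stride, uint32_t n, uint8_t mac[6])
{
	uint64_t a = 0;
	int i;

	if (n >= count)
		return -1;
	for (i = 0; i < 6; i++)
		a = (a << 8) | base[i];
	a += (uint64_t)n * stride;
	for (i = 5; i >= 0; i--) {
		mac[i] = (uint8_t)a;
		a >>= 8;
	}
	return 0;
}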
+ */ +#define MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe /* enum: Only this option is allowed for non-admin functions */ -#define MC_CMD_FW_DONT_CARE 0xffffffff +#define MC_CMD_FW_DONT_CARE 0xffffffff /* MC_CMD_DRV_ATTACH_OUT msgresponse */ #define MC_CMD_DRV_ATTACH_OUT_LEN 4 /* previous or existing state, see the bitmask at NEW_STATE */ #define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4 /* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */ #define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8 /* previous or existing state, see the bitmask at NEW_STATE */ #define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4 /* Flags associated with this function */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4 +#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4 /* enum: Labels the lowest-numbered function visible to the OS */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0 /* enum: The function can control the link state of the physical port it is @@ -4739,6 +2805,11 @@ * refers to the Sorrento external FPGA port. */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3 +/* enum: If set, indicates that VI spreading is currently enabled. Will always + * indicate the current state, regardless of the value in the WANT_VI_SPREADING + * input. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4 /***********************************/ @@ -4751,6 +2822,7 @@ #define MC_CMD_SHMUART_IN_LEN 4 /* ??? */ #define MC_CMD_SHMUART_IN_FLAG_OFST 0 +#define MC_CMD_SHMUART_IN_FLAG_LEN 4 /* MC_CMD_SHMUART_OUT msgresponse */ #define MC_CMD_SHMUART_OUT_LEN 0 @@ -4789,6 +2861,7 @@ * (TBD). */ #define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0 +#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4 #define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0 #define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1 @@ -4806,8 +2879,10 @@ #define MC_CMD_PCIE_CREDITS_IN_LEN 8 /* poll period. 
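Combining the NEW_STATE flag bits with the FIRMWARE_ID enum, an attach request that prefers the DPDK datapath variant might be built as below; put_le32() is the assumed helper from earlier, and non-admin functions must pass MC_CMD_FW_DONT_CARE instead. Whether VI spreading actually took effect is reported back via the VI_SPREADING_ENABLED flag in MC_CMD_DRV_ATTACH_EXT_OUT.

#include <stdint.h>

/* Build a DRV_ATTACH request: attach, optionally ask for VI spreading,
 * set UPDATE=1 so the new state is applied, and prefer DPDK firmware. */
static void drv_attach_build(uint8_t *buf, int want_vi_spreading)
{
	uint32_t state = 1u << MC_CMD_DRV_ATTACH_IN_ATTACH_LBN;

	if (want_vi_spreading)
		state |= 1u << MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN;
	put_le32(buf, MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST, state);
	put_le32(buf, MC_CMD_DRV_ATTACH_IN_UPDATE_OFST, 1);
	put_le32(buf, MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST, MC_CMD_FW_DPDK);
}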
0 is disabled */ #define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0 +#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4 /* wipe statistics */ #define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4 +#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4 /* MC_CMD_PCIE_CREDITS_OUT msgresponse */ #define MC_CMD_PCIE_CREDITS_OUT_LEN 16 @@ -4838,31 +2913,54 @@ /* MC_CMD_RXD_MONITOR_IN msgrequest */ #define MC_CMD_RXD_MONITOR_IN_LEN 12 #define MC_CMD_RXD_MONITOR_IN_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4 #define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4 +#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4 #define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8 +#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4 /* MC_CMD_RXD_MONITOR_OUT msgresponse */ #define MC_CMD_RXD_MONITOR_OUT_LEN 80 #define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48 +#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4 /***********************************/ @@ -4872,13 +2970,14 @@ #define MC_CMD_PUTS 0x23 #undef MC_CMD_0x23_PRIVILEGE_CTG -#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_PUTS_IN msgrequest */ #define MC_CMD_PUTS_IN_LENMIN 13 #define MC_CMD_PUTS_IN_LENMAX 252 #define MC_CMD_PUTS_IN_LEN(num) (12+1*(num)) #define MC_CMD_PUTS_IN_DEST_OFST 0 +#define MC_CMD_PUTS_IN_DEST_LEN 4 #define MC_CMD_PUTS_IN_UART_LBN 0 #define MC_CMD_PUTS_IN_UART_WIDTH 1 #define MC_CMD_PUTS_IN_PORT_LBN 1 @@ -4911,6 +3010,7 @@ #define MC_CMD_GET_PHY_CFG_OUT_LEN 72 /* flags */ #define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4 #define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0 #define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1 #define 
MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1 @@ -4927,8 +3027,10 @@ #define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 +#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4 /* Bitmask of supported capabilities */ #define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 +#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4 #define MC_CMD_PHY_CAP_10HDX_LBN 1 #define MC_CMD_PHY_CAP_10HDX_WIDTH 1 #define MC_CMD_PHY_CAP_10FDX_LBN 2 @@ -4953,17 +3055,39 @@ #define MC_CMD_PHY_CAP_40000FDX_WIDTH 1 #define MC_CMD_PHY_CAP_DDM_LBN 12 #define MC_CMD_PHY_CAP_DDM_WIDTH 1 +#define MC_CMD_PHY_CAP_100000FDX_LBN 13 +#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_25000FDX_LBN 14 +#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_50000FDX_LBN 15 +#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16 +#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_LBN 18 +#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19 +#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 +#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16 +#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20 +#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24 #define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44 +#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4 /* enum: Xaui. */ #define MC_CMD_MEDIA_XAUI 0x1 /* enum: CX4. */ @@ -4979,6 +3103,7 @@ /* enum: QSFP+. */ #define MC_CMD_MEDIA_QSFP_PLUS 0x7 #define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 +#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4 /* enum: Native clause 22 */ #define MC_CMD_MMD_CLAUSE22 0x0 #define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */ @@ -5004,12 +3129,13 @@ #define MC_CMD_START_BIST 0x25 #undef MC_CMD_0x25_PRIVILEGE_CTG -#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_START_BIST_IN msgrequest */ #define MC_CMD_START_BIST_IN_LEN 4 /* Type of test. */ #define MC_CMD_START_BIST_IN_TYPE_OFST 0 +#define MC_CMD_START_BIST_IN_TYPE_LEN 4 /* enum: Run the PHY's short cable BIST. */ #define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 /* enum: Run the PHY's long cable BIST. */ @@ -5043,7 +3169,7 @@ #define MC_CMD_POLL_BIST 0x26 #undef MC_CMD_0x26_PRIVILEGE_CTG -#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_POLL_BIST_IN msgrequest */ #define MC_CMD_POLL_BIST_IN_LEN 0 @@ -5052,6 +3178,7 @@ #define MC_CMD_POLL_BIST_OUT_LEN 8 /* result */ #define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 +#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 /* enum: Running. */ #define MC_CMD_POLL_BIST_RUNNING 0x1 /* enum: Passed. */ @@ -5061,19 +3188,26 @@ /* enum: Timed-out. 
*/ #define MC_CMD_POLL_BIST_TIMEOUT 0x4 #define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 +#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4 /* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */ #define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36 /* result */ /* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4 /* Status of each channel A */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4 /* enum: Ok. */ #define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 /* enum: Open. */ @@ -5086,14 +3220,17 @@ #define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 /* Status of each channel B */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4 /* Enum values, see field(s): */ /* CABLE_STATUS_A */ /* Status of each channel C */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4 /* Enum values, see field(s): */ /* CABLE_STATUS_A */ /* Status of each channel D */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4 /* Enum values, see field(s): */ /* CABLE_STATUS_A */ @@ -5101,9 +3238,11 @@ #define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 /* result */ /* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ #define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4 +#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4 /* enum: Complete. */ #define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 /* enum: Bus switch off I2C write. */ @@ -5127,9 +3266,11 @@ #define MC_CMD_POLL_BIST_OUT_MEM_LEN 36 /* result */ /* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ #define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4 +#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4 /* enum: Test has completed. */ #define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0 /* enum: RAM test - walk ones. */ @@ -5146,8 +3287,10 @@ #define MC_CMD_POLL_BIST_MEM_ECC 0x6 /* Failure address, only valid if result is POLL_BIST_FAILED */ #define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8 +#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4 /* Bus or address space to which the failure address corresponds */ #define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12 +#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4 /* enum: MC MIPS bus. */ #define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0 /* enum: CSR IREG bus. 
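Since the MEM variant's ADDR/EXPECT/ACTUAL fields are only valid when RESULT is POLL_BIST_FAILED, a decoder should gate on the result code first. A sketch (get_le32() as assumed earlier):

#include <stdint.h>
#include <stdio.h>

/* Report a memory BIST failure; silently ignore RUNNING/PASSED/TIMEOUT
 * since the failure-detail fields are undefined in those cases. */
static void poll_bist_mem_report(const uint8_t *resp)
{
	if (get_le32(resp, MC_CMD_POLL_BIST_OUT_RESULT_OFST) !=
	    MC_CMD_POLL_BIST_FAILED)
		return;
	printf("BIST fail: test %u addr 0x%08x expect 0x%08x actual 0x%08x\n",
	       (unsigned int)get_le32(resp, MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST),
	       (unsigned int)get_le32(resp, MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST),
	       (unsigned int)get_le32(resp, MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST),
	       (unsigned int)get_le32(resp, MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST));
}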
*/ @@ -5168,14 +3311,19 @@ #define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8 /* Pattern written to RAM / register */ #define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16 +#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4 /* Actual value read from RAM / register */ #define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20 +#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4 /* ECC error mask */ #define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4 /* ECC parity error mask */ #define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4 /* ECC fatal error mask */ #define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4 /***********************************/ @@ -5222,83 +3370,83 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0 #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4 /* enum: None. */ -#define MC_CMD_LOOPBACK_NONE 0x0 +#define MC_CMD_LOOPBACK_NONE 0x0 /* enum: Data. */ -#define MC_CMD_LOOPBACK_DATA 0x1 +#define MC_CMD_LOOPBACK_DATA 0x1 /* enum: GMAC. */ -#define MC_CMD_LOOPBACK_GMAC 0x2 +#define MC_CMD_LOOPBACK_GMAC 0x2 /* enum: XGMII. */ #define MC_CMD_LOOPBACK_XGMII 0x3 /* enum: XGXS. */ -#define MC_CMD_LOOPBACK_XGXS 0x4 +#define MC_CMD_LOOPBACK_XGXS 0x4 /* enum: XAUI. */ -#define MC_CMD_LOOPBACK_XAUI 0x5 +#define MC_CMD_LOOPBACK_XAUI 0x5 /* enum: GMII. */ -#define MC_CMD_LOOPBACK_GMII 0x6 +#define MC_CMD_LOOPBACK_GMII 0x6 /* enum: SGMII. */ -#define MC_CMD_LOOPBACK_SGMII 0x7 +#define MC_CMD_LOOPBACK_SGMII 0x7 /* enum: XGBR. */ -#define MC_CMD_LOOPBACK_XGBR 0x8 +#define MC_CMD_LOOPBACK_XGBR 0x8 /* enum: XFI. */ -#define MC_CMD_LOOPBACK_XFI 0x9 +#define MC_CMD_LOOPBACK_XFI 0x9 /* enum: XAUI Far. */ -#define MC_CMD_LOOPBACK_XAUI_FAR 0xa +#define MC_CMD_LOOPBACK_XAUI_FAR 0xa /* enum: GMII Far. */ -#define MC_CMD_LOOPBACK_GMII_FAR 0xb +#define MC_CMD_LOOPBACK_GMII_FAR 0xb /* enum: SGMII Far. */ -#define MC_CMD_LOOPBACK_SGMII_FAR 0xc +#define MC_CMD_LOOPBACK_SGMII_FAR 0xc /* enum: XFI Far. */ -#define MC_CMD_LOOPBACK_XFI_FAR 0xd +#define MC_CMD_LOOPBACK_XFI_FAR 0xd /* enum: GPhy. */ -#define MC_CMD_LOOPBACK_GPHY 0xe +#define MC_CMD_LOOPBACK_GPHY 0xe /* enum: PhyXS. */ -#define MC_CMD_LOOPBACK_PHYXS 0xf +#define MC_CMD_LOOPBACK_PHYXS 0xf /* enum: PCS. */ -#define MC_CMD_LOOPBACK_PCS 0x10 +#define MC_CMD_LOOPBACK_PCS 0x10 /* enum: PMA-PMD. */ -#define MC_CMD_LOOPBACK_PMAPMD 0x11 +#define MC_CMD_LOOPBACK_PMAPMD 0x11 /* enum: Cross-Port. */ -#define MC_CMD_LOOPBACK_XPORT 0x12 +#define MC_CMD_LOOPBACK_XPORT 0x12 /* enum: XGMII-Wireside. */ -#define MC_CMD_LOOPBACK_XGMII_WS 0x13 +#define MC_CMD_LOOPBACK_XGMII_WS 0x13 /* enum: XAUI Wireside. */ -#define MC_CMD_LOOPBACK_XAUI_WS 0x14 +#define MC_CMD_LOOPBACK_XAUI_WS 0x14 /* enum: XAUI Wireside Far. */ -#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 +#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 /* enum: XAUI Wireside near. */ -#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 +#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 /* enum: GMII Wireside. */ -#define MC_CMD_LOOPBACK_GMII_WS 0x17 +#define MC_CMD_LOOPBACK_GMII_WS 0x17 /* enum: XFI Wireside. */ -#define MC_CMD_LOOPBACK_XFI_WS 0x18 +#define MC_CMD_LOOPBACK_XFI_WS 0x18 /* enum: XFI Wireside Far. */ -#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 +#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 /* enum: PhyXS Wireside. */ -#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a +#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a /* enum: PMA lanes MAC-Serdes. 
*/ -#define MC_CMD_LOOPBACK_PMA_INT 0x1b +#define MC_CMD_LOOPBACK_PMA_INT 0x1b /* enum: KR Serdes Parallel (Encoder). */ -#define MC_CMD_LOOPBACK_SD_NEAR 0x1c +#define MC_CMD_LOOPBACK_SD_NEAR 0x1c /* enum: KR Serdes Serial. */ -#define MC_CMD_LOOPBACK_SD_FAR 0x1d +#define MC_CMD_LOOPBACK_SD_FAR 0x1d /* enum: PMA lanes MAC-Serdes Wireside. */ -#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e +#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e /* enum: KR Serdes Parallel Wireside (Full PCS). */ -#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f +#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f /* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */ -#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 +#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 /* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ -#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21 +#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21 /* enum: KR Serdes Serial Wireside. */ -#define MC_CMD_LOOPBACK_SD_FES_WS 0x22 +#define MC_CMD_LOOPBACK_SD_FES_WS 0x22 /* enum: Near side of AOE Siena side port */ -#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 +#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 /* enum: Medford Wireside datapath loopback */ -#define MC_CMD_LOOPBACK_DATA_WS 0x24 +#define MC_CMD_LOOPBACK_DATA_WS 0x24 /* enum: Force link up without setting up any physical loopback (snapper use * only) */ -#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 +#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 /* Supported loopbacks. */ #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8 @@ -5328,6 +3476,174 @@ /* Enum values, see field(s): */ /* 100M */ +/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for + * newer NICs with 25G/50G/100G support + */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4 +/* enum: None. */ +/* MC_CMD_LOOPBACK_NONE 0x0 */ +/* enum: Data. */ +/* MC_CMD_LOOPBACK_DATA 0x1 */ +/* enum: GMAC. */ +/* MC_CMD_LOOPBACK_GMAC 0x2 */ +/* enum: XGMII. */ +/* MC_CMD_LOOPBACK_XGMII 0x3 */ +/* enum: XGXS. */ +/* MC_CMD_LOOPBACK_XGXS 0x4 */ +/* enum: XAUI. */ +/* MC_CMD_LOOPBACK_XAUI 0x5 */ +/* enum: GMII. */ +/* MC_CMD_LOOPBACK_GMII 0x6 */ +/* enum: SGMII. */ +/* MC_CMD_LOOPBACK_SGMII 0x7 */ +/* enum: XGBR. */ +/* MC_CMD_LOOPBACK_XGBR 0x8 */ +/* enum: XFI. */ +/* MC_CMD_LOOPBACK_XFI 0x9 */ +/* enum: XAUI Far. */ +/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */ +/* enum: GMII Far. */ +/* MC_CMD_LOOPBACK_GMII_FAR 0xb */ +/* enum: SGMII Far. */ +/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */ +/* enum: XFI Far. */ +/* MC_CMD_LOOPBACK_XFI_FAR 0xd */ +/* enum: GPhy. */ +/* MC_CMD_LOOPBACK_GPHY 0xe */ +/* enum: PhyXS. */ +/* MC_CMD_LOOPBACK_PHYXS 0xf */ +/* enum: PCS. */ +/* MC_CMD_LOOPBACK_PCS 0x10 */ +/* enum: PMA-PMD. */ +/* MC_CMD_LOOPBACK_PMAPMD 0x11 */ +/* enum: Cross-Port. */ +/* MC_CMD_LOOPBACK_XPORT 0x12 */ +/* enum: XGMII-Wireside. */ +/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */ +/* enum: XAUI Wireside. */ +/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */ +/* enum: XAUI Wireside Far. */ +/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */ +/* enum: XAUI Wireside near. */ +/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */ +/* enum: GMII Wireside. */ +/* MC_CMD_LOOPBACK_GMII_WS 0x17 */ +/* enum: XFI Wireside. */ +/* MC_CMD_LOOPBACK_XFI_WS 0x18 */ +/* enum: XFI Wireside Far. */ +/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */ +/* enum: PhyXS Wireside. 
*/ +/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */ +/* enum: PMA lanes MAC-Serdes. */ +/* MC_CMD_LOOPBACK_PMA_INT 0x1b */ +/* enum: KR Serdes Parallel (Encoder). */ +/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */ +/* enum: KR Serdes Serial. */ +/* MC_CMD_LOOPBACK_SD_FAR 0x1d */ +/* enum: PMA lanes MAC-Serdes Wireside. */ +/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */ +/* enum: KR Serdes Parallel Wireside (Full PCS). */ +/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */ +/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */ +/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */ +/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ +/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */ +/* enum: KR Serdes Serial Wireside. */ +/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */ +/* enum: Near side of AOE Siena side port */ +/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */ +/* enum: Medford Wireside datapath loopback */ +/* MC_CMD_LOOPBACK_DATA_WS 0x24 */ +/* enum: Force link up without setting up any physical loopback (snapper use + * only) + */ +/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 25G loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 50 loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 100G loopbacks. 
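Each per-speed field in GET_LOOPBACK_MODES_OUT_V2 is a 64-bit mask split into LO/HI dwords, under the conventional reading that bit N corresponds to MC_CMD_LOOPBACK_* enum value N. A sketch for testing the 25G mask (get_le32() as assumed earlier):

#include <stdint.h>

/* Return non-zero if loopback mode `mode` is supported at 25G. */
static int loopback_25g_supported(const uint8_t *resp, unsigned int mode)
{
	uint64_t mask =
	    (uint64_t)get_le32(resp, MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST) |
	    ((uint64_t)get_le32(resp, MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST) << 32);

	return (int)((mask >> mode) & 1);
}

For example, loopback_25g_supported(resp, MC_CMD_LOOPBACK_PMA_INT) tests whether PMA-internal loopback is available at 25G.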
*/ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60 +/* Enum values, see field(s): */ +/* 100M */ + +/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */ +#define AN_TYPE_LEN 4 +#define AN_TYPE_TYPE_OFST 0 +#define AN_TYPE_TYPE_LEN 4 +/* enum: None, AN disabled or not supported */ +#define MC_CMD_AN_NONE 0x0 +/* enum: Clause 28 - BASE-T */ +#define MC_CMD_AN_CLAUSE28 0x1 +/* enum: Clause 37 - BASE-X */ +#define MC_CMD_AN_CLAUSE37 0x2 +/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable + * assemblies. Includes Clause 72/Clause 92 link-training. + */ +#define MC_CMD_AN_CLAUSE73 0x3 +#define AN_TYPE_TYPE_LBN 0 +#define AN_TYPE_TYPE_WIDTH 32 + +/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3 + */ +#define FEC_TYPE_LEN 4 +#define FEC_TYPE_TYPE_OFST 0 +#define FEC_TYPE_TYPE_LEN 4 +/* enum: No FEC */ +#define MC_CMD_FEC_NONE 0x0 +/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */ +#define MC_CMD_FEC_BASER 0x1 +/* enum: Clause 91/Clause 108 Reed-Solomon FEC */ +#define MC_CMD_FEC_RS 0x2 +#define FEC_TYPE_TYPE_LBN 0 +#define FEC_TYPE_TYPE_WIDTH 32 + /***********************************/ /* MC_CMD_GET_LINK @@ -5344,19 +3660,28 @@ /* MC_CMD_GET_LINK_OUT msgresponse */ #define MC_CMD_GET_LINK_OUT_LEN 28 -/* near-side advertised capabilities */ +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ #define MC_CMD_GET_LINK_OUT_CAP_OFST 0 -/* link-partner advertised capabilities */ +#define MC_CMD_GET_LINK_OUT_CAP_LEN 4 +/* Link-partner advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ #define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 +#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4 /* Autonegotiated speed in mbit/s. The link may still be down even if this * reads non-zero. */ #define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 +#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4 /* Current loopback setting. */ #define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 +#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ #define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4 #define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0 #define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1 #define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1 @@ -5371,9 +3696,11 @@ #define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 /* This returns the negotiated flow control value. */ #define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 +#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ #define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 +#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 #define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 @@ -5383,6 +3710,97 @@ #define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 #define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 +/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */ +#define MC_CMD_GET_LINK_OUT_V2_LEN 44 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. 
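All the *_LBN/*_WIDTH pairs in this file describe bitfields, so extraction is a shift and a mask. A generic helper (valid for widths below 32), shown against the GET_LINK FLAGS dword; the MCDI_FIELD name is an illustrative assumption:

#include <stdint.h>

/* Extract a bitfield named by its _LBN/_WIDTH macro pair from a dword. */
#define MCDI_FIELD(dword, name) \
	(((dword) >> name ## _LBN) & ((1u << name ## _WIDTH) - 1))

/* Example: test link state from a GET_LINK response's FLAGS dword. */
static int link_is_up(uint32_t flags)
{
	return MCDI_FIELD(flags, MC_CMD_GET_LINK_OUT_LINK_UP);
}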
+ */ +#define MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0 +#define MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4 +/* Link-partner advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4 +#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4 +/* Autonegotiated speed in mbit/s. The link may still be down even if this + * reads non-zero. + */ +#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8 +#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4 +/* Current loopback setting. */ +#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12 +#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1 +/* This returns the negotiated flow control value. */ +#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20 +#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24 +#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4 +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */ +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */ +/* True local device capabilities (taking into account currently used PMD/MDI, + * e.g. plugged-in module). In general, subset of + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP, but may include extra _FEC_REQUEST + * bits, if the PMD requires FEC. 0 if unknown (e.g. module unplugged). Equal + * to SUPPORTED_CAP for non-pluggable PMDs. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. 
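Putting the SUPPORTED_CAP bits to work: under the usual reading that the plain FEC bits advertise ability while the _REQUESTED bits ask the firmware to apply that FEC, a SET_LINK capability word requesting 25G with RS-FEC could be assembled as below (MC_CMD_SET_LINK_IN_CAP_OFST appears a little further down; put_le32() as assumed earlier). This interpretation is an assumption, not stated in this header.

#include <stdint.h>

/* Advertise 25G full duplex and request Reed-Solomon FEC on the link. */
static void set_link_request_25g_rs_fec(uint8_t *buf)
{
	uint32_t cap = (1u << MC_CMD_PHY_CAP_25000FDX_LBN) |
		       (1u << MC_CMD_PHY_CAP_RS_FEC_LBN) |
		       (1u << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);

	put_le32(buf, MC_CMD_SET_LINK_IN_CAP_OFST, cap);
}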
+ */ +#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_OFST 28 +#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_LEN 4 +/* Auto-negotiation type used on the link */ +#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_OFST 32 +#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* AN_TYPE/TYPE */ +/* Forward error correction used on the link */ +#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_OFST 36 +#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* FEC_TYPE/TYPE */ +#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1 + /***********************************/ /* MC_CMD_SET_LINK @@ -5396,10 +3814,14 @@ /* MC_CMD_SET_LINK_IN msgrequest */ #define MC_CMD_SET_LINK_IN_LEN 16 -/* ??? */ +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ #define MC_CMD_SET_LINK_IN_CAP_OFST 0 +#define MC_CMD_SET_LINK_IN_CAP_LEN 4 /* Flags */ #define MC_CMD_SET_LINK_IN_FLAGS_OFST 4 +#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4 #define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0 #define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1 #define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1 @@ -5408,12 +3830,14 @@ #define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1 /* Loopback mode. */ #define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8 +#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ /* A loopback speed of "0" is supported, and means (choose any available * speed). */ #define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12 +#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4 /* MC_CMD_SET_LINK_OUT msgresponse */ #define MC_CMD_SET_LINK_OUT_LEN 0 @@ -5432,9 +3856,10 @@ #define MC_CMD_SET_ID_LED_IN_LEN 4 /* Set LED state. */ #define MC_CMD_SET_ID_LED_IN_STATE_OFST 0 -#define MC_CMD_LED_OFF 0x0 /* enum */ -#define MC_CMD_LED_ON 0x1 /* enum */ -#define MC_CMD_LED_DEFAULT 0x2 /* enum */ +#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4 +#define MC_CMD_LED_OFF 0x0 /* enum */ +#define MC_CMD_LED_ON 0x1 /* enum */ +#define MC_CMD_LED_DEFAULT 0x2 /* enum */ /* MC_CMD_SET_ID_LED_OUT msgresponse */ #define MC_CMD_SET_ID_LED_OUT_LEN 0 @@ -5455,17 +3880,21 @@ * EtherII, VLAN, bug16011 padding). 
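The EXT_FLAGS bits above form a rough bring-up ladder from the MDI inwards, so reporting the first stage that has not come up helps localize a link failure. A diagnostic sketch (treating the bits as a strict ladder is an assumption; HI_BER is skipped because it is a fault indication rather than a stage):

#include <stdint.h>
#include <stddef.h>

/* Name the first link bring-up stage that is not yet up, or NULL. */
static const char *link_v2_first_failure(uint32_t ext_flags)
{
	static const struct {
		unsigned int lbn;
		const char *name;
	} stages[] = {
		{ MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN, "no module/MDI" },
		{ MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN, "PMD not ready" },
		{ MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN, "PMD link down" },
		{ MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN, "PMA link down" },
		{ MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN, "no PCS lock" },
		{ MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN, "no align lock" },
		{ MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN, "no FEC lock" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(stages) / sizeof(stages[0]); i++)
		if (!((ext_flags >> stages[i].lbn) & 1))
			return stages[i].name;
	return NULL; /* all stages up */
}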
*/ #define MC_CMD_SET_MAC_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_IN_MTU_LEN 4 #define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4 #define MC_CMD_SET_MAC_IN_ADDR_OFST 8 #define MC_CMD_SET_MAC_IN_ADDR_LEN 8 #define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8 #define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12 #define MC_CMD_SET_MAC_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_IN_REJECT_LEN 4 #define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0 #define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1 #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 #define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 +#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4 /* enum: Flow control is off. */ #define MC_CMD_FCNTL_OFF 0x0 /* enum: Respond to flow control. */ @@ -5479,6 +3908,7 @@ /* enum: Issue flow control. */ #define MC_CMD_FCNTL_GENERATE 0x5 #define MC_CMD_SET_MAC_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4 #define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0 #define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1 @@ -5488,17 +3918,21 @@ * EtherII, VLAN, bug16011 padding). */ #define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8 #define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8 #define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8 #define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12 #define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0 #define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1 #define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1 #define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1 #define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20 +#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4 /* enum: Flow control is off. */ /* MC_CMD_FCNTL_OFF 0x0 */ /* enum: Respond to flow control. */ @@ -5512,6 +3946,7 @@ /* enum: Issue flow control. */ /* MC_CMD_FCNTL_GENERATE 0x5 */ #define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0 #define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1 /* Select which parameters to configure. A parameter will only be modified if @@ -5520,6 +3955,7 @@ * set). */ #define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0 #define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1 #define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1 @@ -5541,6 +3977,7 @@ * to 0. */ #define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0 +#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4 /***********************************/ @@ -5574,53 +4011,53 @@ #define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4 #define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS /* enum: OUI. */ -#define MC_CMD_OUI 0x0 +#define MC_CMD_OUI 0x0 /* enum: PMA-PMD Link Up. */ -#define MC_CMD_PMA_PMD_LINK_UP 0x1 +#define MC_CMD_PMA_PMD_LINK_UP 0x1 /* enum: PMA-PMD RX Fault. */ -#define MC_CMD_PMA_PMD_RX_FAULT 0x2 +#define MC_CMD_PMA_PMD_RX_FAULT 0x2 /* enum: PMA-PMD TX Fault. */ -#define MC_CMD_PMA_PMD_TX_FAULT 0x3 +#define MC_CMD_PMA_PMD_TX_FAULT 0x3 /* enum: PMA-PMD Signal */ -#define MC_CMD_PMA_PMD_SIGNAL 0x4 +#define MC_CMD_PMA_PMD_SIGNAL 0x4 /* enum: PMA-PMD SNR A. */ -#define MC_CMD_PMA_PMD_SNR_A 0x5 +#define MC_CMD_PMA_PMD_SNR_A 0x5 /* enum: PMA-PMD SNR B. */ -#define MC_CMD_PMA_PMD_SNR_B 0x6 +#define MC_CMD_PMA_PMD_SNR_B 0x6 /* enum: PMA-PMD SNR C. 
*/ -#define MC_CMD_PMA_PMD_SNR_C 0x7 +#define MC_CMD_PMA_PMD_SNR_C 0x7 /* enum: PMA-PMD SNR D. */ -#define MC_CMD_PMA_PMD_SNR_D 0x8 +#define MC_CMD_PMA_PMD_SNR_D 0x8 /* enum: PCS Link Up. */ -#define MC_CMD_PCS_LINK_UP 0x9 +#define MC_CMD_PCS_LINK_UP 0x9 /* enum: PCS RX Fault. */ -#define MC_CMD_PCS_RX_FAULT 0xa +#define MC_CMD_PCS_RX_FAULT 0xa /* enum: PCS TX Fault. */ -#define MC_CMD_PCS_TX_FAULT 0xb +#define MC_CMD_PCS_TX_FAULT 0xb /* enum: PCS BER. */ -#define MC_CMD_PCS_BER 0xc +#define MC_CMD_PCS_BER 0xc /* enum: PCS Block Errors. */ -#define MC_CMD_PCS_BLOCK_ERRORS 0xd +#define MC_CMD_PCS_BLOCK_ERRORS 0xd /* enum: PhyXS Link Up. */ -#define MC_CMD_PHYXS_LINK_UP 0xe +#define MC_CMD_PHYXS_LINK_UP 0xe /* enum: PhyXS RX Fault. */ -#define MC_CMD_PHYXS_RX_FAULT 0xf +#define MC_CMD_PHYXS_RX_FAULT 0xf /* enum: PhyXS TX Fault. */ -#define MC_CMD_PHYXS_TX_FAULT 0x10 +#define MC_CMD_PHYXS_TX_FAULT 0x10 /* enum: PhyXS Align. */ -#define MC_CMD_PHYXS_ALIGN 0x11 +#define MC_CMD_PHYXS_ALIGN 0x11 /* enum: PhyXS Sync. */ -#define MC_CMD_PHYXS_SYNC 0x12 +#define MC_CMD_PHYXS_SYNC 0x12 /* enum: AN link-up. */ -#define MC_CMD_AN_LINK_UP 0x13 +#define MC_CMD_AN_LINK_UP 0x13 /* enum: AN Complete. */ -#define MC_CMD_AN_COMPLETE 0x14 +#define MC_CMD_AN_COMPLETE 0x14 /* enum: AN 10GBaseT Status. */ -#define MC_CMD_AN_10GBT_STATUS 0x15 +#define MC_CMD_AN_10GBT_STATUS 0x15 /* enum: Clause 22 Link-Up. */ -#define MC_CMD_CL22_LINK_UP 0x16 +#define MC_CMD_CL22_LINK_UP 0x16 /* enum: (Last entry) */ -#define MC_CMD_PHY_NSTATS 0x17 +#define MC_CMD_PHY_NSTATS 0x17 /***********************************/ @@ -5647,6 +4084,7 @@ #define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0 #define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4 #define MC_CMD_MAC_STATS_IN_CMD_OFST 8 +#define MC_CMD_MAC_STATS_IN_CMD_LEN 4 #define MC_CMD_MAC_STATS_IN_DMA_LBN 0 #define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1 #define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1 @@ -5661,9 +4099,16 @@ #define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1 #define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16 #define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16 +/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as + * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. 
For legacy firmware not + * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to + * MC_CMD_MAC_NSTATS * sizeof(uint64_t) + */ #define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12 +#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4 /* port id so vadapter stats can be provided */ #define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16 +#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4 /* MC_CMD_MAC_STATS_OUT_DMA msgresponse */ #define MC_CMD_MAC_STATS_OUT_DMA_LEN 0 @@ -5675,141 +4120,289 @@ #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0 #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4 #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS -#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */ -#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */ -#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */ -#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */ -#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */ -#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */ -#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */ -#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */ -#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */ -#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */ -#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */ -#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */ -#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */ -#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */ -#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */ -#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */ -#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */ -#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */ -#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */ -#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */ -#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */ -#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */ -#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */ -#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */ -#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */ -#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */ -#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */ -#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */ -#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */ -#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */ -#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */ -#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */ -#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */ -#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */ -#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */ -#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */ -#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */ -#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */ -#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */ -#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */ -#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */ -#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */ -#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */ -#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */ -#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */ -#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */ -#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */ -#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */ -#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */ -#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */ -#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */ -#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */ -#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */ -#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */ -#define 
MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */ -#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */ -#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */ -#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */ -#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ -#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ -#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ +#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */ +#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */ +#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */ +#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */ +#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */ +#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */ +#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */ +#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */ +#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */ +#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */ +#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */ +#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */ +#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */ +#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */ +#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */ +#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */ +#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */ +#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */ +#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */ +#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */ +#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */ +#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */ +#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */ +#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */ +#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */ +#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */ +#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */ +#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */ +#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */ +#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */ +#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */ +#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */ +#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */ +#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */ +#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */ +#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */ +#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */ +#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */ +#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */ +#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */ +#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */ +#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */ +#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */ +#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */ +#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */ +#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */ +#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */ +#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */ +#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */ +#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */ +#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */ +#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */ +#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */ +#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */ +#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */ +#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */ +#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */ +#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */ +#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ +#define 
MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ +#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ /* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c +#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c /* enum: PM discard_bb_overflow counter. Valid for EF10 with * PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d +#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d /* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e +#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e /* enum: PM discard_vfifo_full counter. Valid for EF10 with * PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f +#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f /* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_TRUNC_QBB 0x40 +#define MC_CMD_MAC_PM_TRUNC_QBB 0x40 /* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_DISCARD_QBB 0x41 +#define MC_CMD_MAC_PM_DISCARD_QBB 0x41 /* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42 +#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42 /* enum: RXDP counter: Number of packets dropped due to the queue being * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43 +#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43 /* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10 * with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45 +#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45 /* enum: RXDP counter: Number of non-host packets. Valid for EF10 with * PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 +#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 /* enum: RXDP counter: Number of times an hlb descriptor fetch was performed. * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47 +#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47 /* enum: RXDP counter: Number of times the DPCPU waited for an existing * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. 
*/ -#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48 -#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */ +#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48 +#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */ /* enum: Start of GMAC stats buffer space, for Siena only. */ -#define MC_CMD_GMAC_DMABUF_START 0x40 +#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum: End of GMAC stats buffer space, for Siena only. */ -#define MC_CMD_GMAC_DMABUF_END 0x5f -#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */ -#define MC_CMD_MAC_NSTATS 0x61 /* enum */ +#define MC_CMD_GMAC_DMABUF_END 0x5f +/* enum: GENERATION_END value, used together with GENERATION_START to verify + * consistency of DMAd data. For legacy firmware / drivers without extended + * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS * + * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise, + * this value is invalid/ reserved and GENERATION_END is written as the last + * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). 
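+ * For example, a consumer might snapshot the statistics and confirm
+ * consistency as follows (editor's sketch, not part of the MCDI
+ * definition: "stats" is a hypothetical volatile uint64_t view of the
+ * DMA buffer, "snapshot" a same-sized shadow copy, "nstats" the entry
+ * count chosen for DMA_LEN as described above, and rmb() a read memory
+ * barrier):
+ *
+ *   uint64_t gen_end;
+ *   do {
+ *       gen_end = stats[nstats - 1];
+ *       rmb();
+ *       memcpy(snapshot, (const void *)stats, nstats * sizeof(uint64_t));
+ *       rmb();
+ *   } while (stats[MC_CMD_MAC_GENERATION_START] != gen_end);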
Note that + * this is consistent with the legacy behaviour, in the sense that entry 96 is + * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS * + * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details. + */ +#define MC_CMD_MAC_GENERATION_END 0x60 +#define MC_CMD_MAC_NSTATS 0x61 /* enum */ + +/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3) +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2 +/* enum: Start of FEC stats buffer space, Medford2 and up */ +#define MC_CMD_MAC_FEC_DMABUF_START 0x61 +/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2) + */ +#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61 +/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2) + */ +#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62 +/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63 +/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64 +/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65 +/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66 +/* enum: This includes the space at offset 103 which is the final + * GENERATION_END in a MAC_STATS_V2 response and otherwise unused. 
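+ * (Editor's note: 0x68 = 104 entries, i.e. an 832-byte DMA buffer whose
+ * last 64-bit word, entry 103, carries GENERATION_END.)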
+ */ +#define MC_CMD_MAC_NSTATS_V2 0x68 +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */ + +/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3) +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3 +/* enum: Start of CTPIO stats buffer space, Medford2 and up */ +#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68 +/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the + * target VI + */ +#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68 +/* enum: Number of times a CTPIO send wrote beyond frame end (informational + * only) + */ +#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69 +/* enum: Number of CTPIO failures because the TX doorbell was written before + * the end of the frame data + */ +#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a +/* enum: Number of CTPIO failures because the internal FIFO overflowed */ +#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b +/* enum: Number of CTPIO failures because the host did not deliver data fast + * enough to avoid MAC underflow + */ +#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c +/* enum: Number of CTPIO failures because the host did not deliver all the + * frame data within the timeout + */ +#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d +/* enum: Number of CTPIO failures because the frame data arrived out of order + * or with gaps + */ +#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e +/* enum: Number of CTPIO failures because the host started a new frame before + * completing the previous one + */ +#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f +/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits + * or not 32-bit aligned + */ +#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70 +/* enum: Number of CTPIO fallbacks because another VI on the same port was + * sending a CTPIO frame + */ +#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71 +/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled + */ +#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72 +/* enum: Number of CTPIO fallbacks because length in header was less than 29 + * bytes + */ +#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73 +/* enum: Total number of successful CTPIO sends on this port */ +#define MC_CMD_MAC_CTPIO_SUCCESS 0x74 +/* enum: Total number of CTPIO fallbacks on this port */ +#define MC_CMD_MAC_CTPIO_FALLBACK 0x75 +/* enum: Total number of CTPIO poisoned frames on this port, whether erased or + * not + */ +#define MC_CMD_MAC_CTPIO_POISON 0x76 +/* enum: Total number of CTPIO erased frames on this port */ +#define MC_CMD_MAC_CTPIO_ERASE 0x77 +/* enum: This includes the space at offset 120 which is the final + * GENERATION_END in a MAC_STATS_V3 response and otherwise unused. 
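+ * (Editor's note: 0x79 = 121 entries, i.e. a 968-byte DMA buffer whose
+ * last 64-bit word, entry 120, carries GENERATION_END.)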
+ */ +#define MC_CMD_MAC_NSTATS_V3 0x79 +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */ + +/* MC_CMD_MAC_STATS_V4_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V4_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V4*64))>>3) +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4 +/* enum: Start of V4 stats buffer space */ +#define MC_CMD_MAC_V4_DMABUF_START 0x79 +/* enum: RXDP counter: Number of packets truncated because scattering was + * disabled. + */ +#define MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC 0x79 +/* enum: RXDP counter: Number of times the RXDP head of line blocked waiting + * for descriptors. Will be zero unless RXDP_HLB_IDLE capability is set. + */ +#define MC_CMD_MAC_RXDP_HLB_IDLE 0x7a +/* enum: RXDP counter: Number of times the RXDP timed out while head of line + * blocking. Will be zero unless RXDP_HLB_IDLE capability is set. + */ +#define MC_CMD_MAC_RXDP_HLB_TIMEOUT 0x7b +/* enum: This includes the space at offset 124 which is the final + * GENERATION_END in a MAC_STATS_V4 response and otherwise unused. + */ +#define MC_CMD_MAC_NSTATS_V4 0x7d +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */ /***********************************/ @@ -5821,21 +4414,28 @@ /* MC_CMD_SRIOV_IN msgrequest */ #define MC_CMD_SRIOV_IN_LEN 12 #define MC_CMD_SRIOV_IN_ENABLE_OFST 0 +#define MC_CMD_SRIOV_IN_ENABLE_LEN 4 #define MC_CMD_SRIOV_IN_VI_BASE_OFST 4 +#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4 #define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8 +#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4 /* MC_CMD_SRIOV_OUT msgresponse */ #define MC_CMD_SRIOV_OUT_LEN 8 #define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0 +#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4 #define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4 +#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4 /* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32 /* this is only used for the first record */ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8 @@ -5845,6 +4445,7 @@ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32 @@ -5855,6 +4456,7 @@ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32 @@ -5907,24 +4509,26 @@ /* 
MC_CMD_WOL_FILTER_SET_IN msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_LEN 192 #define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 -#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ +#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 +#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ #define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */ /* A type value of 1 is unused. */ #define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 +#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 /* enum: Magic */ -#define MC_CMD_WOL_TYPE_MAGIC 0x0 +#define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum: MS Windows Magic */ #define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 /* enum: IPv4 Syn */ -#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 +#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 /* enum: IPv6 Syn */ -#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 +#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 /* enum: Bitmap */ -#define MC_CMD_WOL_TYPE_BITMAP 0x5 +#define MC_CMD_WOL_TYPE_BITMAP 0x5 /* enum: Link */ -#define MC_CMD_WOL_TYPE_LINK 0x6 +#define MC_CMD_WOL_TYPE_LINK 0x6 /* enum: (Above this for future use) */ -#define MC_CMD_WOL_TYPE_MAX 0x7 +#define MC_CMD_WOL_TYPE_MAX 0x7 #define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4 #define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46 @@ -5932,7 +4536,9 @@ /* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8 #define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8 @@ -5941,9 +4547,13 @@ /* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4 #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4 #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16 #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2 #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18 @@ -5952,7 +4562,9 @@ /* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16 #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24 @@ -5965,7 +4577,9 @@ /* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48 #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56 @@ -5980,8 +4594,11 @@ /* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* 
MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4 #define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0 #define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1 #define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1 @@ -5990,6 +4607,7 @@ /* MC_CMD_WOL_FILTER_SET_OUT msgresponse */ #define MC_CMD_WOL_FILTER_SET_OUT_LEN 4 #define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4 /***********************************/ @@ -6004,6 +4622,7 @@ /* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */ #define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4 #define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4 /* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */ #define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0 @@ -6022,6 +4641,7 @@ /* MC_CMD_WOL_FILTER_RESET_IN msgrequest */ #define MC_CMD_WOL_FILTER_RESET_IN_LEN 4 #define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0 +#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4 #define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */ #define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */ @@ -6063,6 +4683,7 @@ #define MC_CMD_NVRAM_TYPES_OUT_LEN 4 /* Bit mask of supported types. */ #define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 +#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4 /* enum: Disabled callisto. */ #define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum: MC firmware. */ @@ -6120,21 +4741,28 @@ /* MC_CMD_NVRAM_INFO_IN msgrequest */ #define MC_CMD_NVRAM_INFO_IN_LEN 4 #define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ /* MC_CMD_NVRAM_INFO_OUT msgresponse */ #define MC_CMD_NVRAM_INFO_OUT_LEN 24 #define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4 #define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4 #define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4 #define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0 #define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1 #define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5 #define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6 @@ -6142,36 +4770,51 @@ #define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7 #define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4 #define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 +#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4 /* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */ #define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28 #define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4 #define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4 #define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12 
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4 #define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0 #define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1 #define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5 #define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7 #define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4 #define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4 /* Writes must be multiples of this size. Added to support the MUM on Sorrento. */ #define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24 +#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4 /***********************************/ /* MC_CMD_NVRAM_UPDATE_START * Start a group of update operations on a virtual NVRAM partition. Locks * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if - * PHY_LOCK required and not held). + * PHY_LOCK required and not held). In an adapter bound to a TSA controller, + * MC_CMD_NVRAM_UPDATE_START can only be used on a subset of partition types + * i.e. static config, dynamic config and expansion ROM config. Attempting to + * perform this operation on a restricted partition will return the error + * EPERM. */ #define MC_CMD_NVRAM_UPDATE_START 0x38 #undef MC_CMD_0x38_PRIVILEGE_CTG @@ -6183,6 +4826,7 @@ */ #define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4 #define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ @@ -6193,9 +4837,11 @@ */ #define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8 #define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4 #define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0 #define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1 @@ -6217,20 +4863,26 @@ /* MC_CMD_NVRAM_READ_IN msgrequest */ #define MC_CMD_NVRAM_READ_IN_LEN 12 #define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4 /* amount to read in bytes */ #define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4 /* MC_CMD_NVRAM_READ_IN_V2 msgrequest */ #define MC_CMD_NVRAM_READ_IN_V2_LEN 16 #define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0 +#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4 +#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4 /* amount to read in bytes */ #define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8 +#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4 /* Optional control info. If a partition is stored with an A/B versioning * scheme (i.e. 
in more than one physical partition in NVRAM) the host can set * this to control which underlying physical partition is used to read data @@ -6240,6 +4892,7 @@ * verifying by reading with MODE=TARGET_BACKUP. */ #define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12 +#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4 /* enum: Same as omitting MODE: caller sees data in current partition unless it * holds the write lock in which case it sees data in the partition it is * updating. @@ -6280,10 +4933,13 @@ #define MC_CMD_NVRAM_WRITE_IN_LENMAX 252 #define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num)) #define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4 #define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4 #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1 #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1 @@ -6307,10 +4963,13 @@ /* MC_CMD_NVRAM_ERASE_IN msgrequest */ #define MC_CMD_NVRAM_ERASE_IN_LEN 12 #define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4 #define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4 /* MC_CMD_NVRAM_ERASE_OUT msgresponse */ #define MC_CMD_NVRAM_ERASE_OUT_LEN 0 @@ -6319,8 +4978,12 @@ /***********************************/ /* MC_CMD_NVRAM_UPDATE_FINISH * Finish a group of update operations on a virtual NVRAM partition. Locks - * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad - * type/offset/length), EACCES (if PHY_LOCK required and not held) + * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type/offset/ + * length), EACCES (if PHY_LOCK required and not held). In an adapter bound to + * a TSA controller, MC_CMD_NVRAM_UPDATE_FINISH can only be used on a subset of + * partition types i.e. static config, dynamic config and expansion ROM config. + * Attempting to perform this operation on a restricted partition will return + * the error EPERM. 
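+ * A complete update sequence therefore looks like this (editor's sketch;
+ * the nvram_*() helpers are hypothetical driver wrappers around the
+ * corresponding MCDI requests):
+ *
+ *   if (nvram_update_start(type) == 0) {
+ *       nvram_erase(type, offset, length);
+ *       nvram_write(type, offset, data, length);
+ *       rc = nvram_update_finish(type);
+ *   }
+ *
+ * On a TSA-bound adapter, both the START and FINISH steps fail with EPERM
+ * unless TYPE is one of the permitted configuration partitions listed
+ * above.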
*/ #define MC_CMD_NVRAM_UPDATE_FINISH 0x3c #undef MC_CMD_0x3c_PRIVILEGE_CTG @@ -6332,9 +4995,11 @@ */ #define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8 #define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4 /* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH * request with additional flags indicating version of NVRAM_UPDATE commands in @@ -6343,10 +5008,13 @@ */ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1 @@ -6373,6 +5041,7 @@ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4 /* Result of nvram update completion processing */ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4 /* enum: Invalid return code; only non-zero values are defined. Defined as * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT. */ @@ -6407,6 +5076,8 @@ * only production signed images. */ #define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc +/* enum: The image has a lower security level than the current firmware. */ +#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd /***********************************/ @@ -6430,11 +5101,12 @@ #define MC_CMD_REBOOT 0x3d #undef MC_CMD_0x3d_PRIVILEGE_CTG -#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_REBOOT_IN msgrequest */ #define MC_CMD_REBOOT_IN_LEN 4 #define MC_CMD_REBOOT_IN_FLAGS_OFST 0 +#define MC_CMD_REBOOT_IN_FLAGS_LEN 4 #define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */ /* MC_CMD_REBOOT_OUT msgresponse */ @@ -6473,11 +5145,12 @@ #define MC_CMD_REBOOT_MODE 0x3f #undef MC_CMD_0x3f_PRIVILEGE_CTG -#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_REBOOT_MODE_IN msgrequest */ #define MC_CMD_REBOOT_MODE_IN_LEN 4 #define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 +#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4 /* enum: Normal. */ #define MC_CMD_REBOOT_MODE_NORMAL 0x0 /* enum: Power-on Reset. */ @@ -6492,6 +5165,7 @@ /* MC_CMD_REBOOT_MODE_OUT msgresponse */ #define MC_CMD_REBOOT_MODE_OUT_LEN 4 #define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0 +#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4 /***********************************/ @@ -6528,7 +5202,7 @@ #define MC_CMD_SENSOR_INFO 0x41 #undef MC_CMD_0x41_PRIVILEGE_CTG -#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_SENSOR_INFO_IN msgrequest */ #define MC_CMD_SENSOR_INFO_IN_LEN 0 @@ -6542,174 +5216,190 @@ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc. 
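 * A caller can therefore enumerate every sensor with a loop like this
 * (editor's sketch; sensor_info_mask() is a hypothetical wrapper that
 * issues MC_CMD_SENSOR_INFO_EXT_IN for one page and returns the MASK
 * field of the response):
 *
 *   uint32_t page = 0, mask;
 *   do {
 *       mask = sensor_info_mask(page);
 *       for (unsigned int bit = 0; bit < 31; bit++)
 *           if (mask & (1u << bit))
 *               handle_sensor(page * 32 + bit);
 *       page++;
 *   } while (mask & (1u << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN));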
*/ #define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4 /* MC_CMD_SENSOR_INFO_OUT msgresponse */ #define MC_CMD_SENSOR_INFO_OUT_LENMIN 4 #define MC_CMD_SENSOR_INFO_OUT_LENMAX 252 #define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num)) #define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 +#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4 /* enum: Controller temperature: degC */ -#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 +#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum: Phy common temperature: degC */ -#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 +#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 /* enum: Controller cooling: bool */ -#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 +#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 /* enum: Phy 0 temperature: degC */ -#define MC_CMD_SENSOR_PHY0_TEMP 0x3 +#define MC_CMD_SENSOR_PHY0_TEMP 0x3 /* enum: Phy 0 cooling: bool */ -#define MC_CMD_SENSOR_PHY0_COOLING 0x4 +#define MC_CMD_SENSOR_PHY0_COOLING 0x4 /* enum: Phy 1 temperature: degC */ -#define MC_CMD_SENSOR_PHY1_TEMP 0x5 +#define MC_CMD_SENSOR_PHY1_TEMP 0x5 /* enum: Phy 1 cooling: bool */ -#define MC_CMD_SENSOR_PHY1_COOLING 0x6 +#define MC_CMD_SENSOR_PHY1_COOLING 0x6 /* enum: 1.0v power: mV */ -#define MC_CMD_SENSOR_IN_1V0 0x7 +#define MC_CMD_SENSOR_IN_1V0 0x7 /* enum: 1.2v power: mV */ -#define MC_CMD_SENSOR_IN_1V2 0x8 +#define MC_CMD_SENSOR_IN_1V2 0x8 /* enum: 1.8v power: mV */ -#define MC_CMD_SENSOR_IN_1V8 0x9 +#define MC_CMD_SENSOR_IN_1V8 0x9 /* enum: 2.5v power: mV */ -#define MC_CMD_SENSOR_IN_2V5 0xa +#define MC_CMD_SENSOR_IN_2V5 0xa /* enum: 3.3v power: mV */ -#define MC_CMD_SENSOR_IN_3V3 0xb +#define MC_CMD_SENSOR_IN_3V3 0xb /* enum: 12v power: mV */ -#define MC_CMD_SENSOR_IN_12V0 0xc +#define MC_CMD_SENSOR_IN_12V0 0xc /* enum: 1.2v analogue power: mV */ -#define MC_CMD_SENSOR_IN_1V2A 0xd +#define MC_CMD_SENSOR_IN_1V2A 0xd /* enum: reference voltage: mV */ -#define MC_CMD_SENSOR_IN_VREF 0xe +#define MC_CMD_SENSOR_IN_VREF 0xe /* enum: AOE FPGA power: mV */ -#define MC_CMD_SENSOR_OUT_VAOE 0xf +#define MC_CMD_SENSOR_OUT_VAOE 0xf /* enum: AOE FPGA temperature: degC */ -#define MC_CMD_SENSOR_AOE_TEMP 0x10 +#define MC_CMD_SENSOR_AOE_TEMP 0x10 /* enum: AOE FPGA PSU temperature: degC */ -#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11 +#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11 /* enum: AOE PSU temperature: degC */ -#define MC_CMD_SENSOR_PSU_TEMP 0x12 +#define MC_CMD_SENSOR_PSU_TEMP 0x12 /* enum: Fan 0 speed: RPM */ -#define MC_CMD_SENSOR_FAN_0 0x13 +#define MC_CMD_SENSOR_FAN_0 0x13 /* enum: Fan 1 speed: RPM */ -#define MC_CMD_SENSOR_FAN_1 0x14 +#define MC_CMD_SENSOR_FAN_1 0x14 /* enum: Fan 2 speed: RPM */ -#define MC_CMD_SENSOR_FAN_2 0x15 +#define MC_CMD_SENSOR_FAN_2 0x15 /* enum: Fan 3 speed: RPM */ -#define MC_CMD_SENSOR_FAN_3 0x16 +#define MC_CMD_SENSOR_FAN_3 0x16 /* enum: Fan 4 speed: RPM */ -#define MC_CMD_SENSOR_FAN_4 0x17 +#define MC_CMD_SENSOR_FAN_4 0x17 /* enum: AOE FPGA input power: mV */ -#define MC_CMD_SENSOR_IN_VAOE 0x18 +#define MC_CMD_SENSOR_IN_VAOE 0x18 /* enum: AOE FPGA current: mA */ -#define MC_CMD_SENSOR_OUT_IAOE 0x19 +#define MC_CMD_SENSOR_OUT_IAOE 0x19 /* enum: AOE FPGA input current: mA */ -#define MC_CMD_SENSOR_IN_IAOE 0x1a +#define MC_CMD_SENSOR_IN_IAOE 0x1a /* enum: NIC power consumption: W */ -#define MC_CMD_SENSOR_NIC_POWER 0x1b +#define MC_CMD_SENSOR_NIC_POWER 0x1b /* enum: 0.9v power voltage: mV */ -#define MC_CMD_SENSOR_IN_0V9 0x1c +#define MC_CMD_SENSOR_IN_0V9 0x1c /* enum: 0.9v power current: mA */ -#define MC_CMD_SENSOR_IN_I0V9 0x1d +#define MC_CMD_SENSOR_IN_I0V9 0x1d 
/* enum: 1.2v power current: mA */ -#define MC_CMD_SENSOR_IN_I1V2 0x1e +#define MC_CMD_SENSOR_IN_I1V2 0x1e /* enum: Not a sensor: reserved for the next page flag */ -#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f +#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f /* enum: 0.9v power voltage (at ADC): mV */ -#define MC_CMD_SENSOR_IN_0V9_ADC 0x20 +#define MC_CMD_SENSOR_IN_0V9_ADC 0x20 /* enum: Controller temperature 2: degC */ -#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21 +#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21 /* enum: Voltage regulator internal temperature: degC */ -#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22 +#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22 /* enum: 0.9V voltage regulator temperature: degC */ -#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23 +#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23 /* enum: 1.2V voltage regulator temperature: degC */ -#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24 +#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24 /* enum: controller internal temperature sensor voltage (internal ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25 +#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25 /* enum: controller internal temperature (internal ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26 +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26 /* enum: controller internal temperature sensor voltage (external ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27 +#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27 /* enum: controller internal temperature (external ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28 +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28 /* enum: ambient temperature: degC */ -#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29 +#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29 /* enum: air flow: bool */ -#define MC_CMD_SENSOR_AIRFLOW 0x2a +#define MC_CMD_SENSOR_AIRFLOW 0x2a /* enum: voltage between VSS08D and VSS08D at CSR: mV */ -#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b /* enum: voltage between VSS08D and VSS08D at CSR (external ADC): mV */ -#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c /* enum: Hotpoint temperature: degC */ -#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d +#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d /* enum: Port 0 PHY power switch over-current: bool */ -#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e +#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e /* enum: Port 1 PHY power switch over-current: bool */ -#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f -/* enum: Mop-up microcontroller reference voltage (millivolts) */ -#define MC_CMD_SENSOR_MUM_VCC 0x30 +#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f +/* enum: Mop-up microcontroller reference voltage: mV */ +#define MC_CMD_SENSOR_MUM_VCC 0x30 /* enum: 0.9v power phase A voltage: mV */ -#define MC_CMD_SENSOR_IN_0V9_A 0x31 +#define MC_CMD_SENSOR_IN_0V9_A 0x31 /* enum: 0.9v power phase A current: mA */ -#define MC_CMD_SENSOR_IN_I0V9_A 0x32 +#define MC_CMD_SENSOR_IN_I0V9_A 0x32 /* enum: 0.9V voltage regulator phase A temperature: degC */ -#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33 +#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33 /* enum: 0.9v power phase B voltage: mV */ -#define MC_CMD_SENSOR_IN_0V9_B 0x34 +#define MC_CMD_SENSOR_IN_0V9_B 0x34 /* enum: 0.9v power phase B current: mA */ -#define MC_CMD_SENSOR_IN_I0V9_B 0x35 +#define MC_CMD_SENSOR_IN_I0V9_B 0x35 /* enum: 0.9V voltage regulator phase B temperature: degC */ -#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36 +#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36 
/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */ -#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37 +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37 /* enum: CCOM AVREG 1v2 supply (external ADC): mV */ -#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38 +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38 /* enum: CCOM AVREG 1v8 supply (internal ADC): mV */ -#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39 +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39 /* enum: CCOM AVREG 1v8 supply (external ADC): mV */ -#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a /* enum: CCOM RTS temperature: degC */ -#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b +#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b /* enum: Not a sensor: reserved for the next page flag */ -#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f +#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f /* enum: controller internal temperature sensor voltage on master core * (internal ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40 +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40 /* enum: controller internal temperature on master core (internal ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41 +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41 /* enum: controller internal temperature sensor voltage on master core * (external ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42 +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42 /* enum: controller internal temperature on master core (external ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43 +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43 /* enum: controller internal temperature on slave core sensor voltage (internal * ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44 +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44 /* enum: controller internal temperature on slave core (internal ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45 +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45 /* enum: controller internal temperature on slave core sensor voltage (external * ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46 +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46 /* enum: controller internal temperature on slave core (external ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47 +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47 /* enum: Voltage supplied to the SODIMMs from their power supply: mV */ -#define MC_CMD_SENSOR_SODIMM_VOUT 0x49 +#define MC_CMD_SENSOR_SODIMM_VOUT 0x49 /* enum: Temperature of SODIMM 0 (if installed): degC */ -#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a +#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a /* enum: Temperature of SODIMM 1 (if installed): degC */ -#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b +#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b /* enum: Voltage supplied to the QSFP #0 from its power supply: mV */ -#define MC_CMD_SENSOR_PHY0_VCC 0x4c +#define MC_CMD_SENSOR_PHY0_VCC 0x4c /* enum: Voltage supplied to the QSFP #1 from its power supply: mV */ -#define MC_CMD_SENSOR_PHY1_VCC 0x4d +#define MC_CMD_SENSOR_PHY1_VCC 0x4d /* enum: Controller die temperature (TDIODE): degC */ -#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e +#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e /* enum: Board temperature (front): degC */ -#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f +#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 
0x4f /* enum: Board temperature (back): degC */ -#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50 +#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50 +/* enum: 1.8v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V8 0x51 +/* enum: 2.5v power current: mA */ +#define MC_CMD_SENSOR_IN_I2V5 0x52 +/* enum: 3.3v power current: mA */ +#define MC_CMD_SENSOR_IN_I3V3 0x53 +/* enum: 12v power current: mA */ +#define MC_CMD_SENSOR_IN_I12V0 0x54 +/* enum: 1.3v power: mV */ +#define MC_CMD_SENSOR_IN_1V3 0x55 +/* enum: 1.3v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V3 0x56 +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ #define MC_CMD_SENSOR_ENTRY_OFST 4 #define MC_CMD_SENSOR_ENTRY_LEN 8 @@ -6723,6 +5413,7 @@ #define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252 #define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num)) #define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_SENSOR_INFO_OUT */ #define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31 @@ -6775,7 +5466,7 @@ #define MC_CMD_READ_SENSORS 0x42 #undef MC_CMD_0x42_PRIVILEGE_CTG -#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_READ_SENSORS_IN msgrequest */ #define MC_CMD_READ_SENSORS_IN_LEN 8 @@ -6794,6 +5485,7 @@ #define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4 /* Size in bytes of host buffer. */ #define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8 +#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4 /* MC_CMD_READ_SENSORS_OUT msgresponse */ #define MC_CMD_READ_SENSORS_OUT_LEN 0 @@ -6810,17 +5502,17 @@ #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1 /* enum: Ok. */ -#define MC_CMD_SENSOR_STATE_OK 0x0 +#define MC_CMD_SENSOR_STATE_OK 0x0 /* enum: Breached warning threshold. */ -#define MC_CMD_SENSOR_STATE_WARNING 0x1 +#define MC_CMD_SENSOR_STATE_WARNING 0x1 /* enum: Breached fatal threshold. */ -#define MC_CMD_SENSOR_STATE_FATAL 0x2 +#define MC_CMD_SENSOR_STATE_FATAL 0x2 /* enum: Fault with sensor. */ -#define MC_CMD_SENSOR_STATE_BROKEN 0x3 +#define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum: Sensor is working but does not currently have a reading. */ -#define MC_CMD_SENSOR_STATE_NO_READING 0x4 +#define MC_CMD_SENSOR_STATE_NO_READING 0x4 /* enum: Sensor initialisation failed. */ -#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5 +#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3 @@ -6848,6 +5540,7 @@ /* MC_CMD_GET_PHY_STATE_OUT msgresponse */ #define MC_CMD_GET_PHY_STATE_OUT_LEN 4 #define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0 +#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4 /* enum: Ok. */ #define MC_CMD_PHY_STATE_OK 0x1 /* enum: Faulty. 
*/ @@ -6885,6 +5578,7 @@ /* MC_CMD_WOL_FILTER_GET_OUT msgresponse */ #define MC_CMD_WOL_FILTER_GET_OUT_LEN 4 #define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4 /***********************************/ @@ -6902,8 +5596,9 @@ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num)) #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 #define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */ -#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */ +#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1 @@ -6912,13 +5607,16 @@ /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10 @@ -6929,6 +5627,7 @@ /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4 /***********************************/ @@ -6944,7 +5643,9 @@ /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */ #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8 #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4 /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */ #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0 @@ -6972,7 +5673,7 @@ #define MC_CMD_TESTASSERT 0x49 #undef MC_CMD_0x49_PRIVILEGE_CTG -#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_TESTASSERT_IN msgrequest */ #define MC_CMD_TESTASSERT_IN_LEN 0 @@ -6984,20 +5685,21 @@ #define MC_CMD_TESTASSERT_V2_IN_LEN 4 /* How to provoke the assertion */ #define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0 +#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4 /* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless * you're testing firmware, this is what you want. 
*/ -#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0 +#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0 /* enum: Assert using assert(0); */ -#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1 +#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1 /* enum: Deliberately trigger a watchdog */ -#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2 +#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2 /* enum: Deliberately trigger a trap by loading from an invalid address */ -#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3 +#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3 /* enum: Deliberately trigger a trap by storing to an invalid address */ -#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4 +#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4 /* enum: Jump to an invalid address */ -#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5 +#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5 /* MC_CMD_TESTASSERT_V2_OUT msgresponse */ #define MC_CMD_TESTASSERT_V2_OUT_LEN 0 @@ -7020,6 +5722,7 @@ #define MC_CMD_WORKAROUND_IN_LEN 8 /* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */ #define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 +#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4 /* enum: Bug 17230 work around. */ #define MC_CMD_WORKAROUND_BUG17230 0x1 /* enum: Bug 35388 work around (unsafe EVQ writes). */ @@ -7048,6 +5751,7 @@ * the workaround */ #define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 +#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4 /* MC_CMD_WORKAROUND_OUT msgresponse */ #define MC_CMD_WORKAROUND_OUT_LEN 0 @@ -7057,6 +5761,7 @@ */ #define MC_CMD_WORKAROUND_EXT_OUT_LEN 4 #define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4 #define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0 #define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1 @@ -7078,6 +5783,7 @@ /* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */ #define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4 #define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4 /* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 @@ -7085,6 +5791,7 @@ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num)) /* in bytes */ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1 @@ -7099,17 +5806,19 @@ #define MC_CMD_NVRAM_TEST 0x4c #undef MC_CMD_0x4c_PRIVILEGE_CTG -#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_NVRAM_TEST_IN msgrequest */ #define MC_CMD_NVRAM_TEST_IN_LEN 4 #define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ /* MC_CMD_NVRAM_TEST_OUT msgresponse */ #define MC_CMD_NVRAM_TEST_OUT_LEN 4 #define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 +#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4 /* enum: Passed. */ #define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum: Failed. */ @@ -7130,12 +5839,16 @@ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16 /* 0-6 low->high de-emph. 
@@ -7099,17 +5806,19 @@ #define MC_CMD_NVRAM_TEST 0x4c #undef MC_CMD_0x4c_PRIVILEGE_CTG -#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_NVRAM_TEST_IN msgrequest */ #define MC_CMD_NVRAM_TEST_IN_LEN 4 #define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ /* MC_CMD_NVRAM_TEST_OUT msgresponse */ #define MC_CMD_NVRAM_TEST_OUT_LEN 4 #define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 +#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4 /* enum: Passed. */ #define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum: Failed. */ @@ -7130,12 +5839,16 @@ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16 /* 0-6 low->high de-emph. */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4 /* 0-8 low->high ref.V */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4 /* 0-8 0-8 low->high boost */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4 /* 0-8 low->high ref.V */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4 /* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */ #define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0 @@ -7144,10 +5857,13 @@ #define MC_CMD_MRSFP_TWEAK_OUT_LEN 12 /* input bits */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4 /* output bits */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4 /* direction */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4 /* enum: Out. */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 /* enum: In. */ @@ -7163,21 +5879,26 @@ #define MC_CMD_SENSOR_SET_LIMS 0x4e #undef MC_CMD_0x4e_PRIVILEGE_CTG -#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */ #define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20 #define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0 +#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ /* interpretation is sensor-specific. */ #define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4 +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4 /* interpretation is sensor-specific. */ #define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8 +#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4 /* interpretation is sensor-specific. */ #define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12 +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4 /* interpretation is sensor-specific. */ #define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16 +#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4 /* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */ #define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
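The _OFST/_LEN pairs being added throughout this header are what drive request serialisation. As a minimal sketch (not part of the patch), a fixed-size MC_CMD_SENSOR_SET_LIMS request could be encoded as below; mcdi_write_dword() is a hypothetical helper, and the little-endian store reflects the MCDI wire format:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void mcdi_write_dword(uint8_t *buf, size_t ofst, uint32_t v)
{
	/* MCDI dwords are little-endian; this assumes an LE host */
	memcpy(buf + ofst, &v, sizeof(v));
}

/* req must be at least MC_CMD_SENSOR_SET_LIMS_IN_LEN (20) bytes. */
static void sensor_set_lims_encode(uint8_t *req, uint32_t sensor,
				   uint32_t low0, uint32_t hi0,
				   uint32_t low1, uint32_t hi1)
{
	mcdi_write_dword(req, MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST, sensor);
	mcdi_write_dword(req, MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST, low0);
	mcdi_write_dword(req, MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST, hi0);
	mcdi_write_dword(req, MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST, low1);
	mcdi_write_dword(req, MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST, hi1);
}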
@@ -7194,9 +5915,13 @@ /* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */ #define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4 /***********************************/ @@ -7218,6 +5943,7 @@ #define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num)) /* total number of partitions */ #define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0 +#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4 /* type ID code for each of NUM_PARTITIONS partitions */ #define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4 #define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4 @@ -7239,6 +5965,7 @@ #define MC_CMD_NVRAM_METADATA_IN_LEN 4 /* Partition type ID code */ #define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4 /* MC_CMD_NVRAM_METADATA_OUT msgresponse */ #define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20 @@ -7246,7 +5973,9 @@ #define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num)) /* Partition type ID code */ #define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4 #define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4 +#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4 #define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0 #define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1 #define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1 @@ -7255,6 +5984,7 @@ #define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1 /* Subtype ID code for content of this partition */ #define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4 /* 1st component of W.X.Y.Z version number for content of this partition */ #define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12 #define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2 @@ -7296,8 +6026,10 @@ #define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2 /* Number of allocated MAC addresses */ #define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4 /* Spacing of allocated MAC addresses */ #define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
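MC_CMD_GET_MAC_ADDRESSES reports a base address plus MAC_COUNT and MAC_STRIDE rather than a full list. A minimal sketch (not part of the patch) of deriving the n-th allocated address; treating the stride as an increment on the 48-bit numeric value of the base MAC is an assumption based on the field comments above, not on a spec:

#include <stdint.h>

static int mac_address_nth(const uint8_t base[6], uint32_t count,
			   uint32_t stride, uint32_t n, uint8_t out[6])
{
	uint64_t v = 0;
	int i;

	if (n >= count)
		return -1;
	for (i = 0; i < 6; i++)		/* MAC address bytes are big-endian */
		v = (v << 8) | base[i];
	v += (uint64_t)n * stride;
	for (i = 5; i >= 0; i--) {
		out[i] = (uint8_t)(v & 0xff);
		v >>= 8;
	}
	return 0;
}

With MAC_COUNT 4 and MAC_STRIDE 1, for example, this would give four consecutive addresses starting at the base.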
/***********************************/ @@ -7307,12 +6039,13 @@ #define MC_CMD_CLP 0x56 #undef MC_CMD_0x56_PRIVILEGE_CTG -#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_CLP_IN msgrequest */ #define MC_CMD_CLP_IN_LEN 4 /* Sub operation */ #define MC_CMD_CLP_IN_OP_OFST 0 +#define MC_CMD_CLP_IN_OP_LEN 4 /* enum: Return to factory default settings */ #define MC_CMD_CLP_OP_DEFAULT 0x1 /* enum: Set MAC address */ @@ -7330,6 +6063,7 @@ /* MC_CMD_CLP_IN_DEFAULT msgrequest */ #define MC_CMD_CLP_IN_DEFAULT_LEN 4 /* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ /* MC_CMD_CLP_OUT_DEFAULT msgresponse */ #define MC_CMD_CLP_OUT_DEFAULT_LEN 0 @@ -7337,6 +6071,7 @@ /* MC_CMD_CLP_IN_SET_MAC msgrequest */ #define MC_CMD_CLP_IN_SET_MAC_LEN 12 /* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ /* MAC address assigned to port */ #define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4 #define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6 @@ -7350,6 +6085,7 @@ /* MC_CMD_CLP_IN_GET_MAC msgrequest */ #define MC_CMD_CLP_IN_GET_MAC_LEN 4 /* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ /* MC_CMD_CLP_OUT_GET_MAC msgresponse */ #define MC_CMD_CLP_OUT_GET_MAC_LEN 8 @@ -7363,6 +6099,7 @@ /* MC_CMD_CLP_IN_SET_BOOT msgrequest */ #define MC_CMD_CLP_IN_SET_BOOT_LEN 5 /* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ /* Boot flag */ #define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4 #define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1 @@ -7373,6 +6110,7 @@ /* MC_CMD_CLP_IN_GET_BOOT msgrequest */ #define MC_CMD_CLP_IN_GET_BOOT_LEN 4 /* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ /* MC_CMD_CLP_OUT_GET_BOOT msgresponse */ #define MC_CMD_CLP_OUT_GET_BOOT_LEN 4 @@ -7391,11 +6129,12 @@ #define MC_CMD_MUM 0x57 #undef MC_CMD_0x57_PRIVILEGE_CTG -#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_MUM_IN msgrequest */ #define MC_CMD_MUM_IN_LEN 4 #define MC_CMD_MUM_IN_OP_HDR_OFST 0 +#define MC_CMD_MUM_IN_OP_HDR_LEN 4 #define MC_CMD_MUM_IN_OP_LBN 0 #define MC_CMD_MUM_IN_OP_WIDTH 8 /* enum: NULL MCDI command to MUM */ @@ -7435,26 +6174,32 @@ #define MC_CMD_MUM_IN_NULL_LEN 4 /* MUM cmd header */ #define MC_CMD_MUM_IN_CMD_OFST 0 +#define MC_CMD_MUM_IN_CMD_LEN 4 /* MC_CMD_MUM_IN_GET_VERSION msgrequest */ #define MC_CMD_MUM_IN_GET_VERSION_LEN 4 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MC_CMD_MUM_IN_READ msgrequest */ #define MC_CMD_MUM_IN_READ_LEN 16 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* ID of (device connected to MUM) to read from registers of */ #define MC_CMD_MUM_IN_READ_DEVICE_OFST 4 +#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4 /* enum: Hittite HMC1035 clock generator on Sorrento board */ #define MC_CMD_MUM_DEV_HITTITE 0x1 /* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */ #define MC_CMD_MUM_DEV_HITTITE_NIC 0x2 /* 32-bit address to read from */ #define MC_CMD_MUM_IN_READ_ADDR_OFST 8 +#define MC_CMD_MUM_IN_READ_ADDR_LEN 4 /* Number of words to read.
*/ #define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12 +#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4 /* MC_CMD_MUM_IN_WRITE msgrequest */ #define MC_CMD_MUM_IN_WRITE_LENMIN 16 @@ -7462,12 +6207,15 @@ #define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num)) /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* ID of (device connected to MUM) to write to registers of */ #define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4 +#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4 /* enum: Hittite HMC1035 clock generator on Sorrento board */ /* MC_CMD_MUM_DEV_HITTITE 0x1 */ /* 32-bit address to write to */ #define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8 +#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4 /* Words to write */ #define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12 #define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4 @@ -7480,12 +6228,16 @@ #define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num)) /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MUM I2C cmd code */ #define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4 +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4 /* Number of bytes to write */ #define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8 +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4 /* Number of bytes to read */ #define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12 +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4 /* Bytes to write */ #define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16 #define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1 @@ -7496,21 +6248,28 @@ #define MC_CMD_MUM_IN_LOG_LEN 8 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_LOG_OP_OFST 4 -#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ +#define MC_CMD_MUM_IN_LOG_OP_LEN 4 +#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ /* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */ #define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MC_CMD_MUM_IN_LOG_OP_OFST 4 */ +/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */ /* Enable/disable debug output to UART */ #define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8 +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4 /* MC_CMD_MUM_IN_GPIO msgrequest */ #define MC_CMD_MUM_IN_GPIO_LEN 8 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0 #define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8 #define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */ @@ -7523,40 +6282,56 @@ /* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */ #define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4 /* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */ #define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4 /* The first 32-bit word to be written to the GPIO OUT register. */ #define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4 /* The second 32-bit word to be written to the GPIO OUT register. 
*/ #define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4 /* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */ #define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4 /* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4 /* The first 32-bit word to be written to the GPIO OUT ENABLE register. */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4 /* The second 32-bit word to be written to the GPIO OUT ENABLE register. */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4 /* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4 /* MC_CMD_MUM_IN_GPIO_OP msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8 #define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8 #define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */ @@ -7569,26 +6344,34 @@ /* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4 /* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24 #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8 /* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24 #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8 /* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24 #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8 @@ -7596,7 +6379,9 @@ #define MC_CMD_MUM_IN_READ_SENSORS_LEN 8 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4 #define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0 #define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8 #define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8 @@ -7606,13 +6391,16 @@ #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12 /* MUM cmd header 
*/ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* Bit-mask of clocks to be programmed */ #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4 #define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */ #define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */ #define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */ /* Control flags for clock programming */ #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1 @@ -7624,19 +6412,24 @@ #define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* Enable/Disable FPGA config from flash */ #define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4 +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4 /* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */ #define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MC_CMD_MUM_IN_QSFP msgrequest */ #define MC_CMD_MUM_IN_QSFP_LEN 12 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0 #define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4 #define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */ @@ -7646,52 +6439,77 @@ #define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */ #define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */ #define MC_CMD_MUM_IN_QSFP_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4 /* MC_CMD_MUM_IN_QSFP_INIT msgrequest */ #define MC_CMD_MUM_IN_QSFP_INIT_LEN 16 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4 #define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4 /* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */ #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4 #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4 #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4 #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4 /* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */ #define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4 /* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */ #define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8 +#define 
MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4 #define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4 /* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */ #define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4 /* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */ #define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4 /* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */ #define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MC_CMD_MUM_OUT msgresponse */ #define MC_CMD_MUM_OUT_LEN 0 @@ -7702,6 +6520,7 @@ /* MC_CMD_MUM_OUT_GET_VERSION msgresponse */ #define MC_CMD_MUM_OUT_GET_VERSION_LEN 12 #define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4 #define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4 #define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8 #define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4 @@ -7739,8 +6558,10 @@ #define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8 /* The first 32-bit word read from the GPIO IN register. */ #define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4 /* The second 32-bit word read from the GPIO IN register. */ #define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4 /* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0 @@ -7749,8 +6570,10 @@ #define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8 /* The first 32-bit word read from the GPIO OUT register. */ #define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4 /* The second 32-bit word read from the GPIO OUT register. 
*/ #define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4 /* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0 @@ -7758,11 +6581,14 @@ /* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8 #define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4 #define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4 /* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4 #define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4 /* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0 @@ -7791,6 +6617,7 @@ /* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */ #define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4 #define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4 /* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */ #define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0 @@ -7798,6 +6625,7 @@ /* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */ #define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4 #define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4 /* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */ #define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0 @@ -7805,7 +6633,9 @@ /* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */ #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1 @@ -7814,6 +6644,7 @@ /* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */ #define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4 #define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4 /* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */ #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5 @@ -7821,6 +6652,7 @@ #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num)) /* in bytes */ #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4 #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4 #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1 #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1 @@ -7829,11 +6661,14 @@ /* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */ #define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8 #define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4 #define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4 /* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */ #define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4 #define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4 /* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24 @@ -7841,12 +6676,14 @@ #define 
MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num)) /* Discrete (soldered) DDR resistor strap info */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4 #define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0 #define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16 #define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16 #define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16 /* Number of SODIMM info records */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4 /* Array of SODIMM info records */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8 #define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8 @@ -7907,18 +6744,19 @@ /* EVB_PORT_ID structuredef */ #define EVB_PORT_ID_LEN 4 #define EVB_PORT_ID_PORT_ID_OFST 0 +#define EVB_PORT_ID_PORT_ID_LEN 4 /* enum: An invalid port handle. */ -#define EVB_PORT_ID_NULL 0x0 +#define EVB_PORT_ID_NULL 0x0 /* enum: The port assigned to this function. */ -#define EVB_PORT_ID_ASSIGNED 0x1000000 +#define EVB_PORT_ID_ASSIGNED 0x1000000 /* enum: External network port 0 */ -#define EVB_PORT_ID_MAC0 0x2000000 +#define EVB_PORT_ID_MAC0 0x2000000 /* enum: External network port 1 */ -#define EVB_PORT_ID_MAC1 0x2000001 +#define EVB_PORT_ID_MAC1 0x2000001 /* enum: External network port 2 */ -#define EVB_PORT_ID_MAC2 0x2000002 +#define EVB_PORT_ID_MAC2 0x2000002 /* enum: External network port 3 */ -#define EVB_PORT_ID_MAC3 0x2000003 +#define EVB_PORT_ID_MAC3 0x2000003 #define EVB_PORT_ID_PORT_ID_LBN 0 #define EVB_PORT_ID_PORT_ID_WIDTH 32 @@ -7930,7 +6768,7 @@ #define EVB_VLAN_TAG_MODE_LBN 12 #define EVB_VLAN_TAG_MODE_WIDTH 4 /* enum: Insert the VLAN. */ -#define EVB_VLAN_TAG_INSERT 0x0 +#define EVB_VLAN_TAG_INSERT 0x0 /* enum: Replace the VLAN if already present.
*/ #define EVB_VLAN_TAG_REPLACE 0x1 @@ -7959,121 +6797,149 @@ #define NVRAM_PARTITION_TYPE_ID_OFST 0 #define NVRAM_PARTITION_TYPE_ID_LEN 2 /* enum: Primary MC firmware partition */ -#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100 +#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100 /* enum: Secondary MC firmware partition */ -#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200 +#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200 /* enum: Expansion ROM partition */ -#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300 +#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300 /* enum: Static configuration TLV partition */ -#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400 +#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400 /* enum: Dynamic configuration TLV partition */ -#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500 +#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500 /* enum: Expansion ROM configuration data for port 0 */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600 /* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600 /* enum: Expansion ROM configuration data for port 1 */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601 /* enum: Expansion ROM configuration data for port 2 */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602 /* enum: Expansion ROM configuration data for port 3 */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603 /* enum: Non-volatile log output partition */ -#define NVRAM_PARTITION_TYPE_LOG 0x700 +#define NVRAM_PARTITION_TYPE_LOG 0x700 /* enum: Non-volatile log output of second core on dual-core device */ -#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701 +#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701 /* enum: Device state dump output partition */ -#define NVRAM_PARTITION_TYPE_DUMP 0x800 +#define NVRAM_PARTITION_TYPE_DUMP 0x800 /* enum: Application license key storage partition */ -#define NVRAM_PARTITION_TYPE_LICENSE 0x900 +#define NVRAM_PARTITION_TYPE_LICENSE 0x900 /* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */ -#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 +#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 /* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */ -#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff +#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff /* enum: Primary FPGA partition */ -#define NVRAM_PARTITION_TYPE_FPGA 0xb00 +#define NVRAM_PARTITION_TYPE_FPGA 0xb00 /* enum: Secondary FPGA partition */ -#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01 +#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01 /* enum: FC firmware partition */ -#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02 +#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02 /* enum: FC License partition */ -#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03 +#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03 /* enum: Non-volatile log output partition for FC */ -#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 +#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 /* enum: MUM firmware partition */ -#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 +#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 +/* enum: SUC firmware partition (this is intentionally an alias of + * MUM_FIRMWARE) + */ +#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00 /* enum: MUM Non-volatile log output 
partition. */ -#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 +#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 /* enum: MUM Application table partition. */ -#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02 +#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02 /* enum: MUM boot rom partition. */ -#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03 +#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03 /* enum: MUM production signatures & calibration rom partition. */ -#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04 +#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04 /* enum: MUM user signatures & calibration rom partition. */ -#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 +#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 /* enum: MUM fuses and lockbits partition. */ -#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 +#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 /* enum: UEFI expansion ROM if separate from PXE */ -#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00 -/* enum: Spare partition 0 */ -#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000 +#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00 +/* enum: Used by the expansion ROM for logging */ +#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000 /* enum: Used for XIP code of shmbooted images */ -#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100 +#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100 /* enum: Spare partition 2 */ -#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200 +#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200 /* enum: Manufacturing partition. Used during manufacture to pass information * between XJTAG and Manftest. */ -#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300 +#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300 /* enum: Spare partition 4 */ -#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400 +#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400 /* enum: Spare partition 5 */ -#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500 +#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500 /* enum: Partition for reporting MC status. See mc_flash_layout.h * medford_mc_status_hdr_t for layout on Medford. */ -#define NVRAM_PARTITION_TYPE_STATUS 0x1600 +#define NVRAM_PARTITION_TYPE_STATUS 0x1600 +/* enum: Spare partition 13 */ +#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700 +/* enum: Spare partition 14 */ +#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800 +/* enum: Spare partition 15 */ +#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900 +/* enum: Spare partition 16 */ +#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00 +/* enum: Factory defaults for dynamic configuration */ +#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00 +/* enum: Factory defaults for expansion ROM configuration */ +#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00 +/* enum: Field Replaceable Unit inventory information for use on IPMI + * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a + * subset of the information stored in this partition. 
+ */ +#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00 /* enum: Start of reserved value range (firmware may use for any purpose) */ -#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ -#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd /* enum: Recovery partition map (provided if real map is missing or corrupt) */ -#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe +#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe /* enum: Partition map (real map as stored in flash) */ -#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff +#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff #define NVRAM_PARTITION_TYPE_ID_LBN 0 #define NVRAM_PARTITION_TYPE_ID_WIDTH 16 /* LICENSED_APP_ID structuredef */ #define LICENSED_APP_ID_LEN 4 #define LICENSED_APP_ID_ID_OFST 0 +#define LICENSED_APP_ID_ID_LEN 4 /* enum: OpenOnload */ -#define LICENSED_APP_ID_ONLOAD 0x1 +#define LICENSED_APP_ID_ONLOAD 0x1 /* enum: PTP timestamping */ -#define LICENSED_APP_ID_PTP 0x2 +#define LICENSED_APP_ID_PTP 0x2 /* enum: SolarCapture Pro */ -#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 /* enum: SolarSecure filter engine */ -#define LICENSED_APP_ID_SOLARSECURE 0x8 +#define LICENSED_APP_ID_SOLARSECURE 0x8 /* enum: Performance monitor */ -#define LICENSED_APP_ID_PERF_MONITOR 0x10 +#define LICENSED_APP_ID_PERF_MONITOR 0x10 /* enum: SolarCapture Live */ -#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20 +#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20 /* enum: Capture SolarSystem */ -#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40 +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40 /* enum: Network Access Control */ -#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80 +#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80 /* enum: TCP Direct */ -#define LICENSED_APP_ID_TCP_DIRECT 0x100 +#define LICENSED_APP_ID_TCP_DIRECT 0x100 /* enum: Low Latency */ -#define LICENSED_APP_ID_LOW_LATENCY 0x200 +#define LICENSED_APP_ID_LOW_LATENCY 0x200 /* enum: SolarCapture Tap */ -#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400 +#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400 /* enum: Capture SolarSystem 40G */ #define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800 /* enum: Capture SolarSystem 1G */ -#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000 +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000 +/* enum: ScaleOut Onload */ +#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000 +/* enum: SCS Network Analytics Dashboard */ +#define LICENSED_APP_ID_DSHBRD 0x4000 +/* enum: SolarCapture Trading Analytics */ +#define LICENSED_APP_ID_SCATRD 0x8000 #define LICENSED_APP_ID_ID_LBN 0 #define LICENSED_APP_ID_ID_WIDTH 32 @@ -8140,6 +7006,12 @@ #define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1 #define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12 #define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_DSHBRD_LBN 14 +#define LICENSED_V3_APPS_DSHBRD_WIDTH 1 +#define LICENSED_V3_APPS_SCATRD_LBN 15 +#define LICENSED_V3_APPS_SCATRD_WIDTH 1 #define LICENSED_V3_APPS_MASK_LBN 0 #define LICENSED_V3_APPS_MASK_WIDTH 64 @@ -8185,11 +7057,23 @@ #define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3 #define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1 /* enum: This is a TX completion event, not a timestamp */ -#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0 +#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0 +/* enum: This is a TX completion event for a CTPIO transmit. The event format + * is the same as for TX_EV_COMPLETION. + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11 +/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The + * event format is the same as for TX_EV_TSTAMP_LO + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12 +/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The + * event format is the same as for TX_EV_TSTAMP_HI + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13 /* enum: This is the low part of a TX timestamp event */ -#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51 +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51 /* enum: This is the high part of a TX timestamp event */ -#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52 +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52 #define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24 #define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8 /* upper 16 bits of timestamp data */
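Bit fields throughout this header are described by _LBN (low bit number) and _WIDTH pairs within a little-endian dword, as in the TX_TIMESTAMP_EVENT structure just above. A minimal sketch (not part of the patch) of extracting such a field; the MCDI_FIELD() macro is hypothetical (the real drivers provide their own accessors) and only handles widths below 32 bits:

#include <stdint.h>
#include <stdbool.h>

/* Extract a field named by its _LBN/_WIDTH macro pair from a dword. */
#define MCDI_FIELD(dword, name) \
	(((dword) >> name ## _LBN) & ((1u << name ## _WIDTH) - 1u))

/* Assumes the first dword of the TX_TIMESTAMP_EVENT structure is passed in;
 * TX_EV_TYPE occupies bits 24-31 per the LBN/WIDTH values above. */
static bool tx_event_is_timestamp_part(uint32_t ev_dword0)
{
	uint32_t type = MCDI_FIELD(ev_dword0, TX_TIMESTAMP_EVENT_TX_EV_TYPE);

	return type == TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO ||
	       type == TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI ||
	       type == TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO ||
	       type == TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI;
}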
@@ -8232,6 +7116,42 @@ #define CTPIO_STATS_MAP_BUCKET_LBN 16 #define CTPIO_STATS_MAP_BUCKET_WIDTH 16 +/* MESSAGE_TYPE structuredef: When present this defines the meaning of a + * message, and is used to protect against chosen message attacks in signed + * messages, regardless of their origin. The message type also defines the + * signature cryptographic algorithm, encoding, and message fields included in + * the signature. The values are used in different commands but must be unique + * across all commands, e.g. MC_CMD_TSA_BIND_IN_SECURE_UNBIND uses a different + * message type than MC_CMD_SECURE_NIC_INFO_IN_STATUS. + */ +#define MESSAGE_TYPE_LEN 4 +#define MESSAGE_TYPE_MESSAGE_TYPE_OFST 0 +#define MESSAGE_TYPE_MESSAGE_TYPE_LEN 4 +#define MESSAGE_TYPE_UNUSED 0x0 /* enum */ +/* enum: Message type value for the response to a + * MC_CMD_TSA_BIND_IN_SECURE_UNBIND message. TSA_SECURE_UNBIND messages are + * ECDSA SECP384R1 signed using SHA384 message digest algorithm over fields + * MESSAGE_TYPE, TSANID, TSAID, and UNBINDTOKEN, and encoded as suggested by + * RFC6979 (section 2.4). + */ +#define MESSAGE_TYPE_TSA_SECURE_UNBIND 0x1 +/* enum: Message type value for the response to a + * MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION message. TSA_SECURE_DECOMMISSION + * messages are ECDSA SECP384R1 signed using SHA384 message digest algorithm + * over fields MESSAGE_TYPE, TSAID, USER, and REASON, and encoded as suggested + * by RFC6979 (section 2.4). + */ +#define MESSAGE_TYPE_TSA_SECURE_DECOMMISSION 0x2 +/* enum: Message type value for the response to a + * MC_CMD_SECURE_NIC_INFO_IN_STATUS message. This enum value is not sequential + * to other message types for backwards compatibility as the message type for + * MC_CMD_SECURE_NIC_INFO_IN_STATUS was defined before the existence of this + * global enum. + */ +#define MESSAGE_TYPE_SECURE_NIC_INFO_STATUS 0xdb4 +#define MESSAGE_TYPE_MESSAGE_TYPE_LBN 0 +#define MESSAGE_TYPE_MESSAGE_TYPE_WIDTH 32 + /***********************************/ /* MC_CMD_READ_REGS @@ -8240,7 +7160,7 @@ #define MC_CMD_READ_REGS 0x50 #undef MC_CMD_0x50_PRIVILEGE_CTG -#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_READ_REGS_IN msgrequest */ #define MC_CMD_READ_REGS_IN_LEN 0 @@ -8274,17 +7194,22 @@ #define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num)) /* Size, in entries */ #define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4 /* Desired instance.
Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4 /* The initial timer value. The load value is ignored if the timer mode is DIS. */ #define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8 +#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4 /* The reload value is ignored in one-shot modes */ #define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12 +#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4 /* tbd */ #define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4 #define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0 #define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1 #define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1 @@ -8300,6 +7225,7 @@ #define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6 #define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1 #define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20 +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4 /* enum: Disabled */ #define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0 /* enum: Immediate */ @@ -8310,13 +7236,16 @@ #define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3 /* Target EVQ for wakeups if in wakeup mode. */ #define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24 +#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4 /* Target interrupt if in interrupting mode (note union with target EVQ). Use * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test * purposes. */ #define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24 +#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4 /* Event Counter Mode. */ #define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28 +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4 /* enum: Disabled */ #define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0 /* enum: Disabled */ @@ -8327,6 +7256,7 @@ #define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3 /* Event queue packet count threshold. */ #define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32 +#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36 #define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8 @@ -8339,6 +7269,7 @@ #define MC_CMD_INIT_EVQ_OUT_LEN 4 /* Only valid if INTRFLAG was true */ #define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0 +#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4 /* MC_CMD_INIT_EVQ_V2_IN msgrequest */ #define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44 @@ -8346,17 +7277,22 @@ #define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num)) /* Size, in entries */ #define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4 /* The initial timer value. The load value is ignored if the timer mode is DIS. 
*/ #define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4 /* The reload value is ignored in one-shot modes */ #define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4 /* tbd */ #define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1 @@ -8393,6 +7329,7 @@ */ #define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3 #define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4 /* enum: Disabled */ #define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0 /* enum: Immediate */ @@ -8403,13 +7340,16 @@ #define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3 /* Target EVQ for wakeups if in wakeup mode. */ #define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24 +#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4 /* Target interrupt if in interrupting mode (note union with target EVQ). Use * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test * purposes. */ #define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24 +#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4 /* Event Counter Mode. */ #define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28 +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4 /* enum: Disabled */ #define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0 /* enum: Disabled */ @@ -8420,6 +7360,7 @@ #define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3 /* Event queue packet count threshold. */ #define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32 +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36 #define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8 @@ -8432,8 +7373,10 @@ #define MC_CMD_INIT_EVQ_V2_OUT_LEN 8 /* Only valid if INTRFLAG was true */ #define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0 +#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4 /* Actual configuration applied on the card */ #define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1 @@ -8448,17 +7391,17 @@ #define QUEUE_CRC_MODE_MODE_LBN 0 #define QUEUE_CRC_MODE_MODE_WIDTH 4 /* enum: No CRC. */ -#define QUEUE_CRC_MODE_NONE 0x0 +#define QUEUE_CRC_MODE_NONE 0x0 /* enum: CRC Fiber channel over ethernet. */ -#define QUEUE_CRC_MODE_FCOE 0x1 +#define QUEUE_CRC_MODE_FCOE 0x1 /* enum: CRC (digest) iSCSI header only. */ -#define QUEUE_CRC_MODE_ISCSI_HDR 0x2 +#define QUEUE_CRC_MODE_ISCSI_HDR 0x2 /* enum: CRC (digest) iSCSI header and payload. */ -#define QUEUE_CRC_MODE_ISCSI 0x3 +#define QUEUE_CRC_MODE_ISCSI 0x3 /* enum: CRC Fiber channel over IP over ethernet. */ -#define QUEUE_CRC_MODE_FCOIPOE 0x4 +#define QUEUE_CRC_MODE_FCOIPOE 0x4 /* enum: CRC MPA. */ -#define QUEUE_CRC_MODE_MPA 0x5 +#define QUEUE_CRC_MODE_MPA 0x5 #define QUEUE_CRC_MODE_SPARE_LBN 4 #define QUEUE_CRC_MODE_SPARE_WIDTH 4 @@ -8482,17 +7425,22 @@ #define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num)) /* Size, in entries */ #define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4 /* The EVQ to send events to. This is an index originally specified to INIT_EVQ */ #define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4 /* The value to put in the event data. Check hardware spec. for valid range. 
*/ #define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4 /* There will be more flags here. */ #define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4 #define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1 #define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1 @@ -8511,8 +7459,10 @@ #define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ #define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28 #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8 @@ -8527,17 +7477,26 @@ #define MC_CMD_INIT_RXQ_EXT_IN_LEN 544 /* Size, in entries */ #define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0 -/* The EVQ to send events to. This is an index originally specified to INIT_EVQ +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. */ #define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4 -/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ #define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4 /* There will be more flags here. */ #define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1 @@ -8555,26 +7514,37 @@ #define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10 #define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4 /* enum: One packet per descriptor (for normal networking) */ -#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 +#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 /* enum: Pack multiple packets into large descriptors (for SolarCapture) */ -#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. 
*/ +#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 #define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 #define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ #define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28 #define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8 @@ -8583,6 +7553,116 @@ #define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64 /* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ #define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4 + +/* MC_CMD_INIT_RXQ_V3_IN msgrequest */ +#define MC_CMD_INIT_RXQ_V3_IN_LEN 560 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V3_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V3_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V3_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */ +#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4 +/* The number of packet buffers that will be contained within each + * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. 
+ */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 +/* The length in bytes of the area in each packet buffer that can be written to + * by the adapter. This is used to store the packet prefix and the packet + * payload. This length does not include any end padding added by the driver. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548 +#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4 +/* The length in bytes of a single packet buffer within an + * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552 +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4 +/* The maximum time in nanoseconds that the datapath will be backpressured if + * there are no RX descriptors available. If the timeout is reached and there + * are still no descriptors then the packet will be dropped. A timeout of 0 + * means the datapath will never be blocked. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556 +#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4 /* MC_CMD_INIT_RXQ_OUT msgresponse */ #define MC_CMD_INIT_RXQ_OUT_LEN 0 @@ -8590,6 +7670,9 @@ /* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */ #define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0 +/* MC_CMD_INIT_RXQ_V3_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_V3_OUT_LEN 0 +
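The ES_* fields added to MC_CMD_INIT_RXQ_V3_IN describe the equal-stride super-buffer geometry: each receive descriptor points at a bucket of ES_PACKET_BUFFERS_PER_BUCKET buffers, each ES_PACKET_STRIDE bytes long, with at most ES_MAX_DMA_LEN adapter-writable bytes per buffer. A minimal sketch (not part of the patch) of the arithmetic a driver might apply; the validity rules are assumptions drawn from the field comments above, not from SF-119419-TC:

#include <stdint.h>
#include <stdbool.h>

struct es_rxq_geometry {
	uint32_t buffers_per_bucket;	/* ES_PACKET_BUFFERS_PER_BUCKET */
	uint32_t max_dma_len;		/* ES_MAX_DMA_LEN: prefix + payload */
	uint32_t packet_stride;		/* ES_PACKET_STRIDE: per-buffer size */
};

static bool es_geometry_valid(const struct es_rxq_geometry *g)
{
	/* The adapter-writable area must fit inside one packet buffer. */
	return g->buffers_per_bucket > 0 &&
	       g->max_dma_len > 0 &&
	       g->max_dma_len <= g->packet_stride;
}

static uint64_t es_bucket_size(const struct es_rxq_geometry *g)
{
	/* One descriptor ("bucket") covers this many bytes of host memory. */
	return (uint64_t)g->buffers_per_bucket * g->packet_stride;
}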
*/ #define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4 /* The value to put in the event data. Check hardware spec. for valid range. */ #define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8 +#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4 /* There will be more flags here. */ #define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1 @@ -8689,10 +7784,14 @@ #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ #define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28 #define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8 @@ -8702,6 +7801,7 @@ #define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64 /* Flags related to Qbb flow control mode. */ #define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4 #define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0 #define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1 #define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1 @@ -8729,6 +7829,7 @@ * passed to INIT_EVQ */ #define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4 /* MC_CMD_FINI_EVQ_OUT msgresponse */ #define MC_CMD_FINI_EVQ_OUT_LEN 0 @@ -8747,6 +7848,7 @@ #define MC_CMD_FINI_RXQ_IN_LEN 4 /* Instance of RXQ to destroy */ #define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4 /* MC_CMD_FINI_RXQ_OUT msgresponse */ #define MC_CMD_FINI_RXQ_OUT_LEN 0 @@ -8765,6 +7867,7 @@ #define MC_CMD_FINI_TXQ_IN_LEN 4 /* Instance of TXQ to destroy */ #define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4 /* MC_CMD_FINI_TXQ_OUT msgresponse */ #define MC_CMD_FINI_TXQ_OUT_LEN 0 @@ -8783,6 +7886,7 @@ #define MC_CMD_DRIVER_EVENT_IN_LEN 12 /* Handle of target EVQ */ #define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0 +#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4 /* Bits 0 - 63 of event */ #define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4 #define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8 @@ -8809,11 +7913,12 @@ #define MC_CMD_PROXY_CMD_IN_LEN 4 /* The handle of the target function. 
*/ #define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0 +#define MC_CMD_PROXY_CMD_IN_TARGET_LEN 4 #define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0 #define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16 #define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16 #define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16 -#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */ +#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */ /* MC_CMD_PROXY_CMD_OUT msgresponse */ #define MC_CMD_PROXY_CMD_OUT_LEN 0 @@ -8824,8 +7929,9 @@ #define MC_PROXY_STATUS_BUFFER_LEN 16 /* Handle allocated by the firmware for this proxy transaction */ #define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4 /* enum: An invalid handle. */ -#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0 #define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0 #define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32 /* The requesting physical function number */ @@ -8854,6 +7960,7 @@ * elevated privilege mask granted to the requesting function. */ #define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12 +#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4 #define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96 #define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32 @@ -8871,6 +7978,7 @@ /* MC_CMD_PROXY_CONFIGURE_IN msgrequest */ #define MC_CMD_PROXY_CONFIGURE_IN_LEN 108 #define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0 +#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4 #define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0 #define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS @@ -8882,6 +7990,7 @@ #define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8 /* Must be a power of 2 */ #define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12 +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS * of blocks, each of the size REPLY_BLOCK_SIZE. */ @@ -8891,6 +8000,7 @@ #define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20 /* Must be a power of 2 */ #define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24 +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if * host intends to complete proxied operations by using MC_CMD_PROXY_CMD. 
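As an illustration of the conventions used throughout this header: each field is a little-endian dword (or run of dwords) at byte offset _OFST with byte length _LEN, and sub-fields occupy _WIDTH bits starting at bit _LBN within their containing dword. A minimal sketch of how a request such as MC_CMD_INIT_RXQ_V3 might be populated from these definitions; the mcdi_set_dword/mcdi_set_field helpers and the ES_* geometry values are hypothetical (the sfc base code has its own macros for this, e.g. MCDI_IN_SET_DWORD), and the header above is assumed to be included:

#include <stdint.h>
#include <string.h>

/* Store a 32-bit MCDI field at its _OFST (little-endian host assumed). */
static void mcdi_set_dword(uint8_t *buf, unsigned int ofst, uint32_t value)
{
	memcpy(buf + ofst, &value, sizeof(value));
}

/* Merge a sub-field into the dword at _OFST using its _LBN/_WIDTH pair. */
static void mcdi_set_field(uint8_t *buf, unsigned int ofst, unsigned int lbn,
			   unsigned int width, uint32_t value)
{
	uint32_t dword;
	uint32_t mask = (width < 32 ? (1u << width) - 1u : ~0u) << lbn;

	memcpy(&dword, buf + ofst, sizeof(dword));
	dword = (dword & ~mask) | ((value << lbn) & mask);
	memcpy(buf + ofst, &dword, sizeof(dword));
}

/* Example: select the equal-stride super-buffer DMA mode described above
 * and fill in the ES_* geometry that is only consulted in that mode
 * (64 buffers of 2048 bytes per bucket are illustrative values only).
 */
static void init_rxq_v3_set_es_mode(uint8_t *req)
{
	mcdi_set_field(req, MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST,
		       MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN,
		       MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH,
		       MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER);
	mcdi_set_dword(req,
		       MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST,
		       64);
	mcdi_set_dword(req, MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST, 2048);
}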
@@ -8901,8 +8011,10 @@ #define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32 /* Must be a power of 2, or zero if this buffer is not provided */ #define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36 +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4 /* Applies to all three buffers */ #define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40 +#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4 /* A bit mask defining which MCDI operations may be proxied */ #define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44 #define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64 @@ -8910,6 +8022,7 @@ /* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS @@ -8921,6 +8034,7 @@ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8 /* Must be a power of 2 */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS * of blocks, each of the size REPLY_BLOCK_SIZE. */ @@ -8930,6 +8044,7 @@ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20 /* Must be a power of 2 */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if * host intends to complete proxied operations by using MC_CMD_PROXY_CMD. @@ -8940,12 +8055,15 @@ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32 /* Must be a power of 2, or zero if this buffer is not provided */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4 /* Applies to all three buffers */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4 /* A bit mask defining which MCDI operations may be proxied */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4 /* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */ #define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0 @@ -8966,7 +8084,9 @@ /* MC_CMD_PROXY_COMPLETE_IN msgrequest */ #define MC_CMD_PROXY_COMPLETE_IN_LEN 12 #define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0 +#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4 #define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4 +#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4 /* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply * is stored in the REPLY_BUFF. 
*/ @@ -8982,6 +8102,7 @@ */ #define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3 #define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8 +#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4 /* MC_CMD_PROXY_COMPLETE_OUT msgresponse */ #define MC_CMD_PROXY_COMPLETE_OUT_LEN 0 @@ -9002,17 +8123,22 @@ #define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8 /* Owner ID to use */ #define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4 /* Size of buffer table pages to use, in bytes (note that only a few values are * legal on any specific hardware). */ #define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4 /* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */ #define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12 #define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4 #define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4 /* Buffer table IDs for use in DMA descriptors. */ #define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4 /***********************************/ @@ -9029,10 +8155,13 @@ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num)) #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4 /* ID */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4 /* Num entries */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4 /* Buffer table entry address */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8 @@ -9056,48 +8185,11 @@ /* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */ #define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4 #define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0 +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4 /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */ #define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0 -/* PORT_CONFIG_ENTRY structuredef */ -#define PORT_CONFIG_ENTRY_LEN 16 -/* External port number (label) */ -#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0 -#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1 -#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0 -#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8 -/* Port core location */ -#define PORT_CONFIG_ENTRY_CORE_OFST 1 -#define PORT_CONFIG_ENTRY_CORE_LEN 1 -#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */ -#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */ -#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */ -#define PORT_CONFIG_ENTRY_CORE_LBN 8 -#define PORT_CONFIG_ENTRY_CORE_WIDTH 8 -/* Internal number (HW resource) relative to the core */ -#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2 -#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1 -#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16 -#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8 -/* Reserved */ -#define PORT_CONFIG_ENTRY_RSVD_OFST 3 -#define PORT_CONFIG_ENTRY_RSVD_LEN 1 -#define PORT_CONFIG_ENTRY_RSVD_LBN 24 -#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8 -/* Bitmask of KR lanes used by the port */ -#define PORT_CONFIG_ENTRY_LANES_OFST 4 -#define PORT_CONFIG_ENTRY_LANES_LBN 32 -#define PORT_CONFIG_ENTRY_LANES_WIDTH 32 -/* Port capabilities (MC_CMD_PHY_CAP_*) */ -#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8 -#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64 -#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32 -/* Reserved (align to 16 bytes) */ -#define 
PORT_CONFIG_ENTRY_RSVD2_OFST 12 -#define PORT_CONFIG_ENTRY_RSVD2_LBN 96 -#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32 - /***********************************/ /* MC_CMD_FILTER_OP @@ -9112,18 +8204,19 @@ #define MC_CMD_FILTER_OP_IN_LEN 108 /* identifies the type of operation requested */ #define MC_CMD_FILTER_OP_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_IN_OP_LEN 4 /* enum: single-recipient filter insert */ -#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0 +#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0 /* enum: single-recipient filter remove */ -#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1 +#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1 /* enum: multi-recipient filter subscribe */ -#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2 +#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2 /* enum: multi-recipient filter unsubscribe */ -#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3 +#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3 /* enum: replace one recipient with another (warning - the filter handle may * change) */ -#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4 +#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4 /* filter handle (for remove / unsubscribe operations) */ #define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4 #define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8 @@ -9132,8 +8225,10 @@ /* The port ID associated with the v-adaptor which should contain this filter. */ #define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4 /* fields to include in match criteria */ #define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4 #define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0 #define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1 #define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1 @@ -9164,43 +8259,49 @@ #define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 /* receive destination */ #define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4 /* enum: drop packets */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0 +#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0 /* enum: receive to host */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 +#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 /* enum: receive to MC */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 +#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 /* enum: loop back to TXDP 0 */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 /* enum: loop back to TXDP 1 */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 /* receive queue handle (for multiple queue modes, this is the base queue) */ #define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4 /* receive mode */ #define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4 /* enum: receive to just the specified queue */ -#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0 +#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0 /* enum: receive to multiple queues using RSS context */ -#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1 +#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1 /* enum: receive to multiple queues using .1p mapping */ -#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2 +#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2 /* enum: install a filter entry that will never match; for test purposes only */ -#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 /* RSS context (for RX_MODE_RSS) or .1p mapping handle (for * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or * 
MC_CMD_DOT1P_MAPPING_ALLOC. */ #define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4 /* transmit domain (reserved; set to 0) */ #define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4 /* transmit destination (either set the MAC and/or PM bits for explicit * control, or set this field to TX_DEST_DEFAULT for sensible default * behaviour) */ #define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4 /* enum: request default behaviour (based on filter type) */ -#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff #define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0 #define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1 #define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1 @@ -9231,8 +8332,10 @@ #define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2 /* Firmware defined register 0 to match (reserved; set to 0) */ #define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4 /* Firmware defined register 1 to match (reserved; set to 0) */ #define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72 +#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4 /* source IP address to match (as bytes in network order; set last 12 bytes to * 0 for IPv4 address) */ @@ -9251,6 +8354,7 @@ #define MC_CMD_FILTER_OP_EXT_IN_LEN 172 /* identifies the type of operation requested */ #define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_FILTER_OP_IN/OP */ /* filter handle (for remove / unsubscribe operations) */ @@ -9261,8 +8365,10 @@ /* The port ID associated with the v-adaptor which should contain this filter. */ #define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4 /* fields to include in match criteria */ #define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1 @@ -9321,43 +8427,49 @@ #define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 /* receive destination */ #define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4 /* enum: drop packets */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0 /* enum: receive to host */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1 /* enum: receive to MC */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2 /* enum: loop back to TXDP 0 */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3 /* enum: loop back to TXDP 1 */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4 /* receive queue handle (for multiple queue modes, this is the base queue) */ #define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4 /* receive mode */ #define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4 /* enum: receive to just the specified queue */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0 /* enum: receive to multiple queues using RSS context */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1 /* enum: 
receive to multiple queues using .1p mapping */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2 /* enum: install a filter entry that will never match; for test purposes only */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 /* RSS context (for RX_MODE_RSS) or .1p mapping handle (for * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or * MC_CMD_DOT1P_MAPPING_ALLOC. */ #define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4 /* transmit domain (reserved; set to 0) */ #define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4 /* transmit destination (either set the MAC and/or PM bits for explicit * control, or set this field to TX_DEST_DEFAULT for sensible default * behaviour) */ #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4 /* enum: request default behaviour (based on filter type) */ -#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1 #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1 @@ -9388,27 +8500,29 @@ #define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2 /* Firmware defined register 0 to match (reserved; set to 0) */ #define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4 /* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP * protocol is GRE) to match (as bytes in network order; set last byte to 0 for * VXLAN/NVGRE, or 1 for Geneve) */ #define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4 #define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24 #define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24 #define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8 /* enum: Match VXLAN traffic with this VNI */ -#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0 /* enum: Match Geneve traffic with this VNI */ -#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1 /* enum: Reserved for experimental development use */ -#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe #define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24 #define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24 #define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8 /* enum: Match NVGRE traffic with this VSID */ -#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0 /* source IP address to match (as bytes in network order; set last 12 bytes to * 0 for IPv4 address) */ @@ -9458,10 +8572,12 @@ * to 0) */ #define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4 /* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set * to 0) */ #define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4 /* VXLAN/NVGRE inner frame source IP address to match (as bytes in network * order; set last 12 bytes to 0 for IPv4 address) */ @@ -9473,82 +8589,357 @@ #define 
MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156 #define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16 -/* MC_CMD_FILTER_OP_OUT msgresponse */ -#define MC_CMD_FILTER_OP_OUT_LEN 12 -/* identifies the type of operation requested */ -#define MC_CMD_FILTER_OP_OUT_OP_OFST 0 -/* Enum values, see field(s): */ -/* MC_CMD_FILTER_OP_IN/OP */ -/* Returned filter handle (for insert / subscribe operations). Note that these - * handles should be considered opaque to the host, although a value of - * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. +/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional + * filter actions for Intel's DPDK (Data Plane Development Kit, dpdk.org) via + * its rte_flow API. This extension is only useful with the sfc_efx driver + * included as part of DPDK, used in conjunction with the dpdk datapath + * firmware variant. */ -#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4 -#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8 -#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 -#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 -/* enum: guaranteed invalid filter handle (low 32 bits) */ -#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff -/* enum: guaranteed invalid filter handle (high 32 bits) */ -#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff - -/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */ -#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12 +#define MC_CMD_FILTER_OP_V3_IN_LEN 180 /* identifies the type of operation requested */ -#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0 -/* Enum values, see field(s): */ -/* MC_CMD_FILTER_OP_EXT_IN/OP */ -/* Returned filter handle (for insert / subscribe operations). Note that these - * handles should be considered opaque to the host, although a value of - * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. - */ -#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4 -#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8 -#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4 -#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8 +#define MC_CMD_FILTER_OP_V3_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_V3_IN_OP_LEN 4 /* Enum values, see field(s): */ -/* MC_CMD_FILTER_OP_OUT/HANDLE */ - - -/***********************************/ -/* MC_CMD_GET_PARSER_DISP_INFO - * Get information related to the parser-dispatcher subsystem +/* MC_CMD_FILTER_OP_IN/OP */ +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. 
*/ -#define MC_CMD_GET_PARSER_DISP_INFO 0xe4 -#undef MC_CMD_0xe4_PRIVILEGE_CTG - -#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4 -/* identifies the type of operation requested */ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 -/* enum: read the list of supported RX filter matches */ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 -/* enum: read flags indicating restrictions on filter insertion for the calling - * client +#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_LEN 4 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define 
MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_LEN 4 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only */ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 -/* enum: read properties relating to security rules (Medford-only; for use by - * SolarSecure apps, not directly by drivers. See SF-114946-SW.) +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. 
*/ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3 -/* enum: read the list of supported RX filter matches for VXLAN/NVGRE - * encapsulated frames, which follow a different match sequence to normal - * frames (Medford only) +#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_LEN 4 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_LEN 4 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) */ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4 - -/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ -#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 -#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252 -#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num)) -/* identifies the type of operation requested */ -#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_LEN 4 +/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP + * protocol is GRE) to match (as bytes in network order; set last byte to 0 for + * VXLAN/NVGRE, or 1 for Geneve) + */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8 +/* enum: Match VXLAN traffic with this VNI */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_VXLAN 0x0 +/* enum: Match Geneve traffic with this VNI */ +#define 
MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1 +/* enum: Reserved for experimental development use */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8 +/* enum: Match NVGRE traffic with this VSID */ +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_NVGRE 0x0 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_V3_IN_DST_IP_LEN 16 +/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_OFST 108 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_LEN 6 +/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_OFST 114 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_LEN 2 +/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in + * network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_OFST 116 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_LEN 6 +/* VXLAN/NVGRE inner frame destination port to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_OFST 122 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_LEN 2 +/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_OFST 124 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_LEN 2 +/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_OFST 126 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_OFST 128 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to + * 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_OFST 130 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_LEN 2 +/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_OFST 132 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_LEN 4 +/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_OFST 136 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_LEN 4 +/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_OFST 140 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_LEN 16 +/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16 +/* Set an action for all packets matching this filter. The DPDK driver and dpdk + * f/w variant use their own specific delivery structures, which are documented + * in the DPDK Firmware Driver Interface (SF-119419-TC). 
Requesting anything + * other than MATCH_ACTION_NONE when the NIC is running another f/w variant + * will cause the filter insertion to fail with ENOTSUP. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4 +/* enum: do nothing extra */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_NONE 0x0 +/* enum: Set the match flag in the packet prefix for packets matching the + * filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to + * support the DPDK rte_flow "FLAG" action. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG 0x1 +/* enum: Insert MATCH_MARK_VALUE into the packet prefix for packets matching + * the filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to + * support the DPDK rte_flow "MARK" action. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2 +/* the mark value for MATCH_ACTION_MARK. Requesting a value larger than the + * maximum (obtained from MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX) + * will cause the filter insertion to fail with EINVAL. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4 + +/* MC_CMD_FILTER_OP_OUT msgresponse */ +#define MC_CMD_FILTER_OP_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_OUT_OP_OFST 0 +#define MC_CMD_FILTER_OP_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 +/* enum: guaranteed invalid filter handle (low 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff +/* enum: guaranteed invalid filter handle (high 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff + +/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */ +#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0 +#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_EXT_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. 
+ */ +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_OUT/HANDLE */ + + +/***********************************/ +/* MC_CMD_GET_PARSER_DISP_INFO + * Get information related to the parser-dispatcher subsystem + */ +#define MC_CMD_GET_PARSER_DISP_INFO 0xe4 +#undef MC_CMD_0xe4_PRIVILEGE_CTG + +#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4 +/* enum: read the list of supported RX filter matches */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 +/* enum: read flags indicating restrictions on filter insertion for the calling + * client + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 +/* enum: read properties relating to security rules (Medford-only; for use by + * SolarSecure apps, not directly by drivers. See SF-114946-SW.) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3 +/* enum: read the list of supported RX filter matches for VXLAN/NVGRE + * encapsulated frames, which follow a different match sequence to normal + * frames (Medford only) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4 + +/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num)) +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ /* number of supported match types */ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4 /* array of supported match types (valid MATCH_FIELDS values for * MC_CMD_FILTER_OP) sorted in decreasing priority order */ @@ -9561,10 +8952,12 @@ #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8 /* identifies the type of operation requested */ #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ /* bitfield of filter insertion restrictions */ #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4 #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0 #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1 @@ -9578,28 +8971,37 @@ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36 /* identifies the type of operation requested */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ /* a version number representing the set of rule lookups that are implemented * by the currently running firmware */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN 
4 /* enum: implements lookup sequences described in SF-114946-SW draft C */ -#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0 /* the number of nodes in the subnet map */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_LEN 4 /* the number of entries in one subnet map node */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_LEN 4 /* minimum valid value for a subnet ID in a subnet map leaf */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_LEN 4 /* maximum valid value for a subnet ID in a subnet map leaf */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_LEN 4 /* the number of entries in the local and remote port range maps */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_LEN 4 /* minimum valid value for a portrange ID in a port range map leaf */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_LEN 4 /* maximum valid value for a portrange ID in a port range map leaf */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_LEN 4 /***********************************/ @@ -9607,7 +9009,9 @@ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging. * Please note that this interface is only of use to debug tools which have * knowledge of firmware and hardware data structures; nothing here is intended - * for use by normal driver code. + * for use by normal driver code. Note that although this command is in the + * Admin privilege group, in tamperproof adapters, only read operations are + * permitted. */ #define MC_CMD_PARSER_DISP_RW 0xe5 #undef MC_CMD_0xe5_PRIVILEGE_CTG @@ -9618,42 +9022,58 @@ #define MC_CMD_PARSER_DISP_RW_IN_LEN 32 /* identifies the target of the operation */ #define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0 +#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4 /* enum: RX dispatcher CPU */ -#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 +#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 /* enum: TX dispatcher CPU */ -#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 -/* enum: Lookup engine (with original metadata format) */ -#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 +#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 +/* enum: Lookup engine (with original metadata format). Deprecated; used only + * by cmdclient as a fallback for very old Huntington firmware, and not + * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA + * instead. 
+ */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 /* enum: Lookup engine (with requested metadata format) */ -#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 +#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 /* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */ -#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0 +#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0 /* enum: RX1 dispatcher CPU (only valid for Medford) */ -#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4 +#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4 /* enum: Miscellaneous other state (only valid for Medford) */ -#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5 +#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5 /* identifies the type of operation requested */ #define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 -/* enum: read a word of DICPU DMEM or a LUE entry */ -#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0 -/* enum: write a word of DICPU DMEM or a LUE entry */ -#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1 -/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */ -#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2 +#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4 +/* enum: Read a word of DICPU DMEM or a LUE entry */ +#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0 +/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on + * tamperproof adapters. + */ +#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1 +/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not + * permitted on tamperproof adapters. + */ +#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2 /* data memory address (DICPU targets) or LUE index (LUE targets) */ #define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8 +#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4 /* selector (for MISC_STATE target) */ #define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8 +#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4 /* enum: Port to datapath mapping */ -#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1 +#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1 /* value to write (for DMEM writes) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4 /* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16 +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4 /* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4 /* value to write (for LUE writes) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12 #define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20 @@ -9662,6 +9082,7 @@ #define MC_CMD_PARSER_DISP_RW_OUT_LEN 52 /* value read (for DMEM reads) */ #define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0 +#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4 /* value read (for LUE reads) */ #define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0 #define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20 @@ -9674,8 +9095,8 @@ #define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0 #define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4 #define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4 -#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */ -#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */ 
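The read-modify-write rule spelled out in the field comments above (new = (old & mask) ^ value) lets a single RMW both clear bits via the AND mask and set or toggle bits via the XOR value. A small self-contained check of that arithmetic (the helper name is hypothetical):

#include <assert.h>
#include <stdint.h>

/* MC_CMD_PARSER_DISP_RW OP=RMW combining rule: new = (old & mask) ^ value */
static uint32_t dmem_rmw(uint32_t old, uint32_t and_mask, uint32_t xor_value)
{
	return (old & and_mask) ^ xor_value;
}

int main(void)
{
	/* Force bits 7:4 of a DMEM word to 0xA while preserving the rest:
	 * clear the field with the AND mask, then set it with the XOR value.
	 */
	uint32_t old = 0x1234;

	assert(dmem_rmw(old, ~0x00F0u, 0xAu << 4) == 0x12A4);
	return 0;
}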
+#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */ +#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */ /***********************************/ @@ -9707,6 +9128,7 @@ #define MC_CMD_SET_PF_COUNT_IN_LEN 4 /* New number of PFs on the device. */ #define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0 +#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4 /* MC_CMD_SET_PF_COUNT_OUT msgresponse */ #define MC_CMD_SET_PF_COUNT_OUT_LEN 0 @@ -9728,6 +9150,7 @@ #define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4 /* Identifies the port assignment for this function. */ #define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0 +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4 /***********************************/ @@ -9743,6 +9166,7 @@ #define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4 /* Identifies the port assignment for this function. */ #define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0 +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4 /* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */ #define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0 @@ -9761,8 +9185,10 @@ #define MC_CMD_ALLOC_VIS_IN_LEN 8 /* The minimum number of VIs that is acceptable */ #define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4 /* The maximum number of VIs that would be useful */ #define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4 +#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4 /* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request. * Use extended version in new code. @@ -9770,21 +9196,26 @@ #define MC_CMD_ALLOC_VIS_OUT_LEN 8 /* The number of VIs allocated on this function */ #define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4 /* The base absolute VI number allocated to this function. Required to * correctly interpret wakeup events. */ #define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4 +#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4 /* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */ #define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12 /* The number of VIs allocated on this function */ #define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4 /* The base absolute VI number allocated to this function. Required to * correctly interpret wakeup events. */ #define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4 /* Function's port vi_shift value (always 0 on Huntington) */ #define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4 /***********************************/ @@ -9820,15 +9251,20 @@ #define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20 /* Number of VFs currently enabled. */ #define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4 /* Max number of VFs before sriov stride and offset may need to be changed. */ #define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4 #define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8 +#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4 #define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0 #define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1 /* RID offset of first VF from PF. */ #define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4 /* RID offset of each subsequent VF from the previous. */ #define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4 /***********************************/ @@ -9844,19 +9280,24 @@ #define MC_CMD_SET_SRIOV_CFG_IN_LEN 20 /* Number of VFs currently enabled. 
*/ #define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4 /* Max number of VFs before sriov stride and offset may need to be changed. */ #define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4 #define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8 +#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4 #define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0 #define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1 /* RID offset of first VF from PF, or 0 for no change, or * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset. */ #define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4 /* RID offset of each subsequent VF from the previous, 0 for no change, or * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride. */ #define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4 /* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */ #define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0 @@ -9879,12 +9320,15 @@ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12 /* The number of VIs allocated on this function */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4 /* The base absolute VI number allocated to this function. Required to * correctly interpret wakeup events. */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4 /* Function's port vi_shift value (always 0 on Huntington) */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4 /***********************************/ @@ -9900,6 +9344,7 @@ #define MC_CMD_DUMP_VI_STATE_IN_LEN 4 /* The VI number to query. */ #define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0 +#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4 /* MC_CMD_DUMP_VI_STATE_OUT msgresponse */ #define MC_CMD_DUMP_VI_STATE_OUT_LEN 96 @@ -9933,6 +9378,7 @@ #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24 /* Combined metadata field. */ #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16 @@ -10015,6 +9461,7 @@ #define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4 /* Handle for allocated push I/O buffer. */ #define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4 /***********************************/ @@ -10030,6 +9477,7 @@ #define MC_CMD_FREE_PIOBUF_IN_LEN 4 /* Handle for allocated push I/O buffer. */ #define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4 /* MC_CMD_FREE_PIOBUF_OUT msgresponse */ #define MC_CMD_FREE_PIOBUF_OUT_LEN 0 @@ -10048,6 +9496,7 @@ #define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4 /* VI number to get information for. 
*/ #define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4 /* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */ #define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4 @@ -10070,6 +9519,7 @@ #define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19 #define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1 #define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4 /***********************************/ @@ -10085,6 +9535,7 @@ #define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8 /* VI number to set information for. */ #define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4 /* Transaction processing steering hint 1 for use with the Rx Queue. */ #define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4 #define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1 @@ -10104,6 +9555,7 @@ #define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51 #define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1 #define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4 /* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */ #define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0 @@ -10121,22 +9573,25 @@ /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4 /* enum: MISC. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0 /* enum: IDO. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1 /* enum: RO. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2 /* enum: TPH Type. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3 /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */ /* Amalgamated TLP info word. */ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1 @@ -10185,10 +9640,12 @@ /* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */ #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */ /* Amalgamated TLP info word. 
*/ #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0 @@ -10229,7 +9686,7 @@ #define MC_CMD_SATELLITE_DOWNLOAD 0x91 #undef MC_CMD_0x91_PRIVILEGE_CTG -#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs * are subtle, and so downloads must proceed in a number of phases. @@ -10256,57 +9713,61 @@ * in a command from the host.) */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */ /* Target for download. (These match the blob numbers defined in * mc_flash_layout.h.) */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa 
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb /* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc /* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd /* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe /* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf /* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff /* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4 /* enum: Last chunk, containing checksum rather than data */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff /* enum: Abort download of this item */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe /* Length of this chunk in bytes */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4 /* Data for this chunk */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16 #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4 @@ -10317,24 +9778,26 @@ #define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8 /* Same as MC_CMD_ERR field, but included as 0 in success cases */ #define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4 /* Extra status information */ #define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4 /* enum: Code download OK, completed. */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0 /* enum: Code download aborted as requested. */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1 /* enum: Code download OK so far, send next chunk. 
*/ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2 /* enum: Download phases out of sequence */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100 /* enum: Bad target for this phase */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101 /* enum: Chunk ID out of sequence */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200 /* enum: Chunk length zero or too large */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201 /* enum: Checksum was incorrect */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300 /***********************************/ @@ -10356,6 +9819,7 @@ #define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 /* First word of flags. */ #define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4 #define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3 #define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4 @@ -10418,48 +9882,58 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test 
firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0 @@ -10469,39 +9943,43 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define 
MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0 @@ -10511,36 +9989,42 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: 
Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ #define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4 /* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */ #define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0 @@ -10549,6 +10033,7 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72 /* First word of flags. */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4 @@ -10611,48 +10096,58 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 
*/ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0 @@ -10662,39 +10157,43 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define 
MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0 @@ -10704,38 +10203,45 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD 
production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4 /* Second word of flags. Not present on older firmware (check the length). */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1 @@ -10766,6 +10272,30 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14 #define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present * on older firmware (check the length). */ @@ -10779,18 +10309,18 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 /* enum: The caller is not permitted to access information on this PF. 
*/ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff /* enum: PF does not exist. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe /* enum: PF does exist but is not assigned to any external port. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd /* enum: This value indicates that PF is assigned, but it cannot be expressed * in this field. It is intended for a possible future situation where a more * complex scheme of PFs to ports mapping is being used. The future driver * should look for a new field supporting the new scheme. The current/old * driver should treat this value as PF_NOT_ASSIGNED. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc /* One byte per PF containing the number of its VFs, indexed by PF number. A * special value indicates that a PF is not present. */ @@ -10798,9 +10328,9 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */ /* enum: PF does not exist. */ -/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */ /* Number of VIs available for each external port */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58 #define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2 @@ -10826,6 +10356,7 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76 /* First word of flags. 
*/ #define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4 @@ -10888,48 +10419,58 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. 
*/ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0 @@ -10939,39 +10480,43 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0 @@ -10981,38 +10526,45 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype 
(Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4 /* Second word of flags. Not present on older firmware (check the length). */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1 @@ -11043,6 +10595,30 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14 #define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present * on older firmware (check the length). */ @@ -11056,18 +10632,18 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff /* enum: PF does not exist. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe /* enum: PF does exist but is not assigned to any external port. 
*/ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd /* enum: This value indicates that PF is assigned, but it cannot be expressed * in this field. It is intended for a possible future situation where a more * complex scheme of PFs to ports mapping is being used. The future driver * should look for a new field supporting the new scheme. The current/old * driver should treat this value as PF_NOT_ASSIGNED. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc /* One byte per PF containing the number of its VFs, indexed by PF number. A * special value indicates that a PF is not present. */ @@ -11075,9 +10651,9 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */ /* enum: PF does not exist. */ -/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */ /* Number of VIs available for each external port */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58 #define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2 @@ -11108,11 +10684,11 @@ /* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. * CTPIO is not mapped. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0 /* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2 /* Number of vFIFOs per adapter that can be used for VFIFO Stuffing * (SF-115995-SW) in the present configuration of firmware and port mode. */ @@ -11124,6 +10700,723 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf 
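As an aside on how these definitions are consumed: every field above is described by a byte offset/length pair (_OFST/_LEN) into the little-endian MCDI response buffer, and flag bits additionally by a bit position and width (_LBN/_WIDTH) within that field. A minimal driver-side sketch, assuming a raw little-endian response buffer; the helper mcdi_field() is hypothetical and not part of this patch (the efx base driver has its own accessor machinery):

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical helper: extract an LBN/WIDTH bitfield (width < 32 assumed)
     * from the 32-bit dword at byte offset 'ofst' of an MCDI response. */
    static uint32_t
    mcdi_field(const uint8_t *resp, unsigned int ofst,
               unsigned int lbn, unsigned int width)
    {
            uint32_t dword;

            memcpy(&dword, resp + ofst, sizeof(dword)); /* assumes LE host */
            return (dword >> lbn) & ((1u << width) - 1u);
    }

    /* Example: test the RX VLAN stripping capability bit in FLAGS1. */
    static int
    rx_vlan_strip_supported(const uint8_t *resp)
    {
            return mcdi_field(resp,
                MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST,
                MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN,
                MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH) != 0;
    }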
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present + * on older firmware (check the length). + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm. The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm. The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows.
+ */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2 + +/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf 
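The REV/TYPE split of RXPD_FW_VERSION above (12-bit revision, 4-bit type) means the 16-bit field must be decoded before comparing against the RXPD_FW_TYPE_* enums. A minimal sketch, again assuming a little-endian response buffer and using only the macros defined above; decode_rxpd_fw_version() is illustrative, not part of this patch:

    #include <stdint.h>

    /* Example: read the 16-bit RXPD firmware version field and split it
     * into revision (bits 0..11) and type (bits 12..15). */
    static void
    decode_rxpd_fw_version(const uint8_t *resp,
                           uint16_t *revp, uint16_t *typep)
    {
            unsigned int ofst = MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST;
            uint16_t v = (uint16_t)(resp[ofst] | (resp[ofst + 1] << 8));

            *revp = v & 0x0fff;          /* REV_LBN 0, REV_WIDTH 12 */
            *typep = (v >> 12) & 0xf;    /* TYPE_LBN 12, TYPE_WIDTH 4 */
    }

A caller would then compare *typep against, e.g., MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK (0xa) to detect the DPDK datapath firmware variant.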
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present + * on older firmware (check the length). + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm. The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm. The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows.
+ */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4 + /***********************************/ /* MC_CMD_V2_EXTN @@ -11144,7 +11437,16 @@ #define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16 #define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10 #define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26 -#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6 +#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2 +/* Type of command/response */ +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28 +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4 +/* enum: MCDI command directed to or response originating from the MC. */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0 +/* enum: MCDI command directed to a TSA controller. MCDI responses of this type + * are not defined. 
+ */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1 /***********************************/ @@ -11163,6 +11465,7 @@ #define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4 /* the bucket id */ #define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4 /***********************************/ @@ -11178,6 +11481,7 @@ #define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4 /* the bucket id */ #define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4 /* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */ #define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0 @@ -11196,17 +11500,22 @@ #define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8 /* the bucket id */ #define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4 /* the rate in mbps */ #define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4 +#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4 /* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */ #define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12 /* the bucket id */ #define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4 /* the rate in mbps */ #define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4 /* the desired maximum fill level */ #define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4 /* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */ #define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0 @@ -11225,10 +11534,13 @@ #define MC_CMD_TCM_TXQ_INIT_IN_LEN 28 /* the txq id */ #define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0 +#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4 /* the static priority associated with the txq */ #define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4 +#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4 /* bitmask of the priority queues this txq is inserted into when inserted. */ #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1 @@ -11237,25 +11549,32 @@ #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1 /* the reaction point (RP) bucket */ #define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12 +#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4 /* an already reserved bucket (typically set to bucket associated with outer * vswitch) */ #define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16 +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4 /* an already reserved bucket (typically set to bucket associated with inner * vswitch) */ #define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20 +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4 /* the min bucket (typically for ETS/minimum bandwidth) */ #define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24 +#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4 /* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32 /* the txq id */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4 /* the static priority associated with the txq */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4 /* bitmask of the priority queues this txq is inserted into when inserted. 
*/ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1 @@ -11264,18 +11583,23 @@ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1 /* the reaction point (RP) bucket */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4 /* an already reserved bucket (typically set to bucket associated with outer * vswitch) */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4 /* an already reserved bucket (typically set to bucket associated with inner * vswitch) */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4 /* the min bucket (typically for ETS/minimum bandwidth) */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4 /* the static priority associated with the txq */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4 /* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */ #define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0 @@ -11294,8 +11618,10 @@ #define MC_CMD_LINK_PIOBUF_IN_LEN 8 /* Handle for allocated push I/O buffer. */ #define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4 /* Function Local Instance (VI) number. */ #define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4 +#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4 /* MC_CMD_LINK_PIOBUF_OUT msgresponse */ #define MC_CMD_LINK_PIOBUF_OUT_LEN 0 @@ -11314,6 +11640,7 @@ #define MC_CMD_UNLINK_PIOBUF_IN_LEN 4 /* Function Local Instance (VI) number. */ #define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0 +#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4 /* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */ #define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0 @@ -11332,20 +11659,23 @@ #define MC_CMD_VSWITCH_ALLOC_IN_LEN 16 /* The port to connect to the v-switch's upstream port. */ #define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* The type of v-switch to create. */ #define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4 +#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4 /* enum: VLAN */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1 /* enum: VEB */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2 /* enum: VEPA (obsolete) */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3 /* enum: MUX */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4 /* enum: Snapper specific; semantics TBD */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5 /* Flags controlling v-port creation */ #define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4 #define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 #define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 /* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators, @@ -11356,6 +11686,7 @@ * v-ports with this number of tags. 
*/ #define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 +#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 /* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */ #define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0 @@ -11374,6 +11705,7 @@ #define MC_CMD_VSWITCH_FREE_IN_LEN 4 /* The port to which the v-switch is connected. */ #define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VSWITCH_FREE_OUT msgresponse */ #define MC_CMD_VSWITCH_FREE_OUT_LEN 0 @@ -11394,6 +11726,7 @@ #define MC_CMD_VSWITCH_QUERY_IN_LEN 4 /* The port to which the v-switch is connected. */ #define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VSWITCH_QUERY_OUT msgresponse */ #define MC_CMD_VSWITCH_QUERY_OUT_LEN 0 @@ -11412,28 +11745,31 @@ #define MC_CMD_VPORT_ALLOC_IN_LEN 20 /* The port to which the v-switch is connected. */ #define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* The type of the new v-port. */ #define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4 +#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4 /* enum: VLAN (obsolete) */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1 /* enum: VEB (obsolete) */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2 /* enum: VEPA (obsolete) */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3 /* enum: A normal v-port receives packets which match a specified MAC and/or * VLAN. */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4 /* enum: An expansion v-port receives traffic which doesn't match any other * v-port. */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5 /* enum: A test v-port receives packets which match any filters installed by * its downstream components. */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6 /* Flags controlling v-port creation */ #define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 #define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1 @@ -11443,8 +11779,10 @@ * v-switch. */ #define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 +#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 /* The actual VLAN tags to insert/remove */ #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4 #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0 #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16 #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16 @@ -11454,6 +11792,7 @@ #define MC_CMD_VPORT_ALLOC_OUT_LEN 4 /* The handle of the new v-port */ #define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4 /***********************************/ @@ -11469,6 +11808,7 @@ #define MC_CMD_VPORT_FREE_IN_LEN 4 /* The handle of the v-port */ #define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4 /* MC_CMD_VPORT_FREE_OUT msgresponse */ #define MC_CMD_VPORT_FREE_OUT_LEN 0 @@ -11487,18 +11827,23 @@ #define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30 /* The port to connect to the v-adaptor's port.
*/ #define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* Flags controlling v-adaptor creation */ #define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 /* The number of VLAN tags to strip on receive */ #define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12 +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4 /* The number of VLAN tags to transparently insert/remove. */ #define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 /* The actual VLAN tags to insert/remove */ #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4 #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0 #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16 #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16 @@ -11507,7 +11852,7 @@ #define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24 #define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6 /* enum: Derive the MAC address from the upstream port */ -#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0 +#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0 /* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */ #define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0 @@ -11526,6 +11871,7 @@ #define MC_CMD_VADAPTOR_FREE_IN_LEN 4 /* The port to which the v-adaptor is connected. */ #define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VADAPTOR_FREE_OUT msgresponse */ #define MC_CMD_VADAPTOR_FREE_OUT_LEN 0 @@ -11544,6 +11890,7 @@ #define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10 /* The port to which the v-adaptor is connected. */ #define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4 /* The new MAC address to assign to this v-adaptor */ #define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4 #define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6 @@ -11565,6 +11912,7 @@ #define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4 /* The port to which the v-adaptor is connected. */ #define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */ #define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6 @@ -11586,15 +11934,19 @@ #define MC_CMD_VADAPTOR_QUERY_IN_LEN 4 /* The port to which the v-adaptor is connected. */ #define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */ #define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12 /* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */ #define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0 +#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4 /* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */ #define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4 +#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4 /* The number of VLAN tags that may still be added */ #define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8 +#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4 /***********************************/ @@ -11610,8 +11962,10 @@ #define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8 /* The port to assign. 
*/ #define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4 /* The target function to modify. */ #define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4 #define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0 #define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16 #define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16 @@ -11633,9 +11987,13 @@ /* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */ #define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17 #define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4 /* Write enable bits 0-3, set to write, clear to read. */ #define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128 #define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4 @@ -11647,9 +12005,13 @@ */ #define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16 #define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4 /***********************************/ @@ -11665,11 +12027,13 @@ #define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4 /* The handle of the owning upstream port */ #define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */ #define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4 /* The handle of the new Onload stack */ #define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0 +#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4 /***********************************/ @@ -11685,6 +12049,7 @@ #define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4 /* The handle of the Onload stack */ #define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0 +#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4 /* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */ #define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0 @@ -11703,21 +12068,24 @@ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12 /* The handle of the owning upstream port */ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* The type of context to allocate */ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4 /* enum: Allocate a context for exclusive use. The key and indirection table * must be explicitly configured. */ -#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0 /* enum: Allocate a context for shared use; this will spread across a range of * queues, but the key and indirection table are pre-configured and may not be * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64. */ -#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1 /* Number of queues spanned by this context, in the range 1-64; valid offsets * in the indirection table will be in the range 0 to NUM_QUEUES-1.
*/ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4 /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4 @@ -11726,8 +12094,9 @@ * handle. */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4 /* enum: guaranteed invalid RSS context handle value */ -#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff /***********************************/ @@ -11743,6 +12112,7 @@ #define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4 /* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0 @@ -11761,6 +12131,7 @@ #define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4 /* The 40-byte Toeplitz hash key (TBD endianness issues?) */ #define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40 @@ -11782,6 +12153,7 @@ #define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4 /* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44 @@ -11803,6 +12175,7 @@ #define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4 /* The 128-byte indirection table (1 byte per entry) */ #define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128 @@ -11824,6 +12197,7 @@ #define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4 /* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132 @@ -11845,6 +12219,7 @@ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4 /* Hash control flags. The _EN bits are always supported, but new modes are * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES: * in this case, the MODE fields may be set to non-zero values, and will take @@ -11858,6 +12233,7 @@ * particular packet type.) 
*/ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1 @@ -11898,6 +12274,7 @@ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4 /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8 @@ -11915,6 +12292,7 @@ * always be used for a SET regardless of old/new driver vs. old/new firmware. */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1 @@ -11952,11 +12330,13 @@ #define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8 /* The handle of the owning upstream port */ #define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* Number of queues spanned by this mapping, in the range 1-64; valid fixed * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and * referenced RSS contexts must span no more than this number. */ #define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4 +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4 /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4 @@ -11965,8 +12345,9 @@ * handle. */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4 /* enum: guaranteed invalid .1p mapping handle value */ -#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff /***********************************/ @@ -11982,6 +12363,7 @@ #define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4 /* The handle of the .1p mapping */ #define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4 /* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */ #define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0 @@ -12000,6 +12382,7 @@ #define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36 /* The handle of the .1p mapping */ #define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4 /* Per-priority mappings (1 32-bit word per entry - an offset or RSS context * handle) */ @@ -12023,6 +12406,7 @@ #define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4 /* The handle of the .1p mapping */ #define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4 /* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */ #define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36 @@ -12049,10 +12433,13 @@ #define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12 /* Base absolute interrupt vector number. */ #define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0 +#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4 /* Number of interrupt vectors allocated to this PF. */ #define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4 +#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4 /* Number of interrupt vectors to allocate per VF.
*/ #define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8 +#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4 /***********************************/ @@ -12070,10 +12457,13 @@ * let the system find a suitable base. */ #define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0 +#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4 /* Number of interrupt vectors allocate to this PF. */ #define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4 +#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4 /* Number of interrupt vectors to allocate per VF. */ #define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8 +#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4 /* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */ #define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0 @@ -12092,6 +12482,7 @@ #define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10 /* The handle of the v-port */ #define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4 /* MAC address to add */ #define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4 #define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6 @@ -12113,6 +12504,7 @@ #define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10 /* The handle of the v-port */ #define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4 /* MAC address to add */ #define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4 #define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6 @@ -12134,6 +12526,7 @@ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4 /* The handle of the v-port */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4 /* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4 @@ -12141,6 +12534,7 @@ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num)) /* The number of MAC addresses returned */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4 /* Array of MAC addresses */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4 #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6 @@ -12163,8 +12557,10 @@ #define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44 /* The handle of the v-port */ #define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4 /* Flags requesting what should be changed. */ #define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4 +#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4 #define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0 #define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1 #define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1 @@ -12174,14 +12570,17 @@ * v-switch. 
*/ #define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8 +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4 /* The actual VLAN tags to insert/remove */ #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16 /* The number of MAC addresses to add */ #define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16 +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4 /* MAC addresses to add */ #define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20 #define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6 @@ -12190,6 +12589,7 @@ /* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */ #define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4 #define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0 +#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4 #define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0 #define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1 @@ -12207,15 +12607,18 @@ #define MC_CMD_EVB_PORT_QUERY_IN_LEN 4 /* The handle of the v-port */ #define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0 +#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4 /* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */ #define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8 /* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */ #define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0 +#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4 /* The number of VLAN tags that may be used on a v-adaptor connected to this * EVB port. */ #define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4 +#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4 /***********************************/ @@ -12228,14 +12631,16 @@ #define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab #undef MC_CMD_0xab_PRIVILEGE_CTG -#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8 /* Index of the first buffer table entry. */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0 +#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4 /* Number of buffer table entries to dump. 
*/ #define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4 +#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4 /* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12 @@ -12260,16 +12665,17 @@ /* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */ #define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4 #define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0 +#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2 /* enum: pad to 64 bytes */ -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0 /* enum: pad to 128 bytes (Medford only) */ -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1 /* enum: pad to 256 bytes (Medford only) */ -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2 /* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */ #define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0 @@ -12290,6 +12696,7 @@ /* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */ #define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4 #define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0 +#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1 @@ -12314,8 +12721,10 @@ #define MC_CMD_GET_CLOCK_OUT_LEN 8 /* System frequency, MHz */ #define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0 +#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4 /* DPCPU frequency, MHz */ #define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4 +#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4 /***********************************/ @@ -12325,69 +12734,83 @@ #define MC_CMD_SET_CLOCK 0xad #undef MC_CMD_0xad_PRIVILEGE_CTG -#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SET_CLOCK_IN msgrequest */ #define MC_CMD_SET_CLOCK_IN_LEN 28 /* Requested frequency in MHz for system clock domain */ #define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0 +#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4 /* enum: Leave the system clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for inter-core clock domain */ #define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4 +#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4 /* enum: Leave the inter-core clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for DPCPU clock domain */ #define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8 +#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4 /* enum: Leave the DPCPU clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for PCS clock domain */ #define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12 +#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4 /* enum: Leave the PCS clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for MC clock domain */ #define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16 +#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4 /* enum: 
Leave the MC clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for rmon clock domain */ #define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20 +#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4 /* enum: Leave the rmon clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for vswitch clock domain */ #define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24 +#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4 /* enum: Leave the vswitch clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0 /* MC_CMD_SET_CLOCK_OUT msgresponse */ #define MC_CMD_SET_CLOCK_OUT_LEN 28 /* Resulting system frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0 +#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4 /* enum: The system clock domain doesn't exist */ -#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0 /* Resulting inter-core frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4 +#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4 /* enum: The inter-core clock domain doesn't exist / isn't used */ -#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0 /* Resulting DPCPU frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8 +#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4 /* enum: The dpcpu clock domain doesn't exist */ -#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0 /* Resulting PCS frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12 +#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4 /* enum: The PCS clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0 /* Resulting MC frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16 +#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4 /* enum: The MC clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0 /* Resulting rmon frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20 +#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4 /* enum: The rmon clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0 /* Resulting vswitch frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24 +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4 /* enum: The vswitch clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0 /***********************************/ @@ -12397,27 +12820,28 @@ #define MC_CMD_DPCPU_RPC 0xae #undef MC_CMD_0xae_PRIVILEGE_CTG -#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DPCPU_RPC_IN msgrequest */ #define MC_CMD_DPCPU_RPC_IN_LEN 36 #define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0 +#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4 /* enum: RxDPCPU0 */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0 
/* enum: TxDPCPU0 */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 /* enum: TxDPCPU1 */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 /* enum: RxDPCPU1 (Medford only) */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3 /* enum: RxDPCPU (will be for the calling function; for now, just an alias of * DPCPU_RX0) */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80 /* enum: TxDPCPU (will be for the calling function; for now, just an alias of * DPCPU_TX0) */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81 /* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be * initialised to zero */ @@ -12425,15 +12849,15 @@ #define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8 -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */ #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16 @@ -12444,11 +12868,11 @@ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64 @@ -12457,21 +12881,24 @@ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16 
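[Editor's note, not part of the patch] The pattern throughout this hunk is uniform: every 32-bit MCDI field gains a _LEN companion to its _OFST byte offset, and sub-fields within a field are described by _LBN/_WIDTH bit ranges, where bit numbers are relative to the containing field and may exceed 31 (as with CSR_ACCESS_RPT_COUNT_LBN 64 above). Below is a minimal, self-contained C sketch of how such macros are typically consumed. The helper names mcdi_get_dword(), mcdi_set_dword() and mcdi_set_bits() are illustrative, not sfc driver API, and the sketch assumes the usual MCDI conventions of little-endian 32-bit words and bit ranges that do not straddle a dword boundary.

#include <stdint.h>
#include <stddef.h>

/* Read a 32-bit little-endian dword at byte offset 'ofst' (an _OFST value). */
static uint32_t mcdi_get_dword(const uint8_t *buf, size_t ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

/* Write a 32-bit little-endian dword at byte offset 'ofst'. */
static void mcdi_set_dword(uint8_t *buf, size_t ofst, uint32_t value)
{
	buf[ofst] = (uint8_t)value;
	buf[ofst + 1] = (uint8_t)(value >> 8);
	buf[ofst + 2] = (uint8_t)(value >> 16);
	buf[ofst + 3] = (uint8_t)(value >> 24);
}

/* Insert an _LBN/_WIDTH bitfield into the field starting at 'field_ofst'.
 * The dword containing the range is located first, since bit numbers are
 * field-relative and may exceed 31. */
static void mcdi_set_bits(uint8_t *buf, size_t field_ofst, unsigned int lbn,
			  unsigned int width, uint32_t value)
{
	size_t ofst = field_ofst + (lbn / 32) * 4;
	unsigned int shift = lbn % 32;
	uint32_t mask = (width < 32 ? (1u << width) - 1u : 0xffffffffu) << shift;
	uint32_t word = mcdi_get_dword(buf, ofst);

	mcdi_set_dword(buf, ofst, (word & ~mask) | ((value << shift) & mask));
}

For example, selecting the cut-through replay mode inside the DATA area of an MC_CMD_DPCPU_RPC request buffer 'req' (of MC_CMD_DPCPU_RPC_IN_LEN bytes, zero-initialised by the caller) would, under these assumptions, look like:

	mcdi_set_bits(req, MC_CMD_DPCPU_RPC_IN_DATA_OFST,
		      MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN,
		      MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH,
		      MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH);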
#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16 #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */ #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64 #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16 #define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12 #define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24 /* Register data to write. Only valid in write/write-read. */ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4 /* Register address. */ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4 /* MC_CMD_DPCPU_RPC_OUT msgresponse */ #define MC_CMD_DPCPU_RPC_OUT_LEN 36 #define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0 +#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4 /* DATA */ #define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4 #define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32 @@ -12482,9 +12909,13 @@ #define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12 #define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24 #define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4 #define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4 #define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4 #define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4 /***********************************/ @@ -12500,6 +12931,7 @@ #define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4 /* Interrupt level relative to base for function. */ #define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0 +#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4 /* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */ #define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0 @@ -12512,14 +12944,15 @@ #define MC_CMD_SHMBOOT_OP 0xe6 #undef MC_CMD_0xe6_PRIVILEGE_CTG -#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_SHMBOOT_OP_IN msgrequest */ #define MC_CMD_SHMBOOT_OP_IN_LEN 4 /* Identifies the operation to perform */ #define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0 +#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4 /* enum: Copy slave_data section to the slave core. 
(Greenport only) */ -#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 +#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 /* MC_CMD_SHMBOOT_OP_OUT msgresponse */ #define MC_CMD_SHMBOOT_OP_OUT_LEN 0 @@ -12532,13 +12965,16 @@ #define MC_CMD_CAP_BLK_READ 0xe7 #undef MC_CMD_0xe7_PRIVILEGE_CTG -#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_CAP_BLK_READ_IN msgrequest */ #define MC_CMD_CAP_BLK_READ_IN_LEN 12 #define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0 +#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4 #define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4 +#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4 #define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8 +#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4 /* MC_CMD_CAP_BLK_READ_OUT msgresponse */ #define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8 @@ -12559,53 +12995,77 @@ #define MC_CMD_DUMP_DO 0xe8 #undef MC_CMD_0xe8_PRIVILEGE_CTG -#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DUMP_DO_IN msgrequest */ #define MC_CMD_DUMP_DO_IN_LEN 52 #define MC_CMD_DUMP_DO_IN_PADDING_OFST 0 +#define MC_CMD_DUMP_DO_IN_PADDING_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4 -#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 -#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 -#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 -#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 +#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4 /* enum: The uart port this command was received over (if using a uart * transport) */ -#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff +#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28 -#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4 /* MC_CMD_DUMP_DO_OUT msgresponse */ #define MC_CMD_DUMP_DO_OUT_LEN 4 #define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0 +#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4 /***********************************/ @@ -12615,41 +13075,64 @@ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9 #undef MC_CMD_0xe9_PRIVILEGE_CTG -#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 #define 
MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48 +#define 
MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4 /***********************************/ @@ -12661,17 +13144,20 @@ #define MC_CMD_SET_PSU 0xea #undef MC_CMD_0xea_PRIVILEGE_CTG -#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SET_PSU_IN msgrequest */ #define MC_CMD_SET_PSU_IN_LEN 12 #define MC_CMD_SET_PSU_IN_PARAM_OFST 0 -#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */ +#define MC_CMD_SET_PSU_IN_PARAM_LEN 4 +#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */ #define MC_CMD_SET_PSU_IN_RAIL_OFST 4 -#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */ -#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */ +#define MC_CMD_SET_PSU_IN_RAIL_LEN 4 +#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */ +#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */ /* desired value, eg voltage in mV */ #define MC_CMD_SET_PSU_IN_VALUE_OFST 8 +#define MC_CMD_SET_PSU_IN_VALUE_LEN 4 /* MC_CMD_SET_PSU_OUT msgresponse */ #define MC_CMD_SET_PSU_OUT_LEN 0 @@ -12692,7 +13178,9 @@ /* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */ #define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8 #define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0 +#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4 #define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4 +#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4 /***********************************/ @@ -12704,7 +13192,7 @@ #define MC_CMD_ENABLE_OFFLINE_BIST 0xed #undef MC_CMD_0xed_PRIVILEGE_CTG -#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */ #define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0 @@ -12730,12 +13218,16 @@ #define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num)) /* CRC32 over OFFSET, LENGTH, RESERVED, DATA */ #define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0 +#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4 /* Offset at which to write the data */ #define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4 +#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4 /* Length of data */ #define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8 +#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4 /* Reserved for future use */ #define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12 +#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4 #define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16 #define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1 #define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0 @@ -12759,12 +13251,16 @@ #define MC_CMD_UART_RECV_DATA_OUT_LEN 16 /* CRC32 over OFFSET, LENGTH, RESERVED */ #define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0 +#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4 /* Offset from which to read the data */ #define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4 +#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4 /* Length of data */ #define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8 +#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4 /* Reserved for future use */ #define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12 +#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4 /* MC_CMD_UART_RECV_DATA_IN msgresponse */ #define MC_CMD_UART_RECV_DATA_IN_LENMIN 16 @@ -12772,12 +13268,16 @@ #define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num)) /* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */ #define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0 +#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4 /* Offset at which to write the data */ #define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4 +#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4 /* Length of data */ #define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 
8 +#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4 /* Reserved for future use */ #define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12 +#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4 #define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16 #define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1 #define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0 @@ -12791,14 +13291,16 @@ #define MC_CMD_READ_FUSES 0xf0 #undef MC_CMD_0xf0_PRIVILEGE_CTG -#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_READ_FUSES_IN msgrequest */ #define MC_CMD_READ_FUSES_IN_LEN 8 /* Offset in OTP to read */ #define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0 +#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4 /* Length of data to read in bytes */ #define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4 +#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4 /* MC_CMD_READ_FUSES_OUT msgresponse */ #define MC_CMD_READ_FUSES_OUT_LENMIN 4 @@ -12806,6 +13308,7 @@ #define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num)) /* Length of returned OTP data in bytes */ #define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0 +#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4 /* Returned data */ #define MC_CMD_READ_FUSES_OUT_DATA_OFST 4 #define MC_CMD_READ_FUSES_OUT_DATA_LEN 1 @@ -12820,7 +13323,7 @@ #define MC_CMD_KR_TUNE 0xf1 #undef MC_CMD_0xf1_PRIVILEGE_CTG -#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_KR_TUNE_IN msgrequest */ #define MC_CMD_KR_TUNE_IN_LENMIN 4 @@ -12830,26 +13333,30 @@ #define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0 #define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1 /* enum: Get current RXEQ settings */ -#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0 +#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0 /* enum: Override RXEQ settings */ -#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1 +#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1 /* enum: Get current TX Driver settings */ -#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2 +#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2 /* enum: Override TX Driver settings */ -#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3 +#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3 /* enum: Force KR Serdes reset / recalibration */ -#define MC_CMD_KR_TUNE_IN_RECAL 0x4 +#define MC_CMD_KR_TUNE_IN_RECAL 0x4 /* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid * signal. */ -#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5 +#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5 /* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The * caller should call this command repeatedly after starting eye plot, until no * more data is returned. */ -#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6 +#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6 /* enum: Read Figure Of Merit (eye quality, higher is better). 
*/ -#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7 +#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7 +/* enum: Start/stop link training frames */ +#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_RUN 0x8 +/* enum: Issue KR link training command (control training coefficients) */ +#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_CMD 0x9 /* Align the arguments to 32 bits */ #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3 @@ -12883,44 +13390,98 @@ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 /* enum: Attenuation (0-15, Huntington) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 /* enum: CTLE Boost (0-15, Huntington) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 /* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max * positive, Medford - 0-31) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 /* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max * positive, Medford - 0-31) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 /* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max * positive, Medford - 0-16) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 /* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max * positive, Medford - 0-16) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 /* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max * positive, Medford - 0-16) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 /* enum: Edge DFE DLEV (0-128 for Medford) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7 /* enum: Variable Gain Amplifier (0-15, Medford) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8 /* enum: CTLE EQ Capacitor (0-15, Medford) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 /* enum: CTLE EQ Resistor (0-7, Medford) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa +/* enum: CTLE gain (0-31, Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN 0xb +/* enum: CTLE pole (0-31, Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE 0xc +/* enum: CTLE peaking (0-31, Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK 0xd +/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN 0xe +/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD 0xf +/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x10 +/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x11 +/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x12 +/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x13 +/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6 0x14 +/* enum: DFE Tap7 
(Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7 0x15 +/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8 0x16 +/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9 0x17 +/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10 0x18 +/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11 0x19 +/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12 0x1a +/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5))) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF 0x1b +/* enum: Negative h1 polarity data sampler offset calibration code, even path + * (Medford2 - 6 bit signed (-29 - +29))) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN 0x1c +/* enum: Negative h1 polarity data sampler offset calibration code, odd path + * (Medford2 - 6 bit signed (-29 - +29))) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD 0x1d +/* enum: Positive h1 polarity data sampler offset calibration code, even path + * (Medford2 - 6 bit signed (-29 - +29))) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN 0x1e +/* enum: Positive h1 polarity data sampler offset calibration code, odd path + * (Medford2 - 6 bit signed (-29 - +29))) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD 0x1f +/* enum: CDR calibration loop code (Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20 +/* enum: CDR integral loop code (Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12 @@ -12985,39 +13546,39 @@ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 -/* enum: TX Amplitude (Huntington, Medford) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0 +/* enum: TX Amplitude (Huntington, Medford, Medford2) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0 /* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1 /* enum: De-Emphasis Tap1 Fine */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2 /* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3 /* enum: De-Emphasis Tap2 Fine (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4 /* enum: 
Pre-Emphasis Magnitude (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5 /* enum: Pre-Emphasis Fine (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6 /* enum: TX Slew Rate Coarse control (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 /* enum: TX Slew Rate Fine control (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 /* enum: TX Termination Impedance control (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 /* enum: TX Amplitude Fine control (Medford) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa -/* enum: Pre-shoot Tap (Medford) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb -/* enum: De-emphasis Tap (Medford) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa +/* enum: Pre-shoot Tap (Medford, Medford2) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb +/* enum: De-emphasis Tap (Medford, Medford2) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16 @@ -13078,7 +13639,27 @@ /* Align the arguments to 32 bits */ #define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3 +/* Port-relative lane to scan eye on */ #define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4 + +/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12 +/* Requested operation */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_LBN 0 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_WIDTH 8 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_LBN 31 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_WIDTH 1 +/* Scan duration / cycle count */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4 /* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */ #define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0 @@ -13110,10 +13691,91 @@ #define 
MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3 #define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_LBN 0 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_WIDTH 8 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_LBN 31 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_WIDTH 1 /* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */ #define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4 #define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0 +#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4 + +/* MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN msgrequest */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_OFST 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_STOP 0x0 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_START 0x1 /* enum */ + +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN msgrequest */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LEN 28 +/* Requested operation */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_LEN 4 +/* Set INITIALIZE state */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_OFST 8 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_LEN 4 +/* Set PRESET state */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_OFST 12 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_LEN 4 +/* C(-1) request */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_OFST 16 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD 0x0 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_INCREMENT 0x1 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_DECREMENT 0x2 /* enum */ +/* C(0) request */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_OFST 20 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ +/* C(+1) request */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_OFST 24 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ + +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT msgresponse */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_LEN 24 +/* C(-1) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_NOT_UPDATED 0x0 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_UPDATED 0x1 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MINIMUM 0x2 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MAXIMUM 0x3 /* enum */ +/* C(0) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_OFST 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ +/* C(+1) status */ 
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_OFST 8 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ +/* C(-1) value */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_OFST 12 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_LEN 4 +/* C(0) value */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_OFST 16 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_LEN 4 +/* C(+1) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_OFST 20 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_LEN 4 /***********************************/ @@ -13123,7 +13785,7 @@ #define MC_CMD_PCIE_TUNE 0xf2 #undef MC_CMD_0xf2_PRIVILEGE_CTG -#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_PCIE_TUNE_IN msgrequest */ #define MC_CMD_PCIE_TUNE_IN_LENMIN 4 @@ -13133,22 +13795,22 @@ #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0 #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1 /* enum: Get current RXEQ settings */ -#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0 +#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0 /* enum: Override RXEQ settings */ -#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1 +#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1 /* enum: Get current TX Driver settings */ -#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2 +#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2 /* enum: Override TX Driver settings */ -#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3 +#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3 /* enum: Start PCIe Serdes Eye diagram plot on a given lane. */ -#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5 +#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5 /* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The * caller should call this command repeatedly after starting eye plot, until no * more data is returned. 
*/ -#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6 +#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6 /* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */ -#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7 +#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7 /* Align the arguments to 32 bits */ #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1 #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3 @@ -13182,46 +13844,46 @@ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 /* enum: Attenuation (0-15) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0 /* enum: CTLE Boost (0-15) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1 /* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2 /* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3 /* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4 /* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5 /* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6 /* enum: DFE DLev */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7 /* enum: Figure of Merit */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8 /* enum: CTLE EQ Capacitor (HF Gain) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 /* enum: CTLE EQ Resistor (DC Gain) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ 
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14 @@ -13285,15 +13947,15 @@ #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 /* enum: TxMargin (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0 /* enum: TxSwing (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1 /* enum: De-emphasis coefficient C(-1) (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2 /* enum: De-emphasis coefficient C(0) (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3 /* enum: De-emphasis coefficient C(+1) (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4 /* Enum values, see field(s): */ @@ -13312,6 +13974,7 @@ #define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1 #define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3 #define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4 +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4 /* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */ #define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0 @@ -13355,38 +14018,46 @@ #define MC_CMD_LICENSING_IN_LEN 4 /* identifies the type of operation requested */ #define MC_CMD_LICENSING_IN_OP_OFST 0 +#define MC_CMD_LICENSING_IN_OP_LEN 4 /* enum: re-read and apply licenses after a license key partition update; note * that this operation returns a zero-length response */ -#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0 +#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0 /* enum: report counts of installed licenses */ -#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1 +#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1 /* MC_CMD_LICENSING_OUT msgresponse */ #define MC_CMD_LICENSING_OUT_LEN 28 /* count of application keys which are valid */ #define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0 +#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4 /* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with * MC_CMD_FC_OP_LICENSE) */ #define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4 +#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4 /* count of application keys which are invalid due to being blacklisted */ #define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8 +#define 
@@ -13355,38 +14018,46 @@
#define MC_CMD_LICENSING_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_LICENSING_IN_OP_OFST 0
+#define MC_CMD_LICENSING_IN_OP_LEN 4
/* enum: re-read and apply licenses after a license key partition update; note
 * that this operation returns a zero-length response
 */
-#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
/* enum: report counts of installed licenses */
-#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
/* MC_CMD_LICENSING_OUT msgresponse */
#define MC_CMD_LICENSING_OUT_LEN 28
/* count of application keys which are valid */
#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
 * MC_CMD_FC_OP_LICENSE)
 */
#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being blacklisted */
#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being unverifiable */
#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being for the wrong node
 */
#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
/* licensing state (for diagnostics; the exact meaning of the bits in this
 * field are private to the firmware)
 */
#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
/* licensing subsystem self-test report (for manftest) */
#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
/* enum: licensing subsystem self-test failed */
-#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
/* enum: licensing subsystem self-test passed */
-#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1


/***********************************/
@@ -13403,37 +14074,44 @@
#define MC_CMD_LICENSING_V3_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_IN_OP_LEN 4
/* enum: re-read and apply licenses after a license key partition update; note
 * that this operation returns a zero-length response
 */
-#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
/* enum: report counts of installed licenses Returns EAGAIN if license
 * processing (updating) has been started but not yet completed.
 */
-#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
/* MC_CMD_LICENSING_V3_OUT msgresponse */
#define MC_CMD_LICENSING_V3_OUT_LEN 88
/* count of keys which are valid */
#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
 * MC_CMD_FC_OP_LICENSE)
 */
#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
/* count of keys which are invalid due to being unverifiable */
#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
/* count of keys which are invalid due to being for the wrong node */
#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
/* licensing state (for diagnostics; the exact meaning of the bits in this
 * field are private to the firmware)
 */
#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
/* licensing subsystem self-test report (for manftest) */
#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
/* enum: licensing subsystem self-test failed */
-#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
/* enum: licensing subsystem self-test passed */
-#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
/* bitmask of licensed applications */
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
@@ -13471,8 +14149,10 @@
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
/* type of license (eg 3) */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
/* length of the license ID (in bytes) */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
/* the unique license ID of the adapter */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
@@ -13512,15 +14192,17 @@
#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
/* application ID to query (LICENSED_APP_ID_xxx) */
#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
/* state of this application */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
/* enum: no (or invalid) license is present for the application */
-#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
/* enum: a valid license is present for the application */
-#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1


/***********************************/
@@ -13548,10 +14230,11 @@
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
/* state of this application */
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
/* enum: no (or invalid) license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
/* enum: a valid license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1


/***********************************/
@@ -13600,12 +14283,14 @@
#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
/* application ID */
#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
/* enum: validate application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
/* enum: mask application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
/* arguments specific to this particular operation */
#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
@@ -13626,8 +14311,10 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
/* application ID */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
/* validation challenge */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
@@ -13636,6 +14323,7 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
/* feature expiry (time_t) */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
/* validation response */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
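/* Editor's aside (not part of the patch): MCDI payloads are flat
 * little-endian byte buffers and each _OFST/_LEN pair above names one field.
 * A minimal request encoder for MC_CMD_LICENSED_APP_OP, for illustration
 * only; put_le32() and encode_licensed_app_op() are hypothetical names, and
 * real drivers typically wrap this in their own accessor macros. */

#include <stdint.h>
#include <string.h>

static void
put_le32(uint8_t *buf, size_t ofst, uint32_t v)
{
	buf[ofst + 0] = (uint8_t)(v);
	buf[ofst + 1] = (uint8_t)(v >> 8);
	buf[ofst + 2] = (uint8_t)(v >> 16);
	buf[ofst + 3] = (uint8_t)(v >> 24);
}

/* Returns the request length in bytes for nargs trailing 4-byte arguments. */
static size_t
encode_licensed_app_op(uint8_t *buf, uint32_t app_id, uint32_t op,
    const uint8_t *args, size_t nargs)
{
	put_le32(buf, MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST, app_id);
	put_le32(buf, MC_CMD_LICENSED_APP_OP_IN_OP_OFST, op);
	memcpy(buf + MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST, args,
	    nargs * MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN);
	return MC_CMD_LICENSED_APP_OP_IN_LEN(nargs);
}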
@@ -13644,10 +14332,13 @@
#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
/* application ID */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
/* flag */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
@@ -13686,12 +14377,14 @@
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
/* application expiry time */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
/* application expiry units */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
/* enum: expiry units are accounting units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
/* enum: expiry units are calendar days */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
/* base MAC address of the NIC stored in NVRAM (note that this is a constant
 * value for a given NIC regardless which function is calling, effectively this
 * is PF0 base MAC address)
@@ -13712,7 +14405,7 @@
#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
#undef MC_CMD_0xd5_PRIVILEGE_CTG

-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN

/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
@@ -13723,10 +14416,11 @@
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
/* whether to turn on or turn off the masked features */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
/* enum: turn the features off */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
/* enum: turn the features back on */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1

/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
@@ -13743,29 +14437,31 @@
#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
#undef MC_CMD_0xd6_PRIVILEGE_CTG

-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND

/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
/* operation code */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
/* enum: install a new license, overwriting any existing temporary license.
 * This is an asynchronous operation owing to the time taken to validate an
 * ECDSA license
 */
-#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
+#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
/* enum: clear the license immediately rather than waiting for the next power
 * cycle
 */
-#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
+#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
 * operation
 */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2

/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
/* ECDSA license and signature */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
@@ -13773,23 +14469,26 @@
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4

/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4

/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
/* status code */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
/* enum: finished validating and installing license */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
/* enum: license validation and installation in progress */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
/* enum: licensing error. More specific error messages are not provided to
 * avoid exposing details of the licensing system to the client
 */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
/* bitmask of licensed features */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
@@ -13814,23 +14513,27 @@
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
/* receive queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
 * that these handles should be considered opaque to the host, although a value
 * of 0xFFFFFFFF is guaranteed never to be a valid handle.
 */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4

/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
@@ -13845,7 +14548,7 @@
#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
#undef MC_CMD_0xf8_PRIVILEGE_CTG

-#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL

/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
@@ -13854,20 +14557,24 @@
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
/* configuration flags */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
/* receiving queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4


/***********************************/
@@ -13885,19 +14592,21 @@
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
/* the type of configuration setting to change */
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
 * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
 */
-#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
 * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
 * boolean.)
 */
-#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
/* handle for the entity to update: queue handle, EVB port ID, etc. depending
 * on the type of configuration setting being changed
 */
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
/* new value: the details depend on the type of configuration setting being
 * changed
 */
@@ -13923,12 +14632,14 @@
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
/* the type of configuration setting to read */
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
 * the type of configuration setting being read
 */
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4

/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
@@ -13962,21 +14673,25 @@
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
/* receive queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
 * that these handles should be considered opaque to the host, although a value
 * of 0xFFFFFFFF is guaranteed never to be a valid handle.
 */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4

/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
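/* Editor's aside (not part of the patch): flag bits such as ENABLE and
 * PROMISCUOUS live inside the FLAGS dword, so a request builder shifts them
 * into place and writes whole dwords for the remaining fields. A hedged
 * sketch; encode_port_sniff_config() is a hypothetical name and put_le32()
 * repeats the little-endian helper from the earlier sketch. */

#include <stdint.h>

static void
put_le32(uint8_t *b, size_t o, uint32_t v)
{
	b[o] = (uint8_t)v; b[o + 1] = (uint8_t)(v >> 8);
	b[o + 2] = (uint8_t)(v >> 16); b[o + 3] = (uint8_t)(v >> 24);
}

static void
encode_port_sniff_config(uint8_t *buf, int enable, int promiscuous,
    uint32_t rx_queue, uint32_t rx_mode, uint32_t rx_context)
{
	uint32_t flags =
	    ((uint32_t)!!enable << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN) |
	    ((uint32_t)!!promiscuous <<
	    MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN);

	put_le32(buf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST, flags);
	put_le32(buf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST, rx_queue);
	put_le32(buf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST, rx_mode);
	put_le32(buf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST,
	    rx_context);
}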
@@ -13991,7 +14706,7 @@
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
#undef MC_CMD_0xfc_PRIVILEGE_CTG

-#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL

/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
@@ -14000,18 +14715,22 @@
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
/* configuration flags */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
/* receiving queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4


/***********************************/
@@ -14027,16 +14746,22 @@
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
/* The rx queue to get stats for. */
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1

/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4


/***********************************/
@@ -14044,6 +14769,9 @@
 * Find out about available PCIE resources
 */
#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+#undef MC_CMD_0xfd_PRIVILEGE_CTG
+
+#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL

/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
@@ -14052,20 +14780,27 @@
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
/* The maximum number of PFs the device can expose */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
/* The maximum number of VFs the device can expose in total */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
/* The maximum number of MSI-X vectors the device can provide in total */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
/* the number of MSI-X vectors the device will allocate by default to each PF
 */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
/* the number of MSI-X vectors the device will allocate by default to each VF
 */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
/* the maximum number of MSI-X vectors the device can allocate to any one PF */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
/* the maximum number of MSI-X vectors the device can allocate to any one VF */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4


/***********************************/
@@ -14084,10 +14819,13 @@
#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
/* Default (canonical) board mode */
#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
/* Current board mode */
#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
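/* Editor's aside (not part of the patch): MODES is a plain bitmask indexed
 * by TLV_PORT_MODE_* values, so a caller usually just walks the set bits.
 * A minimal sketch; list_available_port_modes() is a hypothetical name. */

#include <stdint.h>
#include <stdio.h>

static void
list_available_port_modes(uint32_t modes, uint32_t current_mode)
{
	unsigned int mode;

	for (mode = 0; mode < 32; mode++) {
		if (modes & (1u << mode))
			printf("port mode %u%s\n", mode,
			    (mode == current_mode) ? " (current)" : "");
	}
}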
/***********************************/
@@ -14097,21 +14835,26 @@
#define MC_CMD_READ_ATB 0x100
#undef MC_CMD_0x100_PRIVILEGE_CTG

-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE

/* MC_CMD_READ_ATB_IN msgrequest */
#define MC_CMD_READ_ATB_IN_LEN 16
#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
-#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
+#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4

/* MC_CMD_READ_ATB_OUT msgresponse */
#define MC_CMD_READ_ATB_OUT_LEN 4
#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4


/***********************************/
@@ -14129,7 +14872,9 @@
/* Each workaround is represented by a single bit according to the enums below.
 */
#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
/* enum: Bug 17230 work around. */
#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
/* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -14165,50 +14910,63 @@
 * 1,3 = 0x00030001
 */
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
-#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
/* New privilege mask to be set. The mask will only be changed if the MSB is
 * set to 1.
 */
#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC.
 */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
/* enum: Allows to set the TX packets' source MAC address to any arbitrary MAC
 * address.
 */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
/* enum: Privilege that allows a Function to change the MAC address configured
 * in its associated vAdapter/vPort.
 */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
/* enum: Privilege that allows a Function to install filters that specify VLANs
 * that are not in the permit list for the associated vPort. This privilege is
 * primarily to support ESX where vPorts are created that restrict traffic to
 * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
 */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000
+/* enum: Trusted Server Adapter (TSA) / ServerLock. Privilege for
+ * administrator-level operations that are not allowed from the local host once
+ * an adapter has Bound to a remote ServerLock Controller (see doxbox
+ * SF-117064-DG for background).
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000
/* enum: Set this bit to indicate that a new privilege mask is to be set,
 * otherwise the command will only read the existing mask.
 */
-#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000

/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
/* For an admin function, always all the privileges are reported. */
#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4


/***********************************/
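/* Editor's aside (not part of the patch): FUNCTION packs the PF number into
 * bits 0-15 and the VF number into bits 16-31, so PF 1 / VF 3 encodes as
 * 0x00030001 exactly as the comment above illustrates, and VF_NULL selects a
 * physical function only. A minimal sketch; the helper names are
 * hypothetical. */

#include <stdint.h>

static uint32_t
privilege_mask_function(uint16_t pf, uint16_t vf)
{
	return ((uint32_t)pf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN) |
	       ((uint32_t)vf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN);
}

/* The MSB (DO_CHANGE) turns a read of the mask into a read-modify-write. */
static uint32_t
privilege_mask_new_mask(uint32_t grp_bits, int do_change)
{
	return grp_bits |
	    (do_change ? MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE : 0);
}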
@@ -14226,27 +14984,30 @@
 * e.g. VF 1,3 = 0x00030001
 */
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
/* New link state mode to be set */
#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
/* enum: Use this value to just read the existing setting without modifying it.
 */
-#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff

/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4


/***********************************/
/* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the curent range of allowable values for the SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
 * parameter to MC_CMD_INIT_RXQ.
 */
#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
@@ -14261,8 +15022,10 @@
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
/* Minimum acceptable snapshot length. */
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
/* Maximum acceptable snapshot length. */
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4


/***********************************/
@@ -14272,7 +15035,7 @@
#define MC_CMD_FUSE_DIAGS 0x102
#undef MC_CMD_0x102_PRIVILEGE_CTG

-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE

/* MC_CMD_FUSE_DIAGS_IN msgrequest */
#define MC_CMD_FUSE_DIAGS_IN_LEN 0
@@ -14281,28 +15044,40 @@
#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
/* Total number of mismatched bits between pairs in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
/* Total number of mismatched bits between pairs in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
/* Total number of mismatched bits between pairs in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4


/***********************************/
@@ -14320,14 +15095,16 @@
#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
/* The groups of functions to have their privilege masks modified.
 */
#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
/* For VFS_OF_PF specify the PF, for ONE specify the target function */
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
@@ -14336,10 +15113,12 @@
 * refer to the command MC_CMD_PRIVILEGE_MASK
 */
#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
/* Privileges to be removed from the target functions. For privilege
 * definitions refer to the command MC_CMD_PRIVILEGE_MASK
 */
#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4

/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
@@ -14358,8 +15137,10 @@
#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4

/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
@@ -14379,7 +15160,7 @@
#define MC_CMD_XPM_WRITE_BYTES 0x104
#undef MC_CMD_0x104_PRIVILEGE_CTG

-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE

/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
@@ -14387,8 +15168,10 @@
#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
/* Start address (byte) */
#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
/* Data */
#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
@@ -14406,14 +15189,16 @@
#define MC_CMD_XPM_READ_SECTOR 0x105
#undef MC_CMD_0x105_PRIVILEGE_CTG

-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE

/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
/* Sector index */
#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
/* Sector size */
#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4

/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
@@ -14421,10 +15206,12 @@
#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
/* Sector type */
#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
/* Sector data */
#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
@@ -14439,7 +15226,7 @@
#define MC_CMD_XPM_WRITE_SECTOR 0x106
#undef MC_CMD_0x106_PRIVILEGE_CTG

-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE

/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
@@ -14456,10 +15243,12 @@
#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
/* Sector type */
#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
/* Sector size */
#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
/* Sector data */
#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
@@ -14470,6 +15259,7 @@
#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
/* New sector index */
#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4


/***********************************/
@@ -14479,12 +15269,13 @@
#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
#undef MC_CMD_0x107_PRIVILEGE_CTG

-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE

/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
/* Sector index */
#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4

/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
@@ -14497,14 +15288,16 @@
#define MC_CMD_XPM_BLANK_CHECK 0x108
#undef MC_CMD_0x108_PRIVILEGE_CTG

-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE

/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4

/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
@@ -14512,6 +15305,7 @@
#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
/* Total number of bad (non-blank) locations */
#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit
 * into MCDI response)
 */
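/* Editor's aside (not part of the patch): the _LENMIN/_LENMAX/_LEN(num)
 * triple above describes variable-length payloads, with _LEN(num) giving the
 * total byte length for num trailing elements. A hedged sketch of sizing and
 * filling such a request; encode_xpm_write_bytes() and put_le32() are
 * hypothetical names. */

#include <stdint.h>
#include <string.h>

static void
put_le32(uint8_t *b, size_t o, uint32_t v)
{
	b[o] = (uint8_t)v; b[o + 1] = (uint8_t)(v >> 8);
	b[o + 2] = (uint8_t)(v >> 16); b[o + 3] = (uint8_t)(v >> 24);
}

/* Returns the request length: 8 fixed bytes plus count 1-byte data items. */
static size_t
encode_xpm_write_bytes(uint8_t *buf, uint32_t addr,
    const uint8_t *data, uint32_t count)
{
	put_le32(buf, MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST, addr);
	put_le32(buf, MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST, count);
	memcpy(buf + MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST, data, count);
	return MC_CMD_XPM_WRITE_BYTES_IN_LEN(count);
}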
@@ -14528,14 +15322,16 @@
#define MC_CMD_XPM_REPAIR 0x109
#undef MC_CMD_0x109_PRIVILEGE_CTG

-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE

/* MC_CMD_XPM_REPAIR_IN msgrequest */
#define MC_CMD_XPM_REPAIR_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4

/* MC_CMD_XPM_REPAIR_OUT msgresponse */
#define MC_CMD_XPM_REPAIR_OUT_LEN 0
@@ -14549,7 +15345,7 @@
#define MC_CMD_XPM_DECODER_TEST 0x10a
#undef MC_CMD_0x10a_PRIVILEGE_CTG

-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE

/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
@@ -14569,7 +15365,7 @@
#define MC_CMD_XPM_WRITE_TEST 0x10b
#undef MC_CMD_0x10b_PRIVILEGE_CTG

-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE

/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
@@ -14590,16 +15386,19 @@
#define MC_CMD_EXEC_SIGNED 0x10c
#undef MC_CMD_0x10c_PRIVILEGE_CTG

-#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND

/* MC_CMD_EXEC_SIGNED_IN msgrequest */
#define MC_CMD_EXEC_SIGNED_IN_LEN 28
/* the length of code to include in the CMAC */
#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
/* the length of data to include in the CMAC */
#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
/* the XPM sector containing the key to use */
#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
/* the expected CMAC value */
#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
@@ -14617,12 +15416,13 @@
#define MC_CMD_PREPARE_SIGNED 0x10d
#undef MC_CMD_0x10d_PRIVILEGE_CTG

-#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND

/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
/* the length of data area to clear */
#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4

/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
@@ -14639,12 +15439,13 @@
#define MC_CMD_SET_SECURITY_RULE 0x10f
#undef MC_CMD_0x10f_PRIVILEGE_CTG

-#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND

/* MC_CMD_SET_SECURITY_RULE_IN msgrequest */
#define MC_CMD_SET_SECURITY_RULE_IN_LEN 92
/* fields to include in match criteria */
#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_LEN 4
#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_LBN 0
#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_WIDTH 1
#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_LBN 1
@@ -14701,8 +15502,10 @@
#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_LEN 2
/* Physical port to match (as little-endian 32-bit value) */
#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_OFST 28
+#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_LEN 4
/* Reserved; set to 0 */
#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_OFST 32
+#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_LEN 4
/* remote IP address to match (as bytes in network order; set last 12 bytes to
 * 0 for IPv4 address)
 */
@@ -14719,58 +15522,85 @@
 * MC_CMD_SUBNET_MAP_SET_NODE appropriately
 */
#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_OFST 68
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_LEN 4
/* remote portrange ID to match (as little-endian 32-bit value); note that
 * remote port ranges are matched by mapping the remote port to a "portrange
 * ID" via a data structure which must already have been configured using
 * MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
 */
#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_OFST 72
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_LEN 4
/* local portrange ID to match (as little-endian 32-bit value); note that local
 * port ranges are matched by mapping the local port to a "portrange ID" via a
 * data structure which must already have been configured using
 * MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
 */
#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_OFST 76
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_LEN 4
/* set the action for transmitted packets matching this rule */
#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_OFST 80
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_LEN 4
/* enum: make no decision */
-#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0
/* enum: decide to accept the packet */
-#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1
/* enum: decide to drop the packet */
-#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2
+/* enum: inform the TSA controller about some sample of packets matching this
+ * rule (via MC_CMD_TSA_INFO_IN_PKT_SAMPLE messages); may be bitwise-ORed with
+ * either the WHITELIST or BLACKLIST action
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_SAMPLE 0x4
/* enum: do not change the current TX action */
-#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff
/* set the action for received packets matching this rule */
#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_OFST 84
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_LEN 4
/* enum: make no decision */
-#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0
/* enum: decide to accept the packet */
-#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1
/* enum: decide to drop the packet */
-#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2
+/* enum: inform the TSA controller about some sample of packets matching this
+ * rule (via MC_CMD_TSA_INFO_IN_PKT_SAMPLE messages); may be bitwise-ORed with
+ * either the WHITELIST or BLACKLIST action
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_SAMPLE 0x4
/* enum: do not change the current RX action */
-#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff
/* counter ID to associate with this rule; IDs are allocated using
 * MC_CMD_SECURITY_RULE_COUNTER_ALLOC
 */
#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_OFST 88
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_LEN 4
/* enum: special value for the null counter ID */
-#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0
+/* enum: special value to tell the MC to allocate an available counter */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_SW_AUTO 0xeeeeeeee
+/* enum: special value to request use of hardware counter (Medford2 only) */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_HW 0xffffffff
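/* Editor's aside (not part of the patch): per the comments above, SAMPLE is
 * a modifier bit that may be ORed into a WHITELIST or BLACKLIST action word,
 * while UNCHANGED leaves the current action untouched. A trivial sketch with
 * a hypothetical helper name: */

#include <stdint.h>

static uint32_t
tx_action_whitelist_with_sampling(void)
{
	/* Accept the packet and also sample it to the TSA controller. */
	return MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST |
	       MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_SAMPLE;
}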
/* MC_CMD_SET_SECURITY_RULE_OUT msgresponse */
-#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 28
+#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 32
/* new reference count for uses of counter ID */
#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_OFST 0
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_LEN 4
/* constructed match bits for this rule (as a tracing aid only) */
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_OFST 4
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_LEN 12
/* constructed discriminator bits for this rule (as a tracing aid only) */
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_OFST 16
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_LEN 4
/* base location for probes for this rule (as a tracing aid only) */
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_OFST 20
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_LEN 4
/* step for probes for this rule (as a tracing aid only) */
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_OFST 24
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_LEN 4
+/* ID for reading back the counter */
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_OFST 28
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_LEN 4


/***********************************/
@@ -14784,14 +15614,15 @@
#define MC_CMD_RESET_SECURITY_RULES 0x110
#undef MC_CMD_0x110_PRIVILEGE_CTG

-#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND

/* MC_CMD_RESET_SECURITY_RULES_IN msgrequest */
#define MC_CMD_RESET_SECURITY_RULES_IN_LEN 4
/* index of physical port to reset (or ALL_PHYSICAL_PORTS to reset all) */
#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_OFST 0
+#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_LEN 4
/* enum: special value to reset all physical ports */
-#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff
+#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff

/* MC_CMD_RESET_SECURITY_RULES_OUT msgresponse */
#define MC_CMD_RESET_SECURITY_RULES_OUT_LEN 0
@@ -14836,12 +15667,13 @@
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC 0x112
#undef MC_CMD_0x112_PRIVILEGE_CTG

-#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND

/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN msgrequest */
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_LEN 4
/* the number of new counter IDs to request */
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_LEN 4

/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT msgresponse */
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMIN 4
@@ -14851,6 +15683,7 @@
 * requested if resources are unavailable)
 */
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_LEN 4
/* new counter ID(s) */
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 4
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4
@@ -14869,7 +15702,7 @@
#define MC_CMD_SECURITY_RULE_COUNTER_FREE 0x113
#undef MC_CMD_0x113_PRIVILEGE_CTG

-#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND

/* MC_CMD_SECURITY_RULE_COUNTER_FREE_IN msgrequest */
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMIN 4
@@ -14877,6 +15710,7 @@
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LEN(num) (4+4*(num))
/* the number of counter IDs to free */
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_LEN 4
/* the counter ID(s) to free */
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_OFST 4
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_LEN 4
@@ -14900,7 +15734,7 @@
#define MC_CMD_SUBNET_MAP_SET_NODE 0x114
#undef MC_CMD_0x114_PRIVILEGE_CTG

-#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND

/* MC_CMD_SUBNET_MAP_SET_NODE_IN msgrequest */
#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMIN 6
@@ -14908,6 +15742,7 @@
#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LEN(num) (4+2*(num))
/* node to update in the range 0 .. SUBNET_MAP_NUM_NODES-1 */
#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_OFST 0
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_LEN 4
/* SUBNET_MAP_NUM_ENTRIES_PER_NODE new entries; each entry is either a pointer
 * to the next node, expressed as an offset in the trie memory (i.e. node ID
 * multiplied by SUBNET_MAP_NUM_ENTRIES_PER_NODE), or a leaf value in the range
@@ -14928,7 +15763,7 @@
 */
#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_OFST 0
#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LEN 2
-#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */
+#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */
#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN 0
#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_WIDTH 16
/* final portrange ID for leaf nodes (don't care for branch nodes) */
@@ -14951,7 +15786,7 @@
#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE 0x115
#undef MC_CMD_0x115_PRIVILEGE_CTG

-#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND

/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN msgrequest */
#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
@@ -14982,7 +15817,7 @@
#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE 0x116
#undef MC_CMD_0x116_PRIVILEGE_CTG

-#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND

/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN msgrequest */
#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
@@ -15005,18 +15840,18 @@
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
/* enum: the IANA allocated UDP port for VXLAN */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
/* enum: the IANA allocated UDP port for Geneve */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
/* tunnel encapsulation protocol (only those named below are supported) */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
MC_CMD_RX_BALANCING_IN_PORT_OFST 0 +#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4 /* The VLAN priority associated to the table index and vFIFO */ #define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4 +#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4 /* The resulting bit of SRC^DST for indexing the table */ #define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8 +#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4 /* The RX engine to which the vFIFO in the table entry will point to */ #define MC_CMD_RX_BALANCING_IN_ENG_OFST 12 +#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4 /* MC_CMD_RX_BALANCING_OUT msgresponse */ #define MC_CMD_RX_BALANCING_OUT_LEN 0 @@ -15093,12 +15932,7 @@ /***********************************/ /* MC_CMD_TSA_BIND * TSAN - TSAC binding communication protocol. Refer to SF-115479-TC for more - * info in respect to the binding protocol. This MCDI command is only available - * over a TLS secure connection between the TSAN and TSAC, and is not available - * to host software. Note- The messages definitions that do comprise this MCDI - * command deemed as provisional. This MCDI command has not yet been used in - * any released code and may change during development. This note will be - * removed once it is regarded as stable. + * info in respect to the binding protocol. */ #define MC_CMD_TSA_BIND 0x119 #undef MC_CMD_0x119_PRIVILEGE_CTG @@ -15108,15 +15942,13 @@ /* MC_CMD_TSA_BIND_IN msgrequest: Protocol operation code */ #define MC_CMD_TSA_BIND_IN_LEN 4 #define MC_CMD_TSA_BIND_IN_OP_OFST 0 -/* enum: Retrieve the TSAN ID from a TSAN. TSAN ID is a unique identifier for - * the network adapter. More specifically, TSAN ID equals the MAC address of - * the network adapter. TSAN ID is used as part of the TSAN authentication - * protocol. Refer to SF-114946-SW for more information. - */ +#define MC_CMD_TSA_BIND_IN_OP_LEN 4 +/* enum: Obsolete. Use MC_CMD_SECURE_NIC_INFO_IN_STATUS. */ #define MC_CMD_TSA_BIND_OP_GET_ID 0x1 /* enum: Get a binding ticket from the TSAN. The binding ticket is used as part * of the binding procedure to authorize the binding of an adapter to a TSAID. - * Refer to SF-114946-SW for more information. + * Refer to SF-114946-SW for more information. This sub-command is only + * available over a TLS secure connection between the TSAN and TSAC. */ #define MC_CMD_TSA_BIND_OP_GET_TICKET 0x2 /* enum: Opcode associated with the propagation of a private key that TSAN uses @@ -15124,18 +15956,47 @@ * uses this key for a signing operation. TSAC uses the counterpart public key * to verify the signature. Note - The post-binding authentication occurs when * the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer to - * SF-114946-SW for more information. + * SF-114946-SW for more information. This sub-command is only available over a + * TLS secure connection between the TSAN and TSAC. */ #define MC_CMD_TSA_BIND_OP_SET_KEY 0x3 -/* enum: Request an unbinding operation. Note- TSAN clears the binding ticket - * from the Nvram section. +/* enum: Request an insecure unbinding operation. This sub-command is available + * for any privileged client. */ #define MC_CMD_TSA_BIND_OP_UNBIND 0x4 - -/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest */ +/* enum: Obsolete. Use MC_CMD_TSA_BIND_OP_SECURE_UNBIND. */ +#define MC_CMD_TSA_BIND_OP_UNBIND_EXT 0x5 +/* enum: Opcode associated with the propagation of the unbinding secret token. + * TSAN persists the unbinding secret token. Refer to SF-115479-TC for more + * information. This sub-command is only available over a TLS secure connection + * between the TSAN and TSAC. 
+ */ +#define MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN 0x6 +/* enum: Obsolete. Use MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION. */ +#define MC_CMD_TSA_BIND_OP_DECOMMISSION 0x7 +/* enum: Obsolete. Use MC_CMD_GET_CERTIFICATE. */ +#define MC_CMD_TSA_BIND_OP_GET_CERTIFICATE 0x8 +/* enum: Request a secure unbinding operation using unbinding token. This sub- + * command is available for any privileged client. + */ +#define MC_CMD_TSA_BIND_OP_SECURE_UNBIND 0x9 +/* enum: Request a secure decommissioning operation. This sub-command is + * available for any privileged client. + */ +#define MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION 0xa +/* enum: Test facility that allows an adapter to be configured to behave as if + * Bound to a TSA controller with restricted MCDI administrator operations. + * This operation is primarily intended to aid host driver development. + */ +#define MC_CMD_TSA_BIND_OP_TEST_MCDI 0xb + +/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest: Obsolete. Use + * MC_CMD_SECURE_NIC_INFO_IN_STATUS. + */ #define MC_CMD_TSA_BIND_IN_GET_ID_LEN 20 /* The operation requested. */ #define MC_CMD_TSA_BIND_IN_GET_ID_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_GET_ID_OP_LEN 4 /* Cryptographic nonce that TSAC generates and sends to TSAN. TSAC generates * the nonce every time as part of the TSAN post-binding authentication * procedure when the TSAN-TSAC connection terminates and TSAN does need to re- @@ -15148,6 +16009,7 @@ #define MC_CMD_TSA_BIND_IN_GET_TICKET_LEN 4 /* The operation requested. */ #define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_LEN 4 /* MC_CMD_TSA_BIND_IN_SET_KEY msgrequest */ #define MC_CMD_TSA_BIND_IN_SET_KEY_LENMIN 5 @@ -15155,6 +16017,7 @@ #define MC_CMD_TSA_BIND_IN_SET_KEY_LEN(num) (4+1*(num)) /* The operation requested. */ #define MC_CMD_TSA_BIND_IN_SET_KEY_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_LEN 4 /* This data blob contains the private key generated by the TSAC. TSAN uses * this key for a signing operation. Note- This private key is used in * conjunction with the post-binding TSAN authentication procedure that occurs @@ -15166,26 +16029,261 @@ #define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MINNUM 1 #define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MAXNUM 248 -/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Asks for the un-binding procedure */ +/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Request an insecure unbinding + * operation. + */ #define MC_CMD_TSA_BIND_IN_UNBIND_LEN 10 /* The operation requested. */ #define MC_CMD_TSA_BIND_IN_UNBIND_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_UNBIND_OP_LEN 4 /* TSAN unique identifier for the network adapter */ #define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_OFST 4 #define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_LEN 6 -/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse */ +/* MC_CMD_TSA_BIND_IN_UNBIND_EXT msgrequest: Obsolete. Use + * MC_CMD_TSA_BIND_IN_SECURE_UNBIND. + */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMIN 93 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMAX 252 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LEN(num) (92+1*(num)) +/* The operation requested. */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_OP_LEN 4 +/* TSAN unique identifier for the network adapter */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_OFST 4 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_LEN 6 +/* Align the arguments to 32 bits */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_RSVD_OFST 10 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_RSVD_LEN 2 +/* This attribute identifies the TSA infrastructure domain. 
The length of the + * TSAID attribute is limited to 64 bytes. This is how the TSA SDK defines the + * max length. Note- The TSAID is the Organizational Unit Name field as part of + * the root and server certificates. + */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_OFST 12 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_LEN 1 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_NUM 64 +/* Unbinding secret token. The adapter validates this unbinding token by + * comparing it against the one stored on the adapter as part of the + * MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest. Refer to SF-115479-TC for + * more information. + */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_UNBINDTOKEN_OFST 76 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_UNBINDTOKEN_LEN 16 +/* This is the signature of the above-mentioned fields - TSANID, TSAID and + * UNBINDTOKEN. As per current requirements, the SIG opaque data blob contains + * an ECDSA ECC-384 based signature. The ECC curve is secp384r1. The signature + * is also ASN-1 encoded. Note- The signature is verified based on the public + * key stored in the root certificate that is provisioned on the adapter side. + * This key is known as the PUKtsaid. Refer to SF-115479-TC for more + * information. + */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_OFST 92 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_LEN 1 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MAXNUM 160 + +/* MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest */ +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_LEN 20 +/* The operation requested. */ +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_LEN 4 +/* Unbinding secret token. TSAN persists the unbinding secret token. Refer to + * SF-115479-TC for more information. + */ +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_OFST 4 +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_LEN 16 +/* enum: There are situations when the binding process does not complete + * successfully due to corruption of the key or other attributes at the + * database level (Controller). The adapter can then no longer connect to the + * controller. To recover, use the decommission command to force the adapter + * into the unbound state. + */ +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_ADAPTER_BINDING_FAILURE 0x1 + +/* MC_CMD_TSA_BIND_IN_DECOMMISSION msgrequest: Obsolete. Use + * MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMIN 109 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMAX 252 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LEN(num) (108+1*(num)) +/* This is the signature of the above-mentioned fields - TSAID, USER and + * REASON. As per current requirements, the SIG opaque data blob contains an + * ECDSA ECC-384 based signature. The ECC curve is secp384r1. The signature is + * also ASN-1 encoded. Note- The signature is verified based on the public key + * stored in the root certificate that is provisioned on the adapter side. This + * key is known as the PUKtsaid. Refer to SF-115479-TC for more information. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_OFST 108 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_LEN 1 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MAXNUM 144 +/* The operation requested. */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_OP_LEN 4 +/* This attribute identifies the TSA infrastructure domain. The length of the + * TSAID attribute is limited to 64 bytes.
This is how the TSA SDK defines the + * max length. Note- The TSAID is the Organizational Unit Name field as part of + * the root and server certificates. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_OFST 4 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_LEN 1 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_NUM 64 +/* User ID that comes, as an example, from the Controller. Note- The 33-byte + * length of this attribute is the maximum length of a Linux user name plus the + * null terminator. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_OFST 68 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_LEN 1 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_NUM 33 +/* Align the arguments to 32 bits */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_RSVD_OFST 101 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_RSVD_LEN 3 +/* Reason why decommissioning happens. Note- The list of reasons, defined as + * part of the enumeration below, can be extended. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_REASON_OFST 104 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_REASON_LEN 4 + +/* MC_CMD_TSA_BIND_IN_GET_CERTIFICATE msgrequest: Obsolete. Use + * MC_CMD_GET_CERTIFICATE. + */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_LEN 8 +/* The operation requested; must be MC_CMD_TSA_BIND_OP_GET_CERTIFICATE. */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_OP_LEN 4 +/* Type of the certificate to be retrieved. */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_TYPE_LEN 4 +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_UNUSED 0x0 /* enum */ +/* enum: Adapter Authentication Certificate (AAC). The AAC is used by the + * controller to verify the authenticity of the adapter. + */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_AAC 0x1 +/* enum: Adapter Authentication Signing Certificate (AASC). The AASC is used by + * the controller to verify the validity of the AAC. + */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_AASC 0x2 + +/* MC_CMD_TSA_BIND_IN_SECURE_UNBIND msgrequest: Request a secure unbinding + * operation using the unbinding token. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMIN 97 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMAX 200 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LEN(num) (96+1*(num)) +/* The operation requested; must be MC_CMD_TSA_BIND_OP_SECURE_UNBIND. */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_LEN 4 +/* Type of the message. (MESSAGE_TYPE_xxx) Must be + * MESSAGE_TYPE_TSA_SECURE_UNBIND. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_LEN 4 +/* TSAN unique identifier for the network adapter */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_OFST 8 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_LEN 6 +/* Align the arguments to 32 bits */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_RSVD_OFST 14 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_RSVD_LEN 2 +/* A NUL-padded US-ASCII string identifying the TSA infrastructure domain. This + * field is for information only, and not used by the firmware. Note- The TSAID + * is the Organizational Unit Name field as part of the root and server + * certificates. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_OFST 16 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_NUM 64 +/* Unbinding secret token.
The adapter validates this unbinding token by + * comparing it against the one stored on the adapter as part of the + * MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest. Refer to SF-115479-TC for + * more information. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_OFST 80 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_LEN 16 +/* The signature computed and encoded as specified by MESSAGE_TYPE. */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_OFST 96 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MAXNUM 104 + +/* MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION msgrequest: Request a secure + * decommissioning operation. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMIN 113 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMAX 216 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LEN(num) (112+1*(num)) +/* The operation requested, must be MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION. */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_OP_LEN 4 +/* Type of the message. (MESSAGE_TYPE_xxx) Must be + * MESSAGE_TYPE_SECURE_DECOMMISSION. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_MESSAGE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_MESSAGE_TYPE_LEN 4 +/* A NUL padded US-ASCII string identifying the TSA infrastructure domain. This + * field is for information only, and not used by the firmware. Note- The TSAID + * is the Organizational Unit Name field as part of the root and server + * certificates. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_OFST 8 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_NUM 64 +/* A NUL padded US-ASCII string containing user name of the creator of the + * decommissioning ticket. This field is for information only, and not used by + * the firmware. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_OFST 72 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_NUM 36 +/* Reason of why decommissioning happens */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_REASON_OFST 108 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_REASON_LEN 4 +/* enum: There are situations when the binding process does not complete + * successfully due to key, other attributes corruption at the database level + * (Controller). Adapter can't connect to the controller anymore. To recover, + * use the decommission command to force the adapter into unbound state. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_ADAPTER_BINDING_FAILURE 0x1 +/* The signature computed and encoded as specified by MESSAGE_TYPE. */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_OFST 112 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MAXNUM 104 + +/* MC_CMD_TSA_BIND_IN_TEST_MCDI msgrequest: Test mode that emulates MCDI + * interface restrictions of a bound adapter. This operation is intended for + * test use on adapters that are not deployed and bound to a TSA Controller. + * Using it on a Bound adapter will succeed but will not alter the MCDI + * privileges as MCDI operations will already be restricted. + */ +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_LEN 8 +/* The operation requested must be MC_CMD_TSA_BIND_OP_TEST_MCDI. 
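To make the variable-length SECURE_UNBIND layout concrete, here is a minimal sketch that fills the fixed 96-byte prefix of an MC_CMD_TSA_BIND_IN_SECURE_UNBIND request plus the trailing signature, reusing the hypothetical put_le32() helper from the earlier aside. The caller supplies the message type and the controller-provided signature; the total request length is MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LEN(sig_len).

#include <stdint.h>
#include <string.h>

/* Fill a SECURE_UNBIND request of 96 + sig_len bytes (sig_len is 1..104). */
static void tsa_secure_unbind_fill(uint8_t *req, uint32_t message_type,
				   const uint8_t tsanid[6],
				   const char tsaid[64],
				   const uint8_t token[16],
				   const uint8_t *sig, size_t sig_len)
{
	put_le32(req + 0, 0x9);          /* OP: ..._OP_SECURE_UNBIND */
	put_le32(req + 4, message_type); /* MESSAGE_TYPE_TSA_SECURE_UNBIND */
	memcpy(req + 8, tsanid, 6);      /* TSANID */
	memset(req + 14, 0, 2);          /* TSANID_RSVD: pad to 32 bits */
	memcpy(req + 16, tsaid, 64);     /* NUL-padded US-ASCII TSAID */
	memcpy(req + 80, token, 16);     /* UNBINDTOKEN */
	memcpy(req + 96, sig, sig_len);  /* SIG, encoded per MESSAGE_TYPE */
}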
*/ +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_LEN 4 +/* Enable or disable emulation of bound adapter */ +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_OFST 4 +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_LEN 4 +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_DISABLE 0x0 /* enum */ +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_ENABLE 0x1 /* enum */ + +/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse: Obsolete. Use + * MC_CMD_SECURE_NIC_INFO_OUT_STATUS. + */ #define MC_CMD_TSA_BIND_OUT_GET_ID_LENMIN 15 #define MC_CMD_TSA_BIND_OUT_GET_ID_LENMAX 252 #define MC_CMD_TSA_BIND_OUT_GET_ID_LEN(num) (14+1*(num)) -/* The operation completion code. */ +/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_ID that is sent back to + * the caller. + */ #define MC_CMD_TSA_BIND_OUT_GET_ID_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_LEN 4 /* Rules engine type. Note- The rules engine type allows TSAC to further * identify the connected endpoint (e.g. TSAN, NIC Emulator) type and take the * proper action accordingly. As an example, TSAC uses the rules engine type to * select the SF key that differs in the case of TSAN vs. NIC Emulator. */ #define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_OFST 4 +#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_LEN 4 /* enum: Hardware rules engine. */ #define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_TSAN 0x1 /* enum: Nic emulator rules engine. */ @@ -15209,8 +16307,11 @@ #define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMIN 5 #define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMAX 252 #define MC_CMD_TSA_BIND_OUT_GET_TICKET_LEN(num) (4+1*(num)) -/* The operation completion code. */ +/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_TICKET that is sent back + * to the caller. + */ #define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_LEN 4 /* The ticket represents the data blob construct that TSAN sends to TSAC as * part of the binding protocol. From the TSAN perspective the ticket is an * opaque construct. For more info refer to SF-115479-TC. @@ -15222,23 +16323,142 @@ /* MC_CMD_TSA_BIND_OUT_SET_KEY msgresponse */ #define MC_CMD_TSA_BIND_OUT_SET_KEY_LEN 4 -/* The operation completion code. */ +/* The protocol operation code MC_CMD_TSA_BIND_OP_SET_KEY that is sent back to + * the caller. + */ #define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_LEN 4 -/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse */ +/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse: Response to insecure unbind request. + */ #define MC_CMD_TSA_BIND_OUT_UNBIND_LEN 8 /* Same as MC_CMD_ERR field, but included as 0 in success cases */ #define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_OFST 0 +#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_LEN 4 /* Extra status information */ #define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_OFST 4 +#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_LEN 4 +/* enum: Unbind successful. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0 +/* enum: TSANID mismatch */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1 +/* enum: Unable to remove the binding ticket from persistent storage. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2 +/* enum: TSAN is not bound to a binding ticket. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3 + +/* MC_CMD_TSA_BIND_OUT_UNBIND_EXT msgresponse: Obsolete. Use + * MC_CMD_TSA_BIND_OUT_SECURE_UNBIND. 
+ */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_LEN 8 +/* Same as MC_CMD_ERR field, but included as 0 in success cases */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_OFST 0 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_LEN 4 +/* Extra status information */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_OFST 4 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_LEN 4 /* enum: Unbind successful. */ -#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_OK_UNBOUND 0x0 /* enum: TSANID mismatch */ -#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_TSANID 0x1 /* enum: Unable to remove the binding ticket from persistent storage. */ -#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_REMOVE_TICKET 0x2 /* enum: TSAN is not bound to a binding ticket. */ -#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_NOT_BOUND 0x3 +/* enum: Invalid unbind token */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_TOKEN 0x4 +/* enum: Invalid signature */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_SIGNATURE 0x5 + +/* MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN msgresponse */ +#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_LEN 4 +/* The protocol operation code MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN that is sent + * back to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_OP_LEN 4 + +/* MC_CMD_TSA_BIND_OUT_DECOMMISSION msgresponse: Obsolete. Use + * MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION. + */ +#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_LEN 4 +/* The protocol operation code MC_CMD_TSA_BIND_OP_DECOMMISSION that is sent + * back to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_OP_LEN 4 + +/* MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE msgresponse */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMIN 9 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMAX 252 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LEN(num) (8+1*(num)) +/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_CERTIFICATE that is sent + * back to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_OP_LEN 4 +/* Type of the certificate. */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_TSA_BIND_IN_GET_CERTIFICATE/TYPE */ +/* The certificate data. */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_OFST 8 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_LEN 1 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MINNUM 1 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MAXNUM 244 + +/* MC_CMD_TSA_BIND_OUT_SECURE_UNBIND msgresponse: Response to secure unbind + * request. + */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_LEN 8 +/* The protocol operation code that is sent back to the caller. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OP_LEN 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_RESULT_OFST 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_RESULT_LEN 4 +/* enum: Unbind successful. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OK_UNBOUND 0x0 +/* enum: TSANID mismatch */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_TSANID 0x1 +/* enum: Unable to remove the binding ticket from persistent storage. 
*/ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_REMOVE_TICKET 0x2 +/* enum: TSAN is not bound to a domain. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_NOT_BOUND 0x3 +/* enum: Invalid unbind token */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_TOKEN 0x4 +/* enum: Invalid signature */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_SIGNATURE 0x5 + +/* MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION msgresponse: Response to secure + * decommission request. + */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_LEN 8 +/* The protocol operation code that is sent back to the caller. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OP_LEN 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_RESULT_OFST 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_RESULT_LEN 4 +/* enum: Unbind successful. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OK_UNBOUND 0x0 +/* enum: TSANID mismatch */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_TSANID 0x1 +/* enum: Unable to remove the binding ticket from persistent storage. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_REMOVE_TICKET 0x2 +/* enum: TSAN is not bound to a domain. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_NOT_BOUND 0x3 +/* enum: Invalid unbind token */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_TOKEN 0x4 +/* enum: Invalid signature */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_SIGNATURE 0x5 + +/* MC_CMD_TSA_BIND_OUT_TEST_MCDI msgresponse */ +#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_LEN 4 +/* The protocol operation code MC_CMD_TSA_BIND_OP_TEST_MCDI that is sent back + * to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_OP_LEN 4 /***********************************/ @@ -15251,9 +16471,9 @@ * will be loaded at power on or MC reboot, instead of the default ruleset. * Rollback of the currently active ruleset to the cached version (when it is * valid) is also supported. (Medford-only; for use by SolarSecure apps, not - * directly by drivers. See SF-114946-SW.) NOTE - this message definition is - * provisional. It has not yet been used in any released code and may change - * during development. This note will be removed once it is regarded as stable. + * directly by drivers. See SF-114946-SW.) NOTE - The only sub-operation + * allowed in an adapter bound to a TSA controller from the local host is + * OP_GET_CACHED_VERSION. All other sub-operations are prohibited. */ #define MC_CMD_MANAGE_SECURITY_RULESET_CACHE 0x11a #undef MC_CMD_0x11a_PRIVILEGE_CTG @@ -15264,18 +16484,19 @@ #define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_LEN 4 /* the operation to perform */ #define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_OFST 0 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_LEN 4 /* enum: reports the ruleset version that is cached in persistent storage but * performs no other action */ -#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0 /* enum: rolls back the active state to the cached version. (May fail with * ENOENT if there is no valid cached version.)
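Since OP_ROLLBACK may legitimately fail with ENOENT, a caller has to treat the missing-cache case as an expected outcome rather than an error. A sketch of that pattern, reusing the hypothetical mcdi_rpc()/put_le32() helpers from the earlier asides and assuming the transport reports MC errors as negative errno values:

#include <errno.h>
#include <stdint.h>

/* Roll the active ruleset back to the cached version; no valid cached
 * version (ENOENT) simply means there is nothing to roll back to.
 */
static int ruleset_cache_rollback(void)
{
	uint8_t req[4];  /* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_LEN */
	uint8_t out[36]; /* STATE word plus the opaque VERSION hash */
	int rc;

	put_le32(req, 0x1 /* ..._IN_OP_ROLLBACK */);
	rc = mcdi_rpc(0x11a /* MC_CMD_MANAGE_SECURITY_RULESET_CACHE */,
		      req, sizeof(req), out, sizeof(out));
	return rc == -ENOENT ? 0 : rc;
}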
*/ -#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1 /* enum: commits the active state to the persistent cache */ -#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2 /* enum: invalidates the persistent cache without affecting the active state */ -#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3 /* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT msgresponse */ #define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMIN 5 @@ -15285,12 +16506,13 @@ * requested operation in the case of rollback, commit, or invalidate) */ #define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_OFST 0 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_LEN 4 /* enum: persistent cache is invalid (the VERSION field will be empty in this * case) */ -#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0 /* enum: persistent cache is valid */ -#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1 /* cached ruleset version (after completion of the requested operation, in the * case of rollback, commit, or invalidate) as an opaque hash value in the same * form as MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION @@ -15309,7 +16531,7 @@ #define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c #undef MC_CMD_0x11c_PRIVILEGE_CTG -#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9 @@ -15317,8 +16539,10 @@ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num)) /* The tag to be appended */ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4 /* The length of the data */ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4 /* The data to be contained in the TLV structure */ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8 #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1 @@ -15344,6 +16568,7 @@ #define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4 /* Data type to be checked */ #define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0 +#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4 /* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12 @@ -15351,10 +16576,13 @@ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num)) /* Number of sectors found (test builds only) */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4 /* Number of bytes found (test builds only) */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4 /* Length of signature */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4 /* Signature */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12 #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1 @@ -15380,23 +16608,29 @@ #define MC_CMD_SET_EVQ_TMR_IN_LEN 16 /* Function-relative queue instance */ #define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0 +#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4 /* Requested value for timer load (in nanoseconds) */ #define 
MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4 +#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4 /* Requested value for timer reload (in nanoseconds) */ #define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8 +#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4 /* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */ #define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12 -#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */ -#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */ -#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */ -#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4 +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */ /* MC_CMD_SET_EVQ_TMR_OUT msgresponse */ #define MC_CMD_SET_EVQ_TMR_OUT_LEN 8 /* Actual value for timer load (in nanoseconds) */ #define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0 +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4 /* Actual value for timer reload (in nanoseconds) */ #define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4 +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4 /***********************************/ @@ -15415,29 +16649,35 @@ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36 /* Reserved for future use. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4 /* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in * nanoseconds) for each increment of the timer load/reload count. The * requested duration of a timer is this value multiplied by the timer * load/reload count. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4 /* For timers updated via writes to EVQ_TMR_REG, this is the maximum value * allowed for timer load/reload counts. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4 /* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a * multiple of this step size will be rounded in an implementation defined * manner. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4 /* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only * meaningful if MC_CMD_SET_EVQ_TMR is implemented. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4 /* Timer durations requested via MCDI that are not a multiple of this step size * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4 /* For timers updated using the bug35388 workaround, this is the time interval * (in nanoseconds) for each increment of the timer load/reload count. The * requested duration of a timer is this value multiplied by the timer @@ -15445,17 +16685,20 @@ * is enabled. 
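The TMR_REG_NS_PER_COUNT / TMR_REG_MAX_COUNT / TMR_REG_STEP triple reported above is enough to turn a requested moderation interval into a register count. One plausible conversion is sketched below; note that the header leaves the rounding of counts that are not a step multiple implementation-defined, so rounding up here is an assumption, and all reported values are assumed non-zero.

#include <stdint.h>

/* Convert a requested duration (ns) into an EVQ_TMR_REG load/reload count,
 * rounded up to the advertised step and clamped to the maximum, using the
 * values reported by MC_CMD_GET_EVQ_TMR_PROPERTIES.
 */
static uint32_t evq_tmr_count(uint32_t ns, uint32_t ns_per_count,
			      uint32_t max_count, uint32_t step)
{
	uint32_t count = (ns + ns_per_count - 1) / ns_per_count;

	count = (count + step - 1) / step * step;
	return count > max_count ? max_count : count;
}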
*/ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4 /* For timers updated using the bug35388 workaround, this is the maximum value * allowed for timer load/reload counts. This field is only meaningful if the * bug35388 workaround is enabled. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4 /* For timers updated using the bug35388 workaround, timer load/reload counts * not a multiple of this step size will be rounded in an implementation * defined manner. This field is only meaningful if the bug35388 workaround is * enabled. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4 /***********************************/ @@ -15466,7 +16709,7 @@ #define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d #undef MC_CMD_0x11d_PRIVILEGE_CTG -#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20 @@ -15474,34 +16717,40 @@ * local queue index. */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4 /* Will the common pool be used as TX_vFIFO_ULL (1) */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4 -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */ /* enum: Using this interface without TX_vFIFO_ULL is not supported for now */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0 /* Number of buffers to reserve for the common pool */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4 /* TX datapath to which the Common Pool is connected to. */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4 /* enum: Extracts information from function */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 /* Network port or RX Engine to which the common pool connects. 
*/ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4 /* enum: Extracts information from function */ -/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */ +/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */ /* enum: To enable Switch loopback with Rx engine 0 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4 /* enum: To enable Switch loopback with Rx engine 1 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5 /* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4 /* ID of the common pool allocated */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4 /***********************************/ @@ -15512,42 +16761,49 @@ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e #undef MC_CMD_0x11e_PRIVILEGE_CTG -#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20 /* Common pool previously allocated to which the new vFIFO will be associated */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4 /* Port or RX engine to associate the vFIFO egress */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4 /* enum: Extracts information from common pool */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1 -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */ /* enum: To enable Switch loopback with Rx engine 0 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4 /* enum: To enable Switch loopback with Rx engine 1 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5 /* Minimum number of buffers that the pool must have */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4 /* enum: Do not check the space available */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0 /* Will the vFIFO be used as TX_vFIFO_ULL */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4 /* Network 
priority of the vFIFO,if applicable */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4 /* enum: Search for the lowest unused priority */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1 /* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8 /* Short vFIFO ID */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4 /* Network priority of the vFIFO */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4 /***********************************/ @@ -15558,12 +16814,13 @@ #define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f #undef MC_CMD_0x11f_PRIVILEGE_CTG -#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */ #define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4 /* Short vFIFO ID */ #define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0 +#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4 /* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */ #define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0 @@ -15577,12 +16834,13 @@ #define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121 #undef MC_CMD_0x121_PRIVILEGE_CTG -#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */ #define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4 /* Common pool ID given when pool allocated */ #define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0 +#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4 /* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */ #define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0 @@ -15604,16 +16862,17 @@ #define MC_CMD_REKEY 0x123 #undef MC_CMD_0x123_PRIVILEGE_CTG -#define MC_CMD_0x123_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x123_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_REKEY_IN msgrequest */ #define MC_CMD_REKEY_IN_LEN 4 /* the type of operation requested */ #define MC_CMD_REKEY_IN_OP_OFST 0 +#define MC_CMD_REKEY_IN_OP_LEN 4 /* enum: Start the rekeying operation */ -#define MC_CMD_REKEY_IN_OP_REKEY 0x0 +#define MC_CMD_REKEY_IN_OP_REKEY 0x0 /* enum: Poll for completion of the rekeying operation */ -#define MC_CMD_REKEY_IN_OP_POLL 0x1 +#define MC_CMD_REKEY_IN_OP_POLL 0x1 /* MC_CMD_REKEY_OUT msgresponse */ #define MC_CMD_REKEY_OUT_LEN 0 @@ -15627,7 +16886,7 @@ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124 #undef MC_CMD_0x124_PRIVILEGE_CTG -#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0 @@ -15636,8 +16895,10 @@ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8 /* Available buffers for the ENG to NET vFIFOs. */ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0 +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4 /* Available buffers for the ENG to ENG and NET to ENG vFIFOs. 
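Taken together, these commands form a simple TX vFIFO lifecycle: allocate a common pool, attach a vFIFO to it, and tear both down in reverse order (MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS can be used first to size the pool). The sketch below shows that sequence with the hypothetical mcdi_rpc()/put_le32() helpers from the earlier asides; the field values are illustrative and error handling is elided.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void tx_vfifo_lifecycle_example(void)
{
	uint8_t req[20], cp[4], vf[8];

	/* MC_CMD_ALLOCATE_TX_VFIFO_CP (0x11d) */
	put_le32(req + 0, 0);             /* INSTANCE */
	put_le32(req + 4, 0x1);           /* MODE: ENABLED (TX_vFIFO_ULL) */
	put_le32(req + 8, 64);            /* SIZE: buffers to reserve */
	put_le32(req + 12, (uint32_t)-1); /* INGRESS: USE_FUNCTION_VALUE */
	put_le32(req + 16, 0x0);          /* EGRESS: PORT0 */
	mcdi_rpc(0x11d, req, 20, cp, 4);  /* response: CP_ID */

	/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO (0x11e) */
	memcpy(req + 0, cp, 4);           /* CP: the pool just allocated */
	put_le32(req + 4, (uint32_t)-1);  /* EGRESS: USE_CP_VALUE */
	put_le32(req + 8, 0x0);           /* SIZE: NO_MINIMUM */
	put_le32(req + 12, 0x1);          /* MODE: use as TX_vFIFO_ULL */
	put_le32(req + 16, (uint32_t)-1); /* PRIORITY: LOWEST_AVAILABLE */
	mcdi_rpc(0x11e, req, 20, vf, 8);  /* response: VID, PRIORITY */

	/* Tear down in reverse: the vFIFO first, then the common pool. */
	mcdi_rpc(0x11f /* MC_CMD_TEARDOWN_TX_VFIFO_VF */, vf, 4, NULL, 0);
	mcdi_rpc(0x121 /* MC_CMD_DEALLOCATE_TX_VFIFO_CP */, cp, 4, NULL, 0);
}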
*/ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4 +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4 /***********************************/ @@ -15653,19 +16914,1231 @@ #define MC_CMD_SET_SECURITY_FUSES 0x126 #undef MC_CMD_0x126_PRIVILEGE_CTG -#define MC_CMD_0x126_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x126_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_SET_SECURITY_FUSES_IN msgrequest */ #define MC_CMD_SET_SECURITY_FUSES_IN_LEN 4 /* Flags specifying what type of security features are being set */ #define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_OFST 0 +#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_LEN 4 #define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_LBN 0 #define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_WIDTH 1 #define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_LBN 1 #define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_WIDTH 1 +#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_LBN 31 +#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_WIDTH 1 /* MC_CMD_SET_SECURITY_FUSES_OUT msgresponse */ #define MC_CMD_SET_SECURITY_FUSES_OUT_LEN 0 +/* MC_CMD_SET_SECURITY_FUSES_V2_OUT msgresponse */ +#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_LEN 4 +/* Flags specifying which security features are enforced on the NIC after the + * flags in the request have been applied. See + * MC_CMD_SET_SECURITY_FUSES_IN/FLAGS for flag definitions. + */ +#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_FLAGS_OFST 0 +#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_FLAGS_LEN 4 + + +/***********************************/ +/* MC_CMD_TSA_INFO + * Messages sent from a TSA adapter to a TSA controller. This command is only + * valid when the MCDI header has MESSAGE_TYPE set to MCDI_MESSAGE_TYPE_TSA. + * This command is not sent by the driver to the MC; it is sent from the MC to + * a TSA controller, and is treated more like an alert message than a command; + * hence the MC does not expect a response in return. Doxbox reference + * SF-117371-SW + */ +#define MC_CMD_TSA_INFO 0x127 +#undef MC_CMD_0x127_PRIVILEGE_CTG + +#define MC_CMD_0x127_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_TSA_INFO_IN msgrequest */ +#define MC_CMD_TSA_INFO_IN_LEN 4 +#define MC_CMD_TSA_INFO_IN_OP_HDR_OFST 0 +#define MC_CMD_TSA_INFO_IN_OP_HDR_LEN 4 +#define MC_CMD_TSA_INFO_IN_OP_LBN 0 +#define MC_CMD_TSA_INFO_IN_OP_WIDTH 16 +/* enum: Information about a recently discovered local IP address of the + * adapter + */ +#define MC_CMD_TSA_INFO_OP_LOCAL_IP 0x1 +/* enum: Information about a sampled packet that either - did not match any + * black/white-list filters and was allowed by the default filter or - did not + * match any black/white-list filters and was denied by the default filter + */ +#define MC_CMD_TSA_INFO_OP_PKT_SAMPLE 0x2 + +/* MC_CMD_TSA_INFO_IN_LOCAL_IP msgrequest: + * + * The TSA controller maintains a list of IP addresses valid for each port of a + * TSA adapter. The TSA controller requires information from the adapter in + * order to learn new IP addresses assigned to a physical port and to identify + * those that are no longer assigned to the physical port. For this purpose, + * the TSA adapter snoops ARP replies, gratuitous ARP requests and ARP probe + * packets seen on each physical port. This definition describes the format of + * the notification message sent from a TSA adapter to a TSA controller + * describing a change in IP address assignment for a port. Doxbox reference + * SF-117371. + * + * Multiple notifications may be combined into a single message in future.
When that happens, a new flag can be defined using the + * reserved bits to describe the extended format of this notification. + */ +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_LEN 18 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_OFST 0 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_LEN 4 +/* Additional metadata describing the IP address information such as source of + * information retrieval, type of IP address, physical port number. + */ +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_OFST 4 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_LEN 4 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_LBN 0 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_WIDTH 8 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_LBN 8 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_WIDTH 8 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_LBN 16 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_WIDTH 8 +/* enum: ARP reply sent out of the physical port */ +#define MC_CMD_TSA_INFO_IP_REASON_TX_ARP 0x0 +/* enum: ARP probe packet received on the physical port */ +#define MC_CMD_TSA_INFO_IP_REASON_RX_ARP_PROBE 0x1 +/* enum: Gratuitous ARP packet received on the physical port */ +#define MC_CMD_TSA_INFO_IP_REASON_RX_GRATUITOUS_ARP 0x2 +/* enum: DHCP ACK packet received on the physical port */ +#define MC_CMD_TSA_INFO_IP_REASON_RX_DHCP_ACK 0x3 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_LBN 24 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_WIDTH 1 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_LBN 25 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_WIDTH 7 +/* IPV4 address retrieved from the sampled packets. This field is relevant only + * when META_IPV4 is set to 1. + */ +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_OFST 8 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_LEN 4 +/* Target MAC address retrieved from the sampled packet. */ +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_OFST 12 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_LEN 1 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_NUM 6 + +/* MC_CMD_TSA_INFO_IN_PKT_SAMPLE msgrequest: + * + * It is desirable for the TSA controller to learn the traffic pattern of + * packets seen at the network port being monitored. In order to learn about + * the traffic pattern, the TSA controller may want to sample packets seen at + * the network port. Based on the packet samples that the TSA controller + * receives from the adapter, the controller may choose to configure additional + * black-list or white-list rules to allow or block packets as required. + * + * Although the entire sampled packet as seen on the network port is available + * to the MC, the length of the sampled packet sent to the controller is + * restricted by the MCDI payload size. Besides, the TSA controller does not + * require the entire packet to make decisions about filter updates. Hence the + * packet sample passed to the controller is truncated to 128 bytes. This + * length is large enough to hold the Ethernet header, IP header and maximum + * length of supported L4 protocol headers (IPv4 only, but can hold IPv6 header + * too, if required in future). + * + * The intention is that any future changes to this message format that are not + * backwards compatible will be defined with a new operation code.
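The META words in these requests pack sub-fields as LBN (low bit number) / WIDTH pairs within a little-endian 32-bit word. A one-line helper makes the convention explicit; the decode below uses the MC_CMD_TSA_INFO_IN_LOCAL_IP_META layout defined above and is only a sketch.

#include <stdint.h>

/* Extract an MCDI sub-field given its LBN and WIDTH (widths here are < 32). */
static uint32_t mcdi_field(uint32_t word, unsigned int lbn, unsigned int width)
{
	return (word >> lbn) & ((1u << width) - 1);
}

static void decode_local_ip_meta(uint32_t meta)
{
	uint32_t port = mcdi_field(meta, 0, 8);  /* META_PORT_INDEX */
	uint32_t why  = mcdi_field(meta, 16, 8); /* META_REASON, e.g. RX_DHCP_ACK */
	uint32_t ipv4 = mcdi_field(meta, 24, 1); /* META_IPV4: IPV4_ADDR valid */

	(void)port; (void)why; (void)ipv4;       /* consume in a real handler */
}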
+ */ +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_LEN 136 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_OP_HDR_OFST 0 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_OP_HDR_LEN 4 +/* Additional metadata describing the sampled packet */ +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_OFST 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_LEN 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_LBN 0 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_WIDTH 8 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_LBN 8 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_WIDTH 1 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_LBN 9 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_WIDTH 7 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_LBN 16 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_WIDTH 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_LBN 16 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_WIDTH 1 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_LBN 17 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_WIDTH 1 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_LBN 18 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_WIDTH 1 +/* 128-byte raw prefix of the sampled packet which includes the Ethernet + * header, IP header and L4 protocol header (only IPv4 supported initially). + * This provides the controller enough information about the packet sample to + * report traffic patterns seen on a network port and to make decisions + * concerning rule-set updates. + */ +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_OFST 8 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_LEN 1 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_NUM 128 + +/* MC_CMD_TSA_INFO_OUT msgresponse */ +#define MC_CMD_TSA_INFO_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_HOST_INFO + * Commands to apply or retrieve host-related information from an adapter. + * Doxbox reference SF-117371-SW + */ +#define MC_CMD_HOST_INFO 0x128 +#undef MC_CMD_0x128_PRIVILEGE_CTG + +#define MC_CMD_0x128_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_HOST_INFO_IN msgrequest */ +#define MC_CMD_HOST_INFO_IN_LEN 4 +/* sub-operation code info */ +#define MC_CMD_HOST_INFO_IN_OP_HDR_OFST 0 +#define MC_CMD_HOST_INFO_IN_OP_HDR_LEN 4 +#define MC_CMD_HOST_INFO_IN_OP_LBN 0 +#define MC_CMD_HOST_INFO_IN_OP_WIDTH 16 +/* enum: Read a 16-byte unique host identifier from the adapter. This UUID + * helps to identify the host that an adapter is plugged into. This identifier + * is ideally the system UUID retrieved and set by the UEFI driver. If the UEFI + * driver is unable to extract the system UUID, it will still set a random + * 16-byte value into each supported SF adapter plugged into it. Host UUIDs may + * change if the system is power-cycled; however, they persist across adapter + * resets. If the host UUID was not set on an adapter, due to an unsupported + * version of the UEFI driver, then this command returns an error. Doxbox + * reference - SF-117371-SW section 'Host UUID'. + */ +#define MC_CMD_HOST_INFO_OP_GET_UUID 0x0 +/* enum: Set a 16-byte unique host identifier on the adapter to identify the + * host that the adapter is plugged into. See MC_CMD_HOST_INFO_OP_GET_UUID for + * further details.
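A GET_UUID exchange is small enough to show end to end: the request is just the 4-byte OP_HDR (OP occupies its low 16 bits, and OP_GET_UUID is 0x0), and the response is the 16 UUID bytes laid out below. A sketch, again assuming the hypothetical mcdi_rpc() helper from the earlier asides:

#include <stddef.h>
#include <stdint.h>

/* Read the 16-byte host UUID; fails if no UUID was ever set, e.g. under an
 * unsupported UEFI driver version.
 */
static int host_info_get_uuid(uint8_t uuid[16])
{
	uint8_t req[4] = { 0, 0, 0, 0 }; /* OP_HDR with OP = OP_GET_UUID */

	return mcdi_rpc(0x128 /* MC_CMD_HOST_INFO */, req, sizeof(req),
			uuid, 16);
}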
+ */ +#define MC_CMD_HOST_INFO_OP_SET_UUID 0x1 + +/* MC_CMD_HOST_INFO_IN_GET_UUID msgrequest */ +#define MC_CMD_HOST_INFO_IN_GET_UUID_LEN 4 +/* sub-operation code info */ +#define MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_OFST 0 +#define MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_LEN 4 + +/* MC_CMD_HOST_INFO_OUT_GET_UUID msgresponse */ +#define MC_CMD_HOST_INFO_OUT_GET_UUID_LEN 16 +/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID + * for further details. + */ +#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_OFST 0 +#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_LEN 1 +#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_NUM 16 + +/* MC_CMD_HOST_INFO_IN_SET_UUID msgrequest */ +#define MC_CMD_HOST_INFO_IN_SET_UUID_LEN 20 +/* sub-operation code info */ +#define MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_OFST 0 +#define MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_LEN 4 +/* 16-byte host UUID set on the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID for + * further details. + */ +#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_OFST 4 +#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_LEN 1 +#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_NUM 16 + +/* MC_CMD_HOST_INFO_OUT_SET_UUID msgresponse */ +#define MC_CMD_HOST_INFO_OUT_SET_UUID_LEN 0 + + +/***********************************/ +/* MC_CMD_TSAN_INFO + * Get TSA adapter information. TSA controllers query each TSA adapter to learn + * some configuration parameters of each adapter. Doxbox reference SF-117371-SW + * section 'Adapter Information' + */ +#define MC_CMD_TSAN_INFO 0x129 +#undef MC_CMD_0x129_PRIVILEGE_CTG + +#define MC_CMD_0x129_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_TSAN_INFO_IN msgrequest */ +#define MC_CMD_TSAN_INFO_IN_LEN 4 +/* sub-operation code info */ +#define MC_CMD_TSAN_INFO_IN_OP_HDR_OFST 0 +#define MC_CMD_TSAN_INFO_IN_OP_HDR_LEN 4 +#define MC_CMD_TSAN_INFO_IN_OP_LBN 0 +#define MC_CMD_TSAN_INFO_IN_OP_WIDTH 16 +/* enum: Read configuration parameters and IDs that uniquely identify an + * adapter. The parameters include - host identification, adapter + * identification string and number of physical ports on the adapter. + */ +#define MC_CMD_TSAN_INFO_OP_GET_CFG 0x0 + +/* MC_CMD_TSAN_INFO_IN_GET_CFG msgrequest */ +#define MC_CMD_TSAN_INFO_IN_GET_CFG_LEN 4 +/* sub-operation code info */ +#define MC_CMD_TSAN_INFO_IN_GET_CFG_OP_HDR_OFST 0 +#define MC_CMD_TSAN_INFO_IN_GET_CFG_OP_HDR_LEN 4 + +/* MC_CMD_TSAN_INFO_OUT_GET_CFG msgresponse */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_LEN 26 +/* Information about the configuration parameters returned in this response. */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_OFST 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_LEN 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_WIDTH 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_WIDTH 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_LBN 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_WIDTH 8 +/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID + * for further details. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_OFST 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_NUM 16 +/* A unique identifier per adapter. The base MAC address of the card is used + * for this purpose. 
+ */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_OFST 20 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_NUM 6 + +/* MC_CMD_TSAN_INFO_OUT_GET_CFG_V2 msgresponse */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_LEN 36 +/* Information about the configuration parameters returned in this response. */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CONFIG_WORD_OFST 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CONFIG_WORD_LEN 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_WIDTH 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_WIDTH 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_LBN 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_WIDTH 8 +/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID + * for further details. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_OFST 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_NUM 16 +/* A unique identifier per adapter. The base MAC address of the card is used + * for this purpose. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_OFST 20 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_NUM 6 +/* Unused bytes, defined for 32-bit alignment of new fields. */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_UNUSED_OFST 26 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_UNUSED_LEN 2 +/* Maximum number of TSA statistics counters in each direction of dataflow + * supported on the card. Note that the statistics counters are always + * allocated in pairs, i.e. a counter ID is associated with one Tx and one Rx + * counter. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_MAX_STATS_OFST 28 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_MAX_STATS_LEN 4 +/* Width of each statistics counter (represented in bits). This gives an + * indication of wrap point to the user. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_STATS_WIDTH_OFST 32 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_STATS_WIDTH_LEN 4 + + +/***********************************/ +/* MC_CMD_TSA_STATISTICS + * TSA adapter statistics operations. + */ +#define MC_CMD_TSA_STATISTICS 0x130 +#undef MC_CMD_0x130_PRIVILEGE_CTG + +#define MC_CMD_0x130_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_TSA_STATISTICS_IN msgrequest */ +#define MC_CMD_TSA_STATISTICS_IN_LEN 4 +/* TSA statistics sub-operation code */ +#define MC_CMD_TSA_STATISTICS_IN_OP_CODE_OFST 0 +#define MC_CMD_TSA_STATISTICS_IN_OP_CODE_LEN 4 +/* enum: Get the configuration parameters that describe the TSA statistics + * layout on the adapter. + */ +#define MC_CMD_TSA_STATISTICS_OP_GET_CONFIG 0x0 +/* enum: Read and/or clear TSA statistics counters. */ +#define MC_CMD_TSA_STATISTICS_OP_READ_CLEAR 0x1 + +/* MC_CMD_TSA_STATISTICS_IN_GET_CONFIG msgrequest */ +#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_LEN 4 +/* TSA statistics sub-operation code */ +#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_OP_CODE_OFST 0 +#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_OP_CODE_LEN 4 + +/* MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG msgresponse */ +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_LEN 8 +/* Maximum number of TSA statistics counters in each direction of dataflow + * supported on the card. Note that the statistics counters are always + * allocated in pairs, i.e. a counter ID is associated with one Tx and one Rx + * counter. 
+ */ +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_MAX_STATS_OFST 0 +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_MAX_STATS_LEN 4 +/* Width of each statistics counter (represented in bits). This gives an + * indication of wrap point to the user. + */ +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_STATS_WIDTH_OFST 4 +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_STATS_WIDTH_LEN 4 + +/* MC_CMD_TSA_STATISTICS_IN_READ_CLEAR msgrequest */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMIN 20 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMAX 252 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LEN(num) (16+4*(num)) +/* TSA statistics sub-operation code */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_OFST 0 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_LEN 4 +/* Parameters describing the statistics operation */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_OFST 4 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_LEN 4 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_LBN 0 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_WIDTH 1 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_LBN 1 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_WIDTH 1 +/* Counter ID list specification type */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_OFST 8 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_LEN 4 +/* enum: The statistics counters are specified as an unordered list of + * individual counter ID. + */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LIST 0x0 +/* enum: The statistics counters are specified as a range of consecutive + * counter IDs. + */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_RANGE 0x1 +/* Number of statistics counters */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_OFST 12 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_LEN 4 +/* Counter IDs to be read/cleared. When mode is set to LIST, this entry holds a + * list of counter IDs to be operated on. When mode is set to RANGE, this entry + * holds a single counter ID representing the start of the range of counter IDs + * to be operated on. 
+ */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_OFST 16 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_LEN 4 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MINNUM 1 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MAXNUM 59 + +/* MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR msgresponse */ +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMIN 24 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMAX 248 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LEN(num) (8+16*(num)) +/* Number of statistics counters returned in this response */ +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_OFST 0 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_LEN 4 +/* MC_TSA_STATISTICS_ENTRY Note that this field is expected to start at a + * 64-bit aligned offset + */ +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_OFST 8 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_LEN 16 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MINNUM 1 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MAXNUM 15 + +/* MC_TSA_STATISTICS_ENTRY structuredef */ +#define MC_TSA_STATISTICS_ENTRY_LEN 16 +/* Tx statistics counter */ +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_OFST 0 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LEN 8 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LO_OFST 0 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_HI_OFST 4 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LBN 0 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_WIDTH 64 +/* Rx statistics counter */ +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_OFST 8 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LEN 8 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LO_OFST 8 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_HI_OFST 12 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LBN 64 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_WIDTH 64 + + +/***********************************/ +/* MC_CMD_ERASE_INITIAL_NIC_SECRET + * This request causes the NIC to find the initial NIC secret (programmed + * during ATE) in XPM memory and if and only if the NIC has already been + * rekeyed with MC_CMD_REKEY, erase it. This is used by manftest after + * installing TSA binding certificates. See SF-117631-TC. + */ +#define MC_CMD_ERASE_INITIAL_NIC_SECRET 0x131 +#undef MC_CMD_0x131_PRIVILEGE_CTG + +#define MC_CMD_0x131_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_ERASE_INITIAL_NIC_SECRET_IN msgrequest */ +#define MC_CMD_ERASE_INITIAL_NIC_SECRET_IN_LEN 0 + +/* MC_CMD_ERASE_INITIAL_NIC_SECRET_OUT msgresponse */ +#define MC_CMD_ERASE_INITIAL_NIC_SECRET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TSA_CONFIG + * TSA adapter configuration operations. This command is used to prepare the + * NIC for TSA binding. + */ +#define MC_CMD_TSA_CONFIG 0x64 +#undef MC_CMD_0x64_PRIVILEGE_CTG + +#define MC_CMD_0x64_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_TSA_CONFIG_IN msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_LEN 4 +/* TSA configuration sub-operation code */ +#define MC_CMD_TSA_CONFIG_IN_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_OP_LEN 4 +/* enum: Append a single item to the tsa_config partition. Items will be + * encrypted unless they are declared as non-sensitive. Returns + * MC_CMD_ERR_EEXIST if the tag is already present. + */ +#define MC_CMD_TSA_CONFIG_OP_APPEND 0x1 +/* enum: Reset the tsa_config partition to a clean state. */ +#define MC_CMD_TSA_CONFIG_OP_RESET 0x2 +/* enum: Read back a configured item from tsa_config partition. Returns + * MC_CMD_ERR_ENOENT if the item doesn't exist, or MC_CMD_ERR_EPERM if the item + * is declared as sensitive (i.e. 
is encrypted). + */ +#define MC_CMD_TSA_CONFIG_OP_READ 0x3 + +/* MC_CMD_TSA_CONFIG_IN_APPEND msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMIN 12 +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMAX 252 +#define MC_CMD_TSA_CONFIG_IN_APPEND_LEN(num) (12+1*(num)) +/* TSA configuration sub-operation code. The value shall be + * MC_CMD_TSA_CONFIG_OP_APPEND. + */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_APPEND_OP_LEN 4 +/* The tag to be appended */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_TAG_OFST 4 +#define MC_CMD_TSA_CONFIG_IN_APPEND_TAG_LEN 4 +/* The length of the data in bytes */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_OFST 8 +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_LEN 4 +/* The item data */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_OFST 12 +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_LEN 1 +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MINNUM 0 +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MAXNUM 240 + +/* MC_CMD_TSA_CONFIG_OUT_APPEND msgresponse */ +#define MC_CMD_TSA_CONFIG_OUT_APPEND_LEN 0 + +/* MC_CMD_TSA_CONFIG_IN_RESET msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_RESET_LEN 4 +/* TSA configuration sub-operation code. The value shall be + * MC_CMD_TSA_CONFIG_OP_RESET. + */ +#define MC_CMD_TSA_CONFIG_IN_RESET_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_RESET_OP_LEN 4 + +/* MC_CMD_TSA_CONFIG_OUT_RESET msgresponse */ +#define MC_CMD_TSA_CONFIG_OUT_RESET_LEN 0 + +/* MC_CMD_TSA_CONFIG_IN_READ msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_READ_LEN 8 +/* TSA configuration sub-operation code. The value shall be + * MC_CMD_TSA_CONFIG_OP_READ. + */ +#define MC_CMD_TSA_CONFIG_IN_READ_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_READ_OP_LEN 4 +/* The tag to be read */ +#define MC_CMD_TSA_CONFIG_IN_READ_TAG_OFST 4 +#define MC_CMD_TSA_CONFIG_IN_READ_TAG_LEN 4 + +/* MC_CMD_TSA_CONFIG_OUT_READ msgresponse */ +#define MC_CMD_TSA_CONFIG_OUT_READ_LENMIN 8 +#define MC_CMD_TSA_CONFIG_OUT_READ_LENMAX 252 +#define MC_CMD_TSA_CONFIG_OUT_READ_LEN(num) (8+1*(num)) +/* The tag that was read */ +#define MC_CMD_TSA_CONFIG_OUT_READ_TAG_OFST 0 +#define MC_CMD_TSA_CONFIG_OUT_READ_TAG_LEN 4 +/* The length of the data in bytes */ +#define MC_CMD_TSA_CONFIG_OUT_READ_LENGTH_OFST 4 +#define MC_CMD_TSA_CONFIG_OUT_READ_LENGTH_LEN 4 +/* The data of the item. */ +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_OFST 8 +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_LEN 1 +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MINNUM 0 +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MAXNUM 244 + +/* MC_TSA_IPV4_ITEM structuredef */ +#define MC_TSA_IPV4_ITEM_LEN 8 +/* Additional metadata describing the IP address information such as the + * physical port number the address is being used on. Unused space in this + * field is reserved for future expansion. + */ +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_OFST 0 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_LEN 4 +#define MC_TSA_IPV4_ITEM_PORT_IDX_LBN 0 +#define MC_TSA_IPV4_ITEM_PORT_IDX_WIDTH 8 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_LBN 0 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_WIDTH 32 +/* The IPv4 address in little endian byte order. */ +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_OFST 4 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_LEN 4 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_LBN 32 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_WIDTH 32 + + +/***********************************/ +/* MC_CMD_TSA_IPADDR + * TSA operations relating to the monitoring and expiry of local IP addresses + * discovered by the controller. These commands are sent from a TSA controller + * to a TSA adapter. 
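+ *
+ * Each address is carried as an MC_TSA_IPV4_ITEM (defined above). A minimal
+ * packing sketch, assuming a little-endian host buffer (hypothetical code,
+ * not from the spec; PORT_IDX occupies bits 7:0 of the META word):
+ *
+ *     uint32_t meta = port_idx & 0xff;
+ *     memcpy(item + MC_TSA_IPV4_ITEM_IPV4_ADDR_META_OFST, &meta, 4);
+ *     memcpy(item + MC_TSA_IPV4_ITEM_IPV4_ADDR_OFST, &ipv4_le, 4);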
+ */ +#define MC_CMD_TSA_IPADDR 0x65 +#undef MC_CMD_0x65_PRIVILEGE_CTG + +#define MC_CMD_0x65_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_TSA_IPADDR_IN msgrequest */ +#define MC_CMD_TSA_IPADDR_IN_LEN 4 +/* Header containing information to identify which sub-operation of this + * command to perform. The header contains a 16-bit op-code. Unused space in + * this field is reserved for future expansion. + */ +#define MC_CMD_TSA_IPADDR_IN_OP_HDR_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_OP_HDR_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_OP_LBN 0 +#define MC_CMD_TSA_IPADDR_IN_OP_WIDTH 16 +/* enum: Request that the adapter verifies that the IPv4 addresses supplied are + * still in use by the host by sending ARP probes to the host. The MC does not + * wait for a response to the probes and sends an MCDI response to the + * controller once the probes have been sent to the host. The response to the + * probes (if there are any) will be forwarded to the controller using + * MC_CMD_TSA_INFO alerts. + */ +#define MC_CMD_TSA_IPADDR_OP_VALIDATE_IPV4 0x1 +/* enum: Notify the adapter that one or more IPv4 addresses are no longer valid + * for the host of the adapter. The adapter should remove the IPv4 addresses + * from its local cache. + */ +#define MC_CMD_TSA_IPADDR_OP_REMOVE_IPV4 0x2 + +/* MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4 msgrequest */ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMIN 16 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMAX 248 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LEN(num) (8+8*(num)) +/* Header containing information to identify which sub-operation of this + * command to perform. The header contains a 16-bit op-code. Unused space in + * this field is reserved for future expansion. + */ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_LBN 0 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_WIDTH 16 +/* Number of IPv4 addresses to validate. */ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_OFST 4 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_LEN 4 +/* The IPv4 addresses to validate, in struct MC_TSA_IPV4_ITEM format. */ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LEN 8 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LO_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_HI_OFST 12 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MINNUM 1 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MAXNUM 30 + +/* MC_CMD_TSA_IPADDR_OUT_VALIDATE_IPV4 msgresponse */ +#define MC_CMD_TSA_IPADDR_OUT_VALIDATE_IPV4_LEN 0 + +/* MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4 msgrequest */ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMIN 16 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMAX 248 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LEN(num) (8+8*(num)) +/* Header containing information to identify which sub-operation of this + * command to perform. The header contains a 16-bit op-code. Unused space in + * this field is reserved for future expansion. + */ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_HDR_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_HDR_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_LBN 0 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_WIDTH 16 +/* Number of IPv4 addresses to remove. */ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_NUM_ITEMS_OFST 4 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_NUM_ITEMS_LEN 4 +/* The IPv4 addresses that have expired, in struct MC_TSA_IPV4_ITEM format. 
*/ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LEN 8 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LO_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_HI_OFST 12 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MINNUM 1 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MAXNUM 30 + +/* MC_CMD_TSA_IPADDR_OUT_REMOVE_IPV4 msgresponse */ +#define MC_CMD_TSA_IPADDR_OUT_REMOVE_IPV4_LEN 0 + + +/***********************************/ +/* MC_CMD_SECURE_NIC_INFO + * Get secure NIC information. While many of the features reported by these + * commands are related to TSA, they must be supported in firmware where TSA is + * disabled. + */ +#define MC_CMD_SECURE_NIC_INFO 0x132 +#undef MC_CMD_0x132_PRIVILEGE_CTG + +#define MC_CMD_0x132_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SECURE_NIC_INFO_IN msgrequest */ +#define MC_CMD_SECURE_NIC_INFO_IN_LEN 4 +/* sub-operation code info */ +#define MC_CMD_SECURE_NIC_INFO_IN_OP_HDR_OFST 0 +#define MC_CMD_SECURE_NIC_INFO_IN_OP_HDR_LEN 4 +#define MC_CMD_SECURE_NIC_INFO_IN_OP_LBN 0 +#define MC_CMD_SECURE_NIC_INFO_IN_OP_WIDTH 16 +/* enum: Get the status of various security settings, all signed along with a + * challenge chosen by the host. + */ +#define MC_CMD_SECURE_NIC_INFO_OP_STATUS 0x0 + +/* MC_CMD_SECURE_NIC_INFO_IN_STATUS msgrequest */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_LEN 24 +/* sub-operation code, must be MC_CMD_SECURE_NIC_INFO_OP_STATUS */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_OFST 0 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_LEN 4 +/* Type of key to be used to sign response. */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_OFST 4 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_LEN 4 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_UNUSED 0x0 /* enum */ +/* enum: Solarflare adapter authentication key, installed by Manftest. */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_SF_ADAPTER_AUTH 0x1 +/* enum: TSA binding key, installed after adapter is bound to a TSA controller. + * This is not supported in firmware which does not support TSA. + */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_TSA_BINDING 0x2 +/* enum: Customer adapter authentication key. Installed by the customer in the + * field, but otherwise similar to the Solarflare adapter authentication key. + */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CUSTOMER_ADAPTER_AUTH 0x3 +/* Random challenge generated by the host. */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_OFST 8 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_LEN 16 + +/* MC_CMD_SECURE_NIC_INFO_OUT_STATUS msgresponse */ +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_LEN 420 +/* Length of the signature in MSG_SIGNATURE. */ +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN_OFST 0 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN_LEN 4 +/* Signature over the message, starting at MESSAGE_TYPE and continuing to the + * end of the MCDI response, allowing the message format to be extended. The + * signature uses ECDSA 384 encoding in ASN.1 format. It has variable length, + * with a maximum of 384 bytes. + */ +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_OFST 4 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN 384 +/* Enum value indicating the type of response. This protects against chosen + * message attacks. The enum values are random rather than sequential to make + * it unlikely that values will be reused should other commands in a different + * namespace need to create signed messages. 
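+ *
+ * Illustrative note (not from the spec): the signed region of the response
+ * can be computed as
+ *
+ *     signed_data = resp + MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_OFST;
+ *     signed_len  = resp_len - MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_OFST;
+ *
+ * and verified against the ASN.1-encoded ECDSA-384 signature held in
+ * MSG_SIGNATURE, of which only the first MSG_SIGNATURE_LEN bytes are valid.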
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_OFST 388
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_LEN 4
+/* enum: Message type value for the response to a
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS message.
+ */
+#define MC_CMD_SECURE_NIC_INFO_STATUS 0xdb4
+/* The challenge provided by the host in the MC_CMD_SECURE_NIC_INFO_IN_STATUS
+ * message.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_OFST 392
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_LEN 16
+/* The first 32 bits of XPM memory, which include security and flag bits, die
+ * ID and chip ID revision. The meaning of these bits is defined in
+ * mc/include/mc/xpm.h in the firmwaresrc repository.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_XPM_STATUS_BITS_OFST 408
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_XPM_STATUS_BITS_LEN 4
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_A_OFST 412
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_A_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_B_OFST 414
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_B_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_C_OFST 416
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_C_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_D_OFST 418
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_D_LEN 2
+
+
+/***********************************/
+/* MC_CMD_TSA_TEST
+ * A simple ping-pong command just to test the adapter <-> controller MCDI
+ * communication channel. This command makes no changes to the TSA adapter's
+ * internal state. It is used by the controller just to verify that the MCDI
+ * communication channel is working correctly. This command takes no
+ * additional parameters in request or response.
+ */
+#define MC_CMD_TSA_TEST 0x125
+#undef MC_CMD_0x125_PRIVILEGE_CTG
+
+#define MC_CMD_0x125_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_TEST_IN msgrequest */
+#define MC_CMD_TSA_TEST_IN_LEN 0
+
+/* MC_CMD_TSA_TEST_OUT msgresponse */
+#define MC_CMD_TSA_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSA_RULESET_OVERRIDE
+ * Override the TSA ruleset that is currently active on the adapter. This
+ * operation does not modify the ruleset itself. It provides a mechanism to
+ * apply an allow-all or deny-all operation to all packets, thereby completely
+ * ignoring the ruleset configured on the adapter. The main purpose of this
+ * operation is to provide a deterministic state to the TSA firewall during
+ * ruleset transitions.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE 0x12a
+#undef MC_CMD_0x12a_PRIVILEGE_CTG
+
+#define MC_CMD_0x12a_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_RULESET_OVERRIDE_IN msgrequest */
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_LEN 4
+/* The override state to apply. */
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_OFST 0
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_LEN 4
+/* enum: No override in place - the existing ruleset is in operation. */
+#define MC_CMD_TSA_RULESET_OVERRIDE_NONE 0x0
+/* enum: Block all packets seen on all datapath channels except those packets
+ * required for basic configuration of the TSA NIC such as ARPs and TSA-
+ * communication traffic. Such exceptional traffic is handled differently
+ * compared to TSA rulesets.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE_BLOCK 0x1
+/* enum: Allow all packets through all datapath channels. The TSA adapter
+ * behaves like a normal NIC without any firewalls.
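+ *
+ * (Illustrative controller-side sequence, not mandated by this header: set
+ * STATE = BLOCK, install or replace the ruleset, then set STATE = NONE, so
+ * the transition happens from a deterministic all-blocked state.)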
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE_ALLOW 0x2
+
+/* MC_CMD_TSA_RULESET_OVERRIDE_OUT msgresponse */
+#define MC_CMD_TSA_RULESET_OVERRIDE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSAC_REQUEST
+ * Generic command to send requests from a TSA controller to a TSA adapter.
+ * Specific usage is determined by the TYPE field.
+ */
+#define MC_CMD_TSAC_REQUEST 0x12b
+#undef MC_CMD_0x12b_PRIVILEGE_CTG
+
+#define MC_CMD_0x12b_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSAC_REQUEST_IN msgrequest */
+#define MC_CMD_TSAC_REQUEST_IN_LEN 4
+/* The type of request from the controller. */
+#define MC_CMD_TSAC_REQUEST_IN_TYPE_OFST 0
+#define MC_CMD_TSAC_REQUEST_IN_TYPE_LEN 4
+/* enum: Request the adapter to resend local IP information from its cache.
+ * The command does not return any IP address information; IP addresses are
+ * sent as TSA notifications as described in MC_CMD_TSA_INFO_IN_LOCAL_IP.
+ */
+#define MC_CMD_TSAC_REQUEST_LOCALIP 0x0
+
+/* MC_CMD_TSAC_REQUEST_OUT msgresponse */
+#define MC_CMD_TSAC_REQUEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SUC_VERSION
+ * Get the version of the SUC.
+ */
+#define MC_CMD_SUC_VERSION 0x134
+#undef MC_CMD_0x134_PRIVILEGE_CTG
+
+#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SUC_VERSION_IN msgrequest */
+#define MC_CMD_SUC_VERSION_IN_LEN 0
+
+/* MC_CMD_SUC_VERSION_OUT msgresponse */
+#define MC_CMD_SUC_VERSION_OUT_LEN 24
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4
+#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4
+/* The date, in seconds since the Unix epoch, when the firmware image was
+ * built.
+ */
+#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16
+#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20
+#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4
+
+/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot
+ * loader.
+ */
+#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4
+#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0
+#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4
+/* enum: Requests the SUC boot version. */
+#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b
+
+/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */
+#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4
+/* The SUC boot version */
+#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SUC_MANFTEST
+ * Operations to support manftest on SUC based systems.
+ */
+#define MC_CMD_SUC_MANFTEST 0x135
+#undef MC_CMD_0x135_PRIVILEGE_CTG
+
+#define MC_CMD_0x135_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SUC_MANFTEST_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_IN_LEN 4
+/* The manftest operation to be performed. */
+#define MC_CMD_SUC_MANFTEST_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_IN_OP_LEN 4
+/* enum: Read serial number and use count. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ 0x0
+/* enum: Update use count on wearout adapter. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE 0x1
+/* enum: Start an ADC calibration. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START 0x2
+/* enum: Read the status of an ADC calibration.
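+ * (Illustrative flow, not mandated by this header: issue
+ * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START, poll this sub-operation until the
+ * CALIBRATING flag in the response clears, check the FAILED flag, then read
+ * the values back with MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT.)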
*/ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS 0x3 +/* enum: Read the results of an ADC calibration. */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT 0x4 +/* enum: Read the PCIe configuration. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ 0x5 +/* enum: Write the PCIe configuration. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE 0x6 +/* enum: Write FRU information to SUC. The FRU information is taken from the + * FRU_INFORMATION partition. Attempts to write to read-only FRUs are rejected. + */ +#define MC_CMD_SUC_MANFTEST_FRU_WRITE 0x7 + +/* MC_CMD_SUC_MANFTEST_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_WEAROUT_READ. + */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_LEN 20 +/* The serial number of the wearout adapter, see SF-112717-PR for format. */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_SERIAL_NUMBER_OFST 0 +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_SERIAL_NUMBER_LEN 16 +/* The use count of the wearout adapter. */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_USE_COUNT_OFST 16 +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_USE_COUNT_LEN 4 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE. + */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START. + */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS. + */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_LEN 4 +/* The combined status of the calibration operation. 
*/ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FLAGS_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FLAGS_LEN 4 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_LBN 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_WIDTH 1 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_LBN 1 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_WIDTH 1 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_LBN 2 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_WIDTH 4 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_LBN 6 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_WIDTH 2 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT. + */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_LEN 12 +/* The set of calibration results. */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_LEN 4 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_NUM 3 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ. + */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_LEN 4 +/* The PCIe vendor ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_VENDOR_ID_OFST 0 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_VENDOR_ID_LEN 2 +/* The PCIe device ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_DEVICE_ID_OFST 2 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_DEVICE_ID_LEN 2 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_LEN 8 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE. + */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_OP_LEN 4 +/* The PCIe vendor ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_VENDOR_ID_OFST 4 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_VENDOR_ID_LEN 2 +/* The PCIe device ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_DEVICE_ID_OFST 6 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_DEVICE_ID_LEN 2 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_FRU_WRITE_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_FRU_WRITE + */ +#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_FRU_WRITE_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_FRU_WRITE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_CERTIFICATE + * Request a certificate. 
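+ *
+ * Certificates larger than one MCDI response can be read in chunks: the
+ * caller advances IN_OFFSET on each request until OUT_TOTAL_LENGTH bytes
+ * have been collected (see the OFFSET, TOTAL_LENGTH and DATA fields below).
+ * An illustrative sketch, where get_cert_chunk() is a hypothetical helper
+ * that performs one MC_CMD_GET_CERTIFICATE exchange and returns the number
+ * of DATA bytes received:
+ *
+ *     uint32_t off = 0, total = 0;
+ *     do {
+ *         off += get_cert_chunk(type, off, buf + off, &total);
+ *     } while (off < total);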
+ */
+#define MC_CMD_GET_CERTIFICATE 0x12c
+#undef MC_CMD_0x12c_PRIVILEGE_CTG
+
+#define MC_CMD_0x12c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CERTIFICATE_IN msgrequest */
+#define MC_CMD_GET_CERTIFICATE_IN_LEN 8
+/* Type of the certificate to be retrieved. */
+#define MC_CMD_GET_CERTIFICATE_IN_TYPE_OFST 0
+#define MC_CMD_GET_CERTIFICATE_IN_TYPE_LEN 4
+#define MC_CMD_GET_CERTIFICATE_IN_UNUSED 0x0 /* enum */
+#define MC_CMD_GET_CERTIFICATE_IN_AAC 0x1 /* enum */
+/* enum: Adapter Authentication Certificate (AAC). The AAC is unique to each
+ * adapter and is used to verify its authenticity. It is installed by Manftest.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_ADAPTER_AUTH 0x1
+#define MC_CMD_GET_CERTIFICATE_IN_AASC 0x2 /* enum */
+/* enum: Adapter Authentication Signing Certificate (AASC). The AASC is shared
+ * by a group of adapters (typically a purchase order) and is used to verify
+ * the validity of the AAC along with the SF root certificate. It is installed
+ * by Manftest.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_ADAPTER_AUTH_SIGNING 0x2
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_AAC 0x3 /* enum */
+/* enum: Customer Adapter Authentication Certificate (Customer AAC). The
+ * Customer AAC is unique to each adapter and is used to verify its
+ * authenticity in cases where either the AAC is not installed or a customer
+ * desires to use their own certificate chain. It is installed by the
+ * customer.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_ADAPTER_AUTH 0x3
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_AASC 0x4 /* enum */
+/* enum: Customer Adapter Authentication Signing Certificate (Customer AASC).
+ * The Customer AASC is shared by a group of adapters and is used to verify
+ * the validity of the Customer AAC along with the customer's root
+ * certificate. It is installed by the customer.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_ADAPTER_AUTH_SIGNING 0x4
+/* Offset, measured in bytes, relative to the start of the certificate data
+ * from which the certificate is to be retrieved.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_OFFSET_OFST 4
+#define MC_CMD_GET_CERTIFICATE_IN_OFFSET_LEN 4
+
+/* MC_CMD_GET_CERTIFICATE_OUT msgresponse */
+#define MC_CMD_GET_CERTIFICATE_OUT_LENMIN 13
+#define MC_CMD_GET_CERTIFICATE_OUT_LENMAX 252
+#define MC_CMD_GET_CERTIFICATE_OUT_LEN(num) (12+1*(num))
+/* Type of the certificate. */
+#define MC_CMD_GET_CERTIFICATE_OUT_TYPE_OFST 0
+#define MC_CMD_GET_CERTIFICATE_OUT_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_CERTIFICATE_IN/TYPE */
+/* Offset, measured in bytes, relative to the start of the certificate data
+ * from which data in this message starts.
+ */
+#define MC_CMD_GET_CERTIFICATE_OUT_OFFSET_OFST 4
+#define MC_CMD_GET_CERTIFICATE_OUT_OFFSET_LEN 4
+/* Total length of the certificate data. */
+#define MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_OFST 8
+#define MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_LEN 4
+/* The certificate data. */
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_OFST 12
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_LEN 1
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MAXNUM 240
+
+
+/***********************************/
+/* MC_CMD_GET_NIC_GLOBAL
+ * Get a global value which applies to all PCI functions
+ */
+#define MC_CMD_GET_NIC_GLOBAL 0x12d
+#undef MC_CMD_0x12d_PRIVILEGE_CTG
+
+#define MC_CMD_0x12d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_NIC_GLOBAL_IN msgrequest */
+#define MC_CMD_GET_NIC_GLOBAL_IN_LEN 4
+/* Key to request value for, see enum values in MC_CMD_SET_NIC_GLOBAL. If the
+ * given key is unknown to the current firmware, the call will fail with
+ * ENOENT.
+ */
+#define MC_CMD_GET_NIC_GLOBAL_IN_KEY_OFST 0
+#define MC_CMD_GET_NIC_GLOBAL_IN_KEY_LEN 4
+
+/* MC_CMD_GET_NIC_GLOBAL_OUT msgresponse */
+#define MC_CMD_GET_NIC_GLOBAL_OUT_LEN 4
+/* Value of requested key, see key descriptions below. */
+#define MC_CMD_GET_NIC_GLOBAL_OUT_VALUE_OFST 0
+#define MC_CMD_GET_NIC_GLOBAL_OUT_VALUE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_NIC_GLOBAL
+ * Set a global value which applies to all PCI functions. Most global values
+ * can only be changed under specific conditions, and this call will return an
+ * appropriate error otherwise (see key descriptions).
+ */
+#define MC_CMD_SET_NIC_GLOBAL 0x12e
+#undef MC_CMD_0x12e_PRIVILEGE_CTG
+
+#define MC_CMD_0x12e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_NIC_GLOBAL_IN msgrequest */
+#define MC_CMD_SET_NIC_GLOBAL_IN_LEN 8
+/* Key to change the value of. Firmware will return ENOENT for keys it doesn't
+ * know about.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_KEY_OFST 0
+#define MC_CMD_SET_NIC_GLOBAL_IN_KEY_LEN 4
+/* enum: Request switching the datapath firmware sub-variant. Currently only
+ * useful when running the DPDK f/w variant. See key values below, and the DPDK
+ * section of the EF10 Driver Writers Guide. Note that any driver attaching
+ * with the SUBVARIANT_AWARE flag cleared is implicitly considered as a request
+ * to switch back to the default sub-variant, and will thus reset this value.
+ * If a sub-variant switch happens, all other PCI functions will get their
+ * resources reset (they will see an MC reboot).
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT 0x1
+/* New value to set, see key descriptions above. */
+#define MC_CMD_SET_NIC_GLOBAL_IN_VALUE_OFST 4
+#define MC_CMD_SET_NIC_GLOBAL_IN_VALUE_LEN 4
+/* enum: Only if KEY = FIRMWARE_SUBVARIANT. Default sub-variant with support
+ * for maximum features for the current f/w variant. A request from a
+ * privileged function to set this particular value will always succeed.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT 0x0
+/* enum: Only if KEY = FIRMWARE_SUBVARIANT. Increases packet rate at the cost
+ * of not supporting any TX checksum offloads. Only supported when running some
+ * f/w variants; others will return ENOTSUP (as reported by the homonymous bit
+ * in MC_CMD_GET_CAPABILITIES_V2). Can only be set when no other drivers are
+ * attached, and the calling driver must have no resources allocated. See the
+ * DPDK section of the EF10 Driver Writers Guide for a more detailed
+ * description with possible error codes.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM 0x1
+
+
+/***********************************/
+/* MC_CMD_LTSSM_TRACE_POLL
+ * Medford2 hardware has support for logging all LTSSM state transitions to a
+ * hardware buffer. When built with WITH_LTSSM_TRACE=1, the firmware will
+ * periodically dump the contents of this hardware buffer to an internal
+ * firmware buffer for later extraction.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL 0x12f
+#undef MC_CMD_0x12f_PRIVILEGE_CTG
+
+#define MC_CMD_0x12f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LTSSM_TRACE_POLL_IN msgrequest: Read transitions from the firmware
+ * internal buffer.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_IN_LEN 4
+/* The maximum number of rows that the caller can accept. The format of each
+ * row is defined in MC_CMD_LTSSM_TRACE_POLL_OUT.
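+ *
+ * An illustrative polling pattern, where ltssm_trace_poll() is a
+ * hypothetical helper that performs one exchange and returns the response
+ * FLAGS word: keep re-issuing the request while the CONTINUES flag is set
+ * (presumably indicating that more rows remain buffered):
+ *
+ *     uint32_t flags;
+ *     do {
+ *         flags = ltssm_trace_poll(rows, max_rows);
+ *     } while (flags & (1u << MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_LBN));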
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_LEN 4
+
+/* MC_CMD_LTSSM_TRACE_POLL_OUT msgresponse */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMIN 16
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMAX 248
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LEN(num) (8+8*(num))
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_LEN 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_LBN 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_LBN 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_LBN 31
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_WIDTH 1
+/* The number of rows present in this response. */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROW_COUNT_OFST 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROW_COUNT_LEN 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_OFST 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LEN 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LO_OFST 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_HI_OFST 12
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MINNUM 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MAXNUM 30
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_LBN 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_WIDTH 6
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_LBN 6
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_LBN 7
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_LBN 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_WIDTH 24
+/* The time of the LTSSM transition. Times are reported as fractional
+ * microseconds since MC boot (wrapping at 2^32 us). The fractional part is
+ * reported in picoseconds, with 0 <= TIMESTAMP_PS < 1000000; the timestamp in
+ * seconds is (TIMESTAMP_US + TIMESTAMP_PS / 1000000) / 1000000.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_US_OFST 12
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_US_LEN 4
+
 #endif /* _SIENA_MC_DRIVER_PCOL_H */
 
 /*! \cidoxg_end */
diff --git a/drivers/net/sfc/base/efx_regs_mcdi_aoe.h b/drivers/net/sfc/base/efx_regs_mcdi_aoe.h
new file mode 100644
index 00000000..6aaf212f
--- /dev/null
+++ b/drivers/net/sfc/base/efx_regs_mcdi_aoe.h
@@ -0,0 +1,2914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2008-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+/*! \cidoxg_firmware_mc_cmd */
+
+#ifndef _SIENA_MC_DRIVER_PCOL_AOE_H
+#define _SIENA_MC_DRIVER_PCOL_AOE_H
+
+
+
+/***********************************/
+/* MC_CMD_FC
+ * Perform an FC operation
+ */
+#define MC_CMD_FC 0x9
+
+/* MC_CMD_FC_IN msgrequest */
+#define MC_CMD_FC_IN_LEN 4
+#define MC_CMD_FC_IN_OP_HDR_OFST 0
+#define MC_CMD_FC_IN_OP_HDR_LEN 4
+#define MC_CMD_FC_IN_OP_LBN 0
+#define MC_CMD_FC_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to FC.
*/ +#define MC_CMD_FC_OP_NULL 0x1 +/* enum: Unused opcode */ +#define MC_CMD_FC_OP_UNUSED 0x2 +/* enum: MAC driver commands */ +#define MC_CMD_FC_OP_MAC 0x3 +/* enum: Read FC memory */ +#define MC_CMD_FC_OP_READ32 0x4 +/* enum: Write to FC memory */ +#define MC_CMD_FC_OP_WRITE32 0x5 +/* enum: Read FC memory */ +#define MC_CMD_FC_OP_TRC_READ 0x6 +/* enum: Write to FC memory */ +#define MC_CMD_FC_OP_TRC_WRITE 0x7 +/* enum: FC firmware Version */ +#define MC_CMD_FC_OP_GET_VERSION 0x8 +/* enum: Read FC memory */ +#define MC_CMD_FC_OP_TRC_RX_READ 0x9 +/* enum: Write to FC memory */ +#define MC_CMD_FC_OP_TRC_RX_WRITE 0xa +/* enum: SFP parameters */ +#define MC_CMD_FC_OP_SFP 0xb +/* enum: DDR3 test */ +#define MC_CMD_FC_OP_DDR_TEST 0xc +/* enum: Get Crash context from FC */ +#define MC_CMD_FC_OP_GET_ASSERT 0xd +/* enum: Get FPGA Build registers */ +#define MC_CMD_FC_OP_FPGA_BUILD 0xe +/* enum: Read map support commands */ +#define MC_CMD_FC_OP_READ_MAP 0xf +/* enum: FC Capabilities */ +#define MC_CMD_FC_OP_CAPABILITIES 0x10 +/* enum: FC Global flags */ +#define MC_CMD_FC_OP_GLOBAL_FLAGS 0x11 +/* enum: FC IO using relative addressing modes */ +#define MC_CMD_FC_OP_IO_REL 0x12 +/* enum: FPGA link information */ +#define MC_CMD_FC_OP_UHLINK 0x13 +/* enum: Configure loopbacks and link on FPGA ports */ +#define MC_CMD_FC_OP_SET_LINK 0x14 +/* enum: Licensing operations relating to AOE */ +#define MC_CMD_FC_OP_LICENSE 0x15 +/* enum: Startup information to the FC */ +#define MC_CMD_FC_OP_STARTUP 0x16 +/* enum: Configure a DMA read */ +#define MC_CMD_FC_OP_DMA 0x17 +/* enum: Configure a timed read */ +#define MC_CMD_FC_OP_TIMED_READ 0x18 +/* enum: Control UART logging */ +#define MC_CMD_FC_OP_LOG 0x19 +/* enum: Get the value of a given clock_id */ +#define MC_CMD_FC_OP_CLOCK 0x1a +/* enum: DDR3/QDR3 parameters */ +#define MC_CMD_FC_OP_DDR 0x1b +/* enum: PTP and timestamp control */ +#define MC_CMD_FC_OP_TIMESTAMP 0x1c +/* enum: Commands for SPI Flash interface */ +#define MC_CMD_FC_OP_SPI 0x1d +/* enum: Commands for diagnostic components */ +#define MC_CMD_FC_OP_DIAG 0x1e +/* enum: External AOE port. */ +#define MC_CMD_FC_IN_PORT_EXT_OFST 0x0 +/* enum: Internal AOE port. 
 */
+#define MC_CMD_FC_IN_PORT_INT_OFST 0x40
+
+/* MC_CMD_FC_IN_NULL msgrequest */
+#define MC_CMD_FC_IN_NULL_LEN 4
+#define MC_CMD_FC_IN_CMD_OFST 0
+#define MC_CMD_FC_IN_CMD_LEN 4
+
+/* MC_CMD_FC_IN_PHY msgrequest */
+#define MC_CMD_FC_IN_PHY_LEN 5
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* FC PHY driver operation code */
+#define MC_CMD_FC_IN_PHY_OP_OFST 4
+#define MC_CMD_FC_IN_PHY_OP_LEN 1
+/* enum: PHY init handler */
+#define MC_CMD_FC_OP_PHY_OP_INIT 0x1
+/* enum: PHY reconfigure handler */
+#define MC_CMD_FC_OP_PHY_OP_RECONFIGURE 0x2
+/* enum: PHY reboot handler */
+#define MC_CMD_FC_OP_PHY_OP_REBOOT 0x3
+/* enum: PHY get_supported_cap handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_SUPPORTED_CAP 0x4
+/* enum: PHY get_config handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONFIG 0x5
+/* enum: PHY get_media_info handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_MEDIA_INFO 0x6
+/* enum: PHY set_led handler */
+#define MC_CMD_FC_OP_PHY_OP_SET_LED 0x7
+/* enum: PHY lasi_interrupt handler */
+#define MC_CMD_FC_OP_PHY_OP_LASI_INTERRUPT 0x8
+/* enum: PHY check_link handler */
+#define MC_CMD_FC_OP_PHY_OP_CHECK_LINK 0x9
+/* enum: PHY fill_stats handler */
+#define MC_CMD_FC_OP_PHY_OP_FILL_STATS 0xa
+/* enum: PHY bpx_link_state_changed handler */
+#define MC_CMD_FC_OP_PHY_OP_BPX_LINK_STATE_CHANGED 0xb
+/* enum: PHY get_state handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_STATE 0xc
+/* enum: PHY start_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_START_BIST 0xd
+/* enum: PHY poll_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_POLL_BIST 0xe
+/* enum: PHY nvram_test handler */
+#define MC_CMD_FC_OP_PHY_OP_NVRAM_TEST 0xf
+/* enum: PHY relinquish handler */
+#define MC_CMD_FC_OP_PHY_OP_RELINQUISH_SPI 0x10
+/* enum: PHY read connection from FC - may not be required */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONNECTION 0x11
+/* enum: PHY read flags from FC - may not be required */
+#define MC_CMD_FC_OP_PHY_OP_GET_FLAGS 0x12
+
+/* MC_CMD_FC_IN_PHY_INIT msgrequest */
+#define MC_CMD_FC_IN_PHY_INIT_LEN 4
+#define MC_CMD_FC_IN_PHY_CMD_OFST 0
+#define MC_CMD_FC_IN_PHY_CMD_LEN 4
+
+/* MC_CMD_FC_IN_MAC msgrequest */
+#define MC_CMD_FC_IN_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_MAC_HEADER_OFST 4
+#define MC_CMD_FC_IN_MAC_HEADER_LEN 4
+#define MC_CMD_FC_IN_MAC_OP_LBN 0
+#define MC_CMD_FC_IN_MAC_OP_WIDTH 8
+/* enum: MAC reconfigure handler */
+#define MC_CMD_FC_OP_MAC_OP_RECONFIGURE 0x1
+/* enum: MAC Set command - same as MC_CMD_SET_MAC */
+#define MC_CMD_FC_OP_MAC_OP_SET_LINK 0x2
+/* enum: MAC statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_STATS 0x3
+/* enum: MAC RX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_RX_STATS 0x6
+/* enum: MAC TX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_TX_STATS 0x7
+/* enum: MAC Read status */
+#define MC_CMD_FC_OP_MAC_OP_READ_STATUS 0x8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_WIDTH 8
+/* enum: External FPGA port. */
+#define MC_CMD_FC_PORT_EXT 0x0
+/* enum: Internal Siena-facing FPGA ports. */
+#define MC_CMD_FC_PORT_INT 0x1
+#define MC_CMD_FC_IN_MAC_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_MAC_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number.
Port number determined by fields + * PORT_TYPE and PORT_IDX. + */ +#define MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE 0x1 + +/* MC_CMD_FC_IN_MAC_RECONFIGURE msgrequest */ +#define MC_CMD_FC_IN_MAC_RECONFIGURE_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_MAC_SET_LINK msgrequest */ +#define MC_CMD_FC_IN_MAC_SET_LINK_LEN 32 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ +/* MTU size */ +#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_OFST 8 +#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_LEN 4 +/* Drain Tx FIFO */ +#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_OFST 12 +#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_LEN 4 +#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_OFST 16 +#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LEN 8 +#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LO_OFST 16 +#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_HI_OFST 20 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_OFST 24 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_LEN 4 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_LBN 0 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_WIDTH 1 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_LBN 1 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_WIDTH 1 +#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_OFST 28 +#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_LEN 4 + +/* MC_CMD_FC_IN_MAC_READ_STATUS msgrequest */ +#define MC_CMD_FC_IN_MAC_READ_STATUS_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_MAC_GET_RX_STATS msgrequest */ +#define MC_CMD_FC_IN_MAC_GET_RX_STATS_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_MAC_GET_TX_STATS msgrequest */ +#define MC_CMD_FC_IN_MAC_GET_TX_STATS_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_MAC_GET_STATS msgrequest */ +#define MC_CMD_FC_IN_MAC_GET_STATS_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ +/* MC Statistics index */ +#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_OFST 8 +#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_LEN 4 +#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_OFST 12 +#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_LEN 4 +#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_LBN 0 +#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_WIDTH 1 +#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_LBN 1 +#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_WIDTH 1 +#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_LBN 2 +#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_WIDTH 1 +/* Number of statistics to read */ +#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_OFST 16 +#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_LEN 4 +#define MC_CMD_FC_MAC_NSTATS_PER_BLOCK 0x1e /* enum */ +#define MC_CMD_FC_MAC_NBYTES_PER_STAT 0x8 /* enum */ + +/* MC_CMD_FC_IN_READ32 msgrequest */ +#define MC_CMD_FC_IN_READ32_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_READ32_ADDR_HI_OFST 4 +#define MC_CMD_FC_IN_READ32_ADDR_HI_LEN 4 +#define MC_CMD_FC_IN_READ32_ADDR_LO_OFST 8 +#define MC_CMD_FC_IN_READ32_ADDR_LO_LEN 4 +#define MC_CMD_FC_IN_READ32_NUMWORDS_OFST 12 +#define MC_CMD_FC_IN_READ32_NUMWORDS_LEN 4 + +/* MC_CMD_FC_IN_WRITE32 msgrequest */ 
+#define MC_CMD_FC_IN_WRITE32_LENMIN 16 +#define MC_CMD_FC_IN_WRITE32_LENMAX 252 +#define MC_CMD_FC_IN_WRITE32_LEN(num) (12+4*(num)) +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_WRITE32_ADDR_HI_OFST 4 +#define MC_CMD_FC_IN_WRITE32_ADDR_HI_LEN 4 +#define MC_CMD_FC_IN_WRITE32_ADDR_LO_OFST 8 +#define MC_CMD_FC_IN_WRITE32_ADDR_LO_LEN 4 +#define MC_CMD_FC_IN_WRITE32_BUFFER_OFST 12 +#define MC_CMD_FC_IN_WRITE32_BUFFER_LEN 4 +#define MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM 1 +#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM 60 + +/* MC_CMD_FC_IN_TRC_READ msgrequest */ +#define MC_CMD_FC_IN_TRC_READ_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TRC_READ_TRC_OFST 4 +#define MC_CMD_FC_IN_TRC_READ_TRC_LEN 4 +#define MC_CMD_FC_IN_TRC_READ_CHANNEL_OFST 8 +#define MC_CMD_FC_IN_TRC_READ_CHANNEL_LEN 4 + +/* MC_CMD_FC_IN_TRC_WRITE msgrequest */ +#define MC_CMD_FC_IN_TRC_WRITE_LEN 28 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TRC_WRITE_TRC_OFST 4 +#define MC_CMD_FC_IN_TRC_WRITE_TRC_LEN 4 +#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_OFST 8 +#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_LEN 4 +#define MC_CMD_FC_IN_TRC_WRITE_DATA_OFST 12 +#define MC_CMD_FC_IN_TRC_WRITE_DATA_LEN 4 +#define MC_CMD_FC_IN_TRC_WRITE_DATA_NUM 4 + +/* MC_CMD_FC_IN_GET_VERSION msgrequest */ +#define MC_CMD_FC_IN_GET_VERSION_LEN 4 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ + +/* MC_CMD_FC_IN_TRC_RX_READ msgrequest */ +#define MC_CMD_FC_IN_TRC_RX_READ_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TRC_RX_READ_TRC_OFST 4 +#define MC_CMD_FC_IN_TRC_RX_READ_TRC_LEN 4 +#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_OFST 8 +#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_LEN 4 + +/* MC_CMD_FC_IN_TRC_RX_WRITE msgrequest */ +#define MC_CMD_FC_IN_TRC_RX_WRITE_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_OFST 4 +#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_LEN 4 +#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_OFST 8 +#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_LEN 4 +#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_OFST 12 +#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_LEN 4 +#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_NUM 2 + +/* MC_CMD_FC_IN_SFP msgrequest */ +#define MC_CMD_FC_IN_SFP_LEN 28 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* Link speed is 100, 1000, 10000, 40000 */ +#define MC_CMD_FC_IN_SFP_SPEED_OFST 4 +#define MC_CMD_FC_IN_SFP_SPEED_LEN 4 +/* Length of copper cable - zero when not relevant (e.g. if cable is fibre) */ +#define MC_CMD_FC_IN_SFP_COPPER_LEN_OFST 8 +#define MC_CMD_FC_IN_SFP_COPPER_LEN_LEN 4 +/* Not relevant for cards with QSFP modules. For older cards, true if module is + * a dual speed SFP+ module. + */ +#define MC_CMD_FC_IN_SFP_DUAL_SPEED_OFST 12 +#define MC_CMD_FC_IN_SFP_DUAL_SPEED_LEN 4 +/* True if an SFP Module is present (other fields valid when true) */ +#define MC_CMD_FC_IN_SFP_PRESENT_OFST 16 +#define MC_CMD_FC_IN_SFP_PRESENT_LEN 4 +/* The type of the SFP+ Module. For later cards with QSFP modules, this field + * is unused and the type is communicated by other means. + */ +#define MC_CMD_FC_IN_SFP_TYPE_OFST 20 +#define MC_CMD_FC_IN_SFP_TYPE_LEN 4 +/* Capabilities corresponding to 1 bits. 
 */
+#define MC_CMD_FC_IN_SFP_CAPS_OFST 24
+#define MC_CMD_FC_IN_SFP_CAPS_LEN 4
+
+/* MC_CMD_FC_IN_DDR_TEST msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4
+#define MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4
+#define MC_CMD_FC_IN_DDR_TEST_OP_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_OP_WIDTH 8
+/* enum: DRAM Test Start */
+#define MC_CMD_FC_OP_DDR_TEST_START 0x1
+/* enum: DRAM Test Poll */
+#define MC_CMD_FC_OP_DDR_TEST_POLL 0x2
+
+/* MC_CMD_FC_IN_DDR_TEST_START msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_START_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_DDR_TEST_START_MASK_OFST 8
+#define MC_CMD_FC_IN_DDR_TEST_START_MASK_LEN 4
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_LBN 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_LBN 2
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_LBN 3
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_WIDTH 1
+
+/* MC_CMD_FC_IN_DDR_TEST_POLL msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_LEN 12
+#define MC_CMD_FC_IN_DDR_TEST_CMD_OFST 0
+#define MC_CMD_FC_IN_DDR_TEST_CMD_LEN 4
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4 */
+/* Clear previous test result and prepare for restarting DDR test */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_OFST 8
+#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_LEN 4
+
+/* MC_CMD_FC_IN_GET_ASSERT msgrequest */
+#define MC_CMD_FC_IN_GET_ASSERT_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+
+/* MC_CMD_FC_IN_FPGA_BUILD msgrequest */
+#define MC_CMD_FC_IN_FPGA_BUILD_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* FPGA build info operation code */
+#define MC_CMD_FC_IN_FPGA_BUILD_OP_OFST 4
+#define MC_CMD_FC_IN_FPGA_BUILD_OP_LEN 4
+/* enum: Get the build registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD 0x1
+/* enum: Get the services registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES 0x2
+/* enum: Get the BSP version */
+#define MC_CMD_FC_IN_FPGA_BUILD_BSP_VERSION 0x3
+/* enum: Get the build registers for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD_V2 0x4
+/* enum: Get the services registers for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES_V2 0x5
+
+/* MC_CMD_FC_IN_READ_MAP msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4
+#define MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4
+#define MC_CMD_FC_IN_READ_MAP_OP_LBN 0
+#define MC_CMD_FC_IN_READ_MAP_OP_WIDTH 8
+/* enum: Get the number of map regions */
+#define MC_CMD_FC_OP_READ_MAP_COUNT 0x1
+/* enum: Get the specified map */
+#define MC_CMD_FC_OP_READ_MAP_INDEX 0x2
+
+/* MC_CMD_FC_IN_READ_MAP_COUNT msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_COUNT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_READ_MAP_INDEX msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_INDEX_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_MAP_INDEX_OFST 8
+#define MC_CMD_FC_IN_MAP_INDEX_LEN 4 + +/* MC_CMD_FC_IN_CAPABILITIES msgrequest */ +#define MC_CMD_FC_IN_CAPABILITIES_LEN 4 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ + +/* MC_CMD_FC_IN_GLOBAL_FLAGS msgrequest */ +#define MC_CMD_FC_IN_GLOBAL_FLAGS_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_OFST 4 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_LEN 4 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_LBN 0 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_LBN 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_LBN 2 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_LBN 3 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_LBN 4 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_LBN 5 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_WIDTH 1 + +/* MC_CMD_FC_IN_IO_REL msgrequest */ +#define MC_CMD_FC_IN_IO_REL_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 +#define MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 +#define MC_CMD_FC_IN_IO_REL_OP_LBN 0 +#define MC_CMD_FC_IN_IO_REL_OP_WIDTH 8 +/* enum: Get the base address that the FC applies to relative commands */ +#define MC_CMD_FC_IN_IO_REL_GET_ADDR 0x1 +/* enum: Read data */ +#define MC_CMD_FC_IN_IO_REL_READ32 0x2 +/* enum: Write data */ +#define MC_CMD_FC_IN_IO_REL_WRITE32 0x3 +#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_LBN 8 +#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_WIDTH 8 +/* enum: Application address space */ +#define MC_CMD_FC_COMP_TYPE_APP_ADDR_SPACE 0x1 +/* enum: Flash address space */ +#define MC_CMD_FC_COMP_TYPE_FLASH 0x2 + +/* MC_CMD_FC_IN_IO_REL_GET_ADDR msgrequest */ +#define MC_CMD_FC_IN_IO_REL_GET_ADDR_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_IO_REL_READ32 msgrequest */ +#define MC_CMD_FC_IN_IO_REL_READ32_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */ +#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_OFST 8 +#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_LEN 4 +#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_OFST 12 +#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_LEN 4 +#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_OFST 16 +#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_LEN 4 + +/* MC_CMD_FC_IN_IO_REL_WRITE32 msgrequest */ +#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMIN 20 +#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX 252 +#define MC_CMD_FC_IN_IO_REL_WRITE32_LEN(num) (16+4*(num)) +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */ +#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_OFST 8 +#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_LEN 4 +#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_OFST 12 +#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_LEN 4 +#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_OFST 16 +#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_LEN 4 +#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MINNUM 1 +#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM 59 + +/* MC_CMD_FC_IN_UHLINK msgrequest */ +#define MC_CMD_FC_IN_UHLINK_LEN 8 
+/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 +#define MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 +#define MC_CMD_FC_IN_UHLINK_OP_LBN 0 +#define MC_CMD_FC_IN_UHLINK_OP_WIDTH 8 +/* enum: Get PHY configuration info */ +#define MC_CMD_FC_OP_UHLINK_PHY 0x1 +/* enum: Get MAC configuration info */ +#define MC_CMD_FC_OP_UHLINK_MAC 0x2 +/* enum: Get Rx eye table */ +#define MC_CMD_FC_OP_UHLINK_RX_EYE 0x3 +/* enum: Dump Rx eye plot */ +#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT 0x4 +/* enum: Read Rx eye plot */ +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT 0x5 +/* enum: Retune Rx settings */ +#define MC_CMD_FC_OP_UHLINK_RX_TUNE 0x6 +/* enum: Set loopback mode on fpga port */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET 0x7 +/* enum: Get loopback mode config state on fpga port */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET 0x8 +#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN 8 +#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_WIDTH 8 +#define MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN 16 +#define MC_CMD_FC_IN_UHLINK_PORT_IDX_WIDTH 8 +#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN 24 +#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_WIDTH 8 +/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are + * irrelevant. Port number is derived from pci_fn; passed in FC header. + */ +#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_DEFAULT 0x0 +/* enum: Override default port number. Port number determined by fields + * PORT_TYPE and PORT_IDX. + */ +#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE 0x1 + +/* MC_CMD_FC_OP_UHLINK_PHY msgrequest */ +#define MC_CMD_FC_OP_UHLINK_PHY_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ + +/* MC_CMD_FC_OP_UHLINK_MAC msgrequest */ +#define MC_CMD_FC_OP_UHLINK_MAC_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ + +/* MC_CMD_FC_OP_UHLINK_RX_EYE msgrequest */ +#define MC_CMD_FC_OP_UHLINK_RX_EYE_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ +#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_OFST 8 +#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_LEN 4 +#define MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK 0x30 /* enum */ + +/* MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT msgrequest */ +#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ + +/* MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT msgrequest */ +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_OFST 8 +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_LEN 4 +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_OFST 12 +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_LEN 4 +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_OFST 16 +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_LEN 4 +#define MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK 0x1e /* enum */ + +/* MC_CMD_FC_OP_UHLINK_RX_TUNE msgrequest */ +#define MC_CMD_FC_OP_UHLINK_RX_TUNE_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ + +/* 
MC_CMD_FC_OP_UHLINK_LOOPBACK_SET msgrequest */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_OFST 8 +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_LEN 4 +#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PCS_SERIAL 0x0 /* enum */ +#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_PRE_CDR 0x1 /* enum */ +#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_POST_CDR 0x2 /* enum */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_OFST 12 +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_LEN 4 +#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_OFF 0x0 /* enum */ +#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_ON 0x1 /* enum */ + +/* MC_CMD_FC_OP_UHLINK_LOOPBACK_GET msgrequest */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_OFST 8 +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_LEN 4 + +/* MC_CMD_FC_IN_SET_LINK msgrequest */ +#define MC_CMD_FC_IN_SET_LINK_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* See MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_FC_IN_SET_LINK_MODE_OFST 4 +#define MC_CMD_FC_IN_SET_LINK_MODE_LEN 4 +#define MC_CMD_FC_IN_SET_LINK_SPEED_OFST 8 +#define MC_CMD_FC_IN_SET_LINK_SPEED_LEN 4 +#define MC_CMD_FC_IN_SET_LINK_FLAGS_OFST 12 +#define MC_CMD_FC_IN_SET_LINK_FLAGS_LEN 4 +#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN 0 +#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH 1 +#define MC_CMD_FC_IN_SET_LINK_POWEROFF_LBN 1 +#define MC_CMD_FC_IN_SET_LINK_POWEROFF_WIDTH 1 +#define MC_CMD_FC_IN_SET_LINK_TXDIS_LBN 2 +#define MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH 1 + +/* MC_CMD_FC_IN_LICENSE msgrequest */ +#define MC_CMD_FC_IN_LICENSE_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_LICENSE_OP_OFST 4 +#define MC_CMD_FC_IN_LICENSE_OP_LEN 4 +#define MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE 0x0 /* enum */ +#define MC_CMD_FC_IN_LICENSE_GET_KEY_STATS 0x1 /* enum */ + +/* MC_CMD_FC_IN_STARTUP msgrequest */ +#define MC_CMD_FC_IN_STARTUP_LEN 40 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_STARTUP_BASE_OFST 4 +#define MC_CMD_FC_IN_STARTUP_BASE_LEN 4 +#define MC_CMD_FC_IN_STARTUP_LENGTH_OFST 8 +#define MC_CMD_FC_IN_STARTUP_LENGTH_LEN 4 +/* Length of identifier */ +#define MC_CMD_FC_IN_STARTUP_IDLENGTH_OFST 12 +#define MC_CMD_FC_IN_STARTUP_IDLENGTH_LEN 4 +/* Identifier for AOE FPGA */ +#define MC_CMD_FC_IN_STARTUP_ID_OFST 16 +#define MC_CMD_FC_IN_STARTUP_ID_LEN 1 +#define MC_CMD_FC_IN_STARTUP_ID_NUM 24 + +/* MC_CMD_FC_IN_DMA msgrequest */ +#define MC_CMD_FC_IN_DMA_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DMA_OP_OFST 4 +#define MC_CMD_FC_IN_DMA_OP_LEN 4 +#define MC_CMD_FC_IN_DMA_STOP 0x0 /* enum */ +#define MC_CMD_FC_IN_DMA_READ 0x1 /* enum */ + +/* MC_CMD_FC_IN_DMA_STOP msgrequest */ +#define MC_CMD_FC_IN_DMA_STOP_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_DMA_OP_OFST 4 */ +/* MC_CMD_FC_IN_DMA_OP_LEN 4 */ +/* FC supplied handle */ +#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_OFST 8 +#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_LEN 4 + +/* MC_CMD_FC_IN_DMA_READ msgrequest */ +#define MC_CMD_FC_IN_DMA_READ_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ 
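/*
 * A minimal sketch of the _LBN/_WIDTH convention used throughout these
 * definitions: each pair names the least significant bit number and the
 * bit width of a field inside a little-endian dword. The helpers below
 * are hypothetical; only the MC_CMD_FC_IN_SET_LINK_* macros come from
 * this header.
 */
#include <stdint.h>

static inline uint32_t fc_field(uint32_t value, unsigned int lbn,
				unsigned int width)
{
	/* Mask the value to the field width, then shift to its LBN. */
	return (value & ((1u << width) - 1u)) << lbn;
}

/* Example: FLAGS dword requesting low-power mode with Tx disabled. */
static uint32_t fc_set_link_flags(void)
{
	return fc_field(1, MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN,
			MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH) |
	       fc_field(1, MC_CMD_FC_IN_SET_LINK_TXDIS_LBN,
			MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH);
}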
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */ +/* MC_CMD_FC_IN_DMA_OP_LEN 4 */ +#define MC_CMD_FC_IN_DMA_READ_OFFSET_OFST 8 +#define MC_CMD_FC_IN_DMA_READ_OFFSET_LEN 4 +#define MC_CMD_FC_IN_DMA_READ_LENGTH_OFST 12 +#define MC_CMD_FC_IN_DMA_READ_LENGTH_LEN 4 + +/* MC_CMD_FC_IN_TIMED_READ msgrequest */ +#define MC_CMD_FC_IN_TIMED_READ_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 +#define MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 +#define MC_CMD_FC_IN_TIMED_READ_SET 0x0 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_GET 0x1 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_CLEAR 0x2 /* enum */ + +/* MC_CMD_FC_IN_TIMED_READ_SET msgrequest */ +#define MC_CMD_FC_IN_TIMED_READ_SET_LEN 52 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */ +/* Host supplied handle (unique) */ +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST 8 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_LEN 4 +/* Address into which to transfer data in host */ +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_OFST 12 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LEN 8 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST 12 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST 16 +/* AOE address from which to transfer data */ +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_OFST 20 +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LEN 8 +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST 20 +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST 24 +/* Length of AOE transfer (total) */ +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST 28 +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_LEN 4 +/* Length of host transfer (total) */ +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST 32 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_LEN 4 +/* Offset back from aoe_address to apply operation to */ +#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_OFST 36 +#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_LEN 4 +/* Data to apply at offset */ +#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_OFST 40 +#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_LEN 4 +#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST 44 +#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_LEN 4 +#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN 0 +#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_WIDTH 1 +#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN 1 +#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_WIDTH 1 +#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN 2 +#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_WIDTH 1 +#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN 3 +#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_WIDTH 2 +#define MC_CMD_FC_IN_TIMED_READ_SET_NONE 0x0 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_SET_READ 0x1 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_SET_WRITE 0x2 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_SET_READWRITE 0x3 /* enum */ +/* Period at which reads are performed (100ms units) */ +#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST 48 +#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_LEN 4 + +/* MC_CMD_FC_IN_TIMED_READ_GET msgrequest */ +#define MC_CMD_FC_IN_TIMED_READ_GET_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */ +/* FC supplied handle */ +#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_OFST 8 +#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_LEN 4 + +/* MC_CMD_FC_IN_TIMED_READ_CLEAR msgrequest */ +#define MC_CMD_FC_IN_TIMED_READ_CLEAR_LEN 12 
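/*
 * A minimal sketch of the _LO/_HI convention for 64-bit fields such as
 * MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS above: the value is
 * carried as two consecutive little-endian dwords. The buffer helpers
 * are hypothetical; the _OFST macros come from this header.
 */
#include <stdint.h>
#include <string.h>

static void fc_put_dword_le(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	/* memcpy of a native value; assumes a little-endian host, where
	 * production drivers would use explicit LE store helpers. */
	memcpy(buf + ofst, &v, sizeof(v));
}

static void fc_timed_read_set_host_addr(uint8_t *buf, uint64_t dma_addr)
{
	fc_put_dword_le(buf,
	    MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST,
	    (uint32_t)dma_addr);
	fc_put_dword_le(buf,
	    MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST,
	    (uint32_t)(dma_addr >> 32));
}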
+/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */ +/* FC supplied handle */ +#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_OFST 8 +#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_LEN 4 + +/* MC_CMD_FC_IN_LOG msgrequest */ +#define MC_CMD_FC_IN_LOG_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_LOG_OP_OFST 4 +#define MC_CMD_FC_IN_LOG_OP_LEN 4 +#define MC_CMD_FC_IN_LOG_ADDR_RANGE 0x0 /* enum */ +#define MC_CMD_FC_IN_LOG_JTAG_UART 0x1 /* enum */ + +/* MC_CMD_FC_IN_LOG_ADDR_RANGE msgrequest */ +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_LOG_OP_OFST 4 */ +/* MC_CMD_FC_IN_LOG_OP_LEN 4 */ +/* Partition offset into flash */ +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_OFST 8 +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_LEN 4 +/* Partition length */ +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_OFST 12 +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_LEN 4 +/* Partition erase size */ +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_OFST 16 +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_LEN 4 + +/* MC_CMD_FC_IN_LOG_JTAG_UART msgrequest */ +#define MC_CMD_FC_IN_LOG_JTAG_UART_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_LOG_OP_OFST 4 */ +/* MC_CMD_FC_IN_LOG_OP_LEN 4 */ +/* Enable/disable printing to JTAG UART */ +#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_OFST 8 +#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_LEN 4 + +/* MC_CMD_FC_IN_CLOCK msgrequest: Perform a clock operation */ +#define MC_CMD_FC_IN_CLOCK_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_CLOCK_OP_OFST 4 +#define MC_CMD_FC_IN_CLOCK_OP_LEN 4 +#define MC_CMD_FC_IN_CLOCK_GET_TIME 0x0 /* enum */ +#define MC_CMD_FC_IN_CLOCK_SET_TIME 0x1 /* enum */ +#define MC_CMD_FC_IN_CLOCK_ID_OFST 8 +#define MC_CMD_FC_IN_CLOCK_ID_LEN 4 +#define MC_CMD_FC_IN_CLOCK_STATS 0x0 /* enum */ +#define MC_CMD_FC_IN_CLOCK_MAC 0x1 /* enum */ + +/* MC_CMD_FC_IN_CLOCK_GET_TIME msgrequest: Retrieve the clock value of the + * specified clock + */ +#define MC_CMD_FC_IN_CLOCK_GET_TIME_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */ +/* MC_CMD_FC_IN_CLOCK_OP_LEN 4 */ +/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */ +/* MC_CMD_FC_IN_CLOCK_ID_LEN 4 */ + +/* MC_CMD_FC_IN_CLOCK_SET_TIME msgrequest: Set the clock value of the specified + * clock + */ +#define MC_CMD_FC_IN_CLOCK_SET_TIME_LEN 24 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */ +/* MC_CMD_FC_IN_CLOCK_OP_LEN 4 */ +/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */ +/* MC_CMD_FC_IN_CLOCK_ID_LEN 4 */ +#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_OFST 12 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LEN 8 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST 12 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST 16 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_OFST 20 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_LEN 4 + +/* MC_CMD_FC_IN_DDR msgrequest */ +#define MC_CMD_FC_IN_DDR_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DDR_OP_OFST 4 +#define MC_CMD_FC_IN_DDR_OP_LEN 4 +#define MC_CMD_FC_IN_DDR_SET_SPD 0x0 /* enum */ +#define MC_CMD_FC_IN_DDR_GET_STATUS 0x1 /* enum */ +#define MC_CMD_FC_IN_DDR_SET_INFO 0x2 /* enum */ +#define MC_CMD_FC_IN_DDR_BANK_OFST 8 +#define MC_CMD_FC_IN_DDR_BANK_LEN 4 +#define 
MC_CMD_FC_IN_DDR_BANK_B0 0x0 /* enum */ +#define MC_CMD_FC_IN_DDR_BANK_B1 0x1 /* enum */ +#define MC_CMD_FC_IN_DDR_BANK_T0 0x2 /* enum */ +#define MC_CMD_FC_IN_DDR_BANK_T1 0x3 /* enum */ +#define MC_CMD_FC_IN_DDR_NUM_BANKS 0x4 /* enum */ + +/* MC_CMD_FC_IN_DDR_SET_SPD msgrequest */ +#define MC_CMD_FC_IN_DDR_SET_SPD_LEN 148 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ +/* MC_CMD_FC_IN_DDR_OP_LEN 4 */ +/* Affected bank */ +/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ +/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */ +/* Flags */ +#define MC_CMD_FC_IN_DDR_FLAGS_OFST 12 +#define MC_CMD_FC_IN_DDR_FLAGS_LEN 4 +#define MC_CMD_FC_IN_DDR_SET_SPD_ACTIVE 0x1 /* enum */ +/* 128-byte page of serial presence detect data read from module's EEPROM */ +#define MC_CMD_FC_IN_DDR_SPD_OFST 16 +#define MC_CMD_FC_IN_DDR_SPD_LEN 1 +#define MC_CMD_FC_IN_DDR_SPD_NUM 128 +/* Page index of the spd data copied into MC_CMD_FC_IN_DDR_SPD */ +#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST 144 +#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_LEN 4 + +/* MC_CMD_FC_IN_DDR_SET_INFO msgrequest */ +#define MC_CMD_FC_IN_DDR_SET_INFO_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ +/* MC_CMD_FC_IN_DDR_OP_LEN 4 */ +/* Affected bank */ +/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ +/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */ +/* Size of DDR */ +#define MC_CMD_FC_IN_DDR_SIZE_OFST 12 +#define MC_CMD_FC_IN_DDR_SIZE_LEN 4 + +/* MC_CMD_FC_IN_DDR_GET_STATUS msgrequest */ +#define MC_CMD_FC_IN_DDR_GET_STATUS_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ +/* MC_CMD_FC_IN_DDR_OP_LEN 4 */ +/* Affected bank */ +/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ +/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */ + +/* MC_CMD_FC_IN_TIMESTAMP msgrequest */ +#define MC_CMD_FC_IN_TIMESTAMP_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* FC timestamp operation code */ +#define MC_CMD_FC_IN_TIMESTAMP_OP_OFST 4 +#define MC_CMD_FC_IN_TIMESTAMP_OP_LEN 4 +/* enum: Read transmit timestamp(s) */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT 0x0 +/* enum: Read snapshot timestamps */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT 0x1 +/* enum: Clear all transmit timestamps */ +#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT 0x2 + +/* MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT msgrequest */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN 28 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST 4 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_LEN 4 +/* Control filtering of the returned timestamp and sequence number specified + * here + */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST 8 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_LEN 4 +/* enum: Return most recent timestamp. 
No filtering */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LATEST 0x0 +/* enum: Match timestamp against the PTP clock ID, port number and sequence + * number specified + */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH 0x1 +/* Clock identity of PTP packet for which timestamp required */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_OFST 12 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LEN 8 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST 12 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST 16 +/* Port number of PTP packet for which timestamp required */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST 20 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_LEN 4 +/* Sequence number of PTP packet for which timestamp required */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST 24 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_LEN 4 + +/* MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT msgrequest */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_OFST 4 +#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_LEN 4 + +/* MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT msgrequest */ +#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_OFST 4 +#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_LEN 4 + +/* MC_CMD_FC_IN_SPI msgrequest */ +#define MC_CMD_FC_IN_SPI_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* Basic commands for SPI Flash. */ +#define MC_CMD_FC_IN_SPI_OP_OFST 4 +#define MC_CMD_FC_IN_SPI_OP_LEN 4 +/* enum: SPI Flash read */ +#define MC_CMD_FC_IN_SPI_READ 0x0 +/* enum: SPI Flash write */ +#define MC_CMD_FC_IN_SPI_WRITE 0x1 +/* enum: SPI Flash erase */ +#define MC_CMD_FC_IN_SPI_ERASE 0x2 + +/* MC_CMD_FC_IN_SPI_READ msgrequest */ +#define MC_CMD_FC_IN_SPI_READ_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_SPI_READ_OP_OFST 4 +#define MC_CMD_FC_IN_SPI_READ_OP_LEN 4 +#define MC_CMD_FC_IN_SPI_READ_ADDR_OFST 8 +#define MC_CMD_FC_IN_SPI_READ_ADDR_LEN 4 +#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_OFST 12 +#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_LEN 4 + +/* MC_CMD_FC_IN_SPI_WRITE msgrequest */ +#define MC_CMD_FC_IN_SPI_WRITE_LENMIN 16 +#define MC_CMD_FC_IN_SPI_WRITE_LENMAX 252 +#define MC_CMD_FC_IN_SPI_WRITE_LEN(num) (12+4*(num)) +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_SPI_WRITE_OP_OFST 4 +#define MC_CMD_FC_IN_SPI_WRITE_OP_LEN 4 +#define MC_CMD_FC_IN_SPI_WRITE_ADDR_OFST 8 +#define MC_CMD_FC_IN_SPI_WRITE_ADDR_LEN 4 +#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_OFST 12 +#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_LEN 4 +#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM 60 + +/* MC_CMD_FC_IN_SPI_ERASE msgrequest */ +#define MC_CMD_FC_IN_SPI_ERASE_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_SPI_ERASE_OP_OFST 4 +#define MC_CMD_FC_IN_SPI_ERASE_OP_LEN 4 +#define MC_CMD_FC_IN_SPI_ERASE_ADDR_OFST 8 +#define MC_CMD_FC_IN_SPI_ERASE_ADDR_LEN 4 +#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_OFST 12 +#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_LEN 4 + +/* MC_CMD_FC_IN_DIAG msgrequest */ +#define MC_CMD_FC_IN_DIAG_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* Operation code indicating component type */ +#define MC_CMD_FC_IN_DIAG_OP_OFST 4 
+#define MC_CMD_FC_IN_DIAG_OP_LEN 4 +/* enum: Power noise generator. */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE 0x0 +/* enum: DDR soak test component. */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK 0x1 +/* enum: Diagnostics datapath control component. */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL 0x2 + +/* MC_CMD_FC_IN_DIAG_POWER_NOISE msgrequest */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_LEN 4 +/* Sub-opcode describing the operation to be carried out */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_LEN 4 +/* enum: Read the configuration (the 32-bit values in each of the clock enable + * count and toggle count registers) + */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG 0x0 +/* enum: Write a new configuration to the clock enable count and toggle count + * registers + */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG 0x1 + +/* MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG msgrequest */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_LEN 4 + +/* MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG msgrequest */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_LEN 4 +/* The 32-bit value to be written to the toggle count register */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_OFST 12 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_LEN 4 +/* The 32-bit value to be written to the clock enable count register */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_OFST 16 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_LEN 4 + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_LEN 4 +/* Sub-opcode describing the operation to be carried out */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_LEN 4 +/* enum: Starts DDR soak test on selected banks */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START 0x0 +/* enum: Read status of DDR soak test */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT 0x1 +/* enum: Stop test */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP 0x2 +/* enum: Set or clear bit that triggers fake errors. These cause subsequent + * tests to fail until the bit is cleared. 
+ */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR 0x3 + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK_START msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_LEN 24 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_LEN 4 +/* Mask of DDR banks to be tested */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_OFST 12 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_LEN 4 +/* Pattern to use in the soak test */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_OFST 16 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ZEROS 0x0 /* enum */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONES 0x1 /* enum */ +/* Either multiple automatic tests until a STOP command is issued, or one + * single test + */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_OFST 20 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONGOING_TEST 0x0 /* enum */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SINGLE_TEST 0x1 /* enum */ + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_LEN 4 +/* DDR bank to read status from */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_OFST 12 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_LEN 4 +#define MC_CMD_FC_DDR_BANK0 0x0 /* enum */ +#define MC_CMD_FC_DDR_BANK1 0x1 /* enum */ +#define MC_CMD_FC_DDR_BANK2 0x2 /* enum */ +#define MC_CMD_FC_DDR_BANK3 0x3 /* enum */ +#define MC_CMD_FC_DDR_AOEMEM_MAX_BANKS 0x4 /* enum */ + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_LEN 4 +/* Mask of DDR banks to be tested */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_OFST 12 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_LEN 4 + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_LEN 4 +/* Mask of DDR banks to set/clear error flag on */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_OFST 12 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_OFST 16 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_CLEAR 0x0 /* enum */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SET 0x1 /* enum */ + +/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL msgrequest */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_OFST 4 +#define 
MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_LEN 4 +/* Sub-opcode describing the operation to be carried out */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_LEN 4 +/* enum: Set a known datapath configuration */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE 0x0 +/* enum: Apply raw config to datapath control registers */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG 0x1 + +/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE msgrequest */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_LEN 4 +/* Datapath configuration identifier */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_OFST 12 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_LEN 4 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_PASSTHROUGH 0x0 /* enum */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SNAKE 0x1 /* enum */ + +/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG msgrequest */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 24 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_LEN 4 +/* Value to write into control register 1 */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_OFST 12 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_LEN 4 +/* Value to write into control register 2 */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_OFST 16 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_LEN 4 +/* Value to write into control register 3 */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_OFST 20 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_LEN 4 + +/* MC_CMD_FC_OUT msgresponse */ +#define MC_CMD_FC_OUT_LEN 0 + +/* MC_CMD_FC_OUT_NULL msgresponse */ +#define MC_CMD_FC_OUT_NULL_LEN 0 + +/* MC_CMD_FC_OUT_READ32 msgresponse */ +#define MC_CMD_FC_OUT_READ32_LENMIN 4 +#define MC_CMD_FC_OUT_READ32_LENMAX 252 +#define MC_CMD_FC_OUT_READ32_LEN(num) (0+4*(num)) +#define MC_CMD_FC_OUT_READ32_BUFFER_OFST 0 +#define MC_CMD_FC_OUT_READ32_BUFFER_LEN 4 +#define MC_CMD_FC_OUT_READ32_BUFFER_MINNUM 1 +#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM 63 + +/* MC_CMD_FC_OUT_WRITE32 msgresponse */ +#define MC_CMD_FC_OUT_WRITE32_LEN 0 + +/* MC_CMD_FC_OUT_TRC_READ msgresponse */ +#define MC_CMD_FC_OUT_TRC_READ_LEN 16 +#define MC_CMD_FC_OUT_TRC_READ_DATA_OFST 0 +#define MC_CMD_FC_OUT_TRC_READ_DATA_LEN 4 +#define MC_CMD_FC_OUT_TRC_READ_DATA_NUM 4 + +/* MC_CMD_FC_OUT_TRC_WRITE msgresponse */ +#define MC_CMD_FC_OUT_TRC_WRITE_LEN 0 + +/* MC_CMD_FC_OUT_GET_VERSION msgresponse */ +#define MC_CMD_FC_OUT_GET_VERSION_LEN 12 +#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_OFST 0 +#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_LEN 4 +#define MC_CMD_FC_OUT_GET_VERSION_VERSION_OFST 4 +#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LEN 8 +#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LO_OFST 4 +#define MC_CMD_FC_OUT_GET_VERSION_VERSION_HI_OFST 8 + +/* MC_CMD_FC_OUT_TRC_RX_READ msgresponse */ +#define MC_CMD_FC_OUT_TRC_RX_READ_LEN 8 +#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_OFST 0 
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_LEN 4 +#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_NUM 2 + +/* MC_CMD_FC_OUT_TRC_RX_WRITE msgresponse */ +#define MC_CMD_FC_OUT_TRC_RX_WRITE_LEN 0 + +/* MC_CMD_FC_OUT_MAC_RECONFIGURE msgresponse */ +#define MC_CMD_FC_OUT_MAC_RECONFIGURE_LEN 0 + +/* MC_CMD_FC_OUT_MAC_SET_LINK msgresponse */ +#define MC_CMD_FC_OUT_MAC_SET_LINK_LEN 0 + +/* MC_CMD_FC_OUT_MAC_READ_STATUS msgresponse */ +#define MC_CMD_FC_OUT_MAC_READ_STATUS_LEN 4 +#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_OFST 0 +#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_LEN 4 + +/* MC_CMD_FC_OUT_MAC_GET_RX_STATS msgresponse */ +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_RX_NSTATS))+1))>>3) +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN 8 +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LO_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_HI_OFST 4 +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_RX_NSTATS +#define MC_CMD_FC_MAC_RX_STATS_OCTETS 0x0 /* enum */ +#define MC_CMD_FC_MAC_RX_OCTETS_OK 0x1 /* enum */ +#define MC_CMD_FC_MAC_RX_ALIGNMENT_ERRORS 0x2 /* enum */ +#define MC_CMD_FC_MAC_RX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */ +#define MC_CMD_FC_MAC_RX_FRAMES_OK 0x4 /* enum */ +#define MC_CMD_FC_MAC_RX_CRC_ERRORS 0x5 /* enum */ +#define MC_CMD_FC_MAC_RX_VLAN_OK 0x6 /* enum */ +#define MC_CMD_FC_MAC_RX_ERRORS 0x7 /* enum */ +#define MC_CMD_FC_MAC_RX_UCAST_PKTS 0x8 /* enum */ +#define MC_CMD_FC_MAC_RX_MULTICAST_PKTS 0x9 /* enum */ +#define MC_CMD_FC_MAC_RX_BROADCAST_PKTS 0xa /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_DROP_EVENTS 0xb /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS 0xc /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_UNDERSIZE_PKTS 0xd /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_64 0xe /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_65_127 0xf /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_128_255 0x10 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_256_511 0x11 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_512_1023 0x12 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_1024_1518 0x13 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_1519_MAX 0x14 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_OVERSIZE_PKTS 0x15 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_JABBERS 0x16 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_FRAGMENTS 0x17 /* enum */ +#define MC_CMD_FC_MAC_RX_MAC_CONTROL_FRAMES 0x18 /* enum */ +/* enum: (Last entry) */ +#define MC_CMD_FC_MAC_RX_NSTATS 0x19 + +/* MC_CMD_FC_OUT_MAC_GET_TX_STATS msgresponse */ +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_TX_NSTATS))+1))>>3) +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LEN 8 +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LO_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_HI_OFST 4 +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_TX_NSTATS +#define MC_CMD_FC_MAC_TX_STATS_OCTETS 0x0 /* enum */ +#define MC_CMD_FC_MAC_TX_OCTETS_OK 0x1 /* enum */ +#define MC_CMD_FC_MAC_TX_ALIGNMENT_ERRORS 0x2 /* enum */ +#define MC_CMD_FC_MAC_TX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */ +#define MC_CMD_FC_MAC_TX_FRAMES_OK 0x4 /* enum */ +#define MC_CMD_FC_MAC_TX_CRC_ERRORS 0x5 /* enum */ +#define MC_CMD_FC_MAC_TX_VLAN_OK 0x6 /* enum */ +#define MC_CMD_FC_MAC_TX_ERRORS 0x7 /* enum */ +#define MC_CMD_FC_MAC_TX_UCAST_PKTS 0x8 /* enum */ +#define MC_CMD_FC_MAC_TX_MULTICAST_PKTS 0x9 /* enum */ +#define MC_CMD_FC_MAC_TX_BROADCAST_PKTS 0xa /* 
enum */ +#define MC_CMD_FC_MAC_TX_STATS_DROP_EVENTS 0xb /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS 0xc /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_UNDERSIZE_PKTS 0xd /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS_64 0xe /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS_65_127 0xf /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS_128_255 0x10 /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS_256_511 0x11 /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS_512_1023 0x12 /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS_1024_1518 0x13 /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS_1519_TX_MTU 0x14 /* enum */ +#define MC_CMD_FC_MAC_TX_MAC_CONTROL_FRAMES 0x15 /* enum */ +/* enum: (Last entry) */ +#define MC_CMD_FC_MAC_TX_NSTATS 0x16 + +/* MC_CMD_FC_OUT_MAC_GET_STATS msgresponse */ +#define MC_CMD_FC_OUT_MAC_GET_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_NSTATS_PER_BLOCK))+1))>>3) +/* MAC Statistics */ +#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LEN 8 +#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LO_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_HI_OFST 4 +#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_NUM MC_CMD_FC_MAC_NSTATS_PER_BLOCK + +/* MC_CMD_FC_OUT_MAC msgresponse */ +#define MC_CMD_FC_OUT_MAC_LEN 0 + +/* MC_CMD_FC_OUT_SFP msgresponse */ +#define MC_CMD_FC_OUT_SFP_LEN 0 + +/* MC_CMD_FC_OUT_DDR_TEST_START msgresponse */ +#define MC_CMD_FC_OUT_DDR_TEST_START_LEN 0 + +/* MC_CMD_FC_OUT_DDR_TEST_POLL msgresponse */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_LEN 8 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST 0 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_LEN 4 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN 0 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH 8 +/* enum: Test not yet initiated */ +#define MC_CMD_FC_OP_DDR_TEST_NONE 0x0 +/* enum: Test is in progress */ +#define MC_CMD_FC_OP_DDR_TEST_INPROGRESS 0x1 +/* enum: Test completed */ +#define MC_CMD_FC_OP_DDR_TEST_SUCCESS 0x2 +/* enum: Test did not complete in specified time */ +#define MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED 0x3 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN 11 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_LBN 10 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_LBN 9 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_LBN 8 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_WIDTH 1 +/* Test result from FPGA */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_OFST 4 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_LEN 4 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_LBN 31 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_LBN 30 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_LBN 29 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_LBN 28 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN 15 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH 5 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_LBN 10 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_WIDTH 5 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_LBN 5 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_WIDTH 5 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_LBN 0 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_WIDTH 5 +#define 
MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_COMPLETE 0x0 /* enum */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_FAIL 0x1 /* enum */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS 0x2 /* enum */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_FAIL 0x3 /* enum */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_SUCCESS 0x4 /* enum */ + +/* MC_CMD_FC_OUT_DDR_TEST msgresponse */ +#define MC_CMD_FC_OUT_DDR_TEST_LEN 0 + +/* MC_CMD_FC_OUT_GET_ASSERT msgresponse */ +#define MC_CMD_FC_OUT_GET_ASSERT_LEN 144 +/* Assertion status flag. */ +#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_LEN 4 +#define MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN 8 +#define MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH 8 +/* enum: No crash data available */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0 +/* enum: New crash data available */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1 +/* enum: Crash data has been sent */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2 +#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN 0 +#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH 8 +/* enum: No crash has been recorded. */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0 +/* enum: Crash due to exception. */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1 +/* enum: Crash due to assertion. */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2 +/* Failing PC value */ +#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_OFST 8 +#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_LEN 4 +#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_NUM 31 +/* Exception Type */ +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_OFST 132 +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_LEN 4 +/* Instruction at which exception occurred */ +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_OFST 136 +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_LEN 4 +/* BAD Address that triggered address-based exception */ +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_OFST 140 +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_LEN 4 + +/* MC_CMD_FC_OUT_FPGA_BUILD msgresponse */ +#define MC_CMD_FC_OUT_FPGA_BUILD_LEN 32 +#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_LBN 31 +#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_LBN 30 +#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_WIDTH 14 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH 4 +/* Build timestamp (seconds since epoch) */ +#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_OFST 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_OFST 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_WIDTH 8 +#define MC_CMD_FC_FPGA_TYPE_A7 0xa7 /* enum */ +#define MC_CMD_FC_FPGA_TYPE_A5 0xa5 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_LBN 8 +#define 
MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_WIDTH 10 +#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_LBN 18 +#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_LBN 19 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_LBN 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_LBN 21 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_LBN 22 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_LBN 23 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_LBN 24 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_LBN 25 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_LBN 26 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_LBN 27 +#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_LBN 28 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_LBN 29 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_WIDTH 2 +#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_LBN 31 +#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_OFST 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_WIDTH 1 +#define MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 /* enum */ +#define MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_LBN 17 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_WIDTH 15 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_OFST 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_OFST 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_OFST 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LEN 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LO_OFST 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_HI_OFST 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_OFST 24 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_OFST 28 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_WIDTH 16 + +/* MC_CMD_FC_OUT_FPGA_BUILD_V2 msgresponse */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_LEN 32 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_OFST 0 +#define 
MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_LBN 31 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_LBN 30 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_WIDTH 14 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_LBN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_WIDTH 4 +/* Build timestamp (seconds since epoch) */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_OFST 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_OFST 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_LBN 31 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_LBN 29 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_LBN 28 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_LBN 27 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_LBN 26 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_LBN 25 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_LBN 24 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_LBN 23 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_LBN 22 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_LBN 21 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_LBN 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_LBN 19 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_LBN 18 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_10G 0x0 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_40G 0x1 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_LBN 17 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_10G 0x0 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_40G 0x1 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_10G 0x0 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_40G 0x1 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_LBN 15 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_LBN 14 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_LBN 13 +#define 
MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_LBN 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_LBN 11 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_LBN 10 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_LBN 9 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_LBN 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_LBN 7 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_LBN 6 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_LBN 5 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_LBN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_WIDTH 4 +#define MC_CMD_FC_FPGA_V2_TYPE_A3 0x0 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_A4 0x1 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_A5 0x2 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_A7 0x3 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_D3 0x8 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_D4 0x9 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_D5 0xa /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_D7 0xb /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_OFST 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_WIDTH 1 +/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */ +/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_OFST 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_OFST 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_OFST 24 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_OFST 28 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_WIDTH 16 + +/* MC_CMD_FC_OUT_FPGA_SERVICES msgresponse */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_LEN 32 +#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_LBN 31 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_LBN 30 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN 16 
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH 14 +#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_LBN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_WIDTH 4 +/* Build timestamp (seconds since epoch) */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_OFST 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_OFST 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_LBN 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_LBN 27 +#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_LBN 28 +#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_LBN 29 +#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_LBN 30 +#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_LBN 31 +#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_OFST 12 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_OFST 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_OFST 20 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_OFST 24 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_OFST 28 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_WIDTH 16 + +/* MC_CMD_FC_OUT_FPGA_SERVICES_V2 msgresponse */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_LEN 32 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_OFST 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_LBN 31 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_LBN 30 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_WIDTH 14 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_LBN 4 +#define 
MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_WIDTH 4 +/* Build timestamp (seconds since epoch) */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_OFST 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_OFST 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_LBN 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_OFST 12 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_WIDTH 1 +/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */ +/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_OFST 24 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_OFST 28 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_WIDTH 16 + +/* MC_CMD_FC_OUT_BSP_VERSION msgresponse */ +#define MC_CMD_FC_OUT_BSP_VERSION_LEN 4 +/* Qsys system ID */ +#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_OFST 0 +#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_LEN 4 +#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_LBN 4 +#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_WIDTH 4 + +/* MC_CMD_FC_OUT_READ_MAP_COUNT msgresponse */ +#define MC_CMD_FC_OUT_READ_MAP_COUNT_LEN 4 +/* Number of maps */ +#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_OFST 0 +#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_LEN 4 + +/* MC_CMD_FC_OUT_READ_MAP_INDEX msgresponse */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN 164 +/* Index of the map */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_OFST 0 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_LEN 4 +/* Options for the map */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST 4 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_LEN 4 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_8 0x0 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_16 0x1 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_32 0x2 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_64 0x3 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK 0x3 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_FC 0x4 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_MEM 0x8 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ 0x10 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE 0x20 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_FREE 0x0 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED 0x40 /* enum */ +/* Address of start of map */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_OFST 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LEN 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LO_OFST 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_HI_OFST 12 +/* Length of 
address map */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_OFST 16 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LEN 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LO_OFST 16 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_HI_OFST 20 +/* Component information field */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_OFST 24 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_LEN 4 +/* License expiry data for map */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_OFST 28 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LEN 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LO_OFST 28 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_HI_OFST 32 +/* Name of the component */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_OFST 36 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_LEN 1 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_NUM 128 + +/* MC_CMD_FC_OUT_READ_MAP msgresponse */ +#define MC_CMD_FC_OUT_READ_MAP_LEN 0 + +/* MC_CMD_FC_OUT_CAPABILITIES msgresponse */ +#define MC_CMD_FC_OUT_CAPABILITIES_LEN 8 +/* Number of internal ports */ +#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_OFST 0 +#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_LEN 4 +/* Number of external ports */ +#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_OFST 4 +#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_LEN 4 + +/* MC_CMD_FC_OUT_GLOBAL_FLAGS msgresponse */ +#define MC_CMD_FC_OUT_GLOBAL_FLAGS_LEN 4 +#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_OFST 0 +#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_LEN 4 + +/* MC_CMD_FC_OUT_IO_REL msgresponse */ +#define MC_CMD_FC_OUT_IO_REL_LEN 0 + +/* MC_CMD_FC_OUT_IO_REL_GET_ADDR msgresponse */ +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_LEN 8 +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_OFST 0 +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_LEN 4 +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_OFST 4 +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_LEN 4 + +/* MC_CMD_FC_OUT_IO_REL_READ32 msgresponse */ +#define MC_CMD_FC_OUT_IO_REL_READ32_LENMIN 4 +#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX 252 +#define MC_CMD_FC_OUT_IO_REL_READ32_LEN(num) (0+4*(num)) +#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST 0 +#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN 4 +#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM 1 +#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM 63 + +/* MC_CMD_FC_OUT_IO_REL_WRITE32 msgresponse */ +#define MC_CMD_FC_OUT_IO_REL_WRITE32_LEN 0 + +/* MC_CMD_FC_OUT_UHLINK_PHY msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_PHY_LEN 48 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_OFST 0 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_LBN 0 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_WIDTH 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_LBN 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_WIDTH 16 +/* Transceiver Transmit settings */ +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_OFST 4 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_LBN 0 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_WIDTH 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_LBN 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_WIDTH 16 +/* Transceiver Receive settings */ +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_OFST 8 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_LBN 0 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_WIDTH 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_LBN 16 +#define 
MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_WIDTH 16 +/* Rx eye opening */ +#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_OFST 12 +#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_LBN 0 +#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_WIDTH 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_LBN 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_WIDTH 16 +/* PCS status word */ +#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_OFST 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_LEN 4 +/* Link status word */ +#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_OFST 20 +#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_LBN 0 +#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WIDTH 1 +#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_LBN 1 +#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_WIDTH 1 +/* Current SFp parameters applied */ +#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_OFST 24 +#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_LEN 20 +/* Link speed is 100, 1000, 10000 */ +#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_OFST 24 +#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_LEN 4 +/* Length of copper cable - zero when not relevant */ +#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_OFST 28 +#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_LEN 4 +/* True if a dual speed SFP+ module */ +#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_OFST 32 +#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_LEN 4 +/* True if an SFP Module is present (other fields valid when true) */ +#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_OFST 36 +#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_LEN 4 +/* The type of the SFP+ Module */ +#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_OFST 40 +#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_LEN 4 +/* PHY config flags */ +#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_OFST 44 +#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_LBN 0 +#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_WIDTH 1 +#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_LBN 1 +#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_WIDTH 1 +#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_LBN 2 +#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_WIDTH 1 + +/* MC_CMD_FC_OUT_UHLINK_MAC msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_MAC_LEN 20 +/* MAC configuration applied */ +#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_OFST 0 +#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_LEN 4 +/* MTU size */ +#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_OFST 4 +#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_LEN 4 +/* IF Mode status */ +#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_OFST 8 +#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_LEN 4 +/* MAC address configured */ +#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_OFST 12 +#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LEN 8 +#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST 12 +#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST 16 + +/* MC_CMD_FC_OUT_UHLINK_RX_EYE msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_RX_EYE_LEN ((((0-1+(32*MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK))+1))>>3) +/* Rx Eye measurements */ +#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_OFST 0 +#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_NUM MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK + +/* MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT_LEN 0 + +/* MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_LEN ((((32-1+(64*MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK))+1))>>3) +/* 
Has the eye plot dump completed and data returned is valid? */ +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_OFST 0 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_LEN 4 +/* Rx Eye binary plot */ +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_OFST 4 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LEN 8 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LO_OFST 4 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_HI_OFST 8 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_NUM MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK + +/* MC_CMD_FC_OUT_UHLINK_RX_TUNE msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_RX_TUNE_LEN 0 + +/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET_LEN 0 + +/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_OFST 0 +#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_LEN 4 + +/* MC_CMD_FC_OUT_UHLINK msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_LEN 0 + +/* MC_CMD_FC_OUT_SET_LINK msgresponse */ +#define MC_CMD_FC_OUT_SET_LINK_LEN 0 + +/* MC_CMD_FC_OUT_LICENSE msgresponse */ +#define MC_CMD_FC_OUT_LICENSE_LEN 12 +/* Count of valid keys */ +#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_OFST 0 +#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_LEN 4 +/* Count of invalid keys */ +#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_OFST 4 +#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_LEN 4 +/* Count of blacklisted keys */ +#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_OFST 8 +#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_LEN 4 + +/* MC_CMD_FC_OUT_STARTUP msgresponse */ +#define MC_CMD_FC_OUT_STARTUP_LEN 4 +/* Capabilities of the FPGA/FC */ +#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST 0 +#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_LEN 4 +#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN 0 +#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH 1 + +/* MC_CMD_FC_OUT_DMA_READ msgresponse */ +#define MC_CMD_FC_OUT_DMA_READ_LENMIN 1 +#define MC_CMD_FC_OUT_DMA_READ_LENMAX 252 +#define MC_CMD_FC_OUT_DMA_READ_LEN(num) (0+1*(num)) +/* The data read */ +#define MC_CMD_FC_OUT_DMA_READ_DATA_OFST 0 +#define MC_CMD_FC_OUT_DMA_READ_DATA_LEN 1 +#define MC_CMD_FC_OUT_DMA_READ_DATA_MINNUM 1 +#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM 252 + +/* MC_CMD_FC_OUT_TIMED_READ_SET msgresponse */ +#define MC_CMD_FC_OUT_TIMED_READ_SET_LEN 4 +/* Timer handle */ +#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_OFST 0 +#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_LEN 4 + +/* MC_CMD_FC_OUT_TIMED_READ_GET msgresponse */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_LEN 52 +/* Host supplied handle (unique) */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_OFST 0 +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_LEN 4 +/* Address into which to transfer data in host */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_OFST 4 +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LEN 8 +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST 4 +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST 8 +/* AOE address from which to transfer data */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_OFST 12 +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LEN 8 +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LO_OFST 12 +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_HI_OFST 16 +/* Length of AOE transfer (total) */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_OFST 20 +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_LEN 4 +/* Length of host transfer (total) */ 
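/*
 * [Editor's note -- illustrative sketch, not part of the patch.] The
 * definitions above follow a uniform MCDI convention: *_OFST/*_LEN give a
 * field's byte offset and size within the little-endian message payload;
 * *_LBN/*_WIDTH locate a bitfield (least-significant bit number and width
 * in bits) within a 32-bit word; 64-bit quantities are split into two
 * dwords named *_LO_OFST/*_HI_OFST; and variable-length messages provide
 * *_LENMIN/*_LENMAX bounds plus a *_LEN(num) macro giving the total size
 * for `num` array elements (e.g. MC_CMD_FC_OUT_DMA_READ_LEN(num) above).
 * A minimal decoder illustrating the convention; the helper names are
 * hypothetical and real drivers use their own accessor macros:
 */
#include <stdint.h>
#include <string.h>

static inline uint32_t
mcdi_dword(const uint8_t *payload, unsigned int ofst)
{
	uint32_t v;

	/* MCDI payloads are little-endian; a little-endian host is assumed. */
	memcpy(&v, payload + ofst, sizeof (v));
	return (v);
}

static inline uint32_t
mcdi_bitfield(const uint8_t *payload, unsigned int ofst,
	unsigned int lbn, unsigned int width)
{
	uint32_t mask = (width == 32) ? UINT32_MAX : ((1u << width) - 1u);

	/* Bitfields in this scheme do not straddle a dword boundary. */
	return ((mcdi_dword(payload, ofst) >> lbn) & mask);
}

static inline uint64_t
mcdi_qword(const uint8_t *payload, unsigned int lo_ofst, unsigned int hi_ofst)
{
	return (((uint64_t)mcdi_dword(payload, hi_ofst) << 32) |
	    mcdi_dword(payload, lo_ofst));
}

/*
 * Usage against the definitions above: a MC_CMD_FC_OUT_TIMED_READ_GET
 * response would yield its host DMA address, and a MC_CMD_FC_OUT_STARTUP
 * response its flash-access capability bit, as:
 *
 *   uint64_t addr = mcdi_qword(payload,
 *       MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST,
 *       MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST);
 *   uint32_t flash_ok = mcdi_bitfield(payload,
 *       MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST,
 *       MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN,
 *       MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH);
 */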
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_OFST 24 +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_LEN 4 +/* See FLAGS entry for MC_CMD_FC_IN_TIMED_READ_SET */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_OFST 28 +#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_LEN 4 +#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_OFST 32 +#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_LEN 4 +/* When active, start read time */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_OFST 36 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LEN 8 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LO_OFST 36 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_HI_OFST 40 +/* When active, end read time */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_OFST 44 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LEN 8 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LO_OFST 44 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_HI_OFST 48 + +/* MC_CMD_FC_OUT_LOG_ADDR_RANGE msgresponse */ +#define MC_CMD_FC_OUT_LOG_ADDR_RANGE_LEN 0 + +/* MC_CMD_FC_OUT_LOG msgresponse */ +#define MC_CMD_FC_OUT_LOG_LEN 0 + +/* MC_CMD_FC_OUT_CLOCK_GET_TIME msgresponse */ +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_LEN 24 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_OFST 0 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_LEN 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_OFST 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LEN 8 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LO_OFST 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_HI_OFST 8 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_OFST 12 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_LEN 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_OFST 16 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_LEN 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_OFST 20 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_LEN 4 + +/* MC_CMD_FC_OUT_CLOCK_SET_TIME msgresponse */ +#define MC_CMD_FC_OUT_CLOCK_SET_TIME_LEN 0 + +/* MC_CMD_FC_OUT_DDR_SET_SPD msgresponse */ +#define MC_CMD_FC_OUT_DDR_SET_SPD_LEN 0 + +/* MC_CMD_FC_OUT_DDR_SET_INFO msgresponse */ +#define MC_CMD_FC_OUT_DDR_SET_INFO_LEN 0 + +/* MC_CMD_FC_OUT_DDR_GET_STATUS msgresponse */ +#define MC_CMD_FC_OUT_DDR_GET_STATUS_LEN 4 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_OFST 0 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_LEN 4 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN 0 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_LBN 1 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_WIDTH 1 + +/* MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT msgresponse */ +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_LEN 8 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_OFST 0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_LEN 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_OFST 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_LEN 4 + +/* MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT msgresponse */ +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMIN 8 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX 248 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LEN(num) (0+8*(num)) +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_OFST 0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_LEN 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_OFST 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_LEN 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_OFST 0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LEN 8 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LO_OFST 
0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_HI_OFST 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MINNUM 0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM 31 + +/* MC_CMD_FC_OUT_SPI_READ msgresponse */ +#define MC_CMD_FC_OUT_SPI_READ_LENMIN 4 +#define MC_CMD_FC_OUT_SPI_READ_LENMAX 252 +#define MC_CMD_FC_OUT_SPI_READ_LEN(num) (0+4*(num)) +#define MC_CMD_FC_OUT_SPI_READ_BUFFER_OFST 0 +#define MC_CMD_FC_OUT_SPI_READ_BUFFER_LEN 4 +#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MINNUM 1 +#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM 63 + +/* MC_CMD_FC_OUT_SPI_WRITE msgresponse */ +#define MC_CMD_FC_OUT_SPI_WRITE_LEN 0 + +/* MC_CMD_FC_OUT_SPI_ERASE msgresponse */ +#define MC_CMD_FC_OUT_SPI_ERASE_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG msgresponse */ +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_LEN 8 +/* The 32-bit value read from the toggle count register */ +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_OFST 0 +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_LEN 4 +/* The 32-bit value read from the clock enable count register */ +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_OFST 4 +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_LEN 4 + +/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG msgresponse */ +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_START msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_START_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_LEN 8 +/* DDR soak test status word; bits [4:0] are relevant. */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_OFST 0 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_LEN 4 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_LBN 0 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_WIDTH 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_LBN 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_WIDTH 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_LBN 2 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_WIDTH 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_LBN 3 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_WIDTH 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_LBN 4 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_WIDTH 1 +/* DDR soak test error count */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_OFST 4 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_LEN 4 + +/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 0 + + +/***********************************/ +/* MC_CMD_AOE + * AOE operations on MC + */ +#define MC_CMD_AOE 0xa + +/* MC_CMD_AOE_IN msgrequest */ +#define MC_CMD_AOE_IN_LEN 4 +#define MC_CMD_AOE_IN_OP_HDR_OFST 0 +#define MC_CMD_AOE_IN_OP_HDR_LEN 4 +#define MC_CMD_AOE_IN_OP_LBN 0 +#define MC_CMD_AOE_IN_OP_WIDTH 8 +/* enum: FPGA and CPLD information */ +#define MC_CMD_AOE_OP_INFO 0x1 +/* enum: Currents and voltages read from MCP3424s; DEBUG */ +#define MC_CMD_AOE_OP_CURRENTS 0x2 +/* enum: Temperatures at locations around the PCB; DEBUG */ +#define MC_CMD_AOE_OP_TEMPERATURES 0x3 +/* 
enum: Set CPLD to idle */ +#define MC_CMD_AOE_OP_CPLD_IDLE 0x4 +/* enum: Read from CPLD register */ +#define MC_CMD_AOE_OP_CPLD_READ 0x5 +/* enum: Write to CPLD register */ +#define MC_CMD_AOE_OP_CPLD_WRITE 0x6 +/* enum: Execute CPLD instruction */ +#define MC_CMD_AOE_OP_CPLD_INSTRUCTION 0x7 +/* enum: Reprogram the CPLD on the AOE device */ +#define MC_CMD_AOE_OP_CPLD_REPROGRAM 0x8 +/* enum: AOE power control */ +#define MC_CMD_AOE_OP_POWER 0x9 +/* enum: AOE image loading */ +#define MC_CMD_AOE_OP_LOAD 0xa +/* enum: Fan monitoring */ +#define MC_CMD_AOE_OP_FAN_CONTROL 0xb +/* enum: Fan failures since last reset */ +#define MC_CMD_AOE_OP_FAN_FAILURES 0xc +/* enum: Get generic AOE MAC statistics */ +#define MC_CMD_AOE_OP_MAC_STATS 0xd +/* enum: Retrieve PHY specific information */ +#define MC_CMD_AOE_OP_GET_PHY_MEDIA_INFO 0xe +/* enum: Write a number of JTAG primitive commands, return will give data */ +#define MC_CMD_AOE_OP_JTAG_WRITE 0xf +/* enum: Control access to the FPGA via the Siena JTAG Chain */ +#define MC_CMD_AOE_OP_FPGA_ACCESS 0x10 +/* enum: Set the MTU offset between Siena and AOE MACs */ +#define MC_CMD_AOE_OP_SET_MTU_OFFSET 0x11 +/* enum: How link state is handled */ +#define MC_CMD_AOE_OP_LINK_STATE 0x12 +/* enum: How Siena MAC statistics are reported (deprecated - use + * MC_CMD_AOE_OP_ASIC_STATS) + */ +#define MC_CMD_AOE_OP_SIENA_STATS 0x13 +/* enum: How native ASIC MAC statistics are reported - replaces the deprecated + * command MC_CMD_AOE_OP_SIENA_STATS + */ +#define MC_CMD_AOE_OP_ASIC_STATS 0x13 +/* enum: DDR memory information */ +#define MC_CMD_AOE_OP_DDR 0x14 +/* enum: FC control */ +#define MC_CMD_AOE_OP_FC 0x15 +/* enum: DDR ECC status reads */ +#define MC_CMD_AOE_OP_DDR_ECC_STATUS 0x16 +/* enum: Commands for MC-SPI Master emulation */ +#define MC_CMD_AOE_OP_MC_SPI_MASTER 0x17 +/* enum: Commands for FC boot control */ +#define MC_CMD_AOE_OP_FC_BOOT 0x18 +/* enum: Get number of internal ports */ +#define MC_CMD_AOE_OP_GET_ASIC_PORTS 0x19 +/* enum: Get FC assert information and register dump */ +#define MC_CMD_AOE_OP_GET_FC_ASSERT_INFO 0x1a + +/* MC_CMD_AOE_OUT msgresponse */ +#define MC_CMD_AOE_OUT_LEN 0 + +/* MC_CMD_AOE_IN_INFO msgrequest */ +#define MC_CMD_AOE_IN_INFO_LEN 4 +#define MC_CMD_AOE_IN_CMD_OFST 0 +#define MC_CMD_AOE_IN_CMD_LEN 4 + +/* MC_CMD_AOE_IN_CURRENTS msgrequest */ +#define MC_CMD_AOE_IN_CURRENTS_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_TEMPERATURES msgrequest */ +#define MC_CMD_AOE_IN_TEMPERATURES_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_CPLD_IDLE msgrequest */ +#define MC_CMD_AOE_IN_CPLD_IDLE_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_CPLD_READ msgrequest */ +#define MC_CMD_AOE_IN_CPLD_READ_LEN 12 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_OFST 4 +#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_LEN 4 +#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_OFST 8 +#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_LEN 4 + +/* MC_CMD_AOE_IN_CPLD_WRITE msgrequest */ +#define MC_CMD_AOE_IN_CPLD_WRITE_LEN 16 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_OFST 4 +#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_LEN 4 +#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_OFST 8 +#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_LEN 4 +#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_OFST 12 +#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_LEN 4 + +/* MC_CMD_AOE_IN_CPLD_INSTRUCTION 
msgrequest */ +#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_OFST 4 +#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_LEN 4 + +/* MC_CMD_AOE_IN_CPLD_REPROGRAM msgrequest */ +#define MC_CMD_AOE_IN_CPLD_REPROGRAM_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_OFST 4 +#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_LEN 4 +/* enum: Reprogram CPLD, poll for completion */ +#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM 0x1 +/* enum: Reprogram CPLD, send event on completion */ +#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM_EVENT 0x3 +/* enum: Get status of reprogramming operation */ +#define MC_CMD_AOE_IN_CPLD_REPROGRAM_STATUS 0x4 + +/* MC_CMD_AOE_IN_POWER msgrequest */ +#define MC_CMD_AOE_IN_POWER_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* Turn on or off AOE power */ +#define MC_CMD_AOE_IN_POWER_OP_OFST 4 +#define MC_CMD_AOE_IN_POWER_OP_LEN 4 +/* enum: Turn off FPGA power */ +#define MC_CMD_AOE_IN_POWER_OFF 0x0 +/* enum: Turn on FPGA power */ +#define MC_CMD_AOE_IN_POWER_ON 0x1 +/* enum: Clear peak power measurement */ +#define MC_CMD_AOE_IN_POWER_CLEAR 0x2 +/* enum: Show current power in sensors output */ +#define MC_CMD_AOE_IN_POWER_SHOW_CURRENT 0x3 +/* enum: Show peak power in sensors output */ +#define MC_CMD_AOE_IN_POWER_SHOW_PEAK 0x4 +/* enum: Show current DDR current */ +#define MC_CMD_AOE_IN_POWER_DDR_LAST 0x5 +/* enum: Show peak DDR current */ +#define MC_CMD_AOE_IN_POWER_DDR_PEAK 0x6 +/* enum: Clear peak DDR current */ +#define MC_CMD_AOE_IN_POWER_DDR_CLEAR 0x7 + +/* MC_CMD_AOE_IN_LOAD msgrequest */ +#define MC_CMD_AOE_IN_LOAD_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* Image to be loaded (0 - main or 1 - diagnostic) to load in normal sequence + */ +#define MC_CMD_AOE_IN_LOAD_IMAGE_OFST 4 +#define MC_CMD_AOE_IN_LOAD_IMAGE_LEN 4 + +/* MC_CMD_AOE_IN_FAN_CONTROL msgrequest */ +#define MC_CMD_AOE_IN_FAN_CONTROL_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* If non zero report measured fan RPM rather than nominal */ +#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_OFST 4 +#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_LEN 4 + +/* MC_CMD_AOE_IN_FAN_FAILURES msgrequest */ +#define MC_CMD_AOE_IN_FAN_FAILURES_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_MAC_STATS msgrequest */ +#define MC_CMD_AOE_IN_MAC_STATS_LEN 24 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* AOE port */ +#define MC_CMD_AOE_IN_MAC_STATS_PORT_OFST 4 +#define MC_CMD_AOE_IN_MAC_STATS_PORT_LEN 4 +/* Host memory address for statistics */ +#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_OFST 8 +#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LEN 8 +#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST 8 +#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST 12 +#define MC_CMD_AOE_IN_MAC_STATS_CMD_OFST 16 +#define MC_CMD_AOE_IN_MAC_STATS_CMD_LEN 4 +#define MC_CMD_AOE_IN_MAC_STATS_DMA_LBN 0 +#define MC_CMD_AOE_IN_MAC_STATS_DMA_WIDTH 1 +#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_LBN 1 +#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_WIDTH 1 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN 2 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_WIDTH 1 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN 3 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_WIDTH 1 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_LBN 4 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_WIDTH 
1 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_LBN 5 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_WIDTH 1 +#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN 16 +#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_WIDTH 16 +/* Length of DMA data (optional) */ +#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST 20 +#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_LEN 4 + +/* MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO msgrequest */ +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_LEN 12 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* AOE port */ +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_OFST 4 +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_LEN 4 +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_OFST 8 +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_LEN 4 + +/* MC_CMD_AOE_IN_JTAG_WRITE msgrequest */ +#define MC_CMD_AOE_IN_JTAG_WRITE_LENMIN 12 +#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX 252 +#define MC_CMD_AOE_IN_JTAG_WRITE_LEN(num) (8+4*(num)) +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_OFST 4 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_LEN 4 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_OFST 8 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_LEN 4 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MINNUM 1 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM 61 + +/* MC_CMD_AOE_IN_FPGA_ACCESS msgrequest */ +#define MC_CMD_AOE_IN_FPGA_ACCESS_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* Enable or disable access */ +#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_OFST 4 +#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_LEN 4 +/* enum: Enable access */ +#define MC_CMD_AOE_IN_FPGA_ACCESS_ENABLE 0x1 +/* enum: Disable access */ +#define MC_CMD_AOE_IN_FPGA_ACCESS_DISABLE 0x2 + +/* MC_CMD_AOE_IN_SET_MTU_OFFSET msgrequest */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN 12 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* AOE port - when not ALL_EXTERNAL or ALL_INTERNAL specifies port number */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST 4 +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_LEN 4 +/* enum: Apply to all external ports */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL 0x8000 +/* enum: Apply to all internal ports */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_INTERNAL 0x4000 +/* The MTU offset to be applied to the external ports */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST 8 +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_LEN 4 + +/* MC_CMD_AOE_IN_LINK_STATE msgrequest */ +#define MC_CMD_AOE_IN_LINK_STATE_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_LINK_STATE_MODE_OFST 4 +#define MC_CMD_AOE_IN_LINK_STATE_MODE_LEN 4 +#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_LBN 0 +#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_WIDTH 8 +/* enum: AOE and associated external port */ +#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_SEPARATE 0x0 +/* enum: AOE and OR of all external ports */ +#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_COMBINED 0x1 +/* enum: Individual ports */ +#define MC_CMD_AOE_IN_LINK_STATE_DIAGNOSTIC 0x2 +/* enum: Configure link state mode on given AOE port */ +#define MC_CMD_AOE_IN_LINK_STATE_CUSTOM 0x3 +#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_LBN 8 +#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_WIDTH 8 +/* enum: No-op */ +#define MC_CMD_AOE_IN_LINK_STATE_OP_NONE 0x0 +/* enum: logical OR of all SFP ports link status */ +#define MC_CMD_AOE_IN_LINK_STATE_OP_OR 0x1 +/* enum: logical AND of all SFP ports link status */ +#define MC_CMD_AOE_IN_LINK_STATE_OP_AND 0x2 +#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_LBN 16 +#define 
MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_WIDTH 16 + +/* MC_CMD_AOE_IN_GET_ASIC_PORTS msgrequest */ +#define MC_CMD_AOE_IN_GET_ASIC_PORTS_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_GET_FC_ASSERT_INFO msgrequest */ +#define MC_CMD_AOE_IN_GET_FC_ASSERT_INFO_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_SIENA_STATS msgrequest */ +#define MC_CMD_AOE_IN_SIENA_STATS_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* How MAC statistics are reported */ +#define MC_CMD_AOE_IN_SIENA_STATS_MODE_OFST 4 +#define MC_CMD_AOE_IN_SIENA_STATS_MODE_LEN 4 +/* enum: Statistics from Siena (default) */ +#define MC_CMD_AOE_IN_SIENA_STATS_STATS_SIENA 0x0 +/* enum: Statistics from AOE external ports */ +#define MC_CMD_AOE_IN_SIENA_STATS_STATS_AOE 0x1 + +/* MC_CMD_AOE_IN_ASIC_STATS msgrequest */ +#define MC_CMD_AOE_IN_ASIC_STATS_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* How MAC statistics are reported */ +#define MC_CMD_AOE_IN_ASIC_STATS_MODE_OFST 4 +#define MC_CMD_AOE_IN_ASIC_STATS_MODE_LEN 4 +/* enum: Statistics from the ASIC (default) */ +#define MC_CMD_AOE_IN_ASIC_STATS_STATS_ASIC 0x0 +/* enum: Statistics from AOE external ports */ +#define MC_CMD_AOE_IN_ASIC_STATS_STATS_AOE 0x1 + +/* MC_CMD_AOE_IN_DDR msgrequest */ +#define MC_CMD_AOE_IN_DDR_LEN 12 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_DDR_BANK_OFST 4 +#define MC_CMD_AOE_IN_DDR_BANK_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */ +/* Page index of SPD data */ +#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_OFST 8 +#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_LEN 4 + +/* MC_CMD_AOE_IN_FC msgrequest */ +#define MC_CMD_AOE_IN_FC_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_DDR_ECC_STATUS msgrequest */ +#define MC_CMD_AOE_IN_DDR_ECC_STATUS_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_OFST 4 +#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */ + +/* MC_CMD_AOE_IN_MC_SPI_MASTER msgrequest */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* Basic commands for MC SPI Master emulation. 
*/ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_OFST 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_LEN 4 +/* enum: MC SPI read */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ 0x0 +/* enum: MC SPI write */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE 0x1 + +/* MC_CMD_AOE_IN_MC_SPI_MASTER_READ msgrequest */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_LEN 12 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_OFST 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_LEN 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_OFST 8 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_LEN 4 + +/* MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE msgrequest */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_LEN 16 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_OFST 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_LEN 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_OFST 8 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_LEN 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_OFST 12 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_LEN 4 + +/* MC_CMD_AOE_IN_FC_BOOT msgrequest */ +#define MC_CMD_AOE_IN_FC_BOOT_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* FC boot control flags */ +#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_OFST 4 +#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_LEN 4 +#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_LBN 0 +#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_WIDTH 1 + +/* MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO msgresponse */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_LEN 144 +/* Assertion status flag. */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GLOBAL_FLAGS_LEN 4 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_STATE_LBN 8 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_STATE_WIDTH 8 +/* enum: No crash data available */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0 */ +/* enum: New crash data available */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1 */ +/* enum: Crash data has been sent */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2 */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_TYPE_LBN 0 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_TYPE_WIDTH 8 +/* enum: No crash has been recorded. */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0 */ +/* enum: Crash due to exception. */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1 */ +/* enum: Crash due to assertion. 
*/ +/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2 */ +/* Failing PC value */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_OFST 8 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_LEN 4 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_NUM 31 +/* Exception Type */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_TYPE_OFFS_OFST 132 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_TYPE_OFFS_LEN 4 +/* Instruction at which exception occurred */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_PC_ADDR_OFFS_OFST 136 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_PC_ADDR_OFFS_LEN 4 +/* BAD Address that triggered address-based exception */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_BAD_ADDR_OFFS_OFST 140 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_BAD_ADDR_OFFS_LEN 4 + +/* MC_CMD_AOE_OUT_INFO msgresponse */ +#define MC_CMD_AOE_OUT_INFO_LEN 44 +/* JTAG IDCODE of CPLD */ +#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_OFST 0 +#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_LEN 4 +/* Version of CPLD */ +#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_OFST 4 +#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_LEN 4 +/* JTAG IDCODE of FPGA */ +#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_OFST 8 +#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_LEN 4 +/* JTAG USERCODE of FPGA */ +#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_OFST 12 +#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_LEN 4 +/* FPGA type - read from CPLD straps */ +#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_OFST 16 +#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_LEN 4 +#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A5_C2 0x1 /* enum */ +#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A7_C2 0x2 /* enum */ +/* FPGA state (debug) */ +#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_OFST 20 +#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_LEN 4 +/* FPGA image - partition from which loaded */ +#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_OFST 24 +#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_LEN 4 +/* FC state */ +#define MC_CMD_AOE_OUT_INFO_FC_STATE_OFST 28 +#define MC_CMD_AOE_OUT_INFO_FC_STATE_LEN 4 +/* enum: Set if watchdog working */ +#define MC_CMD_AOE_OUT_INFO_WATCHDOG 0x1 +/* enum: Set if MC-FC communications working */ +#define MC_CMD_AOE_OUT_INFO_COMMS 0x2 +/* Random pieces of information */ +#define MC_CMD_AOE_OUT_INFO_FLAGS_OFST 32 +#define MC_CMD_AOE_OUT_INFO_FLAGS_LEN 4 +/* enum: Power to FPGA supplied by PEG connector, not PCIe bus */ +#define MC_CMD_AOE_OUT_INFO_PEG_POWER 0x1 +/* enum: CPLD apparently good */ +#define MC_CMD_AOE_OUT_INFO_CPLD_GOOD 0x2 +/* enum: FPGA working normally */ +#define MC_CMD_AOE_OUT_INFO_FPGA_GOOD 0x4 +/* enum: FPGA is powered */ +#define MC_CMD_AOE_OUT_INFO_FPGA_POWER 0x8 +/* enum: Board has incompatible SODIMMs fitted */ +#define MC_CMD_AOE_OUT_INFO_BAD_SODIMM 0x10 +/* enum: Board has ByteBlaster connected */ +#define MC_CMD_AOE_OUT_INFO_HAS_BYTEBLASTER 0x20 +/* enum: FPGA Boot flash has an invalid header. */ +#define MC_CMD_AOE_OUT_INFO_FPGA_BAD_BOOT_HDR 0x40 +/* enum: FPGA Application flash is accessible. */ +#define MC_CMD_AOE_OUT_INFO_FPGA_APP_FLASH_GOOD 0x80 +/* Revision of Modena and Sorrento boards. Sorrento can be R1_2 or R1_3. 
*/ +#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_OFST 36 +#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_LEN 4 +#define MC_CMD_AOE_OUT_INFO_UNKNOWN 0x0 /* enum */ +#define MC_CMD_AOE_OUT_INFO_R1_0 0x10 /* enum */ +#define MC_CMD_AOE_OUT_INFO_R1_1 0x11 /* enum */ +#define MC_CMD_AOE_OUT_INFO_R1_2 0x12 /* enum */ +#define MC_CMD_AOE_OUT_INFO_R1_3 0x13 /* enum */ +/* Result of FC booting - not valid while a ByteBlaster is connected. */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_OFST 40 +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_LEN 4 +/* enum: No error */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_NO_ERROR 0x0 +/* enum: Bad address set in CPLD */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_ADDRESS 0x1 +/* enum: Bad header */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_MAGIC 0x2 +/* enum: Bad text section details */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_TEXT 0x3 +/* enum: Bad checksum */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4 +/* enum: Bad BSP */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5 +/* enum: Flash mode is invalid */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6 +/* enum: FC application loaded and execution attempted */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80 +/* enum: FC application Started */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_STARTED 0x81 +/* enum: No bootrom in FPGA */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_NO_BOOTROM 0xff + +/* MC_CMD_AOE_OUT_CURRENTS msgresponse */ +#define MC_CMD_AOE_OUT_CURRENTS_LEN 68 +/* Set of currents and voltages (mA or mV as appropriate) */ +#define MC_CMD_AOE_OUT_CURRENTS_VALUES_OFST 0 +#define MC_CMD_AOE_OUT_CURRENTS_VALUES_LEN 4 +#define MC_CMD_AOE_OUT_CURRENTS_VALUES_NUM 17 +#define MC_CMD_AOE_OUT_CURRENTS_I_2V5 0x0 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_1V8 0x1 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_GXB 0x2 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_PGM 0x3 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_XCVR 0x4 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_1V5 0x5 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_V_3V3 0x6 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_V_1V5 0x7 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_IN 0x8 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_OUT 0x9 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_V_IN 0xa /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR1 0xb /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR1 0xc /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR2 0xd /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR2 0xe /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR3 0xf /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR3 0x10 /* enum */ + +/* MC_CMD_AOE_OUT_TEMPERATURES msgresponse */ +#define MC_CMD_AOE_OUT_TEMPERATURES_LEN 40 +/* Set of temperatures */ +#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_OFST 0 +#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_LEN 4 +#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_NUM 10 +/* enum: The first set of enum values are for Modena code. 
*/ +#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_0 0x0 +#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_1 0x1 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_IND_0 0x2 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_IND_1 0x3 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO1 0x4 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO2 0x5 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO3 0x6 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_PSU 0x7 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_FPGA 0x8 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SIENA 0x9 /* enum */ +/* enum: The second set of enum values are for Sorrento code. */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_0 0x0 +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_1 0x1 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_0 0x2 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_1 0x3 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_0 0x4 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_1 0x5 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_FPGA 0x6 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY0 0x7 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY1 0x8 /* enum */ + +/* MC_CMD_AOE_OUT_CPLD_READ msgresponse */ +#define MC_CMD_AOE_OUT_CPLD_READ_LEN 4 +/* The value read from the CPLD */ +#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_OFST 0 +#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_LEN 4 + +/* MC_CMD_AOE_OUT_FAN_FAILURES msgresponse */ +#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMIN 4 +#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX 252 +#define MC_CMD_AOE_OUT_FAN_FAILURES_LEN(num) (0+4*(num)) +/* Failure counts for each fan */ +#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_OFST 0 +#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_LEN 4 +#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MINNUM 1 +#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM 63 + +/* MC_CMD_AOE_OUT_CPLD_REPROGRAM msgresponse */ +#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_LEN 4 +/* Results of status command (only) */ +#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_OFST 0 +#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_LEN 4 + +/* MC_CMD_AOE_OUT_POWER_OFF msgresponse */ +#define MC_CMD_AOE_OUT_POWER_OFF_LEN 0 + +/* MC_CMD_AOE_OUT_POWER_ON msgresponse */ +#define MC_CMD_AOE_OUT_POWER_ON_LEN 0 + +/* MC_CMD_AOE_OUT_LOAD msgresponse */ +#define MC_CMD_AOE_OUT_LOAD_LEN 0 + +/* MC_CMD_AOE_OUT_MAC_STATS_DMA msgresponse */ +#define MC_CMD_AOE_OUT_MAC_STATS_DMA_LEN 0 + +/* MC_CMD_AOE_OUT_MAC_STATS_NO_DMA msgresponse: See MC_CMD_MAC_STATS_OUT_NO_DMA + * for details + */ +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3) +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS + +/* MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO msgresponse */ +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMIN 5 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX 252 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LEN(num) (4+1*(num)) +/* in bytes */ +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_OFST 0 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_LEN 4 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_OFST 4 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_LEN 1 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MINNUM 1 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM 248 + +/* 
MC_CMD_AOE_OUT_JTAG_WRITE msgresponse */ +#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMIN 12 +#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX 252 +#define MC_CMD_AOE_OUT_JTAG_WRITE_LEN(num) (8+4*(num)) +/* Used to align the in and out data blocks so the MC can re-use the cmd */ +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_OFST 0 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_LEN 4 +/* out bytes */ +#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_OFST 4 +#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_LEN 4 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_OFST 8 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_LEN 4 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MINNUM 1 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM 61 + +/* MC_CMD_AOE_OUT_FPGA_ACCESS msgresponse */ +#define MC_CMD_AOE_OUT_FPGA_ACCESS_LEN 0 + +/* MC_CMD_AOE_OUT_DDR msgresponse */ +#define MC_CMD_AOE_OUT_DDR_LENMIN 17 +#define MC_CMD_AOE_OUT_DDR_LENMAX 252 +#define MC_CMD_AOE_OUT_DDR_LEN(num) (16+1*(num)) +/* Information on the module. */ +#define MC_CMD_AOE_OUT_DDR_FLAGS_OFST 0 +#define MC_CMD_AOE_OUT_DDR_FLAGS_LEN 4 +#define MC_CMD_AOE_OUT_DDR_PRESENT_LBN 0 +#define MC_CMD_AOE_OUT_DDR_PRESENT_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_POWERED_LBN 1 +#define MC_CMD_AOE_OUT_DDR_POWERED_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_LBN 2 +#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_LBN 3 +#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_WIDTH 1 +/* Memory size, in MB. */ +#define MC_CMD_AOE_OUT_DDR_CAPACITY_OFST 4 +#define MC_CMD_AOE_OUT_DDR_CAPACITY_LEN 4 +/* The memory type, as reported from SPD information */ +#define MC_CMD_AOE_OUT_DDR_TYPE_OFST 8 +#define MC_CMD_AOE_OUT_DDR_TYPE_LEN 4 +/* Nominal voltage of the module (as applied) */ +#define MC_CMD_AOE_OUT_DDR_VOLTAGE_OFST 12 +#define MC_CMD_AOE_OUT_DDR_VOLTAGE_LEN 4 +/* SPD data read from the module */ +#define MC_CMD_AOE_OUT_DDR_SPD_OFST 16 +#define MC_CMD_AOE_OUT_DDR_SPD_LEN 1 +#define MC_CMD_AOE_OUT_DDR_SPD_MINNUM 1 +#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM 236 + +/* MC_CMD_AOE_OUT_SET_MTU_OFFSET msgresponse */ +#define MC_CMD_AOE_OUT_SET_MTU_OFFSET_LEN 0 + +/* MC_CMD_AOE_OUT_LINK_STATE msgresponse */ +#define MC_CMD_AOE_OUT_LINK_STATE_LEN 0 + +/* MC_CMD_AOE_OUT_SIENA_STATS msgresponse */ +#define MC_CMD_AOE_OUT_SIENA_STATS_LEN 0 + +/* MC_CMD_AOE_OUT_ASIC_STATS msgresponse */ +#define MC_CMD_AOE_OUT_ASIC_STATS_LEN 0 + +/* MC_CMD_AOE_OUT_FC msgresponse */ +#define MC_CMD_AOE_OUT_FC_LEN 0 + +/* MC_CMD_AOE_OUT_GET_ASIC_PORTS msgresponse */ +#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_LEN 4 +/* get the number of internal ports */ +#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_COUNT_PORTS_OFST 0 +#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_COUNT_PORTS_LEN 4 + +/* MC_CMD_AOE_OUT_DDR_ECC_STATUS msgresponse */ +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_LEN 8 +/* Flags describing status info on the module. */ +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_OFST 0 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_LEN 4 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_LBN 0 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_WIDTH 1 +/* DDR ECC status on the module. 
*/ +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_OFST 4 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_LEN 4 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN 0 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_LBN 1 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_LBN 2 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN 8 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH 8 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_LBN 16 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_WIDTH 8 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_LBN 24 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_WIDTH 8 + +/* MC_CMD_AOE_OUT_MC_SPI_MASTER_READ msgresponse */ +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_LEN 4 +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_OFST 0 +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_LEN 4 + +/* MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE msgresponse */ +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE_LEN 0 + +/* MC_CMD_AOE_OUT_MC_SPI_MASTER msgresponse */ +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_LEN 0 + +/* MC_CMD_AOE_OUT_FC_BOOT msgresponse */ +#define MC_CMD_AOE_OUT_FC_BOOT_LEN 0 + +#endif /* _SIENA_MC_DRIVER_PCOL_AOE_H */ +/*! \cidoxg_end */ diff --git a/drivers/net/sfc/base/efx_rx.c b/drivers/net/sfc/base/efx_rx.c index c0dcb752..4fd73bab 100644 --- a/drivers/net/sfc/base/efx_rx.c +++ b/drivers/net/sfc/base/efx_rx.c @@ -107,7 +107,7 @@ siena_rx_qcreate( __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, - __in uint32_t type_data, + __in const efx_rxq_type_data_t *type_data, __in efsys_mem_t *esmp, __in size_t ndescs, __in uint32_t id, @@ -151,7 +151,7 @@ static const efx_rx_ops_t __efx_rx_siena_ops = { }; #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_rx_ops_t __efx_rx_ef10_ops = { ef10_rx_init, /* erxo_init */ ef10_rx_fini, /* erxo_fini */ @@ -178,7 +178,7 @@ static const efx_rx_ops_t __efx_rx_ef10_ops = { ef10_rx_qcreate, /* erxo_qcreate */ ef10_rx_qdestroy, /* erxo_qdestroy */ }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t @@ -220,6 +220,12 @@ efx_rx_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + erxop = &__efx_rx_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; @@ -288,6 +294,94 @@ fail1: #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE + __checkReturn efx_rc_t +efx_rx_scale_hash_flags_get( + __in efx_nic_t *enp, + __in efx_rx_hash_alg_t hash_alg, + __inout_ecount(EFX_RX_HASH_NFLAGS) unsigned int *flags, + __out unsigned int *nflagsp) +{ + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + boolean_t l4; + boolean_t additional_modes; + unsigned int *entryp = flags; + efx_rc_t rc; + + if (flags == NULL || nflagsp == NULL) { + rc = EINVAL; + goto fail1; + } + + l4 = encp->enc_rx_scale_l4_hash_supported; + additional_modes = encp->enc_rx_scale_additional_modes_supported; + +#define LIST_FLAGS(_entryp, _class, _l4_hashing, _additional_modes) \ + do { \ + if (_l4_hashing) { \ + *(_entryp++) = EFX_RX_HASH(_class, 4TUPLE); \ + \ + if (_additional_modes) { \ + *(_entryp++) = \ + EFX_RX_HASH(_class, 2TUPLE_DST); \ + *(_entryp++) = \ + EFX_RX_HASH(_class, 2TUPLE_SRC); \ + } \ + } \ + \ + 
*(_entryp++) = EFX_RX_HASH(_class, 2TUPLE); \ + \ + if (_additional_modes) { \ + *(_entryp++) = EFX_RX_HASH(_class, 1TUPLE_DST); \ + *(_entryp++) = EFX_RX_HASH(_class, 1TUPLE_SRC); \ + } \ + \ + *(_entryp++) = EFX_RX_HASH(_class, DISABLE); \ + \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + + switch (hash_alg) { + case EFX_RX_HASHALG_PACKED_STREAM: + if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0) + break; + /* FALLTHRU */ + case EFX_RX_HASHALG_TOEPLITZ: + if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0) + break; + + LIST_FLAGS(entryp, IPV4_TCP, l4, additional_modes); + LIST_FLAGS(entryp, IPV6_TCP, l4, additional_modes); + + if (additional_modes) { + LIST_FLAGS(entryp, IPV4_UDP, l4, additional_modes); + LIST_FLAGS(entryp, IPV6_UDP, l4, additional_modes); + } + + LIST_FLAGS(entryp, IPV4, B_FALSE, additional_modes); + LIST_FLAGS(entryp, IPV6, B_FALSE, additional_modes); + break; + + default: + rc = EINVAL; + goto fail2; + } + +#undef LIST_FLAGS + + *nflagsp = (unsigned int)(entryp - flags); + EFSYS_ASSERT3U(*nflagsp, <=, EFX_RX_HASH_NFLAGS); + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + __checkReturn efx_rc_t efx_rx_hash_default_support_get( __in efx_nic_t *enp, @@ -419,19 +513,82 @@ efx_rx_scale_mode_set( __in boolean_t insert) { const efx_rx_ops_t *erxop = enp->en_erxop; + unsigned int type_flags[EFX_RX_HASH_NFLAGS]; + unsigned int type_nflags; + efx_rx_hash_type_t type_check; + unsigned int i; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); + /* + * Legacy flags and modern bits cannot be + * used at the same time in the hash type. + */ + if ((type & EFX_RX_HASH_LEGACY_MASK) && + (type & ~EFX_RX_HASH_LEGACY_MASK)) { + rc = EINVAL; + goto fail1; + } + + /* + * Translate legacy flags to the new representation + * so that chip-specific handlers will consider the + * new flags only. + */ + if (type & EFX_RX_HASH_IPV4) { + type |= EFX_RX_HASH(IPV4, 2TUPLE); + type |= EFX_RX_HASH(IPV4_TCP, 2TUPLE); + type |= EFX_RX_HASH(IPV4_UDP, 2TUPLE); + } + + if (type & EFX_RX_HASH_TCPIPV4) + type |= EFX_RX_HASH(IPV4_TCP, 4TUPLE); + + if (type & EFX_RX_HASH_IPV6) { + type |= EFX_RX_HASH(IPV6, 2TUPLE); + type |= EFX_RX_HASH(IPV6_TCP, 2TUPLE); + type |= EFX_RX_HASH(IPV6_UDP, 2TUPLE); + } + + if (type & EFX_RX_HASH_TCPIPV6) + type |= EFX_RX_HASH(IPV6_TCP, 4TUPLE); + + type &= ~EFX_RX_HASH_LEGACY_MASK; + type_check = type; + + /* + * Get the list of supported hash flags and sanitise the input. 
+ */ + rc = efx_rx_scale_hash_flags_get(enp, alg, type_flags, &type_nflags); + if (rc != 0) + goto fail2; + + for (i = 0; i < type_nflags; ++i) { + if ((type_check & type_flags[i]) == type_flags[i]) + type_check &= ~(type_flags[i]); + } + + if (type_check != 0) { + rc = EINVAL; + goto fail3; + } + if (erxop->erxo_scale_mode_set != NULL) { if ((rc = erxop->erxo_scale_mode_set(enp, rss_context, alg, type, insert)) != 0) - goto fail1; + goto fail4; } return (0); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); @@ -594,7 +751,7 @@ efx_rx_qcreate_internal( __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, - __in uint32_t type_data, + __in const efx_rxq_type_data_t *type_data, __in efsys_mem_t *esmp, __in size_t ndescs, __in uint32_t id, @@ -655,8 +812,8 @@ efx_rx_qcreate( __in efx_evq_t *eep, __deref_out efx_rxq_t **erpp) { - return efx_rx_qcreate_internal(enp, index, label, type, 0, esmp, ndescs, - id, flags, eep, erpp); + return efx_rx_qcreate_internal(enp, index, label, type, NULL, + esmp, ndescs, id, flags, eep, erpp); } #if EFSYS_OPT_RX_PACKED_STREAM @@ -672,13 +829,71 @@ efx_rx_qcreate_packed_stream( __in efx_evq_t *eep, __deref_out efx_rxq_t **erpp) { + efx_rxq_type_data_t type_data; + + memset(&type_data, 0, sizeof(type_data)); + + type_data.ertd_packed_stream.eps_buf_size = ps_buf_size; + return efx_rx_qcreate_internal(enp, index, label, - EFX_RXQ_TYPE_PACKED_STREAM, ps_buf_size, esmp, ndescs, + EFX_RXQ_TYPE_PACKED_STREAM, &type_data, esmp, ndescs, 0 /* id unused on EF10 */, EFX_RXQ_FLAG_NONE, eep, erpp); } #endif +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + + __checkReturn efx_rc_t +efx_rx_qcreate_es_super_buffer( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in uint32_t n_bufs_per_desc, + __in uint32_t max_dma_len, + __in uint32_t buf_stride, + __in uint32_t hol_block_timeout, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in unsigned int flags, + __in efx_evq_t *eep, + __deref_out efx_rxq_t **erpp) +{ + efx_rc_t rc; + efx_rxq_type_data_t type_data; + + if (hol_block_timeout > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) { + rc = EINVAL; + goto fail1; + } + + memset(&type_data, 0, sizeof(type_data)); + + type_data.ertd_es_super_buffer.eessb_bufs_per_desc = n_bufs_per_desc; + type_data.ertd_es_super_buffer.eessb_max_dma_len = max_dma_len; + type_data.ertd_es_super_buffer.eessb_buf_stride = buf_stride; + type_data.ertd_es_super_buffer.eessb_hol_block_timeout = + hol_block_timeout; + + rc = efx_rx_qcreate_internal(enp, index, label, + EFX_RXQ_TYPE_ES_SUPER_BUFFER, &type_data, esmp, ndescs, + 0 /* id unused on EF10 */, flags, eep, erpp); + if (rc != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif + + void efx_rx_qdestroy( __in efx_rxq_t *erp) @@ -875,6 +1090,10 @@ siena_rx_scale_mode_set( __in efx_rx_hash_type_t type, __in boolean_t insert) { + efx_rx_hash_type_t type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE); + efx_rx_hash_type_t type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE); + efx_rx_hash_type_t type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE); + efx_rx_hash_type_t type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE); efx_rc_t rc; if (rss_context != EFX_RSS_CONTEXT_DEFAULT) { @@ -889,12 +1108,12 @@ siena_rx_scale_mode_set( case EFX_RX_HASHALG_TOEPLITZ: EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert, - type & EFX_RX_HASH_IPV4, - type & EFX_RX_HASH_TCPIPV4); + (type & type_ipv4) == 
type_ipv4, + (type & type_ipv4_tcp) == type_ipv4_tcp); EFX_RX_TOEPLITZ_IPV6_HASH(enp, - type & EFX_RX_HASH_IPV6, - type & EFX_RX_HASH_TCPIPV6, + (type & type_ipv6) == type_ipv6, + (type & type_ipv6_tcp) == type_ipv6_tcp, rc); if (rc != 0) goto fail2; @@ -1320,7 +1539,7 @@ siena_rx_qcreate( __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, - __in uint32_t type_data, + __in const efx_rxq_type_data_t *type_data, __in efsys_mem_t *esmp, __in size_t ndescs, __in uint32_t id, diff --git a/drivers/net/sfc/base/efx_sram.c b/drivers/net/sfc/base/efx_sram.c index 1f0ba0a9..7851ff13 100644 --- a/drivers/net/sfc/base/efx_sram.c +++ b/drivers/net/sfc/base/efx_sram.c @@ -25,9 +25,10 @@ efx_sram_buf_tbl_set( EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 if (enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD) { + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2) { /* * FIXME: the efx_sram_buf_tbl_*() functionality needs to be * pulled inside the Falcon/Siena queue create/destroy code, @@ -39,7 +40,7 @@ efx_sram_buf_tbl_set( return (0); } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ if (stop >= EFX_BUF_TBL_SIZE) { rc = EFBIG; @@ -147,9 +148,10 @@ efx_sram_buf_tbl_clear( EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 if (enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD) { + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2) { /* * FIXME: the efx_sram_buf_tbl_*() functionality needs to be * pulled inside the Falcon/Siena queue create/destroy code, @@ -161,7 +163,7 @@ efx_sram_buf_tbl_clear( return; } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ EFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE); diff --git a/drivers/net/sfc/base/efx_tunnel.c b/drivers/net/sfc/base/efx_tunnel.c index 25fa976f..399fd540 100644 --- a/drivers/net/sfc/base/efx_tunnel.c +++ b/drivers/net/sfc/base/efx_tunnel.c @@ -17,20 +17,20 @@ static const efx_tunnel_ops_t __efx_tunnel_dummy_ops = { }; #endif /* EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON */ -#if EFSYS_OPT_MEDFORD +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static __checkReturn boolean_t -medford_udp_encap_supported( +ef10_udp_encap_supported( __in efx_nic_t *enp); static __checkReturn efx_rc_t -medford_tunnel_reconfigure( +ef10_tunnel_reconfigure( __in efx_nic_t *enp); -static const efx_tunnel_ops_t __efx_tunnel_medford_ops = { - medford_udp_encap_supported, /* eto_udp_encap_supported */ - medford_tunnel_reconfigure, /* eto_reconfigure */ +static const efx_tunnel_ops_t __efx_tunnel_ef10_ops = { + ef10_udp_encap_supported, /* eto_udp_encap_supported */ + ef10_tunnel_reconfigure, /* eto_reconfigure */ }; -#endif /* EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ static __checkReturn efx_rc_t efx_mcdi_set_tunnel_encap_udp_ports( @@ -161,10 +161,16 @@ efx_tunnel_init( #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: - etop = &__efx_tunnel_medford_ops; + etop = &__efx_tunnel_ef10_ops; break; #endif /* 
EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + etop = &__efx_tunnel_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; @@ -394,9 +400,9 @@ fail1: return (rc); } -#if EFSYS_OPT_MEDFORD +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static __checkReturn boolean_t -medford_udp_encap_supported( +ef10_udp_encap_supported( __in efx_nic_t *enp) { const efx_nic_cfg_t *encp = &enp->en_nic_cfg; @@ -410,7 +416,7 @@ medford_udp_encap_supported( } static __checkReturn efx_rc_t -medford_tunnel_reconfigure( +ef10_tunnel_reconfigure( __in efx_nic_t *enp) { efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg; @@ -423,7 +429,7 @@ medford_tunnel_reconfigure( memcpy(&etc, etcp, sizeof (etc)); EFSYS_UNLOCK(enp->en_eslp, state); - if (medford_udp_encap_supported(enp) == B_FALSE) { + if (ef10_udp_encap_supported(enp) == B_FALSE) { /* * It is OK to apply empty UDP tunnel ports when UDP * tunnel encapsulations are not supported - just nothing @@ -458,6 +464,6 @@ fail1: return (rc); } -#endif /* EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ #endif /* EFSYS_OPT_TUNNEL */ diff --git a/drivers/net/sfc/base/efx_tx.c b/drivers/net/sfc/base/efx_tx.c index 4e02c869..da37580a 100644 --- a/drivers/net/sfc/base/efx_tx.c +++ b/drivers/net/sfc/base/efx_tx.c @@ -117,6 +117,7 @@ static const efx_tx_ops_t __efx_tx_siena_ops = { NULL, /* etxo_qdesc_tso_create */ NULL, /* etxo_qdesc_tso2_create */ NULL, /* etxo_qdesc_vlantci_create */ + NULL, /* etxo_qdesc_checksum_create */ #if EFSYS_OPT_QSTATS siena_tx_qstats_update, /* etxo_qstats_update */ #endif @@ -143,6 +144,7 @@ static const efx_tx_ops_t __efx_tx_hunt_ops = { ef10_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */ + ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */ #if EFSYS_OPT_QSTATS ef10_tx_qstats_update, /* etxo_qstats_update */ #endif @@ -169,12 +171,41 @@ static const efx_tx_ops_t __efx_tx_medford_ops = { NULL, /* etxo_qdesc_tso_create */ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */ + ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */ #if EFSYS_OPT_QSTATS ef10_tx_qstats_update, /* etxo_qstats_update */ #endif }; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 +static const efx_tx_ops_t __efx_tx_medford2_ops = { + ef10_tx_init, /* etxo_init */ + ef10_tx_fini, /* etxo_fini */ + ef10_tx_qcreate, /* etxo_qcreate */ + ef10_tx_qdestroy, /* etxo_qdestroy */ + ef10_tx_qpost, /* etxo_qpost */ + ef10_tx_qpush, /* etxo_qpush */ + ef10_tx_qpace, /* etxo_qpace */ + ef10_tx_qflush, /* etxo_qflush */ + ef10_tx_qenable, /* etxo_qenable */ + ef10_tx_qpio_enable, /* etxo_qpio_enable */ + ef10_tx_qpio_disable, /* etxo_qpio_disable */ + ef10_tx_qpio_write, /* etxo_qpio_write */ + ef10_tx_qpio_post, /* etxo_qpio_post */ + ef10_tx_qdesc_post, /* etxo_qdesc_post */ + ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */ + NULL, /* etxo_qdesc_tso_create */ + ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */ + ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */ + ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */ +#if EFSYS_OPT_QSTATS + ef10_tx_qstats_update, /* etxo_qstats_update */ +#endif +}; +#endif /* EFSYS_OPT_MEDFORD2 */ + + __checkReturn efx_rc_t efx_tx_init( __in efx_nic_t *enp) @@ -214,6 +245,12 @@ efx_tx_init( break; #endif /* EFSYS_OPT_MEDFORD 
*/ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + etxop = &__efx_tx_medford2_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; @@ -588,6 +625,7 @@ efx_tx_qdesc_tso_create( efx_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, + __in uint16_t outer_ipv4_id, __in uint32_t tcp_seq, __in uint16_t mss, __out_ecount(count) efx_desc_t *edp, @@ -599,7 +637,8 @@ efx_tx_qdesc_tso2_create( EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFSYS_ASSERT(etxop->etxo_qdesc_tso2_create != NULL); - etxop->etxo_qdesc_tso2_create(etp, ipv4_id, tcp_seq, mss, edp, count); + etxop->etxo_qdesc_tso2_create(etp, ipv4_id, outer_ipv4_id, + tcp_seq, mss, edp, count); } void @@ -617,6 +656,21 @@ efx_tx_qdesc_vlantci_create( etxop->etxo_qdesc_vlantci_create(etp, tci, edp); } + void +efx_tx_qdesc_checksum_create( + __in efx_txq_t *etp, + __in uint16_t flags, + __out efx_desc_t *edp) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + EFSYS_ASSERT(etxop->etxo_qdesc_checksum_create != NULL); + + etxop->etxo_qdesc_checksum_create(etp, flags, edp); +} + #if EFSYS_OPT_QSTATS void diff --git a/drivers/net/sfc/base/efx_types.h b/drivers/net/sfc/base/efx_types.h index 0581f67f..65168ab7 100644 --- a/drivers/net/sfc/base/efx_types.h +++ b/drivers/net/sfc/base/efx_types.h @@ -328,6 +328,16 @@ extern int fix_lint; #define FIX_LINT(_x) (_x) #endif +/* + * Saturation arithmetic subtract with minimum equal to zero. + * + * Use saturating arithmetic to ensure a non-negative result. This + * avoids undefined behaviour (and compiler warnings) when used as a + * shift count. + */ +#define EFX_SSUB(_val, _sub) \ + ((_val) > (_sub) ? ((_val) - (_sub)) : 0) + /* * Extract bit field portion [low,high) from the native-endian element * which contains bits [min,max). @@ -347,8 +357,8 @@ extern int fix_lint; ((FIX_LINT(_low > _max) || FIX_LINT(_high < _min)) ? \ 0U : \ ((_low > _min) ? \ - ((_element) >> (_low - _min)) : \ - ((_element) << (_min - _low)))) + ((_element) >> EFX_SSUB(_low, _min)) : \ + ((_element) << EFX_SSUB(_min, _low)))) /* * Extract bit field portion [low,high) from the 64-bit little-endian @@ -537,29 +547,29 @@ extern int fix_lint; (((_low > _max) || (_high < _min)) ? \ 0U : \ ((_low > _min) ? \ - (((uint64_t)(_value)) << (_low - _min)) : \ - (((uint64_t)(_value)) >> (_min - _low)))) + (((uint64_t)(_value)) << EFX_SSUB(_low, _min)) :\ + (((uint64_t)(_value)) >> EFX_SSUB(_min, _low)))) #define EFX_INSERT_NATIVE32(_min, _max, _low, _high, _value) \ (((_low > _max) || (_high < _min)) ? \ 0U : \ ((_low > _min) ? \ - (((uint32_t)(_value)) << (_low - _min)) : \ - (((uint32_t)(_value)) >> (_min - _low)))) + (((uint32_t)(_value)) << EFX_SSUB(_low, _min)) :\ + (((uint32_t)(_value)) >> EFX_SSUB(_min, _low)))) #define EFX_INSERT_NATIVE16(_min, _max, _low, _high, _value) \ (((_low > _max) || (_high < _min)) ? \ 0U : \ (uint16_t)((_low > _min) ? \ - ((_value) << (_low - _min)) : \ - ((_value) >> (_min - _low)))) + ((_value) << EFX_SSUB(_low, _min)) : \ + ((_value) >> EFX_SSUB(_min, _low)))) #define EFX_INSERT_NATIVE8(_min, _max, _low, _high, _value) \ (((_low > _max) || (_high < _min)) ? \ 0U : \ (uint8_t)((_low > _min) ? 
\ - ((_value) << (_low - _min)) : \ - ((_value) >> (_min - _low)))) + ((_value) << EFX_SSUB(_low, _min)) : \ + ((_value) >> EFX_SSUB(_min, _low)))) /* * Construct bit field portion @@ -1288,22 +1298,22 @@ extern int fix_lint; #define EFX_SHIFT64(_bit, _base) \ (((_bit) >= (_base) && (_bit) < (_base) + 64) ? \ - ((uint64_t)1 << ((_bit) - (_base))) : \ + ((uint64_t)1 << EFX_SSUB((_bit), (_base))) : \ 0U) #define EFX_SHIFT32(_bit, _base) \ (((_bit) >= (_base) && (_bit) < (_base) + 32) ? \ - ((uint32_t)1 << ((_bit) - (_base))) : \ + ((uint32_t)1 << EFX_SSUB((_bit),(_base))) : \ 0U) #define EFX_SHIFT16(_bit, _base) \ (((_bit) >= (_base) && (_bit) < (_base) + 16) ? \ - (uint16_t)(1 << ((_bit) - (_base))) : \ + (uint16_t)(1 << EFX_SSUB((_bit), (_base))) : \ 0U) #define EFX_SHIFT8(_bit, _base) \ (((_bit) >= (_base) && (_bit) < (_base) + 8) ? \ - (uint8_t)(1 << ((_bit) - (_base))) : \ + (uint8_t)(1 << EFX_SSUB((_bit), (_base))) : \ 0U) #define EFX_SET_OWORD_BIT64(_oword, _bit) \ diff --git a/drivers/net/sfc/base/efx_vpd.c b/drivers/net/sfc/base/efx_vpd.c index 7b8138f3..6d783d74 100644 --- a/drivers/net/sfc/base/efx_vpd.c +++ b/drivers/net/sfc/base/efx_vpd.c @@ -44,7 +44,7 @@ static const efx_vpd_ops_t __efx_vpd_siena_ops = { #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_vpd_ops_t __efx_vpd_ef10_ops = { ef10_vpd_init, /* evpdo_init */ @@ -59,7 +59,7 @@ static const efx_vpd_ops_t __efx_vpd_ef10_ops = { ef10_vpd_fini, /* evpdo_fini */ }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_vpd_init( @@ -91,6 +91,12 @@ efx_vpd_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + evpdop = &__efx_vpd_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; diff --git a/drivers/net/sfc/base/hunt_nic.c b/drivers/net/sfc/base/hunt_nic.c index d03cc138..16ea81d2 100644 --- a/drivers/net/sfc/base/hunt_nic.c +++ b/drivers/net/sfc/base/hunt_nic.c @@ -36,7 +36,7 @@ hunt_nic_get_required_pcie_bandwidth( goto out; } - if (port_modes & (1 << TLV_PORT_MODE_40G_40G)) { + if (port_modes & (1U << TLV_PORT_MODE_40G_40G)) { /* * This needs the full PCIe bandwidth (and could use * more) - roughly 64 Gbit/s for 8 lanes of Gen3. @@ -45,9 +45,9 @@ hunt_nic_get_required_pcie_bandwidth( EFX_PCIE_LINK_SPEED_GEN3, &bandwidth)) != 0) goto fail1; } else { - if (port_modes & (1 << TLV_PORT_MODE_40G)) { + if (port_modes & (1U << TLV_PORT_MODE_40G)) { max_port_mode = TLV_PORT_MODE_40G; - } else if (port_modes & (1 << TLV_PORT_MODE_10G_10G_10G_10G)) { + } else if (port_modes & (1U << TLV_PORT_MODE_10G_10G_10G_10G)) { max_port_mode = TLV_PORT_MODE_10G_10G_10G_10G; } else { /* Assume two 10G ports */ @@ -76,90 +76,13 @@ fail1: hunt_board_cfg( __in efx_nic_t *enp) { - efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); - uint8_t mac_addr[6] = { 0 }; - uint32_t board_type = 0; - ef10_link_state_t els; efx_port_t *epp = &(enp->en_port); - uint32_t port; - uint32_t pf; - uint32_t vf; - uint32_t mask; uint32_t flags; uint32_t sysclk, dpcpu_clk; - uint32_t base, nvec; uint32_t bandwidth; efx_rc_t rc; - if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0) - goto fail1; - - /* - * NOTE: The MCDI protocol numbers ports from zero. - * The common code MCDI interface numbers ports from one. 
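[Editorial sketch, not part of the patch.] EFX_SSUB() is introduced above because the EXTRACT/INSERT/SHIFT macros derive shift counts from field bounds: the range check always selects the arm whose subtraction is non-negative, but the other arm is still compiled, and a subtraction that can go negative is undefined behaviour as a shift count and draws compiler warnings. Saturating at zero keeps the unselected arm well formed while the guard guarantees its value is never used. A small worked illustration, reusing the macro exactly as defined in the patch:

#include <assert.h>
#include <stdint.h>

/* Copied verbatim from the patch. */
#define EFX_SSUB(_val, _sub) \
	((_val) > (_sub) ? ((_val) - (_sub)) : 0)

int
main(void)
{
	assert(EFX_SSUB(7, 3) == 4);	/* ordinary subtraction */
	assert(EFX_SSUB(3, 7) == 0);	/* saturates instead of going negative */

	/*
	 * Always a valid shift count: the guards in the EXTRACT/INSERT/
	 * SHIFT macros never select this arm when it would have
	 * saturated, so the harmless 1 << 0 result is discarded there.
	 */
	assert(((uint32_t)1 << EFX_SSUB(3, 7)) == 1);
	return (0);
}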
- */ - emip->emi_port = port + 1; - - if ((rc = ef10_external_port_mapping(enp, port, - &encp->enc_external_port)) != 0) - goto fail2; - - /* - * Get PCIe function number from firmware (used for - * per-function privilege and dynamic config info). - * - PCIe PF: pf = PF number, vf = 0xffff. - * - PCIe VF: pf = parent PF, vf = VF number. - */ - if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0) - goto fail3; - - encp->enc_pf = pf; - encp->enc_vf = vf; - - /* MAC address for this function */ - if (EFX_PCI_FUNCTION_IS_PF(encp)) { - rc = efx_mcdi_get_mac_address_pf(enp, mac_addr); - if ((rc == 0) && (mac_addr[0] & 0x02)) { - /* - * If the static config does not include a global MAC - * address pool then the board may return a locally - * administered MAC address (this should only happen on - * incorrectly programmed boards). - */ - rc = EINVAL; - } - } else { - rc = efx_mcdi_get_mac_address_vf(enp, mac_addr); - } - if (rc != 0) - goto fail4; - - EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); - - /* Board configuration */ - rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL); - if (rc != 0) { - /* Unprivileged functions may not be able to read board cfg */ - if (rc == EACCES) - board_type = 0; - else - goto fail5; - } - - encp->enc_board_type = board_type; - encp->enc_clk_mult = 1; /* not used for Huntington */ - - /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ - if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) - goto fail6; - - /* Obtain the default PHY advertised capabilities */ - if ((rc = ef10_phy_get_link(enp, &els)) != 0) - goto fail7; - epp->ep_default_adv_cap_mask = els.els_adv_cap_mask; - epp->ep_adv_cap_mask = els.els_adv_cap_mask; - /* * Enable firmware workarounds for hardware errata. * Expected responses are: @@ -187,7 +110,7 @@ hunt_board_cfg( else if ((rc == ENOTSUP) || (rc == ENOENT)) encp->enc_bug35388_workaround = B_FALSE; else - goto fail8; + goto fail1; /* * If the bug41750 workaround is enabled, then do not test interrupts, @@ -206,7 +129,7 @@ hunt_board_cfg( } else if ((rc == ENOTSUP) || (rc == ENOENT)) { encp->enc_bug41750_workaround = B_FALSE; } else { - goto fail9; + goto fail2; } if (EFX_PCI_FUNCTION_IS_VF(encp)) { /* Interrupt testing does not work for VFs. See bug50084. */ @@ -244,12 +167,12 @@ hunt_board_cfg( } else if ((rc == ENOTSUP) || (rc == ENOENT)) { encp->enc_bug26807_workaround = B_FALSE; } else { - goto fail10; + goto fail3; } /* Get clock frequencies (in MHz). */ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0) - goto fail11; + goto fail4; /* * The Huntington timer quantum is 1536 sysclk cycles, documented for @@ -266,80 +189,23 @@ hunt_board_cfg( encp->enc_bug61265_workaround = B_FALSE; /* Medford only */ - /* Check capabilities of running datapath firmware */ - if ((rc = ef10_get_datapath_caps(enp)) != 0) - goto fail12; - /* Alignment for receive packet DMA buffers */ encp->enc_rx_buf_align_start = 1; encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */ - /* Alignment for WPTR updates */ - encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN; - - /* - * Maximum number of exclusive RSS contexts which can be allocated. The - * hardware supports 64, but 6 are reserved for shared contexts. They - * are a global resource so not all may be available. - */ - encp->enc_rx_scale_max_exclusive_contexts = 58; - - encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT); - /* No boundary crossing limits */ - encp->enc_tx_dma_desc_boundary = 0; - - /* - * Set resource limits for MC_CMD_ALLOC_VIS. 
Note that we cannot use - * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available - * resources (allocated to this PCIe function), which is zero until - * after we have allocated VIs. - */ - encp->enc_evq_limit = 1024; - encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET; - encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET; - /* * The workaround for bug35388 uses the top bit of transmit queue * descriptor writes, preventing the use of 4096 descriptor TXQs. */ encp->enc_txq_max_ndescs = encp->enc_bug35388_workaround ? 2048 : 4096; - encp->enc_buftbl_limit = 0xFFFFFFFF; - + EFX_STATIC_ASSERT(HUNT_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS); encp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS; encp->enc_piobuf_size = HUNT_PIOBUF_SIZE; encp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE; - /* - * Get the current privilege mask. Note that this may be modified - * dynamically, so this value is informational only. DO NOT use - * the privilege mask to check for sufficient privileges, as that - * can result in time-of-check/time-of-use bugs. - */ - if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0) - goto fail13; - encp->enc_privilege_mask = mask; - - /* Get interrupt vector limits */ - if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) { - if (EFX_PCI_FUNCTION_IS_PF(encp)) - goto fail14; - - /* Ignore error (cannot query vector limits from a VF). */ - base = 0; - nvec = 1024; - } - encp->enc_intr_vec_base = base; - encp->enc_intr_limit = nvec; - - /* - * Maximum number of bytes into the frame the TCP header can start for - * firmware assisted TSO to work. - */ - encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT; - if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0) - goto fail15; + goto fail5; encp->enc_required_pcie_bandwidth_mbps = bandwidth; /* All Huntington devices have a PCIe Gen3, 8 lane connector */ @@ -347,26 +213,6 @@ hunt_board_cfg( return (0); -fail15: - EFSYS_PROBE(fail15); -fail14: - EFSYS_PROBE(fail14); -fail13: - EFSYS_PROBE(fail13); -fail12: - EFSYS_PROBE(fail12); -fail11: - EFSYS_PROBE(fail11); -fail10: - EFSYS_PROBE(fail10); -fail9: - EFSYS_PROBE(fail9); -fail8: - EFSYS_PROBE(fail8); -fail7: - EFSYS_PROBE(fail7); -fail6: - EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: diff --git a/drivers/net/sfc/base/mcdi_mon.c b/drivers/net/sfc/base/mcdi_mon.c index e4de0dab..940bd026 100644 --- a/drivers/net/sfc/base/mcdi_mon.c +++ b/drivers/net/sfc/base/mcdi_mon.c @@ -135,6 +135,10 @@ static const struct mcdi_sensor_map_s { STAT(Px, BOARD_BACK_TEMP), /* 0x50 BOARD_BACK_TEMP */ STAT(Px, I1V8), /* 0x51 IN_I1V8 */ STAT(Px, I2V5), /* 0x52 IN_I2V5 */ + STAT(Px, I3V3), /* 0x53 IN_I3V3 */ + STAT(Px, I12V0), /* 0x54 IN_I12V0 */ + STAT(Px, 1_3V), /* 0x55 IN_1V3 */ + STAT(Px, I1V3), /* 0x56 IN_I1V3 */ }; #define MCDI_STATIC_SENSOR_ASSERT(_field) \ @@ -476,6 +480,11 @@ mcdi_mon_cfg_build( case EFX_FAMILY_MEDFORD: encp->enc_mon_type = EFX_MON_SFC92X0; break; +#endif +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + encp->enc_mon_type = EFX_MON_SFC92X0; + break; #endif default: rc = EINVAL; diff --git a/drivers/net/sfc/base/medford2_impl.h b/drivers/net/sfc/base/medford2_impl.h new file mode 100644 index 00000000..6259a700 --- /dev/null +++ b/drivers/net/sfc/base/medford2_impl.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2015-2018 Solarflare Communications Inc. + * All rights reserved. 
+ */ + +#ifndef _SYS_MEDFORD2_IMPL_H +#define _SYS_MEDFORD2_IMPL_H + +#ifdef __cplusplus +extern "C" { +#endif + + +#ifndef ER_EZ_TX_PIOBUF_SIZE +#define ER_EZ_TX_PIOBUF_SIZE 4096 +#endif + + +#define MEDFORD2_PIOBUF_NBUFS (16) +#define MEDFORD2_PIOBUF_SIZE (ER_EZ_TX_PIOBUF_SIZE) + +#define MEDFORD2_MIN_PIO_ALLOC_SIZE (MEDFORD2_PIOBUF_SIZE / 32) + + +extern __checkReturn efx_rc_t +medford2_board_cfg( + __in efx_nic_t *enp); + + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_MEDFORD2_IMPL_H */ diff --git a/drivers/net/sfc/base/medford2_nic.c b/drivers/net/sfc/base/medford2_nic.c new file mode 100644 index 00000000..7f5ad175 --- /dev/null +++ b/drivers/net/sfc/base/medford2_nic.c @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2015-2018 Solarflare Communications Inc. + * All rights reserved. + */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFSYS_OPT_MEDFORD2 + +static __checkReturn efx_rc_t +medford2_nic_get_required_pcie_bandwidth( + __in efx_nic_t *enp, + __out uint32_t *bandwidth_mbpsp) +{ + uint32_t port_modes; + uint32_t current_mode; + uint32_t bandwidth; + efx_rc_t rc; + + /* FIXME: support new Medford2 dynamic port modes */ + + if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, + ¤t_mode)) != 0) { + /* No port mode info available. */ + bandwidth = 0; + goto out; + } + + if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode, + &bandwidth)) != 0) + goto fail1; + +out: + *bandwidth_mbpsp = bandwidth; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +medford2_board_cfg( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint32_t sysclk, dpcpu_clk; + uint32_t end_padding; + uint32_t bandwidth; + efx_rc_t rc; + + /* + * Enable firmware workarounds for hardware errata. + * Expected responses are: + * - 0 (zero): + * Success: workaround enabled or disabled as requested. + * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP): + * Firmware does not support the MC_CMD_WORKAROUND request. + * (assume that the workaround is not supported). + * - MC_CMD_ERR_ENOENT (reported as ENOENT): + * Firmware does not support the requested workaround. + * - MC_CMD_ERR_EPERM (reported as EACCES): + * Unprivileged function cannot enable/disable workarounds. + * + * See efx_mcdi_request_errcode() for MCDI error translations. + */ + + + if (EFX_PCI_FUNCTION_IS_VF(encp)) { + /* + * Interrupt testing does not work for VFs on Medford2. + * See bug50084 and bug71432 comment 21. + */ + encp->enc_bug41750_workaround = B_TRUE; + } + + /* Chained multicast is always enabled on Medford2 */ + encp->enc_bug26807_workaround = B_TRUE; + + /* + * If the bug61265 workaround is enabled, then interrupt holdoff timers + * cannot be controlled by timer table writes, so MCDI must be used + * (timer table writes can still be used for wakeup timers). + */ + rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE, + NULL); + if ((rc == 0) || (rc == EACCES)) + encp->enc_bug61265_workaround = B_TRUE; + else if ((rc == ENOTSUP) || (rc == ENOENT)) + encp->enc_bug61265_workaround = B_FALSE; + else + goto fail1; + + /* Get clock frequencies (in MHz). */ + if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0) + goto fail2; + + /* + * The Medford2 timer quantum is 1536 dpcpu_clk cycles, documented for + * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units. 
+ */ + encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */ + encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns << + FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000; + + /* Alignment for receive packet DMA buffers */ + encp->enc_rx_buf_align_start = 1; + + /* Get the RX DMA end padding alignment configuration */ + if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) { + if (rc != EACCES) + goto fail3; + + /* Assume largest tail padding size supported by hardware */ + end_padding = 256; + } + encp->enc_rx_buf_align_end = end_padding; + + /* + * The maximum supported transmit queue size is 2048. TXQs with 4096 + * descriptors are not supported as the top bit is used for vfifo + * stuffing. + */ + encp->enc_txq_max_ndescs = 2048; + + EFX_STATIC_ASSERT(MEDFORD2_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS); + encp->enc_piobuf_limit = MEDFORD2_PIOBUF_NBUFS; + encp->enc_piobuf_size = MEDFORD2_PIOBUF_SIZE; + encp->enc_piobuf_min_alloc_size = MEDFORD2_MIN_PIO_ALLOC_SIZE; + + /* + * Medford2 stores a single global copy of VPD, not per-PF as on + * Huntington. + */ + encp->enc_vpd_is_global = B_TRUE; + + rc = medford2_nic_get_required_pcie_bandwidth(enp, &bandwidth); + if (rc != 0) + goto fail4; + encp->enc_required_pcie_bandwidth_mbps = bandwidth; + encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/medford_nic.c b/drivers/net/sfc/base/medford_nic.c index 1365e9e3..6dc895f5 100644 --- a/drivers/net/sfc/base/medford_nic.c +++ b/drivers/net/sfc/base/medford_nic.c @@ -10,64 +10,6 @@ #if EFSYS_OPT_MEDFORD -static __checkReturn efx_rc_t -efx_mcdi_get_rxdp_config( - __in efx_nic_t *enp, - __out uint32_t *end_paddingp) -{ - efx_mcdi_req_t req; - uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN, - MC_CMD_GET_RXDP_CONFIG_OUT_LEN)]; - uint32_t end_padding; - efx_rc_t rc; - - memset(payload, 0, sizeof (payload)); - req.emr_cmd = MC_CMD_GET_RXDP_CONFIG; - req.emr_in_buf = payload; - req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN; - req.emr_out_buf = payload; - req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN; - - efx_mcdi_execute(enp, &req); - if (req.emr_rc != 0) { - rc = req.emr_rc; - goto fail1; - } - - if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA, - GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) { - /* RX DMA end padding is disabled */ - end_padding = 0; - } else { - switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA, - GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) { - case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64: - end_padding = 64; - break; - case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128: - end_padding = 128; - break; - case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256: - end_padding = 256; - break; - default: - rc = ENOTSUP; - goto fail2; - } - } - - *end_paddingp = end_padding; - - return (0); - -fail2: - EFSYS_PROBE(fail2); -fail1: - EFSYS_PROBE1(fail1, efx_rc_t, rc); - - return (rc); -} - static __checkReturn efx_rc_t medford_nic_get_required_pcie_bandwidth( __in efx_nic_t *enp, @@ -104,103 +46,12 @@ fail1: medford_board_cfg( __in efx_nic_t *enp) { - efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); - uint8_t mac_addr[6] = { 0 }; - uint32_t board_type = 0; - ef10_link_state_t els; - efx_port_t *epp = &(enp->en_port); - uint32_t port; - uint32_t pf; - uint32_t vf; - uint32_t mask; uint32_t sysclk, 
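[Editorial sketch, not part of the patch.] The EVQ timer constants computed in medford2_board_cfg() above follow directly from the 1536-cycle quantum: with the DPCPU clock reported in MHz, one cycle lasts 1000 / dpcpu_clk nanoseconds, so 1536000UL / dpcpu_clk gives nanoseconds per quantum, and the maximum timeout scales the quantum by the full range of the timer field. A worked example assuming a 1000 MHz DPCPU clock and a 16-bit timer field (both values are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t dpcpu_clk = 1000;	/* assumed DPCPU clock, in MHz */
	unsigned int timer_width = 16;	/* assumed timer field width */
	uint32_t quantum_ns;
	uint32_t max_us;

	/* 1536 cycles per quantum: 1536 * 1000 / clk-in-MHz gives ns */
	quantum_ns = 1536000UL / dpcpu_clk;		/* -> 1536 ns */
	max_us = ((uint64_t)quantum_ns << timer_width) / 1000;

	/* Prints: quantum 1536 ns, max timeout 100663 us (~100 ms) */
	printf("quantum %u ns, max timeout %u us\n", quantum_ns, max_us);
	return (0);
}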
dpcpu_clk; - uint32_t base, nvec; uint32_t end_padding; uint32_t bandwidth; efx_rc_t rc; - /* - * FIXME: Likely to be incomplete and incorrect. - * Parts of this should be shared with Huntington. - */ - - if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0) - goto fail1; - - /* - * NOTE: The MCDI protocol numbers ports from zero. - * The common code MCDI interface numbers ports from one. - */ - emip->emi_port = port + 1; - - if ((rc = ef10_external_port_mapping(enp, port, - &encp->enc_external_port)) != 0) - goto fail2; - - /* - * Get PCIe function number from firmware (used for - * per-function privilege and dynamic config info). - * - PCIe PF: pf = PF number, vf = 0xffff. - * - PCIe VF: pf = parent PF, vf = VF number. - */ - if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0) - goto fail3; - - encp->enc_pf = pf; - encp->enc_vf = vf; - - /* MAC address for this function */ - if (EFX_PCI_FUNCTION_IS_PF(encp)) { - rc = efx_mcdi_get_mac_address_pf(enp, mac_addr); -#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC - /* - * Disable static config checking for Medford NICs, ONLY - * for manufacturing test and setup at the factory, to - * allow the static config to be installed. - */ -#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ - if ((rc == 0) && (mac_addr[0] & 0x02)) { - /* - * If the static config does not include a global MAC - * address pool then the board may return a locally - * administered MAC address (this should only happen on - * incorrectly programmed boards). - */ - rc = EINVAL; - } -#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ - } else { - rc = efx_mcdi_get_mac_address_vf(enp, mac_addr); - } - if (rc != 0) - goto fail4; - - EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); - - /* Board configuration */ - rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL); - if (rc != 0) { - /* Unprivileged functions may not be able to read board cfg */ - if (rc == EACCES) - board_type = 0; - else - goto fail5; - } - - encp->enc_board_type = board_type; - encp->enc_clk_mult = 1; /* not used for Medford */ - - /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ - if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) - goto fail6; - - /* Obtain the default PHY advertised capabilities */ - if ((rc = ef10_phy_get_link(enp, &els)) != 0) - goto fail7; - epp->ep_default_adv_cap_mask = els.els_adv_cap_mask; - epp->ep_adv_cap_mask = els.els_adv_cap_mask; - /* * Enable firmware workarounds for hardware errata. * Expected responses are: @@ -220,8 +71,8 @@ medford_board_cfg( if (EFX_PCI_FUNCTION_IS_VF(encp)) { /* - * Interrupt testing does not work for VFs. See bug50084. - * FIXME: Does this still apply to Medford? + * Interrupt testing does not work for VFs. See bug50084 and + * bug71432 comment 21. */ encp->enc_bug41750_workaround = B_TRUE; } @@ -241,11 +92,11 @@ medford_board_cfg( else if ((rc == ENOTSUP) || (rc == ENOENT)) encp->enc_bug61265_workaround = B_FALSE; else - goto fail8; + goto fail1; /* Get clock frequencies (in MHz). 
*/ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0) - goto fail9; + goto fail2; /* * The Medford timer quantum is 1536 dpcpu_clk cycles, documented for @@ -255,47 +106,19 @@ medford_board_cfg( encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns << FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000; - /* Check capabilities of running datapath firmware */ - if ((rc = ef10_get_datapath_caps(enp)) != 0) - goto fail10; - /* Alignment for receive packet DMA buffers */ encp->enc_rx_buf_align_start = 1; /* Get the RX DMA end padding alignment configuration */ if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) { if (rc != EACCES) - goto fail11; + goto fail3; /* Assume largest tail padding size supported by hardware */ end_padding = 256; } encp->enc_rx_buf_align_end = end_padding; - /* Alignment for WPTR updates */ - encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN; - - /* - * Maximum number of exclusive RSS contexts which can be allocated. The - * hardware supports 64, but 6 are reserved for shared contexts. They - * are a global resource so not all may be available. - */ - encp->enc_rx_scale_max_exclusive_contexts = 58; - - encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT); - /* No boundary crossing limits */ - encp->enc_tx_dma_desc_boundary = 0; - - /* - * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use - * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available - * resources (allocated to this PCIe function), which is zero until - * after we have allocated VIs. - */ - encp->enc_evq_limit = 1024; - encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET; - encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET; - /* * The maximum supported transmit queue size is 2048. TXQs with 4096 * descriptors are not supported as the top bit is used for vfifo @@ -303,40 +126,11 @@ medford_board_cfg( */ encp->enc_txq_max_ndescs = 2048; - encp->enc_buftbl_limit = 0xFFFFFFFF; - + EFX_STATIC_ASSERT(MEDFORD_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS); encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS; encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE; encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE; - /* - * Get the current privilege mask. Note that this may be modified - * dynamically, so this value is informational only. DO NOT use - * the privilege mask to check for sufficient privileges, as that - * can result in time-of-check/time-of-use bugs. - */ - if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0) - goto fail12; - encp->enc_privilege_mask = mask; - - /* Get interrupt vector limits */ - if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) { - if (EFX_PCI_FUNCTION_IS_PF(encp)) - goto fail13; - - /* Ignore error (cannot query vector limits from a VF). */ - base = 0; - nvec = 1024; - } - encp->enc_intr_vec_base = base; - encp->enc_intr_limit = nvec; - - /* - * Maximum number of bytes into the frame the TCP header can start for - * firmware assisted TSO to work. - */ - encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT; - /* * Medford stores a single global copy of VPD, not per-PF as on * Huntington. 
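[Editorial sketch, not part of the patch.] Both medford_board_cfg() above and the new medford2_board_cfg() keep the same fallback around efx_mcdi_get_rxdp_config(): only EACCES is tolerated, since an unprivileged function may be refused the query, and in that case the largest end padding the hardware can apply (256 bytes) is assumed so receive buffer sizing stays safe. The pattern in isolation, with a hypothetical query function standing in for the MCDI call:

#include <errno.h>
#include <stdint.h>

/* Hypothetical query standing in for efx_mcdi_get_rxdp_config(). */
extern int query_rx_end_padding(uint32_t *paddingp);

static int
get_rx_end_padding(uint32_t *paddingp)
{
	int rc = query_rx_end_padding(paddingp);

	if (rc == EACCES) {
		/*
		 * Unprivileged function: assume the largest padding the
		 * hardware supports so DMA buffers are never undersized.
		 */
		*paddingp = 256;
		rc = 0;
	}

	/* Any error other than EACCES remains fatal to board config. */
	return (rc);
}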
@@ -345,32 +139,12 @@ medford_board_cfg( rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth); if (rc != 0) - goto fail14; + goto fail4; encp->enc_required_pcie_bandwidth_mbps = bandwidth; encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3; return (0); -fail14: - EFSYS_PROBE(fail14); -fail13: - EFSYS_PROBE(fail13); -fail12: - EFSYS_PROBE(fail12); -fail11: - EFSYS_PROBE(fail11); -fail10: - EFSYS_PROBE(fail10); -fail9: - EFSYS_PROBE(fail9); -fail8: - EFSYS_PROBE(fail8); -fail7: - EFSYS_PROBE(fail7); -fail6: - EFSYS_PROBE(fail6); -fail5: - EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: diff --git a/drivers/net/sfc/base/meson.build b/drivers/net/sfc/base/meson.build index f1e49735..da2bf44d 100644 --- a/drivers/net/sfc/base/meson.build +++ b/drivers/net/sfc/base/meson.build @@ -34,6 +34,7 @@ sources = [ 'siena_vpd.c', 'ef10_ev.c', 'ef10_filter.c', + 'ef10_image.c', 'ef10_intr.c', 'ef10_mac.c', 'ef10_mcdi.c', @@ -44,7 +45,8 @@ sources = [ 'ef10_tx.c', 'ef10_vpd.c', 'hunt_nic.c', - 'medford_nic.c' + 'medford_nic.c', + 'medford2_nic.c' ] extra_flags = [ diff --git a/drivers/net/sfc/base/siena_flash.h b/drivers/net/sfc/base/siena_flash.h index 91a9fe05..74bb9496 100644 --- a/drivers/net/sfc/base/siena_flash.h +++ b/drivers/net/sfc/base/siena_flash.h @@ -103,7 +103,14 @@ typedef struct siena_mc_boot_hdr_s { /* GENERATED BY scripts/genfwdef */ /* the key, or 0xffff if unsigned. (Otherwise set to 0) */ efx_byte_t mumfw_subtype; /* MUM & SUC images: subtype. (Otherwise set to 0) */ efx_byte_t reserved_b[3]; /* (set to 0) */ - efx_dword_t reserved_c[6]; /* (set to 0) */ + efx_dword_t security_level; /* This number increases every time a serious security flaw */ + /* is fixed. A secure NIC may not downgrade to any image */ + /* with a lower security level than the current image. */ + /* Note: The number in this header should only be used for */ + /* determining the level of new images, not to determine */ + /* the level of the current image as this header is not */ + /* protected by a CMAC. 
*/ + efx_dword_t reserved_c[5]; /* (set to 0) */ } siena_mc_boot_hdr_t; #define SIENA_MC_BOOT_HDR_PADDING \ diff --git a/drivers/net/sfc/base/siena_mac.c b/drivers/net/sfc/base/siena_mac.c index 904e03ed..f8857cdd 100644 --- a/drivers/net/sfc/base/siena_mac.c +++ b/drivers/net/sfc/base/siena_mac.c @@ -245,16 +245,28 @@ siena_mac_stats_update( __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp) { - efx_qword_t value; + const efx_nic_cfg_t *encp = &enp->en_nic_cfg; efx_qword_t generation_start; efx_qword_t generation_end; + efx_qword_t value; + efx_rc_t rc; - _NOTE(ARGUNUSED(enp)) + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) { + /* MAC stats count too small */ + rc = ENOSPC; + goto fail1; + } + if (EFSYS_MEM_SIZE(esmp) < + (encp->enc_mac_stats_nstats * sizeof (efx_qword_t))) { + /* DMA buffer too small */ + rc = ENOSPC; + goto fail2; + } /* Read END first so we don't race with the MC */ - EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE); - SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END, - &generation_end); + EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp)); + SIENA_MAC_STAT_READ(esmp, (encp->enc_mac_stats_nstats - 1), + &generation_end); EFSYS_MEM_READ_BARRIER(); /* TX */ @@ -422,7 +434,7 @@ siena_mac_stats_update( SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value); - EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE); + EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp)); EFSYS_MEM_READ_BARRIER(); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START, &generation_start); @@ -437,6 +449,13 @@ siena_mac_stats_update( *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0); return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); } #endif /* EFSYS_OPT_MAC_STATS */ diff --git a/drivers/net/sfc/base/siena_mcdi.c b/drivers/net/sfc/base/siena_mcdi.c index ef844591..d727c187 100644 --- a/drivers/net/sfc/base/siena_mcdi.c +++ b/drivers/net/sfc/base/siena_mcdi.c @@ -124,17 +124,21 @@ siena_mcdi_read_response( { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); unsigned int pdur; - unsigned int pos; + unsigned int pos = 0; efx_dword_t data; + size_t remaining = length; EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2); pdur = SIENA_MCDI_PDU(emip); - for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) { + while (remaining > 0) { + size_t chunk = MIN(remaining, sizeof (data)); + EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, pdur + ((offset + pos) >> 2), &data, B_FALSE); - memcpy((uint8_t *)bufferp + pos, &data, - MIN(sizeof (data), length - pos)); + memcpy((uint8_t *)bufferp + pos, &data, chunk); + pos += chunk; + remaining -= chunk; } } diff --git a/drivers/net/sfc/base/siena_nic.c b/drivers/net/sfc/base/siena_nic.c index f223c9be..31eef80b 100644 --- a/drivers/net/sfc/base/siena_nic.c +++ b/drivers/net/sfc/base/siena_nic.c @@ -66,6 +66,10 @@ siena_board_cfg( uint32_t nevq, nrxq, ntxq; efx_rc_t rc; + /* Siena has a fixed 8Kbyte VI window size */ + EFX_STATIC_ASSERT(1U << EFX_VI_WINDOW_SHIFT_8K == 8192); + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K; + /* External port identifier using one-based port numbering */ encp->enc_external_port = (uint8_t)enp->en_mcdi.em_emip.emi_port; @@ -114,6 +118,18 @@ siena_board_cfg( /* There is one RSS context per function */ encp->enc_rx_scale_max_exclusive_contexts = 1; + encp->enc_rx_scale_hash_alg_mask |= (1U << EFX_RX_HASHALG_LFSR); + 
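[Editorial sketch, not part of the patch.] siena_mac_stats_update() above now sizes its accesses from enc_mac_stats_nstats and EFSYS_MEM_SIZE(), but the underlying snapshot protocol is unchanged: read the generation marker at the end of the buffer first, copy the statistics, then read the start marker, and retry if the two differ, because the MC can rewrite the DMA buffer at any moment. A self-contained sketch of that lock-free reader (the real code uses DMA syncs and EFSYS_MEM_READ_BARRIER(); a compiler barrier stands in here, and the buffer layout is illustrative):

#include <stddef.h>
#include <stdint.h>

/* Compiler barrier standing in for EFSYS_MEM_READ_BARRIER(). */
#define READ_BARRIER() __asm__ __volatile__("" ::: "memory")

struct stats_buf {
	volatile uint64_t generation_start;
	volatile uint64_t counters[64];
	volatile uint64_t generation_end;
};

static int
stats_snapshot(const struct stats_buf *dma, uint64_t *out, size_t n)
{
	uint64_t gen_end, gen_start;
	size_t i;

	gen_end = dma->generation_end;		/* read END first */
	READ_BARRIER();
	for (i = 0; i < n; ++i)
		out[i] = dma->counters[i];
	READ_BARRIER();
	gen_start = dma->generation_start;

	/* A mismatch means the MC rewrote the buffer mid-copy: retry. */
	return ((gen_start == gen_end) ? 0 : -1);
}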
encp->enc_rx_scale_hash_alg_mask |= (1U << EFX_RX_HASHALG_TOEPLITZ); + + /* + * It is always possible to use port numbers + * as the input data for hash computation. + */ + encp->enc_rx_scale_l4_hash_supported = B_TRUE; + + /* There is no support for additional RSS modes */ + encp->enc_rx_scale_additional_modes_supported = B_FALSE; + encp->enc_tx_dma_desc_size_max = EFX_MASK32(FSF_AZ_TX_KER_BYTE_COUNT); /* Fragments must not span 4k boundaries. */ encp->enc_tx_dma_desc_boundary = 4096; @@ -145,6 +161,8 @@ siena_board_cfg( encp->enc_allow_set_mac_with_installed_filters = B_TRUE; encp->enc_rx_packed_stream_supported = B_FALSE; encp->enc_rx_var_packed_stream_supported = B_FALSE; + encp->enc_rx_es_super_buffer_supported = B_FALSE; + encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE; /* Siena supports two 10G ports, and 8 lanes of PCIe Gen2 */ encp->enc_required_pcie_bandwidth_mbps = 2 * 10000; @@ -152,6 +170,12 @@ siena_board_cfg( encp->enc_nvram_update_verify_result_supported = B_FALSE; + encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS; + + encp->enc_filter_action_flag_supported = B_FALSE; + encp->enc_filter_action_mark_supported = B_FALSE; + encp->enc_filter_action_mark_max = 0; + return (0); fail2: diff --git a/drivers/net/sfc/base/siena_nvram.c b/drivers/net/sfc/base/siena_nvram.c index e72bba0b..8cdd2df7 100644 --- a/drivers/net/sfc/base/siena_nvram.c +++ b/drivers/net/sfc/base/siena_nvram.c @@ -304,15 +304,20 @@ siena_nvram_get_dynamic_cfg( if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0) goto fail1; + if (size < SIENA_NVRAM_CHUNK) { + rc = EINVAL; + goto fail2; + } + EFSYS_KMEM_ALLOC(enp->en_esip, size, dcfg); if (dcfg == NULL) { rc = ENOMEM; - goto fail2; + goto fail3; } if ((rc = siena_nvram_partn_read(enp, partn, 0, (caddr_t)dcfg, SIENA_NVRAM_CHUNK)) != 0) - goto fail3; + goto fail4; /* Verify the magic */ if (EFX_DWORD_FIELD(dcfg->magic, EFX_DWORD_0) @@ -347,7 +352,7 @@ siena_nvram_get_dynamic_cfg( if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK, (caddr_t)dcfg + SIENA_NVRAM_CHUNK, region - SIENA_NVRAM_CHUNK)) != 0) - goto fail4; + goto fail5; } /* Verify checksum */ @@ -389,13 +394,15 @@ done: return (0); +fail5: + EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); -fail3: - EFSYS_PROBE(fail3); EFSYS_KMEM_FREE(enp->en_esip, size, dcfg); +fail3: + EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: diff --git a/drivers/net/sfc/base/siena_phy.c b/drivers/net/sfc/base/siena_phy.c index d638646b..4b2190d3 100644 --- a/drivers/net/sfc/base/siena_phy.c +++ b/drivers/net/sfc/base/siena_phy.c @@ -534,6 +534,11 @@ siena_phy_stats_update( MC_CMD_PHY_STATS_OUT_DMA_LEN)]; efx_rc_t rc; + if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_PHY_STATS_SIZE)) { + rc = EINVAL; + goto fail1; + } + (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_PHY_STATS; req.emr_in_buf = payload; @@ -550,7 +555,7 @@ siena_phy_stats_update( if (req.emr_rc != 0) { rc = req.emr_rc; - goto fail1; + goto fail2; } EFSYS_ASSERT3U(req.emr_out_length, ==, MC_CMD_PHY_STATS_OUT_DMA_LEN); @@ -559,6 +564,8 @@ siena_phy_stats_update( return (0); +fail2: + EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); diff --git a/drivers/net/sfc/base/siena_vpd.c b/drivers/net/sfc/base/siena_vpd.c index f188eb58..ebb12abf 100644 --- a/drivers/net/sfc/base/siena_vpd.c +++ b/drivers/net/sfc/base/siena_vpd.c @@ -36,21 +36,26 @@ siena_vpd_get_static( if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0) goto fail1; + if (size < SIENA_NVRAM_CHUNK) { + rc = EINVAL; + goto fail2; + } + 
EFSYS_KMEM_ALLOC(enp->en_esip, size, scfg); if (scfg == NULL) { rc = ENOMEM; - goto fail2; + goto fail3; } if ((rc = siena_nvram_partn_read(enp, partn, 0, (caddr_t)scfg, SIENA_NVRAM_CHUNK)) != 0) - goto fail3; + goto fail4; /* Verify the magic number */ if (EFX_DWORD_FIELD(scfg->magic, EFX_DWORD_0) != SIENA_MC_STATIC_CONFIG_MAGIC) { rc = EINVAL; - goto fail4; + goto fail5; } /* All future versions of the structure must be backwards compatible */ @@ -64,7 +69,7 @@ siena_vpd_get_static( if (hdr_length > size || vpd_offset > size || vpd_length > size || vpd_length + vpd_offset > size) { rc = EINVAL; - goto fail5; + goto fail6; } /* Read the remainder of scfg + static vpd */ @@ -73,7 +78,7 @@ siena_vpd_get_static( if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK, (caddr_t)scfg + SIENA_NVRAM_CHUNK, region - SIENA_NVRAM_CHUNK)) != 0) - goto fail6; + goto fail7; } /* Verify checksum */ @@ -82,7 +87,7 @@ siena_vpd_get_static( cksum += ((uint8_t *)scfg)[pos]; if (cksum != 0) { rc = EINVAL; - goto fail7; + goto fail8; } if (vpd_length == 0) @@ -92,7 +97,7 @@ siena_vpd_get_static( EFSYS_KMEM_ALLOC(enp->en_esip, vpd_length, svpd); if (svpd == NULL) { rc = ENOMEM; - goto fail8; + goto fail9; } memcpy(svpd, (caddr_t)scfg + vpd_offset, vpd_length); } @@ -104,6 +109,8 @@ siena_vpd_get_static( return (0); +fail9: + EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: @@ -114,11 +121,11 @@ fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); -fail3: - EFSYS_PROBE(fail3); EFSYS_KMEM_FREE(enp->en_esip, size, scfg); +fail3: + EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: diff --git a/drivers/net/sfc/efsys.h b/drivers/net/sfc/efsys.h index c7a54c3b..b9d2df58 100644 --- a/drivers/net/sfc/efsys.h +++ b/drivers/net/sfc/efsys.h @@ -26,6 +26,7 @@ #include #include "sfc_debug.h" +#include "sfc_log.h" #ifdef __cplusplus extern "C" { @@ -119,6 +120,8 @@ prefetch_read_once(const volatile void *addr) #define __out_ecount_opt(_n) #define __out_bcount(_n) #define __out_bcount_opt(_n) +#define __out_bcount_part(_n, _l) +#define __out_bcount_part_opt(_n, _l) #define __deref_out @@ -148,6 +151,8 @@ prefetch_read_once(const volatile void *addr) #define EFSYS_OPT_HUNTINGTON 1 /* Enable SFN8xxx support */ #define EFSYS_OPT_MEDFORD 1 +/* Enable SFN2xxx support */ +#define EFSYS_OPT_MEDFORD2 1 #ifdef RTE_LIBRTE_SFC_EFX_DEBUG #define EFSYS_OPT_CHECK_REG 1 #else @@ -161,7 +166,7 @@ prefetch_read_once(const volatile void *addr) #define EFSYS_OPT_MAC_STATS 1 -#define EFSYS_OPT_LOOPBACK 0 +#define EFSYS_OPT_LOOPBACK 1 #define EFSYS_OPT_MON_MCDI 0 #define EFSYS_OPT_MON_STATS 0 @@ -174,6 +179,7 @@ prefetch_read_once(const volatile void *addr) #define EFSYS_OPT_VPD 0 #define EFSYS_OPT_NVRAM 0 #define EFSYS_OPT_BOOTCFG 0 +#define EFSYS_OPT_IMAGE_LAYOUT 0 #define EFSYS_OPT_DIAG 0 #define EFSYS_OPT_RX_SCALE 1 @@ -192,8 +198,12 @@ prefetch_read_once(const volatile void *addr) #define EFSYS_OPT_RX_PACKED_STREAM 0 +#define EFSYS_OPT_RX_ES_SUPER_BUFFER 1 + #define EFSYS_OPT_TUNNEL 1 +#define EFSYS_OPT_FW_SUBVARIANT_AWARE 1 + /* ID */ typedef struct __efsys_identifier_s efsys_identifier_t; @@ -370,6 +380,9 @@ typedef struct efsys_mem_s { } while (B_FALSE) +#define EFSYS_MEM_SIZE(_esmp) \ + ((_esmp)->esm_mz->len) + #define EFSYS_MEM_ADDR(_esmp) \ ((_esmp)->esm_addr) @@ -721,7 +734,7 @@ typedef uint64_t efsys_stat_t; #define EFSYS_ERR(_esip, _code, _dword0, _dword1) \ do { \ (void)(_esip); \ - RTE_LOG(ERR, PMD, "FATAL ERROR #%u (0x%08x%08x)\n", \ + SFC_GENERIC_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)", \ (_code), (_dword0), 
(_dword1)); \ _NOTE(CONSTANTCONDITION); \ } while (B_FALSE) diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build index b60a8f58..2d34e869 100644 --- a/drivers/net/sfc/meson.build +++ b/drivers/net/sfc/meson.build @@ -6,7 +6,7 @@ # This software was jointly developed between OKTET Labs (under contract # for Solarflare) and Solarflare Communications, Inc. -if arch_subdir != 'x86' +if arch_subdir != 'x86' or cc.sizeof('void *') == 4 build = false endif @@ -30,10 +30,6 @@ extra_flags += [ '-Wbad-function-cast' ] -# Suppress ICC false positive warning on 'bulk' may be used before its -# value is set -extra_flags += '-wd3656' - foreach flag: extra_flags if cc.has_argument(flag) cflags += flag @@ -58,6 +54,7 @@ sources = files( 'sfc_flow.c', 'sfc_dp.c', 'sfc_ef10_rx.c', + 'sfc_ef10_essb_rx.c', 'sfc_ef10_tx.c' ) diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c index ac5fdcaa..6690053f 100644 --- a/drivers/net/sfc/sfc.c +++ b/drivers/net/sfc/sfc.c @@ -20,6 +20,8 @@ #include "sfc_ev.h" #include "sfc_rx.h" #include "sfc_tx.h" +#include "sfc_kvargs.h" +#include "sfc_tweak.h" int @@ -81,14 +83,23 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds) phy_caps |= (1 << EFX_PHY_CAP_1000FDX) | (1 << EFX_PHY_CAP_10000FDX) | - (1 << EFX_PHY_CAP_40000FDX); + (1 << EFX_PHY_CAP_25000FDX) | + (1 << EFX_PHY_CAP_40000FDX) | + (1 << EFX_PHY_CAP_50000FDX) | + (1 << EFX_PHY_CAP_100000FDX); } if (speeds & ETH_LINK_SPEED_1G) phy_caps |= (1 << EFX_PHY_CAP_1000FDX); if (speeds & ETH_LINK_SPEED_10G) phy_caps |= (1 << EFX_PHY_CAP_10000FDX); + if (speeds & ETH_LINK_SPEED_25G) + phy_caps |= (1 << EFX_PHY_CAP_25000FDX); if (speeds & ETH_LINK_SPEED_40G) phy_caps |= (1 << EFX_PHY_CAP_40000FDX); + if (speeds & ETH_LINK_SPEED_50G) + phy_caps |= (1 << EFX_PHY_CAP_50000FDX); + if (speeds & ETH_LINK_SPEED_100G) + phy_caps |= (1 << EFX_PHY_CAP_100000FDX); return phy_caps; } @@ -113,10 +124,12 @@ sfc_check_conf(struct sfc_adapter *sa) rc = EINVAL; } +#if !EFSYS_OPT_LOOPBACK if (conf->lpbk_mode != 0) { sfc_err(sa, "Loopback not supported"); rc = EINVAL; } +#endif if (conf->dcb_capability_en != 0) { sfc_err(sa, "Priority-based flow control not supported"); @@ -249,6 +262,58 @@ sfc_set_drv_limits(struct sfc_adapter *sa) return efx_nic_set_drv_limits(sa->nic, &lim); } +static int +sfc_set_fw_subvariant(struct sfc_adapter *sa) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads; + unsigned int txq_index; + efx_nic_fw_subvariant_t req_fw_subvariant; + efx_nic_fw_subvariant_t cur_fw_subvariant; + int rc; + + if (!encp->enc_fw_subvariant_no_tx_csum_supported) { + sfc_info(sa, "no-Tx-checksum subvariant not supported"); + return 0; + } + + for (txq_index = 0; txq_index < sa->txq_count; ++txq_index) { + struct sfc_txq_info *txq_info = &sa->txq_info[txq_index]; + + if (txq_info->txq != NULL) + tx_offloads |= txq_info->txq->offloads; + } + + if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) + req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT; + else + req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM; + + rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant); + if (rc != 0) { + sfc_err(sa, "failed to get FW subvariant: %d", rc); + return rc; + } + sfc_info(sa, "FW subvariant is %u vs required %u", + cur_fw_subvariant, req_fw_subvariant); + + if (cur_fw_subvariant == req_fw_subvariant) + return 0; + + rc = efx_nic_set_fw_subvariant(sa->nic, 
req_fw_subvariant); + if (rc != 0) { + sfc_err(sa, "failed to set FW subvariant %u: %d", + req_fw_subvariant, rc); + return rc; + } + sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant); + + return 0; +} + static int sfc_try_start(struct sfc_adapter *sa) { @@ -260,6 +325,11 @@ sfc_try_start(struct sfc_adapter *sa) SFC_ASSERT(sfc_adapter_is_locked(sa)); SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING); + sfc_log_init(sa, "set FW subvariant"); + rc = sfc_set_fw_subvariant(sa); + if (rc != 0) + goto fail_set_fw_subvariant; + sfc_log_init(sa, "set resource limits"); rc = sfc_set_drv_limits(sa); if (rc != 0) @@ -326,6 +396,7 @@ fail_tunnel_reconfigure: fail_nic_init: fail_set_drv_limits: +fail_set_fw_subvariant: sfc_log_init(sa, "failed %d", rc); return rc; } @@ -344,7 +415,7 @@ sfc_start(struct sfc_adapter *sa) case SFC_ADAPTER_CONFIGURED: break; case SFC_ADAPTER_STARTED: - sfc_info(sa, "already started"); + sfc_notice(sa, "already started"); return 0; default: rc = EINVAL; @@ -383,7 +454,7 @@ sfc_stop(struct sfc_adapter *sa) case SFC_ADAPTER_STARTED: break; case SFC_ADAPTER_CONFIGURED: - sfc_info(sa, "already stopped"); + sfc_notice(sa, "already stopped"); return; default: sfc_err(sa, "stop in unexpected state %u", sa->state); @@ -454,7 +525,7 @@ sfc_schedule_restart(struct sfc_adapter *sa) else if (rc != 0) sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc); else - sfc_info(sa, "restart scheduled"); + sfc_notice(sa, "restart scheduled"); } int @@ -530,27 +601,18 @@ sfc_close(struct sfc_adapter *sa) } static int -sfc_mem_bar_init(struct sfc_adapter *sa) +sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar) { struct rte_eth_dev *eth_dev = sa->eth_dev; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); efsys_bar_t *ebp = &sa->mem_bar; - unsigned int i; - struct rte_mem_resource *res; - - for (i = 0; i < RTE_DIM(pci_dev->mem_resource); i++) { - res = &pci_dev->mem_resource[i]; - if ((res->len != 0) && (res->phys_addr != 0)) { - /* Found first memory BAR */ - SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name); - ebp->esb_rid = i; - ebp->esb_dev = pci_dev; - ebp->esb_base = res->addr; - return 0; - } - } + struct rte_mem_resource *res = &pci_dev->mem_resource[membar]; - return EFAULT; + SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name); + ebp->esb_rid = membar; + ebp->esb_dev = pci_dev; + ebp->esb_base = res->addr; + return 0; } static void @@ -562,7 +624,6 @@ sfc_mem_bar_fini(struct sfc_adapter *sa) memset(ebp, 0, sizeof(*ebp)); } -#if EFSYS_OPT_RX_SCALE /* * A fixed RSS key which has a property of being symmetric * (symmetrical flows are distributed to the same CPU) @@ -576,12 +637,11 @@ static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = { 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, }; -#endif -#if EFSYS_OPT_RX_SCALE static int -sfc_set_rss_defaults(struct sfc_adapter *sa) +sfc_rss_attach(struct sfc_adapter *sa) { + struct sfc_rss *rss = &sa->rss; int rc; rc = efx_intr_init(sa->nic, sa->intr.type, NULL); @@ -596,26 +656,31 @@ sfc_set_rss_defaults(struct sfc_adapter *sa) if (rc != 0) goto fail_rx_init; - rc = efx_rx_scale_default_support_get(sa->nic, &sa->rss_support); + rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type); if (rc != 0) goto fail_scale_support_get; - rc = efx_rx_hash_default_support_get(sa->nic, &sa->hash_support); + rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support); if (rc != 0) goto fail_hash_support_get; + rc = sfc_rx_hash_init(sa); + if (rc != 0) + goto fail_rx_hash_init; + 
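[Editorial sketch, not part of the patch.] sfc_set_fw_subvariant() above selects the no-Tx-checksum firmware subvariant only when neither the device-level Tx configuration nor any individual TXQ asks for a checksum offload, and it reprograms the NIC only when the requested subvariant differs from the current one. The decision itself reduces to a few lines; in this sketch the offload bit values are illustrative, not the DPDK DEV_TX_OFFLOAD_* constants:

#include <stdint.h>

enum fw_subvariant { SUBVARIANT_DEFAULT, SUBVARIANT_NO_TX_CSUM };

/* Illustrative bit values, not the DPDK DEV_TX_OFFLOAD_* constants. */
#define OFFLOAD_IPV4_CKSUM		(1ULL << 0)
#define OFFLOAD_TCP_CKSUM		(1ULL << 1)
#define OFFLOAD_UDP_CKSUM		(1ULL << 2)
#define OFFLOAD_OUTER_IPV4_CKSUM	(1ULL << 3)

static enum fw_subvariant
choose_fw_subvariant(uint64_t tx_offloads)
{
	uint64_t csum_mask = OFFLOAD_IPV4_CKSUM | OFFLOAD_TCP_CKSUM |
			     OFFLOAD_UDP_CKSUM | OFFLOAD_OUTER_IPV4_CKSUM;

	/* Any checksum offload anywhere requires the default firmware. */
	return (((tx_offloads & csum_mask) != 0) ?
	    SUBVARIANT_DEFAULT : SUBVARIANT_NO_TX_CSUM);
}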
efx_rx_fini(sa->nic); efx_ev_fini(sa->nic); efx_intr_fini(sa->nic); - sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS); - - rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key)); + rte_memcpy(rss->key, default_rss_key, sizeof(rss->key)); return 0; +fail_rx_hash_init: fail_hash_support_get: fail_scale_support_get: + efx_rx_fini(sa->nic); + fail_rx_init: efx_ev_fini(sa->nic); @@ -625,13 +690,12 @@ fail_ev_init: fail_intr_init: return rc; } -#else -static int -sfc_set_rss_defaults(__rte_unused struct sfc_adapter *sa) + +static void +sfc_rss_detach(struct sfc_adapter *sa) { - return 0; + sfc_rx_hash_fini(sa); } -#endif int sfc_attach(struct sfc_adapter *sa) @@ -690,9 +754,9 @@ sfc_attach(struct sfc_adapter *sa) if (rc != 0) goto fail_port_attach; - rc = sfc_set_rss_defaults(sa); + rc = sfc_rss_attach(sa); if (rc != 0) - goto fail_set_rss_defaults; + goto fail_rss_attach; rc = sfc_filter_attach(sa); if (rc != 0) @@ -709,7 +773,9 @@ sfc_attach(struct sfc_adapter *sa) return 0; fail_filter_attach: -fail_set_rss_defaults: + sfc_rss_detach(sa); + +fail_rss_attach: sfc_port_detach(sa); fail_port_attach: @@ -741,6 +807,7 @@ sfc_detach(struct sfc_adapter *sa) sfc_flow_fini(sa); sfc_filter_detach(sa); + sfc_rss_detach(sa); sfc_port_detach(sa); sfc_ev_detach(sa); sfc_intr_detach(sa); @@ -749,10 +816,169 @@ sfc_detach(struct sfc_adapter *sa) sa->state = SFC_ADAPTER_UNINITIALIZED; } +static int +sfc_kvarg_fv_variant_handler(__rte_unused const char *key, + const char *value_str, void *opaque) +{ + uint32_t *value = opaque; + + if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0) + *value = EFX_FW_VARIANT_DONT_CARE; + else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0) + *value = EFX_FW_VARIANT_FULL_FEATURED; + else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0) + *value = EFX_FW_VARIANT_LOW_LATENCY; + else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0) + *value = EFX_FW_VARIANT_PACKED_STREAM; + else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0) + *value = EFX_FW_VARIANT_DPDK; + else + return -EINVAL; + + return 0; +} + +static int +sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv) +{ + efx_nic_fw_info_t enfi; + int rc; + + rc = efx_nic_get_fw_version(sa->nic, &enfi); + if (rc != 0) + return rc; + else if (!enfi.enfi_dpcpu_fw_ids_valid) + return ENOTSUP; + + /* + * Firmware variant can be uniquely identified by the RxDPCPU + * firmware id + */ + switch (enfi.enfi_rx_dpcpu_fw_id) { + case EFX_RXDP_FULL_FEATURED_FW_ID: + *efv = EFX_FW_VARIANT_FULL_FEATURED; + break; + + case EFX_RXDP_LOW_LATENCY_FW_ID: + *efv = EFX_FW_VARIANT_LOW_LATENCY; + break; + + case EFX_RXDP_PACKED_STREAM_FW_ID: + *efv = EFX_FW_VARIANT_PACKED_STREAM; + break; + + case EFX_RXDP_DPDK_FW_ID: + *efv = EFX_FW_VARIANT_DPDK; + break; + + default: + /* + * Other firmware variants are not considered, since they are + * not supported in the device parameters + */ + *efv = EFX_FW_VARIANT_DONT_CARE; + break; + } + + return 0; +} + +static const char * +sfc_fw_variant2str(efx_fw_variant_t efv) +{ + switch (efv) { + case EFX_RXDP_FULL_FEATURED_FW_ID: + return SFC_KVARG_FW_VARIANT_FULL_FEATURED; + case EFX_RXDP_LOW_LATENCY_FW_ID: + return SFC_KVARG_FW_VARIANT_LOW_LATENCY; + case EFX_RXDP_PACKED_STREAM_FW_ID: + return SFC_KVARG_FW_VARIANT_PACKED_STREAM; + case EFX_RXDP_DPDK_FW_ID: + return SFC_KVARG_FW_VARIANT_DPDK; + default: + return "unknown"; + } +} + +static int +sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa) +{ + int 
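[Editorial sketch, not part of the patch.] The device-argument handlers above, such as sfc_kvarg_fv_variant_handler(), translate option strings into enum values with a strcasecmp() chain and return -EINVAL for anything unrecognised, so probe fails early with a clear cause. The general shape of such a handler (the key/value/opaque signature follows the rte_kvargs processing convention; names and accepted values here are illustrative):

#include <errno.h>
#include <stdint.h>
#include <strings.h>

static int
mode_kvarg_handler(const char *key, const char *value_str, void *opaque)
{
	uint32_t *value = opaque;

	(void)key;

	if (strcasecmp(value_str, "auto") == 0)
		*value = 0;
	else if (strcasecmp(value_str, "fast") == 0)
		*value = 1;
	else
		return (-EINVAL);	/* unknown value: fail probe early */

	return (0);
}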
rc; + long value; + + value = SFC_RXD_WAIT_TIMEOUT_NS_DEF; + + rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS, + sfc_kvarg_long_handler, &value); + if (rc != 0) + return rc; + + if (value < 0 || + (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) { + sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' " + "was set (%ld);", value); + sfc_err(sa, "it must not be less than 0 or greater than %u", + EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX); + return EINVAL; + } + + sa->rxd_wait_timeout_ns = value; + return 0; +} + +static int +sfc_nic_probe(struct sfc_adapter *sa) +{ + efx_nic_t *enp = sa->nic; + efx_fw_variant_t preferred_efv; + efx_fw_variant_t efv; + int rc; + + preferred_efv = EFX_FW_VARIANT_DONT_CARE; + rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT, + sfc_kvarg_fv_variant_handler, + &preferred_efv); + if (rc != 0) { + sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT); + return rc; + } + + rc = sfc_kvarg_rxd_wait_timeout_ns(sa); + if (rc != 0) + return rc; + + rc = efx_nic_probe(enp, preferred_efv); + if (rc == EACCES) { + /* Unprivileged functions cannot set FW variant */ + rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE); + } + if (rc != 0) + return rc; + + rc = sfc_get_fw_variant(sa, &efv); + if (rc == ENOTSUP) { + sfc_warn(sa, "FW variant can not be obtained"); + return 0; + } + if (rc != 0) + return rc; + + /* Check that firmware variant was changed to the requested one */ + if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) { + sfc_warn(sa, "FW variant has not changed to the requested %s", + sfc_fw_variant2str(preferred_efv)); + } + + sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv)); + + return 0; +} + int sfc_probe(struct sfc_adapter *sa) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev); + unsigned int membar; efx_nic_t *enp; int rc; @@ -763,17 +989,17 @@ sfc_probe(struct sfc_adapter *sa) sa->socket_id = rte_socket_id(); rte_atomic32_init(&sa->restart_required); - sfc_log_init(sa, "init mem bar"); - rc = sfc_mem_bar_init(sa); - if (rc != 0) - goto fail_mem_bar_init; - sfc_log_init(sa, "get family"); rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id, - &sa->family); + &sa->family, &membar); if (rc != 0) goto fail_family; - sfc_log_init(sa, "family is %u", sa->family); + sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar); + + sfc_log_init(sa, "init mem bar"); + rc = sfc_mem_bar_init(sa, membar); + if (rc != 0) + goto fail_mem_bar_init; sfc_log_init(sa, "create nic"); rte_spinlock_init(&sa->nic_lock); @@ -788,7 +1014,7 @@ sfc_probe(struct sfc_adapter *sa) goto fail_mcdi_init; sfc_log_init(sa, "probe nic"); - rc = efx_nic_probe(enp); + rc = sfc_nic_probe(sa); if (rc != 0) goto fail_nic_probe; @@ -804,10 +1030,10 @@ fail_mcdi_init: efx_nic_destroy(enp); fail_nic_create: -fail_family: sfc_mem_bar_fini(sa); fail_mem_bar_init: +fail_family: sfc_log_init(sa, "failed %d", rc); return rc; } @@ -843,3 +1069,35 @@ sfc_unprobe(struct sfc_adapter *sa) sfc_flow_fini(sa); sa->state = SFC_ADAPTER_UNINITIALIZED; } + +uint32_t +sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str, + uint32_t ll_default) +{ + size_t lt_prefix_str_size = strlen(lt_prefix_str); + size_t lt_str_size_max; + char *lt_str = NULL; + int ret; + + if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) { + ++lt_prefix_str_size; /* Reserve space for prefix separator */ + lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1; + } else { + return RTE_LOGTYPE_PMD; + } + + lt_str = 
rte_zmalloc("logtype_str", lt_str_size_max, 0); + if (lt_str == NULL) + return RTE_LOGTYPE_PMD; + + strncpy(lt_str, lt_prefix_str, lt_prefix_str_size); + lt_str[lt_prefix_str_size - 1] = '.'; + rte_pci_device_name(&sa->pci_addr, lt_str + lt_prefix_str_size, + lt_str_size_max - lt_prefix_str_size); + lt_str[lt_str_size_max - 1] = '\0'; + + ret = rte_log_register_type_and_pick_level(lt_str, ll_default); + rte_free(lt_str); + + return (ret < 0) ? RTE_LOGTYPE_PMD : ret; +} diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h index 75575349..51be4403 100644 --- a/drivers/net/sfc/sfc.h +++ b/drivers/net/sfc/sfc.h @@ -27,11 +27,6 @@ extern "C" { #endif -#if EFSYS_OPT_RX_SCALE -/** RSS hash offloads mask */ -#define SFC_RSS_OFFLOADS (ETH_RSS_IP | ETH_RSS_TCP) -#endif - /* * +---------------+ * | UNINITIALIZED |<-----------+ @@ -104,7 +99,7 @@ struct sfc_mcdi { efsys_mem_t mem; enum sfc_mcdi_state state; efx_mcdi_transport_t transport; - bool logging; + uint32_t logtype; uint32_t proxy_handle; efx_rc_t proxy_result; }; @@ -156,6 +151,24 @@ struct sfc_port { uint32_t mac_stats_mask[EFX_MAC_STATS_MASK_NPAGES]; }; +struct sfc_rss_hf_rte_to_efx { + uint64_t rte; + efx_rx_hash_type_t efx; +}; + +struct sfc_rss { + unsigned int channels; + efx_rx_scale_context_type_t context_type; + efx_rx_hash_support_t hash_support; + efx_rx_hash_alg_t hash_alg; + unsigned int hf_map_nb_entries; + struct sfc_rss_hf_rte_to_efx *hf_map; + + efx_rx_hash_type_t hash_types; + unsigned int tbl[EFX_RSS_TBL_SIZE]; + uint8_t key[EFX_RSS_KEY_SIZE]; +}; + /* Adapter private data */ struct sfc_adapter { /* @@ -170,7 +183,7 @@ struct sfc_adapter { uint16_t port_id; struct rte_eth_dev *eth_dev; struct rte_kvargs *kvargs; - bool debug_init; + uint32_t logtype_main; int socket_id; efsys_bar_t mem_bar; efx_family_t family; @@ -225,15 +238,9 @@ struct sfc_adapter { boolean_t tso; - unsigned int rss_channels; + uint32_t rxd_wait_timeout_ns; -#if EFSYS_OPT_RX_SCALE - efx_rx_scale_context_type_t rss_support; - efx_rx_hash_support_t hash_support; - efx_rx_hash_type_t rss_hash_types; - unsigned int rss_tbl[EFX_RSS_TBL_SIZE]; - uint8_t rss_key[EFX_RSS_KEY_SIZE]; -#endif + struct sfc_rss rss; /* * Shared memory copy of the Rx datapath name to be used by @@ -302,6 +309,10 @@ int sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id, size_t len, int socket_id, efsys_mem_t *esmp); void sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp); +uint32_t sfc_register_logtype(struct sfc_adapter *sa, + const char *lt_prefix_str, + uint32_t ll_default); + int sfc_probe(struct sfc_adapter *sa); void sfc_unprobe(struct sfc_adapter *sa); int sfc_attach(struct sfc_adapter *sa); diff --git a/drivers/net/sfc/sfc_dp.c b/drivers/net/sfc/sfc_dp.c index 9a5ca20b..b121dc09 100644 --- a/drivers/net/sfc/sfc_dp.c +++ b/drivers/net/sfc/sfc_dp.c @@ -14,6 +14,7 @@ #include #include "sfc_dp.h" +#include "sfc_log.h" void sfc_dp_queue_init(struct sfc_dp_queue *dpq, uint16_t port_id, uint16_t queue_id, @@ -63,8 +64,8 @@ int sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry) { if (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) { - rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, - "sfc %s dapapath '%s' already registered\n", + SFC_GENERIC_LOG(ERR, + "sfc %s dapapath '%s' already registered", entry->type == SFC_DP_RX ? "Rx" : entry->type == SFC_DP_TX ? 
"Tx" : "unknown", diff --git a/drivers/net/sfc/sfc_dp.h b/drivers/net/sfc/sfc_dp.h index b142532d..3da65abe 100644 --- a/drivers/net/sfc/sfc_dp.h +++ b/drivers/net/sfc/sfc_dp.h @@ -15,6 +15,8 @@ #include +#include "sfc_log.h" + #ifdef __cplusplus extern "C" { #endif @@ -58,10 +60,10 @@ void sfc_dp_queue_init(struct sfc_dp_queue *dpq, const struct sfc_dp_queue *_dpq = (dpq); \ const struct rte_pci_addr *_addr = &(_dpq)->pci_addr; \ \ - RTE_LOG(level, PMD, \ + SFC_GENERIC_LOG(level, \ RTE_FMT("%s " PCI_PRI_FMT \ " #%" PRIu16 ".%" PRIu16 ": " \ - RTE_FMT_HEAD(__VA_ARGS__,) "\n", \ + RTE_FMT_HEAD(__VA_ARGS__ ,), \ dp_name, \ _addr->domain, _addr->bus, \ _addr->devid, _addr->function, \ @@ -77,7 +79,8 @@ struct sfc_dp { enum sfc_dp_type type; /* Mask of required hardware/firmware capabilities */ unsigned int hw_fw_caps; -#define SFC_DP_HW_FW_CAP_EF10 0x1 +#define SFC_DP_HW_FW_CAP_EF10 0x1 +#define SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER 0x2 }; /** List of datapath variants */ diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h index be725dcb..ce96e83f 100644 --- a/drivers/net/sfc/sfc_dp_rx.h +++ b/drivers/net/sfc/sfc_dp_rx.h @@ -78,6 +78,8 @@ struct sfc_dp_rx_qcreate_info { * doorbell */ volatile void *mem_bar; + /** VI window size shift */ + unsigned int vi_window_shift; }; /** @@ -87,11 +89,24 @@ struct sfc_dp_rx_qcreate_info { */ typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info); +/** + * Test if an Rx datapath supports specific mempool ops. + * + * @param pool The name of the pool operations to test. + * + * @return Check status. + * @retval 0 Best mempool ops choice. + * @retval 1 Mempool ops are supported. + * @retval -ENOTSUP Mempool ops not supported. + */ +typedef int (sfc_dp_rx_pool_ops_supported_t)(const char *pool); + /** * Get size of receive and event queue rings by the number of Rx - * descriptors. + * descriptors and mempool configuration. * * @param nb_rx_desc Number of Rx descriptors + * @param mb_pool mbuf pool with Rx buffers * @param rxq_entries Location for number of Rx ring entries * @param evq_entries Location for number of event ring entries * @param rxq_max_fill_level Location for maximum Rx ring fill level @@ -99,6 +114,7 @@ typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info); * @return 0 or positive errno. */ typedef int (sfc_dp_rx_qsize_up_rings_t)(uint16_t nb_rx_desc, + struct rte_mempool *mb_pool, unsigned int *rxq_entries, unsigned int *evq_entries, unsigned int *rxq_max_fill_level); @@ -145,6 +161,12 @@ typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq, */ typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id); +/** + * Packed stream receive event handler used during queue flush only. + */ +typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq, + unsigned int id); + /** * Receive queue purge function called after queue flush. 
* @@ -171,13 +193,18 @@ struct sfc_dp_rx { #define SFC_DP_RX_FEAT_SCATTER 0x1 #define SFC_DP_RX_FEAT_MULTI_PROCESS 0x2 #define SFC_DP_RX_FEAT_TUNNELS 0x4 +#define SFC_DP_RX_FEAT_FLOW_FLAG 0x8 +#define SFC_DP_RX_FEAT_FLOW_MARK 0x10 +#define SFC_DP_RX_FEAT_CHECKSUM 0x20 sfc_dp_rx_get_dev_info_t *get_dev_info; + sfc_dp_rx_pool_ops_supported_t *pool_ops_supported; sfc_dp_rx_qsize_up_rings_t *qsize_up_rings; sfc_dp_rx_qcreate_t *qcreate; sfc_dp_rx_qdestroy_t *qdestroy; sfc_dp_rx_qstart_t *qstart; sfc_dp_rx_qstop_t *qstop; sfc_dp_rx_qrx_ev_t *qrx_ev; + sfc_dp_rx_qrx_ps_ev_t *qrx_ps_ev; sfc_dp_rx_qpurge_t *qpurge; sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get; sfc_dp_rx_qdesc_npending_t *qdesc_npending; @@ -203,6 +230,7 @@ sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps) extern struct sfc_dp_rx sfc_efx_rx; extern struct sfc_dp_rx sfc_ef10_rx; +extern struct sfc_dp_rx sfc_ef10_essb_rx; #ifdef __cplusplus } diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h index 0c1aad90..eda9676c 100644 --- a/drivers/net/sfc/sfc_dp_tx.h +++ b/drivers/net/sfc/sfc_dp_tx.h @@ -39,8 +39,6 @@ struct sfc_dp_tx_qcreate_info { unsigned int max_fill_level; /** Minimum number of unused Tx descriptors to do reap */ unsigned int free_thresh; - /** Transmit queue configuration flags */ - unsigned int flags; /** Offloads enabled on the transmit queue */ uint64_t offloads; /** Tx queue size */ @@ -57,6 +55,8 @@ struct sfc_dp_tx_qcreate_info { unsigned int hw_index; /** Virtual address of the memory-mapped BAR to push Tx doorbell */ volatile void *mem_bar; + /** VI window size shift */ + unsigned int vi_window_shift; }; /** diff --git a/drivers/net/sfc/sfc_ef10.h b/drivers/net/sfc/sfc_ef10.h index ace6a1dd..a73e0bde 100644 --- a/drivers/net/sfc/sfc_ef10.h +++ b/drivers/net/sfc/sfc_ef10.h @@ -79,6 +79,40 @@ sfc_ef10_ev_present(const efx_qword_t ev) ~EFX_QWORD_FIELD(ev, EFX_DWORD_1); } + +/** + * Alignment requirement for value written to RX WPTR: + * the WPTR must be aligned to an 8 descriptor boundary. + */ +#define SFC_EF10_RX_WPTR_ALIGN 8u + +static inline void +sfc_ef10_rx_qpush(volatile void *doorbell, unsigned int added, + unsigned int ptr_mask) +{ + efx_dword_t dword; + + /* Hardware has alignment restriction for WPTR */ + RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0); + SFC_ASSERT(RTE_ALIGN(added, SFC_EF10_RX_WPTR_ALIGN) == added); + + EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, added & ptr_mask); + + /* DMA sync to device is not required */ + + /* + * rte_write32() has rte_io_wmb() which guarantees that the STORE + * operations (i.e. Rx and event descriptor updates) that precede + * the rte_io_wmb() call are visible to NIC before the STORE + * operations that follow it (i.e. doorbell write). + */ + rte_write32(dword.ed_u32[0], doorbell); +} + + +const uint32_t * sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps); + + #ifdef __cplusplus } #endif diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c new file mode 100644 index 00000000..81c8f7fb --- /dev/null +++ b/drivers/net/sfc/sfc_ef10_essb_rx.c @@ -0,0 +1,723 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2017-2018 Solarflare Communications Inc. + * All rights reserved. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. 
+ */ + +/* EF10 equal stride packed stream receive native datapath implementation */ + +#include + +#include +#include +#include +#include + +#include "efx.h" +#include "efx_types.h" +#include "efx_regs.h" +#include "efx_regs_ef10.h" + +#include "sfc_tweak.h" +#include "sfc_dp_rx.h" +#include "sfc_kvargs.h" +#include "sfc_ef10.h" + +/* Tunnels are not supported */ +#define SFC_EF10_RX_EV_ENCAP_SUPPORT 0 +#include "sfc_ef10_rx_ev.h" + +#define sfc_ef10_essb_rx_err(dpq, ...) \ + SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, ERR, dpq, __VA_ARGS__) + +#define sfc_ef10_essb_rx_info(dpq, ...) \ + SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, INFO, dpq, __VA_ARGS__) + +/* + * Fake length for RXQ descriptors in equal stride super-buffer mode + * to make hardware happy. + */ +#define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE 32 + +/** + * Minimum number of Rx buffers the datapath allows to be used. + * + * Each HW Rx descriptor carries many Rx buffers. The number of buffers + * per HW Rx descriptor is equal to the size (in buffers) of the + * contiguous block provided by the Rx buffers memory pool. The contiguous + * block size depends on CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB and the + * rte_mbuf data size specified on memory pool creation. A typical + * rte_mbuf data size is about 2k, which makes a bit less than 32 buffers + * per contiguous block with the default bucket size of 64k. + * Since HW Rx descriptors are pushed in groups of 8 (see + * SFC_EF10_RX_WPTR_ALIGN), this makes about 256 the required minimum. + * It is doubled in the advertised minimum to allow for at least + * 2 refill blocks. + */ +#define SFC_EF10_ESSB_RX_DESCS_MIN 512 + +/** + * Alignment requirement for the number of Rx buffers. + * + * There are no extra requirements on alignment since the actual number + * of pushed Rx buffers will be a multiple of the contiguous block size, + * which is unknown beforehand. + */ +#define SFC_EF10_ESSB_RX_DESCS_ALIGN 1 + +/** + * Maximum number of descriptors/buffers in the Rx ring. + * It should guarantee that the corresponding event queue never overfills.
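 + * + * For example, with a 4096-entry event queue and assuming 8 events per + * cache line (64-byte cache lines, 8-byte events), the limit below + * evaluates to 4096 - 1 - 7 - 1 - 1 = 4086 buffers.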
+ */ +#define SFC_EF10_ESSB_RXQ_LIMIT(_nevs) \ + ((_nevs) - 1 /* head must not step on tail */ - \ + (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \ + 1 /* Rx error */ - 1 /* flush */) + +struct sfc_ef10_essb_rx_sw_desc { + struct rte_mbuf *first_mbuf; +}; + +struct sfc_ef10_essb_rxq { + /* Used on data path */ + unsigned int flags; +#define SFC_EF10_ESSB_RXQ_STARTED 0x1 +#define SFC_EF10_ESSB_RXQ_NOT_RUNNING 0x2 +#define SFC_EF10_ESSB_RXQ_EXCEPTION 0x4 + unsigned int rxq_ptr_mask; + unsigned int block_size; + unsigned int buf_stride; + unsigned int bufs_ptr; + unsigned int completed; + unsigned int pending_id; + unsigned int bufs_pending; + unsigned int left_in_completed; + unsigned int left_in_pending; + unsigned int evq_read_ptr; + unsigned int evq_ptr_mask; + efx_qword_t *evq_hw_ring; + struct sfc_ef10_essb_rx_sw_desc *sw_ring; + uint16_t port_id; + + /* Used on refill */ + unsigned int added; + unsigned int max_fill_level; + unsigned int refill_threshold; + struct rte_mempool *refill_mb_pool; + efx_qword_t *rxq_hw_ring; + volatile void *doorbell; + + /* Datapath receive queue anchor */ + struct sfc_dp_rxq dp; +}; + +static inline struct sfc_ef10_essb_rxq * +sfc_ef10_essb_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq) +{ + return container_of(dp_rxq, struct sfc_ef10_essb_rxq, dp); +} + +static struct rte_mbuf * +sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq, + struct rte_mbuf *mbuf) +{ + return (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride); +} + +static struct rte_mbuf * +sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq, + struct rte_mbuf *mbuf, unsigned int idx) +{ + return (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride); +} + +static struct rte_mbuf * +sfc_ef10_essb_maybe_next_completed(struct sfc_ef10_essb_rxq *rxq) +{ + const struct sfc_ef10_essb_rx_sw_desc *rxd; + + if (rxq->left_in_completed != 0) { + rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask]; + return sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf, + rxq->block_size - rxq->left_in_completed); + } else { + rxq->completed++; + rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask]; + rxq->left_in_completed = rxq->block_size; + return rxd->first_mbuf; + } +} + +static void +sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq) +{ + const unsigned int rxq_ptr_mask = rxq->rxq_ptr_mask; + unsigned int free_space; + unsigned int bulks; + void *mbuf_blocks[SFC_EF10_RX_WPTR_ALIGN]; + unsigned int added = rxq->added; + + free_space = rxq->max_fill_level - (added - rxq->completed); + + if (free_space < rxq->refill_threshold) + return; + + bulks = free_space / RTE_DIM(mbuf_blocks); + /* refill_threshold guarantees that bulks is positive */ + SFC_ASSERT(bulks > 0); + + do { + unsigned int id; + unsigned int i; + + if (unlikely(rte_mempool_get_contig_blocks(rxq->refill_mb_pool, + mbuf_blocks, RTE_DIM(mbuf_blocks)) < 0)) { + struct rte_eth_dev_data *dev_data = + rte_eth_devices[rxq->port_id].data; + + /* + * It is hardly a safe way to increment counter + * from different contexts, but all PMDs do it. 
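 + * + * Note on the failed call above: rte_mempool_get_contig_blocks() is an + * all-or-nothing dequeue; on failure it retrieves no objects and, per + * the mempool API, typically reports -ENOBUFS when the pool cannot + * supply the requested number of whole contiguous blocks.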
+ */ + dev_data->rx_mbuf_alloc_failed += RTE_DIM(mbuf_blocks); + /* Return if we have posted nothing yet */ + if (added == rxq->added) + return; + /* Push posted */ + break; + } + + for (i = 0, id = added & rxq_ptr_mask; + i < RTE_DIM(mbuf_blocks); + ++i, ++id) { + struct rte_mbuf *m = mbuf_blocks[i]; + struct sfc_ef10_essb_rx_sw_desc *rxd; + + SFC_ASSERT((id & ~rxq_ptr_mask) == 0); + rxd = &rxq->sw_ring[id]; + rxd->first_mbuf = m; + + /* RX_KER_BYTE_CNT is ignored by firmware */ + EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id], + ESF_DZ_RX_KER_BYTE_CNT, + SFC_EF10_ESSB_RX_FAKE_BUF_SIZE, + ESF_DZ_RX_KER_BUF_ADDR, + rte_mbuf_data_iova_default(m)); + } + + added += RTE_DIM(mbuf_blocks); + + } while (--bulks > 0); + + SFC_ASSERT(rxq->added != added); + rxq->added = added; + sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask); +} + +static bool +sfc_ef10_essb_rx_event_get(struct sfc_ef10_essb_rxq *rxq, efx_qword_t *rx_ev) +{ + *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->evq_ptr_mask]; + + if (!sfc_ef10_ev_present(*rx_ev)) + return false; + + if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) != + FSE_AZ_EV_CODE_RX_EV)) { + /* + * Do not move read_ptr to keep the event for exception + * handling + */ + rxq->flags |= SFC_EF10_ESSB_RXQ_EXCEPTION; + sfc_ef10_essb_rx_err(&rxq->dp.dpq, + "RxQ exception at EvQ read ptr %#x", + rxq->evq_read_ptr); + return false; + } + + rxq->evq_read_ptr++; + return true; +} + +static void +sfc_ef10_essb_rx_process_ev(struct sfc_ef10_essb_rxq *rxq, efx_qword_t rx_ev) +{ + unsigned int ready; + + ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - + rxq->bufs_ptr) & + EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); + + rxq->bufs_ptr += ready; + rxq->bufs_pending += ready; + + SFC_ASSERT(ready > 0); + do { + const struct sfc_ef10_essb_rx_sw_desc *rxd; + struct rte_mbuf *m; + unsigned int todo_bufs; + struct rte_mbuf *m0; + + rxd = &rxq->sw_ring[rxq->pending_id]; + m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf, + rxq->block_size - rxq->left_in_pending); + + if (ready < rxq->left_in_pending) { + todo_bufs = ready; + ready = 0; + rxq->left_in_pending -= todo_bufs; + } else { + todo_bufs = rxq->left_in_pending; + ready -= todo_bufs; + rxq->left_in_pending = rxq->block_size; + if (rxq->pending_id != rxq->rxq_ptr_mask) + rxq->pending_id++; + else + rxq->pending_id = 0; + } + + SFC_ASSERT(todo_bufs > 0); + --todo_bufs; + + sfc_ef10_rx_ev_to_offloads(rx_ev, m, ~0ull); + + /* Prefetch pseudo-header */ + rte_prefetch0((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM); + + m0 = m; + while (todo_bufs-- > 0) { + m = sfc_ef10_essb_next_mbuf(rxq, m); + m->ol_flags = m0->ol_flags; + m->packet_type = m0->packet_type; + /* Prefetch pseudo-header */ + rte_prefetch0((uint8_t *)m->buf_addr + + RTE_PKTMBUF_HEADROOM); + } + } while (ready > 0); +} + +static unsigned int +sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + unsigned int n_rx_pkts = 0; + unsigned int todo_bufs; + struct rte_mbuf *m; + + while ((todo_bufs = RTE_MIN(nb_pkts - n_rx_pkts, + rxq->bufs_pending)) > 0) { + m = sfc_ef10_essb_maybe_next_completed(rxq); + + todo_bufs = RTE_MIN(todo_bufs, rxq->left_in_completed); + + rxq->bufs_pending -= todo_bufs; + rxq->left_in_completed -= todo_bufs; + + SFC_ASSERT(todo_bufs > 0); + todo_bufs--; + + do { + const efx_qword_t *qwordp; + uint16_t pkt_len; + + /* Buffers to be discarded have 0 in packet type */ + if (unlikely(m->packet_type == 0)) { + rte_mempool_put(rxq->refill_mb_pool, m); + goto next_buf; + } + + 
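 + /* + * The zero packet type tested above is the marker set by + * sfc_ef10_rx_ev_to_offloads() (see sfc_ef10_rx_ev.h below) for + * events flagged with ECC, ECRC or incomplete-parse errors, so + * such buffers go back to the pool instead of to the application. + */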
rx_pkts[n_rx_pkts++] = m; + + /* Parse pseudo-header */ + qwordp = (const efx_qword_t *) + ((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM); + pkt_len = + EFX_QWORD_FIELD(*qwordp, + ES_EZ_ESSB_RX_PREFIX_DATA_LEN); + + m->data_off = RTE_PKTMBUF_HEADROOM + + ES_EZ_ESSB_RX_PREFIX_LEN; + m->port = rxq->port_id; + + rte_pktmbuf_pkt_len(m) = pkt_len; + rte_pktmbuf_data_len(m) = pkt_len; + + m->ol_flags |= + (PKT_RX_RSS_HASH * + !!EFX_TEST_QWORD_BIT(*qwordp, + ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) | + (PKT_RX_FDIR_ID * + !!EFX_TEST_QWORD_BIT(*qwordp, + ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) | + (PKT_RX_FDIR * + !!EFX_TEST_QWORD_BIT(*qwordp, + ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN)); + + /* EFX_QWORD_FIELD converts little-endian to CPU */ + m->hash.rss = + EFX_QWORD_FIELD(*qwordp, + ES_EZ_ESSB_RX_PREFIX_HASH); + m->hash.fdir.hi = + EFX_QWORD_FIELD(*qwordp, + ES_EZ_ESSB_RX_PREFIX_MARK); + +next_buf: + m = sfc_ef10_essb_next_mbuf(rxq, m); + } while (todo_bufs-- > 0); + } + + return n_rx_pkts; +} + + +static uint16_t +sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(rx_queue); + const unsigned int evq_old_read_ptr = rxq->evq_read_ptr; + uint16_t n_rx_pkts; + efx_qword_t rx_ev; + + if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING | + SFC_EF10_ESSB_RXQ_EXCEPTION))) + return 0; + + n_rx_pkts = sfc_ef10_essb_rx_get_pending(rxq, rx_pkts, nb_pkts); + + while (n_rx_pkts != nb_pkts && + sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) { + /* + * DROP_EVENT is internal to the NIC; software should + * never see it and, therefore, may ignore it. + */ + + sfc_ef10_essb_rx_process_ev(rxq, rx_ev); + n_rx_pkts += sfc_ef10_essb_rx_get_pending(rxq, + rx_pkts + n_rx_pkts, + nb_pkts - n_rx_pkts); + } + + sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask, + evq_old_read_ptr, rxq->evq_read_ptr); + + /* It is not a problem if we refill in the case of an exception */ + sfc_ef10_essb_rx_qrefill(rxq); + + return n_rx_pkts; +} + +static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending; +static unsigned int +sfc_ef10_essb_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + const unsigned int evq_old_read_ptr = rxq->evq_read_ptr; + efx_qword_t rx_ev; + + if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING | + SFC_EF10_ESSB_RXQ_EXCEPTION))) + return rxq->bufs_pending; + + while (sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) { + /* + * DROP_EVENT is internal to the NIC; software should + * never see it and, therefore, may ignore it.
+ */ + sfc_ef10_essb_rx_process_ev(rxq, rx_ev); + } + + sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask, + evq_old_read_ptr, rxq->evq_read_ptr); + + return rxq->bufs_pending; +} + +static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status; +static int +sfc_ef10_essb_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + unsigned int pending = sfc_ef10_essb_rx_qdesc_npending(dp_rxq); + + if (offset < pending) + return RTE_ETH_RX_DESC_DONE; + + if (offset < (rxq->added - rxq->completed) * rxq->block_size + + rxq->left_in_completed - rxq->block_size) + return RTE_ETH_RX_DESC_AVAIL; + + return RTE_ETH_RX_DESC_UNAVAIL; +} + +static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info; +static void +sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info) +{ + /* + * The number of descriptors just defines the maximum number of + * pushed descriptors (fill level). + */ + dev_info->rx_desc_lim.nb_min = SFC_EF10_ESSB_RX_DESCS_MIN; + dev_info->rx_desc_lim.nb_align = SFC_EF10_ESSB_RX_DESCS_ALIGN; +} + +static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported; +static int +sfc_ef10_essb_rx_pool_ops_supported(const char *pool) +{ + SFC_ASSERT(pool != NULL); + + if (strcmp(pool, "bucket") == 0) + return 0; + + return -ENOTSUP; +} + +static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings; +static int +sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc, + struct rte_mempool *mb_pool, + unsigned int *rxq_entries, + unsigned int *evq_entries, + unsigned int *rxq_max_fill_level) +{ + int rc; + struct rte_mempool_info mp_info; + unsigned int nb_hw_rx_desc; + unsigned int max_events; + + rc = rte_mempool_ops_get_info(mb_pool, &mp_info); + if (rc != 0) + return -rc; + if (mp_info.contig_block_size == 0) + return EINVAL; + + /* + * Calculate the required number of hardware Rx descriptors, each + * carrying contig-block-size Rx buffers. + * It cannot be less than Rx write pointer alignment plus 1 + * in order to avoid cases where the ring is guaranteed to be + * empty. + */ + nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc, + mp_info.contig_block_size), + SFC_EF10_RX_WPTR_ALIGN + 1); + if (nb_hw_rx_desc <= EFX_RXQ_MINNDESCS) { + *rxq_entries = EFX_RXQ_MINNDESCS; + } else { + *rxq_entries = rte_align32pow2(nb_hw_rx_desc); + if (*rxq_entries > EFX_RXQ_MAXNDESCS) + return EINVAL; + } + + max_events = RTE_ALIGN_FLOOR(nb_hw_rx_desc, SFC_EF10_RX_WPTR_ALIGN) * + mp_info.contig_block_size + + (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ + + 1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */; + + *evq_entries = rte_align32pow2(max_events); + *evq_entries = RTE_MAX(*evq_entries, (unsigned int)EFX_EVQ_MINNEVS); + *evq_entries = RTE_MIN(*evq_entries, (unsigned int)EFX_EVQ_MAXNEVS); + + /* + * Even the maximum event queue size may be insufficient to handle + * so many Rx descriptors. If so, limit the Rx queue fill level.
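 + * + * Worked example (assuming a 512-entry RxQ minimum and 8 events per + * cache line): nb_rx_desc = 2048 with 32-buffer contiguous blocks + * needs nb_hw_rx_desc = max(2048 / 32, 8 + 1) = 64, so rxq_entries = + * 512; max_events = 64 * 32 + 7 + 3 = 2058, so evq_entries = 4096 and + * the fill level below is capped at min(2048, 4096 - 1 - 7 - 2) = 2048.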
+ */ + *rxq_max_fill_level = RTE_MIN(nb_rx_desc, + SFC_EF10_ESSB_RXQ_LIMIT(*evq_entries)); + return 0; +} + +static sfc_dp_rx_qcreate_t sfc_ef10_essb_rx_qcreate; +static int +sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id, + const struct rte_pci_addr *pci_addr, int socket_id, + const struct sfc_dp_rx_qcreate_info *info, + struct sfc_dp_rxq **dp_rxqp) +{ + struct rte_mempool * const mp = info->refill_mb_pool; + struct rte_mempool_info mp_info; + struct sfc_ef10_essb_rxq *rxq; + int rc; + + rc = rte_mempool_ops_get_info(mp, &mp_info); + if (rc != 0) { + /* Positive errno is used in the driver */ + rc = -rc; + goto fail_get_contig_block_size; + } + + /* Check if the mempool provides block dequeue */ + rc = EINVAL; + if (mp_info.contig_block_size == 0) + goto fail_no_block_dequeue; + + rc = ENOMEM; + rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq == NULL) + goto fail_rxq_alloc; + + sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr); + + rc = ENOMEM; + rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring", + info->rxq_entries, + sizeof(*rxq->sw_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq->sw_ring == NULL) + goto fail_desc_alloc; + + rxq->block_size = mp_info.contig_block_size; + rxq->buf_stride = mp->header_size + mp->elt_size + mp->trailer_size; + rxq->rxq_ptr_mask = info->rxq_entries - 1; + rxq->evq_ptr_mask = info->evq_entries - 1; + rxq->evq_hw_ring = info->evq_hw_ring; + rxq->port_id = port_id; + + rxq->max_fill_level = info->max_fill_level / mp_info.contig_block_size; + rxq->refill_threshold = + RTE_MAX(info->refill_threshold / mp_info.contig_block_size, + SFC_EF10_RX_WPTR_ALIGN); + rxq->refill_mb_pool = mp; + rxq->rxq_hw_ring = info->rxq_hw_ring; + + rxq->doorbell = (volatile uint8_t *)info->mem_bar + + ER_DZ_RX_DESC_UPD_REG_OFST + + (info->hw_index << info->vi_window_shift); + + sfc_ef10_essb_rx_info(&rxq->dp.dpq, + "block size is %u, buf stride is %u", + rxq->block_size, rxq->buf_stride); + sfc_ef10_essb_rx_info(&rxq->dp.dpq, + "max fill level is %u descs (%u bufs), " + "refill threshold %u descs (%u bufs)", + rxq->max_fill_level, + rxq->max_fill_level * rxq->block_size, + rxq->refill_threshold, + rxq->refill_threshold * rxq->block_size); + + *dp_rxqp = &rxq->dp; + return 0; + +fail_desc_alloc: + rte_free(rxq); + +fail_rxq_alloc: +fail_no_block_dequeue: +fail_get_contig_block_size: + return rc; +} + +static sfc_dp_rx_qdestroy_t sfc_ef10_essb_rx_qdestroy; +static void +sfc_ef10_essb_rx_qdestroy(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + + rte_free(rxq->sw_ring); + rte_free(rxq); +} + +static sfc_dp_rx_qstart_t sfc_ef10_essb_rx_qstart; +static int +sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + + rxq->evq_read_ptr = evq_read_ptr; + + /* Initialize before refill */ + rxq->completed = rxq->pending_id = rxq->added = 0; + rxq->left_in_completed = rxq->left_in_pending = rxq->block_size; + rxq->bufs_ptr = UINT_MAX; + rxq->bufs_pending = 0; + + sfc_ef10_essb_rx_qrefill(rxq); + + rxq->flags |= SFC_EF10_ESSB_RXQ_STARTED; + rxq->flags &= + ~(SFC_EF10_ESSB_RXQ_NOT_RUNNING | SFC_EF10_ESSB_RXQ_EXCEPTION); + + return 0; +} + +static sfc_dp_rx_qstop_t sfc_ef10_essb_rx_qstop; +static void +sfc_ef10_essb_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + + rxq->flags
|= SFC_EF10_ESSB_RXQ_NOT_RUNNING; + + *evq_read_ptr = rxq->evq_read_ptr; +} + +static sfc_dp_rx_qrx_ev_t sfc_ef10_essb_rx_qrx_ev; +static bool +sfc_ef10_essb_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id) +{ + __rte_unused struct sfc_ef10_essb_rxq *rxq; + + rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + SFC_ASSERT(rxq->flags & SFC_EF10_ESSB_RXQ_NOT_RUNNING); + + /* + * It is safe to ignore Rx event since we free all mbufs on + * queue purge anyway. + */ + + return false; +} + +static sfc_dp_rx_qpurge_t sfc_ef10_essb_rx_qpurge; +static void +sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + unsigned int i; + const struct sfc_ef10_essb_rx_sw_desc *rxd; + struct rte_mbuf *m; + + for (i = rxq->completed; i != rxq->added; ++i) { + rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask]; + m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf, + rxq->block_size - rxq->left_in_completed); + while (rxq->left_in_completed > 0) { + rte_mempool_put(rxq->refill_mb_pool, m); + m = sfc_ef10_essb_next_mbuf(rxq, m); + rxq->left_in_completed--; + } + rxq->left_in_completed = rxq->block_size; + } + + rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED; +} + +struct sfc_dp_rx sfc_ef10_essb_rx = { + .dp = { + .name = SFC_KVARG_DATAPATH_EF10_ESSB, + .type = SFC_DP_RX, + .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10 | + SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER, + }, + .features = SFC_DP_RX_FEAT_FLOW_FLAG | + SFC_DP_RX_FEAT_FLOW_MARK | + SFC_DP_RX_FEAT_CHECKSUM, + .get_dev_info = sfc_ef10_essb_rx_get_dev_info, + .pool_ops_supported = sfc_ef10_essb_rx_pool_ops_supported, + .qsize_up_rings = sfc_ef10_essb_rx_qsize_up_rings, + .qcreate = sfc_ef10_essb_rx_qcreate, + .qdestroy = sfc_ef10_essb_rx_qdestroy, + .qstart = sfc_ef10_essb_rx_qstart, + .qstop = sfc_ef10_essb_rx_qstop, + .qrx_ev = sfc_ef10_essb_rx_qrx_ev, + .qpurge = sfc_ef10_essb_rx_qpurge, + .supported_ptypes_get = sfc_ef10_supported_ptypes_get, + .qdesc_npending = sfc_ef10_essb_rx_qdesc_npending, + .qdesc_status = sfc_ef10_essb_rx_qdesc_status, + .pkt_burst = sfc_ef10_essb_recv_pkts, +}; diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c index 0b3e8fbb..6a5052b9 100644 --- a/drivers/net/sfc/sfc_ef10_rx.c +++ b/drivers/net/sfc/sfc_ef10_rx.c @@ -26,15 +26,12 @@ #include "sfc_kvargs.h" #include "sfc_ef10.h" +#define SFC_EF10_RX_EV_ENCAP_SUPPORT 1 +#include "sfc_ef10_rx_ev.h" + #define sfc_ef10_rx_err(dpq, ...) \ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__) -/** - * Alignment requirement for value written to RX WPTR: - * the WPTR must be aligned to an 8 descriptor boundary. - */ -#define SFC_EF10_RX_WPTR_ALIGN 8 - /** * Maximum number of descriptors/buffers in the Rx ring. * It should guarantee that corresponding event queue never overfill. @@ -87,29 +84,6 @@ sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq) return container_of(dp_rxq, struct sfc_ef10_rxq, dp); } -static void -sfc_ef10_rx_qpush(struct sfc_ef10_rxq *rxq) -{ - efx_dword_t dword; - - /* Hardware has alignment restriction for WPTR */ - RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0); - SFC_ASSERT(RTE_ALIGN(rxq->added, SFC_EF10_RX_WPTR_ALIGN) == rxq->added); - - EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, - rxq->added & rxq->ptr_mask); - - /* DMA sync to device is not required */ - - /* - * rte_write32() has rte_io_wmb() which guarantees that the STORE - * operations (i.e. 
Rx and event descriptor updates) that precede - * the rte_io_wmb() call are visible to NIC before the STORE - * operations that follow it (i.e. doorbell write). - */ - rte_write32(dword.ed_u32[0], rxq->doorbell); -} - static void sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq) { @@ -120,6 +94,8 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq) void *objs[SFC_RX_REFILL_BULK]; unsigned int added = rxq->added; + RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0); + free_space = rxq->max_fill_level - (added - rxq->completed); if (free_space < rxq->refill_threshold) @@ -178,7 +154,7 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq) SFC_ASSERT(rxq->added != added); rxq->added = added; - sfc_ef10_rx_qpush(rxq); + sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask); } static void @@ -225,137 +201,6 @@ sfc_ef10_rx_prepared(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts, return n_rx_pkts; } -static void -sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev, - struct rte_mbuf *m) -{ - uint32_t tun_ptype = 0; - /* Which event bit is mapped to PKT_RX_IP_CKSUM_* */ - int8_t ip_csum_err_bit; - /* Which event bit is mapped to PKT_RX_L4_CKSUM_* */ - int8_t l4_csum_err_bit; - uint32_t l2_ptype = 0; - uint32_t l3_ptype = 0; - uint32_t l4_ptype = 0; - uint64_t ol_flags = 0; - - if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, ESF_DZ_RX_PARSE_INCOMPLETE_LBN))) - goto done; - - switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) { - default: - /* Unexpected encapsulation tag class */ - SFC_ASSERT(false); - /* FALLTHROUGH */ - case ESE_EZ_ENCAP_HDR_NONE: - break; - case ESE_EZ_ENCAP_HDR_VXLAN: - /* - * It is definitely UDP, but we have no information - * about IPv4 vs IPv6 and VLAN tagging. - */ - tun_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP; - break; - case ESE_EZ_ENCAP_HDR_GRE: - /* - * We have no information about IPv4 vs IPv6 and VLAN tagging. - */ - tun_ptype = RTE_PTYPE_TUNNEL_NVGRE; - break; - } - - if (tun_ptype == 0) { - ip_csum_err_bit = ESF_DZ_RX_IPCKSUM_ERR_LBN; - l4_csum_err_bit = ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN; - } else { - ip_csum_err_bit = ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN; - l4_csum_err_bit = ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN; - if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, - ESF_DZ_RX_IPCKSUM_ERR_LBN))) - ol_flags |= PKT_RX_EIP_CKSUM_BAD; - } - - switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) { - case ESE_DZ_ETH_TAG_CLASS_NONE: - l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER : - RTE_PTYPE_INNER_L2_ETHER; - break; - case ESE_DZ_ETH_TAG_CLASS_VLAN1: - l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_VLAN : - RTE_PTYPE_INNER_L2_ETHER_VLAN; - break; - case ESE_DZ_ETH_TAG_CLASS_VLAN2: - l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_QINQ : - RTE_PTYPE_INNER_L2_ETHER_QINQ; - break; - default: - /* Unexpected Eth tag class */ - SFC_ASSERT(false); - } - - switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) { - case ESE_DZ_L3_CLASS_IP4_FRAG: - l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG : - RTE_PTYPE_INNER_L4_FRAG; - /* FALLTHROUGH */ - case ESE_DZ_L3_CLASS_IP4: - l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; - ol_flags |= PKT_RX_RSS_HASH | - ((EFX_TEST_QWORD_BIT(rx_ev, ip_csum_err_bit)) ? - PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD); - break; - case ESE_DZ_L3_CLASS_IP6_FRAG: - l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG : - RTE_PTYPE_INNER_L4_FRAG; - /* FALLTHROUGH */ - case ESE_DZ_L3_CLASS_IP6: - l3_ptype = (tun_ptype == 0) ? 
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; - ol_flags |= PKT_RX_RSS_HASH; - break; - case ESE_DZ_L3_CLASS_ARP: - /* Override Layer 2 packet type */ - /* There is no ARP classification for inner packets */ - if (tun_ptype == 0) - l2_ptype = RTE_PTYPE_L2_ETHER_ARP; - break; - default: - /* Unexpected Layer 3 class */ - SFC_ASSERT(false); - } - - switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L4_CLASS)) { - case ESE_DZ_L4_CLASS_TCP: - l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP : - RTE_PTYPE_INNER_L4_TCP; - ol_flags |= - (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ? - PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD; - break; - case ESE_DZ_L4_CLASS_UDP: - l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP : - RTE_PTYPE_INNER_L4_UDP; - ol_flags |= - (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ? - PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD; - break; - case ESE_DZ_L4_CLASS_UNKNOWN: - break; - default: - /* Unexpected Layer 4 class */ - SFC_ASSERT(false); - } - - /* Remove RSS hash offload flag if RSS is not enabled */ - if (~rxq->flags & SFC_EF10_RXQ_RSS_HASH) - ol_flags &= ~PKT_RX_RSS_HASH; - -done: - m->ol_flags = ol_flags; - m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype; -} - static uint16_t sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr) { @@ -414,7 +259,10 @@ sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev, m->rearm_data[0] = rxq->rearm_data; /* Classify packet based on Rx event */ - sfc_ef10_rx_ev_to_offloads(rxq, rx_ev, m); + /* Mask RSS hash offload flag if RSS is not enabled */ + sfc_ef10_rx_ev_to_offloads(rx_ev, m, + (rxq->flags & SFC_EF10_RXQ_RSS_HASH) ? + ~0ull : ~PKT_RX_RSS_HASH); /* data_off already moved past pseudo header */ pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM; @@ -538,7 +386,7 @@ sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) return n_rx_pkts; } -static const uint32_t * +const uint32_t * sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps) { static const uint32_t ef10_native_ptypes[] = { @@ -587,8 +435,8 @@ sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps) 1u << EFX_TUNNEL_PROTOCOL_NVGRE): return ef10_overlay_ptypes; default: - RTE_LOG(ERR, PMD, - "Unexpected set of supported tunnel encapsulations: %#x\n", + SFC_GENERIC_LOG(ERR, + "Unexpected set of supported tunnel encapsulations: %#x", tunnel_encaps); /* FALLTHROUGH */ case 0: @@ -632,6 +480,7 @@ sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info) static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings; static int sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc, + __rte_unused struct rte_mempool *mb_pool, unsigned int *rxq_entries, unsigned int *evq_entries, unsigned int *rxq_max_fill_level) @@ -716,7 +565,7 @@ sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id, rxq->rxq_hw_ring = info->rxq_hw_ring; rxq->doorbell = (volatile uint8_t *)info->mem_bar + ER_DZ_RX_DESC_UPD_REG_OFST + - info->hw_index * ER_DZ_RX_DESC_UPD_REG_STEP; + (info->hw_index << info->vi_window_shift); *dp_rxqp = &rxq->dp; return 0; @@ -809,7 +658,8 @@ struct sfc_dp_rx sfc_ef10_rx = { .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10, }, .features = SFC_DP_RX_FEAT_MULTI_PROCESS | - SFC_DP_RX_FEAT_TUNNELS, + SFC_DP_RX_FEAT_TUNNELS | + SFC_DP_RX_FEAT_CHECKSUM, .get_dev_info = sfc_ef10_rx_get_dev_info, .qsize_up_rings = sfc_ef10_rx_qsize_up_rings, .qcreate = sfc_ef10_rx_qcreate, diff --git a/drivers/net/sfc/sfc_ef10_rx_ev.h b/drivers/net/sfc/sfc_ef10_rx_ev.h new file mode 100644 index 00000000..868c755f --- /dev/null +++ 
b/drivers/net/sfc/sfc_ef10_rx_ev.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2018 Solarflare Communications Inc. + * All rights reserved. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_EF10_RX_EV_H +#define _SFC_EF10_RX_EV_H + +#include + +#include "efx_types.h" +#include "efx_regs.h" +#include "efx_regs_ef10.h" + +#ifdef __cplusplus +extern "C" { +#endif + +static inline void +sfc_ef10_rx_ev_to_offloads(const efx_qword_t rx_ev, struct rte_mbuf *m, + uint64_t ol_mask) +{ + uint32_t tun_ptype = 0; + /* Which event bit is mapped to PKT_RX_IP_CKSUM_* */ + int8_t ip_csum_err_bit; + /* Which event bit is mapped to PKT_RX_L4_CKSUM_* */ + int8_t l4_csum_err_bit; + uint32_t l2_ptype = 0; + uint32_t l3_ptype = 0; + uint32_t l4_ptype = 0; + uint64_t ol_flags = 0; + + if (unlikely(rx_ev.eq_u64[0] & + rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) | + (1ull << ESF_DZ_RX_ECRC_ERR_LBN) | + (1ull << ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))) { + /* Zero packet type is used as a marker to discard bad packets */ + goto done; + } + +#if SFC_EF10_RX_EV_ENCAP_SUPPORT + switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) { + default: + /* Unexpected encapsulation tag class */ + SFC_ASSERT(false); + /* FALLTHROUGH */ + case ESE_EZ_ENCAP_HDR_NONE: + break; + case ESE_EZ_ENCAP_HDR_VXLAN: + /* + * It is definitely UDP, but we have no information + * about IPv4 vs IPv6 and VLAN tagging. + */ + tun_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP; + break; + case ESE_EZ_ENCAP_HDR_GRE: + /* + * We have no information about IPv4 vs IPv6 and VLAN tagging. + */ + tun_ptype = RTE_PTYPE_TUNNEL_NVGRE; + break; + } +#endif + + if (tun_ptype == 0) { + ip_csum_err_bit = ESF_DZ_RX_IPCKSUM_ERR_LBN; + l4_csum_err_bit = ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN; + } else { + ip_csum_err_bit = ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN; + l4_csum_err_bit = ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN; + if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, + ESF_DZ_RX_IPCKSUM_ERR_LBN))) + ol_flags |= PKT_RX_EIP_CKSUM_BAD; + } + + switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) { + case ESE_DZ_ETH_TAG_CLASS_NONE: + l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER : + RTE_PTYPE_INNER_L2_ETHER; + break; + case ESE_DZ_ETH_TAG_CLASS_VLAN1: + l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_VLAN : + RTE_PTYPE_INNER_L2_ETHER_VLAN; + break; + case ESE_DZ_ETH_TAG_CLASS_VLAN2: + l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_QINQ : + RTE_PTYPE_INNER_L2_ETHER_QINQ; + break; + default: + /* Unexpected Eth tag class */ + SFC_ASSERT(false); + } + + switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) { + case ESE_DZ_L3_CLASS_IP4_FRAG: + l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG : + RTE_PTYPE_INNER_L4_FRAG; + /* FALLTHROUGH */ + case ESE_DZ_L3_CLASS_IP4: + l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; + ol_flags |= PKT_RX_RSS_HASH | + ((EFX_TEST_QWORD_BIT(rx_ev, ip_csum_err_bit)) ? + PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD); + break; + case ESE_DZ_L3_CLASS_IP6_FRAG: + l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG : + RTE_PTYPE_INNER_L4_FRAG; + /* FALLTHROUGH */ + case ESE_DZ_L3_CLASS_IP6: + l3_ptype = (tun_ptype == 0) ?
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; + ol_flags |= PKT_RX_RSS_HASH; + break; + case ESE_DZ_L3_CLASS_ARP: + /* Override Layer 2 packet type */ + /* There is no ARP classification for inner packets */ + if (tun_ptype == 0) + l2_ptype = RTE_PTYPE_L2_ETHER_ARP; + break; + case ESE_DZ_L3_CLASS_UNKNOWN: + break; + default: + /* Unexpected Layer 3 class */ + SFC_ASSERT(false); + } + + /* + * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only + * 2 bits wide on Medford2. Check it is safe to use the Medford2 field + * and values for all EF10 controllers. + */ + RTE_BUILD_BUG_ON(ESF_FZ_RX_L4_CLASS_LBN != ESF_DE_RX_L4_CLASS_LBN); + switch (EFX_QWORD_FIELD(rx_ev, ESF_FZ_RX_L4_CLASS)) { + case ESE_FZ_L4_CLASS_TCP: + RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_TCP != ESE_DE_L4_CLASS_TCP); + l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP : + RTE_PTYPE_INNER_L4_TCP; + ol_flags |= + (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ? + PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD; + break; + case ESE_FZ_L4_CLASS_UDP: + RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UDP != ESE_DE_L4_CLASS_UDP); + l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP : + RTE_PTYPE_INNER_L4_UDP; + ol_flags |= + (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ? + PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD; + break; + case ESE_FZ_L4_CLASS_UNKNOWN: + RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UNKNOWN != + ESE_DE_L4_CLASS_UNKNOWN); + break; + default: + /* Unexpected Layer 4 class */ + SFC_ASSERT(false); + } + + SFC_ASSERT(l2_ptype != 0); + +done: + m->ol_flags = ol_flags & ol_mask; + m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype; +} + + +#ifdef __cplusplus +} +#endif +#endif /* _SFC_EF10_RX_EV_H */ diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c index 12387972..d0daa3b3 100644 --- a/drivers/net/sfc/sfc_ef10_tx.c +++ b/drivers/net/sfc/sfc_ef10_tx.c @@ -531,7 +531,7 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id, txq->txq_hw_ring = info->txq_hw_ring; txq->doorbell = (volatile uint8_t *)info->mem_bar + ER_DZ_TX_DESC_UPD_REG_OFST + - info->hw_index * ER_DZ_TX_DESC_UPD_REG_STEP; + (info->hw_index << info->vi_window_shift); txq->evq_hw_ring = info->evq_hw_ring; *dp_txqp = &txq->dp; diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c index 89a45290..9decbf5a 100644 --- a/drivers/net/sfc/sfc_ethdev.c +++ b/drivers/net/sfc/sfc_ethdev.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "efx.h" @@ -27,6 +28,8 @@ #include "sfc_dp.h" #include "sfc_dp_rx.h" +uint32_t sfc_logtype_driver; + static struct sfc_dp_list sfc_dp_head = TAILQ_HEAD_INITIALIZER(sfc_dp_head); @@ -82,12 +85,11 @@ static void sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct sfc_adapter *sa = dev->data->dev_private; - const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + struct sfc_rss *rss = &sa->rss; uint64_t txq_offloads_def = 0; sfc_log_init(sa, "entry"); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX; /* Autonegotiation may be disabled */ @@ -96,8 +98,14 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->speed_capa |= ETH_LINK_SPEED_1G; if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX) dev_info->speed_capa |= ETH_LINK_SPEED_10G; + if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_25000FDX) + dev_info->speed_capa |= ETH_LINK_SPEED_25G; if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX) dev_info->speed_capa |= ETH_LINK_SPEED_40G; + if (sa->port.phy_adv_cap_mask & 
EFX_PHY_CAP_50000FDX) + dev_info->speed_capa |= ETH_LINK_SPEED_50G; + if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_100000FDX) + dev_info->speed_capa |= ETH_LINK_SPEED_100G; dev_info->max_rx_queues = sa->rxq_max; dev_info->max_tx_queues = sa->txq_max; @@ -130,27 +138,17 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->default_txconf.offloads |= txq_offloads_def; - dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP; - if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) || - !encp->enc_hw_tx_insert_vlan_enabled) - dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL; - - if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG) - dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS; - - if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) - dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTMEMP; + if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) { + uint64_t rte_hf = 0; + unsigned int i; - if (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT) - dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT; + for (i = 0; i < rss->hf_map_nb_entries; ++i) + rte_hf |= rss->hf_map[i].rte; -#if EFSYS_OPT_RX_SCALE - if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) { dev_info->reta_size = EFX_RSS_TBL_SIZE; dev_info->hash_key_size = EFX_RSS_KEY_SIZE; - dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS; + dev_info->flow_type_rss_offloads = rte_hf; } -#endif /* Initialize to hardware limits */ dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS; @@ -236,22 +234,13 @@ static int sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) { struct sfc_adapter *sa = dev->data->dev_private; - struct rte_eth_link *dev_link = &dev->data->dev_link; - struct rte_eth_link old_link; struct rte_eth_link current_link; + int ret; sfc_log_init(sa, "entry"); -retry: - EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t)); - *(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link); - if (sa->state != SFC_ADAPTER_STARTED) { sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, ¤t_link); - if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link, - *(uint64_t *)&old_link, - *(uint64_t *)¤t_link)) - goto retry; } else if (wait_to_complete) { efx_link_mode_t link_mode; @@ -259,21 +248,17 @@ retry: link_mode = EFX_LINK_UNKNOWN; sfc_port_link_mode_to_info(link_mode, ¤t_link); - if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link, - *(uint64_t *)&old_link, - *(uint64_t *)¤t_link)) - goto retry; } else { sfc_ev_mgmt_qpoll(sa); - *(int64_t *)¤t_link = - rte_atomic64_read((rte_atomic64_t *)dev_link); + rte_eth_linkstatus_get(dev, ¤t_link); } - if (old_link.link_status != current_link.link_status) - sfc_info(sa, "Link status is %s", - current_link.link_status ? "UP" : "DOWN"); + ret = rte_eth_linkstatus_set(dev, ¤t_link); + if (ret == 0) + sfc_notice(sa, "Link status is %s", + current_link.link_status ? "UP" : "DOWN"); - return old_link.link_status == current_link.link_status ? 
0 : -1; + return ret; } static void @@ -664,7 +649,7 @@ sfc_xstats_get_names(struct rte_eth_dev *dev, for (i = 0; i < EFX_MAC_NSTATS; ++i) { if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { if (xstats_names != NULL && nstats < xstats_count) - strncpy(xstats_names[nstats].name, + strlcpy(xstats_names[nstats].name, efx_mac_stat_name(sa->nic, i), sizeof(xstats_names[0].name)); nstats++; @@ -742,9 +727,8 @@ sfc_xstats_get_names_by_id(struct rte_eth_dev *dev, if ((ids == NULL) || (ids[nb_written] == nb_supported)) { char *name = xstats_names[nb_written++].name; - strncpy(name, efx_mac_stat_name(sa->nic, i), + strlcpy(name, efx_mac_stat_name(sa->nic, i), sizeof(xstats_names[0].name)); - name[sizeof(xstats_names[0].name) - 1] = '\0'; } ++nb_supported; @@ -890,14 +874,12 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) } /* - * The driver does not use it, but other PMDs update jumbo_frame + * The driver does not use it, but other PMDs update jumbo frame * flag and max_rx_pkt_len when MTU is set. */ if (mtu > ETHER_MAX_LEN) { struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; - rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; - rxmode->jumbo_frame = 1; } dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu; @@ -920,13 +902,14 @@ fail_inval: SFC_ASSERT(rc > 0); return -rc; } -static void +static int sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { struct sfc_adapter *sa = dev->data->dev_private; const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); struct sfc_port *port = &sa->port; - int rc; + struct ether_addr *old_addr = &dev->data->mac_addrs[0]; + int rc = 0; sfc_adapter_lock(sa); @@ -936,15 +919,22 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) */ ether_addr_copy(mac_addr, &port->default_mac_addr); + /* + * Neither of the two following checks can return + * an error. The new MAC address is preserved in + * the device private data and can be activated + * on the next port start if the user prevents + * isolated mode from being enabled. + */ if (port->isolated) { - sfc_err(sa, "isolated mode is active on the port"); - sfc_err(sa, "will not set MAC address"); + sfc_warn(sa, "isolated mode is active on the port"); + sfc_warn(sa, "will not set MAC address"); goto unlock; } if (sa->state != SFC_ADAPTER_STARTED) { - sfc_info(sa, "the port is not started"); - sfc_info(sa, "the new MAC address will be set on port start"); + sfc_notice(sa, "the port is not started"); + sfc_notice(sa, "the new MAC address will be set on port start"); goto unlock; } @@ -962,8 +952,12 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) * we also need to update unicast filters */ rc = sfc_set_rx_mode(sa); - if (rc != 0) + if (rc != 0) { sfc_err(sa, "cannot set filter (rc = %u)", rc); + /* Rollback the old address */ + (void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes); + (void)sfc_set_rx_mode(sa); + } } else { sfc_warn(sa, "cannot set MAC address with filters installed"); sfc_warn(sa, "adapter will be restarted to pick the new MAC"); @@ -982,14 +976,13 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) } unlock: - /* - * In the case of failure sa->port->default_mac_addr does not - * need rollback since no error code is returned, and the upper - * API will anyway update the external MAC address storage. - * To be consistent with that new value it is better to keep - * the device private value the same. 
- */ + if (rc != 0) + ether_addr_copy(old_addr, &port->default_mac_addr); + sfc_adapter_unlock(sa); + + SFC_ASSERT(rc >= 0); + return -rc; } @@ -1034,7 +1027,7 @@ sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set, if (rc != 0) sfc_err(sa, "cannot set multicast address list (rc = %u)", rc); - SFC_ASSERT(rc > 0); + SFC_ASSERT(rc >= 0); return -rc; } @@ -1062,9 +1055,7 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, qinfo->conf.rx_free_thresh = rxq->refill_threshold; qinfo->conf.rx_drop_en = 1; qinfo->conf.rx_deferred_start = rxq_info->deferred_start; - qinfo->conf.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; + qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) { qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER; qinfo->scattered_rx = 1; @@ -1094,7 +1085,6 @@ sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, memset(qinfo, 0, sizeof(*qinfo)); - qinfo->conf.txq_flags = txq_info->txq->flags; qinfo->conf.offloads = txq_info->txq->offloads; qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh; qinfo->conf.tx_deferred_start = txq_info->deferred_start; @@ -1352,18 +1342,18 @@ sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT); } -#if EFSYS_OPT_RX_SCALE static int sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { struct sfc_adapter *sa = dev->data->dev_private; + struct sfc_rss *rss = &sa->rss; struct sfc_port *port = &sa->port; - if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated) + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated) return -ENOTSUP; - if (sa->rss_channels == 0) + if (rss->channels == 0) return -EINVAL; sfc_adapter_lock(sa); @@ -1374,10 +1364,10 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev, * flags which corresponds to the active EFX configuration stored * locally in 'sfc_adapter' and kept up-to-date */ - rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types); + rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(sa, rss->hash_types); rss_conf->rss_key_len = EFX_RSS_KEY_SIZE; if (rss_conf->rss_key != NULL) - rte_memcpy(rss_conf->rss_key, sa->rss_key, EFX_RSS_KEY_SIZE); + rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE); sfc_adapter_unlock(sa); @@ -1389,6 +1379,7 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { struct sfc_adapter *sa = dev->data->dev_private; + struct sfc_rss *rss = &sa->rss; struct sfc_port *port = &sa->port; unsigned int efx_hash_types; int rc = 0; @@ -1396,35 +1387,31 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev, if (port->isolated) return -ENOTSUP; - if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) { + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) { sfc_err(sa, "RSS is not available"); return -ENOTSUP; } - if (sa->rss_channels == 0) { + if (rss->channels == 0) { sfc_err(sa, "RSS is not configured"); return -EINVAL; } if ((rss_conf->rss_key != NULL) && - (rss_conf->rss_key_len != sizeof(sa->rss_key))) { + (rss_conf->rss_key_len != sizeof(rss->key))) { sfc_err(sa, "RSS key size is wrong (should be %lu)", - sizeof(sa->rss_key)); - return -EINVAL; - } - - if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) { - sfc_err(sa, "unsupported hash functions requested"); + sizeof(rss->key)); return -EINVAL; } sfc_adapter_lock(sa); - efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf); + rc = 
sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types); + if (rc != 0) + goto fail_rx_hf_rte_to_efx; rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, - EFX_RX_HASHALG_TOEPLITZ, - efx_hash_types, B_TRUE); + rss->hash_alg, efx_hash_types, B_TRUE); if (rc != 0) goto fail_scale_mode_set; @@ -1433,15 +1420,15 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev, rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, rss_conf->rss_key, - sizeof(sa->rss_key)); + sizeof(rss->key)); if (rc != 0) goto fail_scale_key_set; } - rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key)); + rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key)); } - sa->rss_hash_types = efx_hash_types; + rss->hash_types = efx_hash_types; sfc_adapter_unlock(sa); @@ -1450,10 +1437,11 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev, fail_scale_key_set: if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, EFX_RX_HASHALG_TOEPLITZ, - sa->rss_hash_types, B_TRUE) != 0) + rss->hash_types, B_TRUE) != 0) sfc_err(sa, "failed to restore RSS mode"); fail_scale_mode_set: +fail_rx_hf_rte_to_efx: sfc_adapter_unlock(sa); return -rc; } @@ -1464,13 +1452,14 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev, uint16_t reta_size) { struct sfc_adapter *sa = dev->data->dev_private; + struct sfc_rss *rss = &sa->rss; struct sfc_port *port = &sa->port; int entry; - if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated) + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated) return -ENOTSUP; - if (sa->rss_channels == 0) + if (rss->channels == 0) return -EINVAL; if (reta_size != EFX_RSS_TBL_SIZE) @@ -1483,7 +1472,7 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev, int grp_idx = entry % RTE_RETA_GROUP_SIZE; if ((reta_conf[grp].mask >> grp_idx) & 1) - reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry]; + reta_conf[grp].reta[grp_idx] = rss->tbl[entry]; } sfc_adapter_unlock(sa); @@ -1497,6 +1486,7 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev, uint16_t reta_size) { struct sfc_adapter *sa = dev->data->dev_private; + struct sfc_rss *rss = &sa->rss; struct sfc_port *port = &sa->port; unsigned int *rss_tbl_new; uint16_t entry; @@ -1506,12 +1496,12 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev, if (port->isolated) return -ENOTSUP; - if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) { + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) { sfc_err(sa, "RSS is not available"); return -ENOTSUP; } - if (sa->rss_channels == 0) { + if (rss->channels == 0) { sfc_err(sa, "RSS is not configured"); return -EINVAL; } @@ -1522,13 +1512,13 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev, return -EINVAL; } - rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0); + rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0); if (rss_tbl_new == NULL) return -ENOMEM; sfc_adapter_lock(sa); - rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl)); + rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl)); for (entry = 0; entry < reta_size; entry++) { int grp_idx = entry % RTE_RETA_GROUP_SIZE; struct rte_eth_rss_reta_entry64 *grp; grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE]; if (grp->mask & (1ull << grp_idx)) { - if (grp->reta[grp_idx] >= sa->rss_channels) { + if (grp->reta[grp_idx] >= rss->channels) { rc = EINVAL; goto bad_reta_entry; } rss_tbl_new[entry] = grp->reta[grp_idx]; } } rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, rss_tbl_new, EFX_RSS_TBL_SIZE); if (rc != 0) { sfc_err(sa, "cannot set RSS table"); goto fail_scale_tbl_set; } - rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl)); + rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl));
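 + /* + * For reference, the application-side counterpart of this handler, + * a sketch spreading the indirection table across nb_queues Rx + * queues in round-robin order (the port id and queue count are + * illustrative; this driver reports reta_size == EFX_RSS_TBL_SIZE): + * + * static int + * example_reta_round_robin(uint16_t port_id, uint16_t reta_size, + * uint16_t nb_queues) + * { + * struct rte_eth_rss_reta_entry64 + * conf[reta_size / RTE_RETA_GROUP_SIZE]; + * uint16_t i; + * + * memset(conf, 0, sizeof(conf)); + * for (i = 0; i < reta_size; i++) { + * conf[i / RTE_RETA_GROUP_SIZE].mask |= + * 1ULL << (i % RTE_RETA_GROUP_SIZE); + * conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] = + * i % nb_queues; + * } + * return rte_eth_dev_rss_reta_update(port_id, conf, reta_size); + * } + */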
fail_scale_tbl_set: bad_reta_entry: @@ -1563,7 +1553,6 @@ bad_reta_entry: SFC_ASSERT(rc >= 0); return -rc; } -#endif static int sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, @@ -1621,6 +1610,21 @@ sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, return -rc; } +static int +sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool) +{ + struct sfc_adapter *sa = dev->data->dev_private; + + /* + * If Rx datapath does not provide callback to check mempool, + * all pools are supported. + */ + if (sa->dp_rx->pool_ops_supported == NULL) + return 1; + + return sa->dp_rx->pool_ops_supported(pool); +} + static const struct eth_dev_ops sfc_eth_dev_ops = { .dev_configure = sfc_dev_configure, .dev_start = sfc_dev_start, @@ -1658,12 +1662,10 @@ static const struct eth_dev_ops sfc_eth_dev_ops = { .mac_addr_set = sfc_mac_addr_set, .udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add, .udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del, -#if EFSYS_OPT_RX_SCALE .reta_update = sfc_dev_rss_reta_update, .reta_query = sfc_dev_rss_reta_query, .rss_hash_update = sfc_dev_rss_hash_update, .rss_hash_conf_get = sfc_dev_rss_hash_conf_get, -#endif .filter_ctrl = sfc_dev_filter_ctrl, .set_mc_addr_list = sfc_set_mc_addr_list, .rxq_info_get = sfc_rx_queue_info_get, @@ -1671,6 +1673,7 @@ static const struct eth_dev_ops sfc_eth_dev_ops = { .fw_version_get = sfc_fw_version_get, .xstats_get_by_id = sfc_xstats_get_by_id, .xstats_get_names_by_id = sfc_xstats_get_names_by_id, + .pool_ops_supported = sfc_pool_ops_supported, }; /** @@ -1700,6 +1703,7 @@ static int sfc_eth_dev_set_ops(struct rte_eth_dev *dev) { struct sfc_adapter *sa = dev->data->dev_private; + const efx_nic_cfg_t *encp; unsigned int avail_caps = 0; const char *rx_name = NULL; const char *tx_name = NULL; @@ -1708,12 +1712,17 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev) switch (sa->family) { case EFX_FAMILY_HUNTINGTON: case EFX_FAMILY_MEDFORD: + case EFX_FAMILY_MEDFORD2: avail_caps |= SFC_DP_HW_FW_CAP_EF10; break; default: break; } + encp = efx_nic_cfg_get(sa->nic); + if (encp->enc_rx_es_super_buffer_supported) + avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER; + rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH, sfc_kvarg_string_handler, &rx_name); if (rc != 0) @@ -1749,7 +1758,7 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev) goto fail_dp_rx_name; } - sfc_info(sa, "use %s Rx datapath", sa->dp_rx_name); + sfc_notice(sa, "use %s Rx datapath", sa->dp_rx_name); dev->rx_pkt_burst = sa->dp_rx->pkt_burst; @@ -1788,7 +1797,7 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev) goto fail_dp_tx_name; } - sfc_info(sa, "use %s Tx datapath", sa->dp_tx_name); + sfc_notice(sa, "use %s Tx datapath", sa->dp_tx_name); dev->tx_pkt_burst = sa->dp_tx->pkt_burst; @@ -1903,6 +1912,7 @@ sfc_register_dp(void) /* Register once */ if (TAILQ_EMPTY(&sfc_dp_head)) { /* Prefer EF10 datapath */ + sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp); sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp); sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp); @@ -1935,15 +1945,13 @@ sfc_eth_dev_init(struct rte_eth_dev *dev) /* Copy PCI device info to the dev->data */ rte_eth_copy_pci_info(dev, pci_dev); + sa->logtype_main = sfc_register_logtype(sa, SFC_LOGTYPE_MAIN_STR, + RTE_LOG_NOTICE); + rc = sfc_kvargs_parse(sa); if (rc != 0) goto fail_kvargs_parse; - rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT, - sfc_kvarg_bool_handler, &sa->debug_init); - if (rc != 0) - goto fail_kvarg_debug_init; - sfc_log_init(sa, "entry"); dev->data->mac_addrs = 
rte_zmalloc("sfc", ETHER_ADDR_LEN, 0); @@ -1997,7 +2005,6 @@ fail_probe: dev->data->mac_addrs = NULL; fail_mac_addrs: -fail_kvarg_debug_init: sfc_kvargs_cleanup(sa); fail_kvargs_parse: @@ -2048,6 +2055,8 @@ static const struct rte_pci_id pci_id_sfc_efx_map[] = { { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) }, { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) }, { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) }, + { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) }, + { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) }, { .vendor_id = 0 /* sentinel */ } }; @@ -2079,6 +2088,15 @@ RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx, SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " " SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " " SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " " - SFC_KVARG_STATS_UPDATE_PERIOD_MS "= " - SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " " - SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL); + SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " " + SFC_KVARG_RXD_WAIT_TIMEOUT_NS "= " + SFC_KVARG_STATS_UPDATE_PERIOD_MS "="); + +RTE_INIT(sfc_driver_register_logtype) +{ + int ret; + + ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver", + RTE_LOG_NOTICE); + sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret; +} diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c index 7abe61ae..f93d30e5 100644 --- a/drivers/net/sfc/sfc_ev.c +++ b/drivers/net/sfc/sfc_ev.c @@ -162,6 +162,35 @@ sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id, return evq->sa->dp_rx->qrx_ev(dp_rxq, id); } +static boolean_t +sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id, + uint32_t pkt_count, uint16_t flags) +{ + struct sfc_evq *evq = arg; + + sfc_err(evq->sa, + "EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x", + evq->evq_index, label, id, pkt_count, flags); + return B_TRUE; +} + +/* It is not actually used on datapath, but required on RxQ flush */ +static boolean_t +sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id, + __rte_unused uint32_t pkt_count, __rte_unused uint16_t flags) +{ + struct sfc_evq *evq = arg; + struct sfc_dp_rxq *dp_rxq; + + dp_rxq = evq->dp_rxq; + SFC_ASSERT(dp_rxq != NULL); + + if (evq->sa->dp_rx->qrx_ps_ev != NULL) + return evq->sa->dp_rx->qrx_ps_ev(dp_rxq, id); + else + return B_FALSE; +} + static boolean_t sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id) { @@ -382,27 +411,11 @@ sfc_ev_link_change(void *arg, efx_link_mode_t link_mode) { struct sfc_evq *evq = arg; struct sfc_adapter *sa = evq->sa; - struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link; struct rte_eth_link new_link; - uint64_t new_link_u64; - uint64_t old_link_u64; - - EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t)); sfc_port_link_mode_to_info(link_mode, &new_link); - - new_link_u64 = *(uint64_t *)&new_link; - do { - old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link); - if (old_link_u64 == new_link_u64) - break; - - if (rte_atomic64_cmpset((volatile uint64_t *)dev_link, - old_link_u64, new_link_u64)) { - evq->sa->port.lsc_seq++; - break; - } - } while (B_TRUE); + if (rte_eth_linkstatus_set(sa->eth_dev, &new_link)) + evq->sa->port.lsc_seq++; return B_FALSE; } @@ -410,6 +423,7 @@ sfc_ev_link_change(void *arg, efx_link_mode_t link_mode) static const efx_ev_callbacks_t sfc_ev_callbacks = { .eec_initialized = sfc_ev_initialized, .eec_rx = sfc_ev_nop_rx, + .eec_rx_ps = sfc_ev_nop_rx_ps, 
.eec_tx = sfc_ev_nop_tx, .eec_exception = sfc_ev_exception, .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, @@ -425,6 +439,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks = { static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = { .eec_initialized = sfc_ev_initialized, .eec_rx = sfc_ev_efx_rx, + .eec_rx_ps = sfc_ev_nop_rx_ps, .eec_tx = sfc_ev_nop_tx, .eec_exception = sfc_ev_exception, .eec_rxq_flush_done = sfc_ev_rxq_flush_done, @@ -440,6 +455,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = { static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = { .eec_initialized = sfc_ev_initialized, .eec_rx = sfc_ev_dp_rx, + .eec_rx_ps = sfc_ev_dp_rx_ps, .eec_tx = sfc_ev_nop_tx, .eec_exception = sfc_ev_exception, .eec_rxq_flush_done = sfc_ev_rxq_flush_done, @@ -455,6 +471,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = { static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = { .eec_initialized = sfc_ev_initialized, .eec_rx = sfc_ev_nop_rx, + .eec_rx_ps = sfc_ev_nop_rx_ps, .eec_tx = sfc_ev_tx, .eec_exception = sfc_ev_exception, .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, @@ -470,6 +487,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = { static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = { .eec_initialized = sfc_ev_initialized, .eec_rx = sfc_ev_nop_rx, + .eec_rx_ps = sfc_ev_nop_rx_ps, .eec_tx = sfc_ev_dp_tx, .eec_exception = sfc_ev_exception, .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, @@ -837,7 +855,7 @@ static int sfc_kvarg_perf_profile_handler(__rte_unused const char *key, const char *value_str, void *opaque) { - uint64_t *value = opaque; + uint32_t *value = opaque; if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0) *value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT; diff --git a/drivers/net/sfc/sfc_filter.c b/drivers/net/sfc/sfc_filter.c index 77e2ea56..6ff380a3 100644 --- a/drivers/net/sfc/sfc_filter.c +++ b/drivers/net/sfc/sfc_filter.c @@ -75,6 +75,7 @@ int sfc_filter_attach(struct sfc_adapter *sa) { int rc; + unsigned int i; sfc_log_init(sa, "entry"); @@ -88,6 +89,19 @@ sfc_filter_attach(struct sfc_adapter *sa) efx_filter_fini(sa->nic); + sa->filter.supports_ip_proto_or_addr_filter = B_FALSE; + sa->filter.supports_rem_or_local_port_filter = B_FALSE; + for (i = 0; i < sa->filter.supported_match_num; ++i) { + if (sa->filter.supported_match[i] & + (EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | + EFX_FILTER_MATCH_REM_HOST)) + sa->filter.supports_ip_proto_or_addr_filter = B_TRUE; + + if (sa->filter.supported_match[i] & + (EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT)) + sa->filter.supports_rem_or_local_port_filter = B_TRUE; + } + sfc_log_init(sa, "done"); return 0; diff --git a/drivers/net/sfc/sfc_filter.h b/drivers/net/sfc/sfc_filter.h index d3e1c2f9..64ab114e 100644 --- a/drivers/net/sfc/sfc_filter.h +++ b/drivers/net/sfc/sfc_filter.h @@ -25,6 +25,16 @@ struct sfc_filter { uint32_t *supported_match; /** List of flow rules */ struct sfc_flow_list flow_list; + /** + * Supports any of ip_proto, remote host or local host + * filters. This flag is used for filter match exceptions + */ + boolean_t supports_ip_proto_or_addr_filter; + /** + * Supports any of remote port or local port filters. 
+	 * This flag is used for filter match exceptions
+	 */
+	boolean_t	supports_rem_or_local_port_filter;
 };
 
 struct sfc_adapter;
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 93cdf8f4..371648b0 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -7,6 +7,7 @@
  * for Solarflare) and Solarflare Communications, Inc.
  */
 
+#include <rte_byteorder.h>
 #include <rte_tailq.h>
 #include <rte_common.h>
 #include <rte_ethdev_driver.h>
@@ -22,13 +23,17 @@
 #include "sfc_filter.h"
 #include "sfc_flow.h"
 #include "sfc_log.h"
+#include "sfc_dp_rx.h"
 
 /*
  * At now flow API is implemented in such a manner that each
- * flow rule is converted to a hardware filter.
+ * flow rule is converted to one or more hardware filters.
  * All elements of flow rule (attributes, pattern items, actions)
  * correspond to one or more fields in the efx_filter_spec_s structure
  * that is responsible for the hardware filter.
+ * If some required field is unset in the flow rule, then a handful
+ * of filter copies will be created to cover all possible values
+ * of such a field.
  */
 
 enum sfc_flow_item_layers {
@@ -57,6 +62,39 @@ static sfc_flow_item_parse sfc_flow_parse_ipv4;
 static sfc_flow_item_parse sfc_flow_parse_ipv6;
 static sfc_flow_item_parse sfc_flow_parse_tcp;
 static sfc_flow_item_parse sfc_flow_parse_udp;
+static sfc_flow_item_parse sfc_flow_parse_vxlan;
+static sfc_flow_item_parse sfc_flow_parse_geneve;
+static sfc_flow_item_parse sfc_flow_parse_nvgre;
+
+typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
+				     unsigned int filters_count_for_one_val,
+				     struct rte_flow_error *error);
+
+typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
+					efx_filter_spec_t *spec,
+					struct sfc_filter *filter);
+
+struct sfc_flow_copy_flag {
+	/* EFX filter specification match flag */
+	efx_filter_match_flags_t flag;
+	/* Number of values of corresponding field */
+	unsigned int vals_count;
+	/* Function to set values in specifications */
+	sfc_flow_spec_set_vals *set_vals;
+	/*
+	 * Function to check that the specification is suitable
+	 * for adding this match flag
+	 */
+	sfc_flow_spec_check *spec_check;
+};
+
+static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
+static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
+static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
+static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
+static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
+static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
+static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;
 
 static boolean_t
 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
@@ -85,7 +123,6 @@ sfc_flow_parse_init(const struct rte_flow_item *item,
 	const uint8_t *spec;
 	const uint8_t *mask;
 	const uint8_t *last;
-	uint8_t match;
 	uint8_t supp;
 	unsigned int i;
 
@@ -115,13 +152,13 @@ sfc_flow_parse_init(const struct rte_flow_item *item,
 			return -rte_errno;
 		}
 
-		mask = (const uint8_t *)def_mask;
+		mask = def_mask;
 	} else {
-		mask = (const uint8_t *)item->mask;
+		mask = item->mask;
 	}
 
-	spec = (const uint8_t *)item->spec;
-	last = (const uint8_t *)item->last;
+	spec = item->spec;
+	last = item->last;
 
 	if (spec == NULL)
 		goto exit;
@@ -146,12 +183,11 @@ sfc_flow_parse_init(const struct rte_flow_item *item,
 			return -rte_errno;
 		}
 
-	/* Check that mask and spec not asks for more match than supp_mask */
+	/* Check that mask does not ask for more match than supp_mask */
 	for (i = 0; i < size; i++) {
-		match = spec[i] | mask[i];
 		supp = ((const uint8_t *)supp_mask)[i];
 
-		if ((match | supp) != supp) {
+		if (~supp & mask[i]) {
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "Item's field is not supported"); @@ -184,11 +220,11 @@ sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item, * Convert Ethernet item to EFX filter specification. * * @param item[in] - * Item specification. Only source and destination addresses and - * Ethernet type fields are supported. In addition to full and - * empty masks of destination address, individual/group mask is - * also supported. If the mask is NULL, default mask will be used. - * Ranging is not supported. + * Item specification. Outer frame specification may only comprise + * source/destination addresses and Ethertype field. + * Inner frame specification may contain destination address only. + * There is support for individual/group mask as well as for empty and full. + * If the mask is NULL, default mask will be used. Ranging is not supported. * @param efx_spec[in, out] * EFX filter specification to update. * @param[out] error @@ -207,15 +243,32 @@ sfc_flow_parse_eth(const struct rte_flow_item *item, .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, .type = 0xffff, }; + const struct rte_flow_item_eth ifrm_supp_mask = { + .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + }; const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; + const struct rte_flow_item_eth *supp_mask_p; + const struct rte_flow_item_eth *def_mask_p; + uint8_t *loc_mac = NULL; + boolean_t is_ifrm = (efx_spec->efs_encap_type != + EFX_TUNNEL_PROTOCOL_NONE); + + if (is_ifrm) { + supp_mask_p = &ifrm_supp_mask; + def_mask_p = &ifrm_supp_mask; + loc_mac = efx_spec->efs_ifrm_loc_mac; + } else { + supp_mask_p = &supp_mask; + def_mask_p = &rte_flow_item_eth_mask; + loc_mac = efx_spec->efs_loc_mac; + } rc = sfc_flow_parse_init(item, (const void **)&spec, (const void **)&mask, - &supp_mask, - &rte_flow_item_eth_mask, + supp_mask_p, def_mask_p, sizeof(struct rte_flow_item_eth), error); if (rc != 0) @@ -226,21 +279,30 @@ sfc_flow_parse_eth(const struct rte_flow_item *item, return 0; if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) { - efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC; - rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes, + efx_spec->efs_match_flags |= is_ifrm ? + EFX_FILTER_MATCH_IFRM_LOC_MAC : + EFX_FILTER_MATCH_LOC_MAC; + rte_memcpy(loc_mac, spec->dst.addr_bytes, EFX_MAC_ADDR_LEN); } else if (memcmp(mask->dst.addr_bytes, ig_mask, EFX_MAC_ADDR_LEN) == 0) { if (is_unicast_ether_addr(&spec->dst)) - efx_spec->efs_match_flags |= + efx_spec->efs_match_flags |= is_ifrm ? + EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST : EFX_FILTER_MATCH_UNKNOWN_UCAST_DST; else - efx_spec->efs_match_flags |= + efx_spec->efs_match_flags |= is_ifrm ? 
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST : EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; } else if (!is_zero_ether_addr(&mask->dst)) { goto fail_bad_mask; } + /* + * ifrm_supp_mask ensures that the source address and + * ethertype masks are equal to zero in inner frame, + * so these fields are filled in only for the outer frame + */ if (is_same_ether_addr(&mask->src, &supp_mask.src)) { efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC; rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes, @@ -291,6 +353,7 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item, const struct rte_flow_item_vlan *mask = NULL; const struct rte_flow_item_vlan supp_mask = { .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX), + .inner_type = RTE_BE16(0xffff), }; rc = sfc_flow_parse_init(item, @@ -310,7 +373,8 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item, * the outer tag and the next matches the inner tag. */ if (mask->tci == supp_mask.tci) { - vid = rte_bswap16(spec->tci); + /* Apply mask to keep VID only */ + vid = rte_bswap16(spec->tci & mask->tci); if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_OUTER_VID)) { @@ -333,6 +397,22 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item, return -rte_errno; } + if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VLAN TPID matching is not supported"); + return -rte_errno; + } + if (mask->inner_type == supp_mask.inner_type) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; + efx_spec->efs_ether_type = rte_bswap16(spec->inner_type); + } else if (mask->inner_type) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Bad mask for VLAN inner_type"); + return -rte_errno; + } + return 0; } @@ -696,6 +776,253 @@ fail_bad_mask: return -rte_errno; } +/* + * Filters for encapsulated packets match based on the EtherType and IP + * protocol in the outer frame. 
+ */ +static int +sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item, + efx_filter_spec_t *efx_spec, + uint8_t ip_proto, + struct rte_flow_error *error) +{ + if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; + efx_spec->efs_ip_proto = ip_proto; + } else if (efx_spec->efs_ip_proto != ip_proto) { + switch (ip_proto) { + case EFX_IPPROTO_UDP: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Outer IP header protocol must be UDP " + "in VxLAN/GENEVE pattern"); + return -rte_errno; + + case EFX_IPPROTO_GRE: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Outer IP header protocol must be GRE " + "in NVGRE pattern"); + return -rte_errno; + + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Only VxLAN/GENEVE/NVGRE tunneling patterns " + "are supported"); + return -rte_errno; + } + } + + if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 && + efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Outer frame EtherType in pattern with tunneling " + "must be IPv4 or IPv6"); + return -rte_errno; + } + + return 0; +} + +static int +sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec, + const uint8_t *vni_or_vsid_val, + const uint8_t *vni_or_vsid_mask, + const struct rte_flow_item *item, + struct rte_flow_error *error) +{ + const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = { + 0xff, 0xff, 0xff + }; + + if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask, + EFX_VNI_OR_VSID_LEN) == 0) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID; + rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val, + EFX_VNI_OR_VSID_LEN); + } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Unsupported VNI/VSID mask"); + return -rte_errno; + } + + return 0; +} + +/** + * Convert VXLAN item to EFX filter specification. + * + * @param item[in] + * Item specification. Only VXLAN network identifier field is supported. + * If the mask is NULL, default mask will be used. + * Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_parse_vxlan(const struct rte_flow_item *item, + efx_filter_spec_t *efx_spec, + struct rte_flow_error *error) +{ + int rc; + const struct rte_flow_item_vxlan *spec = NULL; + const struct rte_flow_item_vxlan *mask = NULL; + const struct rte_flow_item_vxlan supp_mask = { + .vni = { 0xff, 0xff, 0xff } + }; + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + &supp_mask, + &rte_flow_item_vxlan_mask, + sizeof(struct rte_flow_item_vxlan), + error); + if (rc != 0) + return rc; + + rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, + EFX_IPPROTO_UDP, error); + if (rc != 0) + return rc; + + efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN; + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + + if (spec == NULL) + return 0; + + rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, + mask->vni, item, error); + + return rc; +} + +/** + * Convert GENEVE item to EFX filter specification. + * + * @param item[in] + * Item specification. Only Virtual Network Identifier and protocol type + * fields are supported. 
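
VXLAN and GENEVE VNIs and NVGRE VSIDs are all 24-bit identifiers, and sfc_flow_set_efx_spec_vni_or_vsid() above accepts only an all-ones or all-zeros mask for them. A standalone sketch of that all-or-nothing rule (plain C, with the efx spec reduced to a byte array):

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define VNI_LEN 3	/* VXLAN/GENEVE VNI and NVGRE VSID are 24-bit */

/*
 * Apply a tunnel-ID match: a full mask copies the value, an all-zero
 * mask means "match any ID", and partial masks are rejected.
 */
static int
vni_apply(uint8_t dst[VNI_LEN], const uint8_t val[VNI_LEN],
	  const uint8_t mask[VNI_LEN], bool *match_on_vni)
{
	static const uint8_t full[VNI_LEN] = { 0xff, 0xff, 0xff };
	static const uint8_t none[VNI_LEN] = { 0x00, 0x00, 0x00 };

	if (memcmp(mask, full, VNI_LEN) == 0) {
		memcpy(dst, val, VNI_LEN);
		*match_on_vni = true;
		return 0;
	}
	if (memcmp(mask, none, VNI_LEN) == 0) {
		*match_on_vni = false;
		return 0;
	}
	return -1;	/* partial masks are not supported */
}
```
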
But protocol type can be only Ethernet (0x6558). + * If the mask is NULL, default mask will be used. + * Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_parse_geneve(const struct rte_flow_item *item, + efx_filter_spec_t *efx_spec, + struct rte_flow_error *error) +{ + int rc; + const struct rte_flow_item_geneve *spec = NULL; + const struct rte_flow_item_geneve *mask = NULL; + const struct rte_flow_item_geneve supp_mask = { + .protocol = RTE_BE16(0xffff), + .vni = { 0xff, 0xff, 0xff } + }; + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + &supp_mask, + &rte_flow_item_geneve_mask, + sizeof(struct rte_flow_item_geneve), + error); + if (rc != 0) + return rc; + + rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, + EFX_IPPROTO_UDP, error); + if (rc != 0) + return rc; + + efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE; + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + + if (spec == NULL) + return 0; + + if (mask->protocol == supp_mask.protocol) { + if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "GENEVE encap. protocol must be Ethernet " + "(0x6558) in the GENEVE pattern item"); + return -rte_errno; + } + } else if (mask->protocol != 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Unsupported mask for GENEVE encap. protocol"); + return -rte_errno; + } + + rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, + mask->vni, item, error); + + return rc; +} + +/** + * Convert NVGRE item to EFX filter specification. + * + * @param item[in] + * Item specification. Only virtual subnet ID field is supported. + * If the mask is NULL, default mask will be used. + * Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL. 
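
The GENEVE parser requires any matched protocol field to equal Transparent Ethernet Bridging (0x6558), comparing in network byte order rather than byte-swapping the packet field. A self-contained sketch of that check, with htons() standing in for rte_cpu_to_be_16():

```c
#include <stdint.h>
#include <arpa/inet.h>	/* htons(); stand-in for rte_cpu_to_be_16() */

#define ETHER_TYPE_TEB 0x6558	/* Transparent Ethernet Bridging */

/*
 * Validate a GENEVE protocol match. The field and its mask arrive in
 * network byte order; 0xffff and 0 are the only accepted masks and
 * they read the same in either byte order.
 */
static int
geneve_protocol_ok(uint16_t protocol_be, uint16_t mask_be)
{
	if (mask_be == UINT16_C(0xffff))	/* exact match requested */
		return protocol_be == htons(ETHER_TYPE_TEB);

	return mask_be == 0;	/* otherwise only a wildcard is valid */
}
```
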
+ */ +static int +sfc_flow_parse_nvgre(const struct rte_flow_item *item, + efx_filter_spec_t *efx_spec, + struct rte_flow_error *error) +{ + int rc; + const struct rte_flow_item_nvgre *spec = NULL; + const struct rte_flow_item_nvgre *mask = NULL; + const struct rte_flow_item_nvgre supp_mask = { + .tni = { 0xff, 0xff, 0xff } + }; + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + &supp_mask, + &rte_flow_item_nvgre_mask, + sizeof(struct rte_flow_item_nvgre), + error); + if (rc != 0) + return rc; + + rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, + EFX_IPPROTO_GRE, error); + if (rc != 0) + return rc; + + efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE; + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + + if (spec == NULL) + return 0; + + rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni, + mask->tni, item, error); + + return rc; +} + static const struct sfc_flow_item sfc_flow_items[] = { { .type = RTE_FLOW_ITEM_TYPE_VOID, @@ -739,6 +1066,24 @@ static const struct sfc_flow_item sfc_flow_items[] = { .layer = SFC_FLOW_ITEM_L4, .parse = sfc_flow_parse_udp, }, + { + .type = RTE_FLOW_ITEM_TYPE_VXLAN, + .prev_layer = SFC_FLOW_ITEM_L4, + .layer = SFC_FLOW_ITEM_START_LAYER, + .parse = sfc_flow_parse_vxlan, + }, + { + .type = RTE_FLOW_ITEM_TYPE_GENEVE, + .prev_layer = SFC_FLOW_ITEM_L4, + .layer = SFC_FLOW_ITEM_START_LAYER, + .parse = sfc_flow_parse_geneve, + }, + { + .type = RTE_FLOW_ITEM_TYPE_NVGRE, + .prev_layer = SFC_FLOW_ITEM_L3, + .layer = SFC_FLOW_ITEM_START_LAYER, + .parse = sfc_flow_parse_nvgre, + }, }; /* @@ -773,6 +1118,12 @@ sfc_flow_parse_attr(const struct rte_flow_attr *attr, "Egress is not supported"); return -rte_errno; } + if (attr->transfer != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, + "Transfer is not supported"); + return -rte_errno; + } if (attr->ingress == 0) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, @@ -780,8 +1131,8 @@ sfc_flow_parse_attr(const struct rte_flow_attr *attr, return -rte_errno; } - flow->spec.efs_flags |= EFX_FILTER_FLAG_RX; - flow->spec.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; + flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX; + flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; return 0; } @@ -806,6 +1157,7 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[], { int rc; unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER; + boolean_t is_ifrm = B_FALSE; const struct sfc_flow_item *item; if (pattern == NULL) { @@ -837,7 +1189,41 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[], return -rte_errno; } - rc = item->parse(pattern, &flow->spec, error); + /* + * Allow only VOID and ETH pattern items in the inner frame. + * Also check that there is only one tunneling protocol. 
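
A standalone sketch of the pattern walk outlined above: once a tunnel item has been seen, only VOID and ETH items may follow, and a second tunnel item is an error (the item types are reduced to an illustrative enum):

```c
#include <stdbool.h>

enum item_type {
	ITEM_END, ITEM_VOID, ITEM_ETH, ITEM_IPV4, ITEM_UDP,
	ITEM_VXLAN, ITEM_GENEVE, ITEM_NVGRE
};

static int
check_pattern(const enum item_type *items)
{
	bool in_inner_frame = false;

	for (; *items != ITEM_END; items++) {
		switch (*items) {
		case ITEM_VOID:
		case ITEM_ETH:
			break;			/* allowed anywhere */
		case ITEM_VXLAN:
		case ITEM_GENEVE:
		case ITEM_NVGRE:
			if (in_inner_frame)
				return -1;	/* second tunnel protocol */
			in_inner_frame = true;
			break;
		default:
			if (in_inner_frame)
				return -1;	/* e.g. inner IPv4 or UDP */
			break;
		}
	}

	return 0;
}
```
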
+ */ + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_VOID: + case RTE_FLOW_ITEM_TYPE_ETH: + break; + + case RTE_FLOW_ITEM_TYPE_VXLAN: + case RTE_FLOW_ITEM_TYPE_GENEVE: + case RTE_FLOW_ITEM_TYPE_NVGRE: + if (is_ifrm) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "More than one tunneling protocol"); + return -rte_errno; + } + is_ifrm = B_TRUE; + break; + + default: + if (is_ifrm) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "There is an unsupported pattern item " + "in the inner frame"); + return -rte_errno; + } + break; + } + + rc = item->parse(pattern, &flow->spec.template, error); if (rc != 0) return rc; @@ -859,28 +1245,27 @@ sfc_flow_parse_queue(struct sfc_adapter *sa, return -EINVAL; rxq = sa->rxq_info[queue->index].rxq; - flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index; + flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index; return 0; } -#if EFSYS_OPT_RX_SCALE static int sfc_flow_parse_rss(struct sfc_adapter *sa, - const struct rte_flow_action_rss *rss, + const struct rte_flow_action_rss *action_rss, struct rte_flow *flow) { + struct sfc_rss *rss = &sa->rss; unsigned int rxq_sw_index; struct sfc_rxq *rxq; unsigned int rxq_hw_index_min; unsigned int rxq_hw_index_max; - const struct rte_eth_rss_conf *rss_conf = rss->rss_conf; - uint64_t rss_hf; - uint8_t *rss_key = NULL; + efx_rx_hash_type_t efx_hash_types; + const uint8_t *rss_key; struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf; unsigned int i; - if (rss->num == 0) + if (action_rss->queue_num == 0) return -EINVAL; rxq_sw_index = sa->rxq_count - 1; @@ -888,8 +1273,8 @@ sfc_flow_parse_rss(struct sfc_adapter *sa, rxq_hw_index_min = rxq->hw_index; rxq_hw_index_max = 0; - for (i = 0; i < rss->num; ++i) { - rxq_sw_index = rss->queue[i]; + for (i = 0; i < action_rss->queue_num; ++i) { + rxq_sw_index = action_rss->queue[i]; if (rxq_sw_index >= sa->rxq_count) return -EINVAL; @@ -903,28 +1288,62 @@ sfc_flow_parse_rss(struct sfc_adapter *sa, rxq_hw_index_max = rxq->hw_index; } - rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS; - if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0) + switch (action_rss->func) { + case RTE_ETH_HASH_FUNCTION_DEFAULT: + case RTE_ETH_HASH_FUNCTION_TOEPLITZ: + break; + default: + return -EINVAL; + } + + if (action_rss->level) return -EINVAL; - if (rss_conf != NULL) { - if (rss_conf->rss_key_len != sizeof(sa->rss_key)) + /* + * Dummy RSS action with only one queue and no specific settings + * for hash types and key does not require dedicated RSS context + * and may be simplified to single queue action. 
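
A minimal sketch of that shortcut with a reduced stand-in for struct rte_flow_action_rss: when the action names one queue and overrides neither the hash types nor the key, no dedicated RSS context is required and the rule collapses to a plain QUEUE action:

```c
#include <stdbool.h>
#include <stdint.h>

/* Reduced stand-in for struct rte_flow_action_rss. */
struct rss_action {
	uint32_t level;		/* 0 = outermost frame */
	uint64_t types;		/* requested hash protocols, 0 = default */
	uint32_t key_len;	/* 0 = default key */
	uint32_t queue_num;
	const uint16_t *queue;
};

static bool
rss_is_plain_queue(const struct rss_action *a, uint16_t *queue)
{
	if (a->queue_num == 1 && a->types == 0 && a->key_len == 0) {
		*queue = a->queue[0];	/* degenerates to QUEUE */
		return true;
	}

	return false;
}
```
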
+ */ + if (action_rss->queue_num == 1 && action_rss->types == 0 && + action_rss->key_len == 0) { + flow->spec.template.efs_dmaq_id = rxq_hw_index_min; + return 0; + } + + if (action_rss->types) { + int rc; + + rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types, + &efx_hash_types); + if (rc != 0) + return -rc; + } else { + unsigned int i; + + efx_hash_types = 0; + for (i = 0; i < rss->hf_map_nb_entries; ++i) + efx_hash_types |= rss->hf_map[i].efx; + } + + if (action_rss->key_len) { + if (action_rss->key_len != sizeof(rss->key)) return -EINVAL; - rss_key = rss_conf->rss_key; + rss_key = action_rss->key; } else { - rss_key = sa->rss_key; + rss_key = rss->key; } flow->rss = B_TRUE; sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min; sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max; - sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf); - rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key)); + sfc_rss_conf->rss_hash_types = efx_hash_types; + rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key)); for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) { - unsigned int rxq_sw_index = rss->queue[i % rss->num]; + unsigned int nb_queues = action_rss->queue_num; + unsigned int rxq_sw_index = action_rss->queue[i % nb_queues]; struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq; sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min; @@ -932,47 +1351,101 @@ sfc_flow_parse_rss(struct sfc_adapter *sa, return 0; } -#endif /* EFSYS_OPT_RX_SCALE */ + +static int +sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec, + unsigned int filters_count) +{ + unsigned int i; + int ret = 0; + + for (i = 0; i < filters_count; i++) { + int rc; + + rc = efx_filter_remove(sa->nic, &spec->filters[i]); + if (ret == 0 && rc != 0) { + sfc_err(sa, "failed to remove filter specification " + "(rc = %d)", rc); + ret = rc; + } + } + + return ret; +} + +static int +sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec) +{ + unsigned int i; + int rc = 0; + + for (i = 0; i < spec->count; i++) { + rc = efx_filter_insert(sa->nic, &spec->filters[i]); + if (rc != 0) { + sfc_flow_spec_flush(sa, spec, i); + break; + } + } + + return rc; +} + +static int +sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec) +{ + return sfc_flow_spec_flush(sa, spec, spec->count); +} static int sfc_flow_filter_insert(struct sfc_adapter *sa, struct rte_flow *flow) { - efx_filter_spec_t *spec = &flow->spec; - -#if EFSYS_OPT_RX_SCALE - struct sfc_flow_rss *rss = &flow->rss_conf; + struct sfc_rss *rss = &sa->rss; + struct sfc_flow_rss *flow_rss = &flow->rss_conf; + uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; + unsigned int i; int rc = 0; if (flow->rss) { - unsigned int rss_spread = MIN(rss->rxq_hw_index_max - - rss->rxq_hw_index_min + 1, + unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max - + flow_rss->rxq_hw_index_min + 1, EFX_MAXRSS); rc = efx_rx_scale_context_alloc(sa->nic, EFX_RX_SCALE_EXCLUSIVE, rss_spread, - &spec->efs_rss_context); + &efs_rss_context); if (rc != 0) goto fail_scale_context_alloc; - rc = efx_rx_scale_mode_set(sa->nic, spec->efs_rss_context, - EFX_RX_HASHALG_TOEPLITZ, - rss->rss_hash_types, B_TRUE); + rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context, + rss->hash_alg, + flow_rss->rss_hash_types, B_TRUE); if (rc != 0) goto fail_scale_mode_set; - rc = efx_rx_scale_key_set(sa->nic, spec->efs_rss_context, - rss->rss_key, - sizeof(sa->rss_key)); + rc = efx_rx_scale_key_set(sa->nic, efs_rss_context, + flow_rss->rss_key, + sizeof(rss->key)); if 
(rc != 0) goto fail_scale_key_set; - spec->efs_dmaq_id = rss->rxq_hw_index_min; - spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS; + /* + * At this point, fully elaborated filter specifications + * have been produced from the template. To make sure that + * RSS behaviour is consistent between them, set the same + * RSS context value everywhere. + */ + for (i = 0; i < flow->spec.count; i++) { + efx_filter_spec_t *spec = &flow->spec.filters[i]; + + spec->efs_rss_context = efs_rss_context; + spec->efs_dmaq_id = flow_rss->rxq_hw_index_min; + spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS; + } } - rc = efx_filter_insert(sa->nic, spec); + rc = sfc_flow_spec_insert(sa, &flow->spec); if (rc != 0) goto fail_filter_insert; @@ -985,8 +1458,9 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, * the HW knows all the information needed to verify * the table entries, and the operation will succeed */ - rc = efx_rx_scale_tbl_set(sa->nic, spec->efs_rss_context, - rss->rss_tbl, RTE_DIM(rss->rss_tbl)); + rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context, + flow_rss->rss_tbl, + RTE_DIM(flow_rss->rss_tbl)); if (rc != 0) goto fail_scale_tbl_set; } @@ -994,62 +1468,96 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, return 0; fail_scale_tbl_set: - efx_filter_remove(sa->nic, spec); + sfc_flow_spec_remove(sa, &flow->spec); fail_filter_insert: fail_scale_key_set: fail_scale_mode_set: - if (flow->rss) - efx_rx_scale_context_free(sa->nic, spec->efs_rss_context); + if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT) + efx_rx_scale_context_free(sa->nic, efs_rss_context); fail_scale_context_alloc: return rc; -#else /* !EFSYS_OPT_RX_SCALE */ - return efx_filter_insert(sa->nic, spec); -#endif /* EFSYS_OPT_RX_SCALE */ } static int sfc_flow_filter_remove(struct sfc_adapter *sa, struct rte_flow *flow) { - efx_filter_spec_t *spec = &flow->spec; int rc = 0; - rc = efx_filter_remove(sa->nic, spec); + rc = sfc_flow_spec_remove(sa, &flow->spec); if (rc != 0) return rc; -#if EFSYS_OPT_RX_SCALE - if (flow->rss) + if (flow->rss) { + /* + * All specifications for a given flow rule have the same RSS + * context, so that RSS context value is taken from the first + * filter specification + */ + efx_filter_spec_t *spec = &flow->spec.filters[0]; + rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context); -#endif /* EFSYS_OPT_RX_SCALE */ + } return rc; } static int -sfc_flow_parse_actions(struct sfc_adapter *sa, - const struct rte_flow_action actions[], - struct rte_flow *flow, - struct rte_flow_error *error) +sfc_flow_parse_mark(struct sfc_adapter *sa, + const struct rte_flow_action_mark *mark, + struct rte_flow *flow) { - int rc; - boolean_t is_specified = B_FALSE; + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); - if (actions == NULL) { - rte_flow_error_set(error, EINVAL, + if (mark == NULL || mark->id > encp->enc_filter_action_mark_max) + return EINVAL; + + flow->spec.template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK; + flow->spec.template.efs_mark = mark->id; + + return 0; +} + +static int +sfc_flow_parse_actions(struct sfc_adapter *sa, + const struct rte_flow_action actions[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int rc; + const unsigned int dp_rx_features = sa->dp_rx->features; + uint32_t actions_set = 0; + const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) | + (1UL << RTE_FLOW_ACTION_TYPE_RSS) | + (1UL << RTE_FLOW_ACTION_TYPE_DROP); + const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) | + (1UL << RTE_FLOW_ACTION_TYPE_FLAG); + + if (actions == NULL) { + 
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, "NULL actions"); return -rte_errno; } +#define SFC_BUILD_SET_OVERFLOW(_action, _set) \ + RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT) + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { case RTE_FLOW_ACTION_TYPE_VOID: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID, + actions_set); break; case RTE_FLOW_ACTION_TYPE_QUEUE: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE, + actions_set); + if ((actions_set & fate_actions_mask) != 0) + goto fail_fate_actions; + rc = sfc_flow_parse_queue(sa, actions->conf, flow); if (rc != 0) { rte_flow_error_set(error, EINVAL, @@ -1057,23 +1565,71 @@ sfc_flow_parse_actions(struct sfc_adapter *sa, "Bad QUEUE action"); return -rte_errno; } - - is_specified = B_TRUE; break; -#if EFSYS_OPT_RX_SCALE case RTE_FLOW_ACTION_TYPE_RSS: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS, + actions_set); + if ((actions_set & fate_actions_mask) != 0) + goto fail_fate_actions; + rc = sfc_flow_parse_rss(sa, actions->conf, flow); if (rc != 0) { - rte_flow_error_set(error, rc, + rte_flow_error_set(error, -rc, RTE_FLOW_ERROR_TYPE_ACTION, actions, "Bad RSS action"); return -rte_errno; } + break; + + case RTE_FLOW_ACTION_TYPE_DROP: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP, + actions_set); + if ((actions_set & fate_actions_mask) != 0) + goto fail_fate_actions; + + flow->spec.template.efs_dmaq_id = + EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; + break; + + case RTE_FLOW_ACTION_TYPE_FLAG: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG, + actions_set); + if ((actions_set & mark_actions_mask) != 0) + goto fail_actions_overlap; + + if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "FLAG action is not supported on the current Rx datapath"); + return -rte_errno; + } - is_specified = B_TRUE; + flow->spec.template.efs_flags |= + EFX_FILTER_FLAG_ACTION_FLAG; + break; + + case RTE_FLOW_ACTION_TYPE_MARK: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK, + actions_set); + if ((actions_set & mark_actions_mask) != 0) + goto fail_actions_overlap; + + if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "MARK action is not supported on the current Rx datapath"); + return -rte_errno; + } + + rc = sfc_flow_parse_mark(sa, actions->conf, flow); + if (rc != 0) { + rte_flow_error_set(error, rc, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Bad MARK action"); + return -rte_errno; + } break; -#endif /* EFSYS_OPT_RX_SCALE */ default: rte_flow_error_set(error, ENOTSUP, @@ -1081,12 +1637,607 @@ sfc_flow_parse_actions(struct sfc_adapter *sa, "Action is not supported"); return -rte_errno; } + + actions_set |= (1UL << actions->type); } +#undef SFC_BUILD_SET_OVERFLOW + + /* When fate is unknown, drop traffic. 
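
The action parser above tracks which action types were seen in a 32-bit set, so mutually exclusive fate actions (QUEUE, RSS, DROP) are rejected with one mask test, and SFC_BUILD_SET_OVERFLOW statically guarantees every action type fits in the word. A standalone sketch of that bookkeeping (illustrative enum, not the real rte_flow action values):

```c
#include <stdint.h>

enum action_type {
	ACT_VOID, ACT_QUEUE, ACT_RSS, ACT_DROP, ACT_FLAG, ACT_MARK,
	ACT_NB		/* number of action types */
};

/* Compile-time analogue of SFC_BUILD_SET_OVERFLOW. */
_Static_assert(ACT_NB <= 32, "action set must fit in a uint32_t");

static const uint32_t fate_mask = (UINT32_C(1) << ACT_QUEUE) |
				  (UINT32_C(1) << ACT_RSS) |
				  (UINT32_C(1) << ACT_DROP);

static int
track_action(uint32_t *actions_set, enum action_type type)
{
	/* Reject a second fate-deciding action up front. */
	if ((fate_mask & (UINT32_C(1) << type)) &&
	    (*actions_set & fate_mask) != 0)
		return -1;

	*actions_set |= UINT32_C(1) << type;
	return 0;
}
```
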
*/ + if ((actions_set & fate_actions_mask) == 0) { + flow->spec.template.efs_dmaq_id = + EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; + } + + return 0; + +fail_fate_actions: + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Cannot combine several fate-deciding actions, " + "choose between QUEUE, RSS or DROP"); + return -rte_errno; - if (!is_specified) { +fail_actions_overlap: + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Overlapping actions are not supported"); + return -rte_errno; +} + +/** + * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST + * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same + * specifications after copying. + * + * @param spec[in, out] + * SFC flow specification to update. + * @param filters_count_for_one_val[in] + * How many specifications should have the same match flag, what is the + * number of specifications before copying. + * @param error[out] + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec, + unsigned int filters_count_for_one_val, + struct rte_flow_error *error) +{ + unsigned int i; + static const efx_filter_match_flags_t vals[] = { + EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, + EFX_FILTER_MATCH_UNKNOWN_MCAST_DST + }; + + if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions, - "Action is unspecified"); + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Number of specifications is incorrect while copying " + "by unknown destination flags"); + return -rte_errno; + } + + for (i = 0; i < spec->count; i++) { + /* The check above ensures that divisor can't be zero here */ + spec->filters[i].efs_match_flags |= + vals[i / filters_count_for_one_val]; + } + + return 0; +} + +/** + * Check that the following conditions are met: + * - the list of supported filters has a filter + * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of + * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also + * be inserted. + * + * @param match[in] + * The match flags of filter. + * @param spec[in] + * Specification to be supplemented. + * @param filter[in] + * SFC filter with list of supported filters. + */ +static boolean_t +sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match, + __rte_unused efx_filter_spec_t *spec, + struct sfc_filter *filter) +{ + unsigned int i; + efx_filter_match_flags_t match_mcast_dst; + + match_mcast_dst = + (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) | + EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; + for (i = 0; i < filter->supported_match_num; i++) { + if (match_mcast_dst == filter->supported_match[i]) + return B_TRUE; + } + + return B_FALSE; +} + +/** + * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and + * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same + * specifications after copying. + * + * @param spec[in, out] + * SFC flow specification to update. + * @param filters_count_for_one_val[in] + * How many specifications should have the same EtherType value, what is the + * number of specifications before copying. + * @param error[out] + * Perform verbose error reporting if not NULL. 
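
Each set_vals helper relies on one copy-and-assign step: duplicate the existing specifications once per alternative value, then stamp each block of copies with one of the values. A self-contained sketch with the spec reduced to a match-flags word (the flag values passed in would be placeholders such as the unknown-unicast/-multicast bits):

```c
#include <stdint.h>

#define NB_FILTERS_MAX 8	/* mirrors SF_FLOW_SPEC_NB_FILTERS_MAX */

struct filt {
	uint32_t match_flags;
};

struct flow_spec {
	struct filt filters[NB_FILTERS_MAX];
	unsigned int count;
};

static int
spec_multiply(struct flow_spec *spec, const uint32_t *vals,
	      unsigned int nb_vals)
{
	unsigned int old_count = spec->count;
	unsigned int new_count = old_count * nb_vals;
	unsigned int i;

	if (new_count > NB_FILTERS_MAX)
		return -1;

	/* Duplicate existing specs: [A,B] -> [A,B,A,B] for two values. */
	for (i = old_count; i < new_count; i++)
		spec->filters[i] = spec->filters[i - old_count];

	/* Each block of old_count copies gets one alternative value. */
	for (i = 0; i < new_count; i++)
		spec->filters[i].match_flags |= vals[i / old_count];

	spec->count = new_count;
	return 0;
}
```
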
+ */ +static int +sfc_flow_set_ethertypes(struct sfc_flow_spec *spec, + unsigned int filters_count_for_one_val, + struct rte_flow_error *error) +{ + unsigned int i; + static const uint16_t vals[] = { + EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6 + }; + + if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Number of specifications is incorrect " + "while copying by Ethertype"); + return -rte_errno; + } + + for (i = 0; i < spec->count; i++) { + spec->filters[i].efs_match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE; + + /* + * The check above ensures that + * filters_count_for_one_val is not 0 + */ + spec->filters[i].efs_ether_type = + vals[i / filters_count_for_one_val]; + } + + return 0; +} + +/** + * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0 + * in the same specifications after copying. + * + * @param spec[in, out] + * SFC flow specification to update. + * @param filters_count_for_one_val[in] + * How many specifications should have the same match flag, what is the + * number of specifications before copying. + * @param error[out] + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec, + unsigned int filters_count_for_one_val, + struct rte_flow_error *error) +{ + unsigned int i; + + if (filters_count_for_one_val != spec->count) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Number of specifications is incorrect " + "while copying by outer VLAN ID"); + return -rte_errno; + } + + for (i = 0; i < spec->count; i++) { + spec->filters[i].efs_match_flags |= + EFX_FILTER_MATCH_OUTER_VID; + + spec->filters[i].efs_outer_vid = 0; + } + + return 0; +} + +/** + * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and + * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same + * specifications after copying. + * + * @param spec[in, out] + * SFC flow specification to update. + * @param filters_count_for_one_val[in] + * How many specifications should have the same match flag, what is the + * number of specifications before copying. + * @param error[out] + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec, + unsigned int filters_count_for_one_val, + struct rte_flow_error *error) +{ + unsigned int i; + static const efx_filter_match_flags_t vals[] = { + EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, + EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST + }; + + if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Number of specifications is incorrect while copying " + "by inner frame unknown destination flags"); + return -rte_errno; + } + + for (i = 0; i < spec->count; i++) { + /* The check above ensures that divisor can't be zero here */ + spec->filters[i].efs_match_flags |= + vals[i / filters_count_for_one_val]; + } + + return 0; +} + +/** + * Check that the following conditions are met: + * - the specification corresponds to a filter for encapsulated traffic + * - the list of supported filters has a filter + * with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of + * EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also + * be inserted. + * + * @param match[in] + * The match flags of filter. + * @param spec[in] + * Specification to be supplemented. + * @param filter[in] + * SFC filter with list of supported filters. 
+ */ +static boolean_t +sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match, + efx_filter_spec_t *spec, + struct sfc_filter *filter) +{ + unsigned int i; + efx_tunnel_protocol_t encap_type = spec->efs_encap_type; + efx_filter_match_flags_t match_mcast_dst; + + if (encap_type == EFX_TUNNEL_PROTOCOL_NONE) + return B_FALSE; + + match_mcast_dst = + (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) | + EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST; + for (i = 0; i < filter->supported_match_num; i++) { + if (match_mcast_dst == filter->supported_match[i]) + return B_TRUE; + } + + return B_FALSE; +} + +/** + * Check that the list of supported filters has a filter that differs + * from @p match in that it has no flag EFX_FILTER_MATCH_OUTER_VID + * in this case that filter will be used and the flag + * EFX_FILTER_MATCH_OUTER_VID is not needed. + * + * @param match[in] + * The match flags of filter. + * @param spec[in] + * Specification to be supplemented. + * @param filter[in] + * SFC filter with list of supported filters. + */ +static boolean_t +sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match, + __rte_unused efx_filter_spec_t *spec, + struct sfc_filter *filter) +{ + unsigned int i; + efx_filter_match_flags_t match_without_vid = + match & ~EFX_FILTER_MATCH_OUTER_VID; + + for (i = 0; i < filter->supported_match_num; i++) { + if (match_without_vid == filter->supported_match[i]) + return B_FALSE; + } + + return B_TRUE; +} + +/* + * Match flags that can be automatically added to filters. + * Selecting the last minimum when searching for the copy flag ensures that the + * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than + * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter + * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported + * filters. + */ +static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = { + { + .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, + .vals_count = 2, + .set_vals = sfc_flow_set_unknown_dst_flags, + .spec_check = sfc_flow_check_unknown_dst_flags, + }, + { + .flag = EFX_FILTER_MATCH_ETHER_TYPE, + .vals_count = 2, + .set_vals = sfc_flow_set_ethertypes, + .spec_check = NULL, + }, + { + .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, + .vals_count = 2, + .set_vals = sfc_flow_set_ifrm_unknown_dst_flags, + .spec_check = sfc_flow_check_ifrm_unknown_dst_flags, + }, + { + .flag = EFX_FILTER_MATCH_OUTER_VID, + .vals_count = 1, + .set_vals = sfc_flow_set_outer_vid_flag, + .spec_check = sfc_flow_check_outer_vid_flag, + }, +}; + +/* Get item from array sfc_flow_copy_flags */ +static const struct sfc_flow_copy_flag * +sfc_flow_get_copy_flag(efx_filter_match_flags_t flag) +{ + unsigned int i; + + for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { + if (sfc_flow_copy_flags[i].flag == flag) + return &sfc_flow_copy_flags[i]; + } + + return NULL; +} + +/** + * Make copies of the specifications, set match flag and values + * of the field that corresponds to it. + * + * @param spec[in, out] + * SFC flow specification to update. + * @param flag[in] + * The match flag to add. + * @param error[out] + * Perform verbose error reporting if not NULL. 
+ */ +static int +sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec, + efx_filter_match_flags_t flag, + struct rte_flow_error *error) +{ + unsigned int i; + unsigned int new_filters_count; + unsigned int filters_count_for_one_val; + const struct sfc_flow_copy_flag *copy_flag; + int rc; + + copy_flag = sfc_flow_get_copy_flag(flag); + if (copy_flag == NULL) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Unsupported spec field for copying"); + return -rte_errno; + } + + new_filters_count = spec->count * copy_flag->vals_count; + if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Too much EFX specifications in the flow rule"); + return -rte_errno; + } + + /* Copy filters specifications */ + for (i = spec->count; i < new_filters_count; i++) + spec->filters[i] = spec->filters[i - spec->count]; + + filters_count_for_one_val = spec->count; + spec->count = new_filters_count; + + rc = copy_flag->set_vals(spec, filters_count_for_one_val, error); + if (rc != 0) + return rc; + + return 0; +} + +/** + * Check that the given set of match flags missing in the original filter spec + * could be covered by adding spec copies which specify the corresponding + * flags and packet field values to match. + * + * @param miss_flags[in] + * Flags that are missing until the supported filter. + * @param spec[in] + * Specification to be supplemented. + * @param filter[in] + * SFC filter. + * + * @return + * Number of specifications after copy or 0, if the flags can not be added. + */ +static unsigned int +sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags, + efx_filter_spec_t *spec, + struct sfc_filter *filter) +{ + unsigned int i; + efx_filter_match_flags_t copy_flags = 0; + efx_filter_match_flags_t flag; + efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags; + sfc_flow_spec_check *check; + unsigned int multiplier = 1; + + for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { + flag = sfc_flow_copy_flags[i].flag; + check = sfc_flow_copy_flags[i].spec_check; + if ((flag & miss_flags) == flag) { + if (check != NULL && (!check(match, spec, filter))) + continue; + + copy_flags |= flag; + multiplier *= sfc_flow_copy_flags[i].vals_count; + } + } + + if (copy_flags == miss_flags) + return multiplier; + + return 0; +} + +/** + * Attempt to supplement the specification template to the minimally + * supported set of match flags. To do this, it is necessary to copy + * the specifications, filling them with the values of fields that + * correspond to the missing flags. + * The necessary and sufficient filter is built from the fewest number + * of copies which could be made to cover the minimally required set + * of flags. + * + * @param sa[in] + * SFC adapter. + * @param spec[in, out] + * SFC flow specification to update. + * @param error[out] + * Perform verbose error reporting if not NULL. 
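
A standalone sketch of the search performed by sfc_flow_spec_filters_complete() below: among the supported match-flag sets that contain all of the template's flags, keep the completion that needs the fewest filter copies. The `<=` comparison keeps the last minimum, matching the priority note attached to sfc_flow_copy_flags; the toy multiplier function stands in for sfc_flow_check_missing_flags():

```c
#include <limits.h>
#include <stdint.h>

/* Toy stand-in: pretend each missing flag doubles the copy count. */
static unsigned int
multiplier_for(uint32_t miss_flags)
{
	unsigned int m = 1;

	while (miss_flags != 0) {
		m *= 2;
		miss_flags &= miss_flags - 1;	/* clear lowest set bit */
	}
	return m;
}

static int
pick_completion(uint32_t match, const uint32_t *supported,
		unsigned int nb_supported, uint32_t *min_miss_flags)
{
	unsigned int min_multiplier = UINT_MAX;
	unsigned int i;

	for (i = 0; i < nb_supported; i++) {
		uint32_t miss;
		unsigned int m;

		if ((match & supported[i]) != match)
			continue;	/* template asks for more flags */

		miss = supported[i] & ~match;
		m = multiplier_for(miss);
		if (m > 0 && m <= min_multiplier) {
			min_multiplier = m;
			*min_miss_flags = miss;
		}
	}

	return (min_multiplier == UINT_MAX) ? -1 : 0;
}
```
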
+ */ +static int +sfc_flow_spec_filters_complete(struct sfc_adapter *sa, + struct sfc_flow_spec *spec, + struct rte_flow_error *error) +{ + struct sfc_filter *filter = &sa->filter; + efx_filter_match_flags_t miss_flags; + efx_filter_match_flags_t min_miss_flags = 0; + efx_filter_match_flags_t match; + unsigned int min_multiplier = UINT_MAX; + unsigned int multiplier; + unsigned int i; + int rc; + + match = spec->template.efs_match_flags; + for (i = 0; i < filter->supported_match_num; i++) { + if ((match & filter->supported_match[i]) == match) { + miss_flags = filter->supported_match[i] & (~match); + multiplier = sfc_flow_check_missing_flags(miss_flags, + &spec->template, filter); + if (multiplier > 0) { + if (multiplier <= min_multiplier) { + min_multiplier = multiplier; + min_miss_flags = miss_flags; + } + } + } + } + + if (min_multiplier == UINT_MAX) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "The flow rule pattern is unsupported"); + return -rte_errno; + } + + for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { + efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag; + + if ((flag & min_miss_flags) == flag) { + rc = sfc_flow_spec_add_match_flag(spec, flag, error); + if (rc != 0) + return rc; + } + } + + return 0; +} + +/** + * Check that set of match flags is referred to by a filter. Filter is + * described by match flags with the ability to add OUTER_VID and INNER_VID + * flags. + * + * @param match_flags[in] + * Set of match flags. + * @param flags_pattern[in] + * Pattern of filter match flags. + */ +static boolean_t +sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags, + efx_filter_match_flags_t flags_pattern) +{ + if ((match_flags & flags_pattern) != flags_pattern) + return B_FALSE; + + switch (match_flags & ~flags_pattern) { + case 0: + case EFX_FILTER_MATCH_OUTER_VID: + case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID: + return B_TRUE; + default: + return B_FALSE; + } +} + +/** + * Check whether the spec maps to a hardware filter which is known to be + * ineffective despite being valid. + * + * @param filter[in] + * SFC filter with list of supported filters. + * @param spec[in] + * SFC flow specification. 
+ */ +static boolean_t +sfc_flow_is_match_flags_exception(struct sfc_filter *filter, + struct sfc_flow_spec *spec) +{ + unsigned int i; + uint16_t ether_type; + uint8_t ip_proto; + efx_filter_match_flags_t match_flags; + + for (i = 0; i < spec->count; i++) { + match_flags = spec->filters[i].efs_match_flags; + + if (sfc_flow_is_match_with_vids(match_flags, + EFX_FILTER_MATCH_ETHER_TYPE) || + sfc_flow_is_match_with_vids(match_flags, + EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_LOC_MAC)) { + ether_type = spec->filters[i].efs_ether_type; + if (filter->supports_ip_proto_or_addr_filter && + (ether_type == EFX_ETHER_TYPE_IPV4 || + ether_type == EFX_ETHER_TYPE_IPV6)) + return B_TRUE; + } else if (sfc_flow_is_match_with_vids(match_flags, + EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO) || + sfc_flow_is_match_with_vids(match_flags, + EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_MAC)) { + ip_proto = spec->filters[i].efs_ip_proto; + if (filter->supports_rem_or_local_port_filter && + (ip_proto == EFX_IPPROTO_TCP || + ip_proto == EFX_IPPROTO_UDP)) + return B_TRUE; + } + } + + return B_FALSE; +} + +static int +sfc_flow_validate_match_flags(struct sfc_adapter *sa, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + efx_filter_spec_t *spec_tmpl = &flow->spec.template; + efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags; + int rc; + + /* Initialize the first filter spec with template */ + flow->spec.filters[0] = *spec_tmpl; + flow->spec.count = 1; + + if (!sfc_filter_is_match_supported(sa, match_flags)) { + rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error); + if (rc != 0) + return rc; + } + + if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "The flow rule pattern is unsupported"); return -rte_errno; } @@ -1116,12 +2267,11 @@ sfc_flow_parse(struct rte_eth_dev *dev, if (rc != 0) goto fail_bad_value; - if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Flow rule pattern is not supported"); - return -rte_errno; - } + rc = sfc_flow_validate_match_flags(sa, flow, error); + if (rc != 0) + goto fail_bad_value; + + return 0; fail_bad_value: return rc; diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h index 35472ad3..71ec18cb 100644 --- a/drivers/net/sfc/sfc_flow.h +++ b/drivers/net/sfc/sfc_flow.h @@ -19,7 +19,13 @@ extern "C" { #endif -#if EFSYS_OPT_RX_SCALE +/* + * The maximum number of fully elaborated hardware filter specifications + * which can be produced from a template by means of multiplication, if + * missing match flags are needed to be taken into account + */ +#define SF_FLOW_SPEC_NB_FILTERS_MAX 8 + /* RSS configuration storage */ struct sfc_flow_rss { unsigned int rxq_hw_index_min; @@ -28,15 +34,22 @@ struct sfc_flow_rss { uint8_t rss_key[EFX_RSS_KEY_SIZE]; unsigned int rss_tbl[EFX_RSS_TBL_SIZE]; }; -#endif /* EFSYS_OPT_RX_SCALE */ + +/* Filter specification storage */ +struct sfc_flow_spec { + /* partial specification from flow rule */ + efx_filter_spec_t template; + /* fully elaborated hardware filters specifications */ + efx_filter_spec_t filters[SF_FLOW_SPEC_NB_FILTERS_MAX]; + /* number of complete specifications */ + unsigned int count; +}; /* PMD-specific definition of the opaque type from rte_flow.h */ struct rte_flow { - efx_filter_spec_t spec; /* filter specification */ -#if 
EFSYS_OPT_RX_SCALE + struct sfc_flow_spec spec; /* flow spec for hardware filter(s) */ boolean_t rss; /* RSS toggle */ struct sfc_flow_rss rss_conf; /* RSS configuration */ -#endif /* EFSYS_OPT_RX_SCALE */ TAILQ_ENTRY(rte_flow) entries; /* flow list entries */ }; diff --git a/drivers/net/sfc/sfc_intr.c b/drivers/net/sfc/sfc_intr.c index d6c84927..fbdc7eea 100644 --- a/drivers/net/sfc/sfc_intr.c +++ b/drivers/net/sfc/sfc_intr.c @@ -86,7 +86,7 @@ sfc_intr_line_handler(void *cb_arg) exit: if (lsc_seq != sa->port.lsc_seq) { - sfc_info(sa, "link status change event: link %s", + sfc_notice(sa, "link status change event: link %s", sa->eth_dev->data->dev_link.link_status ? "UP" : "DOWN"); _rte_eth_dev_callback_process(sa->eth_dev, @@ -130,7 +130,7 @@ sfc_intr_message_handler(void *cb_arg) exit: if (lsc_seq != sa->port.lsc_seq) { - sfc_info(sa, "link status change event"); + sfc_notice(sa, "link status change event"); _rte_eth_dev_callback_process(sa->eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); @@ -251,7 +251,7 @@ sfc_intr_configure(struct sfc_adapter *sa) intr->handler = NULL; intr->lsc_intr = (sa->eth_dev->data->dev_conf.intr_conf.lsc != 0); if (!intr->lsc_intr) { - sfc_info(sa, "LSC tracking using interrupts is disabled"); + sfc_notice(sa, "LSC tracking using interrupts is disabled"); goto done; } diff --git a/drivers/net/sfc/sfc_kvargs.c b/drivers/net/sfc/sfc_kvargs.c index 8f55e99b..7a89c769 100644 --- a/drivers/net/sfc/sfc_kvargs.c +++ b/drivers/net/sfc/sfc_kvargs.c @@ -23,11 +23,11 @@ sfc_kvargs_parse(struct sfc_adapter *sa) struct rte_devargs *devargs = eth_dev->device->devargs; const char **params = (const char *[]){ SFC_KVARG_STATS_UPDATE_PERIOD_MS, - SFC_KVARG_DEBUG_INIT, - SFC_KVARG_MCDI_LOGGING, SFC_KVARG_PERF_PROFILE, SFC_KVARG_RX_DATAPATH, SFC_KVARG_TX_DATAPATH, + SFC_KVARG_FW_VARIANT, + SFC_KVARG_RXD_WAIT_TIMEOUT_NS, NULL, }; diff --git a/drivers/net/sfc/sfc_kvargs.h b/drivers/net/sfc/sfc_kvargs.h index e7044ca6..4506667a 100644 --- a/drivers/net/sfc/sfc_kvargs.h +++ b/drivers/net/sfc/sfc_kvargs.h @@ -18,10 +18,6 @@ extern "C" { #define SFC_KVARG_VALUES_BOOL "[1|y|yes|on|0|n|no|off]" -#define SFC_KVARG_DEBUG_INIT "debug_init" - -#define SFC_KVARG_MCDI_LOGGING "mcdi_logging" - #define SFC_KVARG_PERF_PROFILE "perf_profile" #define SFC_KVARG_PERF_PROFILE_AUTO "auto" @@ -37,11 +33,13 @@ extern "C" { #define SFC_KVARG_DATAPATH_EFX "efx" #define SFC_KVARG_DATAPATH_EF10 "ef10" #define SFC_KVARG_DATAPATH_EF10_SIMPLE "ef10_simple" +#define SFC_KVARG_DATAPATH_EF10_ESSB "ef10_essb" #define SFC_KVARG_RX_DATAPATH "rx_datapath" #define SFC_KVARG_VALUES_RX_DATAPATH \ "[" SFC_KVARG_DATAPATH_EFX "|" \ - SFC_KVARG_DATAPATH_EF10 "]" + SFC_KVARG_DATAPATH_EF10 "|" \ + SFC_KVARG_DATAPATH_EF10_ESSB "]" #define SFC_KVARG_TX_DATAPATH "tx_datapath" #define SFC_KVARG_VALUES_TX_DATAPATH \ @@ -49,6 +47,22 @@ extern "C" { SFC_KVARG_DATAPATH_EF10 "|" \ SFC_KVARG_DATAPATH_EF10_SIMPLE "]" +#define SFC_KVARG_FW_VARIANT "fw_variant" + +#define SFC_KVARG_FW_VARIANT_DONT_CARE "dont-care" +#define SFC_KVARG_FW_VARIANT_FULL_FEATURED "full-feature" +#define SFC_KVARG_FW_VARIANT_LOW_LATENCY "ultra-low-latency" +#define SFC_KVARG_FW_VARIANT_PACKED_STREAM "capture-packed-stream" +#define SFC_KVARG_FW_VARIANT_DPDK "dpdk" +#define SFC_KVARG_VALUES_FW_VARIANT \ + "[" SFC_KVARG_FW_VARIANT_DONT_CARE "|" \ + SFC_KVARG_FW_VARIANT_FULL_FEATURED "|" \ + SFC_KVARG_FW_VARIANT_LOW_LATENCY "|" \ + SFC_KVARG_FW_VARIANT_PACKED_STREAM "|" \ + SFC_KVARG_FW_VARIANT_DPDK "]" + +#define SFC_KVARG_RXD_WAIT_TIMEOUT_NS "rxd_wait_timeout_ns" + struct 
sfc_adapter; int sfc_kvargs_parse(struct sfc_adapter *sa); diff --git a/drivers/net/sfc/sfc_log.h b/drivers/net/sfc/sfc_log.h index a18191ed..d6f34352 100644 --- a/drivers/net/sfc/sfc_log.h +++ b/drivers/net/sfc/sfc_log.h @@ -10,14 +10,35 @@ #ifndef _SFC_LOG_H_ #define _SFC_LOG_H_ +/** Generic driver log type */ +extern uint32_t sfc_logtype_driver; + +/** Common log type name prefix */ +#define SFC_LOGTYPE_PREFIX "pmd.net.sfc." + +/** Log PMD generic message, add a prefix and a line break */ +#define SFC_GENERIC_LOG(level, ...) \ + rte_log(RTE_LOG_ ## level, sfc_logtype_driver, \ + RTE_FMT("PMD: " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \ + RTE_FMT_TAIL(__VA_ARGS__ ,))) + +/** Name prefix for the per-device log type used to report basic information */ +#define SFC_LOGTYPE_MAIN_STR SFC_LOGTYPE_PREFIX "main" + +/** Device MCDI log type name prefix */ +#define SFC_LOGTYPE_MCDI_STR SFC_LOGTYPE_PREFIX "mcdi" + +/** Level value used by MCDI log statements */ +#define SFC_LOG_LEVEL_MCDI RTE_LOG_INFO + /* Log PMD message, automatically add prefix and \n */ -#define SFC_LOG(sa, level, ...) \ +#define SFC_LOG(sa, level, type, ...) \ do { \ const struct sfc_adapter *__sa = (sa); \ \ - RTE_LOG(level, PMD, \ - RTE_FMT("sfc_efx " PCI_PRI_FMT " #%" PRIu8 ": " \ - RTE_FMT_HEAD(__VA_ARGS__,) "\n", \ + rte_log(level, type, \ + RTE_FMT("PMD: sfc_efx " PCI_PRI_FMT " #%" PRIu8 \ + ": " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \ __sa->pci_addr.domain, \ __sa->pci_addr.bus, \ __sa->pci_addr.devid, \ @@ -27,27 +48,55 @@ } while (0) #define sfc_err(sa, ...) \ - SFC_LOG(sa, ERR, __VA_ARGS__) + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa, RTE_LOG_ERR, _sa->logtype_main, \ + __VA_ARGS__); \ + } while (0) #define sfc_warn(sa, ...) \ - SFC_LOG(sa, WARNING, __VA_ARGS__) + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa, RTE_LOG_WARNING, _sa->logtype_main, \ + __VA_ARGS__); \ + } while (0) #define sfc_notice(sa, ...) \ - SFC_LOG(sa, NOTICE, __VA_ARGS__) + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa, RTE_LOG_NOTICE, _sa->logtype_main, \ + __VA_ARGS__); \ + } while (0) #define sfc_info(sa, ...) \ - SFC_LOG(sa, INFO, __VA_ARGS__) + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa, RTE_LOG_INFO, _sa->logtype_main, \ + __VA_ARGS__); \ + } while (0) #define sfc_log_init(sa, ...) \ do { \ const struct sfc_adapter *_sa = (sa); \ \ - if (_sa->debug_init) \ - SFC_LOG(_sa, INFO, \ - RTE_FMT("%s(): " \ - RTE_FMT_HEAD(__VA_ARGS__,), \ - __func__, \ - RTE_FMT_TAIL(__VA_ARGS__,))); \ + SFC_LOG(_sa, RTE_LOG_INFO, _sa->logtype_main, \ + RTE_FMT("%s(): " \ + RTE_FMT_HEAD(__VA_ARGS__ ,), \ + __func__, \ + RTE_FMT_TAIL(__VA_ARGS__ ,))); \ } while (0) +#define sfc_log_mcdi(sa, ...) \ + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa, SFC_LOG_LEVEL_MCDI, _sa->mcdi.logtype, \ + __VA_ARGS__); \ + } while (0) + + #endif /* _SFC_LOG_H_ */ diff --git a/drivers/net/sfc/sfc_mcdi.c b/drivers/net/sfc/sfc_mcdi.c index 9d92b8c5..007506b4 100644 --- a/drivers/net/sfc/sfc_mcdi.c +++ b/drivers/net/sfc/sfc_mcdi.c @@ -15,7 +15,6 @@ #include "sfc.h" #include "sfc_log.h" -#include "sfc_kvargs.h" #include "sfc_ev.h" #define SFC_MCDI_POLL_INTERVAL_MIN_US 10 /* 10us in 1us units */ @@ -176,7 +175,7 @@ sfc_mcdi_do_log(const struct sfc_adapter *sa, * at the end which is required by netlogdecode. 
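* A trailing backslash is logged with each flushed line so that * netlogdecode can rejoin the continuation lines.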
*/ buffer[position] = '\0'; - sfc_info(sa, "%s \\", buffer); + sfc_log_mcdi(sa, "%s \\", buffer); /* Preserve prefix for the next log message */ position = pfxsize; } @@ -198,10 +197,17 @@ sfc_mcdi_logger(void *arg, efx_log_msg_t type, size_t pfxsize; size_t start; - if (!sa->mcdi.logging) + /* + * Unlike the other cases, MCDI logging implies more onerous work + * needed to produce a message. If the dynamic log level prevents + * the end result from being printed, the CPU time will be wasted. + * + * To avoid wasting time, the actual level is examined in advance. + */ + if (rte_log_get_level(sa->mcdi.logtype) < (int)SFC_LOG_LEVEL_MCDI) return; - /* The format including prefix added by sfc_info() is the format + /* The format including prefix added by sfc_log_mcdi() is the format * consumed by the Solarflare netlogdecode tool. */ pfxsize = snprintf(buffer, sizeof(buffer), "MCDI RPC %s:", @@ -212,7 +218,7 @@ sfc_mcdi_logger(void *arg, efx_log_msg_t type, start = sfc_mcdi_do_log(sa, buffer, data, data_size, pfxsize, start); if (start != pfxsize) { buffer[start] = '\0'; - sfc_info(sa, "%s", buffer); + sfc_log_mcdi(sa, "%s", buffer); } } @@ -250,11 +256,8 @@ sfc_mcdi_init(struct sfc_adapter *sa) if (rc != 0) goto fail_dma_alloc; - /* Convert negative error to positive used in the driver */ - rc = sfc_kvargs_process(sa, SFC_KVARG_MCDI_LOGGING, - sfc_kvarg_bool_handler, &mcdi->logging); - if (rc != 0) - goto fail_kvargs_process; + mcdi->logtype = sfc_register_logtype(sa, SFC_LOGTYPE_MCDI_STR, + RTE_LOG_NOTICE); emtp = &mcdi->transport; emtp->emt_context = sa; @@ -274,8 +277,6 @@ sfc_mcdi_init(struct sfc_adapter *sa) fail_mcdi_init: memset(emtp, 0, sizeof(*emtp)); - -fail_kvargs_process: sfc_dma_free(sa, &mcdi->mem); fail_dma_alloc: diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c index c423f527..5384dbbd 100644 --- a/drivers/net/sfc/sfc_port.c +++ b/drivers/net/sfc/sfc_port.c @@ -121,6 +121,28 @@ sfc_port_init_dev_link(struct sfc_adapter *sa) return 0; } +#if EFSYS_OPT_LOOPBACK + +static efx_link_mode_t +sfc_port_phy_caps_to_max_link_speed(uint32_t phy_caps) +{ + if (phy_caps & (1u << EFX_PHY_CAP_100000FDX)) + return EFX_LINK_100000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_50000FDX)) + return EFX_LINK_50000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_40000FDX)) + return EFX_LINK_40000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_25000FDX)) + return EFX_LINK_25000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_10000FDX)) + return EFX_LINK_10000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_1000FDX)) + return EFX_LINK_1000FDX; + return EFX_LINK_UNKNOWN; +} + +#endif + int sfc_port_start(struct sfc_adapter *sa) { @@ -143,6 +165,21 @@ sfc_port_start(struct sfc_adapter *sa) if (rc != 0) goto fail_port_init; +#if EFSYS_OPT_LOOPBACK + if (sa->eth_dev->data->dev_conf.lpbk_mode != 0) { + efx_link_mode_t link_mode; + + link_mode = + sfc_port_phy_caps_to_max_link_speed(port->phy_adv_cap); + sfc_log_init(sa, "set loopback link_mode=%u type=%u", link_mode, + sa->eth_dev->data->dev_conf.lpbk_mode); + rc = efx_port_loopback_set(sa->nic, link_mode, + sa->eth_dev->data->dev_conf.lpbk_mode); + if (rc != 0) + goto fail_loopback_set; + } +#endif + sfc_log_init(sa, "set flow control to %#x autoneg=%u", port->flow_ctrl, port->flow_ctrl_autoneg); rc = efx_mac_fcntl_set(sa->nic, port->flow_ctrl, @@ -155,6 +192,16 @@ sfc_port_start(struct sfc_adapter *sa) SFC_ASSERT((port->phy_adv_cap & phy_pause_caps) == 0); phy_adv_cap = port->phy_adv_cap | (phy_adv_cap & phy_pause_caps); + /* + * No controls for FEC yet. 
Use default FEC mode. + * I.e. advertise everything supported (*_FEC=1), but do not request + * anything explicitly (*_FEC_REQUESTED=0). + */ + phy_adv_cap |= port->phy_adv_cap_mask & + (1u << EFX_PHY_CAP_BASER_FEC | + 1u << EFX_PHY_CAP_RS_FEC | + 1u << EFX_PHY_CAP_25G_BASER_FEC); + sfc_log_init(sa, "set phy adv caps to %#x", phy_adv_cap); rc = efx_phy_adv_cap_set(sa->nic, phy_adv_cap); if (rc != 0) @@ -268,6 +315,9 @@ fail_mac_addr_set: fail_mac_pdu_set: fail_phy_adv_cap_set: fail_mac_fcntl_set: +#if EFSYS_OPT_LOOPBACK +fail_loopback_set: +#endif efx_port_fini(sa->nic); fail_port_init: @@ -323,6 +373,8 @@ sfc_port_attach(struct sfc_adapter *sa) struct sfc_port *port = &sa->port; const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); const struct ether_addr *from; + uint32_t mac_nstats; + size_t mac_stats_size; long kvarg_stats_update_period_ms; int rc; @@ -358,7 +410,9 @@ sfc_port_attach(struct sfc_adapter *sa) if (port->mac_stats_buf == NULL) goto fail_mac_stats_buf_alloc; - rc = sfc_dma_alloc(sa, "mac_stats", 0, EFX_MAC_STATS_SIZE, + mac_nstats = efx_nic_cfg_get(sa->nic)->enc_mac_stats_nstats; + mac_stats_size = RTE_ALIGN(mac_nstats * sizeof(uint64_t), EFX_BUF_SIZE); + rc = sfc_dma_alloc(sa, "mac_stats", 0, mac_stats_size, sa->socket_id, &port->mac_stats_dma_mem); if (rc != 0) goto fail_mac_stats_dma_alloc; @@ -470,10 +524,22 @@ sfc_port_link_mode_to_info(efx_link_mode_t link_mode, link_info->link_speed = ETH_SPEED_NUM_10G; link_info->link_duplex = ETH_LINK_FULL_DUPLEX; break; + case EFX_LINK_25000FDX: + link_info->link_speed = ETH_SPEED_NUM_25G; + link_info->link_duplex = ETH_LINK_FULL_DUPLEX; + break; case EFX_LINK_40000FDX: link_info->link_speed = ETH_SPEED_NUM_40G; link_info->link_duplex = ETH_LINK_FULL_DUPLEX; break; + case EFX_LINK_50000FDX: + link_info->link_speed = ETH_SPEED_NUM_50G; + link_info->link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case EFX_LINK_100000FDX: + link_info->link_speed = ETH_SPEED_NUM_100G; + link_info->link_duplex = ETH_LINK_FULL_DUPLEX; + break; default: SFC_ASSERT(B_FALSE); /* FALLTHROUGH */ diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c index abc53fb5..d8503e20 100644 --- a/drivers/net/sfc/sfc_rx.c +++ b/drivers/net/sfc/sfc_rx.c @@ -184,7 +184,6 @@ sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps) return ptypes; } -#if EFSYS_OPT_RX_SCALE static void sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags, struct rte_mbuf *m) @@ -205,14 +204,6 @@ sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags, m->ol_flags |= PKT_RX_RSS_HASH; } } -#else -static void -sfc_efx_rx_set_rss_hash(__rte_unused struct sfc_efx_rxq *rxq, - __rte_unused unsigned int flags, - __rte_unused struct rte_mbuf *m) -{ -} -#endif static uint16_t sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -393,6 +384,7 @@ sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq) static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings; static int sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc, + __rte_unused struct rte_mempool *mb_pool, unsigned int *rxq_entries, unsigned int *evq_entries, unsigned int *rxq_max_fill_level) @@ -525,7 +517,8 @@ struct sfc_dp_rx sfc_efx_rx = { .type = SFC_DP_RX, .hw_fw_caps = 0, }, - .features = SFC_DP_RX_FEAT_SCATTER, + .features = SFC_DP_RX_FEAT_SCATTER | + SFC_DP_RX_FEAT_CHECKSUM, .qsize_up_rings = sfc_efx_rx_qsize_up_rings, .qcreate = sfc_efx_rx_qcreate, .qdestroy = sfc_efx_rx_qdestroy, @@ -608,7 +601,7 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index) 
sfc_err(sa, "RxQ %u flush failed", sw_index); if (rxq->state & SFC_RXQ_FLUSHED) - sfc_info(sa, "RxQ %u flushed", sw_index); + sfc_notice(sa, "RxQ %u flushed", sw_index); } sa->dp_rx->qpurge(rxq->dp); @@ -617,7 +610,8 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index) static int sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq) { - boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE; + struct sfc_rss *rss = &sa->rss; + boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE; struct sfc_port *port = &sa->port; int rc; @@ -629,7 +623,7 @@ sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq) * repeat this step without promiscuous and all-multicast flags set */ retry: - rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss); + rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss); if (rc == 0) return 0; else if (rc != EOPNOTSUPP) @@ -687,10 +681,37 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index) if (rc != 0) goto fail_ev_qstart; - rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type, - &rxq->mem, rxq_info->entries, - 0 /* not used on EF10 */, rxq_info->type_flags, - evq->common, &rxq->common); + switch (rxq_info->type) { + case EFX_RXQ_TYPE_DEFAULT: + rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type, + &rxq->mem, rxq_info->entries, 0 /* not used on EF10 */, + rxq_info->type_flags, evq->common, &rxq->common); + break; + case EFX_RXQ_TYPE_ES_SUPER_BUFFER: { + struct rte_mempool *mp = rxq->refill_mb_pool; + struct rte_mempool_info mp_info; + + rc = rte_mempool_ops_get_info(mp, &mp_info); + if (rc != 0) { + /* Positive errno is used in the driver */ + rc = -rc; + goto fail_mp_get_info; + } + if (mp_info.contig_block_size <= 0) { + rc = EINVAL; + goto fail_bad_contig_block_size; + } + rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0, + mp_info.contig_block_size, rxq->buf_size, + mp->header_size + mp->elt_size + mp->trailer_size, + sa->rxd_wait_timeout_ns, + &rxq->mem, rxq_info->entries, rxq_info->type_flags, + evq->common, &rxq->common); + break; + } + default: + rc = ENOTSUP; + } if (rc != 0) goto fail_rx_qcreate; @@ -721,6 +742,8 @@ fail_dp_qstart: sfc_rx_qflush(sa, sw_index); fail_rx_qcreate: +fail_bad_contig_block_size: +fail_mp_get_info: sfc_ev_qstop(evq); fail_ev_qstart: @@ -770,9 +793,12 @@ sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa) caps |= DEV_RX_OFFLOAD_JUMBO_FRAME; caps |= DEV_RX_OFFLOAD_CRC_STRIP; - caps |= DEV_RX_OFFLOAD_IPV4_CKSUM; - caps |= DEV_RX_OFFLOAD_UDP_CKSUM; - caps |= DEV_RX_OFFLOAD_TCP_CKSUM; + + if (sa->dp_rx->features & SFC_DP_RX_FEAT_CHECKSUM) { + caps |= DEV_RX_OFFLOAD_IPV4_CKSUM; + caps |= DEV_RX_OFFLOAD_UDP_CKSUM; + caps |= DEV_RX_OFFLOAD_TCP_CKSUM; + } if (encp->enc_tunnel_encapsulations_supported && (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS)) @@ -792,51 +818,11 @@ sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa) return caps; } -static void -sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group, - const char *verdict, uint64_t offloads) -{ - unsigned long long bit; - - while ((bit = __builtin_ffsll(offloads)) != 0) { - uint64_t flag = (1ULL << --bit); - - sfc_err(sa, "Rx %s offload %s %s", offload_group, - rte_eth_dev_rx_offload_name(flag), verdict); - - offloads &= ~flag; - } -} - -static boolean_t -sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested) -{ - uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads; - uint64_t supported = 
sfc_rx_get_dev_offload_caps(sa) | - sfc_rx_get_queue_offload_caps(sa); - uint64_t rejected = requested & ~supported; - uint64_t missing = (requested & mandatory) ^ mandatory; - boolean_t mismatch = B_FALSE; - - if (rejected) { - sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected); - mismatch = B_TRUE; - } - - if (missing) { - sfc_rx_log_offloads(sa, "queue", "must be set", missing); - mismatch = B_TRUE; - } - - return mismatch; -} - static int sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level, - const struct rte_eth_rxconf *rx_conf) + const struct rte_eth_rxconf *rx_conf, + __rte_unused uint64_t offloads) { - uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) | - sfc_rx_get_queue_offload_caps(sa); int rc = 0; if (rx_conf->rx_thresh.pthresh != 0 || @@ -858,17 +844,6 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level, rc = EINVAL; } - if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) != - DEV_RX_OFFLOAD_CHECKSUM) - sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)"); - - if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) && - (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) - sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on"); - - if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads)) - rc = EINVAL; - return rc; } @@ -887,7 +862,7 @@ sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool) order = MIN(order, rte_bsf32(data_off)); - return 1u << (order - 1); + return 1u << order; } static uint16_t @@ -979,26 +954,29 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, struct rte_mempool *mb_pool) { const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + struct sfc_rss *rss = &sa->rss; int rc; unsigned int rxq_entries; unsigned int evq_entries; unsigned int rxq_max_fill_level; + uint64_t offloads; uint16_t buf_size; struct sfc_rxq_info *rxq_info; struct sfc_evq *evq; struct sfc_rxq *rxq; struct sfc_dp_rx_qcreate_info info; - rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, &rxq_entries, &evq_entries, - &rxq_max_fill_level); + rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, mb_pool, &rxq_entries, + &evq_entries, &rxq_max_fill_level); if (rc != 0) goto fail_size_up_rings; SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS); SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS); - SFC_ASSERT(rxq_entries >= nb_rx_desc); SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc); - rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf); + offloads = rx_conf->offloads | + sa->eth_dev->data->dev_conf.rxmode.offloads; + rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads); if (rc != 0) goto fail_bad_conf; @@ -1011,7 +989,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, } if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) && - (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) { + (~offloads & DEV_RX_OFFLOAD_SCATTER)) { sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool " "object size is too small", sw_index); sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs " @@ -1027,9 +1005,14 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, SFC_ASSERT(rxq_entries <= rxq_info->max_entries); rxq_info->entries = rxq_entries; - rxq_info->type = EFX_RXQ_TYPE_DEFAULT; + + if (sa->dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER) + rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER; + else + rxq_info->type = EFX_RXQ_TYPE_DEFAULT; + rxq_info->type_flags = - (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ? + (offloads & DEV_RX_OFFLOAD_SCATTER) ? 
EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE; if ((encp->enc_tunnel_encapsulations_supported != 0) && @@ -1054,6 +1037,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, rxq->refill_threshold = RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK); rxq->refill_mb_pool = mb_pool; + rxq->buf_size = buf_size; rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries), socket_id, &rxq->mem); @@ -1068,10 +1052,8 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, info.batch_max = encp->enc_rx_batch_max; info.prefix_size = encp->enc_rx_prefix_size; -#if EFSYS_OPT_RX_SCALE - if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0) + if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0) info.flags |= SFC_RXQ_FLAG_RSS_HASH; -#endif info.rxq_entries = rxq_info->entries; info.rxq_hw_ring = rxq->mem.esm_base; @@ -1079,6 +1061,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, info.evq_hw_ring = evq->mem.esm_base; info.hw_index = rxq->hw_index; info.mem_bar = sa->mem_bar.esb_base; + info.vi_window_shift = encp->enc_vi_window_shift; rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index, &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr, @@ -1140,85 +1123,228 @@ sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index) rte_free(rxq); } -#if EFSYS_OPT_RX_SCALE -efx_rx_hash_type_t -sfc_rte_to_efx_hash_type(uint64_t rss_hf) +/* + * Mapping between RTE RSS hash functions and their EFX counterparts. + */ +struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = { + { ETH_RSS_NONFRAG_IPV4_TCP, + EFX_RX_HASH(IPV4_TCP, 4TUPLE) }, + { ETH_RSS_NONFRAG_IPV4_UDP, + EFX_RX_HASH(IPV4_UDP, 4TUPLE) }, + { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX, + EFX_RX_HASH(IPV6_TCP, 4TUPLE) }, + { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX, + EFX_RX_HASH(IPV6_UDP, 4TUPLE) }, + { ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER, + EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) | + EFX_RX_HASH(IPV4, 2TUPLE) }, + { ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER | + ETH_RSS_IPV6_EX, + EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) | + EFX_RX_HASH(IPV6, 2TUPLE) } +}; + +static efx_rx_hash_type_t +sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type, + unsigned int *hash_type_flags_supported, + unsigned int nb_hash_type_flags_supported) +{ + efx_rx_hash_type_t hash_type_masked = 0; + unsigned int i, j; + + for (i = 0; i < nb_hash_type_flags_supported; ++i) { + unsigned int class_tuple_lbn[] = { + EFX_RX_CLASS_IPV4_TCP_LBN, + EFX_RX_CLASS_IPV4_UDP_LBN, + EFX_RX_CLASS_IPV4_LBN, + EFX_RX_CLASS_IPV6_TCP_LBN, + EFX_RX_CLASS_IPV6_UDP_LBN, + EFX_RX_CLASS_IPV6_LBN + }; + + for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) { + unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE; + unsigned int flag; + + tuple_mask <<= class_tuple_lbn[j]; + flag = hash_type & tuple_mask; + + if (flag == hash_type_flags_supported[i]) + hash_type_masked |= flag; + } + } + + return hash_type_masked; +} + +int +sfc_rx_hash_init(struct sfc_adapter *sa) +{ + struct sfc_rss *rss = &sa->rss; + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask; + efx_rx_hash_alg_t alg; + unsigned int flags_supp[EFX_RX_HASH_NFLAGS]; + unsigned int nb_flags_supp; + struct sfc_rss_hf_rte_to_efx *hf_map; + struct sfc_rss_hf_rte_to_efx *entry; + efx_rx_hash_type_t efx_hash_types; + unsigned int i; + int rc; + + if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ)) + alg = EFX_RX_HASHALG_TOEPLITZ; + else if 
(alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM)) + alg = EFX_RX_HASHALG_PACKED_STREAM; + else + return EINVAL; + + rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp, + &nb_flags_supp); + if (rc != 0) + return rc; + + hf_map = rte_calloc_socket("sfc-rss-hf-map", + RTE_DIM(sfc_rss_hf_map), + sizeof(*hf_map), 0, sa->socket_id); + if (hf_map == NULL) + return ENOMEM; + + entry = hf_map; + efx_hash_types = 0; + for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) { + efx_rx_hash_type_t ht; + + ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx, + flags_supp, nb_flags_supp); + if (ht != 0) { + entry->rte = sfc_rss_hf_map[i].rte; + entry->efx = ht; + efx_hash_types |= ht; + ++entry; + } + } + + rss->hash_alg = alg; + rss->hf_map_nb_entries = (unsigned int)(entry - hf_map); + rss->hf_map = hf_map; + rss->hash_types = efx_hash_types; + + return 0; +} + +void +sfc_rx_hash_fini(struct sfc_adapter *sa) { - efx_rx_hash_type_t efx_hash_types = 0; + struct sfc_rss *rss = &sa->rss; - if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | - ETH_RSS_NONFRAG_IPV4_OTHER)) != 0) - efx_hash_types |= EFX_RX_HASH_IPV4; + rte_free(rss->hf_map); +} - if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0) - efx_hash_types |= EFX_RX_HASH_TCPIPV4; +int +sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte, + efx_rx_hash_type_t *efx) +{ + struct sfc_rss *rss = &sa->rss; + efx_rx_hash_type_t hash_types = 0; + unsigned int i; - if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | - ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0) - efx_hash_types |= EFX_RX_HASH_IPV6; + for (i = 0; i < rss->hf_map_nb_entries; ++i) { + uint64_t rte_mask = rss->hf_map[i].rte; - if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0) - efx_hash_types |= EFX_RX_HASH_TCPIPV6; + if ((rte & rte_mask) != 0) { + rte &= ~rte_mask; + hash_types |= rss->hf_map[i].efx; + } + } + + if (rte != 0) { + sfc_err(sa, "unsupported hash functions requested"); + return EINVAL; + } - return efx_hash_types; + *efx = hash_types; + + return 0; } uint64_t -sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types) +sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa, efx_rx_hash_type_t efx) +{ + struct sfc_rss *rss = &sa->rss; + uint64_t rte = 0; + unsigned int i; + + for (i = 0; i < rss->hf_map_nb_entries; ++i) { + efx_rx_hash_type_t hash_type = rss->hf_map[i].efx; + + if ((efx & hash_type) == hash_type) + rte |= rss->hf_map[i].rte; + } + + return rte; +} + +static int +sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa, + struct rte_eth_rss_conf *conf) { - uint64_t rss_hf = 0; + struct sfc_rss *rss = &sa->rss; + efx_rx_hash_type_t efx_hash_types = rss->hash_types; + uint64_t rss_hf = sfc_rx_hf_efx_to_rte(sa, efx_hash_types); + int rc; - if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0) - rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | - ETH_RSS_NONFRAG_IPV4_OTHER); + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) { + if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) || + conf->rss_key != NULL) + return EINVAL; + } - if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0) - rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if (conf->rss_hf != 0) { + rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types); + if (rc != 0) + return rc; + } - if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0) - rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | - ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX); + if (conf->rss_key != NULL) { + if (conf->rss_key_len != sizeof(rss->key)) { + sfc_err(sa, "RSS key size is wrong (should be %lu)", + sizeof(rss->key)); + return EINVAL; + } + rte_memcpy(rss->key, 
conf->rss_key, sizeof(rss->key)); + } - if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0) - rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX); + rss->hash_types = efx_hash_types; - return rss_hf; + return 0; } -#endif -#if EFSYS_OPT_RX_SCALE static int sfc_rx_rss_config(struct sfc_adapter *sa) { + struct sfc_rss *rss = &sa->rss; int rc = 0; - if (sa->rss_channels > 0) { + if (rss->channels > 0) { rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, - EFX_RX_HASHALG_TOEPLITZ, - sa->rss_hash_types, B_TRUE); + rss->hash_alg, rss->hash_types, + B_TRUE); if (rc != 0) goto finish; rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, - sa->rss_key, - sizeof(sa->rss_key)); + rss->key, sizeof(rss->key)); if (rc != 0) goto finish; rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, - sa->rss_tbl, RTE_DIM(sa->rss_tbl)); + rss->tbl, RTE_DIM(rss->tbl)); } finish: return rc; } -#else -static int -sfc_rx_rss_config(__rte_unused struct sfc_adapter *sa) -{ - return 0; -} -#endif int sfc_rx_start(struct sfc_adapter *sa) @@ -1294,37 +1420,48 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode) { uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) | sfc_rx_get_queue_offload_caps(sa); - uint64_t offloads_rejected = rxmode->offloads & ~offloads_supported; + struct sfc_rss *rss = &sa->rss; int rc = 0; switch (rxmode->mq_mode) { case ETH_MQ_RX_NONE: /* No special checks are required */ break; -#if EFSYS_OPT_RX_SCALE case ETH_MQ_RX_RSS: - if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) { + if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) { sfc_err(sa, "RSS is not available"); rc = EINVAL; } break; -#endif default: sfc_err(sa, "Rx multi-queue mode %u not supported", rxmode->mq_mode); rc = EINVAL; } - if (offloads_rejected) { - sfc_rx_log_offloads(sa, "device", "is unsupported", - offloads_rejected); - rc = EINVAL; - } - - if (~rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) { + /* KEEP_CRC offload flag is not supported by PMD + * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed + */ + if (rte_eth_dev_must_keep_crc(rxmode->offloads)) { sfc_warn(sa, "FCS stripping cannot be disabled - always on"); rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP; - rxmode->hw_strip_crc = 1; + } + + /* + * Requested offloads are validated against supported by ethdev, + * so unsupported offloads cannot be added as the result of + * below check. + */ + if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) != + (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) { + sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)"); + rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM; + } + + if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) && + (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) { + sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on"); + rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; } return rc; @@ -1361,6 +1498,7 @@ sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues) int sfc_rx_configure(struct sfc_adapter *sa) { + struct sfc_rss *rss = &sa->rss; struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf; const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues; int rc; @@ -1410,21 +1548,26 @@ sfc_rx_configure(struct sfc_adapter *sa) sa->rxq_count++; } -#if EFSYS_OPT_RX_SCALE - sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ? - MIN(sa->rxq_count, EFX_MAXRSS) : 0; + rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ? 
+ MIN(sa->rxq_count, EFX_MAXRSS) : 0; - if (sa->rss_channels > 0) { + if (rss->channels > 0) { + struct rte_eth_rss_conf *adv_conf_rss; unsigned int sw_index; for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index) - sa->rss_tbl[sw_index] = sw_index % sa->rss_channels; + rss->tbl[sw_index] = sw_index % rss->channels; + + adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf; + rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss); + if (rc != 0) + goto fail_rx_process_adv_conf_rss; } -#endif done: return 0; +fail_rx_process_adv_conf_rss: fail_rx_qinit_info: fail_rxqs_realloc: fail_rxqs_alloc: @@ -1443,9 +1586,11 @@ fail_check_mode: void sfc_rx_close(struct sfc_adapter *sa) { + struct sfc_rss *rss = &sa->rss; + sfc_rx_fini_queues(sa, 0); - sa->rss_channels = 0; + rss->channels = 0; rte_free(sa->rxq_info); sa->rxq_info = NULL; diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h index 6706ee6f..3fba7d8a 100644 --- a/drivers/net/sfc/sfc_rx.h +++ b/drivers/net/sfc/sfc_rx.h @@ -60,6 +60,7 @@ struct sfc_rxq { unsigned int hw_index; unsigned int refill_threshold; struct rte_mempool *refill_mb_pool; + uint16_t buf_size; struct sfc_dp_rxq *dp; unsigned int state; }; @@ -152,10 +153,12 @@ unsigned int sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index); int sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset); -#if EFSYS_OPT_RX_SCALE -efx_rx_hash_type_t sfc_rte_to_efx_hash_type(uint64_t rss_hf); -uint64_t sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types); -#endif +int sfc_rx_hash_init(struct sfc_adapter *sa); +void sfc_rx_hash_fini(struct sfc_adapter *sa); +int sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte, + efx_rx_hash_type_t *efx); +uint64_t sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa, + efx_rx_hash_type_t efx); #ifdef __cplusplus } diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c index ba8496df..effe9853 100644 --- a/drivers/net/sfc/sfc_tso.c +++ b/drivers/net/sfc/sfc_tso.c @@ -164,7 +164,8 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx, rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t)); sent_seq = rte_be_to_cpu_32(sent_seq); - efx_tx_qdesc_tso2_create(txq->common, packet_id, sent_seq, m->tso_segsz, + efx_tx_qdesc_tso2_create(txq->common, packet_id, 0, sent_seq, + m->tso_segsz, *pend, EFX_TX_FATSOV2_OPT_NDESCS); *pend += EFX_TX_FATSOV2_OPT_NDESCS; diff --git a/drivers/net/sfc/sfc_tweak.h b/drivers/net/sfc/sfc_tweak.h index b4026851..4d543f68 100644 --- a/drivers/net/sfc/sfc_tweak.h +++ b/drivers/net/sfc/sfc_tweak.h @@ -34,4 +34,12 @@ /** Number of mbufs to be freed in bulk in a single call */ #define SFC_TX_REAP_BULK_SIZE 32 +/** + * Default head-of-line block timeout to wait for Rx descriptor before + * packet drop because of no descriptors available. + * + * DPDK FW variant only with equal stride super-buffer Rx mode. 
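+ * + * The default of (200U * 1000) ns corresponds to 200 us. It can be + * overridden through the rxd_wait_timeout_ns device argument defined in + * sfc_kvargs.h, e.g. -w 0000:02:00.0,rxd_wait_timeout_ns=100000 (the PCI + * address here is illustrative).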
+ */ +#define SFC_RXD_WAIT_TIMEOUT_NS_DEF (200U * 1000) + #endif /* _SFC_TWEAK_H_ */ diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c index 757b03ba..6d42a1a6 100644 --- a/drivers/net/sfc/sfc_tx.c +++ b/drivers/net/sfc/sfc_tx.c @@ -73,48 +73,10 @@ sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa) return caps; } -static void -sfc_tx_log_offloads(struct sfc_adapter *sa, const char *offload_group, - const char *verdict, uint64_t offloads) -{ - unsigned long long bit; - - while ((bit = __builtin_ffsll(offloads)) != 0) { - uint64_t flag = (1ULL << --bit); - - sfc_err(sa, "Tx %s offload %s %s", offload_group, - rte_eth_dev_tx_offload_name(flag), verdict); - - offloads &= ~flag; - } -} - -static int -sfc_tx_queue_offload_mismatch(struct sfc_adapter *sa, uint64_t requested) -{ - uint64_t mandatory = sa->eth_dev->data->dev_conf.txmode.offloads; - uint64_t supported = sfc_tx_get_dev_offload_caps(sa) | - sfc_tx_get_queue_offload_caps(sa); - uint64_t rejected = requested & ~supported; - uint64_t missing = (requested & mandatory) ^ mandatory; - boolean_t mismatch = B_FALSE; - - if (rejected) { - sfc_tx_log_offloads(sa, "queue", "is unsupported", rejected); - mismatch = B_TRUE; - } - - if (missing) { - sfc_tx_log_offloads(sa, "queue", "must be set", missing); - mismatch = B_TRUE; - } - - return mismatch; -} - static int sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level, - const struct rte_eth_txconf *tx_conf) + const struct rte_eth_txconf *tx_conf, + uint64_t offloads) { int rc = 0; @@ -138,15 +100,12 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level, } /* We either perform both TCP and UDP offload, or no offload at all */ - if (((tx_conf->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) != - ((tx_conf->offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) { + if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) != + ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) { sfc_err(sa, "TCP and UDP offloads can't be set independently"); rc = EINVAL; } - if (sfc_tx_queue_offload_mismatch(sa, tx_conf->offloads)) - rc = EINVAL; - return rc; } @@ -171,6 +130,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, struct sfc_txq *txq; int rc = 0; struct sfc_dp_tx_qcreate_info info; + uint64_t offloads; sfc_log_init(sa, "TxQ = %u", sw_index); @@ -183,7 +143,9 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, SFC_ASSERT(txq_entries >= nb_tx_desc); SFC_ASSERT(txq_max_fill_level <= nb_tx_desc); - rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf); + offloads = tx_conf->offloads | + sa->eth_dev->data->dev_conf.txmode.offloads; + rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads); if (rc != 0) goto fail_bad_conf; @@ -209,8 +171,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, txq->free_thresh = (tx_conf->tx_free_thresh) ? 
tx_conf->tx_free_thresh : SFC_TX_DEFAULT_FREE_THRESH; - txq->flags = tx_conf->txq_flags; - txq->offloads = tx_conf->offloads; + txq->offloads = offloads; rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries), socket_id, &txq->mem); @@ -220,8 +181,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, memset(&info, 0, sizeof(info)); info.max_fill_level = txq_max_fill_level; info.free_thresh = txq->free_thresh; - info.flags = tx_conf->txq_flags; - info.offloads = tx_conf->offloads; + info.offloads = offloads; info.txq_entries = txq_info->entries; info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max; info.txq_hw_ring = txq->mem.esm_base; @@ -229,6 +189,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, info.evq_hw_ring = evq->mem.esm_base; info.hw_index = txq->hw_index; info.mem_bar = sa->mem_bar.esb_base; + info.vi_window_shift = encp->enc_vi_window_shift; rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index, &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr, @@ -303,9 +264,6 @@ sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index) static int sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode) { - uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) | - sfc_tx_get_queue_offload_caps(sa); - uint64_t offloads_rejected = txmode->offloads & ~offloads_supported; int rc = 0; switch (txmode->mq_mode) { @@ -336,12 +294,6 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode) rc = EINVAL; } - if (offloads_rejected) { - sfc_tx_log_offloads(sa, "device", "is unsupported", - offloads_rejected); - rc = EINVAL; - } - return rc; } @@ -477,39 +429,21 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index) if (rc != 0) goto fail_ev_qstart; - /* - * The absence of ETH_TXQ_FLAGS_IGNORE is associated with a legacy - * application which expects that IPv4 checksum offload is enabled - * all the time as there is no legacy flag to turn off the offload. - */ - if ((txq->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) || - (~txq->flags & ETH_TXQ_FLAGS_IGNORE)) + if (txq->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) flags |= EFX_TXQ_CKSUM_IPV4; - if ((txq->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) || - ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) && - (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))) + if (txq->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) flags |= EFX_TXQ_CKSUM_INNER_IPV4; if ((txq->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) || (txq->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) { flags |= EFX_TXQ_CKSUM_TCPUDP; - if ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) && - (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) + if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) flags |= EFX_TXQ_CKSUM_INNER_TCPUDP; } - /* - * The absence of ETH_TXQ_FLAGS_IGNORE is associated with a legacy - * application. In turn, the absence of ETH_TXQ_FLAGS_NOXSUMTCP is - * associated specifically with a legacy application which expects - * both TCP checksum offload and TSO to be enabled because the legacy - * API does not provide a dedicated mechanism to control TSO. 
- */ - if ((txq->offloads & DEV_TX_OFFLOAD_TCP_TSO) || - ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) && - (~txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP))) + if (txq->offloads & DEV_TX_OFFLOAD_TCP_TSO) flags |= EFX_TXQ_FATSOV2; rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem, @@ -606,7 +540,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index) sfc_err(sa, "TxQ %u flush timed out", sw_index); if (txq->state & SFC_TXQ_FLUSHED) - sfc_info(sa, "TxQ %u flushed", sw_index); + sfc_notice(sa, "TxQ %u flushed", sw_index); } sa->dp_tx->qreap(txq->dp); diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h index c2e5f13e..146b805c 100644 --- a/drivers/net/sfc/sfc_tx.h +++ b/drivers/net/sfc/sfc_tx.h @@ -58,7 +58,6 @@ struct sfc_txq { struct sfc_dp_txq *dp; efx_txq_t *common; unsigned int free_thresh; - unsigned int flags; uint64_t offloads; }; diff --git a/drivers/net/softnic/Makefile b/drivers/net/softnic/Makefile index d56fecd6..ea9b65f4 100644 --- a/drivers/net/softnic/Makefile +++ b/drivers/net/softnic/Makefile @@ -8,13 +8,15 @@ include $(RTE_SDK)/mk/rte.vars.mk # LIB = librte_pmd_softnic.a +CFLAGS += -DALLOW_EXPERIMENTAL_API CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lrte_pipeline -lrte_port -lrte_table LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_sched LDLIBS += -lrte_bus_vdev -EXPORT_MAP := rte_pmd_eth_softnic_version.map +EXPORT_MAP := rte_pmd_softnic_version.map LIBABIVER := 1 @@ -22,11 +24,30 @@ LIBABIVER := 1 # all source are stored in SRCS-y # SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_mempool.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_swq.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_link.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tm.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tap.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_action.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_pipeline.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_thread.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_cli.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += parser.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += conn.c # # Export include files # SYMLINK-y-include += rte_eth_softnic.h +ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp") +$(info Softnic PMD can only operate in a linuxapp environment, \ +please change the definition of the RTE_TARGET environment variable) +all: +clean: +else + include $(RTE_SDK)/mk/rte.lib.mk + +endif diff --git a/drivers/net/softnic/conn.c b/drivers/net/softnic/conn.c new file index 00000000..990cf40f --- /dev/null +++ b/drivers/net/softnic/conn.c @@ -0,0 +1,332 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <sys/types.h> + +#define __USE_GNU +#include <sys/socket.h> + +#include <sys/epoll.h> +#include <netinet/in.h> +#include <arpa/inet.h> +#include <errno.h> + +#include "conn.h" + +#define MSG_CMD_TOO_LONG "Command too long." 
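+ +/* + * Usage sketch (illustrative only: my_msg_handler, the address and the + * sizes below are examples, not defaults taken from this file): + * + * struct softnic_conn_params p = { + * .welcome = "Welcome!\n", + * .prompt = "softnic> ", + * .addr = "0.0.0.0", + * .port = 8086, + * .buf_size = 1024, + * .msg_in_len_max = 1024, + * .msg_out_len_max = 1024, + * .msg_handle = my_msg_handler, + * .msg_handle_arg = NULL, + * }; + * struct softnic_conn *conn = softnic_conn_init(&p); + * + * for ( ; ; ) { + * softnic_conn_poll_for_conn(conn); + * softnic_conn_poll_for_msg(conn); + * } + */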
+ +struct softnic_conn { + char *welcome; + char *prompt; + char *buf; + char *msg_in; + char *msg_out; + size_t buf_size; + size_t msg_in_len_max; + size_t msg_out_len_max; + size_t msg_in_len; + int fd_server; + int fd_client_group; + softnic_conn_msg_handle_t msg_handle; + void *msg_handle_arg; +}; + +struct softnic_conn * +softnic_conn_init(struct softnic_conn_params *p) +{ + struct sockaddr_in server_address; + struct softnic_conn *conn; + int fd_server, fd_client_group, status; + + memset(&server_address, 0, sizeof(server_address)); + + /* Check input arguments */ + if (p == NULL || + p->welcome == NULL || + p->prompt == NULL || + p->addr == NULL || + p->buf_size == 0 || + p->msg_in_len_max == 0 || + p->msg_out_len_max == 0 || + p->msg_handle == NULL) + return NULL; + + status = inet_aton(p->addr, &server_address.sin_addr); + if (status == 0) + return NULL; + + /* Memory allocation */ + conn = calloc(1, sizeof(struct softnic_conn)); + if (conn == NULL) + return NULL; + + conn->welcome = calloc(1, CONN_WELCOME_LEN_MAX + 1); + conn->prompt = calloc(1, CONN_PROMPT_LEN_MAX + 1); + conn->buf = calloc(1, p->buf_size); + conn->msg_in = calloc(1, p->msg_in_len_max + 1); + conn->msg_out = calloc(1, p->msg_out_len_max + 1); + + if (conn->welcome == NULL || + conn->prompt == NULL || + conn->buf == NULL || + conn->msg_in == NULL || + conn->msg_out == NULL) { + softnic_conn_free(conn); + return NULL; + } + + /* Server socket */ + server_address.sin_family = AF_INET; + server_address.sin_port = htons(p->port); + + fd_server = socket(AF_INET, + SOCK_STREAM | SOCK_NONBLOCK, + 0); + if (fd_server == -1) { + softnic_conn_free(conn); + return NULL; + } + + status = bind(fd_server, + (struct sockaddr *)&server_address, + sizeof(server_address)); + if (status == -1) { + softnic_conn_free(conn); + close(fd_server); + return NULL; + } + + status = listen(fd_server, 16); + if (status == -1) { + softnic_conn_free(conn); + close(fd_server); + return NULL; + } + + /* Client group */ + fd_client_group = epoll_create(1); + if (fd_client_group == -1) { + softnic_conn_free(conn); + close(fd_server); + return NULL; + } + + /* Fill in */ + strncpy(conn->welcome, p->welcome, CONN_WELCOME_LEN_MAX); + strncpy(conn->prompt, p->prompt, CONN_PROMPT_LEN_MAX); + conn->buf_size = p->buf_size; + conn->msg_in_len_max = p->msg_in_len_max; + conn->msg_out_len_max = p->msg_out_len_max; + conn->msg_in_len = 0; + conn->fd_server = fd_server; + conn->fd_client_group = fd_client_group; + conn->msg_handle = p->msg_handle; + conn->msg_handle_arg = p->msg_handle_arg; + + return conn; +} + +void +softnic_conn_free(struct softnic_conn *conn) +{ + if (conn == NULL) + return; + + if (conn->fd_client_group) + close(conn->fd_client_group); + + if (conn->fd_server) + close(conn->fd_server); + + free(conn->msg_out); + free(conn->msg_in); + free(conn->prompt); + free(conn->welcome); + free(conn); +} + +int +softnic_conn_poll_for_conn(struct softnic_conn *conn) +{ + struct sockaddr_in client_address; + struct epoll_event event; + socklen_t client_address_length; + int fd_client, status; + + /* Check input arguments */ + if (conn == NULL) + return -1; + + /* Server socket */ + client_address_length = sizeof(client_address); + fd_client = accept4(conn->fd_server, + (struct sockaddr *)&client_address, + &client_address_length, + SOCK_NONBLOCK); + if (fd_client == -1) { + if (errno == EAGAIN || errno == EWOULDBLOCK) + return 0; + + return -1; + } + + /* Client group */ + event.events = EPOLLIN | EPOLLRDHUP | EPOLLHUP; + event.data.fd = fd_client; + + 
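/* Add the accepted client socket to the epoll group */ + 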
status = epoll_ctl(conn->fd_client_group, + EPOLL_CTL_ADD, + fd_client, + &event); + if (status == -1) { + close(fd_client); + return -1; + } + + /* Client */ + status = write(fd_client, + conn->welcome, + strlen(conn->welcome)); + if (status == -1) { + close(fd_client); + return -1; + } + + status = write(fd_client, + conn->prompt, + strlen(conn->prompt)); + if (status == -1) { + close(fd_client); + return -1; + } + + return 0; +} + +static int +data_event_handle(struct softnic_conn *conn, + int fd_client) +{ + ssize_t len, i, status; + + /* Read input message */ + + len = read(fd_client, + conn->buf, + conn->buf_size); + if (len == -1) { + if (errno == EAGAIN || errno == EWOULDBLOCK) + return 0; + + return -1; + } + if (len == 0) + return 0; + + /* Handle input messages */ + for (i = 0; i < len; i++) { + if (conn->buf[i] == '\n') { + size_t n; + + conn->msg_in[conn->msg_in_len] = 0; + conn->msg_out[0] = 0; + + conn->msg_handle(conn->msg_in, + conn->msg_out, + conn->msg_out_len_max, + conn->msg_handle_arg); + + n = strlen(conn->msg_out); + if (n) { + status = write(fd_client, + conn->msg_out, + n); + if (status == -1) + return status; + } + + conn->msg_in_len = 0; + } else if (conn->msg_in_len < conn->msg_in_len_max) { + conn->msg_in[conn->msg_in_len] = conn->buf[i]; + conn->msg_in_len++; + } else { + status = write(fd_client, + MSG_CMD_TOO_LONG, + strlen(MSG_CMD_TOO_LONG)); + if (status == -1) + return status; + + conn->msg_in_len = 0; + } + } + + /* Write prompt */ + status = write(fd_client, + conn->prompt, + strlen(conn->prompt)); + if (status == -1) + return status; + + return 0; +} + +static int +control_event_handle(struct softnic_conn *conn, + int fd_client) +{ + int status; + + status = epoll_ctl(conn->fd_client_group, + EPOLL_CTL_DEL, + fd_client, + NULL); + if (status == -1) + return -1; + + status = close(fd_client); + if (status == -1) + return -1; + + return 0; +} + +int +softnic_conn_poll_for_msg(struct softnic_conn *conn) +{ + struct epoll_event event; + int fd_client, status, status_data = 0, status_control = 0; + + /* Check input arguments */ + if (conn == NULL) + return -1; + + /* Client group */ + status = epoll_wait(conn->fd_client_group, + &event, + 1, + 0); + if (status == -1) + return -1; + if (status == 0) + return 0; + + fd_client = event.data.fd; + + /* Data available */ + if (event.events & EPOLLIN) + status_data = data_event_handle(conn, fd_client); + + /* Control events */ + if (event.events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) + status_control = control_event_handle(conn, fd_client); + + if (status_data || status_control) + return -1; + + return 0; +} diff --git a/drivers/net/softnic/conn.h b/drivers/net/softnic/conn.h new file index 00000000..631edeef --- /dev/null +++ b/drivers/net/softnic/conn.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#ifndef __INCLUDE_CONN_H__ +#define __INCLUDE_CONN_H__ + +#include <stdint.h> + +struct softnic_conn; + +#ifndef CONN_WELCOME_LEN_MAX +#define CONN_WELCOME_LEN_MAX 1024 +#endif + +#ifndef CONN_PROMPT_LEN_MAX +#define CONN_PROMPT_LEN_MAX 16 +#endif + +typedef void (*softnic_conn_msg_handle_t)(char *msg_in, + char *msg_out, + size_t msg_out_len_max, + void *arg); + +struct softnic_conn_params { + const char *welcome; + const char *prompt; + const char *addr; + uint16_t port; + size_t buf_size; + size_t msg_in_len_max; + size_t msg_out_len_max; + softnic_conn_msg_handle_t msg_handle; + void *msg_handle_arg; +}; + +struct softnic_conn * 
+softnic_conn_init(struct softnic_conn_params *p); + +void +softnic_conn_free(struct softnic_conn *conn); + +int +softnic_conn_poll_for_conn(struct softnic_conn *conn); + +int +softnic_conn_poll_for_msg(struct softnic_conn *conn); + +#endif diff --git a/drivers/net/softnic/firmware.cli b/drivers/net/softnic/firmware.cli new file index 00000000..300cf6e3 --- /dev/null +++ b/drivers/net/softnic/firmware.cli @@ -0,0 +1,21 @@ +; SPDX-License-Identifier: BSD-3-Clause +; Copyright(c) 2018 Intel Corporation + +link LINK dev 0000:02:00.0 + +pipeline RX period 10 offset_port_id 0 +pipeline RX port in bsz 32 link LINK rxq 0 +pipeline RX port out bsz 32 swq RXQ0 +pipeline RX table match stub +pipeline RX port in 0 table 0 +pipeline RX table 0 rule add match default action fwd port 0 + +pipeline TX period 10 offset_port_id 0 +pipeline TX port in bsz 32 swq TXQ0 +pipeline TX port out bsz 32 link LINK txq 0 +pipeline TX table match stub +pipeline TX port in 0 table 0 +pipeline TX table 0 rule add match default action fwd port 0 + +thread 1 pipeline RX enable +thread 1 pipeline TX enable diff --git a/drivers/net/softnic/hash_func.h b/drivers/net/softnic/hash_func.h new file index 00000000..198d2b20 --- /dev/null +++ b/drivers/net/softnic/hash_func.h @@ -0,0 +1,359 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#ifndef __INCLUDE_HASH_FUNC_H__ +#define __INCLUDE_HASH_FUNC_H__ + +#include <rte_common.h> + +static inline uint64_t +hash_xor_key8(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t xor0; + + xor0 = seed ^ (k[0] & m[0]); + + return (xor0 >> 32) ^ xor0; +} + +static inline uint64_t +hash_xor_key16(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t xor0; + + xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]); + + return (xor0 >> 32) ^ xor0; +} + +static inline uint64_t +hash_xor_key24(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t xor0; + + xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]); + + xor0 ^= k[2] & m[2]; + + return (xor0 >> 32) ^ xor0; +} + +static inline uint64_t +hash_xor_key32(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t xor0, xor1; + + xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]); + xor1 = (k[2] & m[2]) ^ (k[3] & m[3]); + + xor0 ^= xor1; + + return (xor0 >> 32) ^ xor0; +} + +static inline uint64_t +hash_xor_key40(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t xor0, xor1; + + xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]); + xor1 = (k[2] & m[2]) ^ (k[3] & m[3]); + + xor0 ^= xor1; + + xor0 ^= k[4] & m[4]; + + return (xor0 >> 32) ^ xor0; +} + +static inline uint64_t +hash_xor_key48(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t xor0, xor1, xor2; + + xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]); + xor1 = (k[2] & m[2]) ^ (k[3] & m[3]); + xor2 = (k[4] & m[4]) ^ (k[5] & m[5]); + + xor0 ^= xor1; + + xor0 ^= xor2; + + return (xor0 >> 32) ^ xor0; +} + +static inline uint64_t +hash_xor_key56(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t xor0, xor1, xor2; + + xor0 = ((k[0] & m[0]) ^ 
seed) ^ (k[1] & m[1]); + xor1 = (k[2] & m[2]) ^ (k[3] & m[3]); + xor2 = (k[4] & m[4]) ^ (k[5] & m[5]); + + xor0 ^= xor1; + xor2 ^= k[6] & m[6]; + + xor0 ^= xor2; + + return (xor0 >> 32) ^ xor0; +} + +static inline uint64_t +hash_xor_key64(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t xor0, xor1, xor2, xor3; + + xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]); + xor1 = (k[2] & m[2]) ^ (k[3] & m[3]); + xor2 = (k[4] & m[4]) ^ (k[5] & m[5]); + xor3 = (k[6] & m[6]) ^ (k[7] & m[7]); + + xor0 ^= xor1; + xor2 ^= xor3; + + xor0 ^= xor2; + + return (xor0 >> 32) ^ xor0; +} + +#if defined(RTE_ARCH_X86_64) + +#include <x86intrin.h> + +static inline uint64_t +hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t crc0; + + crc0 = _mm_crc32_u64(seed, k[0] & m[0]); + + return crc0; +} + +static inline uint64_t +hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t k0, crc0, crc1; + + k0 = k[0] & m[0]; + + crc0 = _mm_crc32_u64(k0, seed); + crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]); + + crc0 ^= crc1; + + return crc0; +} + +static inline uint64_t +hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t k0, k2, crc0, crc1; + + k0 = k[0] & m[0]; + k2 = k[2] & m[2]; + + crc0 = _mm_crc32_u64(k0, seed); + crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]); + + crc0 = _mm_crc32_u64(crc0, k2); + + crc0 ^= crc1; + + return crc0; +} + +static inline uint64_t +hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t k0, k2, crc0, crc1, crc2, crc3; + + k0 = k[0] & m[0]; + k2 = k[2] & m[2]; + + crc0 = _mm_crc32_u64(k0, seed); + crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]); + + crc2 = _mm_crc32_u64(k2, k[3] & m[3]); + crc3 = k2 >> 32; + + crc0 = _mm_crc32_u64(crc0, crc1); + crc1 = _mm_crc32_u64(crc2, crc3); + + crc0 ^= crc1; + + return crc0; +} + +static inline uint64_t +hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t k0, k2, crc0, crc1, crc2, crc3; + + k0 = k[0] & m[0]; + k2 = k[2] & m[2]; + + crc0 = _mm_crc32_u64(k0, seed); + crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]); + + crc2 = _mm_crc32_u64(k2, k[3] & m[3]); + crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]); + + crc0 = _mm_crc32_u64(crc0, crc1); + crc1 = _mm_crc32_u64(crc2, crc3); + + crc0 ^= crc1; + + return crc0; +} + +static inline uint64_t +hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t k0, k2, k5, crc0, crc1, crc2, crc3; + + k0 = k[0] & m[0]; + k2 = k[2] & m[2]; + k5 = k[5] & m[5]; + + crc0 = _mm_crc32_u64(k0, seed); + crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]); + + crc2 = _mm_crc32_u64(k2, k[3] & m[3]); + crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]); + + crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2); + crc1 = _mm_crc32_u64(crc3, k5); + + crc0 ^= crc1; + + return crc0; +} + +static inline uint64_t +hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5; + + k0 = k[0] & m[0]; + k2 = k[2] & m[2]; + k5 = k[5] & m[5]; + + crc0 = 
_mm_crc32_u64(k0, seed); + crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]); + + crc2 = _mm_crc32_u64(k2, k[3] & m[3]); + crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]); + + crc4 = _mm_crc32_u64(k5, k[6] & m[6]); + crc5 = k5 >> 32; + + crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2); + crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5); + + crc0 ^= crc1; + + return crc0; +} + +static inline uint64_t +hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5; + + k0 = k[0] & m[0]; + k2 = k[2] & m[2]; + k5 = k[5] & m[5]; + + crc0 = _mm_crc32_u64(k0, seed); + crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]); + + crc2 = _mm_crc32_u64(k2, k[3] & m[3]); + crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]); + + crc4 = _mm_crc32_u64(k5, k[6] & m[6]); + crc5 = _mm_crc32_u64(k5 >> 32, k[7] & m[7]); + + crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2); + crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5); + + crc0 ^= crc1; + + return crc0; +} + +#define hash_default_key8 hash_crc_key8 +#define hash_default_key16 hash_crc_key16 +#define hash_default_key24 hash_crc_key24 +#define hash_default_key32 hash_crc_key32 +#define hash_default_key40 hash_crc_key40 +#define hash_default_key48 hash_crc_key48 +#define hash_default_key56 hash_crc_key56 +#define hash_default_key64 hash_crc_key64 + +#elif defined(RTE_ARCH_ARM64) +#include "hash_func_arm64.h" +#else + +#define hash_default_key8 hash_xor_key8 +#define hash_default_key16 hash_xor_key16 +#define hash_default_key24 hash_xor_key24 +#define hash_default_key32 hash_xor_key32 +#define hash_default_key40 hash_xor_key40 +#define hash_default_key48 hash_xor_key48 +#define hash_default_key56 hash_xor_key56 +#define hash_default_key64 hash_xor_key64 + +#endif + +#endif diff --git a/drivers/net/softnic/hash_func_arm64.h b/drivers/net/softnic/hash_func_arm64.h new file mode 100644 index 00000000..ae6c0f41 --- /dev/null +++ b/drivers/net/softnic/hash_func_arm64.h @@ -0,0 +1,261 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2017 Linaro Limited. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __HASH_FUNC_ARM64_H__ +#define __HASH_FUNC_ARM64_H__ + +#define _CRC32CX(crc, val) \ + __asm__("crc32cx %w[c], %w[c], %x[v]":[c] "+r" (crc):[v] "r" (val)) + +static inline uint64_t +hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key; + uint64_t *m = mask; + uint32_t crc0; + + crc0 = seed; + _CRC32CX(crc0, k[0] & m[0]); + + return crc0; +} + +static inline uint64_t +hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key, k0; + uint64_t *m = mask; + uint32_t crc0, crc1; + + k0 = k[0] & m[0]; + + crc0 = k0; + _CRC32CX(crc0, seed); + crc1 = k0 >> 32; + _CRC32CX(crc1, k[1] & m[1]); + + crc0 ^= crc1; + + return crc0; +} + +static inline uint64_t +hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key, k0, k2; + uint64_t *m = mask; + uint32_t crc0, crc1; + + k0 = k[0] & m[0]; + k2 = k[2] & m[2]; + + crc0 = k0; + _CRC32CX(crc0, seed); + crc1 = k0 >> 32; + _CRC32CX(crc1, k[1] & m[1]); + + _CRC32CX(crc0, k2); + + crc0 ^= crc1; + + return crc0; +} + +static inline uint64_t +hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key, k0, k2; + uint64_t *m = mask; + uint32_t crc0, crc1, crc2, crc3; + + k0 = k[0] & m[0]; + k2 = k[2] & m[2]; + + crc0 = k0; + _CRC32CX(crc0, seed); + crc1 = k0 >> 32; + _CRC32CX(crc1, k[1] & m[1]); + + crc2 = k2; + _CRC32CX(crc2, k[3] & m[3]); + crc3 = k2 >> 32; + + _CRC32CX(crc0, crc1); + _CRC32CX(crc2, crc3); + + crc0 ^= crc2; + + return crc0; +} + +static inline uint64_t +hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key, k0, k2; + uint64_t *m = mask; + uint32_t crc0, crc1, crc2, crc3; + + k0 = k[0] & m[0]; + k2 = k[2] & m[2]; + + crc0 = k0; + _CRC32CX(crc0, seed); + crc1 = k0 >> 32; + _CRC32CX(crc1, k[1] & m[1]); + + crc2 = k2; + _CRC32CX(crc2, k[3] & m[3]); + crc3 = k2 >> 32; + _CRC32CX(crc3, k[4] & m[4]); + + _CRC32CX(crc0, crc1); + _CRC32CX(crc2, crc3); + + crc0 ^= crc2; + + return crc0; +} + +static inline uint64_t +hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key, k0, k2, k5; + uint64_t *m = mask; + uint32_t crc0, crc1, crc2, crc3; + + k0 = k[0] & m[0]; + k2 = k[2] & m[2]; + k5 = k[5] & m[5]; + + crc0 = k0; + _CRC32CX(crc0, seed); + crc1 = k0 >> 32; + _CRC32CX(crc1, k[1] & m[1]); + + crc2 = k2; + _CRC32CX(crc2, k[3] & m[3]); + crc3 = k2 >> 32; + _CRC32CX(crc3, k[4] & m[4]); + + _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2); + _CRC32CX(crc3, k5); + + crc0 ^= crc3; + + return crc0; +} + +static inline uint64_t +hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key, k0, k2, k5; + uint64_t *m = mask; + uint32_t crc0, crc1, crc2, crc3, crc4, crc5; + + k0 = k[0] & m[0]; + k2 = k[2] & m[2]; + k5 = k[5] & m[5]; + + crc0 = k0; + 
_CRC32CX(crc0, seed); + crc1 = k0 >> 32; + _CRC32CX(crc1, k[1] & m[1]); + + crc2 = k2; + _CRC32CX(crc2, k[3] & m[3]); + crc3 = k2 >> 32; + _CRC32CX(crc3, k[4] & m[4]); + + crc4 = k5; + _CRC32CX(crc4, k[6] & m[6]); + crc5 = k5 >> 32; + + _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2); + _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5); + + crc0 ^= crc3; + + return crc0; +} + +static inline uint64_t +hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size, + uint64_t seed) +{ + uint64_t *k = key, k0, k2, k5; + uint64_t *m = mask; + uint32_t crc0, crc1, crc2, crc3, crc4, crc5; + + k0 = k[0] & m[0]; + k2 = k[2] & m[2]; + k5 = k[5] & m[5]; + + crc0 = k0; + _CRC32CX(crc0, seed); + crc1 = k0 >> 32; + _CRC32CX(crc1, k[1] & m[1]); + + crc2 = k2; + _CRC32CX(crc2, k[3] & m[3]); + crc3 = k2 >> 32; + _CRC32CX(crc3, k[4] & m[4]); + + crc4 = k5; + _CRC32CX(crc4, k[6] & m[6]); + crc5 = k5 >> 32; + _CRC32CX(crc5, k[7] & m[7]); + + _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2); + _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5); + + crc0 ^= crc3; + + return crc0; +} + +#define hash_default_key8 hash_crc_key8 +#define hash_default_key16 hash_crc_key16 +#define hash_default_key24 hash_crc_key24 +#define hash_default_key32 hash_crc_key32 +#define hash_default_key40 hash_crc_key40 +#define hash_default_key48 hash_crc_key48 +#define hash_default_key56 hash_crc_key56 +#define hash_default_key64 hash_crc_key64 + +#endif diff --git a/drivers/net/softnic/meson.build b/drivers/net/softnic/meson.build new file mode 100644 index 00000000..ff982274 --- /dev/null +++ b/drivers/net/softnic/meson.build @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +allow_experimental_apis = true +install_headers('rte_eth_softnic.h') +sources = files('rte_eth_softnic_tm.c', + 'rte_eth_softnic.c', + 'rte_eth_softnic_mempool.c', + 'rte_eth_softnic_swq.c', + 'rte_eth_softnic_link.c', + 'rte_eth_softnic_tap.c', + 'rte_eth_softnic_action.c', + 'rte_eth_softnic_pipeline.c', + 'rte_eth_softnic_thread.c', + 'rte_eth_softnic_cli.c', + 'parser.c', + 'conn.c') +deps += ['pipeline', 'port', 'table', 'sched'] diff --git a/drivers/net/softnic/parser.c b/drivers/net/softnic/parser.c new file mode 100644 index 00000000..a8688a21 --- /dev/null +++ b/drivers/net/softnic/parser.c @@ -0,0 +1,703 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation. + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + */ + +/* For inet_pton4() and inet_pton6() functions: + * + * Copyright (c) 1996 by Internet Software Consortium. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS + * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE + * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS + * SOFTWARE. 
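+ *
+ * Note: inet_pton4() and inet_pton6() below are local static copies of
+ * the classic BIND/ISC helpers (per the attribution above), presumably
+ * kept in-tree so the CLI parser stays self-contained.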
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "parser.h" + +static uint32_t +get_hex_val(char c) +{ + switch (c) { + case '0': case '1': case '2': case '3': case '4': case '5': + case '6': case '7': case '8': case '9': + return c - '0'; + case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': + return c - 'A' + 10; + case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': + return c - 'a' + 10; + default: + return 0; + } +} + +int +softnic_parser_read_arg_bool(const char *p) +{ + p = skip_white_spaces(p); + int result = -EINVAL; + + if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) || + ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) { + p += 3; + result = 1; + } + + if (((p[0] == 'o') && (p[1] == 'n')) || + ((p[0] == 'O') && (p[1] == 'N'))) { + p += 2; + result = 1; + } + + if (((p[0] == 'n') && (p[1] == 'o')) || + ((p[0] == 'N') && (p[1] == 'O'))) { + p += 2; + result = 0; + } + + if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) || + ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) { + p += 3; + result = 0; + } + + p = skip_white_spaces(p); + + if (p[0] != '\0') + return -EINVAL; + + return result; +} + +int +softnic_parser_read_int32(int32_t *value, const char *p) +{ + char *next; + int32_t val; + + p = skip_white_spaces(p); + if (!isdigit(*p)) + return -EINVAL; + + val = strtol(p, &next, 10); + if (p == next) + return -EINVAL; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint64(uint64_t *value, const char *p) +{ + char *next; + uint64_t val; + + p = skip_white_spaces(p); + if (!isdigit(*p)) + return -EINVAL; + + val = strtoul(p, &next, 10); + if (p == next) + return -EINVAL; + + p = next; + switch (*p) { + case 'T': + val *= 1024ULL; + /* fall through */ + case 'G': + val *= 1024ULL; + /* fall through */ + case 'M': + val *= 1024ULL; + /* fall through */ + case 'k': + case 'K': + val *= 1024ULL; + p++; + break; + } + + p = skip_white_spaces(p); + if (*p != '\0') + return -EINVAL; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint64_hex(uint64_t *value, const char *p) +{ + char *next; + uint64_t val; + + p = skip_white_spaces(p); + + val = strtoul(p, &next, 16); + if (p == next) + return -EINVAL; + + p = skip_white_spaces(next); + if (*p != '\0') + return -EINVAL; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint32(uint32_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT32_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint32_hex(uint32_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64_hex(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT32_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint16(uint16_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT16_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint16_hex(uint16_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64_hex(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT16_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint8(uint8_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64(&val, p); + + if (ret < 0) + return 
ret; + + if (val > UINT8_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint8_hex(uint8_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64_hex(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT8_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens) +{ + uint32_t i; + + if (string == NULL || + tokens == NULL || + (*n_tokens < 1)) + return -EINVAL; + + for (i = 0; i < *n_tokens; i++) { + tokens[i] = strtok_r(string, PARSE_DELIMITER, &string); + if (tokens[i] == NULL) + break; + } + + if (i == *n_tokens && + strtok_r(string, PARSE_DELIMITER, &string) != NULL) + return -E2BIG; + + *n_tokens = i; + return 0; +} + +int +softnic_parse_hex_string(char *src, uint8_t *dst, uint32_t *size) +{ + char *c; + uint32_t len, i; + + /* Check input parameters */ + if (src == NULL || + dst == NULL || + size == NULL || + (*size == 0)) + return -1; + + len = strlen(src); + if (((len & 3) != 0) || + (len > (*size) * 2)) + return -1; + *size = len / 2; + + for (c = src; *c != 0; c++) { + if ((((*c) >= '0') && ((*c) <= '9')) || + (((*c) >= 'A') && ((*c) <= 'F')) || + (((*c) >= 'a') && ((*c) <= 'f'))) + continue; + + return -1; + } + + /* Convert chars to bytes */ + for (i = 0; i < *size; i++) + dst[i] = get_hex_val(src[2 * i]) * 16 + + get_hex_val(src[2 * i + 1]); + + return 0; +} + +int +softnic_parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels) +{ + uint32_t n_max_labels = *n_labels, count = 0; + + /* Check for void list of labels */ + if (strcmp(string, "") == 0) { + *n_labels = 0; + return 0; + } + + /* At least one label should be present */ + for ( ; (*string != '\0'); ) { + char *next; + int value; + + if (count >= n_max_labels) + return -1; + + if (count > 0) { + if (string[0] != ':') + return -1; + + string++; + } + + value = strtol(string, &next, 10); + if (next == string) + return -1; + string = next; + + labels[count++] = (uint32_t)value; + } + + *n_labels = count; + return 0; +} + +#define INADDRSZ 4 +#define IN6ADDRSZ 16 + +/* int + * inet_pton4(src, dst) + * like inet_aton() but without all the hexadecimal and shorthand. + * return: + * 1 if `src' is a valid dotted quad, else 0. + * notice: + * does not touch `dst' unless it's returning 1. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton4(const char *src, unsigned char *dst) +{ + static const char digits[] = "0123456789"; + int saw_digit, octets, ch; + unsigned char tmp[INADDRSZ], *tp; + + saw_digit = 0; + octets = 0; + *(tp = tmp) = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + pch = strchr(digits, ch); + if (pch != NULL) { + unsigned int new = *tp * 10 + (pch - digits); + + if (new > 255) + return 0; + if (!saw_digit) { + if (++octets > 4) + return 0; + saw_digit = 1; + } + *tp = (unsigned char)new; + } else if (ch == '.' && saw_digit) { + if (octets == 4) + return 0; + *++tp = 0; + saw_digit = 0; + } else + return 0; + } + if (octets < 4) + return 0; + + memcpy(dst, tmp, INADDRSZ); + return 1; +} + +/* int + * inet_pton6(src, dst) + * convert presentation level address to network order binary form. + * return: + * 1 if `src' is a valid [RFC1884 2.2] address, else 0. + * notice: + * (1) does not touch `dst' unless it's returning 1. + * (2) :: in a full address is silently ignored. + * credit: + * inspired by Mark Andrews. + * author: + * Paul Vixie, 1996. 
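+ *
+ * examples of accepted input: "::1", "2001:db8::5" and "::ffff:10.0.0.1"
+ * (a trailing dotted quad is handed to inet_pton4() above); at most one
+ * "::" is accepted and each group may hold at most four hex digits.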
+ */ +static int +inet_pton6(const char *src, unsigned char *dst) +{ + static const char xdigits_l[] = "0123456789abcdef", + xdigits_u[] = "0123456789ABCDEF"; + unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0; + const char *xdigits = 0, *curtok = 0; + int ch = 0, saw_xdigit = 0, count_xdigit = 0; + unsigned int val = 0; + unsigned int dbloct_count = 0; + + memset((tp = tmp), '\0', IN6ADDRSZ); + endp = tp + IN6ADDRSZ; + colonp = NULL; + /* Leading :: requires some special handling. */ + if (*src == ':') + if (*++src != ':') + return 0; + curtok = src; + saw_xdigit = count_xdigit = 0; + val = 0; + + while ((ch = *src++) != '\0') { + const char *pch; + + pch = strchr((xdigits = xdigits_l), ch); + if (pch == NULL) + pch = strchr((xdigits = xdigits_u), ch); + if (pch != NULL) { + if (count_xdigit >= 4) + return 0; + val <<= 4; + val |= (pch - xdigits); + if (val > 0xffff) + return 0; + saw_xdigit = 1; + count_xdigit++; + continue; + } + if (ch == ':') { + curtok = src; + if (!saw_xdigit) { + if (colonp) + return 0; + colonp = tp; + continue; + } else if (*src == '\0') { + return 0; + } + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char)((val >> 8) & 0xff); + *tp++ = (unsigned char)(val & 0xff); + saw_xdigit = 0; + count_xdigit = 0; + val = 0; + dbloct_count++; + continue; + } + if (ch == '.' && ((tp + INADDRSZ) <= endp) && + inet_pton4(curtok, tp) > 0) { + tp += INADDRSZ; + saw_xdigit = 0; + dbloct_count += 2; + break; /* '\0' was seen by inet_pton4(). */ + } + return 0; + } + if (saw_xdigit) { + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char)((val >> 8) & 0xff); + *tp++ = (unsigned char)(val & 0xff); + dbloct_count++; + } + if (colonp != NULL) { + /* if we already have 8 double octets, having a colon means error */ + if (dbloct_count == 8) + return 0; + + /* Since some memmove()'s erroneously fail to handle + * overlapping regions, we'll do the shift by hand. 
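+ * Example: for "1::2" the bytes written after the "::" marker (the
+ * single group 0x0002) are moved to the end of the buffer and the gap
+ * is zero-filled, giving 0001:0000:0000:0000:0000:0000:0000:0002.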
+ */ + const int n = tp - colonp; + int i; + + for (i = 1; i <= n; i++) { + endp[-i] = colonp[n - i]; + colonp[n - i] = 0; + } + tp = endp; + } + if (tp != endp) + return 0; + memcpy(dst, tmp, IN6ADDRSZ); + return 1; +} + +static struct ether_addr * +my_ether_aton(const char *a) +{ + int i; + char *end; + unsigned long o[ETHER_ADDR_LEN]; + static struct ether_addr ether_addr; + + i = 0; + do { + errno = 0; + o[i] = strtoul(a, &end, 16); + if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0)) + return NULL; + a = end + 1; + } while (++i != sizeof(o) / sizeof(o[0]) && end[0] != 0); + + /* Junk at the end of line */ + if (end[0] != 0) + return NULL; + + /* Support the format XX:XX:XX:XX:XX:XX */ + if (i == ETHER_ADDR_LEN) { + while (i-- != 0) { + if (o[i] > UINT8_MAX) + return NULL; + ether_addr.addr_bytes[i] = (uint8_t)o[i]; + } + /* Support the format XXXX:XXXX:XXXX */ + } else if (i == ETHER_ADDR_LEN / 2) { + while (i-- != 0) { + if (o[i] > UINT16_MAX) + return NULL; + ether_addr.addr_bytes[i * 2] = (uint8_t)(o[i] >> 8); + ether_addr.addr_bytes[i * 2 + 1] = (uint8_t)(o[i] & 0xff); + } + /* unknown format */ + } else + return NULL; + + return (struct ether_addr *)ðer_addr; +} + +int +softnic_parse_ipv4_addr(const char *token, struct in_addr *ipv4) +{ + if (strlen(token) >= INET_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton4(token, (unsigned char *)ipv4) != 1) + return -EINVAL; + + return 0; +} + +int +softnic_parse_ipv6_addr(const char *token, struct in6_addr *ipv6) +{ + if (strlen(token) >= INET6_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton6(token, (unsigned char *)ipv6) != 1) + return -EINVAL; + + return 0; +} + +int +softnic_parse_mac_addr(const char *token, struct ether_addr *addr) +{ + struct ether_addr *tmp; + + tmp = my_ether_aton(token); + if (tmp == NULL) + return -1; + + memcpy(addr, tmp, sizeof(struct ether_addr)); + return 0; +} + +int +softnic_parse_cpu_core(const char *entry, + struct softnic_cpu_core_params *p) +{ + size_t num_len; + char num[8]; + + uint32_t s = 0, c = 0, h = 0, val; + uint8_t s_parsed = 0, c_parsed = 0, h_parsed = 0; + const char *next = skip_white_spaces(entry); + char type; + + if (p == NULL) + return -EINVAL; + + /* Expect or [sX][cY][h]. At least one parameter is required. */ + while (*next != '\0') { + /* If everything parsed nothing should left */ + if (s_parsed && c_parsed && h_parsed) + return -EINVAL; + + type = *next; + switch (type) { + case 's': + case 'S': + if (s_parsed || c_parsed || h_parsed) + return -EINVAL; + s_parsed = 1; + next++; + break; + case 'c': + case 'C': + if (c_parsed || h_parsed) + return -EINVAL; + c_parsed = 1; + next++; + break; + case 'h': + case 'H': + if (h_parsed) + return -EINVAL; + h_parsed = 1; + next++; + break; + default: + /* If it start from digit it must be only core id. 
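A bare number such as "3" is therefore parsed as core ID 3 (type 'C').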
*/ + if (!isdigit(*next) || s_parsed || c_parsed || h_parsed) + return -EINVAL; + + type = 'C'; + } + + for (num_len = 0; *next != '\0'; next++, num_len++) { + if (num_len == RTE_DIM(num)) + return -EINVAL; + + if (!isdigit(*next)) + break; + + num[num_len] = *next; + } + + if (num_len == 0 && type != 'h' && type != 'H') + return -EINVAL; + + if (num_len != 0 && (type == 'h' || type == 'H')) + return -EINVAL; + + num[num_len] = '\0'; + val = strtol(num, NULL, 10); + + h = 0; + switch (type) { + case 's': + case 'S': + s = val; + break; + case 'c': + case 'C': + c = val; + break; + case 'h': + case 'H': + h = 1; + break; + } + } + + p->socket_id = s; + p->core_id = c; + p->thread_id = h; + return 0; +} diff --git a/drivers/net/softnic/parser.h b/drivers/net/softnic/parser.h new file mode 100644 index 00000000..1ee3f82a --- /dev/null +++ b/drivers/net/softnic/parser.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +#ifndef __INCLUDE_SOFTNIC_PARSER_H__ +#define __INCLUDE_SOFTNIC_PARSER_H__ + +#include + +#include +#include + +#define PARSE_DELIMITER " \f\n\r\t\v" + +#define skip_white_spaces(pos) \ +({ \ + __typeof__(pos) _p = (pos); \ + for ( ; isspace(*_p); _p++) \ + ; \ + _p; \ +}) + +static inline size_t +skip_digits(const char *src) +{ + size_t i; + + for (i = 0; isdigit(src[i]); i++) + ; + + return i; +} + +int softnic_parser_read_arg_bool(const char *p); + +int softnic_parser_read_int32(int32_t *value, const char *p); + +int softnic_parser_read_uint64(uint64_t *value, const char *p); +int softnic_parser_read_uint32(uint32_t *value, const char *p); +int softnic_parser_read_uint16(uint16_t *value, const char *p); +int softnic_parser_read_uint8(uint8_t *value, const char *p); + +int softnic_parser_read_uint64_hex(uint64_t *value, const char *p); +int softnic_parser_read_uint32_hex(uint32_t *value, const char *p); +int softnic_parser_read_uint16_hex(uint16_t *value, const char *p); +int softnic_parser_read_uint8_hex(uint8_t *value, const char *p); + +int softnic_parse_hex_string(char *src, uint8_t *dst, uint32_t *size); + +int softnic_parse_ipv4_addr(const char *token, struct in_addr *ipv4); +int softnic_parse_ipv6_addr(const char *token, struct in6_addr *ipv6); +int softnic_parse_mac_addr(const char *token, struct ether_addr *addr); +int softnic_parse_mpls_labels(char *string, + uint32_t *labels, uint32_t *n_labels); + +struct softnic_cpu_core_params { + uint32_t socket_id; + uint32_t core_id; + uint32_t thread_id; +}; + +int softnic_parse_cpu_core(const char *entry, + struct softnic_cpu_core_params *p); + +int softnic_parse_tokenize_string(char *string, + char *tokens[], uint32_t *n_tokens); + +#endif diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c index b0c13415..30fb3952 100644 --- a/drivers/net/softnic/rte_eth_softnic.c +++ b/drivers/net/softnic/rte_eth_softnic.c @@ -13,43 +13,51 @@ #include #include #include -#include #include #include "rte_eth_softnic.h" #include "rte_eth_softnic_internals.h" -#define DEV_HARD(p) \ - (&rte_eth_devices[p->hard.port_id]) - -#define PMD_PARAM_SOFT_TM "soft_tm" -#define PMD_PARAM_SOFT_TM_RATE "soft_tm_rate" -#define PMD_PARAM_SOFT_TM_NB_QUEUES "soft_tm_nb_queues" -#define PMD_PARAM_SOFT_TM_QSIZE0 "soft_tm_qsize0" -#define PMD_PARAM_SOFT_TM_QSIZE1 "soft_tm_qsize1" -#define PMD_PARAM_SOFT_TM_QSIZE2 "soft_tm_qsize2" -#define PMD_PARAM_SOFT_TM_QSIZE3 "soft_tm_qsize3" -#define PMD_PARAM_SOFT_TM_ENQ_BSZ "soft_tm_enq_bsz" -#define 
PMD_PARAM_SOFT_TM_DEQ_BSZ "soft_tm_deq_bsz" - -#define PMD_PARAM_HARD_NAME "hard_name" -#define PMD_PARAM_HARD_TX_QUEUE_ID "hard_tx_queue_id" +#define PMD_PARAM_FIRMWARE "firmware" +#define PMD_PARAM_CONN_PORT "conn_port" +#define PMD_PARAM_CPU_ID "cpu_id" +#define PMD_PARAM_TM_N_QUEUES "tm_n_queues" +#define PMD_PARAM_TM_QSIZE0 "tm_qsize0" +#define PMD_PARAM_TM_QSIZE1 "tm_qsize1" +#define PMD_PARAM_TM_QSIZE2 "tm_qsize2" +#define PMD_PARAM_TM_QSIZE3 "tm_qsize3" static const char *pmd_valid_args[] = { - PMD_PARAM_SOFT_TM, - PMD_PARAM_SOFT_TM_RATE, - PMD_PARAM_SOFT_TM_NB_QUEUES, - PMD_PARAM_SOFT_TM_QSIZE0, - PMD_PARAM_SOFT_TM_QSIZE1, - PMD_PARAM_SOFT_TM_QSIZE2, - PMD_PARAM_SOFT_TM_QSIZE3, - PMD_PARAM_SOFT_TM_ENQ_BSZ, - PMD_PARAM_SOFT_TM_DEQ_BSZ, - PMD_PARAM_HARD_NAME, - PMD_PARAM_HARD_TX_QUEUE_ID, + PMD_PARAM_FIRMWARE, + PMD_PARAM_CONN_PORT, + PMD_PARAM_CPU_ID, + PMD_PARAM_TM_N_QUEUES, + PMD_PARAM_TM_QSIZE0, + PMD_PARAM_TM_QSIZE1, + PMD_PARAM_TM_QSIZE2, + PMD_PARAM_TM_QSIZE3, NULL }; +static const char welcome[] = + "\n" + "Welcome to Soft NIC!\n" + "\n"; + +static const char prompt[] = "softnic> "; + +struct softnic_conn_params conn_params_default = { + .welcome = welcome, + .prompt = prompt, + .addr = "0.0.0.0", + .port = 0, + .buf_size = 1024 * 1024, + .msg_in_len_max = 1024, + .msg_out_len_max = 1024 * 1024, + .msg_handle = softnic_cli_process, + .msg_handle_arg = NULL, +}; + static const struct rte_eth_dev_info pmd_dev_info = { .min_rx_bufsize = 0, .max_rx_pktlen = UINT32_MAX, @@ -65,8 +73,15 @@ static const struct rte_eth_dev_info pmd_dev_info = { .nb_min = 0, .nb_align = 1, }, + .rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP, }; +static int pmd_softnic_logtype; + +#define PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, pmd_softnic_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + static void pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *dev_info) @@ -75,50 +90,36 @@ pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused, } static int -pmd_dev_configure(struct rte_eth_dev *dev) +pmd_dev_configure(struct rte_eth_dev *dev __rte_unused) { - struct pmd_internals *p = dev->data->dev_private; - struct rte_eth_dev *hard_dev = DEV_HARD(p); - - if (dev->data->nb_rx_queues > hard_dev->data->nb_rx_queues) - return -1; - - if (p->params.hard.tx_queue_id >= hard_dev->data->nb_tx_queues) - return -1; - return 0; } static int pmd_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, - uint16_t nb_rx_desc __rte_unused, - unsigned int socket_id, + uint16_t nb_rx_desc, + unsigned int socket_id __rte_unused, const struct rte_eth_rxconf *rx_conf __rte_unused, struct rte_mempool *mb_pool __rte_unused) { + char name[NAME_SIZE]; struct pmd_internals *p = dev->data->dev_private; + struct softnic_swq *swq; - if (p->params.soft.intrusive == 0) { - struct pmd_rx_queue *rxq; - - rxq = rte_zmalloc_socket(p->params.soft.name, - sizeof(struct pmd_rx_queue), 0, socket_id); - if (rxq == NULL) - return -ENOMEM; + struct softnic_swq_params params = { + .size = nb_rx_desc, + }; - rxq->hard.port_id = p->hard.port_id; - rxq->hard.rx_queue_id = rx_queue_id; - dev->data->rx_queues[rx_queue_id] = rxq; - } else { - struct rte_eth_dev *hard_dev = DEV_HARD(p); - void *rxq = hard_dev->data->rx_queues[rx_queue_id]; + snprintf(name, sizeof(name), "RXQ%u", rx_queue_id); - if (rxq == NULL) - return -1; + swq = softnic_swq_create(p, + name, + ¶ms); + if (swq == NULL) + return -1; - dev->data->rx_queues[rx_queue_id] = rxq; - } + dev->data->rx_queues[rx_queue_id] = swq->r; return 0; 
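+	/* rx_queues[] now holds the SWQ's underlying rte_ring, so the RX
+	 * burst path (pmd_rx_pkt_burst() below) is a plain single-consumer
+	 * ring dequeue.
+	 */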
} @@ -126,21 +127,26 @@ static int pmd_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, uint16_t nb_tx_desc, - unsigned int socket_id, + unsigned int socket_id __rte_unused, const struct rte_eth_txconf *tx_conf __rte_unused) { - uint32_t size = RTE_ETH_NAME_MAX_LEN + strlen("_txq") + 4; - char name[size]; - struct rte_ring *r; - - snprintf(name, sizeof(name), "%s_txq%04x", - dev->data->name, tx_queue_id); - r = rte_ring_create(name, nb_tx_desc, socket_id, - RING_F_SP_ENQ | RING_F_SC_DEQ); - if (r == NULL) + char name[NAME_SIZE]; + struct pmd_internals *p = dev->data->dev_private; + struct softnic_swq *swq; + + struct softnic_swq_params params = { + .size = nb_tx_desc, + }; + + snprintf(name, sizeof(name), "TXQ%u", tx_queue_id); + + swq = softnic_swq_create(p, + name, + ¶ms); + if (swq == NULL) return -1; - dev->data->tx_queues[tx_queue_id] = r; + dev->data->tx_queues[tx_queue_id] = swq->r; return 0; } @@ -148,23 +154,19 @@ static int pmd_dev_start(struct rte_eth_dev *dev) { struct pmd_internals *p = dev->data->dev_private; + int status; - if (tm_used(dev)) { - int status = tm_start(p); - - if (status) - return status; - } + /* Firmware */ + status = softnic_cli_script_process(p, + p->params.firmware, + conn_params_default.msg_in_len_max, + conn_params_default.msg_out_len_max); + if (status) + return status; + /* Link UP */ dev->data->dev_link.link_status = ETH_LINK_UP; - if (p->params.soft.intrusive) { - struct rte_eth_dev *hard_dev = DEV_HARD(p); - - /* The hard_dev->rx_pkt_burst should be stable by now */ - dev->rx_pkt_burst = hard_dev->rx_pkt_burst; - } - return 0; } @@ -173,20 +175,27 @@ pmd_dev_stop(struct rte_eth_dev *dev) { struct pmd_internals *p = dev->data->dev_private; + /* Link DOWN */ dev->data->dev_link.link_status = ETH_LINK_DOWN; - if (tm_used(dev)) - tm_stop(p); + /* Firmware */ + softnic_pipeline_disable_all(p); + softnic_pipeline_free(p); + softnic_table_action_profile_free(p); + softnic_port_in_action_profile_free(p); + softnic_tap_free(p); + softnic_tmgr_free(p); + softnic_link_free(p); + softnic_softnic_swq_free_keep_rxq_txq(p); + softnic_mempool_free(p); + + tm_hierarchy_free(p); } static void -pmd_dev_close(struct rte_eth_dev *dev) +pmd_dev_close(struct rte_eth_dev *dev __rte_unused) { - uint32_t i; - - /* TX queues */ - for (i = 0; i < dev->data->nb_tx_queues; i++) - rte_ring_free((struct rte_ring *)dev->data->tx_queues[i]); + return; } static int @@ -197,10 +206,9 @@ pmd_link_update(struct rte_eth_dev *dev __rte_unused, } static int -pmd_tm_ops_get(struct rte_eth_dev *dev, void *arg) +pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg) { - *(const struct rte_tm_ops **)arg = - (tm_enabled(dev)) ? 
&pmd_tm_ops : NULL; + *(const struct rte_tm_ops **)arg = &pmd_tm_ops; return 0; } @@ -222,12 +230,10 @@ pmd_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { - struct pmd_rx_queue *rx_queue = rxq; - - return rte_eth_rx_burst(rx_queue->hard.port_id, - rx_queue->hard.rx_queue_id, - rx_pkts, - nb_pkts); + return (uint16_t)rte_ring_sc_dequeue_burst(rxq, + (void **)rx_pkts, + nb_pkts, + NULL); } static uint16_t @@ -235,239 +241,56 @@ pmd_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - return (uint16_t)rte_ring_enqueue_burst(txq, + return (uint16_t)rte_ring_sp_enqueue_burst(txq, (void **)tx_pkts, nb_pkts, NULL); } -static __rte_always_inline int -run_default(struct rte_eth_dev *dev) -{ - struct pmd_internals *p = dev->data->dev_private; - - /* Persistent context: Read Only (update not required) */ - struct rte_mbuf **pkts = p->soft.def.pkts; - uint16_t nb_tx_queues = dev->data->nb_tx_queues; - - /* Persistent context: Read - Write (update required) */ - uint32_t txq_pos = p->soft.def.txq_pos; - uint32_t pkts_len = p->soft.def.pkts_len; - uint32_t flush_count = p->soft.def.flush_count; - - /* Not part of the persistent context */ - uint32_t pos; - uint16_t i; - - /* Soft device TXQ read, Hard device TXQ write */ - for (i = 0; i < nb_tx_queues; i++) { - struct rte_ring *txq = dev->data->tx_queues[txq_pos]; - - /* Read soft device TXQ burst to packet enqueue buffer */ - pkts_len += rte_ring_sc_dequeue_burst(txq, - (void **)&pkts[pkts_len], - DEFAULT_BURST_SIZE, - NULL); - - /* Increment soft device TXQ */ - txq_pos++; - if (txq_pos >= nb_tx_queues) - txq_pos = 0; - - /* Hard device TXQ write when complete burst is available */ - if (pkts_len >= DEFAULT_BURST_SIZE) { - for (pos = 0; pos < pkts_len; ) - pos += rte_eth_tx_burst(p->hard.port_id, - p->params.hard.tx_queue_id, - &pkts[pos], - (uint16_t)(pkts_len - pos)); - - pkts_len = 0; - flush_count = 0; - break; - } - } - - if (flush_count >= FLUSH_COUNT_THRESHOLD) { - for (pos = 0; pos < pkts_len; ) - pos += rte_eth_tx_burst(p->hard.port_id, - p->params.hard.tx_queue_id, - &pkts[pos], - (uint16_t)(pkts_len - pos)); - - pkts_len = 0; - flush_count = 0; - } - - p->soft.def.txq_pos = txq_pos; - p->soft.def.pkts_len = pkts_len; - p->soft.def.flush_count = flush_count + 1; - - return 0; -} - -static __rte_always_inline int -run_tm(struct rte_eth_dev *dev) -{ - struct pmd_internals *p = dev->data->dev_private; - - /* Persistent context: Read Only (update not required) */ - struct rte_sched_port *sched = p->soft.tm.sched; - struct rte_mbuf **pkts_enq = p->soft.tm.pkts_enq; - struct rte_mbuf **pkts_deq = p->soft.tm.pkts_deq; - uint32_t enq_bsz = p->params.soft.tm.enq_bsz; - uint32_t deq_bsz = p->params.soft.tm.deq_bsz; - uint16_t nb_tx_queues = dev->data->nb_tx_queues; - - /* Persistent context: Read - Write (update required) */ - uint32_t txq_pos = p->soft.tm.txq_pos; - uint32_t pkts_enq_len = p->soft.tm.pkts_enq_len; - uint32_t flush_count = p->soft.tm.flush_count; - - /* Not part of the persistent context */ - uint32_t pkts_deq_len, pos; - uint16_t i; - - /* Soft device TXQ read, TM enqueue */ - for (i = 0; i < nb_tx_queues; i++) { - struct rte_ring *txq = dev->data->tx_queues[txq_pos]; - - /* Read TXQ burst to packet enqueue buffer */ - pkts_enq_len += rte_ring_sc_dequeue_burst(txq, - (void **)&pkts_enq[pkts_enq_len], - enq_bsz, - NULL); - - /* Increment TXQ */ - txq_pos++; - if (txq_pos >= nb_tx_queues) - txq_pos = 0; - - /* TM enqueue when complete burst is available */ - if (pkts_enq_len >= enq_bsz) { - 
rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len); - - pkts_enq_len = 0; - flush_count = 0; - break; - } - } - - if (flush_count >= FLUSH_COUNT_THRESHOLD) { - if (pkts_enq_len) - rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len); - - pkts_enq_len = 0; - flush_count = 0; - } - - p->soft.tm.txq_pos = txq_pos; - p->soft.tm.pkts_enq_len = pkts_enq_len; - p->soft.tm.flush_count = flush_count + 1; - - /* TM dequeue, Hard device TXQ write */ - pkts_deq_len = rte_sched_port_dequeue(sched, pkts_deq, deq_bsz); - - for (pos = 0; pos < pkts_deq_len; ) - pos += rte_eth_tx_burst(p->hard.port_id, - p->params.hard.tx_queue_id, - &pkts_deq[pos], - (uint16_t)(pkts_deq_len - pos)); - - return 0; -} - -int -rte_pmd_softnic_run(uint16_t port_id) -{ - struct rte_eth_dev *dev = &rte_eth_devices[port_id]; - -#ifdef RTE_LIBRTE_ETHDEV_DEBUG - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); -#endif - - return (tm_used(dev)) ? run_tm(dev) : run_default(dev); -} - -static struct ether_addr eth_addr = { .addr_bytes = {0} }; - -static uint32_t -eth_dev_speed_max_mbps(uint32_t speed_capa) -{ - uint32_t rate_mbps[32] = { - ETH_SPEED_NUM_NONE, - ETH_SPEED_NUM_10M, - ETH_SPEED_NUM_10M, - ETH_SPEED_NUM_100M, - ETH_SPEED_NUM_100M, - ETH_SPEED_NUM_1G, - ETH_SPEED_NUM_2_5G, - ETH_SPEED_NUM_5G, - ETH_SPEED_NUM_10G, - ETH_SPEED_NUM_20G, - ETH_SPEED_NUM_25G, - ETH_SPEED_NUM_40G, - ETH_SPEED_NUM_50G, - ETH_SPEED_NUM_56G, - ETH_SPEED_NUM_100G, - }; - - uint32_t pos = (speed_capa) ? (31 - __builtin_clz(speed_capa)) : 0; - return rate_mbps[pos]; -} - -static int -default_init(struct pmd_internals *p, - struct pmd_params *params, - int numa_node) -{ - p->soft.def.pkts = rte_zmalloc_socket(params->soft.name, - 2 * DEFAULT_BURST_SIZE * sizeof(struct rte_mbuf *), - 0, - numa_node); - - if (p->soft.def.pkts == NULL) - return -ENOMEM; - - return 0; -} - -static void -default_free(struct pmd_internals *p) -{ - rte_free(p->soft.def.pkts); -} - static void * -pmd_init(struct pmd_params *params, int numa_node) +pmd_init(struct pmd_params *params) { struct pmd_internals *p; int status; - p = rte_zmalloc_socket(params->soft.name, + p = rte_zmalloc_socket(params->name, sizeof(struct pmd_internals), 0, - numa_node); + params->cpu_id); if (p == NULL) return NULL; + /* Params */ memcpy(&p->params, params, sizeof(p->params)); - rte_eth_dev_get_port_by_name(params->hard.name, &p->hard.port_id); - /* Default */ - status = default_init(p, params, numa_node); + /* Resources */ + tm_hierarchy_init(p); + + softnic_mempool_init(p); + softnic_swq_init(p); + softnic_link_init(p); + softnic_tmgr_init(p); + softnic_tap_init(p); + softnic_port_in_action_profile_init(p); + softnic_table_action_profile_init(p); + softnic_pipeline_init(p); + + status = softnic_thread_init(p); if (status) { - free(p->params.hard.name); rte_free(p); return NULL; } - /* Traffic Management (TM)*/ - if (params->soft.flags & PMD_FEATURE_TM) { - status = tm_init(p, params, numa_node); - if (status) { - default_free(p); - free(p->params.hard.name); + if (params->conn_port) { + struct softnic_conn_params conn_params; + + memcpy(&conn_params, &conn_params_default, sizeof(conn_params)); + conn_params.port = p->params.conn_port; + conn_params.msg_handle_arg = p; + + p->conn = softnic_conn_init(&conn_params); + if (p->conn == NULL) { + softnic_thread_free(p); rte_free(p); return NULL; } @@ -479,55 +302,62 @@ pmd_init(struct pmd_params *params, int numa_node) static void pmd_free(struct pmd_internals *p) { - if (p->params.soft.flags & PMD_FEATURE_TM) - tm_free(p); + if (p == NULL) + return; 
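+
+	/* Tear down in exact reverse order of pmd_init(): connection,
+	 * thread, pipelines, action profiles, ports (TAP, TMGR, LINK, SWQ)
+	 * and mempools, then the TM hierarchy.
+	 */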
+ + if (p->params.conn_port) + softnic_conn_free(p->conn); - default_free(p); + softnic_thread_free(p); + softnic_pipeline_free(p); + softnic_table_action_profile_free(p); + softnic_port_in_action_profile_free(p); + softnic_tap_free(p); + softnic_tmgr_free(p); + softnic_link_free(p); + softnic_swq_free(p); + softnic_mempool_free(p); + + tm_hierarchy_free(p); - free(p->params.hard.name); rte_free(p); } +static struct ether_addr eth_addr = { + .addr_bytes = {0}, +}; + static int pmd_ethdev_register(struct rte_vdev_device *vdev, struct pmd_params *params, void *dev_private) { - struct rte_eth_dev_info hard_info; - struct rte_eth_dev *soft_dev; - uint32_t hard_speed; - int numa_node; - uint16_t hard_port_id; - - rte_eth_dev_get_port_by_name(params->hard.name, &hard_port_id); - rte_eth_dev_info_get(hard_port_id, &hard_info); - hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa); - numa_node = rte_eth_dev_socket_id(hard_port_id); + struct rte_eth_dev *dev; /* Ethdev entry allocation */ - soft_dev = rte_eth_dev_allocate(params->soft.name); - if (!soft_dev) + dev = rte_eth_dev_allocate(params->name); + if (!dev) return -ENOMEM; /* dev */ - soft_dev->rx_pkt_burst = (params->soft.intrusive) ? - NULL : /* set up later */ - pmd_rx_pkt_burst; - soft_dev->tx_pkt_burst = pmd_tx_pkt_burst; - soft_dev->tx_pkt_prepare = NULL; - soft_dev->dev_ops = &pmd_ops; - soft_dev->device = &vdev->device; + dev->rx_pkt_burst = pmd_rx_pkt_burst; + dev->tx_pkt_burst = pmd_tx_pkt_burst; + dev->tx_pkt_prepare = NULL; + dev->dev_ops = &pmd_ops; + dev->device = &vdev->device; /* dev->data */ - soft_dev->data->dev_private = dev_private; - soft_dev->data->dev_link.link_speed = hard_speed; - soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; - soft_dev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG; - soft_dev->data->dev_link.link_status = ETH_LINK_DOWN; - soft_dev->data->mac_addrs = ð_addr; - soft_dev->data->promiscuous = 1; - soft_dev->data->kdrv = RTE_KDRV_NONE; - soft_dev->data->numa_node = numa_node; + dev->data->dev_private = dev_private; + dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G; + dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; + dev->data->dev_link.link_autoneg = ETH_LINK_FIXED; + dev->data->dev_link.link_status = ETH_LINK_DOWN; + dev->data->mac_addrs = ð_addr; + dev->data->promiscuous = 1; + dev->data->kdrv = RTE_KDRV_NONE; + dev->data->numa_node = params->cpu_id; + + rte_eth_dev_probing_finish(dev); return 0; } @@ -558,10 +388,21 @@ get_uint32(const char *key __rte_unused, const char *value, void *extra_args) } static int -pmd_parse_args(struct pmd_params *p, const char *name, const char *params) +get_uint16(const char *key __rte_unused, const char *value, void *extra_args) +{ + if (!value || !extra_args) + return -EINVAL; + + *(uint16_t *)extra_args = strtoull(value, NULL, 0); + + return 0; +} + +static int +pmd_parse_args(struct pmd_params *p, const char *params) { struct rte_kvargs *kvlist; - int i, ret; + int ret = 0; kvlist = rte_kvargs_parse(params, pmd_valid_args); if (kvlist == NULL) @@ -569,141 +410,71 @@ pmd_parse_args(struct pmd_params *p, const char *name, const char *params) /* Set default values */ memset(p, 0, sizeof(*p)); - p->soft.name = name; - p->soft.intrusive = INTRUSIVE; - p->soft.tm.rate = 0; - p->soft.tm.nb_queues = SOFTNIC_SOFT_TM_NB_QUEUES; - for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) - p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE; - p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ; - p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ; - 
p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID; - - /* SOFT: TM (optional) */ - if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM) == 1) { - char *s; - - ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM, - &get_string, &s); - if (ret < 0) - goto out_free; - - if (strcmp(s, "on") == 0) - p->soft.flags |= PMD_FEATURE_TM; - else if (strcmp(s, "off") == 0) - p->soft.flags &= ~PMD_FEATURE_TM; - else - ret = -EINVAL; - - free(s); - if (ret) - goto out_free; - } - - /* SOFT: TM rate (measured in bytes/second) (optional) */ - if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_RATE) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_RATE, - &get_uint32, &p->soft.tm.rate); - if (ret < 0) - goto out_free; - - p->soft.flags |= PMD_FEATURE_TM; - } - - /* SOFT: TM number of queues (optional) */ - if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES, - &get_uint32, &p->soft.tm.nb_queues); + p->firmware = SOFTNIC_FIRMWARE; + p->cpu_id = SOFTNIC_CPU_ID; + p->tm.n_queues = SOFTNIC_TM_N_QUEUES; + p->tm.qsize[0] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[1] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[2] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[3] = SOFTNIC_TM_QUEUE_SIZE; + + /* Firmware script (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_FIRMWARE) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_FIRMWARE, + &get_string, &p->firmware); if (ret < 0) goto out_free; - - p->soft.flags |= PMD_FEATURE_TM; } - /* SOFT: TM queue size 0 .. 3 (optional) */ - if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE0) == 1) { - uint32_t qsize; - - ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE0, - &get_uint32, &qsize); + /* Connection listening port (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_CONN_PORT) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_CONN_PORT, + &get_uint16, &p->conn_port); if (ret < 0) goto out_free; - - p->soft.tm.qsize[0] = (uint16_t)qsize; - p->soft.flags |= PMD_FEATURE_TM; } - if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE1) == 1) { - uint32_t qsize; - - ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE1, - &get_uint32, &qsize); + /* CPU ID (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_CPU_ID) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_CPU_ID, + &get_uint32, &p->cpu_id); if (ret < 0) goto out_free; - - p->soft.tm.qsize[1] = (uint16_t)qsize; - p->soft.flags |= PMD_FEATURE_TM; } - if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE2) == 1) { - uint32_t qsize; - - ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE2, - &get_uint32, &qsize); + /* TM number of queues (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_N_QUEUES) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_N_QUEUES, + &get_uint32, &p->tm.n_queues); if (ret < 0) goto out_free; - - p->soft.tm.qsize[2] = (uint16_t)qsize; - p->soft.flags |= PMD_FEATURE_TM; } - if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE3) == 1) { - uint32_t qsize; - - ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE3, - &get_uint32, &qsize); + /* TM queue size 0 .. 
3 (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE0) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE0, + &get_uint32, &p->tm.qsize[0]); if (ret < 0) goto out_free; - - p->soft.tm.qsize[3] = (uint16_t)qsize; - p->soft.flags |= PMD_FEATURE_TM; } - /* SOFT: TM enqueue burst size (optional) */ - if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ, - &get_uint32, &p->soft.tm.enq_bsz); + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE1) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE1, + &get_uint32, &p->tm.qsize[1]); if (ret < 0) goto out_free; - - p->soft.flags |= PMD_FEATURE_TM; } - /* SOFT: TM dequeue burst size (optional) */ - if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ, - &get_uint32, &p->soft.tm.deq_bsz); + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE2) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE2, + &get_uint32, &p->tm.qsize[2]); if (ret < 0) goto out_free; - - p->soft.flags |= PMD_FEATURE_TM; } - /* HARD: name (mandatory) */ - if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME, - &get_string, &p->hard.name); - if (ret < 0) - goto out_free; - } else { - ret = -EINVAL; - goto out_free; - } - - /* HARD: tx_queue_id (optional) */ - if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID, - &get_uint32, &p->hard.tx_queue_id); + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE3) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE3, + &get_uint32, &p->tm.qsize[3]); if (ret < 0) goto out_free; } @@ -718,54 +489,31 @@ pmd_probe(struct rte_vdev_device *vdev) { struct pmd_params p; const char *params; - int status; + int status = 0; - struct rte_eth_dev_info hard_info; - uint32_t hard_speed; - uint16_t hard_port_id; - int numa_node; void *dev_private; + const char *name = rte_vdev_device_name(vdev); - RTE_LOG(INFO, PMD, - "Probing device \"%s\"\n", - rte_vdev_device_name(vdev)); + PMD_LOG(INFO, "Probing device \"%s\"", name); /* Parse input arguments */ params = rte_vdev_device_args(vdev); if (!params) return -EINVAL; - status = pmd_parse_args(&p, rte_vdev_device_name(vdev), params); + status = pmd_parse_args(&p, params); if (status) return status; - /* Check input arguments */ - if (rte_eth_dev_get_port_by_name(p.hard.name, &hard_port_id)) - return -EINVAL; - - rte_eth_dev_info_get(hard_port_id, &hard_info); - hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa); - numa_node = rte_eth_dev_socket_id(hard_port_id); - - if (p.hard.tx_queue_id >= hard_info.max_tx_queues) - return -EINVAL; - - if (p.soft.flags & PMD_FEATURE_TM) { - status = tm_params_check(&p, hard_speed); - - if (status) - return status; - } + p.name = name; /* Allocate and initialize soft ethdev private data */ - dev_private = pmd_init(&p, numa_node); + dev_private = pmd_init(&p); if (dev_private == NULL) return -ENOMEM; /* Register soft ethdev */ - RTE_LOG(INFO, PMD, - "Creating soft ethdev \"%s\" for hard ethdev \"%s\"\n", - p.soft.name, p.hard.name); + PMD_LOG(INFO, "Creating soft ethdev \"%s\"", p.name); status = pmd_ethdev_register(vdev, &p, dev_private); if (status) { @@ -785,8 +533,7 @@ pmd_remove(struct rte_vdev_device *vdev) if (!vdev) return -EINVAL; - RTE_LOG(INFO, PMD, "Removing device \"%s\"\n", - rte_vdev_device_name(vdev)); + PMD_LOG(INFO, "Removing device 
\"%s\"", rte_vdev_device_name(vdev)); /* Find the ethdev entry */ dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev)); @@ -795,9 +542,9 @@ pmd_remove(struct rte_vdev_device *vdev) p = dev->data->dev_private; /* Free device data structures*/ - pmd_free(p); rte_free(dev->data); rte_eth_dev_release_port(dev); + pmd_free(p); return 0; } @@ -809,14 +556,39 @@ static struct rte_vdev_driver pmd_softnic_drv = { RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv); RTE_PMD_REGISTER_PARAM_STRING(net_softnic, - PMD_PARAM_SOFT_TM "=on|off " - PMD_PARAM_SOFT_TM_RATE "= " - PMD_PARAM_SOFT_TM_NB_QUEUES "= " - PMD_PARAM_SOFT_TM_QSIZE0 "= " - PMD_PARAM_SOFT_TM_QSIZE1 "= " - PMD_PARAM_SOFT_TM_QSIZE2 "= " - PMD_PARAM_SOFT_TM_QSIZE3 "= " - PMD_PARAM_SOFT_TM_ENQ_BSZ "= " - PMD_PARAM_SOFT_TM_DEQ_BSZ "= " - PMD_PARAM_HARD_NAME "= " - PMD_PARAM_HARD_TX_QUEUE_ID "="); + PMD_PARAM_FIRMWARE "= " + PMD_PARAM_CONN_PORT "= " + PMD_PARAM_CPU_ID "= " + PMD_PARAM_TM_N_QUEUES "= " + PMD_PARAM_TM_QSIZE0 "= " + PMD_PARAM_TM_QSIZE1 "= " + PMD_PARAM_TM_QSIZE2 "= " + PMD_PARAM_TM_QSIZE3 "=" +); + + +RTE_INIT(pmd_softnic_init_log) +{ + pmd_softnic_logtype = rte_log_register("pmd.net.softnic"); + if (pmd_softnic_logtype >= 0) + rte_log_set_level(pmd_softnic_logtype, RTE_LOG_NOTICE); +} + +int +rte_pmd_softnic_manage(uint16_t port_id) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct pmd_internals *softnic; + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); +#endif + + softnic = dev->data->dev_private; + + softnic_conn_poll_for_conn(softnic->conn); + + softnic_conn_poll_for_msg(softnic->conn); + + return 0; +} diff --git a/drivers/net/softnic/rte_eth_softnic.h b/drivers/net/softnic/rte_eth_softnic.h index 9a2c7ba9..048dfe6b 100644 --- a/drivers/net/softnic/rte_eth_softnic.h +++ b/drivers/net/softnic/rte_eth_softnic.h @@ -11,42 +11,53 @@ extern "C" { #endif -#ifndef SOFTNIC_SOFT_TM_NB_QUEUES -#define SOFTNIC_SOFT_TM_NB_QUEUES 65536 +/** Firmware. */ +#ifndef SOFTNIC_FIRMWARE +#define SOFTNIC_FIRMWARE "firmware.cli" #endif -#ifndef SOFTNIC_SOFT_TM_QUEUE_SIZE -#define SOFTNIC_SOFT_TM_QUEUE_SIZE 64 +/** TCP connection port (0 = no connectivity). */ +#ifndef SOFTNIC_CONN_PORT +#define SOFTNIC_CONN_PORT 0 #endif -#ifndef SOFTNIC_SOFT_TM_ENQ_BSZ -#define SOFTNIC_SOFT_TM_ENQ_BSZ 32 +/** NUMA node ID. */ +#ifndef SOFTNIC_CPU_ID +#define SOFTNIC_CPU_ID 0 #endif -#ifndef SOFTNIC_SOFT_TM_DEQ_BSZ -#define SOFTNIC_SOFT_TM_DEQ_BSZ 24 +/** Traffic Manager: Number of scheduler queues. */ +#ifndef SOFTNIC_TM_N_QUEUES +#define SOFTNIC_TM_N_QUEUES (64 * 1024) #endif -#ifndef SOFTNIC_HARD_TX_QUEUE_ID -#define SOFTNIC_HARD_TX_QUEUE_ID 0 +/** Traffic Manager: Scheduler queue size (per traffic class). */ +#ifndef SOFTNIC_TM_QUEUE_SIZE +#define SOFTNIC_TM_QUEUE_SIZE 64 #endif /** - * Run the traffic management function on the softnic device + * Soft NIC run. * - * This function read the packets from the softnic input queues, insert into - * QoS scheduler queues based on mbuf sched field value and transmit the - * scheduled packets out through the hard device interface. - * - * @param portid - * port id of the soft device. + * @param port_id + * Port ID of the Soft NIC device. * @return - * zero. + * Zero on success, error code otherwise. */ - int rte_pmd_softnic_run(uint16_t port_id); +/** + * Soft NIC manage. + * + * @param port_id + * Port ID of the Soft NIC device. + * @return + * Zero on success, error code otherwise. 
+ */ +int __rte_experimental +rte_pmd_softnic_manage(uint16_t port_id); + #ifdef __cplusplus } #endif diff --git a/drivers/net/softnic/rte_eth_softnic_action.c b/drivers/net/softnic/rte_eth_softnic_action.c new file mode 100644 index 00000000..c25f4dd9 --- /dev/null +++ b/drivers/net/softnic/rte_eth_softnic_action.c @@ -0,0 +1,389 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include +#include +#include + +#include + +#include "hash_func.h" +#include "rte_eth_softnic_internals.h" + +/** + * Input port + */ +int +softnic_port_in_action_profile_init(struct pmd_internals *p) +{ + TAILQ_INIT(&p->port_in_action_profile_list); + + return 0; +} + +void +softnic_port_in_action_profile_free(struct pmd_internals *p) +{ + for ( ; ; ) { + struct softnic_port_in_action_profile *profile; + + profile = TAILQ_FIRST(&p->port_in_action_profile_list); + if (profile == NULL) + break; + + TAILQ_REMOVE(&p->port_in_action_profile_list, profile, node); + free(profile); + } +} + +struct softnic_port_in_action_profile * +softnic_port_in_action_profile_find(struct pmd_internals *p, + const char *name) +{ + struct softnic_port_in_action_profile *profile; + + if (name == NULL) + return NULL; + + TAILQ_FOREACH(profile, &p->port_in_action_profile_list, node) + if (strcmp(profile->name, name) == 0) + return profile; + + return NULL; +} + +struct softnic_port_in_action_profile * +softnic_port_in_action_profile_create(struct pmd_internals *p, + const char *name, + struct softnic_port_in_action_profile_params *params) +{ + struct softnic_port_in_action_profile *profile; + struct rte_port_in_action_profile *ap; + int status; + + /* Check input params */ + if (name == NULL || + softnic_port_in_action_profile_find(p, name) || + params == NULL) + return NULL; + + if ((params->action_mask & (1LLU << RTE_PORT_IN_ACTION_LB)) && + params->lb.f_hash == NULL) { + switch (params->lb.key_size) { + case 8: + params->lb.f_hash = hash_default_key8; + break; + + case 16: + params->lb.f_hash = hash_default_key16; + break; + + case 24: + params->lb.f_hash = hash_default_key24; + break; + + case 32: + params->lb.f_hash = hash_default_key32; + break; + + case 40: + params->lb.f_hash = hash_default_key40; + break; + + case 48: + params->lb.f_hash = hash_default_key48; + break; + + case 56: + params->lb.f_hash = hash_default_key56; + break; + + case 64: + params->lb.f_hash = hash_default_key64; + break; + + default: + return NULL; + } + + params->lb.seed = 0; + } + + /* Resource */ + ap = rte_port_in_action_profile_create(0); + if (ap == NULL) + return NULL; + + if (params->action_mask & (1LLU << RTE_PORT_IN_ACTION_FLTR)) { + status = rte_port_in_action_profile_action_register(ap, + RTE_PORT_IN_ACTION_FLTR, + ¶ms->fltr); + + if (status) { + rte_port_in_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_PORT_IN_ACTION_LB)) { + status = rte_port_in_action_profile_action_register(ap, + RTE_PORT_IN_ACTION_LB, + ¶ms->lb); + + if (status) { + rte_port_in_action_profile_free(ap); + return NULL; + } + } + + status = rte_port_in_action_profile_freeze(ap); + if (status) { + rte_port_in_action_profile_free(ap); + return NULL; + } + + /* Node allocation */ + profile = calloc(1, sizeof(struct softnic_port_in_action_profile)); + if (profile == NULL) { + rte_port_in_action_profile_free(ap); + return NULL; + } + + /* Node fill in */ + strlcpy(profile->name, name, sizeof(profile->name)); + memcpy(&profile->params, params, sizeof(*params)); + profile->ap = ap; + + /* Node 
add to list */ + TAILQ_INSERT_TAIL(&p->port_in_action_profile_list, profile, node); + + return profile; +} + +/** + * Table + */ +int +softnic_table_action_profile_init(struct pmd_internals *p) +{ + TAILQ_INIT(&p->table_action_profile_list); + + return 0; +} + +void +softnic_table_action_profile_free(struct pmd_internals *p) +{ + for ( ; ; ) { + struct softnic_table_action_profile *profile; + + profile = TAILQ_FIRST(&p->table_action_profile_list); + if (profile == NULL) + break; + + TAILQ_REMOVE(&p->table_action_profile_list, profile, node); + free(profile); + } +} + +struct softnic_table_action_profile * +softnic_table_action_profile_find(struct pmd_internals *p, + const char *name) +{ + struct softnic_table_action_profile *profile; + + if (name == NULL) + return NULL; + + TAILQ_FOREACH(profile, &p->table_action_profile_list, node) + if (strcmp(profile->name, name) == 0) + return profile; + + return NULL; +} + +struct softnic_table_action_profile * +softnic_table_action_profile_create(struct pmd_internals *p, + const char *name, + struct softnic_table_action_profile_params *params) +{ + struct softnic_table_action_profile *profile; + struct rte_table_action_profile *ap; + int status; + + /* Check input params */ + if (name == NULL || + softnic_table_action_profile_find(p, name) || + params == NULL || + ((params->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) == 0)) + return NULL; + + if ((params->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) && + params->lb.f_hash == NULL) { + switch (params->lb.key_size) { + case 8: + params->lb.f_hash = hash_default_key8; + break; + + case 16: + params->lb.f_hash = hash_default_key16; + break; + + case 24: + params->lb.f_hash = hash_default_key24; + break; + + case 32: + params->lb.f_hash = hash_default_key32; + break; + + case 40: + params->lb.f_hash = hash_default_key40; + break; + + case 48: + params->lb.f_hash = hash_default_key48; + break; + + case 56: + params->lb.f_hash = hash_default_key56; + break; + + case 64: + params->lb.f_hash = hash_default_key64; + break; + + default: + return NULL; + } + + params->lb.seed = 0; + } + + /* Resource */ + ap = rte_table_action_profile_create(¶ms->common); + if (ap == NULL) + return NULL; + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_FWD, + NULL); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_LB, + ¶ms->lb); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_MTR, + ¶ms->mtr); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_TM, + ¶ms->tm); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_ENCAP, + ¶ms->encap); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_NAT, + ¶ms->nat); + + if (status) { + 
rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_TTL, + ¶ms->ttl); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_STATS, + ¶ms->stats); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_TIME, + NULL); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + status = rte_table_action_profile_freeze(ap); + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + + /* Node allocation */ + profile = calloc(1, sizeof(struct softnic_table_action_profile)); + if (profile == NULL) { + rte_table_action_profile_free(ap); + return NULL; + } + + /* Node fill in */ + strlcpy(profile->name, name, sizeof(profile->name)); + memcpy(&profile->params, params, sizeof(*params)); + profile->ap = ap; + + /* Node add to list */ + TAILQ_INSERT_TAIL(&p->table_action_profile_list, profile, node); + + return profile; +} diff --git a/drivers/net/softnic/rte_eth_softnic_cli.c b/drivers/net/softnic/rte_eth_softnic_cli.c new file mode 100644 index 00000000..0c7448cc --- /dev/null +++ b/drivers/net/softnic/rte_eth_softnic_cli.c @@ -0,0 +1,5259 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include +#include +#include +#include + +#include +#include + +#include "rte_eth_softnic_internals.h" +#include "parser.h" + +#ifndef CMD_MAX_TOKENS +#define CMD_MAX_TOKENS 256 +#endif + +#define MSG_OUT_OF_MEMORY "Not enough memory.\n" +#define MSG_CMD_UNKNOWN "Unknown command \"%s\".\n" +#define MSG_CMD_UNIMPLEM "Command \"%s\" not implemented.\n" +#define MSG_ARG_NOT_ENOUGH "Not enough arguments for command \"%s\".\n" +#define MSG_ARG_TOO_MANY "Too many arguments for command \"%s\".\n" +#define MSG_ARG_MISMATCH "Wrong number of arguments for command \"%s\".\n" +#define MSG_ARG_NOT_FOUND "Argument \"%s\" not found.\n" +#define MSG_ARG_INVALID "Invalid value for argument \"%s\".\n" +#define MSG_FILE_ERR "Error in file \"%s\" at line %u.\n" +#define MSG_FILE_NOT_ENOUGH "Not enough rules in file \"%s\".\n" +#define MSG_CMD_FAIL "Command \"%s\" failed.\n" + +static int +is_comment(char *in) +{ + if ((strlen(in) && index("!#%;", in[0])) || + (strncmp(in, "//", 2) == 0) || + (strncmp(in, "--", 2) == 0)) + return 1; + + return 0; +} + +/** + * mempool + * buffer + * pool + * cache + */ +static void +cmd_mempool(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_mempool_params p; + char *name; + struct softnic_mempool *mempool; + + if (n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + if (strcmp(tokens[2], "buffer") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "buffer"); + return; + } + + if (softnic_parser_read_uint32(&p.buffer_size, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "buffer_size"); + return; + } + + if (strcmp(tokens[4], "pool") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pool"); + return; + } + + if (softnic_parser_read_uint32(&p.pool_size, tokens[5]) != 0) { + snprintf(out, out_size, 
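+/*
+ * Example command accepted by cmd_mempool() (exactly 8 tokens;
+ * values are illustrative):
+ *
+ *    mempool MEMPOOL0 buffer 2304 pool 32768 cache 256
+ */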
MSG_ARG_INVALID, "pool_size"); + return; + } + + if (strcmp(tokens[6], "cache") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cache"); + return; + } + + if (softnic_parser_read_uint32(&p.cache_size, tokens[7]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "cache_size"); + return; + } + + mempool = softnic_mempool_create(softnic, name, &p); + if (mempool == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * link + * dev | port + */ +static void +cmd_link(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_link_params p; + struct softnic_link *link; + char *name; + + memset(&p, 0, sizeof(p)); + + if (n_tokens != 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + name = tokens[1]; + + if (strcmp(tokens[2], "dev") == 0) { + p.dev_name = tokens[3]; + } else if (strcmp(tokens[2], "port") == 0) { + p.dev_name = NULL; + + if (softnic_parser_read_uint16(&p.port_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "dev or port"); + return; + } + + link = softnic_link_create(softnic, name, &p); + if (link == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * swq + * size + */ +static void +cmd_swq(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_swq_params p; + char *name; + struct softnic_swq *swq; + + if (n_tokens != 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + if (strcmp(tokens[2], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.size, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "size"); + return; + } + + swq = softnic_swq_create(softnic, name, &p); + if (swq == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr shaper profile + * id + * rate size + * adj + */ +static void +cmd_tmgr_shaper_profile(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_tm_shaper_params sp; + struct rte_tm_error error; + uint32_t shaper_profile_id; + uint16_t port_id; + int status; + + memset(&sp, 0, sizeof(struct rte_tm_shaper_params)); + + if (n_tokens != 11) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "shaper") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); + return; + } + + if (strcmp(tokens[2], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + if (strcmp(tokens[3], "id") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id"); + return; + } + + if (softnic_parser_read_uint32(&shaper_profile_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "profile_id"); + return; + } + + if (strcmp(tokens[5], "rate") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rate"); + return; + } + + if (softnic_parser_read_uint64(&sp.peak.rate, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tb_rate"); + return; + } + + if (strcmp(tokens[7], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint64(&sp.peak.size, tokens[8]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tb_size"); + return; + } + + if (strcmp(tokens[9], "adj") != 
0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "adj"); + return; + } + + if (softnic_parser_read_int32(&sp.pkt_length_adjust, tokens[10]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "packet_length_adjust"); + return; + } + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status) + return; + + status = rte_tm_shaper_profile_add(port_id, shaper_profile_id, &sp, &error); + if (status != 0) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr shared shaper + * id + * profile + */ +static void +cmd_tmgr_shared_shaper(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_tm_error error; + uint32_t shared_shaper_id, shaper_profile_id; + uint16_t port_id; + int status; + + if (n_tokens != 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "shared") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared"); + return; + } + + if (strcmp(tokens[2], "shaper") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); + return; + } + + if (strcmp(tokens[3], "id") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id"); + return; + } + + if (softnic_parser_read_uint32(&shared_shaper_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared_shaper_id"); + return; + } + + if (strcmp(tokens[5], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + if (softnic_parser_read_uint32(&shaper_profile_id, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shaper_profile_id"); + return; + } + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status) + return; + + status = rte_tm_shared_shaper_add_update(port_id, + shared_shaper_id, + shaper_profile_id, + &error); + if (status != 0) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr node + * id + * parent + * priority + * weight + * [shaper profile ] + * [shared shaper ] + * [nonleaf sp ] + */ +static void +cmd_tmgr_node(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_tm_error error; + struct rte_tm_node_params np; + uint32_t node_id, parent_node_id, priority, weight, shared_shaper_id; + uint16_t port_id; + int status; + + memset(&np, 0, sizeof(struct rte_tm_node_params)); + np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE; + np.nonleaf.n_sp_priorities = 1; + + if (n_tokens < 10) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "node") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "node"); + return; + } + + if (strcmp(tokens[2], "id") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id"); + return; + } + + if (softnic_parser_read_uint32(&node_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "node_id"); + return; + } + + if (strcmp(tokens[4], "parent") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "parent"); + return; + } + + if (strcmp(tokens[5], "none") == 0) + parent_node_id = RTE_TM_NODE_ID_NULL; + else { + if (softnic_parser_read_uint32(&parent_node_id, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "parent_node_id"); + return; + } + } + + if (strcmp(tokens[6], "priority") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "priority"); + return; + } + + if (softnic_parser_read_uint32(&priority, tokens[7]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, 
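+/*
+ * Example commands (illustrative values):
+ *
+ *    tmgr shared shaper id 0 profile 1
+ *    tmgr node id 0 parent none priority 0 weight 1
+ *
+ * The node command takes at least 10 tokens; the optional
+ * "shaper profile", "shared shaper" and "nonleaf sp" clauses parsed
+ * below each consume 3 more.
+ */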
"priority"); + return; + } + + if (strcmp(tokens[8], "weight") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight"); + return; + } + + if (softnic_parser_read_uint32(&weight, tokens[9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "weight"); + return; + } + + tokens += 10; + n_tokens -= 10; + + if (n_tokens >= 2 && + (strcmp(tokens[0], "shaper") == 0) && + (strcmp(tokens[1], "profile") == 0)) { + if (n_tokens < 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node"); + return; + } + + if (strcmp(tokens[2], "none") == 0) { + np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE; + } else { + if (softnic_parser_read_uint32(&np.shaper_profile_id, tokens[2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shaper_profile_id"); + return; + } + } + + tokens += 3; + n_tokens -= 3; + } /* shaper profile */ + + if (n_tokens >= 2 && + (strcmp(tokens[0], "shared") == 0) && + (strcmp(tokens[1], "shaper") == 0)) { + if (n_tokens < 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node"); + return; + } + + if (softnic_parser_read_uint32(&shared_shaper_id, tokens[2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared_shaper_id"); + return; + } + + np.shared_shaper_id = &shared_shaper_id; + np.n_shared_shapers = 1; + + tokens += 3; + n_tokens -= 3; + } /* shared shaper */ + + if (n_tokens >= 2 && + (strcmp(tokens[0], "nonleaf") == 0) && + (strcmp(tokens[1], "sp") == 0)) { + if (n_tokens < 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node"); + return; + } + + if (softnic_parser_read_uint32(&np.nonleaf.n_sp_priorities, tokens[2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_sp_priorities"); + return; + } + + tokens += 3; + n_tokens -= 3; + } /* nonleaf sp */ + + if (n_tokens) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status != 0) + return; + + status = rte_tm_node_add(port_id, + node_id, + parent_node_id, + priority, + weight, + RTE_TM_NODE_LEVEL_ID_ANY, + &np, + &error); + if (status != 0) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +static uint32_t +root_node_id(uint32_t n_spp, + uint32_t n_pps) +{ + uint32_t n_queues = n_spp * n_pps * RTE_SCHED_QUEUES_PER_PIPE; + uint32_t n_tc = n_spp * n_pps * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; + uint32_t n_pipes = n_spp * n_pps; + + return n_queues + n_tc + n_pipes + n_spp; +} + +static uint32_t +subport_node_id(uint32_t n_spp, + uint32_t n_pps, + uint32_t subport_id) +{ + uint32_t n_pipes = n_spp * n_pps; + uint32_t n_tc = n_pipes * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; + uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE; + + return n_queues + n_tc + n_pipes + subport_id; +} + +static uint32_t +pipe_node_id(uint32_t n_spp, + uint32_t n_pps, + uint32_t subport_id, + uint32_t pipe_id) +{ + uint32_t n_pipes = n_spp * n_pps; + uint32_t n_tc = n_pipes * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; + uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE; + + return n_queues + + n_tc + + pipe_id + + subport_id * n_pps; +} + +static uint32_t +tc_node_id(uint32_t n_spp, + uint32_t n_pps, + uint32_t subport_id, + uint32_t pipe_id, + uint32_t tc_id) +{ + uint32_t n_pipes = n_spp * n_pps; + uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE; + + return n_queues + + tc_id + + (pipe_id + subport_id * n_pps) * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; +} + +static uint32_t +queue_node_id(uint32_t n_spp __rte_unused, + uint32_t n_pps, + uint32_t subport_id, + uint32_t pipe_id, + uint32_t tc_id, 
+ uint32_t queue_id) +{ + return queue_id + + tc_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + + (pipe_id + subport_id * n_pps) * RTE_SCHED_QUEUES_PER_PIPE; +} + +struct tmgr_hierarchy_default_params { + uint32_t n_spp; /**< Number of subports per port. */ + uint32_t n_pps; /**< Number of pipes per subport. */ + + struct { + uint32_t port; + uint32_t subport; + uint32_t pipe; + uint32_t tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + } shaper_profile_id; + + struct { + uint32_t tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + uint32_t tc_valid[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + } shared_shaper_id; + + struct { + uint32_t queue[RTE_SCHED_QUEUES_PER_PIPE]; + } weight; +}; + +static int +tmgr_hierarchy_default(struct pmd_internals *softnic, + struct tmgr_hierarchy_default_params *params) +{ + struct rte_tm_node_params root_node_params = { + .shaper_profile_id = params->shaper_profile_id.port, + .nonleaf = { + .n_sp_priorities = 1, + }, + }; + + struct rte_tm_node_params subport_node_params = { + .shaper_profile_id = params->shaper_profile_id.subport, + .nonleaf = { + .n_sp_priorities = 1, + }, + }; + + struct rte_tm_node_params pipe_node_params = { + .shaper_profile_id = params->shaper_profile_id.pipe, + .nonleaf = { + .n_sp_priorities = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, + }, + }; + + struct rte_tm_node_params tc_node_params[] = { + [0] = { + .shaper_profile_id = params->shaper_profile_id.tc[0], + .shared_shaper_id = ¶ms->shared_shaper_id.tc[0], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[0]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [1] = { + .shaper_profile_id = params->shaper_profile_id.tc[1], + .shared_shaper_id = ¶ms->shared_shaper_id.tc[1], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[1]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [2] = { + .shaper_profile_id = params->shaper_profile_id.tc[2], + .shared_shaper_id = ¶ms->shared_shaper_id.tc[2], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[2]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [3] = { + .shaper_profile_id = params->shaper_profile_id.tc[3], + .shared_shaper_id = ¶ms->shared_shaper_id.tc[3], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[3]) ? 
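+/*
+ * Note: in all four tc_node_params initializers the condition takes
+ * the address of tc_valid[i], which is never NULL, so
+ * n_shared_shapers always evaluates to 1; testing the value would be
+ * params->shared_shaper_id.tc_valid[i], without the leading "&".
+ */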
1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + }; + + struct rte_tm_node_params queue_node_params = { + .shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE, + }; + + struct rte_tm_error error; + uint32_t n_spp = params->n_spp, n_pps = params->n_pps, s; + int status; + uint16_t port_id; + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status) + return -1; + + /* Hierarchy level 0: Root node */ + status = rte_tm_node_add(port_id, + root_node_id(n_spp, n_pps), + RTE_TM_NODE_ID_NULL, + 0, + 1, + RTE_TM_NODE_LEVEL_ID_ANY, + &root_node_params, + &error); + if (status) + return -1; + + /* Hierarchy level 1: Subport nodes */ + for (s = 0; s < params->n_spp; s++) { + uint32_t p; + + status = rte_tm_node_add(port_id, + subport_node_id(n_spp, n_pps, s), + root_node_id(n_spp, n_pps), + 0, + 1, + RTE_TM_NODE_LEVEL_ID_ANY, + &subport_node_params, + &error); + if (status) + return -1; + + /* Hierarchy level 2: Pipe nodes */ + for (p = 0; p < params->n_pps; p++) { + uint32_t t; + + status = rte_tm_node_add(port_id, + pipe_node_id(n_spp, n_pps, s, p), + subport_node_id(n_spp, n_pps, s), + 0, + 1, + RTE_TM_NODE_LEVEL_ID_ANY, + &pipe_node_params, + &error); + if (status) + return -1; + + /* Hierarchy level 3: Traffic class nodes */ + for (t = 0; t < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; t++) { + uint32_t q; + + status = rte_tm_node_add(port_id, + tc_node_id(n_spp, n_pps, s, p, t), + pipe_node_id(n_spp, n_pps, s, p), + t, + 1, + RTE_TM_NODE_LEVEL_ID_ANY, + &tc_node_params[t], + &error); + if (status) + return -1; + + /* Hierarchy level 4: Queue nodes */ + for (q = 0; q < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; q++) { + status = rte_tm_node_add(port_id, + queue_node_id(n_spp, n_pps, s, p, t, q), + tc_node_id(n_spp, n_pps, s, p, t), + 0, + params->weight.queue[q], + RTE_TM_NODE_LEVEL_ID_ANY, + &queue_node_params, + &error); + if (status) + return -1; + } /* Queue */ + } /* TC */ + } /* Pipe */ + } /* Subport */ + + return 0; +} + + +/** + * tmgr hierarchy-default + * spp + * pps + * shaper profile + * port + * subport + * pipe + * tc0 + * tc1 + * tc2 + * tc3 + * shared shaper + * tc0 + * tc1 + * tc2 + * tc3 + * weight + * queue ... 
+ */ +static void +cmd_tmgr_hierarchy_default(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct tmgr_hierarchy_default_params p; + int i, status; + + memset(&p, 0, sizeof(p)); + + if (n_tokens != 50) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "hierarchy-default") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "hierarchy-default"); + return; + } + + if (strcmp(tokens[2], "spp") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp"); + return; + } + + if (softnic_parser_read_uint32(&p.n_spp, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_subports_per_port"); + return; + } + + if (strcmp(tokens[4], "pps") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps"); + return; + } + + if (softnic_parser_read_uint32(&p.n_pps, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_pipes_per_subport"); + return; + } + + /* Shaper profile */ + + if (strcmp(tokens[6], "shaper") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); + return; + } + + if (strcmp(tokens[7], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + if (strcmp(tokens[8], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.port, tokens[9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port profile id"); + return; + } + + if (strcmp(tokens[10], "subport") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "subport"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.subport, tokens[11]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "subport profile id"); + return; + } + + if (strcmp(tokens[12], "pipe") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipe"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.pipe, tokens[13]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "pipe_profile_id"); + return; + } + + if (strcmp(tokens[14], "tc0") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[0], tokens[15]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc0 profile id"); + return; + } + + if (strcmp(tokens[16], "tc1") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[1], tokens[17]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc1 profile id"); + return; + } + + if (strcmp(tokens[18], "tc2") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[2], tokens[19]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc2 profile id"); + return; + } + + if (strcmp(tokens[20], "tc3") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[3], tokens[21]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc3 profile id"); + return; + } + + /* Shared shaper */ + + if (strcmp(tokens[22], "shared") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared"); + return; + } + + if (strcmp(tokens[23], "shaper") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); + return; + } + + if (strcmp(tokens[24], "tc0") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0"); + return; + } + + if (strcmp(tokens[25], "none") == 0) + 
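+/*
+ * Example command (exactly 50 tokens; illustrative profile IDs, no
+ * shared shapers, all 16 queue weights set to 1; entered as a single
+ * line, wrapped here for readability):
+ *
+ *    tmgr hierarchy-default spp 1 pps 4096
+ *        shaper profile port 0 subport 1 pipe 2 tc0 3 tc1 3 tc2 3 tc3 3
+ *        shared shaper tc0 none tc1 none tc2 none tc3 none
+ *        weight queue 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ */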
p.shared_shaper_id.tc_valid[0] = 0; + else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[0], tokens[25]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc0"); + return; + } + + p.shared_shaper_id.tc_valid[0] = 1; + } + + if (strcmp(tokens[26], "tc1") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1"); + return; + } + + if (strcmp(tokens[27], "none") == 0) + p.shared_shaper_id.tc_valid[1] = 0; + else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[1], tokens[27]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc1"); + return; + } + + p.shared_shaper_id.tc_valid[1] = 1; + } + + if (strcmp(tokens[28], "tc2") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2"); + return; + } + + if (strcmp(tokens[29], "none") == 0) + p.shared_shaper_id.tc_valid[2] = 0; + else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[2], tokens[29]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc2"); + return; + } + + p.shared_shaper_id.tc_valid[2] = 1; + } + + if (strcmp(tokens[30], "tc3") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3"); + return; + } + + if (strcmp(tokens[31], "none") == 0) + p.shared_shaper_id.tc_valid[3] = 0; + else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[3], tokens[31]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc3"); + return; + } + + p.shared_shaper_id.tc_valid[3] = 1; + } + + /* Weight */ + + if (strcmp(tokens[32], "weight") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight"); + return; + } + + if (strcmp(tokens[33], "queue") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "queue"); + return; + } + + for (i = 0; i < 16; i++) { + if (softnic_parser_read_uint32(&p.weight.queue[i], tokens[34 + i]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "weight queue"); + return; + } + } + + status = tmgr_hierarchy_default(softnic, &p); + if (status != 0) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr hierarchy commit + */ +static void +cmd_tmgr_hierarchy_commit(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_tm_error error; + uint16_t port_id; + int status; + + if (n_tokens != 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "hierarchy") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "hierarchy"); + return; + } + + if (strcmp(tokens[2], "commit") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "commit"); + return; + } + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status != 0) + return; + + status = rte_tm_hierarchy_commit(port_id, 1, &error); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr + */ +static void +cmd_tmgr(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *name; + struct softnic_tmgr_port *tmgr_port; + + if (n_tokens != 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + tmgr_port = softnic_tmgr_port_create(softnic, name); + if (tmgr_port == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tap + */ +static void +cmd_tap(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *name; + struct softnic_tap *tap; + + if (n_tokens != 2) { + snprintf(out, out_size, 
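+/*
+ * Example commands (illustrative names):
+ *
+ *    tmgr hierarchy commit
+ *    tmgr TMGR0
+ *    tap TAP0
+ */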
MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + tap = softnic_tap_create(softnic, name); + if (tap == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * port in action profile + * [filter match | mismatch offset mask key port ] + * [balance offset mask port ... ] + */ +static void +cmd_port_in_action_profile(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_port_in_action_profile_params p; + struct softnic_port_in_action_profile *ap; + char *name; + uint32_t t0; + + memset(&p, 0, sizeof(p)); + + if (n_tokens < 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (strcmp(tokens[2], "action") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "action"); + return; + } + + if (strcmp(tokens[3], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + name = tokens[4]; + + t0 = 5; + + if (t0 < n_tokens && + (strcmp(tokens[t0], "filter") == 0)) { + uint32_t size; + + if (n_tokens < t0 + 10) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "port in action profile filter"); + return; + } + + if (strcmp(tokens[t0 + 1], "match") == 0) { + p.fltr.filter_on_match = 1; + } else if (strcmp(tokens[t0 + 1], "mismatch") == 0) { + p.fltr.filter_on_match = 0; + } else { + snprintf(out, out_size, MSG_ARG_INVALID, "match or mismatch"); + return; + } + + if (strcmp(tokens[t0 + 2], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.fltr.key_offset, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 4], "mask") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask"); + return; + } + + size = RTE_PORT_IN_ACTION_FLTR_KEY_SIZE; + if ((softnic_parse_hex_string(tokens[t0 + 5], + p.fltr.key_mask, &size) != 0) || + size != RTE_PORT_IN_ACTION_FLTR_KEY_SIZE) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_mask"); + return; + } + + if (strcmp(tokens[t0 + 6], "key") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "key"); + return; + } + + size = RTE_PORT_IN_ACTION_FLTR_KEY_SIZE; + if ((softnic_parse_hex_string(tokens[t0 + 7], + p.fltr.key, &size) != 0) || + size != RTE_PORT_IN_ACTION_FLTR_KEY_SIZE) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_value"); + return; + } + + if (strcmp(tokens[t0 + 8], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (softnic_parser_read_uint32(&p.fltr.port_id, + tokens[t0 + 9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + p.action_mask |= 1LLU << RTE_PORT_IN_ACTION_FLTR; + t0 += 10; + } /* filter */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "balance") == 0)) { + uint32_t i; + + if (n_tokens < t0 + 22) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "port in action profile balance"); + return; + } + + if (strcmp(tokens[t0 + 1], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.lb.key_offset, + tokens[t0 + 2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 3], "mask") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask"); + return; + } + + p.lb.key_size = RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX; + if 
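+/*
+ * Example command with the optional filter clause (15 tokens; the
+ * mask and key must parse to exactly RTE_PORT_IN_ACTION_FLTR_KEY_SIZE
+ * bytes, shown here as illustrative 16-byte / 32-hex-digit values):
+ *
+ *    port in action profile AP0 filter match offset 278
+ *        mask FFFFFFFFFFFFFFFF0000000000000000
+ *        key 00112233445566770000000000000000
+ *        port 0
+ */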
(softnic_parse_hex_string(tokens[t0 + 4], + p.lb.key_mask, &p.lb.key_size) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_mask"); + return; + } + + if (strcmp(tokens[t0 + 5], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + for (i = 0; i < 16; i++) + if (softnic_parser_read_uint32(&p.lb.port_id[i], + tokens[t0 + 6 + i]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + p.action_mask |= 1LLU << RTE_PORT_IN_ACTION_LB; + t0 += 22; + } /* balance */ + + if (t0 < n_tokens) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + ap = softnic_port_in_action_profile_create(softnic, name, &p); + if (ap == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * table action profile + * ipv4 | ipv6 + * offset + * fwd + * [balance offset mask outoffset ] + * [meter srtcm | trtcm + * tc + * stats none | pkts | bytes | both] + * [tm spp pps ] + * [encap ether | vlan | qinq | mpls | pppoe] + * [nat src | dst + * proto udp | tcp] + * [ttl drop | fwd + * stats none | pkts] + * [stats pkts | bytes | both] + * [time] + */ +static void +cmd_table_action_profile(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_table_action_profile_params p; + struct softnic_table_action_profile *ap; + char *name; + uint32_t t0; + + memset(&p, 0, sizeof(p)); + + if (n_tokens < 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "action") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "action"); + return; + } + + if (strcmp(tokens[2], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + name = tokens[3]; + + if (strcmp(tokens[4], "ipv4") == 0) { + p.common.ip_version = 1; + } else if (strcmp(tokens[4], "ipv6") == 0) { + p.common.ip_version = 0; + } else { + snprintf(out, out_size, MSG_ARG_INVALID, "ipv4 or ipv6"); + return; + } + + if (strcmp(tokens[5], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.common.ip_offset, + tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "ip_offset"); + return; + } + + if (strcmp(tokens[7], "fwd") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "fwd"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD; + + t0 = 8; + if (t0 < n_tokens && + (strcmp(tokens[t0], "balance") == 0)) { + if (n_tokens < t0 + 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "table action profile balance"); + return; + } + + if (strcmp(tokens[t0 + 1], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.lb.key_offset, + tokens[t0 + 2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 3], "mask") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask"); + return; + } + + p.lb.key_size = RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX; + if (softnic_parse_hex_string(tokens[t0 + 4], + p.lb.key_mask, &p.lb.key_size) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_mask"); + return; + } + + if (strcmp(tokens[t0 + 5], "outoffset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "outoffset"); + return; + } + + if (softnic_parser_read_uint32(&p.lb.out_offset, + tokens[t0 + 6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "out_offset"); + return; + } + + p.action_mask |= 1LLU << 
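+/*
+ * Example commands (illustrative values; "fwd" is mandatory, the
+ * remaining clauses are optional and must appear in the order parsed
+ * below):
+ *
+ *    table action profile AP0 ipv4 offset 270 fwd
+ *    table action profile AP1 ipv4 offset 270 fwd
+ *        meter trtcm tc 4 stats both
+ *        stats pkts
+ */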
RTE_TABLE_ACTION_LB; + t0 += 7; + } /* balance */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "meter") == 0)) { + if (n_tokens < t0 + 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile meter"); + return; + } + + if (strcmp(tokens[t0 + 1], "srtcm") == 0) { + p.mtr.alg = RTE_TABLE_ACTION_METER_SRTCM; + } else if (strcmp(tokens[t0 + 1], "trtcm") == 0) { + p.mtr.alg = RTE_TABLE_ACTION_METER_TRTCM; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "srtcm or trtcm"); + return; + } + + if (strcmp(tokens[t0 + 2], "tc") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc"); + return; + } + + if (softnic_parser_read_uint32(&p.mtr.n_tc, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_tc"); + return; + } + + if (strcmp(tokens[t0 + 4], "stats") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats"); + return; + } + + if (strcmp(tokens[t0 + 5], "none") == 0) { + p.mtr.n_packets_enabled = 0; + p.mtr.n_bytes_enabled = 0; + } else if (strcmp(tokens[t0 + 5], "pkts") == 0) { + p.mtr.n_packets_enabled = 1; + p.mtr.n_bytes_enabled = 0; + } else if (strcmp(tokens[t0 + 5], "bytes") == 0) { + p.mtr.n_packets_enabled = 0; + p.mtr.n_bytes_enabled = 1; + } else if (strcmp(tokens[t0 + 5], "both") == 0) { + p.mtr.n_packets_enabled = 1; + p.mtr.n_bytes_enabled = 1; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "none or pkts or bytes or both"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_MTR; + t0 += 6; + } /* meter */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "tm") == 0)) { + if (n_tokens < t0 + 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile tm"); + return; + } + + if (strcmp(tokens[t0 + 1], "spp") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp"); + return; + } + + if (softnic_parser_read_uint32(&p.tm.n_subports_per_port, + tokens[t0 + 2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "n_subports_per_port"); + return; + } + + if (strcmp(tokens[t0 + 3], "pps") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps"); + return; + } + + if (softnic_parser_read_uint32(&p.tm.n_pipes_per_subport, + tokens[t0 + 4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "n_pipes_per_subport"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_TM; + t0 += 5; + } /* tm */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "encap") == 0)) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "action profile encap"); + return; + } + + if (strcmp(tokens[t0 + 1], "ether") == 0) { + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER; + } else if (strcmp(tokens[t0 + 1], "vlan") == 0) { + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN; + } else if (strcmp(tokens[t0 + 1], "qinq") == 0) { + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ; + } else if (strcmp(tokens[t0 + 1], "mpls") == 0) { + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS; + } else if (strcmp(tokens[t0 + 1], "pppoe") == 0) { + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE; + } else { + snprintf(out, out_size, MSG_ARG_MISMATCH, "encap"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_ENCAP; + t0 += 2; + } /* encap */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "nat") == 0)) { + if (n_tokens < t0 + 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile nat"); + return; + } + + if (strcmp(tokens[t0 + 1], "src") == 0) { + p.nat.source_nat = 1; + } else if (strcmp(tokens[t0 + 1], "dst") == 0) { + p.nat.source_nat = 0; + } 
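+/*
+ * Example with NAT enabled (12 tokens): "src" rewrites the source
+ * address/port, "dst" the destination; proto selects 0x06 (TCP) or
+ * 0x11 (UDP):
+ *
+ *    table action profile AP2 ipv4 offset 270 fwd nat src proto udp
+ */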
else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "src or dst"); + return; + } + + if (strcmp(tokens[t0 + 2], "proto") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "proto"); + return; + } + + if (strcmp(tokens[t0 + 3], "tcp") == 0) { + p.nat.proto = 0x06; + } else if (strcmp(tokens[t0 + 3], "udp") == 0) { + p.nat.proto = 0x11; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "tcp or udp"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_NAT; + t0 += 4; + } /* nat */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "ttl") == 0)) { + if (n_tokens < t0 + 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile ttl"); + return; + } + + if (strcmp(tokens[t0 + 1], "drop") == 0) { + p.ttl.drop = 1; + } else if (strcmp(tokens[t0 + 1], "fwd") == 0) { + p.ttl.drop = 0; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "drop or fwd"); + return; + } + + if (strcmp(tokens[t0 + 2], "stats") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats"); + return; + } + + if (strcmp(tokens[t0 + 3], "none") == 0) { + p.ttl.n_packets_enabled = 0; + } else if (strcmp(tokens[t0 + 3], "pkts") == 0) { + p.ttl.n_packets_enabled = 1; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "none or pkts"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_TTL; + t0 += 4; + } /* ttl */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "stats") == 0)) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile stats"); + return; + } + + if (strcmp(tokens[t0 + 1], "pkts") == 0) { + p.stats.n_packets_enabled = 1; + p.stats.n_bytes_enabled = 0; + } else if (strcmp(tokens[t0 + 1], "bytes") == 0) { + p.stats.n_packets_enabled = 0; + p.stats.n_bytes_enabled = 1; + } else if (strcmp(tokens[t0 + 1], "both") == 0) { + p.stats.n_packets_enabled = 1; + p.stats.n_bytes_enabled = 1; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "pkts or bytes or both"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_STATS; + t0 += 2; + } /* stats */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "time") == 0)) { + p.action_mask |= 1LLU << RTE_TABLE_ACTION_TIME; + t0 += 1; + } /* time */ + + if (t0 < n_tokens) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + ap = softnic_table_action_profile_create(softnic, name, &p); + if (ap == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline + * period + * offset_port_id + */ +static void +cmd_pipeline(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct pipeline_params p; + char *name; + struct pipeline *pipeline; + + if (n_tokens != 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + if (strcmp(tokens[2], "period") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "period"); + return; + } + + if (softnic_parser_read_uint32(&p.timer_period_ms, + tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "timer_period_ms"); + return; + } + + if (strcmp(tokens[4], "offset_port_id") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset_port_id"); + return; + } + + if (softnic_parser_read_uint32(&p.offset_port_id, + tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "offset_port_id"); + return; + } + + pipeline = softnic_pipeline_create(softnic, name, &p); + if (pipeline == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline port in + * bsz 
+ * link rxq + * | swq + * | tmgr + * | tap mempool mtu + * | source mempool file bpp + * [action ] + * [disabled] + */ +static void +cmd_pipeline_port_in(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_port_in_params p; + char *pipeline_name; + uint32_t t0; + int enabled, status; + + if (n_tokens < 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (strcmp(tokens[4], "bsz") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "bsz"); + return; + } + + if (softnic_parser_read_uint32(&p.burst_size, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "burst_size"); + return; + } + + t0 = 6; + + if (strcmp(tokens[t0], "link") == 0) { + if (n_tokens < t0 + 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in link"); + return; + } + + p.type = PORT_IN_RXQ; + + p.dev_name = tokens[t0 + 1]; + + if (strcmp(tokens[t0 + 2], "rxq") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rxq"); + return; + } + + if (softnic_parser_read_uint16(&p.rxq.queue_id, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "queue_id"); + return; + } + t0 += 4; + } else if (strcmp(tokens[t0], "swq") == 0) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in swq"); + return; + } + + p.type = PORT_IN_SWQ; + + p.dev_name = tokens[t0 + 1]; + + t0 += 2; + } else if (strcmp(tokens[t0], "tmgr") == 0) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in tmgr"); + return; + } + + p.type = PORT_IN_TMGR; + + p.dev_name = tokens[t0 + 1]; + + t0 += 2; + } else if (strcmp(tokens[t0], "tap") == 0) { + if (n_tokens < t0 + 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in tap"); + return; + } + + p.type = PORT_IN_TAP; + + p.dev_name = tokens[t0 + 1]; + + if (strcmp(tokens[t0 + 2], "mempool") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "mempool"); + return; + } + + p.tap.mempool_name = tokens[t0 + 3]; + + if (strcmp(tokens[t0 + 4], "mtu") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "mtu"); + return; + } + + if (softnic_parser_read_uint32(&p.tap.mtu, + tokens[t0 + 5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "mtu"); + return; + } + + t0 += 6; + } else if (strcmp(tokens[t0], "source") == 0) { + if (n_tokens < t0 + 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in source"); + return; + } + + p.type = PORT_IN_SOURCE; + + p.dev_name = NULL; + + if (strcmp(tokens[t0 + 1], "mempool") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "mempool"); + return; + } + + p.source.mempool_name = tokens[t0 + 2]; + + if (strcmp(tokens[t0 + 3], "file") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "file"); + return; + } + + p.source.file_name = tokens[t0 + 4]; + + if (strcmp(tokens[t0 + 5], "bpp") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "bpp"); + return; + } + + if (softnic_parser_read_uint32(&p.source.n_bytes_per_pkt, + tokens[t0 + 6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "n_bytes_per_pkt"); + return; + } + + t0 += 7; + } else { + snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]); + return; + } + + p.action_profile_name = NULL; + if (n_tokens > t0 && + (strcmp(tokens[t0], 
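+/*
+ * Example commands (illustrative names and values):
+ *
+ *    pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0
+ *    pipeline PIPELINE0 port in bsz 32 tap TAP0 mempool MEMPOOL0 mtu 1500
+ *    pipeline PIPELINE0 port in bsz 32 source mempool MEMPOOL0 file in.pcap bpp 64
+ *
+ * Note: the "source" branch above checks n_tokens < t0 + 6 but then
+ * reads tokens[t0 + 6] and advances t0 by 7, so its bounds check is
+ * one token short.
+ */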
"action") == 0)) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "action"); + return; + } + + p.action_profile_name = tokens[t0 + 1]; + + t0 += 2; + } + + enabled = 1; + if (n_tokens > t0 && + (strcmp(tokens[t0], "disabled") == 0)) { + enabled = 0; + + t0 += 1; + } + + if (n_tokens != t0) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + status = softnic_pipeline_port_in_create(softnic, + pipeline_name, + &p, + enabled); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline port out + * bsz + * link txq + * | swq + * | tmgr + * | tap + * | sink [file pkts ] + */ +static void +cmd_pipeline_port_out(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_port_out_params p; + char *pipeline_name; + int status; + + memset(&p, 0, sizeof(p)); + + if (n_tokens < 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "out") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "out"); + return; + } + + if (strcmp(tokens[4], "bsz") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "bsz"); + return; + } + + if (softnic_parser_read_uint32(&p.burst_size, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "burst_size"); + return; + } + + if (strcmp(tokens[6], "link") == 0) { + if (n_tokens != 10) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out link"); + return; + } + + p.type = PORT_OUT_TXQ; + + p.dev_name = tokens[7]; + + if (strcmp(tokens[8], "txq") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "txq"); + return; + } + + if (softnic_parser_read_uint16(&p.txq.queue_id, + tokens[9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "queue_id"); + return; + } + } else if (strcmp(tokens[6], "swq") == 0) { + if (n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out swq"); + return; + } + + p.type = PORT_OUT_SWQ; + + p.dev_name = tokens[7]; + } else if (strcmp(tokens[6], "tmgr") == 0) { + if (n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out tmgr"); + return; + } + + p.type = PORT_OUT_TMGR; + + p.dev_name = tokens[7]; + } else if (strcmp(tokens[6], "tap") == 0) { + if (n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out tap"); + return; + } + + p.type = PORT_OUT_TAP; + + p.dev_name = tokens[7]; + } else if (strcmp(tokens[6], "sink") == 0) { + if ((n_tokens != 7) && (n_tokens != 11)) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out sink"); + return; + } + + p.type = PORT_OUT_SINK; + + p.dev_name = NULL; + + if (n_tokens == 7) { + p.sink.file_name = NULL; + p.sink.max_n_pkts = 0; + } else { + if (strcmp(tokens[7], "file") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "file"); + return; + } + + p.sink.file_name = tokens[8]; + + if (strcmp(tokens[9], "pkts") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pkts"); + return; + } + + if (softnic_parser_read_uint32(&p.sink.max_n_pkts, + tokens[10]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "max_n_pkts"); + return; + } + } + } else { + snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]); + return; + } + + status = softnic_pipeline_port_out_create(softnic, pipeline_name, &p); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + 
return; + } +} + +/** + * pipeline table + * match + * acl + * ipv4 | ipv6 + * offset + * size + * | array + * offset + * size + * | hash + * ext | lru + * key + * mask + * offset + * buckets + * size + * | lpm + * ipv4 | ipv6 + * offset + * size + * | stub + * [action ] + */ +static void +cmd_pipeline_table(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + uint8_t key_mask[TABLE_RULE_MATCH_SIZE_MAX]; + struct softnic_table_params p; + char *pipeline_name; + uint32_t t0; + int status; + + if (n_tokens < 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "table") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table"); + return; + } + + if (strcmp(tokens[3], "match") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "match"); + return; + } + + t0 = 4; + if (strcmp(tokens[t0], "acl") == 0) { + if (n_tokens < t0 + 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline table acl"); + return; + } + + p.match_type = TABLE_ACL; + + if (strcmp(tokens[t0 + 1], "ipv4") == 0) { + p.match.acl.ip_version = 1; + } else if (strcmp(tokens[t0 + 1], "ipv6") == 0) { + p.match.acl.ip_version = 0; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "ipv4 or ipv6"); + return; + } + + if (strcmp(tokens[t0 + 2], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.match.acl.ip_header_offset, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "ip_header_offset"); + return; + } + + if (strcmp(tokens[t0 + 4], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.match.acl.n_rules, + tokens[t0 + 5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_rules"); + return; + } + + t0 += 6; + } else if (strcmp(tokens[t0], "array") == 0) { + if (n_tokens < t0 + 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline table array"); + return; + } + + p.match_type = TABLE_ARRAY; + + if (strcmp(tokens[t0 + 1], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.match.array.key_offset, + tokens[t0 + 2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 3], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.match.array.n_keys, + tokens[t0 + 4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_keys"); + return; + } + + t0 += 5; + } else if (strcmp(tokens[t0], "hash") == 0) { + uint32_t key_mask_size = TABLE_RULE_MATCH_SIZE_MAX; + + if (n_tokens < t0 + 12) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline table hash"); + return; + } + + p.match_type = TABLE_HASH; + + if (strcmp(tokens[t0 + 1], "ext") == 0) { + p.match.hash.extendable_bucket = 1; + } else if (strcmp(tokens[t0 + 1], "lru") == 0) { + p.match.hash.extendable_bucket = 0; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "ext or lru"); + return; + } + + if (strcmp(tokens[t0 + 2], "key") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "key"); + return; + } + + if ((softnic_parser_read_uint32(&p.match.hash.key_size, + tokens[t0 + 3]) != 0) || + p.match.hash.key_size == 0 || + p.match.hash.key_size > TABLE_RULE_MATCH_SIZE_MAX) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_size"); + return; + } + + if (strcmp(tokens[t0 + 4], 
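+/*
+ * Example table declarations (illustrative values; the hash mask must
+ * parse to exactly the declared key size, here 16 bytes / 32 hex
+ * digits; entered as single lines):
+ *
+ *    pipeline PIPELINE0 table match acl ipv4 offset 270 size 4096
+ *    pipeline PIPELINE0 table match hash ext key 16
+ *        mask FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
+ *        offset 278 buckets 16384 size 65536
+ *    pipeline PIPELINE0 table match stub
+ */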
"mask") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask"); + return; + } + + if ((softnic_parse_hex_string(tokens[t0 + 5], + key_mask, &key_mask_size) != 0) || + key_mask_size != p.match.hash.key_size) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_mask"); + return; + } + p.match.hash.key_mask = key_mask; + + if (strcmp(tokens[t0 + 6], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.match.hash.key_offset, + tokens[t0 + 7]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 8], "buckets") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "buckets"); + return; + } + + if (softnic_parser_read_uint32(&p.match.hash.n_buckets, + tokens[t0 + 9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_buckets"); + return; + } + + if (strcmp(tokens[t0 + 10], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.match.hash.n_keys, + tokens[t0 + 11]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_keys"); + return; + } + + t0 += 12; + } else if (strcmp(tokens[t0], "lpm") == 0) { + if (n_tokens < t0 + 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline table lpm"); + return; + } + + p.match_type = TABLE_LPM; + + if (strcmp(tokens[t0 + 1], "ipv4") == 0) { + p.match.lpm.key_size = 4; + } else if (strcmp(tokens[t0 + 1], "ipv6") == 0) { + p.match.lpm.key_size = 16; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "ipv4 or ipv6"); + return; + } + + if (strcmp(tokens[t0 + 2], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.match.lpm.key_offset, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 4], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.match.lpm.n_rules, + tokens[t0 + 5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_rules"); + return; + } + + t0 += 6; + } else if (strcmp(tokens[t0], "stub") == 0) { + p.match_type = TABLE_STUB; + + t0 += 1; + } else { + snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]); + return; + } + + p.action_profile_name = NULL; + if (n_tokens > t0 && + (strcmp(tokens[t0], "action") == 0)) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "action"); + return; + } + + p.action_profile_name = tokens[t0 + 1]; + + t0 += 2; + } + + if (n_tokens > t0) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + status = softnic_pipeline_table_create(softnic, pipeline_name, &p); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline port in table + */ +static void +cmd_pipeline_port_in_table(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *pipeline_name; + uint32_t port_id, table_id; + int status; + + if (n_tokens != 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + 
if (strcmp(tokens[5], "table") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table"); + return; + } + + if (softnic_parser_read_uint32(&table_id, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "table_id"); + return; + } + + status = softnic_pipeline_port_in_connect_to_table(softnic, + pipeline_name, + port_id, + table_id); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline port in stats read [clear] + */ + +#define MSG_PIPELINE_PORT_IN_STATS \ + "Pkts in: %" PRIu64 "\n" \ + "Pkts dropped by AH: %" PRIu64 "\n" \ + "Pkts dropped by other: %" PRIu64 "\n" + +static void +cmd_pipeline_port_in_stats(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_pipeline_port_in_stats stats; + char *pipeline_name; + uint32_t port_id; + int clear, status; + + if (n_tokens != 7 && + n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + if (strcmp(tokens[5], "stats") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats"); + return; + } + + if (strcmp(tokens[6], "read") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read"); + return; + } + + clear = 0; + if (n_tokens == 8) { + if (strcmp(tokens[7], "clear") != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "clear"); + return; + } + + clear = 1; + } + + status = softnic_pipeline_port_in_stats_read(softnic, + pipeline_name, + port_id, + &stats, + clear); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } + + snprintf(out, out_size, MSG_PIPELINE_PORT_IN_STATS, + stats.stats.n_pkts_in, + stats.n_pkts_dropped_by_ah, + stats.stats.n_pkts_drop); +} + +/** + * pipeline port in enable + */ +static void +cmd_softnic_pipeline_port_in_enable(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *pipeline_name; + uint32_t port_id; + int status; + + if (n_tokens != 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + if (strcmp(tokens[5], "enable") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "enable"); + return; + } + + status = softnic_pipeline_port_in_enable(softnic, pipeline_name, port_id); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline port in disable + */ +static void +cmd_softnic_pipeline_port_in_disable(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *pipeline_name; + uint32_t port_id; + int status; + + if (n_tokens != 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, 
MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + if (strcmp(tokens[5], "disable") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "disable"); + return; + } + + status = softnic_pipeline_port_in_disable(softnic, pipeline_name, port_id); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline port out stats read [clear] + */ +#define MSG_PIPELINE_PORT_OUT_STATS \ + "Pkts in: %" PRIu64 "\n" \ + "Pkts dropped by AH: %" PRIu64 "\n" \ + "Pkts dropped by other: %" PRIu64 "\n" + +static void +cmd_pipeline_port_out_stats(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_pipeline_port_out_stats stats; + char *pipeline_name; + uint32_t port_id; + int clear, status; + + if (n_tokens != 7 && + n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "out") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "out"); + return; + } + + if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + if (strcmp(tokens[5], "stats") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats"); + return; + } + + if (strcmp(tokens[6], "read") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read"); + return; + } + + clear = 0; + if (n_tokens == 8) { + if (strcmp(tokens[7], "clear") != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "clear"); + return; + } + + clear = 1; + } + + status = softnic_pipeline_port_out_stats_read(softnic, + pipeline_name, + port_id, + &stats, + clear); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } + + snprintf(out, out_size, MSG_PIPELINE_PORT_OUT_STATS, + stats.stats.n_pkts_in, + stats.n_pkts_dropped_by_ah, + stats.stats.n_pkts_drop); +} + +/** + * pipeline table stats read [clear] + */ +#define MSG_PIPELINE_TABLE_STATS \ + "Pkts in: %" PRIu64 "\n" \ + "Pkts in with lookup miss: %" PRIu64 "\n" \ + "Pkts in with lookup hit dropped by AH: %" PRIu64 "\n" \ + "Pkts in with lookup hit dropped by others: %" PRIu64 "\n" \ + "Pkts in with lookup miss dropped by AH: %" PRIu64 "\n" \ + "Pkts in with lookup miss dropped by others: %" PRIu64 "\n" + +static void +cmd_pipeline_table_stats(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_pipeline_table_stats stats; + char *pipeline_name; + uint32_t table_id; + int clear, status; + + if (n_tokens != 6 && + n_tokens != 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "table") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "table_id"); + return; + } + + if (strcmp(tokens[4], "stats") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats"); + return; + } + + if (strcmp(tokens[5], "read") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read"); + return; + } + + clear = 0; + if (n_tokens == 7) { + if 
(strcmp(tokens[6], "clear") != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "clear"); + return; + } + + clear = 1; + } + + status = softnic_pipeline_table_stats_read(softnic, + pipeline_name, + table_id, + &stats, + clear); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } + + snprintf(out, out_size, MSG_PIPELINE_TABLE_STATS, + stats.stats.n_pkts_in, + stats.stats.n_pkts_lookup_miss, + stats.n_pkts_dropped_by_lkp_hit_ah, + stats.n_pkts_dropped_lkp_hit, + stats.n_pkts_dropped_by_lkp_miss_ah, + stats.n_pkts_dropped_lkp_miss); +} + +/** + * ::= + * + * match + * acl + * priority + * ipv4 | ipv6 + * + * | array + * | hash + * raw + * | ipv4_5tuple + * | ipv6_5tuple + * | ipv4_addr + * | ipv6_addr + * | qinq + * | lpm + * ipv4 | ipv6 + */ +struct pkt_key_qinq { + uint16_t ethertype_svlan; + uint16_t svlan; + uint16_t ethertype_cvlan; + uint16_t cvlan; +} __attribute__((__packed__)); + +struct pkt_key_ipv4_5tuple { + uint8_t time_to_live; + uint8_t proto; + uint16_t hdr_checksum; + uint32_t sa; + uint32_t da; + uint16_t sp; + uint16_t dp; +} __attribute__((__packed__)); + +struct pkt_key_ipv6_5tuple { + uint16_t payload_length; + uint8_t proto; + uint8_t hop_limit; + uint8_t sa[16]; + uint8_t da[16]; + uint16_t sp; + uint16_t dp; +} __attribute__((__packed__)); + +struct pkt_key_ipv4_addr { + uint32_t addr; +} __attribute__((__packed__)); + +struct pkt_key_ipv6_addr { + uint8_t addr[16]; +} __attribute__((__packed__)); + +static uint32_t +parse_match(char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size, + struct softnic_table_rule_match *m) +{ + memset(m, 0, sizeof(*m)); + + if (n_tokens < 2) + return 0; + + if (strcmp(tokens[0], "match") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "match"); + return 0; + } + + if (strcmp(tokens[1], "acl") == 0) { + if (n_tokens < 14) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return 0; + } + + m->match_type = TABLE_ACL; + + if (strcmp(tokens[2], "priority") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "priority"); + return 0; + } + + if (softnic_parser_read_uint32(&m->match.acl.priority, + tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "priority"); + return 0; + } + + if (strcmp(tokens[4], "ipv4") == 0) { + struct in_addr saddr, daddr; + + m->match.acl.ip_version = 1; + + if (softnic_parse_ipv4_addr(tokens[5], &saddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sa"); + return 0; + } + m->match.acl.ipv4.sa = rte_be_to_cpu_32(saddr.s_addr); + + if (softnic_parse_ipv4_addr(tokens[7], &daddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "da"); + return 0; + } + m->match.acl.ipv4.da = rte_be_to_cpu_32(daddr.s_addr); + } else if (strcmp(tokens[4], "ipv6") == 0) { + struct in6_addr saddr, daddr; + + m->match.acl.ip_version = 0; + + if (softnic_parse_ipv6_addr(tokens[5], &saddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sa"); + return 0; + } + memcpy(m->match.acl.ipv6.sa, saddr.s6_addr, 16); + + if (softnic_parse_ipv6_addr(tokens[7], &daddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "da"); + return 0; + } + memcpy(m->match.acl.ipv6.da, daddr.s6_addr, 16); + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "ipv4 or ipv6"); + return 0; + } + + if (softnic_parser_read_uint32(&m->match.acl.sa_depth, + tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sa_depth"); + return 0; + } + + if (softnic_parser_read_uint32(&m->match.acl.da_depth, + tokens[8]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "da_depth"); + return 0; + 
+
+	if (strcmp(tokens[1], "array") == 0) {
+		if (n_tokens < 3) {
+			snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+			return 0;
+		}
+
+		m->match_type = TABLE_ARRAY;
+
+		if (softnic_parser_read_uint32(&m->match.array.pos, tokens[2]) != 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "pos");
+			return 0;
+		}
+
+		return 3;
+	} /* array */
+
+	if (strcmp(tokens[1], "hash") == 0) {
+		if (n_tokens < 3) {
+			snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+			return 0;
+		}
+
+		m->match_type = TABLE_HASH;
+
+		if (strcmp(tokens[2], "raw") == 0) {
+			uint32_t key_size = TABLE_RULE_MATCH_SIZE_MAX;
+
+			if (n_tokens < 4) {
+				snprintf(out, out_size, MSG_ARG_MISMATCH,
+					tokens[0]);
+				return 0;
+			}
+
+			if (softnic_parse_hex_string(tokens[3],
+				m->match.hash.key, &key_size) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID, "key");
+				return 0;
+			}
+
+			return 4;
+		} /* hash raw */
+
+		if (strcmp(tokens[2], "ipv4_5tuple") == 0) {
+			struct pkt_key_ipv4_5tuple *ipv4 =
+				(struct pkt_key_ipv4_5tuple *)m->match.hash.key;
+			struct in_addr saddr, daddr;
+			uint16_t sp, dp;
+			uint8_t proto;
+
+			if (n_tokens < 8) {
+				snprintf(out, out_size, MSG_ARG_MISMATCH,
+					tokens[0]);
+				return 0;
+			}
+
+			if (softnic_parse_ipv4_addr(tokens[3], &saddr) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+				return 0;
+			}
+
+			if (softnic_parse_ipv4_addr(tokens[4], &daddr) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID, "da");
+				return 0;
+			}
+
+			if (softnic_parser_read_uint16(&sp, tokens[5]) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID, "sp");
+				return 0;
+			}
+
+			if (softnic_parser_read_uint16(&dp, tokens[6]) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID, "dp");
+				return 0;
+			}
+
+			if (softnic_parser_read_uint8(&proto, tokens[7]) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID,
+					"proto");
+				return 0;
+			}
+
+			ipv4->sa = saddr.s_addr;
+			ipv4->da = daddr.s_addr;
+			ipv4->sp = rte_cpu_to_be_16(sp);
+			ipv4->dp = rte_cpu_to_be_16(dp);
+			ipv4->proto = proto;
+
+			return 8;
+		} /* hash ipv4_5tuple */
+
+		if (strcmp(tokens[2], "ipv6_5tuple") == 0) {
+			struct pkt_key_ipv6_5tuple *ipv6 =
+				(struct pkt_key_ipv6_5tuple *)m->match.hash.key;
+			struct in6_addr saddr, daddr;
+			uint16_t sp, dp;
+			uint8_t proto;
+
+			if (n_tokens < 8) {
+				snprintf(out, out_size, MSG_ARG_MISMATCH,
+					tokens[0]);
+				return 0;
+			}
+
+			if (softnic_parse_ipv6_addr(tokens[3], &saddr) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+				return 0;
+			}
+
+			if (softnic_parse_ipv6_addr(tokens[4], &daddr) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID, "da");
+				return 0;
+			}
+
+			if (softnic_parser_read_uint16(&sp, tokens[5]) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID, "sp");
+				return 0;
+			}
+
+			if (softnic_parser_read_uint16(&dp, tokens[6]) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID, "dp");
+				return 0;
+			}
+
+			if (softnic_parser_read_uint8(&proto, tokens[7]) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID,
+					"proto");
+				return 0;
+			}
+
+			memcpy(ipv6->sa, saddr.s6_addr, 16);
+			memcpy(ipv6->da, daddr.s6_addr, 16);
+			ipv6->sp = rte_cpu_to_be_16(sp);
+			ipv6->dp = rte_cpu_to_be_16(dp);
+			ipv6->proto = proto;
+
+			return 8;
+		} /* hash ipv6_5tuple */
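The ipv4_5tuple branch above builds the hash key in network byte order inside the packed pkt_key_ipv4_5tuple layout. Below is a minimal standalone sketch of that byte layout, using htons() from <arpa/inet.h> as a stand-in for rte_cpu_to_be_16() and inet_pton() in place of softnic_parse_ipv4_addr() (both substitutions are assumptions made so the sketch compiles without DPDK; addresses and ports are made up):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Local mirror of the packed key layout from the patch. */
struct pkt_key_ipv4_5tuple {
	uint8_t time_to_live;
	uint8_t proto;
	uint16_t hdr_checksum;
	uint32_t sa;
	uint32_t da;
	uint16_t sp;
	uint16_t dp;
} __attribute__((__packed__));

int main(void)
{
	uint8_t key[sizeof(struct pkt_key_ipv4_5tuple)] = {0};
	struct pkt_key_ipv4_5tuple *k = (struct pkt_key_ipv4_5tuple *)key;
	struct in_addr sa, da;

	/* inet_pton() yields the address already in network byte order,
	 * which is why the branch above stores s_addr without conversion. */
	inet_pton(AF_INET, "100.0.0.1", &sa);
	inet_pton(AF_INET, "200.0.0.2", &da);

	k->sa = sa.s_addr;
	k->da = da.s_addr;
	k->sp = htons(100);	/* stand-in for rte_cpu_to_be_16() */
	k->dp = htons(200);
	k->proto = 6;		/* TCP */

	/* Dump the key bytes as the table lookup would see them. */
	for (size_t i = 0; i < sizeof(*k); i++)
		printf("%02x ", (unsigned int)key[i]);
	printf("\n");
	return 0;
}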
MSG_ARG_INVALID, "dp"); + return 0; + } + + if (softnic_parser_read_uint8(&proto, tokens[7]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "proto"); + return 0; + } + + memcpy(ipv6->sa, saddr.s6_addr, 16); + memcpy(ipv6->da, daddr.s6_addr, 16); + ipv6->sp = rte_cpu_to_be_16(sp); + ipv6->dp = rte_cpu_to_be_16(dp); + ipv6->proto = proto; + + return 8; + } /* hash ipv6_5tuple */ + + if (strcmp(tokens[2], "ipv4_addr") == 0) { + struct pkt_key_ipv4_addr *ipv4_addr = + (struct pkt_key_ipv4_addr *)m->match.hash.key; + struct in_addr addr; + + if (n_tokens < 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + tokens[0]); + return 0; + } + + if (softnic_parse_ipv4_addr(tokens[3], &addr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "addr"); + return 0; + } + + ipv4_addr->addr = addr.s_addr; + + return 4; + } /* hash ipv4_addr */ + + if (strcmp(tokens[2], "ipv6_addr") == 0) { + struct pkt_key_ipv6_addr *ipv6_addr = + (struct pkt_key_ipv6_addr *)m->match.hash.key; + struct in6_addr addr; + + if (n_tokens < 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + tokens[0]); + return 0; + } + + if (softnic_parse_ipv6_addr(tokens[3], &addr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "addr"); + return 0; + } + + memcpy(ipv6_addr->addr, addr.s6_addr, 16); + + return 4; + } /* hash ipv6_5tuple */ + + if (strcmp(tokens[2], "qinq") == 0) { + struct pkt_key_qinq *qinq = + (struct pkt_key_qinq *)m->match.hash.key; + uint16_t svlan, cvlan; + + if (n_tokens < 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + tokens[0]); + return 0; + } + + if ((softnic_parser_read_uint16(&svlan, tokens[3]) != 0) || + svlan > 0xFFF) { + snprintf(out, out_size, MSG_ARG_INVALID, + "svlan"); + return 0; + } + + if ((softnic_parser_read_uint16(&cvlan, tokens[4]) != 0) || + cvlan > 0xFFF) { + snprintf(out, out_size, MSG_ARG_INVALID, + "cvlan"); + return 0; + } + + qinq->svlan = rte_cpu_to_be_16(svlan); + qinq->cvlan = rte_cpu_to_be_16(cvlan); + + return 5; + } /* hash qinq */ + + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return 0; + } /* hash */ + + if (strcmp(tokens[1], "lpm") == 0) { + if (n_tokens < 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return 0; + } + + m->match_type = TABLE_LPM; + + if (strcmp(tokens[2], "ipv4") == 0) { + struct in_addr addr; + + m->match.lpm.ip_version = 1; + + if (softnic_parse_ipv4_addr(tokens[3], &addr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "addr"); + return 0; + } + + m->match.lpm.ipv4 = rte_be_to_cpu_32(addr.s_addr); + } else if (strcmp(tokens[2], "ipv6") == 0) { + struct in6_addr addr; + + m->match.lpm.ip_version = 0; + + if (softnic_parse_ipv6_addr(tokens[3], &addr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "addr"); + return 0; + } + + memcpy(m->match.lpm.ipv6, addr.s6_addr, 16); + } else { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "ipv4 or ipv6"); + return 0; + } + + if (softnic_parser_read_uint8(&m->match.lpm.depth, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "depth"); + return 0; + } + + return 5; + } /* lpm */ + + snprintf(out, out_size, MSG_ARG_MISMATCH, + "acl or array or hash or lpm"); + return 0; +} + +/** + * table_action ::= + * + * action + * fwd + * drop + * | port + * | meta + * | table + * [balance ... ] + * [meter + * tc0 meter policer g y r + * [tc1 meter policer g y r + * tc2 meter policer g y r + * tc3 meter policer g y r ]] + * [tm subport pipe ] + * [encap + * ether + * | vlan + * | qinq + * | mpls unicast | multicast + * + * label0